From d7cd500d29f1dd05655726542678234c5e5d5b4c Mon Sep 17 00:00:00 2001
From: jbwang <1159270049@qq.com>
Date: Wed, 13 Aug 2025 10:38:38 +0800
Subject: [PATCH 001/145] mini

---
 .../vehicle_state/vehicle_parameters.py | 11 +
 .../dataset_specific/kitti_360/__init__ .py | 0
 .../kitti_360/kitti_360_data_converter.py | 456 ++++++++++++++++++
 .../default_dataset_conversion.yaml | 3 +-
 .../config/datasets/kitti360_dataset.yaml | 16 +
 .../code/hydra/config.yaml | 60 +++
 .../2025.08.11.15.45.36/code/hydra/hydra.yaml | 177 +++++++
 .../code/hydra/overrides.yaml | 1 +
 exp/my_run/2025.08.11.15.45.36/log.txt | 10 +
 jbwang_test.py | 68 +++
 notebooks/dataset/jbwang_test.py | 86 ++++
 notebooks/jbwang_viz_test.py | 252 ++++++++++
 notebooks/nuplan/nuplan_sensor_loading.ipynb | 27 +-
 requirements.txt | 2 +-
 14 files changed, 1165 insertions(+), 4 deletions(-)
 create mode 100644 d123/dataset/dataset_specific/kitti_360/__init__ .py
 create mode 100644 d123/dataset/dataset_specific/kitti_360/kitti_360_data_converter.py
 create mode 100644 d123/script/config/datasets/kitti360_dataset.yaml
 create mode 100644 exp/my_run/2025.08.11.15.45.36/code/hydra/config.yaml
 create mode 100644 exp/my_run/2025.08.11.15.45.36/code/hydra/hydra.yaml
 create mode 100644 exp/my_run/2025.08.11.15.45.36/code/hydra/overrides.yaml
 create mode 100644 exp/my_run/2025.08.11.15.45.36/log.txt
 create mode 100644 jbwang_test.py
 create mode 100644 notebooks/dataset/jbwang_test.py
 create mode 100644 notebooks/jbwang_viz_test.py

diff --git a/d123/common/datatypes/vehicle_state/vehicle_parameters.py b/d123/common/datatypes/vehicle_state/vehicle_parameters.py
index 8fe4d048..17480042 100644
--- a/d123/common/datatypes/vehicle_state/vehicle_parameters.py
+++ b/d123/common/datatypes/vehicle_state/vehicle_parameters.py
@@ -60,6 +60,17 @@ def get_wopd_pacifica_parameters() -> VehicleParameters:
         rear_axle_to_center_longitudinal=1.461,
     )

+def get_kitti360_station_wagon_parameters() -> VehicleParameters:
+    # TODO: all parameters except wheel_base still need to be verified
+    return VehicleParameters(
+        vehicle_name="kitti360_station_wagon",
+        width=2.297,
+        length=5.176,
+        height=1.400,
+        wheel_base=2.710,
+        rear_axle_to_center_vertical=0.45,
+        rear_axle_to_center_longitudinal=1.461,
+    )

 def center_se3_to_rear_axle_se3(center_se3: StateSE3, vehicle_parameters: VehicleParameters) -> StateSE3:
     """
diff --git a/d123/dataset/dataset_specific/kitti_360/__init__ .py b/d123/dataset/dataset_specific/kitti_360/__init__ .py
new file mode 100644
index 00000000..e69de29b
diff --git a/d123/dataset/dataset_specific/kitti_360/kitti_360_data_converter.py b/d123/dataset/dataset_specific/kitti_360/kitti_360_data_converter.py
new file mode 100644
index 00000000..b6e97d8c
--- /dev/null
+++ b/d123/dataset/dataset_specific/kitti_360/kitti_360_data_converter.py
@@ -0,0 +1,456 @@
+import gc
+import json
+import os
+from dataclasses import asdict
+from functools import partial
+from pathlib import Path
+from typing import Any, Dict, Final, List, Optional, Tuple, Union
+
+import numpy as np
+import datetime
+import hashlib
+import pyarrow as pa
+from PIL import Image
+from nuplan.planning.utils.multithreading.worker_utils import WorkerPool, worker_map
+
+from d123.common.datatypes.sensor.camera import CameraMetadata, CameraType, camera_metadata_dict_to_json
+from d123.common.datatypes.time.time_point import TimePoint
+from d123.common.datatypes.vehicle_state.ego_state import EgoStateSE3Index
+from d123.common.datatypes.vehicle_state.vehicle_parameters import get_kitti360_station_wagon_parameters
+from 
d123.common.geometry.bounding_box.bounding_box import BoundingBoxSE3Index
+from d123.common.geometry.vector import Vector3DIndex
+from d123.dataset.arrow.helper import open_arrow_table, write_arrow_table
+from d123.dataset.dataset_specific.raw_data_converter import DataConverterConfig, RawDataConverter
+from d123.dataset.logs.log_metadata import LogMetadata
+
+KITTI360_DT: Final[float] = 0.1
+SORT_BY_TIMESTAMP: Final[bool] = True
+
+KITTI360_DATA_ROOT = Path(os.environ["KITTI360_DATA_ROOT"])
+
+# TODO: camera naming mismatch
+KITTI360_CAMERA_TYPES: Final[Dict[CameraType, str]] = {
+    CameraType.CAM_L0: "image_00",
+    CameraType.CAM_R0: "image_01",
+    # TODO fisheye camera
+    # CameraType.CAM_L1: "image_02",
+    # CameraType.CAM_R1: "image_03",
+}
+
+DIR_2D_RAW = "data_2d_raw"
+DIR_2D_SMT = "data_2d_semantics"
+DIR_3D_RAW = "data_3d_raw"
+DIR_3D_SMT = "data_3d_semantics"
+DIR_3D_BBOX = "data_3d_bboxes"
+DIR_POSES = "data_poses"
+DIR_CALIB = "calibration"
+
+# TODO: PATH_2D_RAW_ROOT currently points at the dataset root, not at DIR_2D_RAW
+PATH_2D_RAW_ROOT: Path = KITTI360_DATA_ROOT
+PATH_2D_SMT_ROOT: Path = KITTI360_DATA_ROOT / DIR_2D_SMT
+PATH_3D_RAW_ROOT: Path = KITTI360_DATA_ROOT / DIR_3D_RAW
+PATH_3D_SMT_ROOT: Path = KITTI360_DATA_ROOT / DIR_3D_SMT
+PATH_3D_BBOX_ROOT: Path = KITTI360_DATA_ROOT / DIR_3D_BBOX
+PATH_POSES_ROOT: Path = KITTI360_DATA_ROOT / DIR_POSES
+PATH_CALIB_ROOT: Path = KITTI360_DATA_ROOT / DIR_CALIB
+
+KITTI360_REQUIRED_MODALITY_ROOTS: Dict[str, Path] = {
+    DIR_2D_RAW: PATH_2D_RAW_ROOT,
+    # DIR_2D_SMT: PATH_2D_SMT_ROOT,
+    # DIR_3D_RAW: PATH_3D_RAW_ROOT,
+    # DIR_3D_SMT: PATH_3D_SMT_ROOT,
+    # DIR_3D_BBOX: PATH_3D_BBOX_ROOT,
+    # DIR_POSES: PATH_POSES_ROOT,
+}
+
+
+def create_token(input_data: str) -> str:
+    # TODO: Refactor this function.
+    # TODO: Add a general function to create tokens from arbitrary data.
+    if isinstance(input_data, str):
+        input_data = input_data.encode("utf-8")
+
+    hash_obj = hashlib.sha256(input_data)
+    return hash_obj.hexdigest()[:16]
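[Editor's note on the token scheme above: tokens are deterministic 16-hex-character prefixes of a SHA-256 digest, so the same "{log_name}_{index}" key always maps to the same token. A minimal, self-contained sketch, using only hashlib and mirroring the function as written:

import hashlib

def create_token(input_data: str) -> str:
    # Same scheme as the converter: UTF-8 encode, SHA-256, keep the first 16 hex chars.
    return hashlib.sha256(input_data.encode("utf-8")).hexdigest()[:16]

token = create_token("2013_05_28_drive_0000_sync_0")
assert token == create_token("2013_05_28_drive_0000_sync_0")  # deterministic
assert len(token) == 16
]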
+
+def _load_calibration() -> Tuple[Dict[str, np.ndarray], Dict[str, np.ndarray]]:
+    """
+    Read the global KITTI-360 calibration files and return:
+    - intrinsics[image_02] = 3x3 camera matrix
+    - c2e[image_02] = 4x4 (camera->ego/body); here cam_to_pose is treated as camera->vehicle (a simplification)
+    """
+    calib_dir = KITTI360_DATA_ROOT / DIR_CALIB
+    intrinsics: Dict[str, np.ndarray] = {}
+    c2e: Dict[str, np.ndarray] = {}
+
+    # Intrinsics: the P_rect_0{0..3} entries in perspective.txt
+    persp = calib_dir / "perspective.txt"
+    if persp.exists():
+        with open(persp, "r") as f:
+            lines = [ln.strip() for ln in f if ln.strip()]
+        for ln in lines:
+            if ln.startswith("P_rect_02"):
+                intrinsics["image_02"] = _read_projection_matrix(ln)
+            elif ln.startswith("P_rect_03"):
+                intrinsics["image_03"] = _read_projection_matrix(ln)
+
+    # Extrinsics: the Tr_cam02 entry in cam_to_pose.txt (camera to vehicle/pose)
+    c2p = calib_dir / "cam_to_pose.txt"
+    if c2p.exists():
+        with open(c2p, "r") as f:
+            lines = [ln.strip() for ln in f if ln.strip()]
+        for ln in lines:
+            if ln.startswith("Tr_cam02"):
+                vals = [float(x) for x in ln.split(":")[1].strip().split()]
+                T = np.array(vals, dtype=np.float64).reshape(4, 4)
+                c2e["image_02"] = T
+            elif ln.startswith("Tr_cam03"):
+                vals = [float(x) for x in ln.split(":")[1].strip().split()]
+                T = np.array(vals, dtype=np.float64).reshape(4, 4)
+                c2e["image_03"] = T
+
+    return intrinsics, c2e
+
+class Kitti360DataConverter(RawDataConverter):
+    def __init__(
+        self,
+        splits: List[str],
+        log_path: Union[Path, str],
+        data_converter_config: DataConverterConfig,
+    ) -> None:
+        super().__init__(data_converter_config)
+        for split in splits:
+            assert (
+                split in self.get_available_splits()
+            ), f"Split {split} is not available. Available splits: {self.get_available_splits()}"
+
+        self._splits: List[str] = splits
+        self._log_path: Path = Path(log_path)
+        self._log_paths_per_split: Dict[str, List[Path]] = self._collect_log_paths()
+
+    def _collect_log_paths(self) -> Dict[str, List[Path]]:
+        """
+        Collect candidate sequence folders under data_2d_raw that end with '_sync',
+        and keep only those sequences that are present in ALL required modality roots
+        (e.g., data_2d_semantics, data_3d_raw, etc.).
+        """
+        missing_roots = [str(p) for p in KITTI360_REQUIRED_MODALITY_ROOTS.values() if not p.exists()]
+        if missing_roots:
+            raise FileNotFoundError(f"KITTI-360 required roots missing: {missing_roots}")
+
+        # Enumerate candidate sequences from data_2d_raw
+        candidates = sorted(p for p in PATH_2D_RAW_ROOT.iterdir() if p.is_dir() and p.name.endswith("_sync"))
+
+        valid_seqs: List[Path] = []
+        for seq_dir in candidates:
+            seq_name = seq_dir.name
+            missing_modalities = [
+                modality_name
+                for modality_name, root in KITTI360_REQUIRED_MODALITY_ROOTS.items()
+                if not (root / seq_name).exists()
+            ]
+            if not missing_modalities:
+                valid_seqs.append(seq_dir)  # KITTI360_DATA_ROOT / DIR_2D_RAW / seq_name
+            # TODO: warn about sequences skipped due to missing modalities
+            # else:
+            #     warnings.warn(
+            #         f"Sequence '{seq_name}' skipped: missing modalities {missing_modalities}. "
+            #         f"Root: {KITTI360_DATA_ROOT}"
+            #     )
+        return {"kitti360": valid_seqs}
+
+    def get_available_splits(self) -> List[str]:
+        """Returns a list of available raw data types."""
+        return ["kitti360"]
+
+    def convert_maps(self, worker: WorkerPool) -> None:
+        print("KITTI-360 does not provide standard maps. 
Skipping map conversion.") + return None + + def convert_logs(self, worker: WorkerPool) -> None: + log_args = [ + { + "log_path": log_path, + "split": split, + } + for split, log_paths in self._log_paths_per_split.items() + for log_path in log_paths + ] + + worker_map( + worker, + partial( + convert_kitti360_log_to_arrow, + data_converter_config=self.data_converter_config, + ), + log_args, + ) + +def convert_kitti360_log_to_arrow( + args: List[Dict[str, Union[List[str], List[Path]]]], data_converter_config: DataConverterConfig +) -> List[Any]: + + for log_info in args: + log_path: Path = log_info["log_path"] + split: str = log_info["split"] + log_name = log_path.stem + + if not log_path.exists(): + raise FileNotFoundError(f"Log path {log_path} does not exist.") + log_file_path = data_converter_config.output_path / split / f"{log_name}.arrow" + + if data_converter_config.force_log_conversion or not log_file_path.exists(): + log_file_path.unlink(missing_ok=True) + if not log_file_path.parent.exists(): + log_file_path.parent.mkdir(parents=True, exist_ok=True) + + schema_column_list = [ + ("token", pa.string()), + ("timestamp", pa.int64()), + ("detections_state", pa.list_(pa.list_(pa.float64(), len(BoundingBoxSE3Index)))), + ("detections_velocity", pa.list_(pa.list_(pa.float64(), len(Vector3DIndex)))), + ("detections_token", pa.list_(pa.string())), + ("detections_type", pa.list_(pa.int16())), + ("ego_states", pa.list_(pa.float64(), len(EgoStateSE3Index))), + ("traffic_light_ids", pa.list_(pa.int64())), + ("traffic_light_types", pa.list_(pa.int16())), + ("scenario_tag", pa.list_(pa.string())), + ("route_lane_group_ids", pa.list_(pa.int64())), + ] + if data_converter_config.lidar_store_option is not None: + if data_converter_config.lidar_store_option == "path": + schema_column_list.append(("lidar", pa.string())) + elif data_converter_config.lidar_store_option == "binary": + raise NotImplementedError("Binary lidar storage is not implemented.") + + # TODO: Adjust how cameras are added + if data_converter_config.camera_store_option is not None: + for cam_type in KITTI360_CAMERA_TYPES.keys(): + if data_converter_config.camera_store_option == "path": + schema_column_list.append((cam_type.serialize(), pa.string())) + schema_column_list.append((f"{cam_type.serialize()}_extrinsic", pa.list_(pa.float64(), 16))) + elif data_converter_config.camera_store_option == "binary": + raise NotImplementedError("Binary camera storage is not implemented.") + + recording_schema = pa.schema(schema_column_list) + #TODO location + metadata = LogMetadata( + dataset="kitti360", + log_name=log_name, + location="None", + timestep_seconds=KITTI360_DT, + map_has_z=False, + ) + + #TODO vehicle parameters + vehicle_parameters = get_kitti360_station_wagon_parameters() + camera_metadata = get_kitti360_camera_metadata() + recording_schema = recording_schema.with_metadata( + { + "log_metadata": json.dumps(asdict(metadata)), + "vehicle_parameters": json.dumps(asdict(vehicle_parameters)), + "camera_metadata": camera_metadata_dict_to_json(camera_metadata), + } + ) + + _write_recording_table(log_name, recording_schema, log_file_path, data_converter_config) + + gc.collect() + return [] + + +def get_kitti360_camera_metadata() -> Dict[str, CameraMetadata]: + + persp = PATH_CALIB_ROOT / "perspective.txt" + + assert persp.exists() + result = {"image_00": {}, "image_01": {}} + + with open(persp, "r") as f: + lines = [ln.strip() for ln in f if ln.strip()] + for ln in lines: + key, value = ln.split(" ", 1) + cam_id = key.split("_")[-1][:2] + if 
key.startswith("P_rect_"): + result[f"image_{cam_id}"]["intrinsic"] = _read_projection_matrix(ln) + elif key.startswith("S_rect_"): + result[f"image_{cam_id}"]["wh"] = [int(round(float(x))) for x in value.split()] + elif key.startswith("D_"): + result[f"image_{cam_id}"]["distortion"] = [float(x) for x in value.split()] + + log_cam_infos: Dict[str, CameraMetadata] = {} + for cam_type, cam_name in KITTI360_CAMERA_TYPES.items(): + log_cam_infos[cam_type.serialize()] = CameraMetadata( + camera_type=cam_type, + width=result[cam_name]["wh"][0], + height=result[cam_name]["wh"][1], + intrinsic=np.array(result[cam_name]["intrinsic"]), + distortion=np.array(result[cam_name]["distortion"]), + ) + return log_cam_infos + +def _read_projection_matrix(p_line: str) -> np.ndarray: + parts = p_line.split(" ", 1) + if len(parts) != 2: + raise ValueError(f"Bad projection line: {p_line}") + vals = [float(x) for x in parts[1].strip().split()] + P = np.array(vals, dtype=np.float64).reshape(3, 4) + K = P[:, :3] + return K + +def _write_recording_table( + log_name: str, + recording_schema: pa.Schema, + log_file_path: Path, + data_converter_config: DataConverterConfig +) -> None: + + ts_list = _read_timestamps(log_name) + + with pa.OSFile(str(log_file_path), "wb") as sink: + with pa.ipc.new_file(sink, recording_schema) as writer: + for i, tp in enumerate(ts_list): + row_data = { + "token": [create_token(f"{log_name}_{i}")], + "timestamp": [tp.time_us], + "detections_state": [], + "detections_velocity": [], + "detections_token": [], + "detections_type": [], + "ego_states": [], + "traffic_light_ids": [], + "traffic_light_types": [], + "scenario_tag": [], + "route_lane_group_ids": [], + } + + if data_converter_config.lidar_store_option is not None: + row_data["lidar"] = [] + # row_data["lidar"] = [_extract_lidar(log_name, data_converter_config)] + + if data_converter_config.camera_store_option is not None: + # camera_data_dict = _extract_camera(log_db, lidar_pc, source_log_path, data_converter_config) + camera_data_dict = {} + for camera_type, camera_data in camera_data_dict.items(): + if camera_data is not None: + row_data[camera_type.serialize()] = [camera_data[0]] + row_data[f"{camera_type.serialize()}_extrinsic"] = [camera_data[1]] + else: + row_data[camera_type.serialize()] = [None] + row_data[f"{camera_type.serialize()}_extrinsic"] = [None] + + batch = pa.record_batch(row_data, schema=recording_schema) + writer.write_batch(batch) + + if SORT_BY_TIMESTAMP: + recording_table = open_arrow_table(log_file_path) + recording_table = recording_table.sort_by([("timestamp", "ascending")]) + write_arrow_table(recording_table, log_file_path) + +#TODO default timestamps +# If timestamps are not provided, we can generate them based on the KITTI-360 DT +def _read_timestamps(log_name: str) -> Optional[List[TimePoint]]: + + ts_file = PATH_2D_RAW_ROOT / log_name / "image_01" / "timestamps.txt" + if ts_file.exists(): + tps: List[TimePoint] = [] + with open(ts_file, "r") as f: + for line in f: + s = line.strip() + if not s: + continue + dt_str, ns_str = s.split('.') + dt_obj = datetime.datetime.strptime(dt_str, "%Y-%m-%d %H:%M:%S") + dt_obj = dt_obj.replace(tzinfo=datetime.timezone.utc) + unix_epoch = datetime.datetime(1970, 1, 1, tzinfo=datetime.timezone.utc) + + total_seconds = (dt_obj - unix_epoch).total_seconds() + + ns_value = int(ns_str) + us_from_ns = ns_value // 1000 + + total_us = int(total_seconds * 1_000_000) + us_from_ns + + tps.append(TimePoint.from_us(total_us)) + return tps + return None + +#TODO lidar extraction 
+
+# TODO: lidar extraction
+def _extract_lidar(log_name: str, data_converter_config: DataConverterConfig) -> Optional[str]:
+    lidar: Optional[str] = None
+    lidar_full_path = PATH_3D_SMT_ROOT / "train" / log_name / "0000000002_0000000385.ply"
+    if lidar_full_path.exists():
+        if data_converter_config.lidar_store_option == "path":
+            lidar = f"{log_name}/lidar/{lidar_full_path.stem}.npy"  # TODO: derive the real per-sample name
+        elif data_converter_config.lidar_store_option == "binary":
+            raise NotImplementedError("Binary lidar storage is not implemented.")
+    else:
+        raise FileNotFoundError(f"LiDAR file not found: {lidar_full_path}")
+    return lidar
+
+def _extract_camera():
+    pass
+
+
+
+# for idx in range(n_frames):
+#     token = f"{seq_name}_{idx:06d}"
+#     t_us = ts_list[idx].time_us
+
+#     row = {
+#         "token": [token],
+#         "timestamp": [t_us],
+#         # Leave these empty / as placeholders for now; they will later be replaced with real annotations
+#         "detections_state": [[]],
+#         "detections_velocity": [[]],
+#         "detections_token": [[]],
+#         "detections_type": [[]],
+#         "ego_states": [([0.0] * len(EgoStateSE3Index))],  # placeholder
+#         "traffic_light_ids": [[]],
+#         "traffic_light_types": [[]],
+#         "scenario_tag": [["unknown"]],
+#         "route_lane_group_ids": [[]],
+#     }
+
+#     # lidar path (if present)
+#     if data_converter_config.lidar_store_option is not None:
+#         # velodyne bin: KITTI-360/data_3d_raw//velodyne_points/data/0000000000.bin
+#         velodyne_dir = (
+#             KITTI360_DATA_ROOT / DIR_3D / seq_name / "velodyne_points" / "data"
+#         )
+#         # The filename may be zero-padded to 10 digits; try a few widths here
+#         bin_path = None
+#         for fmt in [f"{idx:010d}.bin", f"{idx:06d}.bin", f"{idx:08d}.bin"]:
+#             cand = velodyne_dir / fmt
+#             if cand.exists():
+#                 bin_path = cand
+#                 break
+#         row["lidar"] = [str(bin_path.relative_to(KITTI360_DATA_ROOT)) if bin_path else None]
+
+#     # camera paths and extrinsics
+#     if data_converter_config.camera_store_option is not None:
+#         for cam_type, cam_dir_name in KITTI360_CAMERA_TYPES.items():
+#             img_dir = seq_dir_2d / cam_dir_name / "data"
+#             # try several filename paddings
+#             img_path = None
+#             for ext in (".png", ".jpg", ".jpeg"):
+#                 for fmt in [f"{idx:010d}{ext}", f"{idx:06d}{ext}", f"{idx:08d}{ext}"]:
+#                     cand = img_dir / fmt
+#                     if cand.exists():
+#                         img_path = cand
+#                         break
+#                 if img_path:
+#                     break
+#             if img_path is not None:
+#                 rel = str(img_path.relative_to(KITTI360_DATA_ROOT))
+#                 row[cam_type.serialize()] = [rel]
+#                 # Extrinsics: fixed cam->ego from the global calibration, constant per frame
+#                 # (swap in per-frame poses / rolling-shutter handling here if needed)
+#                 T = c2e.get(KITTI360_CAMERA_TYPES[cam_type], np.eye(4, dtype=np.float64))
+#                 row[f"{cam_type.serialize()}_extrinsic"] = [T.astype(np.float64).reshape(-1).tolist()]
+#             else:
+#                 row[cam_type.serialize()] = [None]
+#                 row[f"{cam_type.serialize()}_extrinsic"] = [None]
+
+#     batch = pa.record_batch(row, schema=recording_schema)
+#     writer.write_batch(batch)
+#     del batch, row
\ No newline at end of file
diff --git a/d123/script/config/dataset_conversion/default_dataset_conversion.yaml b/d123/script/config/dataset_conversion/default_dataset_conversion.yaml
index cceb2911..bc48ed00 100644
--- a/d123/script/config/dataset_conversion/default_dataset_conversion.yaml
+++ b/d123/script/config/dataset_conversion/default_dataset_conversion.yaml
@@ -15,9 +15,10 @@ defaults:
   - default_dataset_paths
   - _self_
   - datasets:
+      - kitti360_dataset
       # - nuplan_private_dataset
       # - carla_dataset
-      - wopd_dataset
+      # - wopd_dataset

 force_log_conversion: False
 force_map_conversion: True
diff --git a/d123/script/config/datasets/kitti360_dataset.yaml b/d123/script/config/datasets/kitti360_dataset.yaml
new file mode 100644
index 00000000..418d36a4
--- /dev/null
+++ b/d123/script/config/datasets/kitti360_dataset.yaml
@@ -0,0 +1,16 @@
+kitti360_dataset:
+  _target_: 
d123.dataset.dataset_specific.kitti_360.kitti_360_data_converter.Kitti360DataConverter + _convert_: 'all' + + splits: ["kitti360"] + log_path: ${oc.env:KITTI360_DATA_ROOT} + + data_converter_config: + _target_: d123.dataset.dataset_specific.raw_data_converter.DataConverterConfig + _convert_: 'all' + + output_path: ${d123_data_root} + force_log_conversion: ${force_log_conversion} + force_map_conversion: ${force_map_conversion} + camera_store_option: "path" + lidar_store_option: "path" diff --git a/exp/my_run/2025.08.11.15.45.36/code/hydra/config.yaml b/exp/my_run/2025.08.11.15.45.36/code/hydra/config.yaml new file mode 100644 index 00000000..86d05e7b --- /dev/null +++ b/exp/my_run/2025.08.11.15.45.36/code/hydra/config.yaml @@ -0,0 +1,60 @@ +worker: + _target_: nuplan.planning.utils.multithreading.worker_ray.RayDistributed + _convert_: all + master_node_ip: null + threads_per_node: null + debug_mode: false + log_to_driver: true + logs_subdir: logs + use_distributed: false +scene_filter: + _target_: d123.dataset.scene.scene_filter.SceneFilter + _convert_: all + split_types: null + split_names: null + log_names: null + map_names: null + scene_tokens: null + timestamp_threshold_s: null + ego_displacement_minimum_m: null + duration_s: 9.2 + history_s: 3.0 +scene_builder: + _target_: d123.dataset.scene.scene_builder.ArrowSceneBuilder + _convert_: all + dataset_path: ${d123_data_root} +distributed_timeout_seconds: 7200 +selected_simulation_metrics: null +verbose: false +logger_level: info +logger_format_string: null +max_number_of_workers: null +gpu: true +seed: 42 +d123_devkit_root: ${oc.env:D123_DEVKIT_ROOT} +d123_maps_root: ${oc.env:D123_MAPS_ROOT} +d123_data_root: ${oc.env:D123_DATA_ROOT} +nuplan_devkit_root: ${oc.env:NUPLAN_DEVKIT_ROOT} +nuplan_maps_root: ${oc.env:NUPLAN_MAPS_ROOT} +nuplan_data_root: ${oc.env:NUPLAN_DATA_ROOT} +experiment_name: my_run +date_format: '%Y.%m.%d.%H.%M.%S' +experiment_uid: ${now:${date_format}} +output_dir: ${oc.env:D123_EXP_ROOT}/${experiment_name}/${experiment_uid} +force_log_conversion: false +force_map_conversion: true +datasets: + nuplan_private_dataset: + _target_: d123.dataset.dataset_specific.nuplan.nuplan_data_converter.NuplanDataConverter + _convert_: all + splits: + - nuplan_private_test + log_path: ${oc.env:NUPLAN_DATA_ROOT}/nuplan-v1.1/splits + data_converter_config: + _target_: d123.dataset.dataset_specific.raw_data_converter.DataConverterConfig + _convert_: all + output_path: ${d123_data_root} + force_log_conversion: ${force_log_conversion} + force_map_conversion: ${force_map_conversion} + camera_store_option: path + lidar_store_option: path diff --git a/exp/my_run/2025.08.11.15.45.36/code/hydra/hydra.yaml b/exp/my_run/2025.08.11.15.45.36/code/hydra/hydra.yaml new file mode 100644 index 00000000..bf09b447 --- /dev/null +++ b/exp/my_run/2025.08.11.15.45.36/code/hydra/hydra.yaml @@ -0,0 +1,177 @@ +hydra: + run: + dir: ${output_dir} + sweep: + dir: multirun/${now:%Y-%m-%d}/${now:%H-%M-%S} + subdir: ${hydra.job.num} + launcher: + _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher + sweeper: + _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper + max_batch_size: null + params: null + help: + app_name: ${hydra.job.name} + header: '${hydra.help.app_name} is powered by Hydra. 
+ + ' + footer: 'Powered by Hydra (https://hydra.cc) + + Use --hydra-help to view Hydra specific help + + ' + template: '${hydra.help.header} + + == Configuration groups == + + Compose your configuration from those groups (group=option) + + + $APP_CONFIG_GROUPS + + + == Config == + + Override anything in the config (foo.bar=value) + + + $CONFIG + + + ${hydra.help.footer} + + ' + hydra_help: + template: 'Hydra (${hydra.runtime.version}) + + See https://hydra.cc for more info. + + + == Flags == + + $FLAGS_HELP + + + == Configuration groups == + + Compose your configuration from those groups (For example, append hydra/job_logging=disabled + to command line) + + + $HYDRA_CONFIG_GROUPS + + + Use ''--cfg hydra'' to Show the Hydra config. + + ' + hydra_help: ??? + hydra_logging: + version: 1 + formatters: + colorlog: + (): colorlog.ColoredFormatter + format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' + handlers: + console: + class: logging.StreamHandler + formatter: colorlog + stream: ext://sys.stdout + root: + level: INFO + handlers: + - console + disable_existing_loggers: false + job_logging: + version: 1 + formatters: + simple: + format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' + colorlog: + (): colorlog.ColoredFormatter + format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] + - %(message)s' + log_colors: + DEBUG: purple + INFO: green + WARNING: yellow + ERROR: red + CRITICAL: red + handlers: + console: + class: logging.StreamHandler + formatter: colorlog + stream: ext://sys.stdout + file: + class: logging.FileHandler + formatter: simple + filename: ${hydra.job.name}.log + root: + level: INFO + handlers: + - console + - file + disable_existing_loggers: false + env: {} + mode: RUN + searchpath: + - pkg://d123.script.config + - pkg://d123.script.config.common + callbacks: {} + output_subdir: ${output_dir}/code/hydra + overrides: + hydra: + - hydra.mode=RUN + task: + - experiment_name=my_run + job: + name: run_dataset_conversion + chdir: false + override_dirname: experiment_name=my_run + id: ??? + num: ??? 
+ config_name: default_dataset_conversion + env_set: {} + env_copy: [] + config: + override_dirname: + kv_sep: '=' + item_sep: ',' + exclude_keys: [] + runtime: + version: 1.3.2 + version_base: '1.3' + cwd: /home/jbwang/d123/d123/script + config_sources: + - path: hydra.conf + schema: pkg + provider: hydra + - path: /home/jbwang/d123/d123/script/config/dataset_conversion + schema: file + provider: main + - path: hydra_plugins.hydra_colorlog.conf + schema: pkg + provider: hydra-colorlog + - path: d123.script.config + schema: pkg + provider: hydra.searchpath in main + - path: d123.script.config.common + schema: pkg + provider: hydra.searchpath in main + - path: '' + schema: structured + provider: schema + output_dir: /home/jbwang/d123/exp/my_run/2025.08.11.15.45.36 + choices: + scene_builder: default_scene_builder + scene_filter: all_scenes + worker: ray_distributed + hydra/env: default + hydra/callbacks: null + hydra/job_logging: colorlog + hydra/hydra_logging: colorlog + hydra/hydra_help: default + hydra/help: default + hydra/sweeper: basic + hydra/launcher: basic + hydra/output: default + verbose: false diff --git a/exp/my_run/2025.08.11.15.45.36/code/hydra/overrides.yaml b/exp/my_run/2025.08.11.15.45.36/code/hydra/overrides.yaml new file mode 100644 index 00000000..373bde0c --- /dev/null +++ b/exp/my_run/2025.08.11.15.45.36/code/hydra/overrides.yaml @@ -0,0 +1 @@ +- experiment_name=my_run diff --git a/exp/my_run/2025.08.11.15.45.36/log.txt b/exp/my_run/2025.08.11.15.45.36/log.txt new file mode 100644 index 00000000..2bdc0b60 --- /dev/null +++ b/exp/my_run/2025.08.11.15.45.36/log.txt @@ -0,0 +1,10 @@ +2025-08-11 15:45:36,813 INFO {/home/jbwang/d123/d123/script/builders/worker_pool_builder.py:19} Building WorkerPool... +2025-08-11 15:46:10,300 INFO {/data/jbwang/submodule/nuplan-devkit-master/nuplan/planning/utils/multithreading/worker_ray.py:78} Starting ray local! +2025-08-11 15:46:34,960 INFO {/data/jbwang/submodule/nuplan-devkit-master/nuplan/planning/utils/multithreading/worker_pool.py:101} Worker: RayDistributed +2025-08-11 15:46:34,962 INFO {/data/jbwang/submodule/nuplan-devkit-master/nuplan/planning/utils/multithreading/worker_pool.py:102} Number of nodes: 1 +Number of CPUs per node: 64 +Number of GPUs per node: 8 +Number of threads across all nodes: 64 +2025-08-11 15:46:34,962 INFO {/home/jbwang/d123/d123/script/builders/worker_pool_builder.py:27} Building WorkerPool...DONE! +2025-08-11 15:46:34,963 INFO {/home/jbwang/d123/d123/script/run_dataset_conversion.py:30} Starting Dataset Caching... +2025-08-11 15:46:34,964 INFO {/home/jbwang/d123/d123/script/builders/data_converter_builder.py:14} Building RawDataProcessor... 
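[Editor's note, for orientation between the config and log above: the datasets node captured in config.yaml is what the conversion script ultimately turns into converter objects. A minimal sketch of that resolution step, assuming only standard Hydra instantiate semantics (cfg is the composed DictConfig; the helper name is illustrative, not the repo's actual builder):

from hydra.utils import instantiate

def build_data_converters(cfg):
    # Each entry under cfg.datasets carries a _target_ (e.g. Kitti360DataConverter)
    # plus its constructor arguments; instantiate() resolves and constructs it.
    return [instantiate(node) for node in cfg.datasets.values()]
]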
diff --git a/jbwang_test.py b/jbwang_test.py
new file mode 100644
index 00000000..ac3afac5
--- /dev/null
+++ b/jbwang_test.py
@@ -0,0 +1,68 @@
+# from nuplan.database.nuplan_db_orm.nuplandb import NuPlanDB
+
+# # # Open the database file
+# # db = NuPlanDB(db_path="/nas/datasets/nuplan/nuplan-v1.1/splits/mini/2021.05.12.22.00.38_veh-35_01008_01518.db")
+# NUPLAN_DATA_ROOT = "/nas/datasets/nuplan/nuplan-v1.1/splits/mini"
+# log_path
+# log_db = NuPlanDB(NUPLAN_DATA_ROOT, str(log_path), None)
+
+# # Fetch frame 1050
+# frame = db.get_frame(1050)
+# img_front = frame.camera_front  # front-view image
+# point_cloud = frame.lidar  # point cloud
+
+# # Fetch all vehicle states for this clip
+# status_data = db.get_vehicle_status()  # returns a DataFrame
+# print(status_data)
+
+
+
+# from d123.dataset.dataset_specific.nuplan.nuplan_data_converter import NuplanDataConverter, DataConverterConfig
+# splits = ["nuplan_mini_train"]
+# log_path = "/nas/datasets/nuplan/nuplan-v1.1/splits/mini/"
+# converter = NuplanDataConverter(
+#     log_path=log_path,
+#     splits=splits,
+#     data_converter_config=DataConverterConfig(output_path="data/jbwang/d123"),
+# )
+# # converter.convert_logs()
+from pathlib import Path
+log_paths_per_split = {
+    "nuplan_mini_train": [
+        "2021", "2022"]
+}
+log_args = [
+    {
+        "log_path": log_path,
+        "split": split,
+    }
+    for split, log_paths in log_paths_per_split.items()
+    for log_path in log_paths
+]
+PATH_2D_RAW_ROOT = Path("/nas/datasets/KITTI-360/data_3d_raw/")
+candidates = sorted(p for p in PATH_2D_RAW_ROOT.iterdir() if p.is_dir() and p.name.endswith("_sync"))
+# print(log_args)
+# print(candidates)
+# print(candidates[0].name)
+# print(candidates[0].stem)
+# print(type(candidates[0].name))
+# print(type(candidates[0].stem))
+# PATH_2D_RAW_ROOT_new = PATH_2D_RAW_ROOT/"123"/candidates[0].name
+# print(PATH_2D_RAW_ROOT_new)
+
+
+
+# import hashlib
+# def create_token(input_data: str) -> str:
+#     # TODO: Refactor this function.
+#     # TODO: Add a general function to create tokens from arbitrary data.
+#     if isinstance(input_data, str):
+#         input_data = input_data.encode("utf-8")
+
+#     hash_obj = hashlib.sha256(input_data)
+#     return hash_obj.hexdigest()[:16]
+
+# log_name = "1230_asd_"
+# for i in range(20):
+#     a = create_token(f"{log_name}_{i}")
+#     print(a)
diff --git a/notebooks/dataset/jbwang_test.py b/notebooks/dataset/jbwang_test.py
new file mode 100644
index 00000000..caaa3201
--- /dev/null
+++ b/notebooks/dataset/jbwang_test.py
@@ -0,0 +1,86 @@
+s3_uri = "/data/jbwang/d123/data/nuplan_mini_train/2021.10.11.07.12.18_veh-50_00211_00304.arrow"
+# s3_uri = "/data/jbwang/d123/data/nuplan_private_test/2021.09.22.13.20.34_veh-28_01446_01583.arrow"
+# s3_uri = "/data/jbwang/d123/data/carla/_Rep0_routes_validation1_route0_07_23_14_33_15.arrow"
+# s3_uri = "/data/jbwang/d123/data/nuplan_mini_val/2021.06.07.12.54.00_veh-35_01843_02314.arrow"
+
+import pyarrow as pa
+import pyarrow.fs as fs
+import pyarrow.dataset as ds
+
+import os
+
+s3_fs = fs.S3FileSystem()
+from d123.common.utils.timer import Timer
+
+
+timer = Timer()
+timer.start()
+
+dataset = ds.dataset(f"{s3_uri}", format="ipc")
+timer.log("1. Dataset loaded")
+
+# Get all column names and remove the ones you want to drop
+all_columns = dataset.schema.names
+# print("all_columns", all_columns)
+# print("Schema:")
+# print(dataset.schema)
+# columns_to_keep = [col for col in all_columns if col not in ["front_cam_demo", "front_cam_transform"]]
+timer.log("2. 
Columns filtered") + +table = dataset.to_table(columns=all_columns) +# print("table",table) +# print(table["token"]) +for col in table.column_names: + if col == "lidar": + continue + print(f"Column: {col}, Type: {table.schema.field(col).type}") + tokens = table[col] # 或 table.column("token") + # print(len(tokens)) + print(tokens.slice(0, 4).to_pylist()) +# print(table["traffic_light_ids"]) +timer.log("3. Table created") +# Save locally +# with pa.ipc.new_file("filtered_file.arrow", table.schema) as writer: +# writer.write_table(table) +timer.log("4. Table saved locally") + +timer.end() +timer.stats(verbose=False) + +# 查看nuplan数据库的表结构和内容 + +# from pathlib import Path +# from nuplan.database.nuplan_db_orm.nuplandb import NuPlanDB +# from nuplan.database.nuplan_db_orm.lidar_pc import LidarPc +# from sqlalchemy import inspect, select +# from sqlalchemy.orm import Session +# from sqlalchemy import func +# from nuplan.database.nuplan_db_orm.ego_pose import EgoPose + +# NUPLAN_DATA_ROOT = Path("/nas/datasets/nuplan/") # 按你实际路径 +# log_path = "/nas/datasets/nuplan/nuplan-v1.1/splits/mini/2021.05.12.22.00.38_veh-35_01008_01518.db" + +# db = NuPlanDB(NUPLAN_DATA_ROOT, log_path, None) +# # print(db.log) +# print(db.log.map_version) +# # print("log.cameras",db.log.cameras) +# # print("Log name:", db.log_name) +# # print("lidar",db.lidar_pc) +# # print("scenario_tags", db.scenario_tag) +# # print(db.log._session.query(EgoPose).order_by(func.abs(EgoPose.timestamp)).first()) + +# # persp = Path("/nas/datasets/KITTI-360/calibration/perspective.txt") +# # with open(persp, "r") as f: +# # lines = [ln.strip() for ln in f if ln.strip()] +# # print(lines) + +# from d123.dataset.dataset_specific.kitti_360.kitti_360_data_converter import get_kitti360_camera_metadata + +# print(get_kitti360_camera_metadata()) + + + +# from d123.dataset.dataset_specific.kitti_360.kitti_360_data_converter import _read_timestamps +# result = _read_timestamps("2013_05_28_drive_0000_sync") +# print(len(result)) +# print([result[0].time_us]) \ No newline at end of file diff --git a/notebooks/jbwang_viz_test.py b/notebooks/jbwang_viz_test.py new file mode 100644 index 00000000..73f05dbf --- /dev/null +++ b/notebooks/jbwang_viz_test.py @@ -0,0 +1,252 @@ +# from typing import Tuple + +# import matplotlib.pyplot as plt + +# from nuplan.planning.utils.multithreading.worker_sequential import Sequential + +# from d123.dataset.scene.scene_builder import ArrowSceneBuilder +# from d123.dataset.scene.scene_filter import SceneFilter +# from d123.dataset.scene.abstract_scene import AbstractScene + +# from typing import Dict +# from d123.common.datatypes.sensor.camera import CameraType +# from d123.common.visualization.matplotlib.camera import add_camera_ax +# from d123.common.visualization.matplotlib.camera import add_box_detections_to_camera_ax + +# # split = "nuplan_private_test" +# # log_names = ["2021.09.29.17.35.58_veh-44_00066_00432"] + + + + +# # splits = ["carla"] +# splits = ["nuplan_private_test"] +# # splits = ["wopd_train"] +# # log_names = None + + + +# # splits = ["nuplan_private_test"] +# log_names = None + +# scene_tokens = None + +# scene_filter = SceneFilter( +# split_names=splits, +# log_names=log_names, +# scene_tokens=scene_tokens, +# duration_s=19, +# history_s=0.0, +# timestamp_threshold_s=20, +# shuffle=False, +# camera_types=[CameraType.CAM_F0], +# ) +# scene_builder = ArrowSceneBuilder("/data/jbwang/d123/data/") +# worker = Sequential() +# # worker = RayDistributed() +# scenes = scene_builder.get_scenes(scene_filter, 
worker) + +# print(f"Found {len(scenes)} scenes") + + +# from typing import List, Optional, Tuple +# import matplotlib.pyplot as plt +# import numpy as np +# from d123.common.geometry.base import Point2D +# from d123.common.visualization.color.color import BLACK, DARK_GREY, DARKER_GREY, LIGHT_GREY, NEW_TAB_10, TAB_10 +# from d123.common.visualization.color.config import PlotConfig +# from d123.common.visualization.color.default import CENTERLINE_CONFIG, MAP_SURFACE_CONFIG, ROUTE_CONFIG +# from d123.common.visualization.matplotlib.observation import ( +# add_box_detections_to_ax, +# add_default_map_on_ax, +# add_ego_vehicle_to_ax, +# add_traffic_lights_to_ax, +# ) +# from d123.common.visualization.matplotlib.utils import add_shapely_linestring_to_ax, add_shapely_polygon_to_ax +# from d123.dataset.maps.abstract_map import AbstractMap +# from d123.dataset.maps.abstract_map_objects import AbstractLane +# from d123.dataset.maps.map_datatypes import MapLayer +# from d123.dataset.scene.abstract_scene import AbstractScene + + +# import shapely.geometry as geom + +# LEFT_CONFIG: PlotConfig = PlotConfig( +# fill_color=TAB_10[2], +# fill_color_alpha=1.0, +# line_color=TAB_10[2], +# line_color_alpha=0.5, +# line_width=1.0, +# line_style="-", +# zorder=3, +# ) + +# RIGHT_CONFIG: PlotConfig = PlotConfig( +# fill_color=TAB_10[3], +# fill_color_alpha=1.0, +# line_color=TAB_10[3], +# line_color_alpha=0.5, +# line_width=1.0, +# line_style="-", +# zorder=3, +# ) + + +# LANE_CONFIG: PlotConfig = PlotConfig( +# fill_color=BLACK, +# fill_color_alpha=1.0, +# line_color=BLACK, +# line_color_alpha=0.0, +# line_width=0.0, +# line_style="-", +# zorder=5, +# ) + +# ROAD_EDGE_CONFIG: PlotConfig = PlotConfig( +# fill_color=DARKER_GREY.set_brightness(0.0), +# fill_color_alpha=1.0, +# line_color=DARKER_GREY.set_brightness(0.0), +# line_color_alpha=1.0, +# line_width=1.0, +# line_style="-", +# zorder=3, +# ) + +# ROAD_LINE_CONFIG: PlotConfig = PlotConfig( +# fill_color=DARKER_GREY, +# fill_color_alpha=1.0, +# line_color=NEW_TAB_10[5], +# line_color_alpha=1.0, +# line_width=1.5, +# line_style="-", +# zorder=3, +# ) + + +# def add_debug_map_on_ax( +# ax: plt.Axes, +# map_api: AbstractMap, +# point_2d: Point2D, +# radius: float, +# route_lane_group_ids: Optional[List[int]] = None, +# ) -> None: +# layers: List[MapLayer] = [ +# MapLayer.LANE, +# MapLayer.LANE_GROUP, +# MapLayer.GENERIC_DRIVABLE, +# MapLayer.CARPARK, +# MapLayer.CROSSWALK, +# MapLayer.INTERSECTION, +# MapLayer.WALKWAY, +# MapLayer.ROAD_EDGE, +# MapLayer.ROAD_LINE, +# ] +# x_min, x_max = point_2d.x - radius, point_2d.x + radius +# y_min, y_max = point_2d.y - radius, point_2d.y + radius +# patch = geom.box(x_min, y_min, x_max, y_max) +# map_objects_dict = map_api.query(geometry=patch, layers=layers, predicate="intersects") + +# done = False +# for layer, map_objects in map_objects_dict.items(): +# for map_object in map_objects: +# try: +# if layer in [ +# # MapLayer.GENERIC_DRIVABLE, +# # MapLayer.CARPARK, +# # MapLayer.CROSSWALK, +# # MapLayer.INTERSECTION, +# # MapLayer.WALKWAY, +# ]: +# add_shapely_polygon_to_ax(ax, map_object.shapely_polygon, MAP_SURFACE_CONFIG[layer]) + +# # if layer in [MapLayer.LANE_GROUP]: +# # add_shapely_polygon_to_ax(ax, map_object.shapely_polygon, MAP_SURFACE_CONFIG[layer]) + +# if layer in [MapLayer.LANE]: +# map_object: AbstractLane +# if map_object.right_lane is not None and map_object.left_lane is not None and not done: +# add_shapely_polygon_to_ax(ax, map_object.shapely_polygon, LANE_CONFIG) +# add_shapely_polygon_to_ax(ax, 
map_object.right_lane.shapely_polygon, RIGHT_CONFIG) +# add_shapely_polygon_to_ax(ax, map_object.left_lane.shapely_polygon, LEFT_CONFIG) +# done = True +# else: +# add_shapely_polygon_to_ax(ax, map_object.shapely_polygon, MAP_SURFACE_CONFIG[layer]) + + +# # add_shapely_linestring_to_ax(ax, map_object.right_boundary.linestring, RIGHT_CONFIG) +# # add_shapely_linestring_to_ax(ax, map_object.left_boundary.linestring, LEFT_CONFIG) +# # add_shapely_polygon_to_ax(ax, map_object.shapely_polygon, LANE_CONFIG) + +# # centroid = map_object.shapely_polygon.centroid +# # ax.text( +# # centroid.x, +# # centroid.y, +# # str(map_object.id), +# # horizontalalignment="center", +# # verticalalignment="center", +# # fontsize=8, +# # bbox=dict(facecolor="white", alpha=0.7, boxstyle="round,pad=0.2"), +# # ) +# # if layer in [MapLayer.ROAD_EDGE]: +# # add_shapely_linestring_to_ax(ax, map_object.polyline_3d.linestring, ROAD_EDGE_CONFIG) +# # edge_lengths.append(map_object.polyline_3d.linestring.length) + +# if layer in [MapLayer.ROAD_LINE]: +# line_type = int(map_object.road_line_type) +# plt_config = PlotConfig( +# fill_color=NEW_TAB_10[line_type % len(NEW_TAB_10)], +# fill_color_alpha=1.0, +# line_color=NEW_TAB_10[line_type % len(NEW_TAB_10)], +# line_color_alpha=1.0, +# line_width=1.5, +# line_style="-", +# zorder=3, +# ) +# add_shapely_linestring_to_ax(ax, map_object.polyline_3d.linestring, plt_config) + +# except Exception: +# import traceback + +# print(f"Error adding map object of type {layer.name} and id {map_object.id}") +# traceback.print_exc() + +# ax.set_title(f"Map: {map_api.map_name}") + + +# def _plot_scene_on_ax(ax: plt.Axes, scene: AbstractScene, iteration: int = 0, radius: float = 80) -> plt.Axes: + +# ego_vehicle_state = scene.get_ego_state_at_iteration(iteration) +# box_detections = scene.get_box_detections_at_iteration(iteration) + +# point_2d = ego_vehicle_state.bounding_box.center.state_se2.point_2d +# add_debug_map_on_ax(ax, scene.map_api, point_2d, radius=radius, route_lane_group_ids=None) +# # add_default_map_on_ax(ax, scene.map_api, point_2d, radius=radius, route_lane_group_ids=None) +# # add_traffic_lights_to_ax(ax, traffic_light_detections, scene.map_api) + +# add_box_detections_to_ax(ax, box_detections) +# add_ego_vehicle_to_ax(ax, ego_vehicle_state) + +# zoom = 1.0 +# ax.set_xlim(point_2d.x - radius * zoom, point_2d.x + radius * zoom) +# ax.set_ylim(point_2d.y - radius * zoom, point_2d.y + radius * zoom) + +# ax.set_aspect("equal", adjustable="box") +# return ax + + +# def plot_scene_at_iteration( +# scene: AbstractScene, iteration: int = 0, radius: float = 80 +# ) -> Tuple[plt.Figure, plt.Axes]: + +# size = 15 + +# fig, ax = plt.subplots(figsize=(size, size)) +# _plot_scene_on_ax(ax, scene, iteration, radius) +# return fig, ax + + +# scene_index = 1 +# fig, ax = plot_scene_at_iteration(scenes[scene_index], iteration=100, radius=100) + +# # fig.savefig(f"/home/daniel/scene_{scene_index}_iteration_1.pdf", dpi=300, bbox_inches="tight") + diff --git a/notebooks/nuplan/nuplan_sensor_loading.ipynb b/notebooks/nuplan/nuplan_sensor_loading.ipynb index 0dd69b4e..8291f265 100644 --- a/notebooks/nuplan/nuplan_sensor_loading.ipynb +++ b/notebooks/nuplan/nuplan_sensor_loading.ipynb @@ -21,7 +21,18 @@ "execution_count": null, "id": "1", "metadata": {}, - "outputs": [], + "outputs": [ + { + "data": { + "text/plain": [ + "[[0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0]]" + ] + }, + "execution_count": 2, + "metadata": {}, + "output_type": "execute_result" + } + ], "source": [ "import numpy as 
np\n", "\n", @@ -33,7 +44,19 @@ "execution_count": null, "id": "2", "metadata": {}, - "outputs": [], + "outputs": [ + { + "ename": "ModuleNotFoundError", + "evalue": "No module named 'd123'", + "output_type": "error", + "traceback": [ + "\u001b[31m---------------------------------------------------------------------------\u001b[39m", + "\u001b[31mModuleNotFoundError\u001b[39m Traceback (most recent call last)", + "\u001b[36mCell\u001b[39m\u001b[36m \u001b[39m\u001b[32mIn[3]\u001b[39m\u001b[32m, line 1\u001b[39m\n\u001b[32m----> \u001b[39m\u001b[32m1\u001b[39m \u001b[38;5;28;01mfrom\u001b[39;00m\u001b[38;5;250m \u001b[39m\u001b[34;01md123\u001b[39;00m\u001b[34;01m.\u001b[39;00m\u001b[34;01mdataset\u001b[39;00m\u001b[34;01m.\u001b[39;00m\u001b[34;01mdataset_specific\u001b[39;00m\u001b[34;01m.\u001b[39;00m\u001b[34;01mnuplan\u001b[39;00m\u001b[34;01m.\u001b[39;00m\u001b[34;01mnuplan_data_converter\u001b[39;00m\u001b[38;5;250m \u001b[39m\u001b[38;5;28;01mimport\u001b[39;00m NuplanDataConverter\n", + "\u001b[31mModuleNotFoundError\u001b[39m: No module named 'd123'" + ] + } + ], "source": [ "from d123.dataset.dataset_specific.nuplan.nuplan_data_converter import NuplanDataConverter" ] diff --git a/requirements.txt b/requirements.txt index f0c697e2..b022f008 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,4 +1,4 @@ -nuplan-devkit @ git+https://github.com/motional/nuplan-devkit/@nuplan-devkit-v1.2 +# nuplan-devkit @ git+https://github.com/motional/nuplan-devkit/@nuplan-devkit-v1.2 # nuplan requirements aioboto3 From a4f664ea5fe560c5195228ab390fd2bde1ccf457 Mon Sep 17 00:00:00 2001 From: jbwang <1159270049@qq.com> Date: Fri, 15 Aug 2025 14:59:52 +0800 Subject: [PATCH 002/145] finish kitti360v0.0.1 --- d123/common/datatypes/sensor/lidar_index.py | 7 + .../kitti_360/{__init__ .py => __init__.py} | 0 .../dataset_specific/kitti_360/jbwang_test.py | 154 +++++++ .../kitti_360/kitti_360_data_converter.py | 391 +++++++++++------- .../kitti_360/kitti_360_helper.py | 102 +++++ .../dataset_specific/kitti_360/labels.py | 168 ++++++++ .../default_dataset_conversion.yaml | 4 - .../config/datasets/kitti360_dataset.yaml | 2 +- .../code/hydra/config.yaml | 60 +++ .../2025.08.15.14.31.57/code/hydra/hydra.yaml | 177 ++++++++ .../code/hydra/overrides.yaml | 1 + exp/kitti360_test/2025.08.15.14.31.57/log.txt | 10 + .../code/hydra/config.yaml | 60 +++ .../2025.08.15.14.36.40/code/hydra/hydra.yaml | 177 ++++++++ .../code/hydra/overrides.yaml | 1 + exp/kitti360_test/2025.08.15.14.36.40/log.txt | 10 + .../code/hydra/config.yaml | 60 +++ .../2025.08.15.14.40.29/code/hydra/hydra.yaml | 177 ++++++++ .../code/hydra/overrides.yaml | 1 + exp/kitti_test2/2025.08.15.14.40.29/log.txt | 10 + .../code/hydra/config.yaml | 60 +++ .../2025.08.15.14.43.13/code/hydra/hydra.yaml | 177 ++++++++ .../code/hydra/overrides.yaml | 1 + exp/kitti_test2/2025.08.15.14.43.13/log.txt | 12 + .../code/hydra/config.yaml | 60 +++ .../2025.08.15.14.46.49/code/hydra/hydra.yaml | 177 ++++++++ .../code/hydra/overrides.yaml | 1 + exp/kitti_test2/2025.08.15.14.46.49/log.txt | 10 + .../code/hydra/config.yaml | 60 +++ .../2025.08.15.14.50.55/code/hydra/hydra.yaml | 177 ++++++++ .../code/hydra/overrides.yaml | 1 + exp/kitti_test2/2025.08.15.14.50.55/log.txt | 11 + .../code/hydra/config.yaml | 60 +++ .../2025.08.15.14.52.39/code/hydra/hydra.yaml | 177 ++++++++ .../code/hydra/overrides.yaml | 1 + exp/kitti_test2/2025.08.15.14.52.39/log.txt | 11 + jbwang_test.py | 19 +- jbwang_test2.py | 70 ++++ notebooks/dataset/jbwang_test.py | 11 +- 39 files changed, 2508 
insertions(+), 160 deletions(-) rename d123/dataset/dataset_specific/kitti_360/{__init__ .py => __init__.py} (100%) create mode 100644 d123/dataset/dataset_specific/kitti_360/jbwang_test.py create mode 100644 d123/dataset/dataset_specific/kitti_360/kitti_360_helper.py create mode 100644 d123/dataset/dataset_specific/kitti_360/labels.py create mode 100644 exp/kitti360_test/2025.08.15.14.31.57/code/hydra/config.yaml create mode 100644 exp/kitti360_test/2025.08.15.14.31.57/code/hydra/hydra.yaml create mode 100644 exp/kitti360_test/2025.08.15.14.31.57/code/hydra/overrides.yaml create mode 100644 exp/kitti360_test/2025.08.15.14.31.57/log.txt create mode 100644 exp/kitti360_test/2025.08.15.14.36.40/code/hydra/config.yaml create mode 100644 exp/kitti360_test/2025.08.15.14.36.40/code/hydra/hydra.yaml create mode 100644 exp/kitti360_test/2025.08.15.14.36.40/code/hydra/overrides.yaml create mode 100644 exp/kitti360_test/2025.08.15.14.36.40/log.txt create mode 100644 exp/kitti_test2/2025.08.15.14.40.29/code/hydra/config.yaml create mode 100644 exp/kitti_test2/2025.08.15.14.40.29/code/hydra/hydra.yaml create mode 100644 exp/kitti_test2/2025.08.15.14.40.29/code/hydra/overrides.yaml create mode 100644 exp/kitti_test2/2025.08.15.14.40.29/log.txt create mode 100644 exp/kitti_test2/2025.08.15.14.43.13/code/hydra/config.yaml create mode 100644 exp/kitti_test2/2025.08.15.14.43.13/code/hydra/hydra.yaml create mode 100644 exp/kitti_test2/2025.08.15.14.43.13/code/hydra/overrides.yaml create mode 100644 exp/kitti_test2/2025.08.15.14.43.13/log.txt create mode 100644 exp/kitti_test2/2025.08.15.14.46.49/code/hydra/config.yaml create mode 100644 exp/kitti_test2/2025.08.15.14.46.49/code/hydra/hydra.yaml create mode 100644 exp/kitti_test2/2025.08.15.14.46.49/code/hydra/overrides.yaml create mode 100644 exp/kitti_test2/2025.08.15.14.46.49/log.txt create mode 100644 exp/kitti_test2/2025.08.15.14.50.55/code/hydra/config.yaml create mode 100644 exp/kitti_test2/2025.08.15.14.50.55/code/hydra/hydra.yaml create mode 100644 exp/kitti_test2/2025.08.15.14.50.55/code/hydra/overrides.yaml create mode 100644 exp/kitti_test2/2025.08.15.14.50.55/log.txt create mode 100644 exp/kitti_test2/2025.08.15.14.52.39/code/hydra/config.yaml create mode 100644 exp/kitti_test2/2025.08.15.14.52.39/code/hydra/hydra.yaml create mode 100644 exp/kitti_test2/2025.08.15.14.52.39/code/hydra/overrides.yaml create mode 100644 exp/kitti_test2/2025.08.15.14.52.39/log.txt create mode 100644 jbwang_test2.py diff --git a/d123/common/datatypes/sensor/lidar_index.py b/d123/common/datatypes/sensor/lidar_index.py index 0df92cff..4e7ad133 100644 --- a/d123/common/datatypes/sensor/lidar_index.py +++ b/d123/common/datatypes/sensor/lidar_index.py @@ -60,3 +60,10 @@ class WopdLidarIndex(LiDARIndex): X = 3 Y = 4 Z = 5 + +@register_lidar_index +class Kitti360LidarIndex(LiDARIndex): + X = 0 + Y = 1 + Z = 2 + INTENSITY = 3 \ No newline at end of file diff --git a/d123/dataset/dataset_specific/kitti_360/__init__ .py b/d123/dataset/dataset_specific/kitti_360/__init__.py similarity index 100% rename from d123/dataset/dataset_specific/kitti_360/__init__ .py rename to d123/dataset/dataset_specific/kitti_360/__init__.py diff --git a/d123/dataset/dataset_specific/kitti_360/jbwang_test.py b/d123/dataset/dataset_specific/kitti_360/jbwang_test.py new file mode 100644 index 00000000..6f0bdbd9 --- /dev/null +++ b/d123/dataset/dataset_specific/kitti_360/jbwang_test.py @@ -0,0 +1,154 @@ +import gc +import json +import os +import pickle +from dataclasses import asdict +from functools 
import partial +from pathlib import Path +from typing import Any, Dict, Final, List, Optional, Tuple, Union + +import numpy as np +import pyarrow as pa +import yaml +from nuplan.database.nuplan_db.nuplan_scenario_queries import get_cameras, get_images_from_lidar_tokens +from nuplan.database.nuplan_db_orm.ego_pose import EgoPose +from nuplan.database.nuplan_db_orm.lidar_box import LidarBox +from nuplan.database.nuplan_db_orm.lidar_pc import LidarPc +from nuplan.database.nuplan_db_orm.nuplandb import NuPlanDB +from nuplan.planning.simulation.observation.observation_type import CameraChannel +from nuplan.planning.utils.multithreading.worker_utils import WorkerPool, worker_map +from pyquaternion import Quaternion +from sqlalchemy import func + + +from kitti_360_data_converter import _extract_ego_state_all,get_kitti360_lidar_metadata,_extract_cameras,_extract_detections + +# a = _extract_ego_state_all("2013_05_28_drive_0000_sync") +# print(a[0]) +# print(a[1]) +# print(a[10]) +from d123.common.datatypes.time.time_point import TimePoint +from d123.common.datatypes.sensor.camera import CameraMetadata, CameraType, camera_metadata_dict_to_json + +NUPLAN_CAMERA_TYPES = { + CameraType.CAM_F0: CameraChannel.CAM_F0, + CameraType.CAM_B0: CameraChannel.CAM_B0, + CameraType.CAM_L0: CameraChannel.CAM_L0, + CameraType.CAM_L1: CameraChannel.CAM_L1, + CameraType.CAM_L2: CameraChannel.CAM_L2, + CameraType.CAM_R0: CameraChannel.CAM_R0, + CameraType.CAM_R1: CameraChannel.CAM_R1, + CameraType.CAM_R2: CameraChannel.CAM_R2, +} + +NUPLAN_DATA_ROOT = Path(os.environ["NUPLAN_DATA_ROOT"]) +NUPLAN_ROLLING_SHUTTER_S: Final[TimePoint] = TimePoint.from_s(1 / 60) + +def _extract_camera( + log_db: NuPlanDB, + lidar_pc: LidarPc, + source_log_path: Path, +) -> Dict[CameraType, Union[str, bytes]]: + + camera_dict: Dict[str, Union[str, bytes]] = {} + sensor_root = NUPLAN_DATA_ROOT / "nuplan-v1.1" / "sensor_blobs" + + log_cam_infos = {camera.token: camera for camera in log_db.log.cameras} + for camera_type, camera_channel in NUPLAN_CAMERA_TYPES.items(): + camera_data: Optional[Union[str, bytes]] = None + c2e: Optional[List[float]] = None + image_class = list(get_images_from_lidar_tokens(source_log_path, [lidar_pc.token], [str(camera_channel.value)])) + # print("image_class",image_class) + if len(image_class) != 0: + image = image_class[0] + filename_jpg = sensor_root / image.filename_jpg + + timestamp = image.timestamp + NUPLAN_ROLLING_SHUTTER_S.time_us + img_ego_pose: EgoPose = ( + log_db.log._session.query(EgoPose).order_by(func.abs(EgoPose.timestamp - timestamp)).first() + ) + img_e2g = img_ego_pose.trans_matrix + g2e = lidar_pc.ego_pose.trans_matrix_inv + img_e2e = g2e @ img_e2g + cam_info = log_cam_infos[image.camera_token] + c2img_e = cam_info.trans_matrix + c2e = img_e2e @ c2img_e + # print(f"Camera {camera_type} found for lidar {lidar_pc.token} at timestamp {timestamp}") + print(camera_type,"c2e:", c2e) + camera_dict[camera_type] = camera_data + + return camera_dict + + +def get_cam_info_from_lidar_pc(log,log_file, lidar_pc, rolling_shutter_s=1/60): + + retrieved_images = get_images_from_lidar_tokens( + log_file, [lidar_pc.token], [str(channel.value) for channel in CameraChannel] + ) + + # if interp_trans: + # neighbours = [] + # ego_poses_dict = {} + # for ego_pose in log.ego_poses: + # ego_poses_dict[ego_pose.token] = ego_pose + # if abs(ego_pose.timestamp - lidar_pc.ego_pose.timestamp) / 1e6 < 0.5: + # neighbours.append(ego_pose) + # timestamps = [pose.timestamp for pose in neighbours] + # translations = 
[pose.translation_np for pose in neighbours] + # splines = [CubicSpline(timestamps, [translation[i] for translation in translations]) for i in range(2)] + + log_cam_infos = {camera.token : camera for camera in log.camera} + cams = {} + for img in retrieved_images: + channel = img.channel + filename = img.filename_jpg + + # if interp_trans: + # img_ego_pose = ego_poses_dict[img.ego_pose_token] + # interpolated_translation = np.array([splines[0](timestamp), splines[1](timestamp), img_ego_pose.z]) + # delta = interpolated_translation - lidar_pc.ego_pose.translation_np + # delta = np.dot(lidar_pc.ego_pose.quaternion.rotation_matrix.T, delta) + if channel == "CAM_F0": + timestamp = img.timestamp + (rolling_shutter_s * 1e6) + img_ego_pose = log.session.query(EgoPose).order_by(func.abs(EgoPose.timestamp - timestamp)).first() + img_e2g = img_ego_pose.trans_matrix + # print("img_e2g:", img_e2g) + + g2e = lidar_pc.ego_pose.trans_matrix_inv + # print("g2e:", g2e) #change obviously + img_e2e = g2e @ img_e2g + # print("img_e2e:", img_e2e) + cam_info = log_cam_infos[img.camera_token] + c2img_e = cam_info.trans_matrix + # print("c2img_e:", c2img_e) + c2e = img_e2e @ c2img_e + # print("channel:", channel, "c2e:", c2e) + + cams[channel] = dict( + data_path = filename, + timestamp = img.timestamp, + token=img.token, + sensor2ego_rotation = Quaternion(matrix=c2e[:3, :3]), + sensor2ego_translation = c2e[:3, 3], + cam_intrinsic = cam_info.intrinsic_np, + distortion = cam_info.distortion_np, + ) + + + if len(cams) != 8: + return None + # print(cams) + return cams + +if __name__ == "__main__": + # Example usage + # data_converter_config: DataConverterConfig + # log_path = Path("/nas/datasets/nuplan/nuplan-v1.1/splits/mini/2021.10.11.07.12.18_veh-50_00211_00304.db") + # log_path = Path("/nas/datasets/nuplan/nuplan-v1.1/splits/mini/2021.09.16.15.12.03_veh-42_01037_01434.db") + # log_db = NuPlanDB(NUPLAN_DATA_ROOT, str(log_path), None) + + # for lidar_pc in log_db.lidar_pc: # Replace with actual token + # # camera_data = _extract_camera(log_db, lidar_pc, log_path) + # camera_data = get_cam_info_from_lidar_pc(log_db,log_path, lidar_pc, rolling_shutter_s=1/60) + # print(_extract_cameras("2013_05_28_drive_0000_sync",0)) + _extract_detections("2013_05_28_drive_0000_sync", 0) \ No newline at end of file diff --git a/d123/dataset/dataset_specific/kitti_360/kitti_360_data_converter.py b/d123/dataset/dataset_specific/kitti_360/kitti_360_data_converter.py index b6e97d8c..c79ce0b2 100644 --- a/d123/dataset/dataset_specific/kitti_360/kitti_360_data_converter.py +++ b/d123/dataset/dataset_specific/kitti_360/kitti_360_data_converter.py @@ -9,27 +9,35 @@ import numpy as np import datetime import hashlib +import xml.etree.ElementTree as ET import pyarrow as pa from PIL import Image + from nuplan.planning.utils.multithreading.worker_utils import WorkerPool, worker_map +from d123.common.datatypes.detection.detection_types import DetectionType from d123.common.datatypes.sensor.camera import CameraMetadata, CameraType, camera_metadata_dict_to_json +from d123.common.datatypes.sensor.lidar import LiDARMetadata, LiDARType, lidar_metadata_dict_to_json +from d123.common.datatypes.sensor.lidar_index import Kitti360LidarIndex from d123.common.datatypes.time.time_point import TimePoint -from d123.common.datatypes.vehicle_state.ego_state import EgoStateSE3Index -from d123.common.datatypes.vehicle_state.vehicle_parameters import get_kitti360_station_wagon_parameters +from d123.common.datatypes.vehicle_state.ego_state import DynamicStateSE3, 
EgoStateSE3, EgoStateSE3Index +from d123.common.datatypes.vehicle_state.vehicle_parameters import get_kitti360_station_wagon_parameters,rear_axle_se3_to_center_se3 +from d123.common.geometry.base import StateSE3 from d123.common.geometry.bounding_box.bounding_box import BoundingBoxSE3Index -from d123.common.geometry.vector import Vector3DIndex +from d123.common.geometry.vector import Vector3D, Vector3DIndex from d123.dataset.arrow.helper import open_arrow_table, write_arrow_table from d123.dataset.dataset_specific.raw_data_converter import DataConverterConfig, RawDataConverter from d123.dataset.logs.log_metadata import LogMetadata +from kitti_360_helper import KITTI360Bbox3D + KITTI360_DT: Final[float] = 0.1 SORT_BY_TIMESTAMP: Final[bool] = True KITTI360_DATA_ROOT = Path(os.environ["KITTI360_DATA_ROOT"]) #TODO carera mismatch -KITTI360_CAMERA_TYPES: Final[Dict[CameraType, str]] = { +KITTI360_CAMERA_TYPES = { CameraType.CAM_L0: "image_00", CameraType.CAM_R0: "image_01", # TODO fisheye camera @@ -63,6 +71,16 @@ # DIR_POSES: PATH_POSES_ROOT, } +#TODO +KIITI360_DETECTION_NAME_DICT = { + "truck": DetectionType.VEHICLE, + "bus": DetectionType.VEHICLE, + "car": DetectionType.VEHICLE, + "motorcycle": DetectionType.BICYCLE, + "bicycle": DetectionType.BICYCLE, + "pedestrian": DetectionType.PEDESTRIAN, +} + def create_token(input_data: str) -> str: # TODO: Refactor this function. @@ -73,43 +91,6 @@ def create_token(input_data: str) -> str: hash_obj = hashlib.sha256(input_data) return hash_obj.hexdigest()[:16] -def _load_calibration() -> Tuple[Dict[str, np.ndarray], Dict[str, np.ndarray]]: - """ - 读取 KITTI-360 全局标定文件,返回: - - intrinsics[image_02] = 3x3 - - c2e[image_02] = 4x4(camera->ego/body),这里将 cam_to_pose 视为 camera->vehicle(简化) - """ - calib_dir = KITTI360_DATA_ROOT / DIR_CALIB - intrinsics: Dict[str, np.ndarray] = {} - c2e: Dict[str, np.ndarray] = {} - - # 内参:perspective.txt 中的 P_rect_0{0..3} - persp = calib_dir / "perspective.txt" - if persp.exists(): - with open(persp, "r") as f: - lines = [ln.strip() for ln in f if ln.strip()] - for ln in lines: - if ln.startswith("P_rect_02"): - intrinsics["image_02"] = _read_projection_matrix(ln) - elif ln.startswith("P_rect_03"): - intrinsics["image_03"] = _read_projection_matrix(ln) - - # 外参:cam_to_pose.txt 中 Tr_cam02(相机到车体/pose) - c2p = calib_dir / "cam_to_pose.txt" - if c2p.exists(): - with open(c2p, "r") as f: - lines = [ln.strip() for ln in f if ln.strip()] - for ln in lines: - if ln.startswith("Tr_cam02"): - vals = [float(x) for x in ln.split(":")[1].strip().split()] - T = np.array(vals, dtype=np.float64).reshape(4, 4) - c2e["image_02"] = T - elif ln.startswith("Tr_cam03"): - vals = [float(x) for x in ln.split(":")[1].strip().split()] - T = np.array(vals, dtype=np.float64).reshape(4, 4) - c2e["image_03"] = T - - return intrinsics, c2e class Kitti360DataConverter(RawDataConverter): def __init__( @@ -204,6 +185,19 @@ def convert_kitti360_log_to_arrow( if not log_file_path.parent.exists(): log_file_path.parent.mkdir(parents=True, exist_ok=True) + metadata = LogMetadata( + dataset="kitti360", + log_name=log_name, + location="None", + timestep_seconds=KITTI360_DT, + map_has_z=False, + ) + + vehicle_parameters = get_kitti360_station_wagon_parameters() + camera_metadata = get_kitti360_camera_metadata() + #TODO now only velodyne lidar + lidar_metadata = get_kitti360_lidar_metadata() + schema_column_list = [ ("token", pa.string()), ("timestamp", pa.int64()), @@ -218,38 +212,29 @@ def convert_kitti360_log_to_arrow( ("route_lane_group_ids", pa.list_(pa.int64())), 
]
 
     if data_converter_config.lidar_store_option is not None:
-        if data_converter_config.lidar_store_option == "path":
-            schema_column_list.append(("lidar", pa.string()))
-        elif data_converter_config.lidar_store_option == "binary":
-            raise NotImplementedError("Binary lidar storage is not implemented.")
+        for lidar_type in lidar_metadata.keys():
+            if data_converter_config.lidar_store_option == "path":
+                schema_column_list.append((lidar_type.serialize(), pa.string()))
+            elif data_converter_config.lidar_store_option == "binary":
+                raise NotImplementedError("Binary lidar storage is not implemented.")
 
-    # TODO: Adjust how cameras are added
     if data_converter_config.camera_store_option is not None:
-        for cam_type in KITTI360_CAMERA_TYPES.keys():
+        for camera_type in camera_metadata.keys():
             if data_converter_config.camera_store_option == "path":
-                schema_column_list.append((cam_type.serialize(), pa.string()))
-                schema_column_list.append((f"{cam_type.serialize()}_extrinsic", pa.list_(pa.float64(), 16)))
+                schema_column_list.append((camera_type.serialize(), pa.string()))
+                schema_column_list.append(
+                    (f"{camera_type.serialize()}_extrinsic", pa.list_(pa.float64(), 4 * 4))
+                )
             elif data_converter_config.camera_store_option == "binary":
                 raise NotImplementedError("Binary camera storage is not implemented.")
 
     recording_schema = pa.schema(schema_column_list)
 
-    #TODO location
-    metadata = LogMetadata(
-        dataset="kitti360",
-        log_name=log_name,
-        location="None",
-        timestep_seconds=KITTI360_DT,
-        map_has_z=False,
-    )
-
-    #TODO vehicle parameters
-    vehicle_parameters = get_kitti360_station_wagon_parameters()
-    camera_metadata = get_kitti360_camera_metadata()
     recording_schema = recording_schema.with_metadata(
         {
             "log_metadata": json.dumps(asdict(metadata)),
             "vehicle_parameters": json.dumps(asdict(vehicle_parameters)),
             "camera_metadata": camera_metadata_dict_to_json(camera_metadata),
+            "lidar_metadata": lidar_metadata_dict_to_json(lidar_metadata),
         }
     )
 
@@ -298,6 +283,35 @@ def _read_projection_matrix(p_line: str) -> np.ndarray:
     K = P[:, :3]
     return K
 
+def get_kitti360_lidar_metadata() -> Dict[LiDARType, LiDARMetadata]:
+    # The calibration is global to the dataset, so no per-log argument is needed.
+    metadata: Dict[LiDARType, LiDARMetadata] = {}
+
+    cam2pose_txt = PATH_CALIB_ROOT / "calib_cam_to_pose.txt"
+    if not cam2pose_txt.exists():
+        raise FileNotFoundError(f"calib_cam_to_pose.txt file not found: {cam2pose_txt}")
+
+    cam2velo_txt = PATH_CALIB_ROOT / "calib_cam_to_velo.txt"
+    if not cam2velo_txt.exists():
+        raise FileNotFoundError(f"calib_cam_to_velo.txt file not found: {cam2velo_txt}")
+
+    last_row = np.array([0.0, 0.0, 0.0, 1.0]).reshape(1, 4)
+
+    with open(cam2pose_txt, "r") as f:
+        image_00 = next(f)  # the first line holds the image_00 (cam0) entry
+    values = list(map(float, image_00.strip().split()[1:]))
+    matrix = np.array(values).reshape(3, 4)
+    cam2pose = np.concatenate((matrix, last_row))
+
+    cam2velo = np.concatenate((np.loadtxt(cam2velo_txt).reshape(3, 4), last_row))
+    # Compose vehicle(pose) -> velodyne: T_velo<-pose = T_velo<-cam0 @ inv(T_pose<-cam0).
+    extrinsic = cam2velo @ np.linalg.inv(cam2pose)
+
+    metadata[LiDARType.LIDAR_TOP] = LiDARMetadata(
+        lidar_type=LiDARType.LIDAR_TOP,
+        lidar_index=Kitti360LidarIndex,
+        extrinsic=extrinsic,
+    )
+    return metadata
+
 def _write_recording_table(
     log_name: str,
     recording_schema: pa.Schema,
@@ -306,31 +320,33 @@ def _write_recording_table(
 ) -> None:
 
     ts_list = _read_timestamps(log_name)
+    ego_state_all = _extract_ego_state_all(log_name)
+    detections_states, detections_velocity, detections_tokens, detections_types = _extract_detections(log_name, len(ts_list))
 
     with pa.OSFile(str(log_file_path), "wb") as sink:
         with pa.ipc.new_file(sink, recording_schema) as writer:
-            for i, tp in
enumerate(ts_list):
+            for idx, tp in enumerate(ts_list):
+
                 row_data = {
-                    "token": [create_token(f"{log_name}_{i}")],
+                    "token": [create_token(f"{log_name}_{idx}")],
                     "timestamp": [tp.time_us],
-                    "detections_state": [],
-                    "detections_velocity": [],
-                    "detections_token": [],
-                    "detections_type": [],
-                    "ego_states": [],
-                    "traffic_light_ids": [],
-                    "traffic_light_types": [],
-                    "scenario_tag": [],
-                    "route_lane_group_ids": [],
+                    "detections_state": [detections_states[idx]],
+                    "detections_velocity": [detections_velocity[idx]],
+                    "detections_token": [detections_tokens[idx]],
+                    "detections_type": [detections_types[idx]],
+                    "ego_states": [ego_state_all[idx]],
+                    "traffic_light_ids": [[]],
+                    # TODO: traffic light types are not annotated per frame in KITTI-360.
+                    "traffic_light_types": [[]],
+                    "scenario_tag": [["unknown"]],
+                    "route_lane_group_ids": [[]],
                 }
 
                 if data_converter_config.lidar_store_option is not None:
-                    row_data["lidar"] = []
-                    # row_data["lidar"] = [_extract_lidar(log_name, data_converter_config)]
+                    lidar_data_dict = _extract_lidar(log_name, idx, data_converter_config)
+                    if lidar_data_dict is not None:
+                        # Column names must match the per-lidar schema entries above.
+                        for lidar_type, lidar_data in lidar_data_dict.items():
+                            row_data[lidar_type.serialize()] = [lidar_data]
 
                 if data_converter_config.camera_store_option is not None:
-                    # camera_data_dict = _extract_camera(log_db, lidar_pc, source_log_path, data_converter_config)
-                    camera_data_dict = {}
+                    camera_data_dict = _extract_cameras(log_name, idx, data_converter_config)
                     for camera_type, camera_data in camera_data_dict.items():
                         if camera_data is not None:
                             row_data[camera_type.serialize()] = [camera_data[0]]
@@ -348,9 +364,8 @@ def _write_recording_table(
         write_arrow_table(recording_table, log_file_path)
 
 #TODO default timestamps
-# If timestamps are not provided, we can generate them based on the KITTI-360 DT
 def _read_timestamps(log_name: str) -> Optional[List[TimePoint]]:
-
+    # timestamps.txt stores wall-clock datetimes; they are converted to unix-epoch TimePoints.
     ts_file = PATH_2D_RAW_ROOT / log_name / "image_01" / "timestamps.txt"
     if ts_file.exists():
         tps: List[TimePoint] = []
@@ -375,82 +390,160 @@ def _read_timestamps(log_name: str) -> Optional[List[TimePoint]]:
         return tps
     return None
 
+def _extract_ego_state_all(log_name: str) -> List[List[float]]:
+
+    ego_state_all: List[List[float]] = []
+
+    pose_file = PATH_POSES_ROOT / log_name / "poses.txt"
+    if not pose_file.exists():
+        raise FileNotFoundError(f"Pose file not found: {pose_file}")
+    poses = np.loadtxt(pose_file)
+    poses_time = poses[:, 0] - 1  # first column holds frame indices; shift so they start at 0
+
+    oxts_path = PATH_POSES_ROOT / log_name / "oxts" / "data"
+    vehicle_parameters = get_kitti360_station_wagon_parameters()
+
+    for idx in range(len(list(oxts_path.glob("*.txt")))):
+        oxts_path_file = oxts_path / f"{int(idx):010d}.txt"
+        oxts_data = np.loadtxt(oxts_path_file)
+
+        roll, pitch, yaw = oxts_data[3:6]
+
+        # Poses are given for a sparse subset of frames; take the latest pose at or before idx.
+        pos = np.searchsorted(poses_time, idx, side="right") - 1
+
+        rear_axle_pose = StateSE3(
+            x=poses[pos, 4],
+            y=poses[pos, 8],
+            z=poses[pos, 12],
+            roll=roll,
+            pitch=pitch,
+            yaw=yaw,
+        )
+        # NOTE: The height to the rear axle is not provided by the dataset and is merely approximated.
+        center = rear_axle_se3_to_center_se3(rear_axle_se3=rear_axle_pose, vehicle_parameters=vehicle_parameters)
+        # OXTS packet layout: indices 8-10 = (vf, vl, vu), 14-16 = (af, al, au),
+        # 20-22 = (wf, wl, wu), i.e. forward/left/up components in the body frame.
+        dynamic_state = DynamicStateSE3(
+            velocity=Vector3D(
+                x=oxts_data[8],
+                y=oxts_data[9],
+                z=oxts_data[10],
+            ),
+            acceleration=Vector3D(
+                x=oxts_data[14],
+                y=oxts_data[15],
+                z=oxts_data[16],
+            ),
+            angular_velocity=Vector3D(
+                x=oxts_data[20],
+                y=oxts_data[21],
+                z=oxts_data[22],
+            ),
+        )
+        ego_state_all.append(
+            EgoStateSE3(
+                center_se3=center,
+                dynamic_state_se3=dynamic_state,
+                vehicle_parameters=vehicle_parameters,
+                timepoint=None,
+            ).array.tolist()
+        )
+    return ego_state_all
+
+# TODO: annotations are taken from the data_3d_bboxes XML only; image- and
+# lidar-based detections are not distinguished, and visibility is currently
+# approximated by the annotated start/end frame.
+def _extract_detections(
+    log_name: str,
+    ts_len: int,
+) -> Tuple[List[List[List[float]]], List[List[List[float]]], List[List[str]], List[List[int]]]:
+
+    detections_states: List[List[List[float]]] = [[] for _ in range(ts_len)]
+    detections_velocity: List[List[List[float]]] = [[] for _ in range(ts_len)]
+    detections_tokens: List[List[str]] = [[] for _ in range(ts_len)]
+    detections_types: List[List[int]] = [[] for _ in range(ts_len)]
+
+    bbox_3d_path = PATH_3D_BBOX_ROOT / "train" / f"{log_name}.xml"
+    if not bbox_3d_path.exists():
+        raise FileNotFoundError(f"BBox 3D file not found: {bbox_3d_path}")
+
+    tree = ET.parse(bbox_3d_path)
+    root = tree.getroot()
+
+    for child in root:
+        label = child.find('label').text
+        if child.find('transform') is None or label not in KITTI360_DETECTION_NAME_DICT:
+            continue
+        obj = KITTI360Bbox3D()
+        obj.parseBbox(child)
+
+        # static object: visible over a frame range
+        if obj.timestamp == -1:
+            start_frame = obj.start_frame
+            end_frame = obj.end_frame
+            for frame in range(start_frame, end_frame + 1):
+                # TODO: check per-frame visibility instead of the full range.
+                if frame < 0 or frame >= ts_len:
+                    continue
+                # TODO: verify the yaw convention.
+                detections_states[frame].append(obj.get_state_array())
+                detections_velocity[frame].append([0.0, 0.0, 0.0])
+                detections_tokens[frame].append(str(obj.globalID))
+                detections_types[frame].append(int(KITTI360_DETECTION_NAME_DICT[label]))
+        # dynamic object: annotated at a single frame
+        else:
+            frame = obj.timestamp
+            if frame < 0 or frame >= ts_len:
+                continue
+            detections_states[frame].append(obj.get_state_array())
+            # TODO: velocity is not provided by the annotations.
+            detections_velocity[frame].append([0.0, 0.0, 0.0])
+            detections_tokens[frame].append(str(obj.globalID))
+            detections_types[frame].append(int(KITTI360_DETECTION_NAME_DICT[label]))
+
+    return detections_states, detections_velocity, detections_tokens, detections_types
+
 #TODO lidar extraction
-def _extract_lidar(log_name: str, data_converter_config: DataConverterConfig) -> Optional[str]:
+def _extract_lidar(log_name: str, idx: int, data_converter_config: DataConverterConfig) -> Optional[Dict[LiDARType, str]]:
     lidar: Optional[str] = None
-    lidar_full_path = DIR_3D_SMT / "train" / log_name / "0000000002_0000000385.ply"
+    lidar_full_path = PATH_3D_RAW_ROOT / log_name / "velodyne_points" / "data" / f"{idx:010d}.bin"
     if lidar_full_path.exists():
         if data_converter_config.lidar_store_option == "path":
-            lidar = f"{log_name}/lidar/{sample_name}.npy"
+            lidar = f"/data_3d_raw/{log_name}/velodyne_points/data/{idx:010d}.bin"
         elif data_converter_config.lidar_store_option == "binary":
             raise NotImplementedError("Binary lidar storage is not implemented.")
     else:
         raise FileNotFoundError(f"LiDAR file not found: {lidar_full_path}")
-    return lidar
-
-def _extract_camera():
-    pass
-
-
-
-# for idx in range(n_frames):
-#     token = f"{seq_name}_{idx:06d}"
-#     t_us = ts_list[idx].time_us
-
-#     row = {
-#         "token": [token],
-#         "timestamp": [t_us],
-#         # Placeholders for now; to be replaced with real annotations later.
-#         "detections_state": [[]],
-#         "detections_velocity": [[]],
-#         "detections_token": [[]],
-#         "detections_type": [[]],
-#         "ego_states": [([0.0] * len(EgoStateSE3Index))],  # placeholder
-#         "traffic_light_ids": [[]],
-#         "traffic_light_types": [[]],
-#         "scenario_tag": [["unknown"]],
-#         "route_lane_group_ids": [[]],
-#     }
-
-#     # lidar path (if present)
-#     if data_converter_config.lidar_store_option is not None:
-#         # velodyne bin: KITTI-360/data_3d_raw//velodyne_points/data/0000000000.bin
-#         velodyne_dir = (
-#             KITTI360_DATA_ROOT / DIR_3D / seq_name / "velodyne_points" / "data"
-#         )
-#         # filenames may use different zero paddings; try a few widths
-#         bin_path = None
-#         for fmt in [f"{idx:010d}.bin", f"{idx:06d}.bin", f"{idx:08d}.bin"]:
-#             cand = velodyne_dir / fmt
-#             if cand.exists():
-#                 bin_path = cand
-#                 break
-#         row["lidar"] = [str(bin_path.relative_to(KITTI360_DATA_ROOT)) if bin_path else None]
-
-#     # camera paths and extrinsics
-#     if data_converter_config.camera_store_option is not None:
-#         for cam_type, cam_dir_name in KITTI360_CAMERA_TYPES.items():
-#             img_dir = seq_dir_2d / cam_dir_name / "data"
-#             # try different filename paddings
-#             img_path = None
-#             for ext in (".png", ".jpg", ".jpeg"):
-#                 for fmt in [f"{idx:010d}{ext}", f"{idx:06d}{ext}", f"{idx:08d}{ext}"]:
-#                     cand = img_dir / fmt
-#                     if cand.exists():
-#                         img_path = cand
-#                         break
-#                 if img_path:
-#                     break
-#             if img_path is not None:
-#                 rel = str(img_path.relative_to(KITTI360_DATA_ROOT))
-#                 row[cam_type.serialize()] = [rel]
-#                 # extrinsic: fixed cam->ego (global calibration), constant per frame
-#                 # (replace here if per-frame poses are needed)
-#                 T = c2e.get(KITTI360_CAMERA_TYPES[cam_type], np.eye(4, dtype=np.float64))
-#                 row[f"{cam_type.serialize()}_extrinsic"] = [T.astype(np.float64).reshape(-1).tolist()]
-#             else:
-#                 row[cam_type.serialize()] = [None]
-#                 row[f"{cam_type.serialize()}_extrinsic"] = [None]

-#     batch = pa.record_batch(row, schema=recording_schema)
-#     writer.write_batch(batch)
-#     del batch, row
\ No newline at end of file
+    return {LiDARType.LIDAR_TOP: lidar} if lidar else None
+
+# TODO: check the camera extrinsic; it is currently camera -> pose (vehicle).
+def _extract_cameras(
+    log_name: str, idx: int, data_converter_config: DataConverterConfig
+) -> Dict[CameraType, Tuple[Union[str, bytes], List[float]]]:
+
+    camera_dict: Dict[CameraType, Tuple[Union[str, bytes], List[float]]] = {}
+    for camera_type, cam_dir_name in KITTI360_CAMERA_TYPES.items():
+        img_path_png = PATH_2D_RAW_ROOT / log_name / cam_dir_name / "data_rect" / f"{idx:010d}.png"
+        if not img_path_png.exists():
+            raise FileNotFoundError(f"Camera image not found: {img_path_png}")
+
+        cam2pose_txt = PATH_CALIB_ROOT / "calib_cam_to_pose.txt"
+        if not cam2pose_txt.exists():
+            raise FileNotFoundError(f"calib_cam_to_pose.txt file not found: {cam2pose_txt}")
+
+        last_row = np.array([0.0, 0.0, 0.0, 1.0]).reshape(1, 4)
+        cam2pose = np.eye(4)
+        with open(cam2pose_txt, "r") as f:
+            for line in f:
+                parts = line.strip().split()
+                key = parts[0][:-1]  # strip the trailing ':' from e.g. "image_00:"
+                if key == cam_dir_name:
+                    values = list(map(float, parts[1:]))
+                    matrix = np.array(values).reshape(3, 4)
+                    cam2pose = np.concatenate((matrix, last_row))
+
+        if data_converter_config.camera_store_option == "path":
+            camera_data = str(img_path_png), cam2pose.flatten().tolist()
+        elif data_converter_config.camera_store_option == "binary":
+            with open(img_path_png, "rb") as f:
+                camera_data = f.read(), cam2pose.flatten().tolist()
+        else:
+            raise ValueError(f"Unknown camera_store_option: {data_converter_config.camera_store_option}")
+        camera_dict[camera_type] = camera_data
+    return camera_dict
diff --git a/d123/dataset/dataset_specific/kitti_360/kitti_360_helper.py b/d123/dataset/dataset_specific/kitti_360/kitti_360_helper.py
new file mode 100644
index 00000000..da79cf3e
--- /dev/null
+++ b/d123/dataset/dataset_specific/kitti_360/kitti_360_helper.py
@@ -0,0 +1,102 @@
+import numpy as np
+
+from d123.dataset.dataset_specific.kitti_360.labels import kittiId2label
+
+from scipy.linalg import polar
+from scipy.spatial.transform import Rotation as R
+
+from d123.common.geometry.base import StateSE3
+from d123.common.geometry.bounding_box.bounding_box import BoundingBoxSE3
+
+DEFAULT_ROLL = 0.0
+DEFAULT_PITCH = 0.0
+
+MAX_N = 1000
+
+def local2global(semanticId, instanceId):
+    globalId = semanticId * MAX_N + instanceId
+    if isinstance(globalId, np.ndarray):
+        return globalId.astype(np.int32)
+    else:
+        return int(globalId)
+
+def global2local(globalId):
+    semanticId = globalId // MAX_N
+    instanceId = globalId % MAX_N
+    if isinstance(globalId, np.ndarray):
+        return semanticId.astype(np.int32), instanceId.astype(np.int32)
+    else:
+        return int(semanticId), int(instanceId)
+
+class KITTI360Bbox3D:
+    # Constructor
+    def __init__(self):
+
+        # the ID of the corresponding object
+        self.semanticId = -1
+        self.instanceId = -1
+        self.annotationId = -1
+        self.globalID = -1
+
+        # the window that contains the bbox
+        self.start_frame = -1
+        self.end_frame = -1
+
+        # timestamp of the bbox (-1 if static)
+        self.timestamp = -1
+
+        # name
+        self.name = ''
+
+    def parseOpencvMatrix(self, node):
+        rows = int(node.find('rows').text)
+        cols = int(node.find('cols').text)
+        data = node.find('data').text.split(' ')
+
+        mat = []
+        for d in data:
+            d = d.replace('\n', '')
+            if len(d) < 1:
+                continue
+            mat.append(float(d))
+        mat = np.reshape(mat, [rows, cols])
+        return mat
+
+    def parseBbox(self, child):
+        semanticIdKITTI = int(child.find('semanticId').text)
+        self.semanticId = kittiId2label[semanticIdKITTI].id
+        self.instanceId = int(child.find('instanceId').text)
+        self.name = kittiId2label[semanticIdKITTI].name
+
+        self.start_frame = int(child.find('start_frame').text)
+        self.end_frame = int(child.find('end_frame').text)
+
+        self.timestamp = int(child.find('timestamp').text)
+
+        self.annotationId = int(child.find('index').text) + 1
+
+        self.globalID = local2global(self.semanticId, self.instanceId)
+        transform = self.parseOpencvMatrix(child.find('transform'))
+        self.R = transform[:3, :3]
+        self.T = transform[:3, 3]
+
+    def polar_decompose_rotation_scale(self):
+        # Polar decomposition: self.R = Rm @ Sm with Rm orthogonal (pure rotation)
+        # and Sm symmetric (stretch); the diagonal of Sm carries the box scale.
+        Rm, Sm = polar(self.R)
+        scale = np.diag(Sm)
+        yaw, pitch, roll = R.from_matrix(Rm).as_euler('zyx', degrees=False)
+
+        return scale, (yaw, pitch, roll)
+
+    def get_state_array(self):
+        scale, (yaw, pitch, roll) = self.polar_decompose_rotation_scale()
+        # Roll and pitch are discarded: annotated boxes are assumed gravity-aligned.
+        center = StateSE3(
+            x=self.T[0],
+            y=self.T[1],
+            z=self.T[2],
+            roll=DEFAULT_ROLL,
+            pitch=DEFAULT_PITCH,
+            yaw=yaw,
+        )
+        bounding_box_se3 = BoundingBoxSE3(center, scale[0], scale[1], scale[2])
+
+        return bounding_box_se3.array
\ No newline at end of file
diff --git a/d123/dataset/dataset_specific/kitti_360/labels.py b/d123/dataset/dataset_specific/kitti_360/labels.py
new file mode 100644
index 00000000..38f8a91c
--- /dev/null
+++ b/d123/dataset/dataset_specific/kitti_360/labels.py
@@ -0,0 +1,168 @@
+#!/usr/bin/python
+#
+# KITTI-360 labels
+#
+
+from collections import namedtuple
+
+
+#--------------------------------------------------------------------------------
+# Definitions
+#--------------------------------------------------------------------------------
+
+# a label and all meta information
+Label = namedtuple( 'Label' , [
+
+    'name'        , # The identifier of this label, e.g. 'car', 'person', ... .
+                    # We use them to uniquely name a class
+
+    'id'          , # An integer ID that is associated with this label.
+ # The IDs are used to represent the label in ground truth images + # An ID of -1 means that this label does not have an ID and thus + # is ignored when creating ground truth images (e.g. license plate). + # Do not modify these IDs, since exactly these IDs are expected by the + # evaluation server. + + 'kittiId' , # An integer ID that is associated with this label for KITTI-360 + # NOT FOR RELEASING + + 'trainId' , # Feel free to modify these IDs as suitable for your method. Then create + # ground truth images with train IDs, using the tools provided in the + # 'preparation' folder. However, make sure to validate or submit results + # to our evaluation server using the regular IDs above! + # For trainIds, multiple labels might have the same ID. Then, these labels + # are mapped to the same class in the ground truth images. For the inverse + # mapping, we use the label that is defined first in the list below. + # For example, mapping all void-type classes to the same ID in training, + # might make sense for some approaches. + # Max value is 255! + + 'category' , # The name of the category that this label belongs to + + 'categoryId' , # The ID of this category. Used to create ground truth images + # on category level. + + 'hasInstances', # Whether this label distinguishes between single instances or not + + 'ignoreInEval', # Whether pixels having this class as ground truth label are ignored + # during evaluations or not + + 'ignoreInInst', # Whether pixels having this class as ground truth label are ignored + # during evaluations of instance segmentation or not + + 'color' , # The color of this label + ] ) + + +#-------------------------------------------------------------------------------- +# A list of all labels +#-------------------------------------------------------------------------------- + +# Please adapt the train IDs as appropriate for your approach. +# Note that you might want to ignore labels with ID 255 during training. +# Further note that the current train IDs are only a suggestion. You can use whatever you like. +# Make sure to provide your results using the original IDs and not the training IDs. +# Note that many IDs are ignored in evaluation and thus you never need to predict these! 
+ +labels = [ + # name id kittiId, trainId category catId hasInstances ignoreInEval ignoreInInst color + Label( 'unlabeled' , 0 , -1 , 255 , 'void' , 0 , False , True , True , ( 0, 0, 0) ), + Label( 'ego vehicle' , 1 , -1 , 255 , 'void' , 0 , False , True , True , ( 0, 0, 0) ), + Label( 'rectification border' , 2 , -1 , 255 , 'void' , 0 , False , True , True , ( 0, 0, 0) ), + Label( 'out of roi' , 3 , -1 , 255 , 'void' , 0 , False , True , True , ( 0, 0, 0) ), + Label( 'static' , 4 , -1 , 255 , 'void' , 0 , False , True , True , ( 0, 0, 0) ), + Label( 'dynamic' , 5 , -1 , 255 , 'void' , 0 , False , True , True , (111, 74, 0) ), + Label( 'ground' , 6 , -1 , 255 , 'void' , 0 , False , True , True , ( 81, 0, 81) ), + Label( 'road' , 7 , 1 , 0 , 'flat' , 1 , False , False , False , (128, 64,128) ), + Label( 'sidewalk' , 8 , 3 , 1 , 'flat' , 1 , False , False , False , (244, 35,232) ), + Label( 'parking' , 9 , 2 , 255 , 'flat' , 1 , False , True , True , (250,170,160) ), + Label( 'rail track' , 10 , 10, 255 , 'flat' , 1 , False , True , True , (230,150,140) ), + Label( 'building' , 11 , 11, 2 , 'construction' , 2 , True , False , False , ( 70, 70, 70) ), + Label( 'wall' , 12 , 7 , 3 , 'construction' , 2 , False , False , False , (102,102,156) ), + Label( 'fence' , 13 , 8 , 4 , 'construction' , 2 , False , False , False , (190,153,153) ), + Label( 'guard rail' , 14 , 30, 255 , 'construction' , 2 , False , True , True , (180,165,180) ), + Label( 'bridge' , 15 , 31, 255 , 'construction' , 2 , False , True , True , (150,100,100) ), + Label( 'tunnel' , 16 , 32, 255 , 'construction' , 2 , False , True , True , (150,120, 90) ), + Label( 'pole' , 17 , 21, 5 , 'object' , 3 , True , False , True , (153,153,153) ), + Label( 'polegroup' , 18 , -1 , 255 , 'object' , 3 , False , True , True , (153,153,153) ), + Label( 'traffic light' , 19 , 23, 6 , 'object' , 3 , True , False , True , (250,170, 30) ), + Label( 'traffic sign' , 20 , 24, 7 , 'object' , 3 , True , False , True , (220,220, 0) ), + Label( 'vegetation' , 21 , 5 , 8 , 'nature' , 4 , False , False , False , (107,142, 35) ), + Label( 'terrain' , 22 , 4 , 9 , 'nature' , 4 , False , False , False , (152,251,152) ), + Label( 'sky' , 23 , 9 , 10 , 'sky' , 5 , False , False , False , ( 70,130,180) ), + Label( 'person' , 24 , 19, 11 , 'human' , 6 , True , False , False , (220, 20, 60) ), + Label( 'rider' , 25 , 20, 12 , 'human' , 6 , True , False , False , (255, 0, 0) ), + Label( 'car' , 26 , 13, 13 , 'vehicle' , 7 , True , False , False , ( 0, 0,142) ), + Label( 'truck' , 27 , 14, 14 , 'vehicle' , 7 , True , False , False , ( 0, 0, 70) ), + Label( 'bus' , 28 , 34, 15 , 'vehicle' , 7 , True , False , False , ( 0, 60,100) ), + Label( 'caravan' , 29 , 16, 255 , 'vehicle' , 7 , True , True , True , ( 0, 0, 90) ), + Label( 'trailer' , 30 , 15, 255 , 'vehicle' , 7 , True , True , True , ( 0, 0,110) ), + Label( 'train' , 31 , 33, 16 , 'vehicle' , 7 , True , False , False , ( 0, 80,100) ), + Label( 'motorcycle' , 32 , 17, 17 , 'vehicle' , 7 , True , False , False , ( 0, 0,230) ), + Label( 'bicycle' , 33 , 18, 18 , 'vehicle' , 7 , True , False , False , (119, 11, 32) ), + Label( 'garage' , 34 , 12, 2 , 'construction' , 2 , True , True , True , ( 64,128,128) ), + Label( 'gate' , 35 , 6 , 4 , 'construction' , 2 , False , True , True , (190,153,153) ), + Label( 'stop' , 36 , 29, 255 , 'construction' , 2 , True , True , True , (150,120, 90) ), + Label( 'smallpole' , 37 , 22, 5 , 'object' , 3 , True , True , True , (153,153,153) ), + Label( 'lamp' , 38 , 25, 255 , 
'object' , 3 , True , True , True , (0, 64, 64) ), + Label( 'trash bin' , 39 , 26, 255 , 'object' , 3 , True , True , True , (0, 128,192) ), + Label( 'vending machine' , 40 , 27, 255 , 'object' , 3 , True , True , True , (128, 64, 0) ), + Label( 'box' , 41 , 28, 255 , 'object' , 3 , True , True , True , (64, 64,128) ), + Label( 'unknown construction' , 42 , 35, 255 , 'void' , 0 , False , True , True , (102, 0, 0) ), + Label( 'unknown vehicle' , 43 , 36, 255 , 'void' , 0 , False , True , True , ( 51, 0, 51) ), + Label( 'unknown object' , 44 , 37, 255 , 'void' , 0 , False , True , True , ( 32, 32, 32) ), + Label( 'license plate' , -1 , -1, -1 , 'vehicle' , 7 , False , True , True , ( 0, 0,142) ), +] + +#-------------------------------------------------------------------------------- +# Create dictionaries for a fast lookup +#-------------------------------------------------------------------------------- + +# Please refer to the main method below for example usages! + +# name to label object +name2label = { label.name : label for label in labels } +# id to label object +id2label = { label.id : label for label in labels } +# trainId to label object +trainId2label = { label.trainId : label for label in reversed(labels) } +# KITTI-360 ID to cityscapes ID +kittiId2label = { label.kittiId : label for label in labels } +# category to list of label objects +category2labels = {} +for label in labels: + category = label.category + if category in category2labels: + category2labels[category].append(label) + else: + category2labels[category] = [label] + +#-------------------------------------------------------------------------------- +# Assure single instance name +#-------------------------------------------------------------------------------- + +# returns the label name that describes a single instance (if possible) +# e.g. 
input | output +# ---------------------- +# car | car +# cargroup | car +# foo | None +# foogroup | None +# skygroup | None +def assureSingleInstanceName( name ): + # if the name is known, it is not a group + if name in name2label: + return name + # test if the name actually denotes a group + if not name.endswith("group"): + return None + # remove group + name = name[:-len("group")] + # test if the new name exists + if not name in name2label: + return None + # test if the new name denotes a label that actually has instances + if not name2label[name].hasInstances: + return None + # all good then + return name diff --git a/d123/script/config/dataset_conversion/default_dataset_conversion.yaml b/d123/script/config/dataset_conversion/default_dataset_conversion.yaml index b844fa03..e1c76c60 100644 --- a/d123/script/config/dataset_conversion/default_dataset_conversion.yaml +++ b/d123/script/config/dataset_conversion/default_dataset_conversion.yaml @@ -17,11 +17,7 @@ defaults: - datasets: - kitti360_dataset # - nuplan_private_dataset -<<<<<<< HEAD # - carla_dataset -======= - - carla_dataset ->>>>>>> dev_v0.0.6 # - wopd_dataset force_log_conversion: True diff --git a/d123/script/config/datasets/kitti360_dataset.yaml b/d123/script/config/datasets/kitti360_dataset.yaml index 418d36a4..17b9e863 100644 --- a/d123/script/config/datasets/kitti360_dataset.yaml +++ b/d123/script/config/datasets/kitti360_dataset.yaml @@ -1,4 +1,4 @@ -nuplan_dataset: +kitti360_dataset: _target_: d123.dataset.dataset_specific.kitti_360.kitti_360_data_converter.Kitti360DataConverter _convert_: 'all' diff --git a/exp/kitti360_test/2025.08.15.14.31.57/code/hydra/config.yaml b/exp/kitti360_test/2025.08.15.14.31.57/code/hydra/config.yaml new file mode 100644 index 00000000..a505c4d2 --- /dev/null +++ b/exp/kitti360_test/2025.08.15.14.31.57/code/hydra/config.yaml @@ -0,0 +1,60 @@ +worker: + _target_: nuplan.planning.utils.multithreading.worker_ray.RayDistributed + _convert_: all + master_node_ip: null + threads_per_node: null + debug_mode: false + log_to_driver: true + logs_subdir: logs + use_distributed: false +scene_filter: + _target_: d123.dataset.scene.scene_filter.SceneFilter + _convert_: all + split_types: null + split_names: null + log_names: null + map_names: null + scene_tokens: null + timestamp_threshold_s: null + ego_displacement_minimum_m: null + duration_s: 9.2 + history_s: 3.0 +scene_builder: + _target_: d123.dataset.scene.scene_builder.ArrowSceneBuilder + _convert_: all + dataset_path: ${d123_data_root} +distributed_timeout_seconds: 7200 +selected_simulation_metrics: null +verbose: false +logger_level: info +logger_format_string: null +max_number_of_workers: null +gpu: true +seed: 42 +d123_devkit_root: ${oc.env:D123_DEVKIT_ROOT} +d123_maps_root: ${oc.env:D123_MAPS_ROOT} +d123_data_root: ${oc.env:D123_DATA_ROOT} +nuplan_devkit_root: ${oc.env:NUPLAN_DEVKIT_ROOT} +nuplan_maps_root: ${oc.env:NUPLAN_MAPS_ROOT} +nuplan_data_root: ${oc.env:NUPLAN_DATA_ROOT} +experiment_name: kitti360_test +date_format: '%Y.%m.%d.%H.%M.%S' +experiment_uid: ${now:${date_format}} +output_dir: ${oc.env:D123_EXP_ROOT}/${experiment_name}/${experiment_uid} +force_log_conversion: true +force_map_conversion: false +datasets: + nuplan_dataset: + _target_: d123.dataset.dataset_specific.kitti_360.kitti_360_data_converter.Kitti360DataConverter + _convert_: all + splits: + - kitti360 + log_path: ${oc.env:KITTI360_DATA_ROOT} + data_converter_config: + _target_: d123.dataset.dataset_specific.raw_data_converter.DataConverterConfig + _convert_: all + 
output_path: ${d123_data_root} + force_log_conversion: ${force_log_conversion} + force_map_conversion: ${force_map_conversion} + camera_store_option: path + lidar_store_option: path diff --git a/exp/kitti360_test/2025.08.15.14.31.57/code/hydra/hydra.yaml b/exp/kitti360_test/2025.08.15.14.31.57/code/hydra/hydra.yaml new file mode 100644 index 00000000..406ccbe7 --- /dev/null +++ b/exp/kitti360_test/2025.08.15.14.31.57/code/hydra/hydra.yaml @@ -0,0 +1,177 @@ +hydra: + run: + dir: ${output_dir} + sweep: + dir: multirun/${now:%Y-%m-%d}/${now:%H-%M-%S} + subdir: ${hydra.job.num} + launcher: + _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher + sweeper: + _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper + max_batch_size: null + params: null + help: + app_name: ${hydra.job.name} + header: '${hydra.help.app_name} is powered by Hydra. + + ' + footer: 'Powered by Hydra (https://hydra.cc) + + Use --hydra-help to view Hydra specific help + + ' + template: '${hydra.help.header} + + == Configuration groups == + + Compose your configuration from those groups (group=option) + + + $APP_CONFIG_GROUPS + + + == Config == + + Override anything in the config (foo.bar=value) + + + $CONFIG + + + ${hydra.help.footer} + + ' + hydra_help: + template: 'Hydra (${hydra.runtime.version}) + + See https://hydra.cc for more info. + + + == Flags == + + $FLAGS_HELP + + + == Configuration groups == + + Compose your configuration from those groups (For example, append hydra/job_logging=disabled + to command line) + + + $HYDRA_CONFIG_GROUPS + + + Use ''--cfg hydra'' to Show the Hydra config. + + ' + hydra_help: ??? + hydra_logging: + version: 1 + formatters: + colorlog: + (): colorlog.ColoredFormatter + format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' + handlers: + console: + class: logging.StreamHandler + formatter: colorlog + stream: ext://sys.stdout + root: + level: INFO + handlers: + - console + disable_existing_loggers: false + job_logging: + version: 1 + formatters: + simple: + format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' + colorlog: + (): colorlog.ColoredFormatter + format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] + - %(message)s' + log_colors: + DEBUG: purple + INFO: green + WARNING: yellow + ERROR: red + CRITICAL: red + handlers: + console: + class: logging.StreamHandler + formatter: colorlog + stream: ext://sys.stdout + file: + class: logging.FileHandler + formatter: simple + filename: ${hydra.job.name}.log + root: + level: INFO + handlers: + - console + - file + disable_existing_loggers: false + env: {} + mode: RUN + searchpath: + - pkg://d123.script.config + - pkg://d123.script.config.common + callbacks: {} + output_subdir: ${output_dir}/code/hydra + overrides: + hydra: + - hydra.mode=RUN + task: + - experiment_name=kitti360_test + job: + name: run_dataset_conversion + chdir: false + override_dirname: experiment_name=kitti360_test + id: ??? + num: ??? 
+ config_name: default_dataset_conversion + env_set: {} + env_copy: [] + config: + override_dirname: + kv_sep: '=' + item_sep: ',' + exclude_keys: [] + runtime: + version: 1.3.2 + version_base: '1.3' + cwd: /home/jbwang/d123/d123/script + config_sources: + - path: hydra.conf + schema: pkg + provider: hydra + - path: /home/jbwang/d123/d123/script/config/dataset_conversion + schema: file + provider: main + - path: hydra_plugins.hydra_colorlog.conf + schema: pkg + provider: hydra-colorlog + - path: d123.script.config + schema: pkg + provider: hydra.searchpath in main + - path: d123.script.config.common + schema: pkg + provider: hydra.searchpath in main + - path: '' + schema: structured + provider: schema + output_dir: /home/jbwang/d123/exp/kitti360_test/2025.08.15.14.31.57 + choices: + scene_builder: default_scene_builder + scene_filter: all_scenes + worker: ray_distributed + hydra/env: default + hydra/callbacks: null + hydra/job_logging: colorlog + hydra/hydra_logging: colorlog + hydra/hydra_help: default + hydra/help: default + hydra/sweeper: basic + hydra/launcher: basic + hydra/output: default + verbose: false diff --git a/exp/kitti360_test/2025.08.15.14.31.57/code/hydra/overrides.yaml b/exp/kitti360_test/2025.08.15.14.31.57/code/hydra/overrides.yaml new file mode 100644 index 00000000..6c8e6217 --- /dev/null +++ b/exp/kitti360_test/2025.08.15.14.31.57/code/hydra/overrides.yaml @@ -0,0 +1 @@ +- experiment_name=kitti360_test diff --git a/exp/kitti360_test/2025.08.15.14.31.57/log.txt b/exp/kitti360_test/2025.08.15.14.31.57/log.txt new file mode 100644 index 00000000..984f705a --- /dev/null +++ b/exp/kitti360_test/2025.08.15.14.31.57/log.txt @@ -0,0 +1,10 @@ +2025-08-15 14:31:57,385 INFO {/home/jbwang/d123/d123/script/builders/worker_pool_builder.py:19} Building WorkerPool... +2025-08-15 14:32:14,105 INFO {/data/jbwang/submodule/nuplan-devkit-master/nuplan/planning/utils/multithreading/worker_ray.py:78} Starting ray local! +2025-08-15 14:32:35,603 INFO {/data/jbwang/submodule/nuplan-devkit-master/nuplan/planning/utils/multithreading/worker_pool.py:101} Worker: RayDistributed +2025-08-15 14:32:35,604 INFO {/data/jbwang/submodule/nuplan-devkit-master/nuplan/planning/utils/multithreading/worker_pool.py:102} Number of nodes: 1 +Number of CPUs per node: 64 +Number of GPUs per node: 8 +Number of threads across all nodes: 64 +2025-08-15 14:32:35,604 INFO {/home/jbwang/d123/d123/script/builders/worker_pool_builder.py:27} Building WorkerPool...DONE! +2025-08-15 14:32:35,604 INFO {/home/jbwang/d123/d123/script/run_dataset_conversion.py:30} Starting Dataset Caching... +2025-08-15 14:32:35,605 INFO {/home/jbwang/d123/d123/script/builders/data_converter_builder.py:14} Building RawDataProcessor... 
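
Note on the snapshotted configs above: every mapping that carries a `_target_` key is resolved through Hydra's instantiate API, which imports the dotted class path and constructs it with the remaining keys as keyword arguments (`_convert_: all` converts nested OmegaConf containers to plain Python objects first). A minimal sketch of that resolution step; the decorator arguments are illustrative and the real entry point lives in d123/script/run_dataset_conversion.py:

    import hydra
    from omegaconf import DictConfig

    @hydra.main(config_path="config/dataset_conversion", config_name="default_dataset_conversion", version_base="1.3")
    def main(cfg: DictConfig) -> None:
        # Each entry under cfg.datasets carries a _target_ dotted path;
        # instantiate() imports that class and constructs it with the remaining keys.
        for name, dataset_cfg in cfg.datasets.items():
            converter = hydra.utils.instantiate(dataset_cfg)
            print(f"{name}: {type(converter).__name__}")

    if __name__ == "__main__":
        main()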
diff --git a/exp/kitti360_test/2025.08.15.14.36.40/code/hydra/config.yaml b/exp/kitti360_test/2025.08.15.14.36.40/code/hydra/config.yaml new file mode 100644 index 00000000..0fd6120d --- /dev/null +++ b/exp/kitti360_test/2025.08.15.14.36.40/code/hydra/config.yaml @@ -0,0 +1,60 @@ +worker: + _target_: nuplan.planning.utils.multithreading.worker_ray.RayDistributed + _convert_: all + master_node_ip: null + threads_per_node: null + debug_mode: false + log_to_driver: true + logs_subdir: logs + use_distributed: false +scene_filter: + _target_: d123.dataset.scene.scene_filter.SceneFilter + _convert_: all + split_types: null + split_names: null + log_names: null + map_names: null + scene_tokens: null + timestamp_threshold_s: null + ego_displacement_minimum_m: null + duration_s: 9.2 + history_s: 3.0 +scene_builder: + _target_: d123.dataset.scene.scene_builder.ArrowSceneBuilder + _convert_: all + dataset_path: ${d123_data_root} +distributed_timeout_seconds: 7200 +selected_simulation_metrics: null +verbose: false +logger_level: info +logger_format_string: null +max_number_of_workers: null +gpu: true +seed: 42 +d123_devkit_root: ${oc.env:D123_DEVKIT_ROOT} +d123_maps_root: ${oc.env:D123_MAPS_ROOT} +d123_data_root: ${oc.env:D123_DATA_ROOT} +nuplan_devkit_root: ${oc.env:NUPLAN_DEVKIT_ROOT} +nuplan_maps_root: ${oc.env:NUPLAN_MAPS_ROOT} +nuplan_data_root: ${oc.env:NUPLAN_DATA_ROOT} +experiment_name: kitti360_test +date_format: '%Y.%m.%d.%H.%M.%S' +experiment_uid: ${now:${date_format}} +output_dir: ${oc.env:D123_EXP_ROOT}/${experiment_name}/${experiment_uid} +force_log_conversion: true +force_map_conversion: false +datasets: + kitti360_dataset: + _target_: d123.dataset.dataset_specific.kitti_360.kitti_360_data_converter.Kitti360DataConverter + _convert_: all + splits: + - kitti360 + log_path: ${oc.env:KITTI360_DATA_ROOT} + data_converter_config: + _target_: d123.dataset.dataset_specific.raw_data_converter.DataConverterConfig + _convert_: all + output_path: ${d123_data_root} + force_log_conversion: ${force_log_conversion} + force_map_conversion: ${force_map_conversion} + camera_store_option: path + lidar_store_option: path diff --git a/exp/kitti360_test/2025.08.15.14.36.40/code/hydra/hydra.yaml b/exp/kitti360_test/2025.08.15.14.36.40/code/hydra/hydra.yaml new file mode 100644 index 00000000..4eee2c65 --- /dev/null +++ b/exp/kitti360_test/2025.08.15.14.36.40/code/hydra/hydra.yaml @@ -0,0 +1,177 @@ +hydra: + run: + dir: ${output_dir} + sweep: + dir: multirun/${now:%Y-%m-%d}/${now:%H-%M-%S} + subdir: ${hydra.job.num} + launcher: + _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher + sweeper: + _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper + max_batch_size: null + params: null + help: + app_name: ${hydra.job.name} + header: '${hydra.help.app_name} is powered by Hydra. + + ' + footer: 'Powered by Hydra (https://hydra.cc) + + Use --hydra-help to view Hydra specific help + + ' + template: '${hydra.help.header} + + == Configuration groups == + + Compose your configuration from those groups (group=option) + + + $APP_CONFIG_GROUPS + + + == Config == + + Override anything in the config (foo.bar=value) + + + $CONFIG + + + ${hydra.help.footer} + + ' + hydra_help: + template: 'Hydra (${hydra.runtime.version}) + + See https://hydra.cc for more info. 
+ + + == Flags == + + $FLAGS_HELP + + + == Configuration groups == + + Compose your configuration from those groups (For example, append hydra/job_logging=disabled + to command line) + + + $HYDRA_CONFIG_GROUPS + + + Use ''--cfg hydra'' to Show the Hydra config. + + ' + hydra_help: ??? + hydra_logging: + version: 1 + formatters: + colorlog: + (): colorlog.ColoredFormatter + format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' + handlers: + console: + class: logging.StreamHandler + formatter: colorlog + stream: ext://sys.stdout + root: + level: INFO + handlers: + - console + disable_existing_loggers: false + job_logging: + version: 1 + formatters: + simple: + format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' + colorlog: + (): colorlog.ColoredFormatter + format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] + - %(message)s' + log_colors: + DEBUG: purple + INFO: green + WARNING: yellow + ERROR: red + CRITICAL: red + handlers: + console: + class: logging.StreamHandler + formatter: colorlog + stream: ext://sys.stdout + file: + class: logging.FileHandler + formatter: simple + filename: ${hydra.job.name}.log + root: + level: INFO + handlers: + - console + - file + disable_existing_loggers: false + env: {} + mode: RUN + searchpath: + - pkg://d123.script.config + - pkg://d123.script.config.common + callbacks: {} + output_subdir: ${output_dir}/code/hydra + overrides: + hydra: + - hydra.mode=RUN + task: + - experiment_name=kitti360_test + job: + name: run_dataset_conversion + chdir: false + override_dirname: experiment_name=kitti360_test + id: ??? + num: ??? + config_name: default_dataset_conversion + env_set: {} + env_copy: [] + config: + override_dirname: + kv_sep: '=' + item_sep: ',' + exclude_keys: [] + runtime: + version: 1.3.2 + version_base: '1.3' + cwd: /home/jbwang/d123/d123/script + config_sources: + - path: hydra.conf + schema: pkg + provider: hydra + - path: /home/jbwang/d123/d123/script/config/dataset_conversion + schema: file + provider: main + - path: hydra_plugins.hydra_colorlog.conf + schema: pkg + provider: hydra-colorlog + - path: d123.script.config + schema: pkg + provider: hydra.searchpath in main + - path: d123.script.config.common + schema: pkg + provider: hydra.searchpath in main + - path: '' + schema: structured + provider: schema + output_dir: /home/jbwang/d123/exp/kitti360_test/2025.08.15.14.36.40 + choices: + scene_builder: default_scene_builder + scene_filter: all_scenes + worker: ray_distributed + hydra/env: default + hydra/callbacks: null + hydra/job_logging: colorlog + hydra/hydra_logging: colorlog + hydra/hydra_help: default + hydra/help: default + hydra/sweeper: basic + hydra/launcher: basic + hydra/output: default + verbose: false diff --git a/exp/kitti360_test/2025.08.15.14.36.40/code/hydra/overrides.yaml b/exp/kitti360_test/2025.08.15.14.36.40/code/hydra/overrides.yaml new file mode 100644 index 00000000..6c8e6217 --- /dev/null +++ b/exp/kitti360_test/2025.08.15.14.36.40/code/hydra/overrides.yaml @@ -0,0 +1 @@ +- experiment_name=kitti360_test diff --git a/exp/kitti360_test/2025.08.15.14.36.40/log.txt b/exp/kitti360_test/2025.08.15.14.36.40/log.txt new file mode 100644 index 00000000..5f939dac --- /dev/null +++ b/exp/kitti360_test/2025.08.15.14.36.40/log.txt @@ -0,0 +1,10 @@ +2025-08-15 14:36:40,989 INFO {/home/jbwang/d123/d123/script/builders/worker_pool_builder.py:19} Building WorkerPool... 
+2025-08-15 14:36:56,167 INFO {/data/jbwang/submodule/nuplan-devkit-master/nuplan/planning/utils/multithreading/worker_ray.py:78} Starting ray local! +2025-08-15 14:37:18,685 INFO {/data/jbwang/submodule/nuplan-devkit-master/nuplan/planning/utils/multithreading/worker_pool.py:101} Worker: RayDistributed +2025-08-15 14:37:18,686 INFO {/data/jbwang/submodule/nuplan-devkit-master/nuplan/planning/utils/multithreading/worker_pool.py:102} Number of nodes: 1 +Number of CPUs per node: 64 +Number of GPUs per node: 8 +Number of threads across all nodes: 64 +2025-08-15 14:37:18,686 INFO {/home/jbwang/d123/d123/script/builders/worker_pool_builder.py:27} Building WorkerPool...DONE! +2025-08-15 14:37:18,686 INFO {/home/jbwang/d123/d123/script/run_dataset_conversion.py:30} Starting Dataset Caching... +2025-08-15 14:37:18,687 INFO {/home/jbwang/d123/d123/script/builders/data_converter_builder.py:14} Building RawDataProcessor... diff --git a/exp/kitti_test2/2025.08.15.14.40.29/code/hydra/config.yaml b/exp/kitti_test2/2025.08.15.14.40.29/code/hydra/config.yaml new file mode 100644 index 00000000..5ce47ba9 --- /dev/null +++ b/exp/kitti_test2/2025.08.15.14.40.29/code/hydra/config.yaml @@ -0,0 +1,60 @@ +worker: + _target_: nuplan.planning.utils.multithreading.worker_ray.RayDistributed + _convert_: all + master_node_ip: null + threads_per_node: null + debug_mode: false + log_to_driver: true + logs_subdir: logs + use_distributed: false +scene_filter: + _target_: d123.dataset.scene.scene_filter.SceneFilter + _convert_: all + split_types: null + split_names: null + log_names: null + map_names: null + scene_tokens: null + timestamp_threshold_s: null + ego_displacement_minimum_m: null + duration_s: 9.2 + history_s: 3.0 +scene_builder: + _target_: d123.dataset.scene.scene_builder.ArrowSceneBuilder + _convert_: all + dataset_path: ${d123_data_root} +distributed_timeout_seconds: 7200 +selected_simulation_metrics: null +verbose: false +logger_level: info +logger_format_string: null +max_number_of_workers: null +gpu: true +seed: 42 +d123_devkit_root: ${oc.env:D123_DEVKIT_ROOT} +d123_maps_root: ${oc.env:D123_MAPS_ROOT} +d123_data_root: ${oc.env:D123_DATA_ROOT} +nuplan_devkit_root: ${oc.env:NUPLAN_DEVKIT_ROOT} +nuplan_maps_root: ${oc.env:NUPLAN_MAPS_ROOT} +nuplan_data_root: ${oc.env:NUPLAN_DATA_ROOT} +experiment_name: kitti_test2 +date_format: '%Y.%m.%d.%H.%M.%S' +experiment_uid: ${now:${date_format}} +output_dir: ${oc.env:D123_EXP_ROOT}/${experiment_name}/${experiment_uid} +force_log_conversion: true +force_map_conversion: false +datasets: + kitti360_dataset: + _target_: d123.dataset.dataset_specific.kitti_360.kitti_360_data_converter.Kitti360DataConverter + _convert_: all + splits: + - kitti360 + log_path: ${oc.env:KITTI360_DATA_ROOT} + data_converter_config: + _target_: d123.dataset.dataset_specific.raw_data_converter.DataConverterConfig + _convert_: all + output_path: ${d123_data_root} + force_log_conversion: ${force_log_conversion} + force_map_conversion: ${force_map_conversion} + camera_store_option: path + lidar_store_option: path diff --git a/exp/kitti_test2/2025.08.15.14.40.29/code/hydra/hydra.yaml b/exp/kitti_test2/2025.08.15.14.40.29/code/hydra/hydra.yaml new file mode 100644 index 00000000..2d1c615a --- /dev/null +++ b/exp/kitti_test2/2025.08.15.14.40.29/code/hydra/hydra.yaml @@ -0,0 +1,177 @@ +hydra: + run: + dir: ${output_dir} + sweep: + dir: multirun/${now:%Y-%m-%d}/${now:%H-%M-%S} + subdir: ${hydra.job.num} + launcher: + _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher + sweeper: + 
_target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper + max_batch_size: null + params: null + help: + app_name: ${hydra.job.name} + header: '${hydra.help.app_name} is powered by Hydra. + + ' + footer: 'Powered by Hydra (https://hydra.cc) + + Use --hydra-help to view Hydra specific help + + ' + template: '${hydra.help.header} + + == Configuration groups == + + Compose your configuration from those groups (group=option) + + + $APP_CONFIG_GROUPS + + + == Config == + + Override anything in the config (foo.bar=value) + + + $CONFIG + + + ${hydra.help.footer} + + ' + hydra_help: + template: 'Hydra (${hydra.runtime.version}) + + See https://hydra.cc for more info. + + + == Flags == + + $FLAGS_HELP + + + == Configuration groups == + + Compose your configuration from those groups (For example, append hydra/job_logging=disabled + to command line) + + + $HYDRA_CONFIG_GROUPS + + + Use ''--cfg hydra'' to Show the Hydra config. + + ' + hydra_help: ??? + hydra_logging: + version: 1 + formatters: + colorlog: + (): colorlog.ColoredFormatter + format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' + handlers: + console: + class: logging.StreamHandler + formatter: colorlog + stream: ext://sys.stdout + root: + level: INFO + handlers: + - console + disable_existing_loggers: false + job_logging: + version: 1 + formatters: + simple: + format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' + colorlog: + (): colorlog.ColoredFormatter + format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] + - %(message)s' + log_colors: + DEBUG: purple + INFO: green + WARNING: yellow + ERROR: red + CRITICAL: red + handlers: + console: + class: logging.StreamHandler + formatter: colorlog + stream: ext://sys.stdout + file: + class: logging.FileHandler + formatter: simple + filename: ${hydra.job.name}.log + root: + level: INFO + handlers: + - console + - file + disable_existing_loggers: false + env: {} + mode: RUN + searchpath: + - pkg://d123.script.config + - pkg://d123.script.config.common + callbacks: {} + output_subdir: ${output_dir}/code/hydra + overrides: + hydra: + - hydra.mode=RUN + task: + - experiment_name=kitti_test2 + job: + name: run_dataset_conversion + chdir: false + override_dirname: experiment_name=kitti_test2 + id: ??? + num: ??? 
+ config_name: default_dataset_conversion + env_set: {} + env_copy: [] + config: + override_dirname: + kv_sep: '=' + item_sep: ',' + exclude_keys: [] + runtime: + version: 1.3.2 + version_base: '1.3' + cwd: /home/jbwang/d123 + config_sources: + - path: hydra.conf + schema: pkg + provider: hydra + - path: /home/jbwang/d123/d123/script/config/dataset_conversion + schema: file + provider: main + - path: hydra_plugins.hydra_colorlog.conf + schema: pkg + provider: hydra-colorlog + - path: d123.script.config + schema: pkg + provider: hydra.searchpath in main + - path: d123.script.config.common + schema: pkg + provider: hydra.searchpath in main + - path: '' + schema: structured + provider: schema + output_dir: /home/jbwang/d123/exp/kitti_test2/2025.08.15.14.40.29 + choices: + scene_builder: default_scene_builder + scene_filter: all_scenes + worker: ray_distributed + hydra/env: default + hydra/callbacks: null + hydra/job_logging: colorlog + hydra/hydra_logging: colorlog + hydra/hydra_help: default + hydra/help: default + hydra/sweeper: basic + hydra/launcher: basic + hydra/output: default + verbose: false diff --git a/exp/kitti_test2/2025.08.15.14.40.29/code/hydra/overrides.yaml b/exp/kitti_test2/2025.08.15.14.40.29/code/hydra/overrides.yaml new file mode 100644 index 00000000..676c1042 --- /dev/null +++ b/exp/kitti_test2/2025.08.15.14.40.29/code/hydra/overrides.yaml @@ -0,0 +1 @@ +- experiment_name=kitti_test2 diff --git a/exp/kitti_test2/2025.08.15.14.40.29/log.txt b/exp/kitti_test2/2025.08.15.14.40.29/log.txt new file mode 100644 index 00000000..8437d38e --- /dev/null +++ b/exp/kitti_test2/2025.08.15.14.40.29/log.txt @@ -0,0 +1,10 @@ +2025-08-15 14:40:29,427 INFO {/home/jbwang/d123/d123/script/builders/worker_pool_builder.py:19} Building WorkerPool... +2025-08-15 14:40:42,538 INFO {/data/jbwang/submodule/nuplan-devkit-master/nuplan/planning/utils/multithreading/worker_ray.py:78} Starting ray local! +2025-08-15 14:41:00,324 INFO {/data/jbwang/submodule/nuplan-devkit-master/nuplan/planning/utils/multithreading/worker_pool.py:101} Worker: RayDistributed +2025-08-15 14:41:00,325 INFO {/data/jbwang/submodule/nuplan-devkit-master/nuplan/planning/utils/multithreading/worker_pool.py:102} Number of nodes: 1 +Number of CPUs per node: 64 +Number of GPUs per node: 8 +Number of threads across all nodes: 64 +2025-08-15 14:41:00,325 INFO {/home/jbwang/d123/d123/script/builders/worker_pool_builder.py:27} Building WorkerPool...DONE! +2025-08-15 14:41:00,325 INFO {/home/jbwang/d123/d123/script/run_dataset_conversion.py:30} Starting Dataset Caching... +2025-08-15 14:41:00,326 INFO {/home/jbwang/d123/d123/script/builders/data_converter_builder.py:14} Building RawDataProcessor... 
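
Aside on the detection tokens written by _extract_detections: they are the helper's global IDs, which pack a semantic class and an instance index into a single integer. A quick standalone round-trip of the encoding used in kitti_360_helper.py, assuming instance IDs stay below 1000:

    MAX_N = 1000  # instances per semantic class, as in kitti_360_helper.py

    def local2global(semantic_id: int, instance_id: int) -> int:
        # Pack (semanticId, instanceId) into one integer token.
        return semantic_id * MAX_N + instance_id

    def global2local(global_id: int):
        # Invert the packing.
        return global_id // MAX_N, global_id % MAX_N

    assert global2local(local2global(26, 42)) == (26, 42)  # semantic id 26 = 'car'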
diff --git a/exp/kitti_test2/2025.08.15.14.43.13/code/hydra/config.yaml b/exp/kitti_test2/2025.08.15.14.43.13/code/hydra/config.yaml new file mode 100644 index 00000000..de70bfa3 --- /dev/null +++ b/exp/kitti_test2/2025.08.15.14.43.13/code/hydra/config.yaml @@ -0,0 +1,60 @@ +worker: + _target_: nuplan.planning.utils.multithreading.worker_ray.RayDistributed + _convert_: all + master_node_ip: null + threads_per_node: null + debug_mode: false + log_to_driver: true + logs_subdir: logs + use_distributed: false +scene_filter: + _target_: d123.dataset.scene.scene_filter.SceneFilter + _convert_: all + split_types: null + split_names: null + log_names: null + map_names: null + scene_tokens: null + timestamp_threshold_s: null + ego_displacement_minimum_m: null + duration_s: 9.2 + history_s: 3.0 +scene_builder: + _target_: d123.dataset.scene.scene_builder.ArrowSceneBuilder + _convert_: all + dataset_path: ${d123_data_root} +distributed_timeout_seconds: 7200 +selected_simulation_metrics: null +verbose: false +logger_level: info +logger_format_string: null +max_number_of_workers: null +gpu: true +seed: 42 +d123_devkit_root: ${oc.env:D123_DEVKIT_ROOT} +d123_maps_root: ${oc.env:D123_MAPS_ROOT} +d123_data_root: ${oc.env:D123_DATA_ROOT} +nuplan_devkit_root: ${oc.env:NUPLAN_DEVKIT_ROOT} +nuplan_maps_root: ${oc.env:NUPLAN_MAPS_ROOT} +nuplan_data_root: ${oc.env:NUPLAN_DATA_ROOT} +experiment_name: kitti_test2 +date_format: '%Y.%m.%d.%H.%M.%S' +experiment_uid: ${now:${date_format}} +output_dir: ${oc.env:D123_EXP_ROOT}/${experiment_name}/${experiment_uid} +force_log_conversion: true +force_map_conversion: false +datasets: + nuplan_private_dataset: + _target_: d123.dataset.dataset_specific.nuplan.nuplan_data_converter.NuplanDataConverter + _convert_: all + splits: + - nuplan_private_test + log_path: ${oc.env:NUPLAN_DATA_ROOT}/nuplan-v1.1/splits + data_converter_config: + _target_: d123.dataset.dataset_specific.raw_data_converter.DataConverterConfig + _convert_: all + output_path: ${d123_data_root} + force_log_conversion: ${force_log_conversion} + force_map_conversion: ${force_map_conversion} + camera_store_option: path + lidar_store_option: path diff --git a/exp/kitti_test2/2025.08.15.14.43.13/code/hydra/hydra.yaml b/exp/kitti_test2/2025.08.15.14.43.13/code/hydra/hydra.yaml new file mode 100644 index 00000000..cca44d29 --- /dev/null +++ b/exp/kitti_test2/2025.08.15.14.43.13/code/hydra/hydra.yaml @@ -0,0 +1,177 @@ +hydra: + run: + dir: ${output_dir} + sweep: + dir: multirun/${now:%Y-%m-%d}/${now:%H-%M-%S} + subdir: ${hydra.job.num} + launcher: + _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher + sweeper: + _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper + max_batch_size: null + params: null + help: + app_name: ${hydra.job.name} + header: '${hydra.help.app_name} is powered by Hydra. + + ' + footer: 'Powered by Hydra (https://hydra.cc) + + Use --hydra-help to view Hydra specific help + + ' + template: '${hydra.help.header} + + == Configuration groups == + + Compose your configuration from those groups (group=option) + + + $APP_CONFIG_GROUPS + + + == Config == + + Override anything in the config (foo.bar=value) + + + $CONFIG + + + ${hydra.help.footer} + + ' + hydra_help: + template: 'Hydra (${hydra.runtime.version}) + + See https://hydra.cc for more info. 
+ + + == Flags == + + $FLAGS_HELP + + + == Configuration groups == + + Compose your configuration from those groups (For example, append hydra/job_logging=disabled + to command line) + + + $HYDRA_CONFIG_GROUPS + + + Use ''--cfg hydra'' to Show the Hydra config. + + ' + hydra_help: ??? + hydra_logging: + version: 1 + formatters: + colorlog: + (): colorlog.ColoredFormatter + format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' + handlers: + console: + class: logging.StreamHandler + formatter: colorlog + stream: ext://sys.stdout + root: + level: INFO + handlers: + - console + disable_existing_loggers: false + job_logging: + version: 1 + formatters: + simple: + format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' + colorlog: + (): colorlog.ColoredFormatter + format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] + - %(message)s' + log_colors: + DEBUG: purple + INFO: green + WARNING: yellow + ERROR: red + CRITICAL: red + handlers: + console: + class: logging.StreamHandler + formatter: colorlog + stream: ext://sys.stdout + file: + class: logging.FileHandler + formatter: simple + filename: ${hydra.job.name}.log + root: + level: INFO + handlers: + - console + - file + disable_existing_loggers: false + env: {} + mode: RUN + searchpath: + - pkg://d123.script.config + - pkg://d123.script.config.common + callbacks: {} + output_subdir: ${output_dir}/code/hydra + overrides: + hydra: + - hydra.mode=RUN + task: + - experiment_name=kitti_test2 + job: + name: run_dataset_conversion + chdir: false + override_dirname: experiment_name=kitti_test2 + id: ??? + num: ??? + config_name: default_dataset_conversion + env_set: {} + env_copy: [] + config: + override_dirname: + kv_sep: '=' + item_sep: ',' + exclude_keys: [] + runtime: + version: 1.3.2 + version_base: '1.3' + cwd: /home/jbwang/d123 + config_sources: + - path: hydra.conf + schema: pkg + provider: hydra + - path: /home/jbwang/d123/d123/script/config/dataset_conversion + schema: file + provider: main + - path: hydra_plugins.hydra_colorlog.conf + schema: pkg + provider: hydra-colorlog + - path: d123.script.config + schema: pkg + provider: hydra.searchpath in main + - path: d123.script.config.common + schema: pkg + provider: hydra.searchpath in main + - path: '' + schema: structured + provider: schema + output_dir: /home/jbwang/d123/exp/kitti_test2/2025.08.15.14.43.13 + choices: + scene_builder: default_scene_builder + scene_filter: all_scenes + worker: ray_distributed + hydra/env: default + hydra/callbacks: null + hydra/job_logging: colorlog + hydra/hydra_logging: colorlog + hydra/hydra_help: default + hydra/help: default + hydra/sweeper: basic + hydra/launcher: basic + hydra/output: default + verbose: false diff --git a/exp/kitti_test2/2025.08.15.14.43.13/code/hydra/overrides.yaml b/exp/kitti_test2/2025.08.15.14.43.13/code/hydra/overrides.yaml new file mode 100644 index 00000000..676c1042 --- /dev/null +++ b/exp/kitti_test2/2025.08.15.14.43.13/code/hydra/overrides.yaml @@ -0,0 +1 @@ +- experiment_name=kitti_test2 diff --git a/exp/kitti_test2/2025.08.15.14.43.13/log.txt b/exp/kitti_test2/2025.08.15.14.43.13/log.txt new file mode 100644 index 00000000..fec50568 --- /dev/null +++ b/exp/kitti_test2/2025.08.15.14.43.13/log.txt @@ -0,0 +1,12 @@ +2025-08-15 14:43:13,965 INFO {/home/jbwang/d123/d123/script/builders/worker_pool_builder.py:19} Building WorkerPool... 
+2025-08-15 14:43:24,401 INFO {/data/jbwang/submodule/nuplan-devkit-master/nuplan/planning/utils/multithreading/worker_ray.py:78} Starting ray local! +2025-08-15 14:43:39,643 INFO {/data/jbwang/submodule/nuplan-devkit-master/nuplan/planning/utils/multithreading/worker_pool.py:101} Worker: RayDistributed +2025-08-15 14:43:39,644 INFO {/data/jbwang/submodule/nuplan-devkit-master/nuplan/planning/utils/multithreading/worker_pool.py:102} Number of nodes: 1 +Number of CPUs per node: 64 +Number of GPUs per node: 8 +Number of threads across all nodes: 64 +2025-08-15 14:43:39,644 INFO {/home/jbwang/d123/d123/script/builders/worker_pool_builder.py:27} Building WorkerPool...DONE! +2025-08-15 14:43:39,644 INFO {/home/jbwang/d123/d123/script/run_dataset_conversion.py:30} Starting Dataset Caching... +2025-08-15 14:43:39,645 INFO {/home/jbwang/d123/d123/script/builders/data_converter_builder.py:14} Building RawDataProcessor... +2025-08-15 14:43:44,316 INFO {/home/jbwang/d123/d123/script/builders/data_converter_builder.py:21} Building RawDataProcessor...DONE! +2025-08-15 14:43:44,316 INFO {/home/jbwang/d123/d123/script/run_dataset_conversion.py:34} Processing dataset: NuplanDataConverter diff --git a/exp/kitti_test2/2025.08.15.14.46.49/code/hydra/config.yaml b/exp/kitti_test2/2025.08.15.14.46.49/code/hydra/config.yaml new file mode 100644 index 00000000..5ce47ba9 --- /dev/null +++ b/exp/kitti_test2/2025.08.15.14.46.49/code/hydra/config.yaml @@ -0,0 +1,60 @@ +worker: + _target_: nuplan.planning.utils.multithreading.worker_ray.RayDistributed + _convert_: all + master_node_ip: null + threads_per_node: null + debug_mode: false + log_to_driver: true + logs_subdir: logs + use_distributed: false +scene_filter: + _target_: d123.dataset.scene.scene_filter.SceneFilter + _convert_: all + split_types: null + split_names: null + log_names: null + map_names: null + scene_tokens: null + timestamp_threshold_s: null + ego_displacement_minimum_m: null + duration_s: 9.2 + history_s: 3.0 +scene_builder: + _target_: d123.dataset.scene.scene_builder.ArrowSceneBuilder + _convert_: all + dataset_path: ${d123_data_root} +distributed_timeout_seconds: 7200 +selected_simulation_metrics: null +verbose: false +logger_level: info +logger_format_string: null +max_number_of_workers: null +gpu: true +seed: 42 +d123_devkit_root: ${oc.env:D123_DEVKIT_ROOT} +d123_maps_root: ${oc.env:D123_MAPS_ROOT} +d123_data_root: ${oc.env:D123_DATA_ROOT} +nuplan_devkit_root: ${oc.env:NUPLAN_DEVKIT_ROOT} +nuplan_maps_root: ${oc.env:NUPLAN_MAPS_ROOT} +nuplan_data_root: ${oc.env:NUPLAN_DATA_ROOT} +experiment_name: kitti_test2 +date_format: '%Y.%m.%d.%H.%M.%S' +experiment_uid: ${now:${date_format}} +output_dir: ${oc.env:D123_EXP_ROOT}/${experiment_name}/${experiment_uid} +force_log_conversion: true +force_map_conversion: false +datasets: + kitti360_dataset: + _target_: d123.dataset.dataset_specific.kitti_360.kitti_360_data_converter.Kitti360DataConverter + _convert_: all + splits: + - kitti360 + log_path: ${oc.env:KITTI360_DATA_ROOT} + data_converter_config: + _target_: d123.dataset.dataset_specific.raw_data_converter.DataConverterConfig + _convert_: all + output_path: ${d123_data_root} + force_log_conversion: ${force_log_conversion} + force_map_conversion: ${force_map_conversion} + camera_store_option: path + lidar_store_option: path diff --git a/exp/kitti_test2/2025.08.15.14.46.49/code/hydra/hydra.yaml b/exp/kitti_test2/2025.08.15.14.46.49/code/hydra/hydra.yaml new file mode 100644 index 00000000..bd9698a2 --- /dev/null +++ 
b/exp/kitti_test2/2025.08.15.14.46.49/code/hydra/hydra.yaml @@ -0,0 +1,177 @@ +hydra: + run: + dir: ${output_dir} + sweep: + dir: multirun/${now:%Y-%m-%d}/${now:%H-%M-%S} + subdir: ${hydra.job.num} + launcher: + _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher + sweeper: + _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper + max_batch_size: null + params: null + help: + app_name: ${hydra.job.name} + header: '${hydra.help.app_name} is powered by Hydra. + + ' + footer: 'Powered by Hydra (https://hydra.cc) + + Use --hydra-help to view Hydra specific help + + ' + template: '${hydra.help.header} + + == Configuration groups == + + Compose your configuration from those groups (group=option) + + + $APP_CONFIG_GROUPS + + + == Config == + + Override anything in the config (foo.bar=value) + + + $CONFIG + + + ${hydra.help.footer} + + ' + hydra_help: + template: 'Hydra (${hydra.runtime.version}) + + See https://hydra.cc for more info. + + + == Flags == + + $FLAGS_HELP + + + == Configuration groups == + + Compose your configuration from those groups (For example, append hydra/job_logging=disabled + to command line) + + + $HYDRA_CONFIG_GROUPS + + + Use ''--cfg hydra'' to Show the Hydra config. + + ' + hydra_help: ??? + hydra_logging: + version: 1 + formatters: + colorlog: + (): colorlog.ColoredFormatter + format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' + handlers: + console: + class: logging.StreamHandler + formatter: colorlog + stream: ext://sys.stdout + root: + level: INFO + handlers: + - console + disable_existing_loggers: false + job_logging: + version: 1 + formatters: + simple: + format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' + colorlog: + (): colorlog.ColoredFormatter + format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] + - %(message)s' + log_colors: + DEBUG: purple + INFO: green + WARNING: yellow + ERROR: red + CRITICAL: red + handlers: + console: + class: logging.StreamHandler + formatter: colorlog + stream: ext://sys.stdout + file: + class: logging.FileHandler + formatter: simple + filename: ${hydra.job.name}.log + root: + level: INFO + handlers: + - console + - file + disable_existing_loggers: false + env: {} + mode: RUN + searchpath: + - pkg://d123.script.config + - pkg://d123.script.config.common + callbacks: {} + output_subdir: ${output_dir}/code/hydra + overrides: + hydra: + - hydra.mode=RUN + task: + - experiment_name=kitti_test2 + job: + name: run_dataset_conversion + chdir: false + override_dirname: experiment_name=kitti_test2 + id: ??? + num: ??? 
+ config_name: default_dataset_conversion + env_set: {} + env_copy: [] + config: + override_dirname: + kv_sep: '=' + item_sep: ',' + exclude_keys: [] + runtime: + version: 1.3.2 + version_base: '1.3' + cwd: /home/jbwang/d123 + config_sources: + - path: hydra.conf + schema: pkg + provider: hydra + - path: /home/jbwang/d123/d123/script/config/dataset_conversion + schema: file + provider: main + - path: hydra_plugins.hydra_colorlog.conf + schema: pkg + provider: hydra-colorlog + - path: d123.script.config + schema: pkg + provider: hydra.searchpath in main + - path: d123.script.config.common + schema: pkg + provider: hydra.searchpath in main + - path: '' + schema: structured + provider: schema + output_dir: /home/jbwang/d123/exp/kitti_test2/2025.08.15.14.46.49 + choices: + scene_builder: default_scene_builder + scene_filter: all_scenes + worker: ray_distributed + hydra/env: default + hydra/callbacks: null + hydra/job_logging: colorlog + hydra/hydra_logging: colorlog + hydra/hydra_help: default + hydra/help: default + hydra/sweeper: basic + hydra/launcher: basic + hydra/output: default + verbose: false diff --git a/exp/kitti_test2/2025.08.15.14.46.49/code/hydra/overrides.yaml b/exp/kitti_test2/2025.08.15.14.46.49/code/hydra/overrides.yaml new file mode 100644 index 00000000..676c1042 --- /dev/null +++ b/exp/kitti_test2/2025.08.15.14.46.49/code/hydra/overrides.yaml @@ -0,0 +1 @@ +- experiment_name=kitti_test2 diff --git a/exp/kitti_test2/2025.08.15.14.46.49/log.txt b/exp/kitti_test2/2025.08.15.14.46.49/log.txt new file mode 100644 index 00000000..00286f48 --- /dev/null +++ b/exp/kitti_test2/2025.08.15.14.46.49/log.txt @@ -0,0 +1,10 @@ +2025-08-15 14:46:49,566 INFO {/home/jbwang/d123/d123/script/builders/worker_pool_builder.py:19} Building WorkerPool... +2025-08-15 14:46:59,509 INFO {/data/jbwang/submodule/nuplan-devkit-master/nuplan/planning/utils/multithreading/worker_ray.py:78} Starting ray local! +2025-08-15 14:47:14,118 INFO {/data/jbwang/submodule/nuplan-devkit-master/nuplan/planning/utils/multithreading/worker_pool.py:101} Worker: RayDistributed +2025-08-15 14:47:14,118 INFO {/data/jbwang/submodule/nuplan-devkit-master/nuplan/planning/utils/multithreading/worker_pool.py:102} Number of nodes: 1 +Number of CPUs per node: 64 +Number of GPUs per node: 8 +Number of threads across all nodes: 64 +2025-08-15 14:47:14,119 INFO {/home/jbwang/d123/d123/script/builders/worker_pool_builder.py:27} Building WorkerPool...DONE! +2025-08-15 14:47:14,119 INFO {/home/jbwang/d123/d123/script/run_dataset_conversion.py:30} Starting Dataset Caching... +2025-08-15 14:47:14,122 INFO {/home/jbwang/d123/d123/script/builders/data_converter_builder.py:14} Building RawDataProcessor... 
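Each config.yaml dumped in these runs declares objects as `_target_` blocks, and the converter builder later logs "Instantiating dataset type: {...}" with exactly such a dict. A minimal sketch of how Hydra turns one of these blocks into an object, assuming hydra-core is installed and with the env-dependent interpolations replaced by placeholder literals:

from hydra.utils import instantiate
from omegaconf import OmegaConf

# Mirrors the datasets.kitti360_dataset block from the config dumps.
node = OmegaConf.create(
    {
        "_target_": "d123.dataset.dataset_specific.kitti_360.kitti_360_data_converter.Kitti360DataConverter",
        "_convert_": "all",
        "splits": ["kitti360"],
        "log_path": "/nas/datasets/KITTI-360",  # placeholder for ${oc.env:KITTI360_DATA_ROOT}
    }
)
# instantiate() imports the target class, converts the config per _convert_,
# and calls the class with the remaining keys as keyword arguments.
converter = instantiate(node)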
diff --git a/exp/kitti_test2/2025.08.15.14.50.55/code/hydra/config.yaml b/exp/kitti_test2/2025.08.15.14.50.55/code/hydra/config.yaml new file mode 100644 index 00000000..5ce47ba9 --- /dev/null +++ b/exp/kitti_test2/2025.08.15.14.50.55/code/hydra/config.yaml @@ -0,0 +1,60 @@ +worker: + _target_: nuplan.planning.utils.multithreading.worker_ray.RayDistributed + _convert_: all + master_node_ip: null + threads_per_node: null + debug_mode: false + log_to_driver: true + logs_subdir: logs + use_distributed: false +scene_filter: + _target_: d123.dataset.scene.scene_filter.SceneFilter + _convert_: all + split_types: null + split_names: null + log_names: null + map_names: null + scene_tokens: null + timestamp_threshold_s: null + ego_displacement_minimum_m: null + duration_s: 9.2 + history_s: 3.0 +scene_builder: + _target_: d123.dataset.scene.scene_builder.ArrowSceneBuilder + _convert_: all + dataset_path: ${d123_data_root} +distributed_timeout_seconds: 7200 +selected_simulation_metrics: null +verbose: false +logger_level: info +logger_format_string: null +max_number_of_workers: null +gpu: true +seed: 42 +d123_devkit_root: ${oc.env:D123_DEVKIT_ROOT} +d123_maps_root: ${oc.env:D123_MAPS_ROOT} +d123_data_root: ${oc.env:D123_DATA_ROOT} +nuplan_devkit_root: ${oc.env:NUPLAN_DEVKIT_ROOT} +nuplan_maps_root: ${oc.env:NUPLAN_MAPS_ROOT} +nuplan_data_root: ${oc.env:NUPLAN_DATA_ROOT} +experiment_name: kitti_test2 +date_format: '%Y.%m.%d.%H.%M.%S' +experiment_uid: ${now:${date_format}} +output_dir: ${oc.env:D123_EXP_ROOT}/${experiment_name}/${experiment_uid} +force_log_conversion: true +force_map_conversion: false +datasets: + kitti360_dataset: + _target_: d123.dataset.dataset_specific.kitti_360.kitti_360_data_converter.Kitti360DataConverter + _convert_: all + splits: + - kitti360 + log_path: ${oc.env:KITTI360_DATA_ROOT} + data_converter_config: + _target_: d123.dataset.dataset_specific.raw_data_converter.DataConverterConfig + _convert_: all + output_path: ${d123_data_root} + force_log_conversion: ${force_log_conversion} + force_map_conversion: ${force_map_conversion} + camera_store_option: path + lidar_store_option: path diff --git a/exp/kitti_test2/2025.08.15.14.50.55/code/hydra/hydra.yaml b/exp/kitti_test2/2025.08.15.14.50.55/code/hydra/hydra.yaml new file mode 100644 index 00000000..acff45d7 --- /dev/null +++ b/exp/kitti_test2/2025.08.15.14.50.55/code/hydra/hydra.yaml @@ -0,0 +1,177 @@ +hydra: + run: + dir: ${output_dir} + sweep: + dir: multirun/${now:%Y-%m-%d}/${now:%H-%M-%S} + subdir: ${hydra.job.num} + launcher: + _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher + sweeper: + _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper + max_batch_size: null + params: null + help: + app_name: ${hydra.job.name} + header: '${hydra.help.app_name} is powered by Hydra. + + ' + footer: 'Powered by Hydra (https://hydra.cc) + + Use --hydra-help to view Hydra specific help + + ' + template: '${hydra.help.header} + + == Configuration groups == + + Compose your configuration from those groups (group=option) + + + $APP_CONFIG_GROUPS + + + == Config == + + Override anything in the config (foo.bar=value) + + + $CONFIG + + + ${hydra.help.footer} + + ' + hydra_help: + template: 'Hydra (${hydra.runtime.version}) + + See https://hydra.cc for more info. 
+ + + == Flags == + + $FLAGS_HELP + + + == Configuration groups == + + Compose your configuration from those groups (For example, append hydra/job_logging=disabled + to command line) + + + $HYDRA_CONFIG_GROUPS + + + Use ''--cfg hydra'' to Show the Hydra config. + + ' + hydra_help: ??? + hydra_logging: + version: 1 + formatters: + colorlog: + (): colorlog.ColoredFormatter + format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' + handlers: + console: + class: logging.StreamHandler + formatter: colorlog + stream: ext://sys.stdout + root: + level: INFO + handlers: + - console + disable_existing_loggers: false + job_logging: + version: 1 + formatters: + simple: + format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' + colorlog: + (): colorlog.ColoredFormatter + format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] + - %(message)s' + log_colors: + DEBUG: purple + INFO: green + WARNING: yellow + ERROR: red + CRITICAL: red + handlers: + console: + class: logging.StreamHandler + formatter: colorlog + stream: ext://sys.stdout + file: + class: logging.FileHandler + formatter: simple + filename: ${hydra.job.name}.log + root: + level: INFO + handlers: + - console + - file + disable_existing_loggers: false + env: {} + mode: RUN + searchpath: + - pkg://d123.script.config + - pkg://d123.script.config.common + callbacks: {} + output_subdir: ${output_dir}/code/hydra + overrides: + hydra: + - hydra.mode=RUN + task: + - experiment_name=kitti_test2 + job: + name: run_dataset_conversion + chdir: false + override_dirname: experiment_name=kitti_test2 + id: ??? + num: ??? + config_name: default_dataset_conversion + env_set: {} + env_copy: [] + config: + override_dirname: + kv_sep: '=' + item_sep: ',' + exclude_keys: [] + runtime: + version: 1.3.2 + version_base: '1.3' + cwd: /home/jbwang/d123 + config_sources: + - path: hydra.conf + schema: pkg + provider: hydra + - path: /home/jbwang/d123/d123/script/config/dataset_conversion + schema: file + provider: main + - path: hydra_plugins.hydra_colorlog.conf + schema: pkg + provider: hydra-colorlog + - path: d123.script.config + schema: pkg + provider: hydra.searchpath in main + - path: d123.script.config.common + schema: pkg + provider: hydra.searchpath in main + - path: '' + schema: structured + provider: schema + output_dir: /home/jbwang/d123/exp/kitti_test2/2025.08.15.14.50.55 + choices: + scene_builder: default_scene_builder + scene_filter: all_scenes + worker: ray_distributed + hydra/env: default + hydra/callbacks: null + hydra/job_logging: colorlog + hydra/hydra_logging: colorlog + hydra/hydra_help: default + hydra/help: default + hydra/sweeper: basic + hydra/launcher: basic + hydra/output: default + verbose: false diff --git a/exp/kitti_test2/2025.08.15.14.50.55/code/hydra/overrides.yaml b/exp/kitti_test2/2025.08.15.14.50.55/code/hydra/overrides.yaml new file mode 100644 index 00000000..676c1042 --- /dev/null +++ b/exp/kitti_test2/2025.08.15.14.50.55/code/hydra/overrides.yaml @@ -0,0 +1 @@ +- experiment_name=kitti_test2 diff --git a/exp/kitti_test2/2025.08.15.14.50.55/log.txt b/exp/kitti_test2/2025.08.15.14.50.55/log.txt new file mode 100644 index 00000000..9902e0ce --- /dev/null +++ b/exp/kitti_test2/2025.08.15.14.50.55/log.txt @@ -0,0 +1,11 @@ +2025-08-15 14:50:55,950 INFO {/home/jbwang/d123/d123/script/builders/worker_pool_builder.py:19} Building WorkerPool... 
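The node/CPU/GPU banner printed a few lines below comes from nuplan's RayDistributed worker pool starting up. A sketch of how such a pool is typically built and fed, assuming nuplan-devkit is importable; the constructor arguments mirror the worker: block in the config dumps, and the list-in/list-out chunking of worker_map is an assumption about the devkit's API:

from nuplan.planning.utils.multithreading.worker_ray import RayDistributed
from nuplan.planning.utils.multithreading.worker_utils import worker_map

worker = RayDistributed(threads_per_node=None, debug_mode=False,
                        log_to_driver=True, use_distributed=False)

def square_batch(values):
    # worker_map hands each worker a chunk (a list) and concatenates the results.
    return [v * v for v in values]

results = worker_map(worker, square_batch, list(range(16)))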
+2025-08-15 14:51:19,466 INFO {/data/jbwang/submodule/nuplan-devkit-master/nuplan/planning/utils/multithreading/worker_ray.py:78} Starting ray local! +2025-08-15 14:51:52,653 INFO {/data/jbwang/submodule/nuplan-devkit-master/nuplan/planning/utils/multithreading/worker_pool.py:101} Worker: RayDistributed +2025-08-15 14:51:52,653 INFO {/data/jbwang/submodule/nuplan-devkit-master/nuplan/planning/utils/multithreading/worker_pool.py:102} Number of nodes: 1 +Number of CPUs per node: 64 +Number of GPUs per node: 8 +Number of threads across all nodes: 64 +2025-08-15 14:51:52,654 INFO {/home/jbwang/d123/d123/script/builders/worker_pool_builder.py:27} Building WorkerPool...DONE! +2025-08-15 14:51:52,654 INFO {/home/jbwang/d123/d123/script/run_dataset_conversion.py:30} Starting Dataset Caching... +2025-08-15 14:51:52,654 INFO {/home/jbwang/d123/d123/script/builders/data_converter_builder.py:14} Building RawDataProcessor... +2025-08-15 14:51:52,655 INFO {/home/jbwang/d123/d123/script/builders/data_converter_builder.py:17} Instantiating dataset type: {'_target_': 'd123.dataset.dataset_specific.kitti_360.kitti_360_data_converter.Kitti360DataConverter', '_convert_': 'all', 'splits': ['kitti360'], 'log_path': '${oc.env:KITTI360_DATA_ROOT}', 'data_converter_config': {'_target_': 'd123.dataset.dataset_specific.raw_data_converter.DataConverterConfig', '_convert_': 'all', 'output_path': '${d123_data_root}', 'force_log_conversion': '${force_log_conversion}', 'force_map_conversion': '${force_map_conversion}', 'camera_store_option': 'path', 'lidar_store_option': 'path'}} diff --git a/exp/kitti_test2/2025.08.15.14.52.39/code/hydra/config.yaml b/exp/kitti_test2/2025.08.15.14.52.39/code/hydra/config.yaml new file mode 100644 index 00000000..de70bfa3 --- /dev/null +++ b/exp/kitti_test2/2025.08.15.14.52.39/code/hydra/config.yaml @@ -0,0 +1,60 @@ +worker: + _target_: nuplan.planning.utils.multithreading.worker_ray.RayDistributed + _convert_: all + master_node_ip: null + threads_per_node: null + debug_mode: false + log_to_driver: true + logs_subdir: logs + use_distributed: false +scene_filter: + _target_: d123.dataset.scene.scene_filter.SceneFilter + _convert_: all + split_types: null + split_names: null + log_names: null + map_names: null + scene_tokens: null + timestamp_threshold_s: null + ego_displacement_minimum_m: null + duration_s: 9.2 + history_s: 3.0 +scene_builder: + _target_: d123.dataset.scene.scene_builder.ArrowSceneBuilder + _convert_: all + dataset_path: ${d123_data_root} +distributed_timeout_seconds: 7200 +selected_simulation_metrics: null +verbose: false +logger_level: info +logger_format_string: null +max_number_of_workers: null +gpu: true +seed: 42 +d123_devkit_root: ${oc.env:D123_DEVKIT_ROOT} +d123_maps_root: ${oc.env:D123_MAPS_ROOT} +d123_data_root: ${oc.env:D123_DATA_ROOT} +nuplan_devkit_root: ${oc.env:NUPLAN_DEVKIT_ROOT} +nuplan_maps_root: ${oc.env:NUPLAN_MAPS_ROOT} +nuplan_data_root: ${oc.env:NUPLAN_DATA_ROOT} +experiment_name: kitti_test2 +date_format: '%Y.%m.%d.%H.%M.%S' +experiment_uid: ${now:${date_format}} +output_dir: ${oc.env:D123_EXP_ROOT}/${experiment_name}/${experiment_uid} +force_log_conversion: true +force_map_conversion: false +datasets: + nuplan_private_dataset: + _target_: d123.dataset.dataset_specific.nuplan.nuplan_data_converter.NuplanDataConverter + _convert_: all + splits: + - nuplan_private_test + log_path: ${oc.env:NUPLAN_DATA_ROOT}/nuplan-v1.1/splits + data_converter_config: + _target_: d123.dataset.dataset_specific.raw_data_converter.DataConverterConfig + _convert_: all + 
output_path: ${d123_data_root} + force_log_conversion: ${force_log_conversion} + force_map_conversion: ${force_map_conversion} + camera_store_option: path + lidar_store_option: path diff --git a/exp/kitti_test2/2025.08.15.14.52.39/code/hydra/hydra.yaml b/exp/kitti_test2/2025.08.15.14.52.39/code/hydra/hydra.yaml new file mode 100644 index 00000000..d053f8e7 --- /dev/null +++ b/exp/kitti_test2/2025.08.15.14.52.39/code/hydra/hydra.yaml @@ -0,0 +1,177 @@ +hydra: + run: + dir: ${output_dir} + sweep: + dir: multirun/${now:%Y-%m-%d}/${now:%H-%M-%S} + subdir: ${hydra.job.num} + launcher: + _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher + sweeper: + _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper + max_batch_size: null + params: null + help: + app_name: ${hydra.job.name} + header: '${hydra.help.app_name} is powered by Hydra. + + ' + footer: 'Powered by Hydra (https://hydra.cc) + + Use --hydra-help to view Hydra specific help + + ' + template: '${hydra.help.header} + + == Configuration groups == + + Compose your configuration from those groups (group=option) + + + $APP_CONFIG_GROUPS + + + == Config == + + Override anything in the config (foo.bar=value) + + + $CONFIG + + + ${hydra.help.footer} + + ' + hydra_help: + template: 'Hydra (${hydra.runtime.version}) + + See https://hydra.cc for more info. + + + == Flags == + + $FLAGS_HELP + + + == Configuration groups == + + Compose your configuration from those groups (For example, append hydra/job_logging=disabled + to command line) + + + $HYDRA_CONFIG_GROUPS + + + Use ''--cfg hydra'' to Show the Hydra config. + + ' + hydra_help: ??? + hydra_logging: + version: 1 + formatters: + colorlog: + (): colorlog.ColoredFormatter + format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' + handlers: + console: + class: logging.StreamHandler + formatter: colorlog + stream: ext://sys.stdout + root: + level: INFO + handlers: + - console + disable_existing_loggers: false + job_logging: + version: 1 + formatters: + simple: + format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' + colorlog: + (): colorlog.ColoredFormatter + format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] + - %(message)s' + log_colors: + DEBUG: purple + INFO: green + WARNING: yellow + ERROR: red + CRITICAL: red + handlers: + console: + class: logging.StreamHandler + formatter: colorlog + stream: ext://sys.stdout + file: + class: logging.FileHandler + formatter: simple + filename: ${hydra.job.name}.log + root: + level: INFO + handlers: + - console + - file + disable_existing_loggers: false + env: {} + mode: RUN + searchpath: + - pkg://d123.script.config + - pkg://d123.script.config.common + callbacks: {} + output_subdir: ${output_dir}/code/hydra + overrides: + hydra: + - hydra.mode=RUN + task: + - experiment_name=kitti_test2 + job: + name: run_dataset_conversion + chdir: false + override_dirname: experiment_name=kitti_test2 + id: ??? + num: ??? 
+ config_name: default_dataset_conversion + env_set: {} + env_copy: [] + config: + override_dirname: + kv_sep: '=' + item_sep: ',' + exclude_keys: [] + runtime: + version: 1.3.2 + version_base: '1.3' + cwd: /home/jbwang/d123 + config_sources: + - path: hydra.conf + schema: pkg + provider: hydra + - path: /home/jbwang/d123/d123/script/config/dataset_conversion + schema: file + provider: main + - path: hydra_plugins.hydra_colorlog.conf + schema: pkg + provider: hydra-colorlog + - path: d123.script.config + schema: pkg + provider: hydra.searchpath in main + - path: d123.script.config.common + schema: pkg + provider: hydra.searchpath in main + - path: '' + schema: structured + provider: schema + output_dir: /home/jbwang/d123/exp/kitti_test2/2025.08.15.14.52.39 + choices: + scene_builder: default_scene_builder + scene_filter: all_scenes + worker: ray_distributed + hydra/env: default + hydra/callbacks: null + hydra/job_logging: colorlog + hydra/hydra_logging: colorlog + hydra/hydra_help: default + hydra/help: default + hydra/sweeper: basic + hydra/launcher: basic + hydra/output: default + verbose: false diff --git a/exp/kitti_test2/2025.08.15.14.52.39/code/hydra/overrides.yaml b/exp/kitti_test2/2025.08.15.14.52.39/code/hydra/overrides.yaml new file mode 100644 index 00000000..676c1042 --- /dev/null +++ b/exp/kitti_test2/2025.08.15.14.52.39/code/hydra/overrides.yaml @@ -0,0 +1 @@ +- experiment_name=kitti_test2 diff --git a/exp/kitti_test2/2025.08.15.14.52.39/log.txt b/exp/kitti_test2/2025.08.15.14.52.39/log.txt new file mode 100644 index 00000000..e2585299 --- /dev/null +++ b/exp/kitti_test2/2025.08.15.14.52.39/log.txt @@ -0,0 +1,11 @@ +2025-08-15 14:52:39,717 INFO {/home/jbwang/d123/d123/script/builders/worker_pool_builder.py:19} Building WorkerPool... +2025-08-15 14:53:02,994 INFO {/data/jbwang/submodule/nuplan-devkit-master/nuplan/planning/utils/multithreading/worker_ray.py:78} Starting ray local! +2025-08-15 14:53:36,548 INFO {/data/jbwang/submodule/nuplan-devkit-master/nuplan/planning/utils/multithreading/worker_pool.py:101} Worker: RayDistributed +2025-08-15 14:53:36,549 INFO {/data/jbwang/submodule/nuplan-devkit-master/nuplan/planning/utils/multithreading/worker_pool.py:102} Number of nodes: 1 +Number of CPUs per node: 64 +Number of GPUs per node: 8 +Number of threads across all nodes: 64 +2025-08-15 14:53:36,549 INFO {/home/jbwang/d123/d123/script/builders/worker_pool_builder.py:27} Building WorkerPool...DONE! +2025-08-15 14:53:36,549 INFO {/home/jbwang/d123/d123/script/run_dataset_conversion.py:30} Starting Dataset Caching... +2025-08-15 14:53:36,550 INFO {/home/jbwang/d123/d123/script/builders/data_converter_builder.py:14} Building RawDataProcessor... 
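A run like the one logged above ends with one Arrow IPC file per log; later commits in this series show the converter writing them through pa.OSFile + pa.ipc.new_file and sorting rows by timestamp. A minimal sketch of reading such a file back, with a placeholder path:

import pyarrow as pa

path = "/tmp/d123_data/kitti360/2013_05_28_drive_0000_sync.arrow"  # placeholder output path

with pa.memory_map(path, "r") as source:
    table = pa.ipc.open_file(source).read_all()

print(table.num_rows, table.column_names)
print(table["timestamp"].slice(0, 4).to_pylist())  # columns follow the recording schema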
+2025-08-15 14:53:36,550 INFO {/home/jbwang/d123/d123/script/builders/data_converter_builder.py:17} Instantiating dataset type: {'_target_': 'd123.dataset.dataset_specific.nuplan.nuplan_data_converter.NuplanDataConverter', '_convert_': 'all', 'splits': ['nuplan_private_test'], 'log_path': '${oc.env:NUPLAN_DATA_ROOT}/nuplan-v1.1/splits', 'data_converter_config': {'_target_': 'd123.dataset.dataset_specific.raw_data_converter.DataConverterConfig', '_convert_': 'all', 'output_path': '${d123_data_root}', 'force_log_conversion': '${force_log_conversion}', 'force_map_conversion': '${force_map_conversion}', 'camera_store_option': 'path', 'lidar_store_option': 'path'}}
diff --git a/jbwang_test.py b/jbwang_test.py
index ac3afac5..ff320df9 100644
--- a/jbwang_test.py
+++ b/jbwang_test.py
@@ -65,4 +65,21 @@
 # log_name = "1230_asd_"
 # for i in range(20):
 #     a = create_token(f"{log_name}_{i}")
-#     print(a)
+#     print(a)ee
+
+
+import numpy as np
+from pathlib import Path
+a = np.loadtxt("/data/jbwang/d123/data_poses/2013_05_28_drive_0000_sync/oxts/data/0000000000.txt")
+# poses.txt rows: frame index followed by a flattened 3x4 rigid-body pose.
+b = np.loadtxt("/nas/datasets/KITTI-360/data_poses/2013_05_28_drive_0018_sync/poses.txt")
+data = b
+ts = data[:, 0].astype(np.int32)
+poses = np.reshape(data[:, 1:], (-1, 3, 4))
+poses = np.concatenate((poses, np.tile(np.array([0, 0, 0, 1]).reshape(1,1,4),(poses.shape[0],1,1))), 1)
+print(a)
+print(b.shape)
+print(ts.shape)
+print(poses.shape)
+
+ccc = Path("/data/jbwang/d123/data_poses/2013_05_28_drive_0000_sync/oxts/data/")
+print(len(list(ccc.glob("*.txt")))) \ No newline at end of file
diff --git a/jbwang_test2.py b/jbwang_test2.py
new file mode 100644
index 00000000..b1229356
--- /dev/null
+++ b/jbwang_test2.py
@@ -0,0 +1,70 @@
+# import numpy as np
+# import pickle
+
+# # path = "/nas/datasets/KITTI-360/data_3d_raw/2013_05_28_drive_0000_sync/velodyne_points/data/0000000000.bin"
+# # a = np.fromfile(path, dtype=np.float32)
+
+# # print(a.shape)
+# # print(a[:10])
+
+# # path2 = "/nas/datasets/KITTI-360/calibration/calib_cam_to_pose.txt"
+# # c = np.loadtxt(path2)
+# # print(c)
+
+# import open3d as o3d
+# import numpy as np
+
+# def read_ply_file(file_path):
+#     # Read the PLY file
+#     pcd = o3d.io.read_point_cloud(file_path)
+#     print(len(pcd.points), len(pcd.colors))
+#     # Extract vertex information
+#     points = np.asarray(pcd.points)  # x, y, z
+#     colors = np.asarray(pcd.colors)  # red, green, blue
+#     # semantics = np.asarray(pcd.semantic)  # semanticID, instanceID, isVisible, confidence
+
+#     # Merge all information into a single array
+#     vertices = np.hstack((points, colors))
+
+#     return vertices
+
+# # Example usage
+# file_path = '/nas/datasets/KITTI-360/data_3d_semantics/train/2013_05_28_drive_0000_sync/static/0000000002_0000000385.ply'  # replace with your PLY file path
+# vertices = read_ply_file(file_path)
+
+# # Print the first few vertices
+# print("Vertex information (first 5 vertices):")
+# print(vertices[:5])
+
+import numpy as np
+from scipy.linalg import polar
+from scipy.spatial.transform import Rotation as R
+
+def polar_decompose_rotation_scale(A: np.ndarray):
+    """
+    A: 3x3 (rotation + scale + shear)
+    Returns:
+        Rm: pure rotation
+        Sm: symmetric positive definite (scale + shear)
+        scale: approximate per-axis scale (from the diagonal of Sm or sqrt of its eigenvalues; treat with care if shear is present)
+        yaw, pitch, roll: ZYX sequence (commonly yaw(Z), pitch(Y), roll(X))
+    """
+    Rm, Sm = polar(A)  # A = Rm @ Sm
+    # Approximate per-axis scale (valid when there is no shear):
+    scale = np.diag(Sm)
+    # Euler angles
+    yaw, pitch, roll = R.from_matrix(Rm).as_euler('zyx', degrees=False)
+    return {
+        "R": Rm,
+        "S": Sm,
+        "scale_diag": scale,
+        "yaw_pitch_roll": (yaw, pitch, roll),
+    }
+
+M = np.array([
+    [-3.97771668e+00, -1.05715942e+00,-2.18206085e-02],
+    [2.43555284e+00, -1.72707462e+00, -1.03932284e-02],
+    [-4.41359095e-02, -2.94448305e-02,
1.39303744e+00], +]) +out = polar_decompose_rotation_scale(M) +print(out) \ No newline at end of file diff --git a/notebooks/dataset/jbwang_test.py b/notebooks/dataset/jbwang_test.py index caaa3201..0996734b 100644 --- a/notebooks/dataset/jbwang_test.py +++ b/notebooks/dataset/jbwang_test.py @@ -1,5 +1,5 @@ -s3_uri = "/data/jbwang/d123/data/nuplan_mini_train/2021.10.11.07.12.18_veh-50_00211_00304.arrow" -# s3_uri = "/data/jbwang/d123/data/nuplan_private_test/2021.09.22.13.20.34_veh-28_01446_01583.arrow" +# s3_uri = "/data/jbwang/d123/data/nuplan_mini_train/2021.10.11.07.12.18_veh-50_00211_00304.arrow" +s3_uri = "/data/jbwang/d123/data/nuplan_private_test/2021.09.22.13.20.34_veh-28_01446_01583.arrow" # s3_uri = "/data/jbwang/d123/data/carla/_Rep0_routes_validation1_route0_07_23_14_33_15.arrow" # s3_uri = "/data/jbwang/d123/data/nuplan_mini_val/2021.06.07.12.54.00_veh-35_01843_02314.arrow" @@ -33,10 +33,11 @@ for col in table.column_names: if col == "lidar": continue - print(f"Column: {col}, Type: {table.schema.field(col).type}") - tokens = table[col] # 或 table.column("token") + print(f"Column : {col}, Type: {table.schema.field(col).type}") + # tokens = table[col] # 或 table.column("token") + # print(tokens) # print(len(tokens)) - print(tokens.slice(0, 4).to_pylist()) + # print(tokens.slice(0, 100).to_pylist()) # print(table["traffic_light_ids"]) timer.log("3. Table created") # Save locally From 5b8a2074e5e014536ae9408a4955ff167fcf536e Mon Sep 17 00:00:00 2001 From: jbwang <1159270049@qq.com> Date: Fri, 15 Aug 2025 15:00:52 +0800 Subject: [PATCH 003/145] delete exp/* --- .gitignore | 1 + 1 file changed, 1 insertion(+) diff --git a/.gitignore b/.gitignore index 22cfdee9..0baa64c4 100644 --- a/.gitignore +++ b/.gitignore @@ -23,3 +23,4 @@ *.csv *.log *.mp4 +exp/* From 0db8c31aea69a814e66a003e6a970c6b776b1d17 Mon Sep 17 00:00:00 2001 From: jbwang <1159270049@qq.com> Date: Fri, 15 Aug 2025 15:54:04 +0800 Subject: [PATCH 004/145] fix hydra and other bugs --- .../kitti_360/kitti_360_data_converter.py | 30 +++++++++++-------- .../kitti_360/kitti_360_helper.py | 6 ++-- 2 files changed, 20 insertions(+), 16 deletions(-) diff --git a/d123/dataset/dataset_specific/kitti_360/kitti_360_data_converter.py b/d123/dataset/dataset_specific/kitti_360/kitti_360_data_converter.py index c79ce0b2..02dc0add 100644 --- a/d123/dataset/dataset_specific/kitti_360/kitti_360_data_converter.py +++ b/d123/dataset/dataset_specific/kitti_360/kitti_360_data_converter.py @@ -28,8 +28,7 @@ from d123.dataset.arrow.helper import open_arrow_table, write_arrow_table from d123.dataset.dataset_specific.raw_data_converter import DataConverterConfig, RawDataConverter from d123.dataset.logs.log_metadata import LogMetadata - -from kitti_360_helper import KITTI360Bbox3D +from d123.dataset.dataset_specific.kitti_360.kitti_360_helper import KITTI360Bbox3D KITTI360_DT: Final[float] = 0.1 SORT_BY_TIMESTAMP: Final[bool] = True @@ -62,11 +61,10 @@ PATH_POSES_ROOT: Path = KITTI360_DATA_ROOT / DIR_POSES PATH_CALIB_ROOT: Path = KITTI360_DATA_ROOT / DIR_CALIB +#TODO check all paths KITTI360_REQUIRED_MODALITY_ROOTS: Dict[str, Path] = { DIR_2D_RAW: PATH_2D_RAW_ROOT, - # DIR_2D_SMT: PATH_2D_SMT_ROOT, - # DIR_3D_RAW: PATH_3D_RAW_ROOT, - # DIR_3D_SMT: PATH_3D_SMT_ROOT, + DIR_3D_RAW: PATH_3D_RAW_ROOT, # DIR_3D_BBOX: PATH_3D_BBOX_ROOT, # DIR_POSES: PATH_POSES_ROOT, } @@ -138,6 +136,7 @@ def _collect_log_paths(self) -> Dict[str, List[Path]]: # f"Sequence '{seq_name}' skipped: missing modalities {missing_modalities}. 
" # f"Root: {KITTI360_DATA_ROOT}" # ) + print("valid",valid_seqs) return {"kitti360": valid_seqs} def get_available_splits(self) -> List[str]: @@ -244,7 +243,7 @@ def convert_kitti360_log_to_arrow( return [] -def get_kitti360_camera_metadata() -> Dict[str, CameraMetadata]: +def get_kitti360_camera_metadata() -> Dict[CameraType, CameraMetadata]: persp = PATH_CALIB_ROOT / "perspective.txt" @@ -265,7 +264,7 @@ def get_kitti360_camera_metadata() -> Dict[str, CameraMetadata]: log_cam_infos: Dict[str, CameraMetadata] = {} for cam_type, cam_name in KITTI360_CAMERA_TYPES.items(): - log_cam_infos[cam_type.serialize()] = CameraMetadata( + log_cam_infos[cam_type] = CameraMetadata( camera_type=cam_type, width=result[cam_name]["wh"][0], height=result[cam_name]["wh"][1], @@ -283,7 +282,7 @@ def _read_projection_matrix(p_line: str) -> np.ndarray: K = P[:, :3] return K -def get_kitti360_lidar_metadata(log_name: str) -> Dict[LiDARType, LiDARMetadata]: +def get_kitti360_lidar_metadata() -> Dict[LiDARType, LiDARMetadata]: metadata: Dict[LiDARType, LiDARMetadata] = {} cam2pose_txt = PATH_CALIB_ROOT / "calib_cam_to_pose.txt" @@ -343,7 +342,12 @@ def _write_recording_table( } if data_converter_config.lidar_store_option is not None: - row_data["lidar"] = [_extract_lidar(log_name, idx, data_converter_config)] + lidar_data_dict = _extract_lidar(log_name, idx, data_converter_config) + for lidar_type, lidar_data in lidar_data_dict.items(): + if lidar_data is not None: + row_data[lidar_type.serialize()] = [lidar_data] + else: + row_data[lidar_type.serialize()] = [None] if data_converter_config.camera_store_option is not None: camera_data_dict = _extract_cameras(log_name, idx, data_converter_config) @@ -363,7 +367,7 @@ def _write_recording_table( recording_table = recording_table.sort_by([("timestamp", "ascending")]) write_arrow_table(recording_table, log_file_path) -#TODO default timestamps +#TODO default timestamps and Synchronization all other parts def _read_timestamps(log_name: str) -> Optional[List[TimePoint]]: # unix ts_file = PATH_2D_RAW_ROOT / log_name / "image_01" / "timestamps.txt" @@ -501,9 +505,9 @@ def _extract_detections( return detections_states, detections_velocity, detections_tokens, detections_types #TODO lidar extraction -def _extract_lidar(log_name: str, idx: int, data_converter_config: DataConverterConfig) -> Optional[str]: +def _extract_lidar(log_name: str, idx: int, data_converter_config: DataConverterConfig) -> Dict[LiDARType, Optional[str]]: lidar: Optional[str] = None - lidar_full_path = DIR_3D_RAW / log_name / "velodyne_points" / "data" / f"{idx:010d}.bin" + lidar_full_path = PATH_3D_RAW_ROOT / log_name / "velodyne_points" / "data" / f"{idx:010d}.bin" if lidar_full_path.exists(): if data_converter_config.lidar_store_option == "path": lidar = f"/data_3d_raw/{log_name}/velodyne_points/data/{idx:010d}.bin" @@ -511,7 +515,7 @@ def _extract_lidar(log_name: str, idx: int, data_converter_config: DataConverter raise NotImplementedError("Binary lidar storage is not implemented.") else: raise FileNotFoundError(f"LiDAR file not found: {lidar_full_path}") - return {LiDARType.LIDAR_TOP: lidar} if lidar else None + return {LiDARType.LIDAR_TOP: lidar} #TODO check camera extrinsic now is from camera to pose def _extract_cameras( diff --git a/d123/dataset/dataset_specific/kitti_360/kitti_360_helper.py b/d123/dataset/dataset_specific/kitti_360/kitti_360_helper.py index da79cf3e..c86d9604 100644 --- a/d123/dataset/dataset_specific/kitti_360/kitti_360_helper.py +++ 
b/d123/dataset/dataset_specific/kitti_360/kitti_360_helper.py @@ -1,13 +1,13 @@ import numpy as np from collections import defaultdict -from labels import kittiId2label from scipy.linalg import polar from scipy.spatial.transform import Rotation as R from d123.common.geometry.base import StateSE3 from d123.common.geometry.bounding_box.bounding_box import BoundingBoxSE3 +from d123.dataset.dataset_specific.kitti_360.labels import kittiId2label DEFAULT_ROLL = 0.0 DEFAULT_PITCH = 0.0 @@ -93,8 +93,8 @@ def get_state_array(self): x=self.T[0], y=self.T[1], z=self.T[2], - roll=DEFAULT_ROLL, - pitch=DEFAULT_PITCH, + roll=roll, + pitch=pitch, yaw=yaw, ) bounding_box_se3 = BoundingBoxSE3(center, scale[0], scale[1], scale[2]) From 2d129ee0df6980d219e529a57a60ba6e487a47d6 Mon Sep 17 00:00:00 2001 From: jbwang <1159270049@qq.com> Date: Sun, 17 Aug 2025 12:27:05 +0800 Subject: [PATCH 005/145] add pose calibration to align with nuplan --- d123/script/run_viser.py | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/d123/script/run_viser.py b/d123/script/run_viser.py index e682a96e..e977c669 100644 --- a/d123/script/run_viser.py +++ b/d123/script/run_viser.py @@ -19,10 +19,13 @@ def main(cfg: DictConfig) -> None: worker = build_worker(cfg) scene_filter = build_scene_filter(cfg.scene_filter) + logger.info(f"Scene filter: {scene_filter}") + logger.info(f"Using {cfg.scene_builder}") + scene_filter.duration_s = 50 scene_builder = build_scene_builder(cfg.scene_builder) scenes = scene_builder.get_scenes(scene_filter, worker=worker) - - ViserVisualizationServer(scenes=scenes) + logger.info(f"Found {len(scenes)} scenes.") + ViserVisualizationServer(scenes=scenes,scene_index=0) if __name__ == "__main__": From 7dd70e6e0713628cb8cf19c9c282814ca04d2e5b Mon Sep 17 00:00:00 2001 From: jbwang <1159270049@qq.com> Date: Sun, 17 Aug 2025 12:30:07 +0800 Subject: [PATCH 006/145] add pose calibration to align with nuplan --- d123/dataset/arrow/conversion.py | 1 + .../kitti_360/kitti_360_data_converter.py | 55 ++++-- jbwang_test.py | 43 +++-- jbwang_test2.py | 162 +++++++++++------- notebooks/dataset/jbwang_test.py | 7 +- 5 files changed, 172 insertions(+), 96 deletions(-) diff --git a/d123/dataset/arrow/conversion.py b/d123/dataset/arrow/conversion.py index d9afba6f..69488545 100644 --- a/d123/dataset/arrow/conversion.py +++ b/d123/dataset/arrow/conversion.py @@ -33,6 +33,7 @@ DATASET_SENSOR_ROOT: Dict[str, Path] = { "nuplan": Path(os.environ["NUPLAN_DATA_ROOT"]) / "nuplan-v1.1" / "sensor_blobs", "carla": Path(os.environ["CARLA_DATA_ROOT"]) / "sensor_blobs", + "kitti360": Path(os.environ["KITTI360_DATA_ROOT"]), } diff --git a/d123/dataset/dataset_specific/kitti_360/kitti_360_data_converter.py b/d123/dataset/dataset_specific/kitti_360/kitti_360_data_converter.py index 02dc0add..7e13b905 100644 --- a/d123/dataset/dataset_specific/kitti_360/kitti_360_data_converter.py +++ b/d123/dataset/dataset_specific/kitti_360/kitti_360_data_converter.py @@ -12,6 +12,7 @@ import xml.etree.ElementTree as ET import pyarrow as pa from PIL import Image +import logging from nuplan.planning.utils.multithreading.worker_utils import WorkerPool, worker_map @@ -61,12 +62,11 @@ PATH_POSES_ROOT: Path = KITTI360_DATA_ROOT / DIR_POSES PATH_CALIB_ROOT: Path = KITTI360_DATA_ROOT / DIR_CALIB -#TODO check all paths KITTI360_REQUIRED_MODALITY_ROOTS: Dict[str, Path] = { DIR_2D_RAW: PATH_2D_RAW_ROOT, DIR_3D_RAW: PATH_3D_RAW_ROOT, - # DIR_3D_BBOX: PATH_3D_BBOX_ROOT, - # DIR_POSES: PATH_POSES_ROOT, + DIR_POSES: PATH_POSES_ROOT, + DIR_3D_BBOX: 
PATH_3D_BBOX_ROOT / "train",
 }

-#TODO
+#TODO now only parts of labels are used
 KIITI360_DETECTION_NAME_DICT = {
     "truck": DetectionType.VEHICLE,
     "bus": DetectionType.VEHICLE,
     "car": DetectionType.VEHICLE,
     "motorcycle": DetectionType.BICYCLE,
     "bicycle": DetectionType.BICYCLE,
     "pedestrian": DetectionType.PEDESTRIAN,
 }

+KITTI3602NUPLAN_IMU_CALIBRATION = np.array([
+    [1, 0, 0, 0],
+    [0, -1, 0, 0],
+    [0, 0, -1, 0],
+    [0, 0, 0, 1],
+    ], dtype=np.float64)
+
+KITTI3602NUPLAN_LIDAR_CALIBRATION = np.array([
+    [0, -1, 0, 0],
+    [1, 0, 0, 0],
+    [0, 0, 1, 0],
+    [0, 0, 0, 1],
+    ], dtype=np.float64)
+

 def create_token(input_data: str) -> str:
     # TODO: Refactor this function.
@@ -120,23 +134,30 @@ def _collect_log_paths(self) -> Dict[str, List[Path]]:
         # Enumerate candidate sequences from data_2d_raw
         candidates = sorted(p for p in PATH_2D_RAW_ROOT.iterdir() if p.is_dir() and p.name.endswith("_sync"))

+        def _has_modality(seq_name: str, modality_name: str, root: Path) -> bool:
+            if modality_name == DIR_3D_BBOX:
+                # expected: data_3d_bboxes/train/{seq_name}.xml
+                xml_path = root / f"{seq_name}.xml"
+                return xml_path.exists()
+            else:
+                return (root / seq_name).exists()
+
         valid_seqs: List[Path] = []
         for seq_dir in candidates:
             seq_name = seq_dir.name
             missing_modalities = [
                 modality_name
                 for modality_name, root in KITTI360_REQUIRED_MODALITY_ROOTS.items()
-                if not (root / seq_name).exists()
+                if not _has_modality(seq_name, modality_name, root)
             ]
             if not missing_modalities:
                 valid_seqs.append(seq_dir)  #KITTI360_DATA_ROOT / DIR_2D_RAW /seq_name
-            #TODO warnings
-            # else:
-            #     warnings.warn(
-            #         f"Sequence '{seq_name}' skipped: missing modalities {missing_modalities}. "
-            #         f"Root: {KITTI360_DATA_ROOT}"
-            #     )
-        print("valid",valid_seqs)
+            else:
+                logging.info(
+                    f"Sequence '{seq_name}' skipped: missing modalities {missing_modalities}. "
+                    f"Root: {KITTI360_DATA_ROOT}"
+                )
+        logging.info(f"valid sequences found: {valid_seqs}")
         return {"kitti360": valid_seqs}

     def get_available_splits(self) -> List[str]:
@@ -144,7 +165,7 @@ def get_available_splits(self) -> List[str]:
         return ["kitti360"]

     def convert_maps(self, worker: WorkerPool) -> None:
-        print("KITTI-360 does not provide standard maps. Skipping map conversion.")
+        logging.info("KITTI-360 does not provide standard maps. 
Skipping map conversion.") return None def convert_logs(self, worker: WorkerPool) -> None: @@ -184,6 +205,7 @@ def convert_kitti360_log_to_arrow( if not log_file_path.parent.exists(): log_file_path.parent.mkdir(parents=True, exist_ok=True) + #TODO location metadata = LogMetadata( dataset="kitti360", log_name=log_name, @@ -300,13 +322,17 @@ def get_kitti360_lidar_metadata() -> Dict[LiDARType, LiDARMetadata]: values = list(map(float, image_00.strip().split()[1:])) matrix = np.array(values).reshape(3, 4) cam2pose = np.concatenate((matrix, lastrow)) + cam2pose = KITTI3602NUPLAN_IMU_CALIBRATION @ cam2pose cam2velo = np.concatenate((np.loadtxt(cam2velo_txt).reshape(3,4), lastrow)) + cam2velo = KITTI3602NUPLAN_LIDAR_CALIBRATION @ cam2velo + extrinsic = cam2velo @ np.linalg.inv(cam2pose) metadata[LiDARType.LIDAR_TOP] = LiDARMetadata( lidar_type=LiDARType.LIDAR_TOP, lidar_index=Kitti360LidarIndex, + #TODO extrinsic needed to be same with nuplan extrinsic=extrinsic, ) return metadata @@ -367,7 +393,7 @@ def _write_recording_table( recording_table = recording_table.sort_by([("timestamp", "ascending")]) write_arrow_table(recording_table, log_file_path) -#TODO default timestamps and Synchronization all other parts +#TODO default timestamps and Synchronization all other sequences def _read_timestamps(log_name: str) -> Optional[List[TimePoint]]: # unix ts_file = PATH_2D_RAW_ROOT / log_name / "image_01" / "timestamps.txt" @@ -504,7 +530,7 @@ def _extract_detections( return detections_states, detections_velocity, detections_tokens, detections_types -#TODO lidar extraction +#TODO lidar extraction now only velo def _extract_lidar(log_name: str, idx: int, data_converter_config: DataConverterConfig) -> Dict[LiDARType, Optional[str]]: lidar: Optional[str] = None lidar_full_path = PATH_3D_RAW_ROOT / log_name / "velodyne_points" / "data" / f"{idx:010d}.bin" @@ -541,6 +567,7 @@ def _extract_cameras( values = list(map(float, parts[1:])) matrix = np.array(values).reshape(3, 4) cam2pose = np.concatenate((matrix, lastrow)) + cam2pose = KITTI3602NUPLAN_IMU_CALIBRATION @ cam2pose if data_converter_config.camera_store_option == "path": camera_data = str(img_path_png), cam2pose.flatten().tolist() diff --git a/jbwang_test.py b/jbwang_test.py index ff320df9..e42f512a 100644 --- a/jbwang_test.py +++ b/jbwang_test.py @@ -68,18 +68,31 @@ # print(a)ee -import numpy as np -from pathlib import Path -a = np.loadtxt("/data/jbwang/d123/data_poses/2013_05_28_drive_0000_sync/oxts/data/0000000000.txt") -b = np.loadtxt("/nas/datasets/KITTI-360/data_poses/2013_05_28_drive_0018_sync/poses.txt") -data = b -ts = data[:, 0].astype(np.int32) -poses = np.reshape(data[:, 1:], (-1, 3, 4)) -poses = np.concatenate((poses, np.tile(np.array([0, 0, 0, 1]).reshape(1,1,4),(poses.shape[0],1,1))), 1) -print(a) -print(b.shape) -print(ts.shape) -print(poses.shape) - -ccc = Path("/data/jbwang/d123/data_poses/2013_05_28_drive_0000_sync/oxts/data/") -print(len(list(ccc.glob("*.txt")))) \ No newline at end of file +# import numpy as np +# from pathlib import Path +# a = np.loadtxt("/data/jbwang/d123/data_poses/2013_05_28_drive_0000_sync/oxts/data/0000000000.txt") +# b = np.loadtxt("/nas/datasets/KITTI-360/data_poses/2013_05_28_drive_0018_sync/poses.txt") +# data = b +# ts = data[:, 0].astype(np.int32) +# poses = np.reshape(data[:, 1:], (-1, 3, 4)) +# poses = np.concatenate((poses, np.tile(np.array([0, 0, 0, 1]).reshape(1,1,4),(poses.shape[0],1,1))), 1) +# print(a) +# print(b.shape) +# print(ts.shape) +# print(poses.shape) + +# ccc = 
Path("/data/jbwang/d123/data_poses/2013_05_28_drive_0000_sync/oxts/data/") +# print(len(list(ccc.glob("*.txt")))) + + + + +from d123.dataset.dataset_specific.nuplan.nuplan_data_converter import convert_nuplan_map_to_gpkg + +from d123.dataset.dataset_specific.raw_data_converter import DataConverterConfig + +MAP_LOCATIONS = {"sg-one-north", "us-ma-boston", "us-nv-las-vegas-strip", "us-pa-pittsburgh-hazelwood"} +maps = list(MAP_LOCATIONS) + +data_conveter_config = DataConverterConfig(output_path = "/nas/datasets/nuplan/maps") +convert_nuplan_map_to_gpkg(maps,data_conveter_config) \ No newline at end of file diff --git a/jbwang_test2.py b/jbwang_test2.py index b1229356..b406c52c 100644 --- a/jbwang_test2.py +++ b/jbwang_test2.py @@ -1,70 +1,104 @@ -# import numpy as np -# import pickle +# # import numpy as np +# # import pickle + +# # # path = "/nas/datasets/KITTI-360/data_3d_raw/2013_05_28_drive_0000_sync/velodyne_points/data/0000000000.bin" +# # # a = np.fromfile(path, dtype=np.float32) + +# # # print(a.shape) +# # # print(a[:10]) + +# # # path2 = "/nas/datasets/KITTI-360/calibration/calib_cam_to_pose.txt" +# # # c = np.loadtxt(path2) +# # # print(c) + +# # import open3d as o3d +# # import numpy as np + +# # def read_ply_file(file_path): +# # # 读取 PLY 文件 +# # pcd = o3d.io.read_point_cloud(file_path) +# # print(len(pcd.points), len(pcd.colors)) +# # # 提取顶点信息 +# # points = np.asarray(pcd.points) # x, y, z +# # colors = np.asarray(pcd.colors) # red, green, blue +# # # semantics = np.asarray(pcd.semantic) # semanticID, instanceID, isVisible, confidence + +# # # 将所有信息合并到一个数组中 +# # vertices = np.hstack((points, colors)) -# # path = "/nas/datasets/KITTI-360/data_3d_raw/2013_05_28_drive_0000_sync/velodyne_points/data/0000000000.bin" -# # a = np.fromfile(path, dtype=np.float32) +# # return vertices -# # print(a.shape) -# # print(a[:10]) +# # # 示例用法 +# # file_path = '/nas/datasets/KITTI-360/data_3d_semantics/train/2013_05_28_drive_0000_sync/static/0000000002_0000000385.ply' # 替换为你的 PLY 文件路径 +# # vertices = read_ply_file(file_path) -# # path2 = "/nas/datasets/KITTI-360/calibration/calib_cam_to_pose.txt" -# # c = np.loadtxt(path2) -# # print(c) +# # # 打印前几个顶点信息 +# # print("顶点信息 (前5个顶点):") +# # print(vertices[:5]) -# import open3d as o3d # import numpy as np +# from scipy.linalg import polar +# from scipy.spatial.transform import Rotation as R + +# def polar_decompose_rotation_scale(A: np.ndarray): +# """ +# A: 3x3 (含旋转+缩放+剪切) +# 返回: +# Rm: 纯旋转 +# Sm: 对称正定 (缩放+剪切) +# scale: 近似轴缩放(从 Sm 特征值开方或对角提取;若存在剪切需谨慎) +# yaw,pitch,roll: 使用 ZYX 序列 (常对应 yaw(Z), pitch(Y), roll(X)) +# """ +# Rm, Sm = polar(A) # A = Rm @ Sm +# # 近似各向缩放(若无剪切): +# scale = np.diag(Sm) +# # 欧拉角 +# yaw, pitch, roll = R.from_matrix(Rm).as_euler('zyx', degrees=False) +# return { +# "R": Rm, +# "S": Sm, +# "scale_diag": scale, +# "yaw_pitch_roll": (yaw, pitch, roll), +# } + +# M = np.array([ +# [-3.97771668e+00, -1.05715942e+00,-2.18206085e-02], +# [2.43555284e+00, -1.72707462e+00, -1.03932284e-02], +# [-4.41359095e-02, -2.94448305e-02, 1.39303744e+00], +# ]) +# out = polar_decompose_rotation_scale(M) +# print(out) + + +import glob +import os +import cv2 + +def to_video(folder_path, fps=15, downsample=2): + imgs_path = glob.glob(os.path.join(folder_path, '*png*')) + # imgs_path = sorted(imgs_path)[:19] + imgs_path = sorted(imgs_path)[:700:1] + img_array = [] + for img_path in imgs_path: + img = cv2.imread(img_path) + height, width, channel = img.shape + img = cv2.resize(img, (width // downsample, height // + downsample), 
interpolation=cv2.INTER_AREA) + height, width, channel = img.shape + size = (width, height) + img_array.append(img) + + # media.write_video(os.path.join(folder_path, 'video.mp4'), img_array, fps=10) + mp4_path = os.path.join("/data/jbwang/d123/video/", 'video_one_episode.mp4') + if os.path.exists(mp4_path): + os.remove(mp4_path) + out = cv2.VideoWriter( + mp4_path, + cv2.VideoWriter_fourcc(*'DIVX'), fps, size + ) + for i in range(len(img_array)): + out.write(img_array[i]) + out.release() + +to_video("/nas/datasets/KITTI-360/2013_05_28_drive_0000_sync/image_00/data_rect/") -# def read_ply_file(file_path): -# # 读取 PLY 文件 -# pcd = o3d.io.read_point_cloud(file_path) -# print(len(pcd.points), len(pcd.colors)) -# # 提取顶点信息 -# points = np.asarray(pcd.points) # x, y, z -# colors = np.asarray(pcd.colors) # red, green, blue -# # semantics = np.asarray(pcd.semantic) # semanticID, instanceID, isVisible, confidence - -# # 将所有信息合并到一个数组中 -# vertices = np.hstack((points, colors)) - -# return vertices - -# # 示例用法 -# file_path = '/nas/datasets/KITTI-360/data_3d_semantics/train/2013_05_28_drive_0000_sync/static/0000000002_0000000385.ply' # 替换为你的 PLY 文件路径 -# vertices = read_ply_file(file_path) - -# # 打印前几个顶点信息 -# print("顶点信息 (前5个顶点):") -# print(vertices[:5]) - -import numpy as np -from scipy.linalg import polar -from scipy.spatial.transform import Rotation as R - -def polar_decompose_rotation_scale(A: np.ndarray): - """ - A: 3x3 (含旋转+缩放+剪切) - 返回: - Rm: 纯旋转 - Sm: 对称正定 (缩放+剪切) - scale: 近似轴缩放(从 Sm 特征值开方或对角提取;若存在剪切需谨慎) - yaw,pitch,roll: 使用 ZYX 序列 (常对应 yaw(Z), pitch(Y), roll(X)) - """ - Rm, Sm = polar(A) # A = Rm @ Sm - # 近似各向缩放(若无剪切): - scale = np.diag(Sm) - # 欧拉角 - yaw, pitch, roll = R.from_matrix(Rm).as_euler('zyx', degrees=False) - return { - "R": Rm, - "S": Sm, - "scale_diag": scale, - "yaw_pitch_roll": (yaw, pitch, roll), - } - -M = np.array([ - [-3.97771668e+00, -1.05715942e+00,-2.18206085e-02], - [2.43555284e+00, -1.72707462e+00, -1.03932284e-02], - [-4.41359095e-02, -2.94448305e-02, 1.39303744e+00], -]) -out = polar_decompose_rotation_scale(M) -print(out) \ No newline at end of file diff --git a/notebooks/dataset/jbwang_test.py b/notebooks/dataset/jbwang_test.py index 0996734b..c2cabfbe 100644 --- a/notebooks/dataset/jbwang_test.py +++ b/notebooks/dataset/jbwang_test.py @@ -1,7 +1,8 @@ # s3_uri = "/data/jbwang/d123/data/nuplan_mini_train/2021.10.11.07.12.18_veh-50_00211_00304.arrow" -s3_uri = "/data/jbwang/d123/data/nuplan_private_test/2021.09.22.13.20.34_veh-28_01446_01583.arrow" +# s3_uri = "/data/jbwang/d123/data/nuplan_private_test/2021.09.22.13.20.34_veh-28_01446_01583.arrow" # s3_uri = "/data/jbwang/d123/data/carla/_Rep0_routes_validation1_route0_07_23_14_33_15.arrow" # s3_uri = "/data/jbwang/d123/data/nuplan_mini_val/2021.06.07.12.54.00_veh-35_01843_02314.arrow" +s3_uri = "/data/jbwang/d123/data2/kitti360_c2e_train/2013_05_28_drive_0000_sync_c2e.arrow" import pyarrow as pa import pyarrow.fs as fs @@ -34,9 +35,9 @@ if col == "lidar": continue print(f"Column : {col}, Type: {table.schema.field(col).type}") - # tokens = table[col] # 或 table.column("token") + tokens = table[col] # 或 table.column("token") # print(tokens) - # print(len(tokens)) + print(len(tokens)) # print(tokens.slice(0, 100).to_pylist()) # print(table["traffic_light_ids"]) timer.log("3. 
Table created") From 7110af5ef464912c1fe51a673c503f8a293a9d79 Mon Sep 17 00:00:00 2001 From: jbwang <1159270049@qq.com> Date: Mon, 18 Aug 2025 12:37:43 +0800 Subject: [PATCH 007/145] finish dynamic car and static car remains some bug(start and end frame) --- .../kitti_360/kitti_360_data_converter.py | 75 ++++++++++++++---- .../kitti_360/kitti_360_helper.py | 58 +++++++++++--- .../default_dataset_conversion.yaml | 4 - d123/script/run_viser.py | 9 +-- jbwang_test2.py | 79 +++++++++++-------- 5 files changed, 159 insertions(+), 66 deletions(-) diff --git a/d123/dataset/dataset_specific/kitti_360/kitti_360_data_converter.py b/d123/dataset/dataset_specific/kitti_360/kitti_360_data_converter.py index 7e13b905..efc0bdf2 100644 --- a/d123/dataset/dataset_specific/kitti_360/kitti_360_data_converter.py +++ b/d123/dataset/dataset_specific/kitti_360/kitti_360_data_converter.py @@ -7,6 +7,7 @@ from typing import Any, Dict, Final, List, Optional, Tuple, Union import numpy as np +from collections import defaultdict import datetime import hashlib import xml.etree.ElementTree as ET @@ -69,7 +70,7 @@ DIR_3D_BBOX: PATH_3D_BBOX_ROOT / "train", } -#TODO +#TODO now only parts of labels are used KIITI360_DETECTION_NAME_DICT = { "truck": DetectionType.VEHICLE, "bus": DetectionType.VEHICLE, @@ -332,7 +333,6 @@ def get_kitti360_lidar_metadata() -> Dict[LiDARType, LiDARMetadata]: metadata[LiDARType.LIDAR_TOP] = LiDARMetadata( lidar_type=LiDARType.LIDAR_TOP, lidar_index=Kitti360LidarIndex, - #TODO extrinsic needed to be same with nuplan extrinsic=extrinsic, ) return metadata @@ -345,8 +345,11 @@ def _write_recording_table( ) -> None: ts_list = _read_timestamps(log_name) - ego_state_all = _extract_ego_state_all(log_name) + #TODO + print("extracting detections...") detections_states,detections_velocity,detections_tokens,detections_types = _extract_detections(log_name,len(ts_list)) + print("extracting states...") + ego_state_all = _extract_ego_state_all(log_name) with pa.OSFile(str(log_file_path), "wb") as sink: with pa.ipc.new_file(sink, recording_schema) as writer: @@ -437,6 +440,7 @@ def _extract_ego_state_all(log_name: str) -> List[List[float]]: oxts_path_file = oxts_path / f"{int(idx):010d}.txt" oxts_data = np.loadtxt(oxts_path_file) + #TODO check roll, pitch, yaw roll, pitch, yaw = oxts_data[3:6] vehicle_parameters = get_kitti360_station_wagon_parameters() @@ -479,7 +483,7 @@ def _extract_ego_state_all(log_name: str) -> List[List[float]]: ) return ego_state_all -#TODO now only divided by data_3d_semantics +#TODO # We may distinguish between image and lidar detections # besides, now it is based only on start and end frame def _extract_detections( @@ -499,6 +503,18 @@ def _extract_detections( tree = ET.parse(bbox_3d_path) root = tree.getroot() + dynamic_groups: Dict[int, List[KITTI360Bbox3D]] = defaultdict(list) + + lidra_data_all = [] + for index in range(ts_len): + lidar_full_path = PATH_3D_RAW_ROOT / log_name / "velodyne_points" / "data" / f"{index:010d}.bin" + if not lidar_full_path.exists(): + logging.warning(f"LiDAR file not found for frame {index}: {lidar_full_path}") + continue + lidar_data = np.fromfile(lidar_full_path, dtype=np.float32) + lidar_data = lidar_data.reshape(-1, 4)[:, :3] # Keep only x, y, z coordinates + lidra_data_all.append(lidar_data) + for child in root: label = child.find('label').text if child.find('transform') is None or label not in KIITI360_DETECTION_NAME_DICT.keys(): @@ -506,27 +522,57 @@ def _extract_detections( obj = KITTI360Bbox3D() obj.parseBbox(child) - # static + #static object 
if obj.timestamp == -1: start_frame = obj.start_frame end_frame = obj.end_frame for frame in range(start_frame, end_frame + 1): - #TODO check if valid in each frame - if frame < 0 or frame >= ts_len: - continue - #TODO check yaw + lidar_data = lidra_data_all[frame] + #TODO check yaw and box visible + # if obj.box_visible_in_point_cloud(lidar_data): detections_states[frame].append(obj.get_state_array()) detections_velocity[frame].append([0.0, 0.0, 0.0]) detections_tokens[frame].append(str(obj.globalID)) - detections_types[frame].append(int(KIITI360_DETECTION_NAME_DICT[label])) - # dynamic + detections_types[frame].append(int(KIITI360_DETECTION_NAME_DICT[obj.label])) else: + ann_id = obj.annotationId + dynamic_groups[ann_id].append(obj) + + # dynamic object + for ann_id, obj_list in dynamic_groups.items(): + obj_list.sort(key=lambda obj: obj.timestamp) + num_frames = len(obj_list) + + positions = [obj.get_state_array()[:3] for obj in obj_list] + timestamps = [int(obj.timestamp) for obj in obj_list] + + velocities = [] + + for i in range(1, num_frames - 1): + dt_frames = timestamps[i+1] - timestamps[i-1] + if dt_frames > 0: + dt = dt_frames * KITTI360_DT + vel = (positions[i+1] - positions[i-1]) / dt + # Transform velocity to the ego frame + vel = obj_list[i].Rm.T @ vel + else: + vel = np.zeros(3) + velocities.append(vel) + + if num_frames > 1: + # first and last frame + velocities.insert(0, velocities[0]) + velocities.append(velocities[-1]) + elif num_frames == 1: + velocities.append(np.zeros(3)) + + for obj, vel in zip(obj_list, velocities): frame = obj.timestamp detections_states[frame].append(obj.get_state_array()) - #TODO velocity not provided - detections_velocity[frame].append([0.0, 0.0, 0.0]) + detections_velocity[frame].append(vel) detections_tokens[frame].append(str(obj.globalID)) - detections_types[frame].append(int(KIITI360_DETECTION_NAME_DICT[label])) + detections_types[frame].append(int(KIITI360_DETECTION_NAME_DICT[obj.label])) + return detections_states, detections_velocity, detections_tokens, detections_types @@ -543,7 +589,6 @@ def _extract_lidar(log_name: str, idx: int, data_converter_config: DataConverter raise FileNotFoundError(f"LiDAR file not found: {lidar_full_path}") return {LiDARType.LIDAR_TOP: lidar} -#TODO check camera extrinsic now is from camera to pose def _extract_cameras( log_name: str, idx: int, data_converter_config: DataConverterConfig ) -> Dict[CameraType, Optional[str]]: diff --git a/d123/dataset/dataset_specific/kitti_360/kitti_360_helper.py b/d123/dataset/dataset_specific/kitti_360/kitti_360_helper.py index c86d9604..dc1d10cf 100644 --- a/d123/dataset/dataset_specific/kitti_360/kitti_360_helper.py +++ b/d123/dataset/dataset_specific/kitti_360/kitti_360_helper.py @@ -47,6 +47,9 @@ def __init__(self): # name self.name = '' + + #label + self.label = '' def parseOpencvMatrix(self, node): rows = int(node.find('rows').text) @@ -75,28 +78,63 @@ def parseBbox(self, child): self.annotationId = int(child.find('index').text) + 1 + self.label = child.find('label').text + self.globalID = local2global(self.semanticId, self.instanceId) + self.parseVertices(child) + self.parse_scale_rotation() + + def parseVertices(self, child): transform = self.parseOpencvMatrix(child.find('transform')) - self.R = transform[:3,:3] - self.T = transform[:3,3] + R = transform[:3,:3] + T = transform[:3,3] + vertices = self.parseOpencvMatrix(child.find('vertices')) + + vertices = np.matmul(R, vertices.transpose()).transpose() + T + self.vertices = vertices + + self.R = R + self.T = T - def 
polar_decompose_rotation_scale(self): + def parse_scale_rotation(self): Rm, Sm = polar(self.R) scale = np.diag(Sm) yaw, pitch, roll = R.from_matrix(Rm).as_euler('zyx', degrees=False) - return scale, (yaw, pitch, roll) - + self.Rm = np.array(Rm) + self.scale = scale + self.yaw = yaw + self.pitch = pitch + self.roll = roll + + # self.pose = np.eye(4, dtype=np.float64) + # self.pose[:3, :3] = self.Rm + # self.pose[:3, 3] = self.T + # self.w2e = np.linalg.inv(self.pose) + def get_state_array(self): - scale, (yaw, pitch, roll) = self.polar_decompose_rotation_scale() center = StateSE3( x=self.T[0], y=self.T[1], z=self.T[2], - roll=roll, - pitch=pitch, - yaw=yaw, + roll=self.roll, + pitch=self.pitch, + yaw=self.yaw, ) + scale = self.scale bounding_box_se3 = BoundingBoxSE3(center, scale[0], scale[1], scale[2]) - return bounding_box_se3.array \ No newline at end of file + return bounding_box_se3.array + + def box_visible_in_point_cloud(self, points): + # points: (N,3) , box: (8,3) + box = self.vertices + O, A, B, C = box[0], box[1], box[2], box[5] + OA = A - O + OB = B - O + OC = C - O + POA, POB, POC = (points @ OA[..., None])[:, 0], (points @ OB[..., None])[:, 0], (points @ OC[..., None])[:, 0] + mask = (np.dot(O, OA) < POA) & (POA < np.dot(A, OA)) & \ + (np.dot(O, OB) < POB) & (POB < np.dot(B, OB)) & \ + (np.dot(O, OC) < POC) & (POC < np.dot(C, OC)) + return True if np.sum(mask) > 100 else False \ No newline at end of file diff --git a/d123/script/config/dataset_conversion/default_dataset_conversion.yaml b/d123/script/config/dataset_conversion/default_dataset_conversion.yaml index 97ca3a7a..e1c76c60 100644 --- a/d123/script/config/dataset_conversion/default_dataset_conversion.yaml +++ b/d123/script/config/dataset_conversion/default_dataset_conversion.yaml @@ -15,12 +15,8 @@ defaults: - default_dataset_paths - _self_ - datasets: -<<<<<<< HEAD - kitti360_dataset # - nuplan_private_dataset -======= - - nuplan_private_dataset ->>>>>>> dev_v0.0.6 # - carla_dataset # - wopd_dataset diff --git a/d123/script/run_viser.py b/d123/script/run_viser.py index e977c669..faaf08ca 100644 --- a/d123/script/run_viser.py +++ b/d123/script/run_viser.py @@ -19,14 +19,11 @@ def main(cfg: DictConfig) -> None: worker = build_worker(cfg) scene_filter = build_scene_filter(cfg.scene_filter) - logger.info(f"Scene filter: {scene_filter}") - logger.info(f"Using {cfg.scene_builder}") - scene_filter.duration_s = 50 scene_builder = build_scene_builder(cfg.scene_builder) scenes = scene_builder.get_scenes(scene_filter, worker=worker) - logger.info(f"Found {len(scenes)} scenes.") - ViserVisualizationServer(scenes=scenes,scene_index=0) + + ViserVisualizationServer(scenes=scenes) if __name__ == "__main__": - main() + main() \ No newline at end of file diff --git a/jbwang_test2.py b/jbwang_test2.py index b406c52c..aa685428 100644 --- a/jbwang_test2.py +++ b/jbwang_test2.py @@ -69,36 +69,53 @@ # out = polar_decompose_rotation_scale(M) # print(out) +# import numpy as np +# path = "/nas/datasets/KITTI-360/data_3d_raw/2013_05_28_drive_0000_sync/velodyne_points/data/0000000000.bin" +# a = np.fromfile(path, dtype=np.float32) +# a = a.reshape((-1,4)) +# print(a[10000:10010,:3]) -import glob -import os -import cv2 - -def to_video(folder_path, fps=15, downsample=2): - imgs_path = glob.glob(os.path.join(folder_path, '*png*')) - # imgs_path = sorted(imgs_path)[:19] - imgs_path = sorted(imgs_path)[:700:1] - img_array = [] - for img_path in imgs_path: - img = cv2.imread(img_path) - height, width, channel = img.shape - img = cv2.resize(img, (width 
// downsample, height // downsample), interpolation=cv2.INTER_AREA)
-        height, width, channel = img.shape
-        size = (width, height)
-        img_array.append(img)
-
-    # media.write_video(os.path.join(folder_path, 'video.mp4'), img_array, fps=10)
-    mp4_path = os.path.join("/data/jbwang/d123/video/", 'video_one_episode.mp4')
-    if os.path.exists(mp4_path):
-        os.remove(mp4_path)
-    out = cv2.VideoWriter(
-        mp4_path,
-        cv2.VideoWriter_fourcc(*'DIVX'), fps, size
-    )
-    for i in range(len(img_array)):
-        out.write(img_array[i])
-    out.release()
-
-to_video("/nas/datasets/KITTI-360/2013_05_28_drive_0000_sync/image_00/data_rect/")
+import gc
+import json
+import os
+from dataclasses import asdict
+from functools import partial
+from pathlib import Path
+from typing import Any, Dict, Final, List, Optional, Tuple, Union
+
+import numpy as np
+from collections import defaultdict
+import datetime
+import hashlib
+import xml.etree.ElementTree as ET
+import pyarrow as pa
+from PIL import Image
+import logging
+
+from d123.common.datatypes.detection.detection_types import DetectionType
+from d123.dataset.dataset_specific.kitti_360.kitti_360_helper import KITTI360Bbox3D
+
+
+bbox_3d_path = Path("/nas/datasets/KITTI-360/data_3d_bboxes/train/2013_05_28_drive_0000_sync.xml")
+
+tree = ET.parse(bbox_3d_path)
+root = tree.getroot()
+
+KIITI360_DETECTION_NAME_DICT = {
+    "truck": DetectionType.VEHICLE,
+    "bus": DetectionType.VEHICLE,
+    "car": DetectionType.VEHICLE,
+    "motorcycle": DetectionType.BICYCLE,
+    "bicycle": DetectionType.BICYCLE,
+    "pedestrian": DetectionType.PEDESTRIAN,
+}
+
+for child in root:
+    label = child.find('label').text
+    if child.find('transform') is None or label not in KIITI360_DETECTION_NAME_DICT.keys():
+        continue
+    obj = KITTI360Bbox3D()
+    obj.parseBbox(child)
+    # print(obj.Rm)
+    # print(Sigma)
\ No newline at end of file

From 778604d4ace9cd9bbc3e27445d3a5f0449786426 Mon Sep 17 00:00:00 2001
From: jbwang <1159270049@qq.com>
Date: Tue, 19 Aug 2025 15:23:08 +0800
Subject: [PATCH 008/145] KITTI-360 conversion nearly done, but some questions remain

---
 .../config/dataset_conversion/default_dataset_conversion.yaml | 2 +-
 d123/script/run_viser.py                                      | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/d123/script/config/dataset_conversion/default_dataset_conversion.yaml b/d123/script/config/dataset_conversion/default_dataset_conversion.yaml
index d8fa5988..52915f13 100644
--- a/d123/script/config/dataset_conversion/default_dataset_conversion.yaml
+++ b/d123/script/config/dataset_conversion/default_dataset_conversion.yaml
@@ -15,11 +15,11 @@ defaults:
  - default_dataset_paths
  - _self_
  - datasets:
-    - kitti360_dataset
    # - nuplan_private_dataset
    # - carla_dataset
    # - wopd_dataset
    # - av2_sensor_dataset
+    - kitti360_dataset

force_log_conversion: True
force_map_conversion: False
diff --git a/d123/script/run_viser.py b/d123/script/run_viser.py
index faaf08ca..8973acea 100644
--- a/d123/script/run_viser.py
+++ b/d123/script/run_viser.py
@@ -21,7 +21,7 @@ def main(cfg: DictConfig) -> None:
     scene_filter = build_scene_filter(cfg.scene_filter)
     scene_builder = build_scene_builder(cfg.scene_builder)
     scenes = scene_builder.get_scenes(scene_filter, worker=worker)
-    
+
     ViserVisualizationServer(scenes=scenes)


From 94bc3f420ab5ba7486556e8aa72e30786e25f0db Mon Sep 17 00:00:00 2001
From: jbwang <1159270049@qq.com>
Date: Tue, 19 Aug 2025 15:24:35 +0800
Subject: [PATCH 009/145] KITTI-360 conversion nearly done, but some questions remain

---
 .gitignore                                    |  3 +-
 .../dataset_specific/kitti_360/jbwang_test.py |  5 +-
.../kitti_360/kitti_360_data_converter.py | 121 ++++++------ .../kitti_360/kitti_360_helper.py | 14 +- docs/datasets/kitti-360.rst | 10 +- jbwang_test2.py | 27 ++- notebooks/dataset/jbwang_test.py | 14 +- notebooks/gym/jbwang_test.py | 180 ++++++++++++++++++ 8 files changed, 302 insertions(+), 72 deletions(-) create mode 100644 notebooks/gym/jbwang_test.py diff --git a/.gitignore b/.gitignore index 3a820809..426cc468 100644 --- a/.gitignore +++ b/.gitignore @@ -23,8 +23,7 @@ *.csv *.log *.mp4 -exp/* - +exp/ # Sphinx documentation docs/_build/ diff --git a/d123/dataset/dataset_specific/kitti_360/jbwang_test.py b/d123/dataset/dataset_specific/kitti_360/jbwang_test.py index 6f0bdbd9..e480783e 100644 --- a/d123/dataset/dataset_specific/kitti_360/jbwang_test.py +++ b/d123/dataset/dataset_specific/kitti_360/jbwang_test.py @@ -21,7 +21,7 @@ from sqlalchemy import func -from kitti_360_data_converter import _extract_ego_state_all,get_kitti360_lidar_metadata,_extract_cameras,_extract_detections +from kitti_360_data_converter import _extract_ego_state_all,get_kitti360_lidar_metadata,_extract_cameras,_extract_detections,_read_timestamps # a = _extract_ego_state_all("2013_05_28_drive_0000_sync") # print(a[0]) @@ -151,4 +151,5 @@ def get_cam_info_from_lidar_pc(log,log_file, lidar_pc, rolling_shutter_s=1/60): # # camera_data = _extract_camera(log_db, lidar_pc, log_path) # camera_data = get_cam_info_from_lidar_pc(log_db,log_path, lidar_pc, rolling_shutter_s=1/60) # print(_extract_cameras("2013_05_28_drive_0000_sync",0)) - _extract_detections("2013_05_28_drive_0000_sync", 0) \ No newline at end of file + # _extract_detections("2013_05_28_drive_0000_sync", 0) + print(_read_timestamps("2013_05_28_drive_0000_sync")) \ No newline at end of file diff --git a/d123/dataset/dataset_specific/kitti_360/kitti_360_data_converter.py b/d123/dataset/dataset_specific/kitti_360/kitti_360_data_converter.py index efc0bdf2..81057042 100644 --- a/d123/dataset/dataset_specific/kitti_360/kitti_360_data_converter.py +++ b/d123/dataset/dataset_specific/kitti_360/kitti_360_data_converter.py @@ -37,7 +37,7 @@ KITTI360_DATA_ROOT = Path(os.environ["KITTI360_DATA_ROOT"]) -#TODO carera mismatch +#TODO cameraType KITTI360_CAMERA_TYPES = { CameraType.CAM_L0: "image_00", CameraType.CAM_R0: "image_01", @@ -55,6 +55,7 @@ DIR_CALIB = "calibration" #TODO PATH_2D_RAW_ROOT +# PATH_2D_RAW_ROOT: Path = KITTI360_DATA_ROOT / DIR_2D_RAW PATH_2D_RAW_ROOT: Path = KITTI360_DATA_ROOT PATH_2D_SMT_ROOT: Path = KITTI360_DATA_ROOT / DIR_2D_SMT PATH_3D_RAW_ROOT: Path = KITTI360_DATA_ROOT / DIR_3D_RAW @@ -206,13 +207,12 @@ def convert_kitti360_log_to_arrow( if not log_file_path.parent.exists(): log_file_path.parent.mkdir(parents=True, exist_ok=True) - #TODO location metadata = LogMetadata( dataset="kitti360", log_name=log_name, - location="None", + location=None, timestep_seconds=KITTI360_DT, - map_has_z=False, + map_has_z=True, ) vehicle_parameters = get_kitti360_station_wagon_parameters() @@ -345,11 +345,9 @@ def _write_recording_table( ) -> None: ts_list = _read_timestamps(log_name) - #TODO - print("extracting detections...") - detections_states,detections_velocity,detections_tokens,detections_types = _extract_detections(log_name,len(ts_list)) - print("extracting states...") ego_state_all = _extract_ego_state_all(log_name) + ego_states_xyz = np.array([ego_state[:3] for ego_state in ego_state_all],dtype=np.float64) + detections_states,detections_velocity,detections_tokens,detections_types = _extract_detections(log_name,len(ts_list),ego_states_xyz) with 
pa.OSFile(str(log_file_path), "wb") as sink:
         with pa.ipc.new_file(sink, recording_schema) as writer:
@@ -364,7 +362,6 @@ def _write_recording_table(
                 "detections_type": [detections_types[idx]],
                 "ego_states": [ego_state_all[idx]],
                 "traffic_light_ids": [[]],
-                #may TODO traffic light types
                 "traffic_light_types": [[]],
                 "scenario_tag": [['unknown']],
                 "route_lane_group_ids": [[]],
@@ -391,36 +388,44 @@ def _write_recording_table(
                 batch = pa.record_batch(row_data, schema=recording_schema)
                 writer.write_batch(batch)

+                del batch
+
     if SORT_BY_TIMESTAMP:
         recording_table = open_arrow_table(log_file_path)
         recording_table = recording_table.sort_by([("timestamp", "ascending")])
         write_arrow_table(recording_table, log_file_path)

-#TODO default timestamps and Synchronization all other sequences
+#TODO synchronize all other sequences
 def _read_timestamps(log_name: str) -> Optional[List[TimePoint]]:
     # unix
-    ts_file = PATH_2D_RAW_ROOT / log_name / "image_01" / "timestamps.txt"
-    if ts_file.exists():
-        tps: List[TimePoint] = []
-        with open(ts_file, "r") as f:
-            for line in f:
-                s = line.strip()
-                if not s:
-                    continue
-                dt_str, ns_str = s.split('.')
-                dt_obj = datetime.datetime.strptime(dt_str, "%Y-%m-%d %H:%M:%S")
-                dt_obj = dt_obj.replace(tzinfo=datetime.timezone.utc)
-                unix_epoch = datetime.datetime(1970, 1, 1, tzinfo=datetime.timezone.utc)
-
-                total_seconds = (dt_obj - unix_epoch).total_seconds()
-
-                ns_value = int(ns_str)
-                us_from_ns = ns_value // 1000
-
-                total_us = int(total_seconds * 1_000_000) + us_from_ns
-
-                tps.append(TimePoint.from_us(total_us))
-        return tps
+    # default to velodyne timestamps; if not available, fall back to camera timestamps
+    ts_files = [
+        PATH_3D_RAW_ROOT / log_name / "velodyne_points" / "timestamps.txt",
+        PATH_2D_RAW_ROOT / log_name / "image_00" / "timestamps.txt",
+        PATH_2D_RAW_ROOT / log_name / "image_01" / "timestamps.txt",
+    ]
+    for ts_file in ts_files:
+        if ts_file.exists():
+            tps: List[TimePoint] = []
+            with open(ts_file, "r") as f:
+                for line in f:
+                    s = line.strip()
+                    if not s:
+                        continue
+                    dt_str, ns_str = s.split('.')
+                    dt_obj = datetime.datetime.strptime(dt_str, "%Y-%m-%d %H:%M:%S")
+                    dt_obj = dt_obj.replace(tzinfo=datetime.timezone.utc)
+                    unix_epoch = datetime.datetime(1970, 1, 1, tzinfo=datetime.timezone.utc)
+
+                    total_seconds = (dt_obj - unix_epoch).total_seconds()
+
+                    ns_value = int(ns_str)
+                    us_from_ns = ns_value // 1000
+
+                    total_us = int(total_seconds * 1_000_000) + us_from_ns
+
+                    tps.append(TimePoint.from_us(total_us))
+            return tps
     return None

 def _extract_ego_state_all(log_name: str) -> List[List[float]]:
@@ -434,8 +439,12 @@ def _extract_ego_state_all(log_name: str) -> List[List[float]]:
     poses_time = poses[:, 0] - 1  # Adjusting time to start from 0

     #TODO
+    #oxts_path = PATH_POSES_ROOT / log_name / "oxts" / "data"
     oxts_path = Path("/data/jbwang/d123/data_poses/") / log_name / "oxts" / "data"

+    pose_idx = 0
+    poses_time_len = len(poses_time)
+
     for idx in range(len(list(oxts_path.glob("*.txt")))):
         oxts_path_file = oxts_path / f"{int(idx):010d}.txt"
         oxts_data = np.loadtxt(oxts_path_file)
@@ -444,7 +453,10 @@ def _extract_ego_state_all(log_name: str) -> List[List[float]]:
         roll, pitch, yaw = oxts_data[3:6]
         vehicle_parameters = get_kitti360_station_wagon_parameters()

-        pos = np.searchsorted(poses_time, idx, side='right') - 1
+        while pose_idx + 1 < poses_time_len and poses_time[pose_idx + 1] <= idx:
+            pose_idx += 1
+        pos = pose_idx
+        # pos = np.searchsorted(poses_time, idx, side='right') - 1

         rear_axle_pose = StateSE3(
             x=poses[pos, 4],
@@ -454,7 +466,7 @@ def _extract_ego_state_all(log_name: str) -> List[List[float]]:
             pitch=pitch,
             yaw=yaw,
         )
-        # NOTE: The height to rear axle is not provided the dataset and is merely approximated.
+
         center = rear_axle_se3_to_center_se3(rear_axle_se3=rear_axle_pose, vehicle_parameters=vehicle_parameters)
         dynamic_state = DynamicStateSE3(
             velocity=Vector3D(
@@ -483,12 +495,10 @@ def _extract_ego_state_all(log_name: str) -> List[List[float]]:
     )
     return ego_state_all

-#TODO
-# We may distinguish between image and lidar detections
-# besides, now it is based only on start and end frame
 def _extract_detections(
     log_name: str,
-    ts_len: int
+    ts_len: int,
+    ego_states_xyz: np.ndarray
 ) -> Tuple[List[List[float]], List[List[float]], List[str], List[int]]:

     detections_states: List[List[List[float]]] = [[] for _ in range(ts_len)]
@@ -505,15 +515,16 @@ def _extract_detections(

     dynamic_groups: Dict[int, List[KITTI360Bbox3D]] = defaultdict(list)

-    lidra_data_all = []
-    for index in range(ts_len):
-        lidar_full_path = PATH_3D_RAW_ROOT / log_name / "velodyne_points" / "data" / f"{index:010d}.bin"
-        if not lidar_full_path.exists():
-            logging.warning(f"LiDAR file not found for frame {index}: {lidar_full_path}")
-            continue
-        lidar_data = np.fromfile(lidar_full_path, dtype=np.float32)
-        lidar_data = lidar_data.reshape(-1, 4)[:, :3]  # Keep only x, y, z coordinates
-        lidra_data_all.append(lidar_data)
+
+    # lidra_data_all = []
+    # for index in range(ts_len):
+    #     lidar_full_path = PATH_3D_RAW_ROOT / log_name / "velodyne_points" / "data" / f"{index:010d}.bin"
+    #     if not lidar_full_path.exists():
+    #         logging.warning(f"LiDAR file not found for frame {index}: {lidar_full_path}")
+    #         continue
+    #     lidar_data = np.fromfile(lidar_full_path, dtype=np.float32)
+    #     lidar_data = lidar_data.reshape(-1, 4)[:, :3]  # Keep only x, y, z coordinates
+    #     lidra_data_all.append(lidar_data)

     for child in root:
         label = child.find('label').text
@@ -524,11 +535,13 @@ def _extract_detections(

         #static object
         if obj.timestamp == -1:
-            start_frame = obj.start_frame
-            end_frame = obj.end_frame
-            for frame in range(start_frame, end_frame + 1):
-                lidar_data = lidra_data_all[frame]
-                #TODO check yaw and box visible
+            # first filter by radius
+            obj.filter_by_radius(ego_states_xyz,radius=50.0)
+            # then filter by point cloud
+            for frame in obj.valid_radius_frames:
+                # TODO: re-enable the point-cloud visibility check once it is fast enough; the server CPUs are busy right now.
+                # Alternatively, gate it behind a config option.
+                # lidar_data = lidra_data_all[frame]
                 # if obj.box_visible_in_point_cloud(lidar_data):
                 detections_states[frame].append(obj.get_state_array())
                 detections_velocity[frame].append([0.0, 0.0, 0.0])
                 detections_tokens[frame].append(str(obj.globalID))
@@ -553,8 +566,7 @@ def _extract_detections(
             if dt_frames > 0:
                 dt = dt_frames * KITTI360_DT
                 vel = (positions[i+1] - positions[i-1]) / dt
-                # Transform velocity to the ego frame
-                vel = obj_list[i].Rm.T @ vel
+                vel = KITTI3602NUPLAN_IMU_CALIBRATION[:3,:3] @ obj_list[i].Rm.T @ vel
             else:
                 vel = np.zeros(3)
             velocities.append(vel)
@@ -573,7 +585,6 @@ def _extract_detections(
             detections_tokens[frame].append(str(obj.globalID))
             detections_types[frame].append(int(KIITI360_DETECTION_NAME_DICT[obj.label]))

-
     return detections_states, detections_velocity, detections_tokens, detections_types

 #TODO lidar extraction now only velo
diff --git a/d123/dataset/dataset_specific/kitti_360/kitti_360_helper.py b/d123/dataset/dataset_specific/kitti_360/kitti_360_helper.py
index dc1d10cf..d4622867 100644
--- a/d123/dataset/dataset_specific/kitti_360/kitti_360_helper.py
+++ b/d123/dataset/dataset_specific/kitti_360/kitti_360_helper.py
@@ -41,6 +41,7 @@ def __init__(self):
         # the window that contains the bbox
         self.start_frame = -1
         self.end_frame = -1
+        self.valid_radius_frames = []

         # timestamp of the bbox (-1 if static)
         self.timestamp = -1
@@ -70,8 +71,8 @@ def parseBbox(self, child):
         self.semanticId = kittiId2label[semanticIdKITTI].id
         self.instanceId = int(child.find('instanceId').text)
         self.name = kittiId2label[semanticIdKITTI].name
-
-        self.start_frame = int(child.find('start_frame').text)
+
+        self.start_frame = int(child.find('start_frame').text)
         self.end_frame = int(child.find('end_frame').text)
         self.timestamp = int(child.find('timestamp').text)
@@ -126,6 +127,15 @@ def get_state_array(self):

         return bounding_box_se3.array

+    def filter_by_radius(self,ego_state_xyz,radius=50.0):
+        # first stage of detection, used to filter out detections by radius
+
+        for index in range(len(ego_state_xyz)):
+            ego_state = ego_state_xyz[index]
+            distance = np.linalg.norm(ego_state[:3] - self.T)
+            if distance <= radius:
+                self.valid_radius_frames.append(index)
+
     def box_visible_in_point_cloud(self, points):
         # points: (N,3) , box: (8,3)
         box = self.vertices
diff --git a/docs/datasets/kitti-360.rst b/docs/datasets/kitti-360.rst
index 76100d27..5846e53b 100644
--- a/docs/datasets/kitti-360.rst
+++ b/docs/datasets/kitti-360.rst
@@ -7,12 +7,12 @@ KiTTI-360
     :alt: Dataset sample image
     :width: 290px

-   | **Paper:** `Name of Paper `_
-   | **Download:** `Documentation `_
-   | **Code:** [Code]
-   | **Documentation:** [License type]
+   | **Paper:** `KITTI-360: A Novel Dataset and Benchmarks for Urban Scene Understanding in 2D and 3D <https://arxiv.org/abs/2109.13410>`_
+   | **Download:** `www.cvlibs.net/datasets/kitti-360 <https://www.cvlibs.net/datasets/kitti-360>`_
+   | **Code:** `www.github.com/autonomousvision/kitti360Scripts <https://github.com/autonomousvision/kitti360Scripts>`_
+   | **Documentation:** `KITTI-360 documentation <https://www.cvlibs.net/datasets/kitti-360/documentation.php>`_
    | **License:** [License type]
-   | **Duration:** [Duration here]
+   | **Duration:** 320k images
    | **Supported Versions:** [Yes/No/Conditions]
    | **Redistribution:** [Yes/No/Conditions]
diff --git a/jbwang_test2.py b/jbwang_test2.py
index aa685428..93d86a11 100644
--- a/jbwang_test2.py
+++ b/jbwang_test2.py
@@ -97,6 +97,7 @@

 from d123.dataset.dataset_specific.kitti_360.kitti_360_helper import KITTI360Bbox3D

+#TODO train and train_full
 bbox_3d_path = Path("/nas/datasets/KITTI-360/data_3d_bboxes/train/2013_05_28_drive_0000_sync.xml")

 tree = ET.parse(bbox_3d_path)
@@ -110,12 +111,34 @@
     "bicycle": DetectionType.BICYCLE,
     "pedestrian": DetectionType.PEDESTRIAN,
 }
-
+# x,y,z = 881.2268115,3247.493293,115.239219
+# x,y,z = 867.715474,3229.630439,115.189221 # ego vehicle
+# x,y,z = 873.533508, 3227.16235, 115.185341 # the pedestrian we are looking for
+x,y,z = 874.233508, 3231.56235, 115.185341 # the vehicle we are looking for
+CENTER_REF = np.array([x, y, z], dtype=np.float64)
+objs_name = []
 for child in root:
     label = child.find('label').text
     if child.find('transform') is None or label not in KIITI360_DETECTION_NAME_DICT.keys():
         continue
     obj = KITTI360Bbox3D()
     obj.parseBbox(child)
+    # obj.parseVertices(child)
+    name = child.find('label').text
+    # if obj.start_frame < 10030 and obj.end_frame > 10030:
+    center = np.array(obj.T, dtype=np.float64)
+    dist = np.linalg.norm(center - CENTER_REF)
+    if dist < 7:
+        print(f"Object ID: {obj.name}, Start Frame: {obj.start_frame}, End Frame: {obj.end_frame},self.annotationId: {obj.annotationId},{obj.timestamp},{obj.T}")
+        objs_name.append(obj.name)
+print(len(objs_name))
+print(set(objs_name))
     # print(obj.Rm)
-    # print(Sigma)
\ No newline at end of file
+    # print(Sigma)
+names = []
+for child in root:
+    label = child.find('label').text
+    if child.find('transform') is None:
+        continue
+    names.append(label)
+print(set(names))
\ No newline at end of file
diff --git a/notebooks/dataset/jbwang_test.py b/notebooks/dataset/jbwang_test.py
index c2cabfbe..c37d8d40 100644
--- a/notebooks/dataset/jbwang_test.py
+++ b/notebooks/dataset/jbwang_test.py
@@ -2,7 +2,9 @@
 # s3_uri = "/data/jbwang/d123/data/nuplan_private_test/2021.09.22.13.20.34_veh-28_01446_01583.arrow"
 # s3_uri = "/data/jbwang/d123/data/carla/_Rep0_routes_validation1_route0_07_23_14_33_15.arrow"
 # s3_uri = "/data/jbwang/d123/data/nuplan_mini_val/2021.06.07.12.54.00_veh-35_01843_02314.arrow"
-s3_uri = "/data/jbwang/d123/data2/kitti360_c2e_train/2013_05_28_drive_0000_sync_c2e.arrow"
+# s3_uri = "/data/jbwang/d123/data2/kitti360_c2e_train/2013_05_28_drive_0000_sync_c2e.arrow"
+s3_uri = "/data/jbwang/d123/data2/kitti360_detection_all_test/2013_05_28_drive_0000_sync.arrow"
+

 import pyarrow as pa
 import pyarrow.fs as fs
@@ -35,10 +37,14 @@
     if col == "lidar":
         continue
     print(f"Column : {col}, Type: {table.schema.field(col).type}")
-    tokens = table[col] # or table.column("token")
+    tokens = table["detections_velocity"] # or table.column("token")
+    # tokens = table["detections_type"]
     # print(tokens)
-    print(len(tokens))
-    # print(tokens.slice(0, 100).to_pylist())
+    # print(len(tokens))
+    result = tokens.slice(1470, 40).to_pylist()
+    # for item in result:
+    #     print(len(item))
+print(result)
 # print(table["traffic_light_ids"])
 timer.log("3. 
Table created") # Save locally diff --git a/notebooks/gym/jbwang_test.py b/notebooks/gym/jbwang_test.py new file mode 100644 index 00000000..663e2899 --- /dev/null +++ b/notebooks/gym/jbwang_test.py @@ -0,0 +1,180 @@ +from d123.dataset.scene.scene_builder import ArrowSceneBuilder +from d123.dataset.scene.scene_filter import SceneFilter + +from d123.common.multithreading.worker_sequential import Sequential +# from d123.common.multithreading.worker_ray import RayDistributed + +import os, psutil + +from pathlib import Path +from typing import Optional, Tuple + +import matplotlib.animation as animation +import matplotlib.pyplot as plt +from tqdm import tqdm + +from d123.common.datatypes.vehicle_state.ego_state import EgoStateSE2 +from d123.common.geometry.base import Point2D, StateSE2 +from d123.common.geometry.bounding_box.bounding_box import BoundingBoxSE2 +from d123.common.visualization.color.default import EGO_VEHICLE_CONFIG +from d123.common.visualization.matplotlib.observation import ( + add_bounding_box_to_ax, + add_box_detections_to_ax, + add_default_map_on_ax, + add_traffic_lights_to_ax, + add_ego_vehicle_to_ax, +) +from d123.dataset.arrow.conversion import TrafficLightDetectionWrapper +from d123.dataset.maps.abstract_map import AbstractMap +from d123.common.datatypes.detection.detection import BoxDetectionWrapper +from d123.dataset.scene.abstract_scene import AbstractScene +import io +from PIL import Image + + + +def _plot_scene_on_ax( + ax: plt.Axes, + map_api: AbstractMap, + ego_state: EgoStateSE2, + initial_ego_state: Optional[EgoStateSE2], + box_detections: BoxDetectionWrapper, + traffic_light_detections: TrafficLightDetectionWrapper, + radius: float = 120, +) -> plt.Axes: + + if initial_ego_state is not None: + point_2d = initial_ego_state.center.point_2d + else: + point_2d = ego_state.center.point_2d + add_default_map_on_ax(ax, map_api, point_2d, radius=radius) + add_traffic_lights_to_ax(ax, traffic_light_detections, map_api) + + add_box_detections_to_ax(ax, box_detections) + add_ego_vehicle_to_ax(ax, ego_state) + + ax.set_xlim(point_2d.x - radius, point_2d.x + radius) + ax.set_ylim(point_2d.y - radius, point_2d.y + radius) + + ax.set_aspect("equal", adjustable="box") + return ax + + +def plot_scene_to_image( + map_api: AbstractMap, + ego_state: EgoStateSE2, + initial_ego_state: Optional[EgoStateSE2], + box_detections: BoxDetectionWrapper, + traffic_light_detections: TrafficLightDetectionWrapper, + radius: float = 120, + figsize: Tuple[int, int] = (8, 8), +) -> Image: + + fig, ax = plt.subplots(figsize=figsize) + _plot_scene_on_ax(ax, map_api, ego_state, initial_ego_state, box_detections, traffic_light_detections, radius) + ax.set_aspect("equal", adjustable="box") + plt.subplots_adjust(left=0.05, right=0.95, top=0.95, bottom=0.05) + # plt.tight_layout() + + buf = io.BytesIO() + fig.savefig(buf, format="png", bbox_inches="tight") + plt.close(fig) + buf.seek(0) + img = Image.open(buf) + return img + + +def print_memory_usage(): + process = psutil.Process(os.getpid()) + memory_info = process.memory_info() + print(f"Memory usage: {memory_info.rss / 1024 ** 2:.2f} MB") + + +split = "kitti360_detection_all_and_vel" +scene_tokens = None +log_names = None + +scene_filter = SceneFilter( + split_names=[split], log_names=log_names, scene_tokens=scene_tokens, duration_s=15.1, history_s=1.0 +) +scene_builder = ArrowSceneBuilder("/data/jbwang/d123/data2/") +worker = Sequential() +# worker = RayDistributed() +scenes = scene_builder.get_scenes(scene_filter, worker) + +print(len(scenes)) + +for 
scene in scenes[:10]: + print(scene.log_name, scene.token) + +from d123.dataset.arrow.conversion import DetectionType +from d123.simulation.gym.gym_env import GymEnvironment +from d123.simulation.observation.agents_observation import _filter_agents_by_type + +import time + +images = [] +agent_rollouts = [] +plot: bool = True +action = [1.0, -0.0] # Placeholder action, replace with actual action logic +env = GymEnvironment(scenes) + +start = time.time() + +map_api, ego_state, detection_observation, current_scene = env.reset(scenes[1460]) +initial_ego_state = ego_state +cars, _, _ = _filter_agents_by_type(detection_observation.box_detections, detection_types=[DetectionType.VEHICLE]) +agent_rollouts.append(BoxDetectionWrapper(cars)) +if plot: + images.append( + plot_scene_to_image( + map_api, + ego_state, + initial_ego_state, + detection_observation.box_detections, + detection_observation.traffic_light_detections, + ) + ) + + +for i in range(160): + ego_state, detection_observation, end = env.step(action) + cars, _, _ = _filter_agents_by_type(detection_observation.box_detections, detection_types=[DetectionType.VEHICLE]) + agent_rollouts.append(BoxDetectionWrapper(cars)) + if plot: + images.append( + plot_scene_to_image( + map_api, + ego_state, + initial_ego_state, + detection_observation.box_detections, + detection_observation.traffic_light_detections, + ) + ) + if end: + print("End of scene reached.") + break + +time_s = time.time() - start +print(time_s) +print(151/ time_s) + +import numpy as np + + +def create_gif(images, output_path, duration=100): + """ + Create a GIF from a list of PIL images. + + Args: + images (list): List of PIL.Image objects. + output_path (str): Path to save the GIF. + duration (int): Duration between frames in milliseconds. 
+ """ + if images: + print(len(images)) + images_p = [img.convert("P", palette=Image.ADAPTIVE) for img in images] + images_p[0].save(output_path, save_all=True, append_images=images_p[1:], duration=duration, loop=0) + + +create_gif(images, f"/data/jbwang/d123/data2/{split}_{current_scene.token}.gif", duration=20) \ No newline at end of file From d1945b0c470c653f893b65026be80ef66336767d Mon Sep 17 00:00:00 2001 From: jbwang <1159270049@qq.com> Date: Thu, 21 Aug 2025 19:01:27 +0800 Subject: [PATCH 010/145] finish lidar vis and fix some bugs --- .gitignore | 2 + d123/common/datatypes/sensor/camera.py | 48 +++++++ .../vehicle_state/vehicle_parameters.py | 9 +- d123/common/visualization/viser/server.py | 10 +- d123/dataset/arrow/conversion.py | 6 +- .../kitti_360/kitti_360_data_converter.py | 124 ++++++++++-------- .../kitti_360/kitti_360_helper.py | 24 ++++ .../dataset_specific/kitti_360/labels.py | 40 ++++++ .../dataset_specific/kitti_360/load_sensor.py | 27 ++++ jbwang_test2.py | 10 +- 10 files changed, 232 insertions(+), 68 deletions(-) create mode 100644 d123/dataset/dataset_specific/kitti_360/load_sensor.py diff --git a/.gitignore b/.gitignore index 426cc468..971a12d1 100644 --- a/.gitignore +++ b/.gitignore @@ -30,3 +30,5 @@ docs/_build/ docs/build/ _build/ .doctrees/ + +jbwang_* diff --git a/d123/common/datatypes/sensor/camera.py b/d123/common/datatypes/sensor/camera.py index 56fe6f07..c2a33d9d 100644 --- a/d123/common/datatypes/sensor/camera.py +++ b/d123/common/datatypes/sensor/camera.py @@ -104,6 +104,54 @@ def camera_metadata_dict_from_json(json_dict: Dict[str, Dict[str, Any]]) -> Dict for camera_type, metadata in camera_metadata_dict.items() } +#TODO Code Refactoring +@dataclass +class FisheyeMEICameraMetadata: + camera_type: CameraType + width: int + height: int + mirror_parameters: int + distortion: npt.NDArray[np.float64] # k1,k2,p1,p2 + projection_parameters: npt.NDArray[np.float64] #gamma1,gamma2,u0,v0 + + def to_dict(self) -> Dict[str, Any]: + # TODO: remove None types. Only a placeholder for now. 
+ return { + "camera_type": int(self.camera_type), + "width": self.width, + "height": self.height, + "mirror_parameters": self.mirror_parameters, + "distortion": self.distortion.tolist() if self.distortion is not None else None, + "projection_parameters": self.projection_parameters.tolist() if self.projection_parameters is not None else None, + } + + def cam2image(self, points_3d: npt.NDArray[np.float64]) -> npt.NDArray[np.float64]: + ''' camera coordinate to image plane ''' + norm = np.linalg.norm(points_3d, axis=1) + + x = points_3d[:,0] / norm + y = points_3d[:,1] / norm + z = points_3d[:,2] / norm + + x /= z+self.mirror_parameters + y /= z+self.mirror_parameters + + k1 = self.distortion[0] + k2 = self.distortion[1] + gamma1 = self.projection_parameters[0] + gamma2 = self.projection_parameters[1] + u0 = self.projection_parameters[2] + v0 = self.projection_parameters[3] + + ro2 = x*x + y*y + x *= 1 + k1*ro2 + k2*ro2*ro2 + y *= 1 + k1*ro2 + k2*ro2*ro2 + + x = gamma1*x + u0 + y = gamma2*y + v0 + + return x, y, norm * points_3d[:,2] / np.abs(points_3d[:,2]) + @dataclass class Camera: diff --git a/d123/common/datatypes/vehicle_state/vehicle_parameters.py b/d123/common/datatypes/vehicle_state/vehicle_parameters.py index 5adda6b7..21a91668 100644 --- a/d123/common/datatypes/vehicle_state/vehicle_parameters.py +++ b/d123/common/datatypes/vehicle_state/vehicle_parameters.py @@ -60,15 +60,16 @@ def get_wopd_chrysler_pacifica_parameters() -> VehicleParameters: ) def get_kitti360_station_wagon_parameters() -> VehicleParameters: - #TODO except wheel_base, all need to be checked + #NOTE: Parameters are estimated from the vehicle model. + #https://www.cvlibs.net/datasets/kitti-360/documentation.php return VehicleParameters( vehicle_name="kitti360_station_wagon", - width=2.297, - length=5.176, + width=1.800, + length=3.500, height=1.400, wheel_base=2.710, rear_axle_to_center_vertical=0.45, - rear_axle_to_center_longitudinal=1.461, + rear_axle_to_center_longitudinal=2.71/2 + 0.05, ) def get_av2_ford_fusion_hybrid_parameters() -> VehicleParameters: diff --git a/d123/common/visualization/viser/server.py b/d123/common/visualization/viser/server.py index cdca86c4..7511cdbc 100644 --- a/d123/common/visualization/viser/server.py +++ b/d123/common/visualization/viser/server.py @@ -33,23 +33,23 @@ LINE_WIDTH: float = 4.0 # Bounding box config: -BOUNDING_BOX_TYPE: Literal["mesh", "lines"] = "mesh" +BOUNDING_BOX_TYPE: Literal["mesh", "lines"] = "lines" # Map config: -MAP_AVAILABLE: bool = True +MAP_AVAILABLE: bool = False # Cameras config: -VISUALIZE_CAMERA_FRUSTUM: List[CameraType] = [CameraType.CAM_F0, CameraType.CAM_L0, CameraType.CAM_R0] +# VISUALIZE_CAMERA_FRUSTUM: List[CameraType] = [CameraType.CAM_F0, CameraType.CAM_L0, CameraType.CAM_R0] # VISUALIZE_CAMERA_FRUSTUM: List[CameraType] = all_camera_types -# VISUALIZE_CAMERA_FRUSTUM: List[CameraType] = [CameraType.CAM_STEREO_L, CameraType.CAM_STEREO_R] +VISUALIZE_CAMERA_FRUSTUM: List[CameraType] = [CameraType.CAM_STEREO_L, CameraType.CAM_STEREO_R] # VISUALIZE_CAMERA_FRUSTUM: List[CameraType] = [] VISUALIZE_CAMERA_GUI: List[CameraType] = [CameraType.CAM_F0] CAMERA_SCALE: float = 1.0 # Lidar config: -LIDAR_AVAILABLE: bool = False +LIDAR_AVAILABLE: bool = True LIDAR_TYPES: List[LiDARType] = [ LiDARType.LIDAR_MERGED, diff --git a/d123/dataset/arrow/conversion.py b/d123/dataset/arrow/conversion.py index 2429f56f..d9e5e664 100644 --- a/d123/dataset/arrow/conversion.py +++ b/d123/dataset/arrow/conversion.py @@ -34,7 +34,7 @@ "nuplan": 
Path(os.environ["NUPLAN_DATA_ROOT"]) / "nuplan-v1.1" / "sensor_blobs", "carla": Path(os.environ["CARLA_DATA_ROOT"]) / "sensor_blobs", # "av2-sensor": Path(os.environ["AV2_SENSOR_DATA_ROOT"]) / "sensor", - # "kitti360": Path(os.environ["KITTI360_DATA_ROOT"]), + "kitti360": Path(os.environ["KITTI360_DATA_ROOT"]), } @@ -155,6 +155,10 @@ def get_lidar_from_arrow_table( lidar = load_carla_lidar_from_path(full_lidar_path, lidar_metadata) elif log_metadata.dataset == "wopd": raise NotImplementedError + elif log_metadata.dataset == "kitti360": + from d123.dataset.dataset_specific.kitti_360.load_sensor import load_kitti360_lidar_from_path + + lidar = load_kitti360_lidar_from_path(full_lidar_path, lidar_metadata) else: raise NotImplementedError(f"Loading LiDAR data for dataset {log_metadata.dataset} is not implemented.") diff --git a/d123/dataset/dataset_specific/kitti_360/kitti_360_data_converter.py b/d123/dataset/dataset_specific/kitti_360/kitti_360_data_converter.py index 81057042..77f3fff0 100644 --- a/d123/dataset/dataset_specific/kitti_360/kitti_360_data_converter.py +++ b/d123/dataset/dataset_specific/kitti_360/kitti_360_data_converter.py @@ -1,6 +1,8 @@ import gc import json import os +import re +import yaml from dataclasses import asdict from functools import partial from pathlib import Path @@ -18,7 +20,7 @@ from nuplan.planning.utils.multithreading.worker_utils import WorkerPool, worker_map from d123.common.datatypes.detection.detection_types import DetectionType -from d123.common.datatypes.sensor.camera import CameraMetadata, CameraType, camera_metadata_dict_to_json +from d123.common.datatypes.sensor.camera import CameraMetadata, FisheyeMEICameraMetadata, CameraType, camera_metadata_dict_to_json from d123.common.datatypes.sensor.lidar import LiDARMetadata, LiDARType, lidar_metadata_dict_to_json from d123.common.datatypes.sensor.lidar_index import Kitti360LidarIndex from d123.common.datatypes.time.time_point import TimePoint @@ -30,18 +32,18 @@ from d123.dataset.arrow.helper import open_arrow_table, write_arrow_table from d123.dataset.dataset_specific.raw_data_converter import DataConverterConfig, RawDataConverter from d123.dataset.logs.log_metadata import LogMetadata -from d123.dataset.dataset_specific.kitti_360.kitti_360_helper import KITTI360Bbox3D +from d123.dataset.dataset_specific.kitti_360.kitti_360_helper import KITTI360Bbox3D, KITTI3602NUPLAN_IMU_CALIBRATION +from d123.dataset.dataset_specific.kitti_360.labels import KIITI360_DETECTION_NAME_DICT,kittiId2label KITTI360_DT: Final[float] = 0.1 SORT_BY_TIMESTAMP: Final[bool] = True KITTI360_DATA_ROOT = Path(os.environ["KITTI360_DATA_ROOT"]) -#TODO cameraType KITTI360_CAMERA_TYPES = { - CameraType.CAM_L0: "image_00", - CameraType.CAM_R0: "image_01", - # TODO fisheye camera + CameraType.CAM_STEREO_L: "image_00", + CameraType.CAM_STEREO_R: "image_01", + # TODO need code refactoring to support fisheye cameras # CameraType.CAM_L1: "image_02", # CameraType.CAM_R1: "image_03", } @@ -71,31 +73,6 @@ DIR_3D_BBOX: PATH_3D_BBOX_ROOT / "train", } -#TODO now only parts of labels are used -KIITI360_DETECTION_NAME_DICT = { - "truck": DetectionType.VEHICLE, - "bus": DetectionType.VEHICLE, - "car": DetectionType.VEHICLE, - "motorcycle": DetectionType.BICYCLE, - "bicycle": DetectionType.BICYCLE, - "pedestrian": DetectionType.PEDESTRIAN, -} - -KITTI3602NUPLAN_IMU_CALIBRATION = np.array([ - [1, 0, 0, 0], - [0, -1, 0, 0], - [0, 0, -1, 0], - [0, 0, 0, 1], - ], dtype=np.float64) - -KITTI3602NUPLAN_LIDAR_CALIBRATION = np.array([ - [0, -1, 0, 0], - [1, 0, 0, 
0], - [0, 0, 1, 0], - [0, 0, 0, 1], - ], dtype=np.float64) - - def create_token(input_data: str) -> str: # TODO: Refactor this function. # TODO: Add a general function to create tokens from arbitrary data. @@ -266,12 +243,12 @@ def convert_kitti360_log_to_arrow( return [] -def get_kitti360_camera_metadata() -> Dict[CameraType, CameraMetadata]: +def get_kitti360_camera_metadata() -> Dict[CameraType, Union[CameraMetadata, FisheyeMEICameraMetadata]]: persp = PATH_CALIB_ROOT / "perspective.txt" assert persp.exists() - result = {"image_00": {}, "image_01": {}} + persp_result = {"image_00": {}, "image_01": {}} with open(persp, "r") as f: lines = [ln.strip() for ln in f if ln.strip()] @@ -279,21 +256,39 @@ def get_kitti360_camera_metadata() -> Dict[CameraType, CameraMetadata]: key, value = ln.split(" ", 1) cam_id = key.split("_")[-1][:2] if key.startswith("P_rect_"): - result[f"image_{cam_id}"]["intrinsic"] = _read_projection_matrix(ln) + persp_result[f"image_{cam_id}"]["intrinsic"] = _read_projection_matrix(ln) elif key.startswith("S_rect_"): - result[f"image_{cam_id}"]["wh"] = [int(round(float(x))) for x in value.split()] + persp_result[f"image_{cam_id}"]["wh"] = [int(round(float(x))) for x in value.split()] elif key.startswith("D_"): - result[f"image_{cam_id}"]["distortion"] = [float(x) for x in value.split()] + persp_result[f"image_{cam_id}"]["distortion"] = [float(x) for x in value.split()] + + fisheye_camera02_path = PATH_CALIB_ROOT / "image_02.yaml" + fisheye_camera03_path = PATH_CALIB_ROOT / "image_03.yaml" + assert fisheye_camera02_path.exists() and fisheye_camera03_path.exists() + fisheye02 = _readYAMLFile(fisheye_camera02_path) + fisheye03 = _readYAMLFile(fisheye_camera03_path) + fisheye_result = {"image_02": fisheye02, "image_03": fisheye03} - log_cam_infos: Dict[str, CameraMetadata] = {} + log_cam_infos: Dict[str, Union[CameraMetadata, FisheyeMEICameraMetadata]] = {} for cam_type, cam_name in KITTI360_CAMERA_TYPES.items(): - log_cam_infos[cam_type] = CameraMetadata( - camera_type=cam_type, - width=result[cam_name]["wh"][0], - height=result[cam_name]["wh"][1], - intrinsic=np.array(result[cam_name]["intrinsic"]), - distortion=np.array(result[cam_name]["distortion"]), - ) + if cam_name in ["image_00", "image_01"]: + log_cam_infos[cam_type] = CameraMetadata( + camera_type=cam_type, + width=persp_result[cam_name]["wh"][0], + height=persp_result[cam_name]["wh"][1], + intrinsic=np.array(persp_result[cam_name]["intrinsic"]), + distortion=np.array(persp_result[cam_name]["distortion"]), + ) + elif cam_name in ["image_02","image_03"]: + log_cam_infos[cam_type] = FisheyeMEICameraMetadata( + camera_type=cam_type, + width=fisheye_result[cam_name]["image_width"], + height=fisheye_result[cam_name]["image_height"], + mirror_parameters=fisheye_result[cam_name]["mirror_parameters"], + distortion=np.array(fisheye_result[cam_name]["distortion_parameters"]), + projection_parameters= np.array(fisheye_result[cam_name]["projection_parameters"]), + ) + return log_cam_infos def _read_projection_matrix(p_line: str) -> np.ndarray: @@ -305,6 +300,19 @@ def _read_projection_matrix(p_line: str) -> np.ndarray: K = P[:, :3] return K +def _readYAMLFile(fileName): + '''make OpenCV YAML file compatible with python''' + ret = {} + skip_lines=1 # Skip the first line which says "%YAML:1.0". Or replace it with "%YAML 1.0" + with open(fileName) as fin: + for i in range(skip_lines): + fin.readline() + yamlFileOut = fin.read() + myRe = re.compile(r":([^ ])") # Add space after ":", if it doesn't exist. 
Python yaml requirement
+    yamlFileOut = myRe.sub(r': \1', yamlFileOut)
+    ret = yaml.safe_load(yamlFileOut)
+    return ret
+

 def get_kitti360_lidar_metadata() -> Dict[LiDARType, LiDARMetadata]:
     metadata: Dict[LiDARType, LiDARMetadata] = {}
@@ -326,9 +334,7 @@ def get_kitti360_lidar_metadata() -> Dict[LiDARType, LiDARMetadata]:
     cam2pose = KITTI3602NUPLAN_IMU_CALIBRATION @ cam2pose

     cam2velo = np.concatenate((np.loadtxt(cam2velo_txt).reshape(3,4), lastrow))
-    cam2velo = KITTI3602NUPLAN_LIDAR_CALIBRATION @ cam2velo
-
-    extrinsic = cam2velo @ np.linalg.inv(cam2pose)
+    extrinsic = cam2pose @ np.linalg.inv(cam2velo)

     metadata[LiDARType.LIDAR_TOP] = LiDARMetadata(
         lidar_type=LiDARType.LIDAR_TOP,
@@ -449,14 +455,14 @@ def _extract_ego_state_all(log_name: str) -> List[List[float]]:
         oxts_path_file = oxts_path / f"{int(idx):010d}.txt"
         oxts_data = np.loadtxt(oxts_path_file)

-        #TODO check roll, pitch, yaw
+        #TODO check roll, pitch, yaw again
         roll, pitch, yaw = oxts_data[3:6]
         vehicle_parameters = get_kitti360_station_wagon_parameters()

-        while pose_idx + 1 < poses_time_len and poses_time[pose_idx + 1] <= idx:
+        while pose_idx + 1 < poses_time_len and poses_time[pose_idx + 1] < idx:
             pose_idx += 1
         pos = pose_idx
-        # pos = np.searchsorted(poses_time, idx, side='right') - 1
+        # pos = np.searchsorted(poses_time, idx, side='right') - 1

         rear_axle_pose = StateSE3(
             x=poses[pos, 4],
@@ -527,8 +533,9 @@ def _extract_detections(
     #     lidra_data_all.append(lidar_data)

     for child in root:
-        label = child.find('label').text
-        if child.find('transform') is None or label not in KIITI360_DETECTION_NAME_DICT.keys():
+        semanticIdKITTI = int(child.find('semanticId').text)
+        name = kittiId2label[semanticIdKITTI].name
+        if child.find('transform') is None or name not in KIITI360_DETECTION_NAME_DICT.keys():
             continue
         obj = KITTI360Bbox3D()
         obj.parseBbox(child)
@@ -546,7 +553,7 @@ def _extract_detections(
                 detections_states[frame].append(obj.get_state_array())
                 detections_velocity[frame].append([0.0, 0.0, 0.0])
                 detections_tokens[frame].append(str(obj.globalID))
-                detections_types[frame].append(int(KIITI360_DETECTION_NAME_DICT[obj.label]))
+                detections_types[frame].append(int(KIITI360_DETECTION_NAME_DICT[obj.name]))
             else:
                 ann_id = obj.annotationId
                 dynamic_groups[ann_id].append(obj)
@@ -583,7 +590,7 @@ def _extract_detections(
             detections_states[frame].append(obj.get_state_array())
             detections_velocity[frame].append(vel)
             detections_tokens[frame].append(str(obj.globalID))
-            detections_types[frame].append(int(KIITI360_DETECTION_NAME_DICT[obj.label]))
+            detections_types[frame].append(int(KIITI360_DETECTION_NAME_DICT[obj.name]))

     return detections_states, detections_velocity, detections_tokens, detections_types

@@ -593,7 +600,7 @@ def _extract_lidar(log_name: str, idx: int, data_converter_config: DataConverter
     lidar_full_path = PATH_3D_RAW_ROOT / log_name / "velodyne_points" / "data" / f"{idx:010d}.bin"
     if lidar_full_path.exists():
         if data_converter_config.lidar_store_option == "path":
-            lidar = f"/data_3d_raw/{log_name}/velodyne_points/data/{idx:010d}.bin"
+            lidar = f"data_3d_raw/{log_name}/velodyne_points/data/{idx:010d}.bin"
         elif data_converter_config.lidar_store_option == "binary":
             raise NotImplementedError("Binary lidar storage is not implemented.")
         else:
@@ -606,9 +613,12 @@ def _extract_cameras(
     camera_dict: Dict[str, Union[str, bytes]] = {}

     for camera_type, cam_dir_name in KITTI360_CAMERA_TYPES.items():
-        img_path_png = PATH_2D_RAW_ROOT / log_name / cam_dir_name / "data_rect" / f"{idx:010d}.png"
+        if cam_dir_name in ["image_00", "image_01"]:
"image_01"]: + img_path_png = PATH_2D_RAW_ROOT / log_name / cam_dir_name / "data_rect" / f"{idx:010d}.png" + elif cam_dir_name in ["image_02", "image_03"]: + img_path_png = PATH_2D_RAW_ROOT / log_name / cam_dir_name / "data_rgb" / f"{idx:010d}.png" + if img_path_png.exists(): - cam2pose_txt = PATH_CALIB_ROOT / "calib_cam_to_pose.txt" if not cam2pose_txt.exists(): raise FileNotFoundError(f"calib_cam_to_pose.txt file not found: {cam2pose_txt}") diff --git a/d123/dataset/dataset_specific/kitti_360/kitti_360_helper.py b/d123/dataset/dataset_specific/kitti_360/kitti_360_helper.py index d4622867..5c69264f 100644 --- a/d123/dataset/dataset_specific/kitti_360/kitti_360_helper.py +++ b/d123/dataset/dataset_specific/kitti_360/kitti_360_helper.py @@ -7,11 +7,33 @@ from d123.common.geometry.base import StateSE3 from d123.common.geometry.bounding_box.bounding_box import BoundingBoxSE3 +from d123.common.geometry.transform.se3 import get_rotation_matrix from d123.dataset.dataset_specific.kitti_360.labels import kittiId2label DEFAULT_ROLL = 0.0 DEFAULT_PITCH = 0.0 +addtional_calibration = get_rotation_matrix( + StateSE3( + x=0.0, + y=0.0, + z=0.0, + roll=np.deg2rad(1.0), + pitch=np.deg2rad(1.0), + yaw=np.deg2rad(0.0), + ) + ) + +kitti3602nuplan_imu_calibration_ideal = np.array([ + [1, 0, 0, 0], + [0, -1, 0, 0], + [0, 0, -1, 0], + [0, 0, 0, 1], + ], dtype=np.float64) + +KITTI3602NUPLAN_IMU_CALIBRATION = np.eye(4, dtype=np.float64) +KITTI3602NUPLAN_IMU_CALIBRATION[:3, :3] = addtional_calibration @ kitti3602nuplan_imu_calibration_ideal[:3, :3] + MAX_N = 1000 def local2global(semanticId, instanceId): globalId = semanticId*MAX_N + instanceId @@ -99,6 +121,8 @@ def parseVertices(self, child): def parse_scale_rotation(self): Rm, Sm = polar(self.R) + if np.linalg.det(Rm) < 0: + Rm[0] = -Rm[0] scale = np.diag(Sm) yaw, pitch, roll = R.from_matrix(Rm).as_euler('zyx', degrees=False) diff --git a/d123/dataset/dataset_specific/kitti_360/labels.py b/d123/dataset/dataset_specific/kitti_360/labels.py index 38f8a91c..de24f152 100644 --- a/d123/dataset/dataset_specific/kitti_360/labels.py +++ b/d123/dataset/dataset_specific/kitti_360/labels.py @@ -166,3 +166,43 @@ def assureSingleInstanceName( name ): return None # all good then return name + +from d123.common.datatypes.detection.detection_types import DetectionType + +KIITI360_DETECTION_NAME_DICT = { + "traffic light": DetectionType.SIGN, + "traffic sign": DetectionType.SIGN, + "person": DetectionType.PEDESTRIAN, + "rider": DetectionType.BICYCLE, + "car": DetectionType.VEHICLE, + "truck": DetectionType.VEHICLE, + "bus": DetectionType.VEHICLE, + "caravan": DetectionType.VEHICLE, + "trailer": DetectionType.VEHICLE, + "train": DetectionType.VEHICLE, + "motorcycle": DetectionType.BICYCLE, + "bicycle": DetectionType.BICYCLE, + "stop": DetectionType.SIGN, +} + +# KIITI360_DETECTION_NAME_DICT = { +# "pole": DetectionType.GENERIC_OBJECT, +# "traffic light": DetectionType.SIGN, +# "traffic sign": DetectionType.SIGN, +# "person": DetectionType.PEDESTRIAN, +# "rider": DetectionType.BICYCLE, +# "car": DetectionType.VEHICLE, +# "truck": DetectionType.VEHICLE, +# "bus": DetectionType.VEHICLE, +# "caravan": DetectionType.VEHICLE, +# "trailer": DetectionType.VEHICLE, +# "train": DetectionType.VEHICLE, +# "motorcycle": DetectionType.BICYCLE, +# "bicycle": DetectionType.BICYCLE, +# "stop": DetectionType.SIGN, +# "smallpole": DetectionType.GENERIC_OBJECT, +# "lamp": DetectionType.GENERIC_OBJECT, +# "trash bin": DetectionType.GENERIC_OBJECT, +# "vending machine": DetectionType.GENERIC_OBJECT, 
+#     "box": DetectionType.GENERIC_OBJECT,
+# }
diff --git a/d123/dataset/dataset_specific/kitti_360/load_sensor.py b/d123/dataset/dataset_specific/kitti_360/load_sensor.py
new file mode 100644
index 00000000..2a23401f
--- /dev/null
+++ b/d123/dataset/dataset_specific/kitti_360/load_sensor.py
@@ -0,0 +1,27 @@
+from pathlib import Path
+
+import numpy as np
+
+from d123.common.datatypes.sensor.lidar import LiDAR, LiDARMetadata
+
+
+def load_kitti360_lidar_from_path(filepath: Path, lidar_metadata: LiDARMetadata) -> LiDAR:
+    assert filepath.exists(), f"LiDAR file not found: {filepath}"
+    pcd = np.fromfile(filepath, dtype=np.float32)
+    pcd = np.reshape(pcd,[-1,4])  # [N,4]
+
+    xyz = pcd[:, :3]
+    intensity = pcd[:, 3]
+
+    ones = np.ones((xyz.shape[0], 1), dtype=pcd.dtype)
+    points_h = np.concatenate([xyz, ones], axis=1)  # [N,4]
+
+    transformed_h = lidar_metadata.extrinsic @ points_h.T  # [4,N]
+
+    transformed_xyz = transformed_h[:3, :]  # (3,N)
+
+    intensity_row = intensity[np.newaxis, :]  # (1,N)
+
+    point_cloud_4xN = np.vstack([transformed_xyz, intensity_row]).astype(np.float32)  # (4,N)
+
+    return LiDAR(metadata=lidar_metadata, point_cloud=point_cloud_4xN)
diff --git a/jbwang_test2.py b/jbwang_test2.py
index 93d86a11..7128a636 100644
--- a/jbwang_test2.py
+++ b/jbwang_test2.py
@@ -117,14 +117,21 @@
 x,y,z = 874.233508, 3231.56235, 115.185341 # the vehicle we are looking for
 CENTER_REF = np.array([x, y, z], dtype=np.float64)
 objs_name = []
+label_name = []
 for child in root:
     label = child.find('label').text
-    if child.find('transform') is None or label not in KIITI360_DETECTION_NAME_DICT.keys():
+    # if child.find('transform') is None or label not in KIITI360_DETECTION_NAME_DICT.keys():
+    #     continue
+
+    if child.find('transform') is None:
         continue
+    print("this label is ",label)
+    print("!!!!!!!!!!!!!!!!!!!")
     obj = KITTI360Bbox3D()
     obj.parseBbox(child)
     # obj.parseVertices(child)
     name = child.find('label').text
+    label_name.append(name)
     # if obj.start_frame < 10030 and obj.end_frame > 10030:
     center = np.array(obj.T, dtype=np.float64)
     dist = np.linalg.norm(center - CENTER_REF)
@@ -133,6 +140,7 @@
         objs_name.append(obj.name)
 print(len(objs_name))
 print(set(objs_name))
+print(set(label_name))
     # print(obj.Rm)
     # print(Sigma)
 names = []

From 571282886c2ab75b2b8f439627a21978d9100ab8 Mon Sep 17 00:00:00 2001
From: Daniel Dauner
Date: Sun, 24 Aug 2025 18:06:24 +0200
Subject: [PATCH 011/145] Adding/testing autodocstrings for geometry
 documentation.

---
 docs/conf.py             | 13 +++++++++++++
 docs/datasets/nuplan.rst |  2 +-
 docs/geometry.rst        | 13 +++++++++++++
 docs/index.rst           |  1 +
 pyproject.toml           |  1 +
 5 files changed, 29 insertions(+), 1 deletion(-)
 create mode 100644 docs/geometry.rst

diff --git a/docs/conf.py b/docs/conf.py
index 9d7e8c5f..f114d331 100644
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -20,6 +20,8 @@
     "sphinx.ext.autodoc",
     "sphinx.ext.autosummary",
     "sphinx.ext.intersphinx",
+    "sphinx.ext.napoleon",
+    "sphinx_copybutton",
     "myst_parser",
 ]

@@ -66,3 +68,14 @@
     "includehidden": True,
     "titles_only": False,
 }
+
+autodoc_typehints = "both"
+autodoc_class_signature = "separated"
+autodoc_default_options = {
+    "members": True,
+    "member-order": "bysource",
+    "undoc-members": True,
+    "inherited-members": True,
+    "exclude-members": "__init__, __post_init__",
+    "imported-members": True,
+}
diff --git a/docs/datasets/nuplan.rst b/docs/datasets/nuplan.rst
index 065c26a5..c1590f03 100644
--- a/docs/datasets/nuplan.rst
+++ b/docs/datasets/nuplan.rst
@@ -1,5 +1,5 @@
 nuPlan
------
+------

.. 
sidebar:: nuPlan diff --git a/docs/geometry.rst b/docs/geometry.rst new file mode 100644 index 00000000..263b7db0 --- /dev/null +++ b/docs/geometry.rst @@ -0,0 +1,13 @@ + +Geometry +======== + +.. autoclass:: d123.common.geometry.base.Point2D() + +.. autoclass:: d123.common.geometry.base.Point3D() + +.. autoclass:: d123.common.geometry.base.StateSE2() + +.. autoclass:: d123.common.geometry.base.StateSE3() + +.. autoclass:: d123.dataset.maps.abstract_map.AbstractMap() diff --git a/docs/index.rst b/docs/index.rst index b169f012..c923847f 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -16,6 +16,7 @@ documentation for details. :caption: Contents: installation + geometry datasets/index schema visualization diff --git a/pyproject.toml b/pyproject.toml index d5419010..ad02829a 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -76,6 +76,7 @@ docs = [ "sphinx-rtd-theme", "sphinx-autobuild", "myst-parser", + "sphinx-copybutton", ] nuplan = [ "nuplan-devkit @ git+https://github.com/motional/nuplan-devkit/@nuplan-devkit-v1.2", From f991513c9f1dc3d02a5da46bd70df733c6446894 Mon Sep 17 00:00:00 2001 From: DanielDauner Date: Sun, 24 Aug 2025 19:36:59 +0200 Subject: [PATCH 012/145] Move `geometry` out of `common` (#39) --- d123/common/datatypes/detection/detection.py | 8 ++++---- d123/common/datatypes/vehicle_state/ego_state.py | 6 +++--- .../vehicle_state/vehicle_parameters.py | 8 ++++---- .../visualization/matplotlib/camera copy.py | 6 +++--- d123/common/visualization/matplotlib/camera.py | 4 ++-- .../visualization/matplotlib/observation.py | 6 +++--- d123/common/visualization/matplotlib/utils.py | 2 +- d123/common/visualization/viser/utils.py | 10 +++++----- d123/common/visualization/viser/utils_v2.py | 13 +++++++------ d123/dataset/arrow/conversion.py | 4 ++-- .../conversion/map/opendrive/parser/geometry.py | 2 +- .../conversion/map/opendrive/parser/reference.py | 2 +- .../map/opendrive/utils/lane_helper.py | 4 ++-- .../map/opendrive/utils/objects_helper.py | 6 +++--- .../map/road_edge/road_edge_3d_utils.py | 4 ++-- .../dataset_specific/av2/av2_data_converter.py | 10 +++++----- .../dataset_specific/av2/av2_map_conversion.py | 8 ++++---- .../carla/carla_data_converter.py | 6 +++--- .../nuplan/nuplan_data_converter.py | 8 ++++---- .../wopd/waymo_map_utils/womp_boundary_utils.py | 12 ++++++------ .../wopd/waymo_map_utils/wopd_map_utils.py | 6 +++--- .../dataset_specific/wopd/wopd_data_converter.py | 10 +++++----- d123/dataset/maps/abstract_map.py | 2 +- d123/dataset/maps/abstract_map_objects.py | 2 +- d123/dataset/maps/gpkg/gpkg_map.py | 2 +- d123/dataset/maps/gpkg/gpkg_map_objects.py | 4 ++-- d123/{common => }/geometry/__init__.py | 0 d123/{common => }/geometry/base.py | 2 +- d123/{common => }/geometry/base_index.py | 0 .../geometry/bounding_box/__init__.py | 0 .../geometry/bounding_box/bounding_box.py | 6 +++--- .../geometry/bounding_box/bounding_box_index.py | 0 d123/{common => }/geometry/bounding_box/utils.py | 4 ++-- d123/{common => }/geometry/constants.py | 0 d123/{common => }/geometry/line/__init__.py | 0 d123/{common => }/geometry/line/helper.py | 2 +- d123/{common => }/geometry/line/polylines.py | 8 ++++---- d123/{common => }/geometry/occupancy_map.py | 0 d123/{common => }/geometry/transform/__init__.py | 0 d123/{common => }/geometry/transform/rotation.py | 4 ++-- .../{common => }/geometry/transform/se2_array.py | 4 ++-- d123/{common => }/geometry/transform/se3.py | 4 ++-- .../geometry/transform/tranform_2d.py | 4 ++-- d123/{common => }/geometry/units.py | 0 d123/{common => 
}/geometry/utils.py | 0 d123/{common => }/geometry/vector.py | 2 +- .../config/datasets/av2_sensor_dataset.yaml | 4 ++-- .../agents/constant_velocity_agents.py | 6 +++--- d123/simulation/agents/idm_agents.py | 10 +++++----- d123/simulation/agents/path_following.py | 8 ++++---- d123/simulation/agents/smart_agents.py | 8 ++++---- .../motion_model/kinematic_bicycle_model.py | 4 ++-- .../gym_observation/raster/raster_renderer.py | 8 ++++---- .../gym/environment/helper/environment_area.py | 6 +++--- .../gym/environment/helper/environment_cache.py | 4 ++-- d123/simulation/gym/gym_env.py | 2 +- .../metrics/sim_agents/interaction_based.py | 4 ++-- d123/simulation/metrics/sim_agents/kinematics.py | 2 +- d123/simulation/metrics/sim_agents/map_based.py | 8 ++++---- d123/simulation/metrics/sim_agents/sim_agents.py | 2 +- d123/simulation/metrics/sim_agents/utils.py | 2 +- .../planner_output/action_planner_output.py | 2 +- .../feature_builder/smart_feature_builder.py | 8 ++++---- notebooks/av2/delete_me.ipynb | 4 ++-- notebooks/av2/delete_me_map.ipynb | 8 ++++---- notebooks/deprecated/test_scene_builder.ipynb | 4 ++-- notebooks/gym/test_gym.ipynb | 4 ++-- notebooks/scene_rendering.ipynb | 6 +++--- notebooks/smarty/smart_rollout.ipynb | 2 +- notebooks/viz/bev_matplotlib.ipynb | 14 +++++++------- notebooks/viz/bev_matplotlib_prediction.ipynb | 2 +- notebooks/waymo_perception/lidar_testing.ipynb | 4 ++-- notebooks/waymo_perception/map_testing.ipynb | 16 ++++++++-------- notebooks/waymo_perception/testing.ipynb | 4 ++-- 74 files changed, 176 insertions(+), 175 deletions(-) rename d123/{common => }/geometry/__init__.py (100%) rename d123/{common => }/geometry/base.py (99%) rename d123/{common => }/geometry/base_index.py (100%) rename d123/{common => }/geometry/bounding_box/__init__.py (100%) rename d123/{common => }/geometry/bounding_box/bounding_box.py (91%) rename d123/{common => }/geometry/bounding_box/bounding_box_index.py (100%) rename d123/{common => }/geometry/bounding_box/utils.py (93%) rename d123/{common => }/geometry/constants.py (100%) rename d123/{common => }/geometry/line/__init__.py (100%) rename d123/{common => }/geometry/line/helper.py (96%) rename d123/{common => }/geometry/line/polylines.py (96%) rename d123/{common => }/geometry/occupancy_map.py (100%) rename d123/{common => }/geometry/transform/__init__.py (100%) rename d123/{common => }/geometry/transform/rotation.py (86%) rename d123/{common => }/geometry/transform/se2_array.py (97%) rename d123/{common => }/geometry/transform/se3.py (98%) rename d123/{common => }/geometry/transform/tranform_2d.py (88%) rename d123/{common => }/geometry/units.py (100%) rename d123/{common => }/geometry/utils.py (100%) rename d123/{common => }/geometry/vector.py (96%) diff --git a/d123/common/datatypes/detection/detection.py b/d123/common/datatypes/detection/detection.py index a836fd4f..6245892f 100644 --- a/d123/common/datatypes/detection/detection.py +++ b/d123/common/datatypes/detection/detection.py @@ -6,11 +6,11 @@ from d123.common.datatypes.detection.detection_types import DetectionType from d123.common.datatypes.time.time_point import TimePoint -from d123.common.geometry.base import StateSE2, StateSE3 -from d123.common.geometry.bounding_box.bounding_box import BoundingBoxSE2, BoundingBoxSE3 -from d123.common.geometry.occupancy_map import OccupancyMap2D -from d123.common.geometry.vector import Vector2D, Vector3D from d123.common.utils.enums import SerialIntEnum +from d123.geometry.base import StateSE2, StateSE3 +from 
d123.geometry.bounding_box.bounding_box import BoundingBoxSE2, BoundingBoxSE3 +from d123.geometry.occupancy_map import OccupancyMap2D +from d123.geometry.vector import Vector2D, Vector3D @dataclass diff --git a/d123/common/datatypes/vehicle_state/ego_state.py b/d123/common/datatypes/vehicle_state/ego_state.py index 9ee67593..0fdf236e 100644 --- a/d123/common/datatypes/vehicle_state/ego_state.py +++ b/d123/common/datatypes/vehicle_state/ego_state.py @@ -22,10 +22,10 @@ rear_axle_se2_to_center_se2, rear_axle_se3_to_center_se3, ) -from d123.common.geometry.base import StateSE2, StateSE3 -from d123.common.geometry.bounding_box.bounding_box import BoundingBoxSE2, BoundingBoxSE3 -from d123.common.geometry.vector import Vector2D, Vector3D from d123.common.utils.enums import classproperty +from d123.geometry.base import StateSE2, StateSE3 +from d123.geometry.bounding_box.bounding_box import BoundingBoxSE2, BoundingBoxSE3 +from d123.geometry.vector import Vector2D, Vector3D # TODO: Find an appropriate way to handle SE2 and SE3 states. diff --git a/d123/common/datatypes/vehicle_state/vehicle_parameters.py b/d123/common/datatypes/vehicle_state/vehicle_parameters.py index 3b4f04b9..0cc14b7d 100644 --- a/d123/common/datatypes/vehicle_state/vehicle_parameters.py +++ b/d123/common/datatypes/vehicle_state/vehicle_parameters.py @@ -1,7 +1,7 @@ -from d123.common.geometry.base import StateSE2, StateSE3, dataclass -from d123.common.geometry.transform.se3 import translate_se3_along_x, translate_se3_along_z -from d123.common.geometry.transform.tranform_2d import translate_along_yaw -from d123.common.geometry.vector import Vector2D +from d123.geometry.base import StateSE2, StateSE3, dataclass +from d123.geometry.transform.se3 import translate_se3_along_x, translate_se3_along_z +from d123.geometry.transform.tranform_2d import translate_along_yaw +from d123.geometry.vector import Vector2D # TODO: Add more vehicle parameters, potentially extend the parameters. 
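The import hunks above and below are the mechanical part of this refactor: every `d123.common.geometry` reference becomes `d123.geometry`. As a minimal sketch of how such a repo-wide rewrite can be scripted (illustrative only, not part of the patch; it assumes it is run from the repository root and that no unrelated string contains the old prefix):

    from pathlib import Path

    OLD, NEW = "d123.common.geometry", "d123.geometry"

    # Rewrite the old import path in every tracked Python source file.
    for py_file in Path(".").rglob("*.py"):
        text = py_file.read_text()
        if OLD in text:
            py_file.write_text(text.replace(OLD, NEW))

In practice one would review the resulting diff (e.g. `git diff`) rather than trust the substitution blindly, which is consistent with the file renames and per-import hunks recorded in this patch.
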
diff --git a/d123/common/visualization/matplotlib/camera copy.py b/d123/common/visualization/matplotlib/camera copy.py
index ea758366..b44e387b 100644
--- a/d123/common/visualization/matplotlib/camera copy.py
+++ b/d123/common/visualization/matplotlib/camera copy.py
@@ -14,10 +14,10 @@
 from d123.common.datatypes.detection.detection_types import DetectionType
 from d123.common.datatypes.sensor.camera import Camera
 from d123.common.datatypes.vehicle_state.ego_state import EgoStateSE3
-from d123.common.geometry.base import StateSE3
-from d123.common.geometry.bounding_box.bounding_box_index import BoundingBoxSE3Index, Corners3DIndex
-from d123.common.geometry.transform.se3 import convert_absolute_to_relative_se3_array, get_rotation_matrix
 from d123.common.visualization.color.default import BOX_DETECTION_CONFIG
+from d123.geometry.base import StateSE3
+from d123.geometry.bounding_box.bounding_box_index import BoundingBoxSE3Index, Corners3DIndex
+from d123.geometry.transform.se3 import convert_absolute_to_relative_se3_array, get_rotation_matrix

 # from navsim.common.dataclasses import Annotations, Camera, Lidar
 # from navsim.common.enums import BoundingBoxIndex, LidarIndex
diff --git a/d123/common/visualization/matplotlib/camera.py b/d123/common/visualization/matplotlib/camera.py
index 52d873ae..d8412731 100644
--- a/d123/common/visualization/matplotlib/camera.py
+++ b/d123/common/visualization/matplotlib/camera.py
@@ -14,9 +14,9 @@
 from d123.common.datatypes.detection.detection_types import DetectionType
 from d123.common.datatypes.sensor.camera import Camera
 from d123.common.datatypes.vehicle_state.ego_state import EgoStateSE3
-from d123.common.geometry.bounding_box.bounding_box_index import BoundingBoxSE3Index, Corners3DIndex
-from d123.common.geometry.transform.se3 import convert_absolute_to_relative_se3_array
 from d123.common.visualization.color.default import BOX_DETECTION_CONFIG
+from d123.geometry.bounding_box.bounding_box_index import BoundingBoxSE3Index, Corners3DIndex
+from d123.geometry.transform.se3 import convert_absolute_to_relative_se3_array

 # from navsim.common.dataclasses import Annotations, Camera, Lidar
 # from navsim.common.enums import BoundingBoxIndex, LidarIndex
diff --git a/d123/common/visualization/matplotlib/observation.py b/d123/common/visualization/matplotlib/observation.py
index 7e225cc7..33bea28f 100644
--- a/d123/common/visualization/matplotlib/observation.py
+++ b/d123/common/visualization/matplotlib/observation.py
@@ -7,9 +7,6 @@
 from d123.common.datatypes.detection.detection import BoxDetectionWrapper, TrafficLightDetectionWrapper
 from d123.common.datatypes.detection.detection_types import DetectionType
 from d123.common.datatypes.vehicle_state.ego_state import EgoStateSE2, EgoStateSE3
-from d123.common.geometry.base import Point2D
-from d123.common.geometry.bounding_box.bounding_box import BoundingBoxSE2, BoundingBoxSE3
-from d123.common.geometry.transform.tranform_2d import translate_along_yaw
 from d123.common.visualization.color.config import PlotConfig
 from d123.common.visualization.color.default import (
     BOX_DETECTION_CONFIG,
@@ -29,6 +26,9 @@
 from d123.dataset.maps.abstract_map_objects import AbstractLane
 from d123.dataset.maps.map_datatypes import MapLayer
 from d123.dataset.scene.abstract_scene import AbstractScene
+from d123.geometry.base import Point2D
+from d123.geometry.bounding_box.bounding_box import BoundingBoxSE2, BoundingBoxSE3
+from d123.geometry.transform.tranform_2d import translate_along_yaw

 def add_default_map_on_ax(
diff --git a/d123/common/visualization/matplotlib/utils.py b/d123/common/visualization/matplotlib/utils.py
index 144530ab..4beff462 100644
--- a/d123/common/visualization/matplotlib/utils.py
+++ b/d123/common/visualization/matplotlib/utils.py
@@ -8,8 +8,8 @@
 import shapely.geometry as geom
 from matplotlib.path import Path

-from d123.common.geometry.base import StateSE2, StateSE3
 from d123.common.visualization.color.config import PlotConfig
+from d123.geometry.base import StateSE2, StateSE3

 def add_shapely_polygon_to_ax(
diff --git a/d123/common/visualization/viser/utils.py b/d123/common/visualization/viser/utils.py
index 561c3676..d03758ff 100644
--- a/d123/common/visualization/viser/utils.py
+++ b/d123/common/visualization/viser/utils.py
@@ -9,16 +9,16 @@
 # from d123.common.datatypes.sensor.camera_parameters import get_nuplan_camera_parameters
 from d123.common.datatypes.sensor.camera import Camera, CameraType
 from d123.common.datatypes.sensor.lidar import LiDARType
-from d123.common.geometry.base import Point3D, StateSE3
-from d123.common.geometry.bounding_box.bounding_box import BoundingBoxSE3
-from d123.common.geometry.line.polylines import Polyline3D
-from d123.common.geometry.transform.se3 import convert_relative_to_absolute_points_3d_array
 from d123.common.visualization.color.color import TAB_10, Color
 from d123.common.visualization.color.config import PlotConfig
 from d123.common.visualization.color.default import BOX_DETECTION_CONFIG, EGO_VEHICLE_CONFIG, MAP_SURFACE_CONFIG
 from d123.dataset.maps.abstract_map import MapLayer
 from d123.dataset.maps.abstract_map_objects import AbstractLane, AbstractSurfaceMapObject
 from d123.dataset.scene.abstract_scene import AbstractScene
+from d123.geometry.base import Point3D, StateSE3
+from d123.geometry.bounding_box.bounding_box import BoundingBoxSE3
+from d123.geometry.line.polylines import Polyline3D
+from d123.geometry.transform.se3 import convert_relative_to_absolute_points_3d_array

 # TODO: Refactor this file.
 # TODO: Add general utilities for 3D primitives and mesh support.
@@ -233,7 +233,7 @@ def get_camera_values(
     camera_to_ego = camera.extrinsic  # 4x4 transformation from camera to ego frame

     # Get the rotation matrix of the rear axle pose
-    from d123.common.geometry.transform.se3 import get_rotation_matrix
+    from d123.geometry.transform.se3 import get_rotation_matrix

     ego_transform = np.eye(4, dtype=np.float64)
     ego_transform[:3, :3] = get_rotation_matrix(rear_axle)
diff --git a/d123/common/visualization/viser/utils_v2.py b/d123/common/visualization/viser/utils_v2.py
index 5db06ab8..f62a48f1 100644
--- a/d123/common/visualization/viser/utils_v2.py
+++ b/d123/common/visualization/viser/utils_v2.py
@@ -1,16 +1,17 @@
 import numpy as np
 import numpy.typing as npt

-# from d123.common.datatypes.sensor.camera_parameters import get_nuplan_camera_parameters
-from d123.common.geometry.base import Point3D, Point3DIndex
-from d123.common.geometry.bounding_box.bounding_box import BoundingBoxSE3
-from d123.common.geometry.bounding_box.bounding_box_index import Corners3DIndex
-from d123.common.geometry.transform.se3 import translate_body_frame
-from d123.common.geometry.vector import Vector3D
 from d123.common.visualization.color.default import BOX_DETECTION_CONFIG, EGO_VEHICLE_CONFIG
 from d123.common.visualization.viser.utils import BRIGHTNESS_FACTOR
 from d123.dataset.scene.abstract_scene import AbstractScene
+# from d123.common.datatypes.sensor.camera_parameters import get_nuplan_camera_parameters
+from d123.geometry.base import Point3D, Point3DIndex
+from d123.geometry.bounding_box.bounding_box import BoundingBoxSE3
+from d123.geometry.bounding_box.bounding_box_index import Corners3DIndex
+from d123.geometry.transform.se3 import translate_body_frame
+from d123.geometry.vector import Vector3D
+
 # TODO: Refactor this file.
 # TODO: Add general utilities for 3D primitives and mesh support.
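The `get_camera_values` hunk above touches only the function-local import, but its context shows the standard homogeneous-transform construction. A minimal, self-contained sketch of that pattern (the helper name and signature here are illustrative, not the repository's API):

import numpy as np

def make_homogeneous_transform(rotation: np.ndarray, translation: np.ndarray) -> np.ndarray:
    """Assemble a 4x4 homogeneous transform from a 3x3 rotation and a 3-vector."""
    transform = np.eye(4, dtype=np.float64)
    transform[:3, :3] = rotation    # rotation block, as in the hunk above
    transform[:3, 3] = translation  # translation column
    return transform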
diff --git a/d123/dataset/arrow/conversion.py b/d123/dataset/arrow/conversion.py
index 68251f82..56b4b33b 100644
--- a/d123/dataset/arrow/conversion.py
+++ b/d123/dataset/arrow/conversion.py
@@ -25,10 +25,10 @@
 from d123.common.datatypes.time.time_point import TimePoint
 from d123.common.datatypes.vehicle_state.ego_state import EgoStateSE3
 from d123.common.datatypes.vehicle_state.vehicle_parameters import VehicleParameters
-from d123.common.geometry.bounding_box.bounding_box import BoundingBoxSE3
-from d123.common.geometry.vector import Vector3D
 from d123.dataset.logs.log_metadata import LogMetadata
 from d123.dataset.maps.abstract_map import List
+from d123.geometry.bounding_box.bounding_box import BoundingBoxSE3
+from d123.geometry.vector import Vector3D

 DATASET_SENSOR_ROOT: Dict[str, Path] = {
     "nuplan": Path(os.environ["NUPLAN_DATA_ROOT"]) / "nuplan-v1.1" / "sensor_blobs",
diff --git a/d123/dataset/conversion/map/opendrive/parser/geometry.py b/d123/dataset/conversion/map/opendrive/parser/geometry.py
index c1f337c0..4aabd57c 100644
--- a/d123/dataset/conversion/map/opendrive/parser/geometry.py
+++ b/d123/dataset/conversion/map/opendrive/parser/geometry.py
@@ -8,7 +8,7 @@
 import numpy.typing as npt
 from scipy.special import fresnel

-from d123.common.geometry.base import StateSE2Index
+from d123.geometry.base import StateSE2Index

 @dataclass
diff --git a/d123/dataset/conversion/map/opendrive/parser/reference.py b/d123/dataset/conversion/map/opendrive/parser/reference.py
index b4c88dc1..d8ce3b7b 100644
--- a/d123/dataset/conversion/map/opendrive/parser/reference.py
+++ b/d123/dataset/conversion/map/opendrive/parser/reference.py
@@ -9,11 +9,11 @@
 import numpy as np
 import numpy.typing as npt

-from d123.common.geometry.base import Point3DIndex, StateSE2Index
 from d123.dataset.conversion.map.opendrive.parser.elevation import Elevation
 from d123.dataset.conversion.map.opendrive.parser.geometry import Arc, Geometry, Line, Spiral
 from d123.dataset.conversion.map.opendrive.parser.lane import LaneOffset, Width
 from d123.dataset.conversion.map.opendrive.parser.polynomial import Polynomial
+from d123.geometry.base import Point3DIndex, StateSE2Index

 TOLERANCE: Final[float] = 1e-3
diff --git a/d123/dataset/conversion/map/opendrive/utils/lane_helper.py b/d123/dataset/conversion/map/opendrive/utils/lane_helper.py
index 79e0f0c1..b6c26131 100644
--- a/d123/dataset/conversion/map/opendrive/utils/lane_helper.py
+++ b/d123/dataset/conversion/map/opendrive/utils/lane_helper.py
@@ -6,8 +6,6 @@
 import numpy.typing as npt
 import shapely

-from d123.common.geometry.base import StateSE2Index
-from d123.common.geometry.units import kmph_to_mps, mph_to_mps
 from d123.dataset.conversion.map.opendrive.parser.lane import Lane, LaneSection
 from d123.dataset.conversion.map.opendrive.parser.reference import ReferenceLine
 from d123.dataset.conversion.map.opendrive.parser.road import RoadType
@@ -16,6 +14,8 @@
     derive_lane_id,
     lane_group_id_from_lane_id,
 )
+from d123.geometry.base import StateSE2Index
+from d123.geometry.units import kmph_to_mps, mph_to_mps

 @dataclass
diff --git a/d123/dataset/conversion/map/opendrive/utils/objects_helper.py b/d123/dataset/conversion/map/opendrive/utils/objects_helper.py
index 3cbb569e..da85ba5e 100644
--- a/d123/dataset/conversion/map/opendrive/utils/objects_helper.py
+++ b/d123/dataset/conversion/map/opendrive/utils/objects_helper.py
@@ -5,11 +5,11 @@
 import numpy.typing as npt
 import shapely

-from d123.common.geometry.base import Point2D, Point3D, Point3DIndex, StateSE2
-from d123.common.geometry.transform.tranform_2d import translate_along_yaw
-from d123.common.geometry.utils import normalize_angle
 from d123.dataset.conversion.map.opendrive.parser.objects import Object
 from d123.dataset.conversion.map.opendrive.parser.reference import ReferenceLine
+from d123.geometry.base import Point2D, Point3D, Point3DIndex, StateSE2
+from d123.geometry.transform.tranform_2d import translate_along_yaw
+from d123.geometry.utils import normalize_angle

 # TODO: make naming consistent with group_collections.py
diff --git a/d123/dataset/conversion/map/road_edge/road_edge_3d_utils.py b/d123/dataset/conversion/map/road_edge/road_edge_3d_utils.py
index 93a34526..b2fa42e9 100644
--- a/d123/dataset/conversion/map/road_edge/road_edge_3d_utils.py
+++ b/d123/dataset/conversion/map/road_edge/road_edge_3d_utils.py
@@ -9,9 +9,9 @@
 import shapely
 from shapely.geometry import LineString

-from d123.common.geometry.base import Point3DIndex
-from d123.common.geometry.occupancy_map import OccupancyMap2D
 from d123.dataset.conversion.map.road_edge.road_edge_2d_utils import get_road_edge_linear_rings
+from d123.geometry.base import Point3DIndex
+from d123.geometry.occupancy_map import OccupancyMap2D

 logger = logging.getLogger(__name__)
diff --git a/d123/dataset/dataset_specific/av2/av2_data_converter.py b/d123/dataset/dataset_specific/av2/av2_data_converter.py
index 0ff84a7e..25b7bec6 100644
--- a/d123/dataset/dataset_specific/av2/av2_data_converter.py
+++ b/d123/dataset/dataset_specific/av2/av2_data_converter.py
@@ -19,11 +19,6 @@
     get_av2_ford_fusion_hybrid_parameters,
     rear_axle_se3_to_center_se3,
 )
-from d123.common.geometry.base import StateSE3
-from d123.common.geometry.bounding_box.bounding_box import BoundingBoxSE3Index
-from d123.common.geometry.constants import DEFAULT_PITCH, DEFAULT_ROLL
-from d123.common.geometry.transform.se3 import convert_relative_to_absolute_se3_array, get_rotation_matrix
-from d123.common.geometry.vector import Vector3D, Vector3DIndex
 from d123.common.multithreading.worker_utils import WorkerPool, worker_map
 from d123.dataset.dataset_specific.av2.av2_constants import (
     AV2_CAMERA_TYPE_MAPPING,
@@ -39,6 +34,11 @@
 from d123.dataset.dataset_specific.av2.av2_map_conversion import convert_av2_map
 from d123.dataset.dataset_specific.raw_data_converter import DataConverterConfig, RawDataConverter
 from d123.dataset.logs.log_metadata import LogMetadata
+from d123.geometry.base import StateSE3
+from d123.geometry.bounding_box.bounding_box import BoundingBoxSE3Index
+from d123.geometry.constants import DEFAULT_PITCH, DEFAULT_ROLL
+from d123.geometry.transform.se3 import convert_relative_to_absolute_se3_array, get_rotation_matrix
+from d123.geometry.vector import Vector3D, Vector3DIndex

 def create_token(input_data: str) -> str:
diff --git a/d123/dataset/dataset_specific/av2/av2_map_conversion.py b/d123/dataset/dataset_specific/av2/av2_map_conversion.py
index 390cbdb2..7fa53fad 100644
--- a/d123/dataset/dataset_specific/av2/av2_map_conversion.py
+++ b/d123/dataset/dataset_specific/av2/av2_map_conversion.py
@@ -1,3 +1,4 @@
+import json
 import warnings
 from pathlib import Path
 from typing import Any, Dict, Final, List
@@ -8,17 +9,16 @@
 import pandas as pd
 import shapely
 import shapely.geometry as geom
-from flask import json

-from d123.common.geometry.base import Point3DIndex
-from d123.common.geometry.line.polylines import Polyline2D, Polyline3D
-from d123.common.geometry.occupancy_map import OccupancyMap2D
 from d123.dataset.conversion.map.road_edge.road_edge_2d_utils import split_line_geometry_by_max_length
 from d123.dataset.conversion.map.road_edge.road_edge_3d_utils import (
     get_road_edges_3d_from_generic_drivable_area_df,
 )
 from d123.dataset.dataset_specific.av2.av2_constants import AV2_ROAD_LINE_TYPE_MAPPING
 from d123.dataset.maps.map_datatypes import MapLayer, RoadEdgeType
+from d123.geometry.base import Point3DIndex
+from d123.geometry.line.polylines import Polyline2D, Polyline3D
+from d123.geometry.occupancy_map import OccupancyMap2D

 LANE_GROUP_MARK_TYPES: List[str] = [
     "DASHED_WHITE",
diff --git a/d123/dataset/dataset_specific/carla/carla_data_converter.py b/d123/dataset/dataset_specific/carla/carla_data_converter.py
index f598bb1e..1e360007 100644
--- a/d123/dataset/dataset_specific/carla/carla_data_converter.py
+++ b/d123/dataset/dataset_specific/carla/carla_data_converter.py
@@ -16,9 +16,6 @@
 from d123.common.datatypes.sensor.lidar_index import CarlaLidarIndex
 from d123.common.datatypes.vehicle_state.ego_state import EgoStateSE3Index
 from d123.common.datatypes.vehicle_state.vehicle_parameters import get_carla_lincoln_mkz_2020_parameters
-from d123.common.geometry.base import Point2D, Point3D
-from d123.common.geometry.bounding_box.bounding_box import BoundingBoxSE3Index
-from d123.common.geometry.vector import Vector3DIndex
 from d123.common.multithreading.worker_utils import WorkerPool, worker_map
 from d123.dataset.arrow.helper import open_arrow_table, write_arrow_table
 from d123.dataset.conversion.map.opendrive.opendrive_map_conversion import convert_from_xodr
@@ -27,6 +24,9 @@
 from d123.dataset.maps.abstract_map import AbstractMap, MapLayer
 from d123.dataset.maps.abstract_map_objects import AbstractLane
 from d123.dataset.scene.arrow_scene import get_map_api_from_names
+from d123.geometry.base import Point2D, Point3D
+from d123.geometry.bounding_box.bounding_box import BoundingBoxSE3Index
+from d123.geometry.vector import Vector3DIndex

 AVAILABLE_CARLA_MAP_LOCATIONS: Final[List[str]] = [
     "Town01", # A small, simple town with a river and several bridges.
diff --git a/d123/dataset/dataset_specific/nuplan/nuplan_data_converter.py b/d123/dataset/dataset_specific/nuplan/nuplan_data_converter.py
index 23a4d702..c65845fb 100644
--- a/d123/dataset/dataset_specific/nuplan/nuplan_data_converter.py
+++ b/d123/dataset/dataset_specific/nuplan/nuplan_data_converter.py
@@ -31,15 +31,15 @@
     get_nuplan_chrysler_pacifica_parameters,
     rear_axle_se3_to_center_se3,
 )
-from d123.common.geometry.base import StateSE3
-from d123.common.geometry.bounding_box.bounding_box import BoundingBoxSE3, BoundingBoxSE3Index
-from d123.common.geometry.constants import DEFAULT_PITCH, DEFAULT_ROLL
-from d123.common.geometry.vector import Vector3D, Vector3DIndex
 from d123.common.multithreading.worker_utils import WorkerPool, worker_map
 from d123.dataset.arrow.helper import open_arrow_table, write_arrow_table
 from d123.dataset.dataset_specific.nuplan.nuplan_map_conversion import MAP_LOCATIONS, NuPlanMapConverter
 from d123.dataset.dataset_specific.raw_data_converter import DataConverterConfig, RawDataConverter
 from d123.dataset.logs.log_metadata import LogMetadata
+from d123.geometry.base import StateSE3
+from d123.geometry.bounding_box.bounding_box import BoundingBoxSE3, BoundingBoxSE3Index
+from d123.geometry.constants import DEFAULT_PITCH, DEFAULT_ROLL
+from d123.geometry.vector import Vector3D, Vector3DIndex

 TARGET_DT: Final[float] = 0.1
 NUPLAN_DT: Final[float] = 0.05
diff --git a/d123/dataset/dataset_specific/wopd/waymo_map_utils/womp_boundary_utils.py b/d123/dataset/dataset_specific/wopd/waymo_map_utils/womp_boundary_utils.py
index a2ba1ee2..4c251252 100644
--- a/d123/dataset/dataset_specific/wopd/waymo_map_utils/womp_boundary_utils.py
+++ b/d123/dataset/dataset_specific/wopd/waymo_map_utils/womp_boundary_utils.py
@@ -5,12 +5,12 @@
 import numpy.typing as npt
 import shapely.geometry as geom

-from d123.common.geometry.base import Point3D, StateSE2
-from d123.common.geometry.line.polylines import Polyline3D, PolylineSE2
-from d123.common.geometry.occupancy_map import OccupancyMap2D
-from d123.common.geometry.transform.tranform_2d import translate_along_yaw
-from d123.common.geometry.utils import normalize_angle
-from d123.common.geometry.vector import Vector2D
+from d123.geometry.base import Point3D, StateSE2
+from d123.geometry.line.polylines import Polyline3D, PolylineSE2
+from d123.geometry.occupancy_map import OccupancyMap2D
+from d123.geometry.transform.tranform_2d import translate_along_yaw
+from d123.geometry.utils import normalize_angle
+from d123.geometry.vector import Vector2D

 MAX_LANE_WIDTH = 25.0 # meters
 MIN_LANE_WIDTH = 2.0
diff --git a/d123/dataset/dataset_specific/wopd/waymo_map_utils/wopd_map_utils.py b/d123/dataset/dataset_specific/wopd/waymo_map_utils/wopd_map_utils.py
index 6d3ef7ab..20a2be00 100644
--- a/d123/dataset/dataset_specific/wopd/waymo_map_utils/wopd_map_utils.py
+++ b/d123/dataset/dataset_specific/wopd/waymo_map_utils/wopd_map_utils.py
@@ -9,11 +9,11 @@
 import shapely.geometry as geom
 from waymo_open_dataset import dataset_pb2

-from d123.common.geometry.base import Point3DIndex
-from d123.common.geometry.line.polylines import Polyline3D
-from d123.common.geometry.units import mph_to_mps
 from d123.dataset.dataset_specific.wopd.waymo_map_utils.womp_boundary_utils import extract_lane_boundaries
 from d123.dataset.maps.map_datatypes import MapLayer, RoadEdgeType, RoadLineType
+from d123.geometry.base import Point3DIndex
+from d123.geometry.line.polylines import Polyline3D
+from d123.geometry.units import mph_to_mps

 # TODO:
 # - Implement stop signs
diff --git a/d123/dataset/dataset_specific/wopd/wopd_data_converter.py b/d123/dataset/dataset_specific/wopd/wopd_data_converter.py
index f1145c8e..77f19ead 100644
--- a/d123/dataset/dataset_specific/wopd/wopd_data_converter.py
+++ b/d123/dataset/dataset_specific/wopd/wopd_data_converter.py
@@ -21,17 +21,17 @@
 from d123.common.datatypes.sensor.lidar_index import WopdLidarIndex
 from d123.common.datatypes.vehicle_state.ego_state import DynamicStateSE3, EgoStateSE3, EgoStateSE3Index
 from d123.common.datatypes.vehicle_state.vehicle_parameters import get_wopd_chrysler_pacifica_parameters
-from d123.common.geometry.base import Point3D, StateSE3
-from d123.common.geometry.bounding_box.bounding_box import BoundingBoxSE3Index
-from d123.common.geometry.constants import DEFAULT_PITCH, DEFAULT_ROLL
-from d123.common.geometry.transform.se3 import convert_relative_to_absolute_se3_array, get_rotation_matrix
-from d123.common.geometry.vector import Vector3D, Vector3DIndex
 from d123.common.multithreading.worker_utils import WorkerPool, worker_map
 from d123.dataset.arrow.helper import open_arrow_table, write_arrow_table
 from d123.dataset.dataset_specific.raw_data_converter import DataConverterConfig, RawDataConverter
 from d123.dataset.dataset_specific.wopd.waymo_map_utils.wopd_map_utils import convert_wopd_map
 from d123.dataset.dataset_specific.wopd.wopd_utils import parse_range_image_and_camera_projection
 from d123.dataset.logs.log_metadata import LogMetadata
+from d123.geometry.base import Point3D, StateSE3
+from d123.geometry.bounding_box.bounding_box import BoundingBoxSE3Index
+from d123.geometry.constants import DEFAULT_PITCH, DEFAULT_ROLL
+from d123.geometry.transform.se3 import convert_relative_to_absolute_se3_array, get_rotation_matrix
+from d123.geometry.vector import Vector3D, Vector3DIndex

 os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
 D123_MAPS_ROOT = Path(os.environ.get("D123_MAPS_ROOT"))
diff --git a/d123/dataset/maps/abstract_map.py b/d123/dataset/maps/abstract_map.py
index 681c90cf..f8c547e7 100644
--- a/d123/dataset/maps/abstract_map.py
+++ b/d123/dataset/maps/abstract_map.py
@@ -5,9 +5,9 @@
 import shapely

-from d123.common.geometry.base import Point2D
 from d123.dataset.maps.abstract_map_objects import AbstractMapObject
 from d123.dataset.maps.map_datatypes import MapLayer
+from d123.geometry.base import Point2D

 # TODO:
 # - add docstrings
diff --git a/d123/dataset/maps/abstract_map_objects.py b/d123/dataset/maps/abstract_map_objects.py
index 96ce79de..a1696009 100644
--- a/d123/dataset/maps/abstract_map_objects.py
+++ b/d123/dataset/maps/abstract_map_objects.py
@@ -6,8 +6,8 @@
 import shapely.geometry as geom
 import trimesh

-from d123.common.geometry.line.polylines import Polyline2D, Polyline3D, PolylineSE2
 from d123.dataset.maps.map_datatypes import MapLayer, RoadEdgeType, RoadLineType
+from d123.geometry.line.polylines import Polyline2D, Polyline3D, PolylineSE2

 class AbstractMapObject(abc.ABC):
diff --git a/d123/dataset/maps/gpkg/gpkg_map.py b/d123/dataset/maps/gpkg/gpkg_map.py
index 32b02d41..f1fead93 100644
--- a/d123/dataset/maps/gpkg/gpkg_map.py
+++ b/d123/dataset/maps/gpkg/gpkg_map.py
@@ -11,7 +11,6 @@
 import shapely
 import shapely.geometry as geom

-from d123.common.geometry.base import Point2D
 from d123.dataset.maps.abstract_map import AbstractMap
 from d123.dataset.maps.abstract_map_objects import AbstractMapObject
 from d123.dataset.maps.gpkg.gpkg_map_objects import (
@@ -27,6 +26,7 @@
 )
 from d123.dataset.maps.gpkg.utils import load_gdf_with_geometry_columns
 from d123.dataset.maps.map_datatypes import MapLayer
+from d123.geometry.base import Point2D

 USE_ARROW: bool = True
diff --git a/d123/dataset/maps/gpkg/gpkg_map_objects.py b/d123/dataset/maps/gpkg/gpkg_map_objects.py
index 1fa6de6a..4641cd97 100644
--- a/d123/dataset/maps/gpkg/gpkg_map_objects.py
+++ b/d123/dataset/maps/gpkg/gpkg_map_objects.py
@@ -10,8 +10,6 @@
 import shapely.geometry as geom
 import trimesh

-from d123.common.geometry.base import Point3DIndex
-from d123.common.geometry.line.polylines import Polyline3D
 from d123.common.visualization.viser.utils import get_trimesh_from_boundaries
 from d123.dataset.maps.abstract_map_objects import (
     AbstractCarpark,
@@ -28,6 +26,8 @@
 )
 from d123.dataset.maps.gpkg.utils import get_row_with_value
 from d123.dataset.maps.map_datatypes import RoadEdgeType, RoadLineType
+from d123.geometry.base import Point3DIndex
+from d123.geometry.line.polylines import Polyline3D

 class GPKGSurfaceObject(AbstractSurfaceMapObject):
diff --git a/d123/common/geometry/__init__.py b/d123/geometry/__init__.py
similarity index 100%
rename from d123/common/geometry/__init__.py
rename to d123/geometry/__init__.py
diff --git a/d123/common/geometry/base.py b/d123/geometry/base.py
similarity index 99%
rename from d123/common/geometry/base.py
rename to d123/geometry/base.py
index 7fdec5cc..b531f499 100644
--- a/d123/common/geometry/base.py
+++ b/d123/geometry/base.py
@@ -8,7 +8,7 @@
 import numpy.typing as npt
 import shapely.geometry as geom

-# from d123.common.geometry.transform.se3 import get_rotation_matrix
+# from d123.geometry.transform.se3 import get_rotation_matrix
 from d123.common.utils.enums import classproperty

 # TODO: Reconsider if 2D/3D or SE2/SE3 structure would be better hierarchical, e.g. inheritance or composition.
diff --git a/d123/common/geometry/base_index.py b/d123/geometry/base_index.py
similarity index 100%
rename from d123/common/geometry/base_index.py
rename to d123/geometry/base_index.py
diff --git a/d123/common/geometry/bounding_box/__init__.py b/d123/geometry/bounding_box/__init__.py
similarity index 100%
rename from d123/common/geometry/bounding_box/__init__.py
rename to d123/geometry/bounding_box/__init__.py
diff --git a/d123/common/geometry/bounding_box/bounding_box.py b/d123/geometry/bounding_box/bounding_box.py
similarity index 91%
rename from d123/common/geometry/bounding_box/bounding_box.py
rename to d123/geometry/bounding_box/bounding_box.py
index 625d24d9..ddf7ec15 100644
--- a/d123/common/geometry/bounding_box/bounding_box.py
+++ b/d123/geometry/bounding_box/bounding_box.py
@@ -7,9 +7,9 @@
 import numpy.typing as npt
 import shapely

-from d123.common.geometry.base import StateSE2, StateSE3
-from d123.common.geometry.bounding_box.bounding_box_index import BoundingBoxSE2Index, BoundingBoxSE3Index
-from d123.common.geometry.bounding_box.utils import bbse2_array_to_corners_array
+from d123.geometry.base import StateSE2, StateSE3
+from d123.geometry.bounding_box.bounding_box_index import BoundingBoxSE2Index, BoundingBoxSE3Index
+from d123.geometry.bounding_box.utils import bbse2_array_to_corners_array

 # TODO: Reconsider naming SE2 and SE3 hierarchies. E.g. would inheritance be a better approach?
diff --git a/d123/common/geometry/bounding_box/bounding_box_index.py b/d123/geometry/bounding_box/bounding_box_index.py
similarity index 100%
rename from d123/common/geometry/bounding_box/bounding_box_index.py
rename to d123/geometry/bounding_box/bounding_box_index.py
diff --git a/d123/common/geometry/bounding_box/utils.py b/d123/geometry/bounding_box/utils.py
similarity index 93%
rename from d123/common/geometry/bounding_box/utils.py
rename to d123/geometry/bounding_box/utils.py
index df205565..5da3530f 100644
--- a/d123/common/geometry/bounding_box/utils.py
+++ b/d123/geometry/bounding_box/utils.py
@@ -2,8 +2,8 @@
 import numpy.typing as npt
 import shapely

-from d123.common.geometry.base import Point2DIndex
-from d123.common.geometry.bounding_box.bounding_box_index import BoundingBoxSE2Index, Corners2DIndex
+from d123.geometry.base import Point2DIndex
+from d123.geometry.bounding_box.bounding_box_index import BoundingBoxSE2Index, Corners2DIndex

 def bbse2_array_to_corners_array(bbse2: npt.NDArray[np.float64]) -> npt.NDArray[np.float64]:
diff --git a/d123/common/geometry/constants.py b/d123/geometry/constants.py
similarity index 100%
rename from d123/common/geometry/constants.py
rename to d123/geometry/constants.py
diff --git a/d123/common/geometry/line/__init__.py b/d123/geometry/line/__init__.py
similarity index 100%
rename from d123/common/geometry/line/__init__.py
rename to d123/geometry/line/__init__.py
diff --git a/d123/common/geometry/line/helper.py b/d123/geometry/line/helper.py
similarity index 96%
rename from d123/common/geometry/line/helper.py
rename to d123/geometry/line/helper.py
index 99036004..c3fc0f1a 100644
--- a/d123/common/geometry/line/helper.py
+++ b/d123/geometry/line/helper.py
@@ -2,7 +2,7 @@
 import numpy.typing as npt
 from shapely.geometry import LineString

-from d123.common.geometry.base import Point2DIndex, StateSE2Index
+from d123.geometry.base import Point2DIndex, StateSE2Index

 def get_linestring_yaws(linestring: LineString) -> npt.NDArray[np.float64]:
diff --git a/d123/common/geometry/line/polylines.py b/d123/geometry/line/polylines.py
similarity index 96%
rename from d123/common/geometry/line/polylines.py
rename to d123/geometry/line/polylines.py
index 37b799b5..78a417cd 100644
--- a/d123/common/geometry/line/polylines.py
+++ b/d123/geometry/line/polylines.py
@@ -9,10 +9,10 @@
 import shapely.geometry as geom
 from scipy.interpolate import interp1d

-from d123.common.geometry.base import Point2D, Point2DIndex, Point3D, Point3DIndex, StateSE2, StateSE2Index
-from d123.common.geometry.constants import DEFAULT_Z
-from d123.common.geometry.line.helper import get_linestring_yaws, get_path_progress
-from d123.common.geometry.utils import normalize_angle
+from d123.geometry.base import Point2D, Point2DIndex, Point3D, Point3DIndex, StateSE2, StateSE2Index
+from d123.geometry.constants import DEFAULT_Z
+from d123.geometry.line.helper import get_linestring_yaws, get_path_progress
+from d123.geometry.utils import normalize_angle

 # TODO: Implement PolylineSE3
 # TODO: Benchmark interpolation performance and reconsider reliance on LineString
diff --git a/d123/common/geometry/occupancy_map.py b/d123/geometry/occupancy_map.py
similarity index 100%
rename from d123/common/geometry/occupancy_map.py
rename to d123/geometry/occupancy_map.py
diff --git a/d123/common/geometry/transform/__init__.py b/d123/geometry/transform/__init__.py
similarity index 100%
rename from d123/common/geometry/transform/__init__.py
rename to d123/geometry/transform/__init__.py
diff --git a/d123/common/geometry/transform/rotation.py b/d123/geometry/transform/rotation.py
similarity index 86%
rename from d123/common/geometry/transform/rotation.py
rename to d123/geometry/transform/rotation.py
index 03072609..a99a2fc5 100644
--- a/d123/common/geometry/transform/rotation.py
+++ b/d123/geometry/transform/rotation.py
@@ -1,8 +1,8 @@
 # import numpy as np
 # import numpy.typing as npt

-# from d123.common.geometry.base import Point3DIndex, StateSE3, StateSE3Index
-# from d123.common.geometry.vector import Vector3D
+# from d123.geometry.base import Point3DIndex, StateSE3, StateSE3Index
+# from d123.geometry.vector import Vector3D

 # def get_roll_pitch_yaw_from_rotation_matrix(
diff --git a/d123/common/geometry/transform/se2_array.py b/d123/geometry/transform/se2_array.py
similarity index 97%
rename from d123/common/geometry/transform/se2_array.py
rename to d123/geometry/transform/se2_array.py
index c225a325..5112ea34 100644
--- a/d123/common/geometry/transform/se2_array.py
+++ b/d123/geometry/transform/se2_array.py
@@ -3,8 +3,8 @@
 import numpy as np
 import numpy.typing as npt

-from d123.common.geometry.base import StateSE2, StateSE2Index
-from d123.common.geometry.line.polylines import normalize_angle
+from d123.geometry.base import StateSE2, StateSE2Index
+from d123.geometry.line.polylines import normalize_angle

 # TODO: Refactor 2D and 3D transform functions in a more consistent and general way.
diff --git a/d123/common/geometry/transform/se3.py b/d123/geometry/transform/se3.py
similarity index 98%
rename from d123/common/geometry/transform/se3.py
rename to d123/geometry/transform/se3.py
index e61451b4..42fae1c2 100644
--- a/d123/common/geometry/transform/se3.py
+++ b/d123/geometry/transform/se3.py
@@ -1,8 +1,8 @@
 import numpy as np
 import numpy.typing as npt

-from d123.common.geometry.base import Point3DIndex, StateSE3, StateSE3Index
-from d123.common.geometry.vector import Vector3D
+from d123.geometry.base import Point3DIndex, StateSE3, StateSE3Index
+from d123.geometry.vector import Vector3D

 # def get_rotation_matrix(state_se3: StateSE3) -> npt.NDArray[np.float64]:
 #     R_x = np.array(
diff --git a/d123/common/geometry/transform/tranform_2d.py b/d123/geometry/transform/tranform_2d.py
similarity index 88%
rename from d123/common/geometry/transform/tranform_2d.py
rename to d123/geometry/transform/tranform_2d.py
index fa8151bc..18998709 100644
--- a/d123/common/geometry/transform/tranform_2d.py
+++ b/d123/geometry/transform/tranform_2d.py
@@ -1,8 +1,8 @@
 import numpy as np
 import numpy.typing as npt

-from d123.common.geometry.base import StateSE2
-from d123.common.geometry.vector import Vector2D
+from d123.geometry.base import StateSE2
+from d123.geometry.vector import Vector2D

 # TODO: Refactor 2D and 3D transform functions in a more consistent and general way.
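The `rotation.py`, `se2_array.py`, `se3.py`, and `tranform_2d.py` hunks show that even fully commented-out imports are rewritten, and that the misspelled `tranform_2d` module name carries over unchanged (only the package prefix moves), so existing call sites stay greppable. A usage sketch of the deep-import form after this patch, using symbols named in the hunks above:

# Old and new deep-import forms; only the package prefix differs.
# from d123.common.geometry.transform.tranform_2d import translate_along_yaw  # before
from d123.geometry.transform.tranform_2d import translate_along_yaw           # after
from d123.geometry.transform.se3 import translate_se3_along_x                 # after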
diff --git a/d123/common/geometry/units.py b/d123/geometry/units.py
similarity index 100%
rename from d123/common/geometry/units.py
rename to d123/geometry/units.py
diff --git a/d123/common/geometry/utils.py b/d123/geometry/utils.py
similarity index 100%
rename from d123/common/geometry/utils.py
rename to d123/geometry/utils.py
diff --git a/d123/common/geometry/vector.py b/d123/geometry/vector.py
similarity index 96%
rename from d123/common/geometry/vector.py
rename to d123/geometry/vector.py
index ef0e4cfa..dad25329 100644
--- a/d123/common/geometry/vector.py
+++ b/d123/geometry/vector.py
@@ -5,7 +5,7 @@
 import numpy as np
 import numpy.typing as npt

-from d123.common.geometry.base import Point2D, Point3D, Point3DIndex
+from d123.geometry.base import Point2D, Point3D, Point3DIndex

 class Vector2DIndex(IntEnum):
diff --git a/d123/script/config/datasets/av2_sensor_dataset.yaml b/d123/script/config/datasets/av2_sensor_dataset.yaml
index 4f9a95ec..58a64f7a 100644
--- a/d123/script/config/datasets/av2_sensor_dataset.yaml
+++ b/d123/script/config/datasets/av2_sensor_dataset.yaml
@@ -3,7 +3,7 @@ av2_sensor_dataset:
   _convert_: 'all'

   splits: ["av2-sensor-mini_train"]
-  log_path: "/media/nvme1/argoverse"
+  log_path: "/mnt/elements_0/argoverse"

   data_converter_config:
     _target_: d123.dataset.dataset_specific.raw_data_converter.DataConverterConfig
@@ -12,5 +12,5 @@ av2_sensor_dataset:
     output_path: ${d123_data_root}
     force_log_conversion: ${force_log_conversion}
     force_map_conversion: ${force_map_conversion}
-    camera_store_option: "binary"
+    camera_store_option: "path"
     lidar_store_option: null
diff --git a/d123/simulation/agents/constant_velocity_agents.py b/d123/simulation/agents/constant_velocity_agents.py
index 75f4e343..b9a4b587 100644
--- a/d123/simulation/agents/constant_velocity_agents.py
+++ b/d123/simulation/agents/constant_velocity_agents.py
@@ -3,11 +3,11 @@
 from typing import List, Optional

 from d123.common.datatypes.detection.detection import BoxDetection, BoxDetectionSE2
-from d123.common.geometry.base import Point2D
-from d123.common.geometry.bounding_box.bounding_box import BoundingBoxSE2
-from d123.common.geometry.transform.tranform_2d import translate_along_yaw
 from d123.dataset.maps.abstract_map import AbstractMap
 from d123.dataset.scene.abstract_scene import AbstractScene
+from d123.geometry.base import Point2D
+from d123.geometry.bounding_box.bounding_box import BoundingBoxSE2
+from d123.geometry.transform.tranform_2d import translate_along_yaw
 from d123.simulation.agents.abstract_agents import AbstractAgents
diff --git a/d123/simulation/agents/idm_agents.py b/d123/simulation/agents/idm_agents.py
index 4e9159f5..118355a1 100644
--- a/d123/simulation/agents/idm_agents.py
+++ b/d123/simulation/agents/idm_agents.py
@@ -7,14 +7,14 @@
 from shapely.geometry import CAP_STYLE, Polygon

 from d123.common.datatypes.detection.detection import BoxDetection, BoxDetectionSE2
-from d123.common.geometry.base import Point2D, StateSE2
-from d123.common.geometry.bounding_box.bounding_box import BoundingBoxSE2
-from d123.common.geometry.line.polylines import PolylineSE2
-from d123.common.geometry.transform.tranform_2d import translate_along_yaw
-from d123.common.geometry.vector import Vector2D
 from d123.dataset.arrow.conversion import BoxDetectionWrapper
 from d123.dataset.maps.abstract_map import AbstractMap
 from d123.dataset.scene.abstract_scene import AbstractScene
+from d123.geometry.base import Point2D, StateSE2
+from d123.geometry.bounding_box.bounding_box import BoundingBoxSE2
+from d123.geometry.line.polylines import PolylineSE2
+from d123.geometry.transform.tranform_2d import translate_along_yaw
+from d123.geometry.vector import Vector2D
 from d123.simulation.agents.abstract_agents import AbstractAgents
diff --git a/d123/simulation/agents/path_following.py b/d123/simulation/agents/path_following.py
index bc4ec577..357aac41 100644
--- a/d123/simulation/agents/path_following.py
+++ b/d123/simulation/agents/path_following.py
@@ -3,12 +3,12 @@
 from typing import Dict, List, Optional

 from d123.common.datatypes.detection.detection import BoxDetection, BoxDetectionSE2
-from d123.common.geometry.base import Point2D, StateSE2
-from d123.common.geometry.bounding_box.bounding_box import BoundingBoxSE2
-from d123.common.geometry.line.polylines import PolylineSE2
-from d123.common.geometry.transform.tranform_2d import translate_along_yaw
 from d123.dataset.maps.abstract_map import AbstractMap
 from d123.dataset.scene.abstract_scene import AbstractScene
+from d123.geometry.base import Point2D, StateSE2
+from d123.geometry.bounding_box.bounding_box import BoundingBoxSE2
+from d123.geometry.line.polylines import PolylineSE2
+from d123.geometry.transform.tranform_2d import translate_along_yaw
 from d123.simulation.agents.abstract_agents import AbstractAgents
diff --git a/d123/simulation/agents/smart_agents.py b/d123/simulation/agents/smart_agents.py
index 18c1b48e..2b20a496 100644
--- a/d123/simulation/agents/smart_agents.py
+++ b/d123/simulation/agents/smart_agents.py
@@ -6,13 +6,13 @@
 from torch_geometric.data import HeteroData

 from d123.common.datatypes.detection.detection import BoxDetection, BoxDetectionSE2
-from d123.common.geometry.base import StateSE2
-from d123.common.geometry.bounding_box.bounding_box import BoundingBoxSE2
-from d123.common.geometry.transform.se2_array import convert_relative_to_absolute_point_2d_array
-from d123.common.geometry.utils import normalize_angle
 from d123.dataset.arrow.conversion import BoxDetectionWrapper, DetectionType
 from d123.dataset.maps.abstract_map import AbstractMap
 from d123.dataset.scene.abstract_scene import AbstractScene
+from d123.geometry.base import StateSE2
+from d123.geometry.bounding_box.bounding_box import BoundingBoxSE2
+from d123.geometry.transform.se2_array import convert_relative_to_absolute_point_2d_array
+from d123.geometry.utils import normalize_angle
 from d123.simulation.agents.abstract_agents import AbstractAgents
 from d123.training.feature_builder.smart_feature_builder import SMARTFeatureBuilder
 from d123.training.models.sim_agent.smart.datamodules.target_builder import _numpy_dict_to_torch
diff --git a/d123/simulation/controller/motion_model/kinematic_bicycle_model.py b/d123/simulation/controller/motion_model/kinematic_bicycle_model.py
index 8a4d9802..c1f343da 100644
--- a/d123/simulation/controller/motion_model/kinematic_bicycle_model.py
+++ b/d123/simulation/controller/motion_model/kinematic_bicycle_model.py
@@ -3,8 +3,8 @@
 from d123.common.datatypes.time.time_point import TimeDuration, TimePoint
 from d123.common.datatypes.vehicle_state.ego_state import DynamicStateSE2, EgoStateSE2
-from d123.common.geometry.base import StateSE2
-from d123.common.geometry.vector import Vector2D
+from d123.geometry.base import StateSE2
+from d123.geometry.vector import Vector2D
 from d123.simulation.controller.motion_model.abstract_motion_model import AbstractMotionModel
diff --git a/d123/simulation/gym/environment/gym_observation/raster/raster_renderer.py b/d123/simulation/gym/environment/gym_observation/raster/raster_renderer.py
index c5a4d57a..604626c9 100644
--- a/d123/simulation/gym/environment/gym_observation/raster/raster_renderer.py
+++ b/d123/simulation/gym/environment/gym_observation/raster/raster_renderer.py
@@ -12,10 +12,10 @@
 from shapely.affinity import scale as shapely_scale

 from d123.common.datatypes.detection.detection import BoxDetectionSE2, TrafficLightStatus
-from d123.common.geometry.base import StateSE2
-from d123.common.geometry.transform.se2_array import convert_absolute_to_relative_point_2d_array
-from d123.common.geometry.transform.tranform_2d import translate_along_yaw
-from d123.common.geometry.vector import Vector2D
+from d123.geometry.base import StateSE2
+from d123.geometry.transform.se2_array import convert_absolute_to_relative_point_2d_array
+from d123.geometry.transform.tranform_2d import translate_along_yaw
+from d123.geometry.vector import Vector2D
 from d123.simulation.gym.environment.helper.environment_area import AbstractEnvironmentArea, RectangleEnvironmentArea
 from d123.simulation.gym.environment.helper.environment_cache import BoxDetectionCache, MapCache
diff --git a/d123/simulation/gym/environment/helper/environment_area.py b/d123/simulation/gym/environment/helper/environment_area.py
index 03f87cd1..db247458 100644
--- a/d123/simulation/gym/environment/helper/environment_area.py
+++ b/d123/simulation/gym/environment/helper/environment_area.py
@@ -3,9 +3,9 @@
 from shapely import Polygon

-from d123.common.geometry.base import StateSE2
-from d123.common.geometry.transform.tranform_2d import translate_along_yaw
-from d123.common.geometry.vector import Vector2D
+from d123.geometry.base import StateSE2
+from d123.geometry.transform.tranform_2d import translate_along_yaw
+from d123.geometry.vector import Vector2D

 class AbstractEnvironmentArea(ABC):
diff --git a/d123/simulation/gym/environment/helper/environment_cache.py b/d123/simulation/gym/environment/helper/environment_cache.py
index 55fbea92..70a3dcdf 100644
--- a/d123/simulation/gym/environment/helper/environment_cache.py
+++ b/d123/simulation/gym/environment/helper/environment_cache.py
@@ -14,8 +14,6 @@
 from d123.common.datatypes.detection.detection_types import DetectionType
 from d123.common.datatypes.recording.detection_recording import DetectionRecording
 from d123.common.datatypes.vehicle_state.ego_state import EgoStateSE2
-from d123.common.geometry.base import StateSE2
-from d123.common.geometry.occupancy_map import OccupancyMap2D
 from d123.dataset.maps.abstract_map import AbstractMap
 from d123.dataset.maps.abstract_map_objects import (
     AbstractCarpark,
@@ -26,6 +24,8 @@
     AbstractStopLine,
 )
 from d123.dataset.maps.map_datatypes import MapLayer
+from d123.geometry.base import StateSE2
+from d123.geometry.occupancy_map import OccupancyMap2D
 from d123.simulation.gym.environment.helper.environment_area import AbstractEnvironmentArea
 from d123.simulation.planning.abstract_planner import PlannerInitialization, PlannerInput
diff --git a/d123/simulation/gym/gym_env.py b/d123/simulation/gym/gym_env.py
index b1ce0746..96672d16 100644
--- a/d123/simulation/gym/gym_env.py
+++ b/d123/simulation/gym/gym_env.py
@@ -5,9 +5,9 @@
 from d123.common.datatypes.recording.detection_recording import DetectionRecording
 from d123.common.datatypes.vehicle_state.ego_state import DynamicStateSE2, EgoStateSE2
-from d123.common.geometry.vector import Vector2D
 from d123.dataset.maps.abstract_map import AbstractMap
 from d123.dataset.scene.abstract_scene import AbstractScene
+from d123.geometry.vector import Vector2D
 from d123.simulation.controller.motion_model.kinematic_bicycle_model import KinematicBicycleModel
 from d123.simulation.observation.abstract_observation import AbstractObservation
 from d123.simulation.observation.log_replay_observation import LogReplayObservation
diff --git a/d123/simulation/metrics/sim_agents/interaction_based.py b/d123/simulation/metrics/sim_agents/interaction_based.py
index 76474d55..05bae92e 100644
--- a/d123/simulation/metrics/sim_agents/interaction_based.py
+++ b/d123/simulation/metrics/sim_agents/interaction_based.py
@@ -3,9 +3,9 @@
 import numpy as np
 import numpy.typing as npt

-from d123.common.geometry.bounding_box.bounding_box_index import BoundingBoxSE2Index
-from d123.common.geometry.bounding_box.utils import bbse2_array_to_polygon_array
 from d123.dataset.arrow.conversion import BoxDetectionWrapper
+from d123.geometry.bounding_box.bounding_box_index import BoundingBoxSE2Index
+from d123.geometry.bounding_box.utils import bbse2_array_to_polygon_array

 MAX_OBJECT_DISTANCE: Final[float] = 50.0
diff --git a/d123/simulation/metrics/sim_agents/kinematics.py b/d123/simulation/metrics/sim_agents/kinematics.py
index fb599db0..9d032360 100644
--- a/d123/simulation/metrics/sim_agents/kinematics.py
+++ b/d123/simulation/metrics/sim_agents/kinematics.py
@@ -2,7 +2,7 @@
 import numpy.typing as npt
 from scipy.signal import savgol_filter

-from d123.common.geometry.bounding_box.bounding_box_index import BoundingBoxSE2Index
+from d123.geometry.bounding_box.bounding_box_index import BoundingBoxSE2Index

 SECONDS_PER_ITERATION = 0.1
diff --git a/d123/simulation/metrics/sim_agents/map_based.py b/d123/simulation/metrics/sim_agents/map_based.py
index dbd9a16d..5342be6e 100644
--- a/d123/simulation/metrics/sim_agents/map_based.py
+++ b/d123/simulation/metrics/sim_agents/map_based.py
@@ -4,13 +4,13 @@
 import numpy.typing as npt
 import shapely

-from d123.common.geometry.base import StateSE2, StateSE2Index
-from d123.common.geometry.bounding_box.bounding_box_index import BoundingBoxSE2Index
-from d123.common.geometry.bounding_box.utils import Corners2DIndex, bbse2_array_to_corners_array
-from d123.common.geometry.utils import normalize_angle
 from d123.dataset.maps.abstract_map import AbstractMap
 from d123.dataset.maps.abstract_map_objects import AbstractLane
 from d123.dataset.maps.map_datatypes import MapLayer
+from d123.geometry.base import StateSE2, StateSE2Index
+from d123.geometry.bounding_box.bounding_box_index import BoundingBoxSE2Index
+from d123.geometry.bounding_box.utils import Corners2DIndex, bbse2_array_to_corners_array
+from d123.geometry.utils import normalize_angle

 MAX_LANE_CENTER_DISTANCE: Final[float] = 10.0
diff --git a/d123/simulation/metrics/sim_agents/sim_agents.py b/d123/simulation/metrics/sim_agents/sim_agents.py
index 94eeed2c..7f9f372e 100644
--- a/d123/simulation/metrics/sim_agents/sim_agents.py
+++ b/d123/simulation/metrics/sim_agents/sim_agents.py
@@ -5,9 +5,9 @@
 import numpy.typing as npt

 from d123.common.datatypes.detection.detection import BoxDetection, BoxDetectionWrapper, DetectionType
-from d123.common.geometry.bounding_box.bounding_box_index import BoundingBoxSE2Index
 from d123.dataset.maps.abstract_map import AbstractMap
 from d123.dataset.scene.abstract_scene import AbstractScene
+from d123.geometry.bounding_box.bounding_box_index import BoundingBoxSE2Index
 from d123.simulation.metrics.sim_agents.histogram_metric import (
     BinaryHistogramIntersectionMetric,
     HistogramIntersectionMetric,
diff --git a/d123/simulation/metrics/sim_agents/utils.py b/d123/simulation/metrics/sim_agents/utils.py
index cf40c5a6..2923038a 100644
--- a/d123/simulation/metrics/sim_agents/utils.py
+++ b/d123/simulation/metrics/sim_agents/utils.py
@@ -4,8 +4,8 @@
 import numpy.typing as npt

 from d123.common.datatypes.detection.detection import BoxDetectionWrapper
-from d123.common.geometry.bounding_box.bounding_box_index import BoundingBoxSE2Index
 from d123.dataset.scene.abstract_scene import AbstractScene
+from d123.geometry.bounding_box.bounding_box_index import BoundingBoxSE2Index

 def _get_log_agents_array(
diff --git a/d123/simulation/planning/planner_output/action_planner_output.py b/d123/simulation/planning/planner_output/action_planner_output.py
index 0a62ed98..6d1ff54a 100644
--- a/d123/simulation/planning/planner_output/action_planner_output.py
+++ b/d123/simulation/planning/planner_output/action_planner_output.py
@@ -1,5 +1,5 @@
 from d123.common.datatypes.vehicle_state.ego_state import DynamicStateSE2, EgoStateSE2
-from d123.common.geometry.vector import Vector2D
+from d123.geometry.vector import Vector2D
 from d123.simulation.planning.planner_output.abstract_planner_output import AbstractPlannerOutput
diff --git a/d123/training/feature_builder/smart_feature_builder.py b/d123/training/feature_builder/smart_feature_builder.py
index 829f0498..806350bf 100644
--- a/d123/training/feature_builder/smart_feature_builder.py
+++ b/d123/training/feature_builder/smart_feature_builder.py
@@ -7,10 +7,6 @@
 from d123.common.datatypes.detection.detection import BoxDetection, BoxDetectionWrapper
 from d123.common.datatypes.detection.detection_types import DetectionType
-from d123.common.geometry.base import StateSE2, StateSE2Index
-from d123.common.geometry.bounding_box.bounding_box import BoundingBoxSE2
-from d123.common.geometry.line.polylines import PolylineSE2
-from d123.common.geometry.transform.se2_array import convert_absolute_to_relative_se2_array
 from d123.common.visualization.color.default import TrafficLightStatus
 from d123.dataset.maps.abstract_map import MapLayer
 from d123.dataset.maps.abstract_map_objects import (
@@ -20,6 +16,10 @@
     AbstractLaneGroup,
 )
 from d123.dataset.scene.abstract_scene import AbstractScene
+from d123.geometry.base import StateSE2, StateSE2Index
+from d123.geometry.bounding_box.bounding_box import BoundingBoxSE2
+from d123.geometry.line.polylines import PolylineSE2
+from d123.geometry.transform.se2_array import convert_absolute_to_relative_se2_array

 # TODO: Hind feature builder behind abstraction.
diff --git a/notebooks/av2/delete_me.ipynb b/notebooks/av2/delete_me.ipynb
index e28ef5ef..85226114 100644
--- a/notebooks/av2/delete_me.ipynb
+++ b/notebooks/av2/delete_me.ipynb
@@ -385,8 +385,8 @@
     "\n",
     "from pyquaternion import Quaternion\n",
     "from d123.common.datatypes.detection.detection_types import DetectionType\n",
-    "from d123.common.geometry.base import StateSE2\n",
-    "from d123.common.geometry.bounding_box.bounding_box import BoundingBoxSE2\n",
+    "from d123.geometry.base import StateSE2\n",
+    "from d123.geometry.bounding_box.bounding_box import BoundingBoxSE2\n",
     "from d123.common.visualization.color.config import PlotConfig\n",
     "from d123.common.visualization.color.default import BOX_DETECTION_CONFIG\n",
     "from d123.common.visualization.matplotlib.utils import add_shapely_polygon_to_ax\n",
diff --git a/notebooks/av2/delete_me_map.ipynb b/notebooks/av2/delete_me_map.ipynb
index 03e62838..901c232c 100644
--- a/notebooks/av2/delete_me_map.ipynb
+++ b/notebooks/av2/delete_me_map.ipynb
@@ -58,7 +58,7 @@
     "import json\n",
     "from typing import Dict, List\n",
     "\n",
-    "from d123.common.geometry.line.polylines import Polyline3D\n",
+    "from d123.geometry.line.polylines import Polyline3D\n",
     "from d123.dataset.dataset_specific.av2.av2_map_conversion import _extract_lane_group_dict\n",
     "\n",
     "\n",
@@ -122,11 +122,11 @@
     "\n",
     "import shapely\n",
     "\n",
-    "from d123.common.geometry.base import Point3DIndex\n",
+    "from d123.geometry.base import Point3DIndex\n",
     "import geopandas as gpd\n",
     "\n",
-    "from d123.common.geometry.line.polylines import Polyline2D\n",
-    "from d123.common.geometry.occupancy_map import OccupancyMap2D\n",
+    "from d123.geometry.line.polylines import Polyline2D\n",
+    "from d123.geometry.occupancy_map import OccupancyMap2D\n",
     "\n",
     "import numpy.typing as npt\n",
     "\n",
diff --git a/notebooks/deprecated/test_scene_builder.ipynb b/notebooks/deprecated/test_scene_builder.ipynb
index 00de24f9..607cd184 100644
--- a/notebooks/deprecated/test_scene_builder.ipynb
+++ b/notebooks/deprecated/test_scene_builder.ipynb
@@ -72,8 +72,8 @@
     "# import matplotlib.pyplot as plt\n",
     "# from tqdm import tqdm\n",
     "\n",
-    "# from d123.common.geometry.base import Point2D, StateSE2\n",
-    "# from d123.common.geometry.bounding_box.bounding_box import BoundingBoxSE2\n",
+    "# from d123.geometry.base import Point2D, StateSE2\n",
+    "# from d123.geometry.bounding_box.bounding_box import BoundingBoxSE2\n",
     "# from d123.common.visualization.color.default import EGO_VEHICLE_CONFIG\n",
     "# from d123.common.visualization.matplotlib.observation import (\n",
     "# add_bounding_box_to_ax,\n",
diff --git a/notebooks/gym/test_gym.ipynb b/notebooks/gym/test_gym.ipynb
index bb6cf7dc..3fbb3a94 100644
--- a/notebooks/gym/test_gym.ipynb
+++ b/notebooks/gym/test_gym.ipynb
@@ -77,8 +77,8 @@
     "from tqdm import tqdm\n",
     "\n",
     "from d123.common.datatypes.vehicle_state.ego_state import EgoStateSE2\n",
-    "from d123.common.geometry.base import Point2D, StateSE2\n",
-    "from d123.common.geometry.bounding_box.bounding_box import BoundingBoxSE2\n",
+    "from d123.geometry.base import Point2D, StateSE2\n",
+    "from d123.geometry.bounding_box.bounding_box import BoundingBoxSE2\n",
    "from d123.common.visualization.color.default import EGO_VEHICLE_CONFIG\n",
     "from d123.common.visualization.matplotlib.observation import (\n",
     "    add_bounding_box_to_ax,\n",
diff --git a/notebooks/scene_rendering.ipynb b/notebooks/scene_rendering.ipynb
index dad53954..0a50c5ed 100644
--- a/notebooks/scene_rendering.ipynb
+++ b/notebooks/scene_rendering.ipynb
@@ -63,9 +63,9 @@
     "from typing import Tuple\n",
     "from d123.common.datatypes.detection.detection import BoxDetection\n",
     "from d123.common.datatypes.detection.detection_types import DYNAMIC_DETECTION_TYPES, STATIC_DETECTION_TYPES\n",
-    "from d123.common.geometry.base import StateSE2\n",
-    "from d123.common.geometry.transform.tranform_2d import translate_along_yaw\n",
-    "from d123.common.geometry.vector import Vector2D\n",
+    "from d123.geometry.base import StateSE2\n",
+    "from d123.geometry.transform.tranform_2d import translate_along_yaw\n",
+    "from d123.geometry.vector import Vector2D\n",
     "from d123.common.visualization.matplotlib.observation import (\n",
     "    add_box_detections_to_ax,\n",
     "    add_default_map_on_ax,\n",
diff --git a/notebooks/smarty/smart_rollout.ipynb b/notebooks/smarty/smart_rollout.ipynb
index 3f3aac67..f8f71593 100644
--- a/notebooks/smarty/smart_rollout.ipynb
+++ b/notebooks/smarty/smart_rollout.ipynb
@@ -135,7 +135,7 @@
    "source": [
     "from matplotlib import pyplot as plt\n",
     "\n",
-    "from d123.common.geometry.transform.se2_array import convert_relative_to_absolute_point_2d_array\n",
+    "from d123.geometry.transform.se2_array import convert_relative_to_absolute_point_2d_array\n",
     "\n",
     "\n",
     "origin = scene.get_ego_state_at_iteration(0).bounding_box.center.state_se2\n",
diff --git a/notebooks/viz/bev_matplotlib.ipynb b/notebooks/viz/bev_matplotlib.ipynb
index 1feef159..3755c860 100644
--- a/notebooks/viz/bev_matplotlib.ipynb
+++ b/notebooks/viz/bev_matplotlib.ipynb
@@ -63,7 +63,7 @@
     "from typing import List, Optional, Tuple\n",
     "import matplotlib.pyplot as plt\n",
     "import numpy as np\n",
-    "from d123.common.geometry.base import Point2D\n",
+    "from d123.geometry.base import Point2D\n",
     "from d123.common.visualization.color.color import BLACK, DARK_GREY, DARKER_GREY, LIGHT_GREY, NEW_TAB_10, TAB_10\n",
     "from d123.common.visualization.color.config import PlotConfig\n",
     "from d123.common.visualization.color.default import CENTERLINE_CONFIG, MAP_SURFACE_CONFIG, ROUTE_CONFIG\n",
@@ -238,14 +238,14 @@
     "\n",
     "scene_index = 9\n",
     "iteration = 99\n",
-    "fig, ax = plot_scene_at_iteration(scenes[scene_index], iteration=iteration, radius=30)\n",
+    "fig, ax = plot_scene_at_iteration(scenes[scene_index], iteration=iteration, radius=60)\n",
     "plt.show()\n",
     "\n",
-    "# camera = scenes[scene_index].get_camera_at_iteration(\n",
-    "#     iteration=iteration, camera_type=CameraType.CAM_F0\n",
-    "# )\n",
+    "camera = scenes[scene_index].get_camera_at_iteration(\n",
+    "    iteration=iteration, camera_type=CameraType.CAM_F0\n",
+    ")\n",
     "\n",
-    "# plt.imshow(camera.image, cmap=\"gray\", vmin=0, vmax=255)\n",
+    "plt.imshow(camera.image, cmap=\"gray\", vmin=0, vmax=255)\n",
     "# # fig.savefig(f\"/home/daniel/scene_{scene_index}_iteration_1.pdf\", dpi=300, bbox_inches=\"tight\")\n",
     "\n",
     "# scenes[scene_index].log_name"
@@ -333,7 +333,7 @@
   ],
   "metadata": {
    "kernelspec": {
-    "display_name": "d123",
+    "display_name": "d123_dev",
     "language": "python",
     "name": "python3"
   },
diff --git a/notebooks/viz/bev_matplotlib_prediction.ipynb b/notebooks/viz/bev_matplotlib_prediction.ipynb
index 041daa72..bed72f71 100644
--- a/notebooks/viz/bev_matplotlib_prediction.ipynb
+++ b/notebooks/viz/bev_matplotlib_prediction.ipynb
@@ -64,7 +64,7 @@
     "from typing import List, Optional, Tuple\n",
     "import matplotlib.pyplot as plt\n",
     "import numpy as np\n",
-    "from d123.common.geometry.base import Point2D\n",
+    "from d123.geometry.base import Point2D\n",
     "from d123.common.visualization.color.color import BLACK, DARK_GREY, DARKER_GREY, LIGHT_GREY, NEW_TAB_10, TAB_10\n",
     "from d123.common.visualization.color.config import PlotConfig\n",
     "from d123.common.visualization.color.default import CENTERLINE_CONFIG, MAP_SURFACE_CONFIG, ROUTE_CONFIG\n",
diff --git a/notebooks/waymo_perception/lidar_testing.ipynb b/notebooks/waymo_perception/lidar_testing.ipynb
index 3cd2a77e..dfa0e65e 100644
--- a/notebooks/waymo_perception/lidar_testing.ipynb
+++ b/notebooks/waymo_perception/lidar_testing.ipynb
@@ -54,8 +54,8 @@
     "import io\n",
     "from pyquaternion import Quaternion\n",
     "\n",
-    "from d123.common.geometry.base import StateSE3\n",
-    "from d123.common.geometry.bounding_box.bounding_box import BoundingBoxSE3\n",
+    "from d123.geometry.base import StateSE3\n",
+    "from d123.geometry.bounding_box.bounding_box import BoundingBoxSE3\n",
     "\n",
     "from waymo_open_dataset.utils import frame_utils\n",
     "\n",
diff --git a/notebooks/waymo_perception/map_testing.ipynb b/notebooks/waymo_perception/map_testing.ipynb
index c29fc212..a06d8f7c 100644
--- a/notebooks/waymo_perception/map_testing.ipynb
+++ b/notebooks/waymo_perception/map_testing.ipynb
@@ -54,8 +54,8 @@
     "import io\n",
     "from pyquaternion import Quaternion\n",
     "\n",
-    "from d123.common.geometry.base import StateSE3\n",
-    "from d123.common.geometry.bounding_box.bounding_box import BoundingBoxSE3\n",
+    "from d123.geometry.base import StateSE3\n",
+    "from d123.geometry.bounding_box.bounding_box import BoundingBoxSE3\n",
     "\n",
     "from waymo_open_dataset.utils import frame_utils\n",
     "\n",
@@ -139,7 +139,7 @@
    "source": [
     "from collections import defaultdict\n",
     "\n",
-    "from d123.common.geometry.units import mph_to_mps\n",
+    "from d123.geometry.units import mph_to_mps\n",
     "\n",
     "\n",
     "dataset = tf.data.TFRecordDataset(pathname, compression_type=\"\")\n",
@@ -382,7 +382,7 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "from d123.common.geometry.line.polylines import Polyline3D\n",
+    "from d123.geometry.line.polylines import Polyline3D\n",
     "import numpy as np\n",
     "\n",
     "\n",
@@ -635,10 +635,10 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "from d123.common.geometry.base import StateSE2\n",
-    "# from d123.common.geometry.line.polylines import PolylineSE2\n",
-    "# from d123.common.geometry.transform.tranform_2d import translate_along_yaw\n",
-    "# from d123.common.geometry.vector import Vector2D\n",
+    "from d123.geometry.base import StateSE2\n",
+    "# from d123.geometry.line.polylines import PolylineSE2\n",
+    "# from d123.geometry.transform.tranform_2d import translate_along_yaw\n",
+    "# from d123.geometry.vector import Vector2D\n",
     "\n",
     "# size = 30\n",
     "# fig, ax = plt.subplots(figsize=(size, size))\n",
diff --git a/notebooks/waymo_perception/testing.ipynb b/notebooks/waymo_perception/testing.ipynb
index 19745f8f..9adc8510 100644
--- a/notebooks/waymo_perception/testing.ipynb
+++ b/notebooks/waymo_perception/testing.ipynb
@@ -51,8 +51,8 @@
     "import io\n",
     "from pyquaternion import Quaternion\n",
     "\n",
-    "from d123.common.geometry.base import StateSE3\n",
-    "from d123.common.geometry.bounding_box.bounding_box import BoundingBoxSE3\n",
+    "from d123.geometry.base import StateSE3\n",
+    "from d123.geometry.bounding_box.bounding_box import BoundingBoxSE3\n",
     "\n",
     "from waymo_open_dataset.utils import frame_utils\n",
     "\n",

From 2580087ea96ded2f40181f725035660a3a160b53 Mon Sep 17 00:00:00 2001
From: DanielDauner
Date: Sun, 24 Aug 2025 20:41:11 +0200
Subject: [PATCH 013/145] Clean up `geometry` structure and import options

---
 .pre-commit-config.yaml                       |   1 +
 d123/common/datatypes/detection/detection.py  |   5 +-
.../datatypes/vehicle_state/ego_state.py | 4 +- .../vehicle_state/vehicle_parameters.py | 5 +- .../visualization/matplotlib/camera copy.py | 3 +- .../common/visualization/matplotlib/camera.py | 2 +- .../visualization/matplotlib/observation.py | 3 +- d123/common/visualization/matplotlib/utils.py | 2 +- d123/common/visualization/viser/utils.py | 4 +- d123/common/visualization/viser/utils_v2.py | 5 +- d123/dataset/arrow/conversion.py | 3 +- .../map/opendrive/parser/geometry.py | 2 +- .../map/opendrive/parser/reference.py | 2 +- .../map/opendrive/utils/lane_helper.py | 4 +- .../map/opendrive/utils/objects_helper.py | 4 +- .../map/road_edge/road_edge_3d_utils.py | 2 +- .../av2/av2_data_converter.py | 6 +- .../av2/av2_map_conversion.py | 4 +- .../carla/carla_data_converter.py | 4 +- .../nuplan/nuplan_data_converter.py | 6 +- .../waymo_map_utils/womp_boundary_utils.py | 7 +- .../wopd/waymo_map_utils/wopd_map_utils.py | 5 +- .../wopd/wopd_data_converter.py | 6 +- d123/dataset/maps/abstract_map.py | 2 +- d123/dataset/maps/abstract_map_objects.py | 2 +- d123/dataset/maps/gpkg/gpkg_map.py | 2 +- d123/dataset/maps/gpkg/gpkg_map_objects.py | 3 +- d123/geometry/__init__.py | 18 +++ d123/geometry/base_index.py | 1 - .../{bounding_box => }/bounding_box.py | 6 +- ...ounding_box_index.py => geometry_index.py} | 63 ++++++++ d123/geometry/line/__init__.py | 0 d123/geometry/point.py | 98 ++++++++++++ .../{line/polylines.py => polyline.py} | 9 +- d123/geometry/{base.py => se.py} | 147 +----------------- d123/geometry/transform/se2_array.py | 4 +- d123/geometry/transform/se3.py | 3 +- d123/geometry/transform/tranform_2d.py | 2 +- .../{bounding_box => utils}/__init__.py | 0 d123/geometry/utils/bounding_box_utils.py | 63 ++++++++ d123/geometry/{ => utils}/constants.py | 0 .../helper.py => utils/polyline_utils.py} | 2 +- .../{utils.py => utils/rotation_utils.py} | 0 d123/geometry/{ => utils}/units.py | 0 .../geometry/{bounding_box => utils}/utils.py | 3 +- d123/geometry/vector.py | 15 +- .../agents/constant_velocity_agents.py | 4 +- d123/simulation/agents/idm_agents.py | 7 +- d123/simulation/agents/path_following.py | 7 +- d123/simulation/agents/smart_agents.py | 6 +- .../motion_model/kinematic_bicycle_model.py | 2 +- .../gym_observation/raster/raster_renderer.py | 2 +- .../environment/helper/environment_area.py | 2 +- .../environment/helper/environment_cache.py | 2 +- .../metrics/sim_agents/interaction_based.py | 4 +- .../metrics/sim_agents/map_based.py | 8 +- .../metrics/sim_agents/sim_agents.py | 2 +- d123/simulation/metrics/sim_agents/utils.py | 2 +- .../feature_builder/smart_feature_builder.py | 5 +- notebooks/av2/delete_me.ipynb | 2 +- notebooks/deprecated/test_scene_builder.ipynb | 4 +- notebooks/gym/test_gym.ipynb | 2 +- notebooks/scene_rendering.ipynb | 2 +- notebooks/viz/bev_matplotlib.ipynb | 14 +- .../waymo_perception/lidar_testing.ipynb | 4 +- notebooks/waymo_perception/map_testing.ipynb | 80 +--------- notebooks/waymo_perception/testing.ipynb | 4 +- 67 files changed, 342 insertions(+), 355 deletions(-) delete mode 100644 d123/geometry/base_index.py rename d123/geometry/{bounding_box => }/bounding_box.py (92%) rename d123/geometry/{bounding_box/bounding_box_index.py => geometry_index.py} (59%) delete mode 100644 d123/geometry/line/__init__.py create mode 100644 d123/geometry/point.py rename d123/geometry/{line/polylines.py => polyline.py} (95%) rename d123/geometry/{base.py => se.py} (53%) rename d123/geometry/{bounding_box => utils}/__init__.py (100%) create mode 100644 
d123/geometry/utils/bounding_box_utils.py rename d123/geometry/{ => utils}/constants.py (100%) rename d123/geometry/{line/helper.py => utils/polyline_utils.py} (96%) rename d123/geometry/{utils.py => utils/rotation_utils.py} (100%) rename d123/geometry/{ => utils}/units.py (100%) rename d123/geometry/{bounding_box => utils}/utils.py (94%) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 983998e5..251ae7fb 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -36,6 +36,7 @@ repos: hooks: - id: autoflake args: ['--in-place', '--remove-all-unused-imports', '--remove-unused-variable'] + exclude: __init__.py$ language_version: python3.12 - repo: https://github.com/pycqa/flake8 rev: 7.3.0 diff --git a/d123/common/datatypes/detection/detection.py b/d123/common/datatypes/detection/detection.py index 6245892f..075129b4 100644 --- a/d123/common/datatypes/detection/detection.py +++ b/d123/common/datatypes/detection/detection.py @@ -7,10 +7,7 @@ from d123.common.datatypes.detection.detection_types import DetectionType from d123.common.datatypes.time.time_point import TimePoint from d123.common.utils.enums import SerialIntEnum -from d123.geometry.base import StateSE2, StateSE3 -from d123.geometry.bounding_box.bounding_box import BoundingBoxSE2, BoundingBoxSE3 -from d123.geometry.occupancy_map import OccupancyMap2D -from d123.geometry.vector import Vector2D, Vector3D +from d123.geometry import BoundingBoxSE2, BoundingBoxSE3, OccupancyMap2D, StateSE2, StateSE3, Vector2D, Vector3D @dataclass diff --git a/d123/common/datatypes/vehicle_state/ego_state.py b/d123/common/datatypes/vehicle_state/ego_state.py index 0fdf236e..d0487e60 100644 --- a/d123/common/datatypes/vehicle_state/ego_state.py +++ b/d123/common/datatypes/vehicle_state/ego_state.py @@ -23,9 +23,7 @@ rear_axle_se3_to_center_se3, ) from d123.common.utils.enums import classproperty -from d123.geometry.base import StateSE2, StateSE3 -from d123.geometry.bounding_box.bounding_box import BoundingBoxSE2, BoundingBoxSE3 -from d123.geometry.vector import Vector2D, Vector3D +from d123.geometry import BoundingBoxSE2, BoundingBoxSE3, StateSE2, StateSE3, Vector2D, Vector3D # TODO: Find an appropriate way to handle SE2 and SE3 states. diff --git a/d123/common/datatypes/vehicle_state/vehicle_parameters.py b/d123/common/datatypes/vehicle_state/vehicle_parameters.py index 0cc14b7d..c8a84828 100644 --- a/d123/common/datatypes/vehicle_state/vehicle_parameters.py +++ b/d123/common/datatypes/vehicle_state/vehicle_parameters.py @@ -1,7 +1,8 @@ -from d123.geometry.base import StateSE2, StateSE3, dataclass +from dataclasses import dataclass + +from d123.geometry import StateSE2, StateSE3, Vector2D from d123.geometry.transform.se3 import translate_se3_along_x, translate_se3_along_z from d123.geometry.transform.tranform_2d import translate_along_yaw -from d123.geometry.vector import Vector2D # TODO: Add more vehicle parameters, potentially extend the parameters. 
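The import change above illustrates the crux of this commit: d123/geometry/__init__.py (added later in this patch) re-exports the core primitives, so call sites can use the flat `from d123.geometry import ...` form instead of deep module paths. A minimal usage sketch of the new style (hypothetical, not part of the patch), assuming the StateSE2(x, y, yaw) dataclass layout implied by StateSE2Index:

# Hypothetical sketch of the flat import style introduced by this commit.
# Assumes d123 is importable; StateSE2 fields (x, y, yaw) follow StateSE2Index.
from d123.geometry import Point2D, StateSE2

state = StateSE2(x=1.0, y=2.0, yaw=0.5)
point = Point2D(x=state.x, y=state.y)
print(point.array)  # [1. 2.], see Point2D.array in d123/geometry/point.py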
diff --git a/d123/common/visualization/matplotlib/camera copy.py b/d123/common/visualization/matplotlib/camera copy.py index b44e387b..ed3a7d2a 100644 --- a/d123/common/visualization/matplotlib/camera copy.py +++ b/d123/common/visualization/matplotlib/camera copy.py @@ -15,8 +15,7 @@ from d123.common.datatypes.sensor.camera import Camera from d123.common.datatypes.vehicle_state.ego_state import EgoStateSE3 from d123.common.visualization.color.default import BOX_DETECTION_CONFIG -from d123.geometry.base import StateSE3 -from d123.geometry.bounding_box.bounding_box_index import BoundingBoxSE3Index, Corners3DIndex +from d123.geometry import BoundingBoxSE3Index, Corners3DIndex, StateSE3 from d123.geometry.transform.se3 import convert_absolute_to_relative_se3_array, get_rotation_matrix # from navsim.common.dataclasses import Annotations, Camera, Lidar diff --git a/d123/common/visualization/matplotlib/camera.py b/d123/common/visualization/matplotlib/camera.py index d8412731..2b8ecce4 100644 --- a/d123/common/visualization/matplotlib/camera.py +++ b/d123/common/visualization/matplotlib/camera.py @@ -15,7 +15,7 @@ from d123.common.datatypes.sensor.camera import Camera from d123.common.datatypes.vehicle_state.ego_state import EgoStateSE3 from d123.common.visualization.color.default import BOX_DETECTION_CONFIG -from d123.geometry.bounding_box.bounding_box_index import BoundingBoxSE3Index, Corners3DIndex +from d123.geometry import BoundingBoxSE3Index, Corners3DIndex from d123.geometry.transform.se3 import convert_absolute_to_relative_se3_array # from navsim.common.dataclasses import Annotations, Camera, Lidar diff --git a/d123/common/visualization/matplotlib/observation.py b/d123/common/visualization/matplotlib/observation.py index 33bea28f..ecddaa02 100644 --- a/d123/common/visualization/matplotlib/observation.py +++ b/d123/common/visualization/matplotlib/observation.py @@ -26,8 +26,7 @@ from d123.dataset.maps.abstract_map_objects import AbstractLane from d123.dataset.maps.map_datatypes import MapLayer from d123.dataset.scene.abstract_scene import AbstractScene -from d123.geometry.base import Point2D -from d123.geometry.bounding_box.bounding_box import BoundingBoxSE2, BoundingBoxSE3 +from d123.geometry import BoundingBoxSE2, BoundingBoxSE3, Point2D from d123.geometry.transform.tranform_2d import translate_along_yaw diff --git a/d123/common/visualization/matplotlib/utils.py b/d123/common/visualization/matplotlib/utils.py index 4beff462..9e030b80 100644 --- a/d123/common/visualization/matplotlib/utils.py +++ b/d123/common/visualization/matplotlib/utils.py @@ -9,7 +9,7 @@ from matplotlib.path import Path from d123.common.visualization.color.config import PlotConfig -from d123.geometry.base import StateSE2, StateSE3 +from d123.geometry import StateSE2, StateSE3 def add_shapely_polygon_to_ax( diff --git a/d123/common/visualization/viser/utils.py b/d123/common/visualization/viser/utils.py index d03758ff..afa5ea2e 100644 --- a/d123/common/visualization/viser/utils.py +++ b/d123/common/visualization/viser/utils.py @@ -15,9 +15,7 @@ from d123.dataset.maps.abstract_map import MapLayer from d123.dataset.maps.abstract_map_objects import AbstractLane, AbstractSurfaceMapObject from d123.dataset.scene.abstract_scene import AbstractScene -from d123.geometry.base import Point3D, StateSE3 -from d123.geometry.bounding_box.bounding_box import BoundingBoxSE3 -from d123.geometry.line.polylines import Polyline3D +from d123.geometry import BoundingBoxSE3, Point3D, Polyline3D, StateSE3 from d123.geometry.transform.se3 
import convert_relative_to_absolute_points_3d_array # TODO: Refactor this file. diff --git a/d123/common/visualization/viser/utils_v2.py b/d123/common/visualization/viser/utils_v2.py index f62a48f1..44831f3e 100644 --- a/d123/common/visualization/viser/utils_v2.py +++ b/d123/common/visualization/viser/utils_v2.py @@ -6,11 +6,8 @@ from d123.dataset.scene.abstract_scene import AbstractScene # from d123.common.datatypes.sensor.camera_parameters import get_nuplan_camera_parameters -from d123.geometry.base import Point3D, Point3DIndex -from d123.geometry.bounding_box.bounding_box import BoundingBoxSE3 -from d123.geometry.bounding_box.bounding_box_index import Corners3DIndex +from d123.geometry import BoundingBoxSE3, Corners3DIndex, Point3D, Point3DIndex, Vector3D from d123.geometry.transform.se3 import translate_body_frame -from d123.geometry.vector import Vector3D # TODO: Refactor this file. # TODO: Add general utilities for 3D primitives and mesh support. diff --git a/d123/dataset/arrow/conversion.py b/d123/dataset/arrow/conversion.py index 56b4b33b..ab078545 100644 --- a/d123/dataset/arrow/conversion.py +++ b/d123/dataset/arrow/conversion.py @@ -27,8 +27,7 @@ from d123.common.datatypes.vehicle_state.vehicle_parameters import VehicleParameters from d123.dataset.logs.log_metadata import LogMetadata from d123.dataset.maps.abstract_map import List -from d123.geometry.bounding_box.bounding_box import BoundingBoxSE3 -from d123.geometry.vector import Vector3D +from d123.geometry import BoundingBoxSE3, Vector3D DATASET_SENSOR_ROOT: Dict[str, Path] = { "nuplan": Path(os.environ["NUPLAN_DATA_ROOT"]) / "nuplan-v1.1" / "sensor_blobs", diff --git a/d123/dataset/conversion/map/opendrive/parser/geometry.py b/d123/dataset/conversion/map/opendrive/parser/geometry.py index 4aabd57c..dc782a7a 100644 --- a/d123/dataset/conversion/map/opendrive/parser/geometry.py +++ b/d123/dataset/conversion/map/opendrive/parser/geometry.py @@ -8,7 +8,7 @@ import numpy.typing as npt from scipy.special import fresnel -from d123.geometry.base import StateSE2Index +from d123.geometry import StateSE2Index @dataclass diff --git a/d123/dataset/conversion/map/opendrive/parser/reference.py b/d123/dataset/conversion/map/opendrive/parser/reference.py index d8ce3b7b..5fe9211b 100644 --- a/d123/dataset/conversion/map/opendrive/parser/reference.py +++ b/d123/dataset/conversion/map/opendrive/parser/reference.py @@ -13,7 +13,7 @@ from d123.dataset.conversion.map.opendrive.parser.geometry import Arc, Geometry, Line, Spiral from d123.dataset.conversion.map.opendrive.parser.lane import LaneOffset, Width from d123.dataset.conversion.map.opendrive.parser.polynomial import Polynomial -from d123.geometry.base import Point3DIndex, StateSE2Index +from d123.geometry import Point3DIndex, StateSE2Index TOLERANCE: Final[float] = 1e-3 diff --git a/d123/dataset/conversion/map/opendrive/utils/lane_helper.py b/d123/dataset/conversion/map/opendrive/utils/lane_helper.py index b6c26131..edd05423 100644 --- a/d123/dataset/conversion/map/opendrive/utils/lane_helper.py +++ b/d123/dataset/conversion/map/opendrive/utils/lane_helper.py @@ -14,8 +14,8 @@ derive_lane_id, lane_group_id_from_lane_id, ) -from d123.geometry.base import StateSE2Index -from d123.geometry.units import kmph_to_mps, mph_to_mps +from d123.geometry import StateSE2Index +from d123.geometry.utils.units import kmph_to_mps, mph_to_mps @dataclass diff --git a/d123/dataset/conversion/map/opendrive/utils/objects_helper.py b/d123/dataset/conversion/map/opendrive/utils/objects_helper.py index 
da85ba5e..11c7f609 100644 --- a/d123/dataset/conversion/map/opendrive/utils/objects_helper.py +++ b/d123/dataset/conversion/map/opendrive/utils/objects_helper.py @@ -7,9 +7,9 @@ from d123.dataset.conversion.map.opendrive.parser.objects import Object from d123.dataset.conversion.map.opendrive.parser.reference import ReferenceLine -from d123.geometry.base import Point2D, Point3D, Point3DIndex, StateSE2 +from d123.geometry import Point2D, Point3D, Point3DIndex, StateSE2 from d123.geometry.transform.tranform_2d import translate_along_yaw -from d123.geometry.utils import normalize_angle +from d123.geometry.utils.rotation_utils import normalize_angle # TODO: make naming consistent with group_collections.py diff --git a/d123/dataset/conversion/map/road_edge/road_edge_3d_utils.py b/d123/dataset/conversion/map/road_edge/road_edge_3d_utils.py index b2fa42e9..b88a44e0 100644 --- a/d123/dataset/conversion/map/road_edge/road_edge_3d_utils.py +++ b/d123/dataset/conversion/map/road_edge/road_edge_3d_utils.py @@ -10,7 +10,7 @@ from shapely.geometry import LineString from d123.dataset.conversion.map.road_edge.road_edge_2d_utils import get_road_edge_linear_rings -from d123.geometry.base import Point3DIndex +from d123.geometry import Point3DIndex from d123.geometry.occupancy_map import OccupancyMap2D logger = logging.getLogger(__name__) diff --git a/d123/dataset/dataset_specific/av2/av2_data_converter.py b/d123/dataset/dataset_specific/av2/av2_data_converter.py index 25b7bec6..d57b4e52 100644 --- a/d123/dataset/dataset_specific/av2/av2_data_converter.py +++ b/d123/dataset/dataset_specific/av2/av2_data_converter.py @@ -34,11 +34,9 @@ from d123.dataset.dataset_specific.av2.av2_map_conversion import convert_av2_map from d123.dataset.dataset_specific.raw_data_converter import DataConverterConfig, RawDataConverter from d123.dataset.logs.log_metadata import LogMetadata -from d123.geometry.base import StateSE3 -from d123.geometry.bounding_box.bounding_box import BoundingBoxSE3Index -from d123.geometry.constants import DEFAULT_PITCH, DEFAULT_ROLL +from d123.geometry import BoundingBoxSE3Index, StateSE3, Vector3D, Vector3DIndex from d123.geometry.transform.se3 import convert_relative_to_absolute_se3_array, get_rotation_matrix -from d123.geometry.vector import Vector3D, Vector3DIndex +from d123.geometry.utils.constants import DEFAULT_PITCH, DEFAULT_ROLL def create_token(input_data: str) -> str: diff --git a/d123/dataset/dataset_specific/av2/av2_map_conversion.py b/d123/dataset/dataset_specific/av2/av2_map_conversion.py index 7fa53fad..daadcd3b 100644 --- a/d123/dataset/dataset_specific/av2/av2_map_conversion.py +++ b/d123/dataset/dataset_specific/av2/av2_map_conversion.py @@ -16,9 +16,7 @@ ) from d123.dataset.dataset_specific.av2.av2_constants import AV2_ROAD_LINE_TYPE_MAPPING from d123.dataset.maps.map_datatypes import MapLayer, RoadEdgeType -from d123.geometry.base import Point3DIndex -from d123.geometry.line.polylines import Polyline2D, Polyline3D -from d123.geometry.occupancy_map import OccupancyMap2D +from d123.geometry import OccupancyMap2D, Point3DIndex, Polyline2D, Polyline3D LANE_GROUP_MARK_TYPES: List[str] = [ "DASHED_WHITE", diff --git a/d123/dataset/dataset_specific/carla/carla_data_converter.py b/d123/dataset/dataset_specific/carla/carla_data_converter.py index 1e360007..c6ce3622 100644 --- a/d123/dataset/dataset_specific/carla/carla_data_converter.py +++ b/d123/dataset/dataset_specific/carla/carla_data_converter.py @@ -24,9 +24,7 @@ from d123.dataset.maps.abstract_map import AbstractMap, MapLayer from 
d123.dataset.maps.abstract_map_objects import AbstractLane from d123.dataset.scene.arrow_scene import get_map_api_from_names -from d123.geometry.base import Point2D, Point3D -from d123.geometry.bounding_box.bounding_box import BoundingBoxSE3Index -from d123.geometry.vector import Vector3DIndex +from d123.geometry import BoundingBoxSE3Index, Point2D, Point3D, Vector3DIndex AVAILABLE_CARLA_MAP_LOCATIONS: Final[List[str]] = [ "Town01", # A small, simple town with a river and several bridges. diff --git a/d123/dataset/dataset_specific/nuplan/nuplan_data_converter.py b/d123/dataset/dataset_specific/nuplan/nuplan_data_converter.py index c65845fb..f57a5f5e 100644 --- a/d123/dataset/dataset_specific/nuplan/nuplan_data_converter.py +++ b/d123/dataset/dataset_specific/nuplan/nuplan_data_converter.py @@ -36,10 +36,8 @@ from d123.dataset.dataset_specific.nuplan.nuplan_map_conversion import MAP_LOCATIONS, NuPlanMapConverter from d123.dataset.dataset_specific.raw_data_converter import DataConverterConfig, RawDataConverter from d123.dataset.logs.log_metadata import LogMetadata -from d123.geometry.base import StateSE3 -from d123.geometry.bounding_box.bounding_box import BoundingBoxSE3, BoundingBoxSE3Index -from d123.geometry.constants import DEFAULT_PITCH, DEFAULT_ROLL -from d123.geometry.vector import Vector3D, Vector3DIndex +from d123.geometry import BoundingBoxSE3, BoundingBoxSE3Index, StateSE3, Vector3D, Vector3DIndex +from d123.geometry.utils.constants import DEFAULT_PITCH, DEFAULT_ROLL TARGET_DT: Final[float] = 0.1 NUPLAN_DT: Final[float] = 0.05 diff --git a/d123/dataset/dataset_specific/wopd/waymo_map_utils/womp_boundary_utils.py b/d123/dataset/dataset_specific/wopd/waymo_map_utils/womp_boundary_utils.py index 4c251252..5831ea2a 100644 --- a/d123/dataset/dataset_specific/wopd/waymo_map_utils/womp_boundary_utils.py +++ b/d123/dataset/dataset_specific/wopd/waymo_map_utils/womp_boundary_utils.py @@ -5,12 +5,9 @@ import numpy.typing as npt import shapely.geometry as geom -from d123.geometry.base import Point3D, StateSE2 -from d123.geometry.line.polylines import Polyline3D, PolylineSE2 -from d123.geometry.occupancy_map import OccupancyMap2D +from d123.geometry import OccupancyMap2D, Point3D, Polyline3D, PolylineSE2, StateSE2, Vector2D from d123.geometry.transform.tranform_2d import translate_along_yaw -from d123.geometry.utils import normalize_angle -from d123.geometry.vector import Vector2D +from d123.geometry.utils.rotation_utils import normalize_angle MAX_LANE_WIDTH = 25.0 # meters MIN_LANE_WIDTH = 2.0 diff --git a/d123/dataset/dataset_specific/wopd/waymo_map_utils/wopd_map_utils.py b/d123/dataset/dataset_specific/wopd/waymo_map_utils/wopd_map_utils.py index 20a2be00..6cf4f0f9 100644 --- a/d123/dataset/dataset_specific/wopd/waymo_map_utils/wopd_map_utils.py +++ b/d123/dataset/dataset_specific/wopd/waymo_map_utils/wopd_map_utils.py @@ -11,9 +11,8 @@ from d123.dataset.dataset_specific.wopd.waymo_map_utils.womp_boundary_utils import extract_lane_boundaries from d123.dataset.maps.map_datatypes import MapLayer, RoadEdgeType, RoadLineType -from d123.geometry.base import Point3DIndex -from d123.geometry.line.polylines import Polyline3D -from d123.geometry.units import mph_to_mps +from d123.geometry import Point3DIndex, Polyline3D +from d123.geometry.utils.units import mph_to_mps # TODO: # - Implement stop signs diff --git a/d123/dataset/dataset_specific/wopd/wopd_data_converter.py b/d123/dataset/dataset_specific/wopd/wopd_data_converter.py index 77f19ead..a6371421 100644 --- 
a/d123/dataset/dataset_specific/wopd/wopd_data_converter.py +++ b/d123/dataset/dataset_specific/wopd/wopd_data_converter.py @@ -27,11 +27,9 @@ from d123.dataset.dataset_specific.wopd.waymo_map_utils.wopd_map_utils import convert_wopd_map from d123.dataset.dataset_specific.wopd.wopd_utils import parse_range_image_and_camera_projection from d123.dataset.logs.log_metadata import LogMetadata -from d123.geometry.base import Point3D, StateSE3 -from d123.geometry.bounding_box.bounding_box import BoundingBoxSE3Index -from d123.geometry.constants import DEFAULT_PITCH, DEFAULT_ROLL +from d123.geometry import BoundingBoxSE3Index, Point3D, StateSE3, Vector3D, Vector3DIndex from d123.geometry.transform.se3 import convert_relative_to_absolute_se3_array, get_rotation_matrix -from d123.geometry.vector import Vector3D, Vector3DIndex +from d123.geometry.utils.constants import DEFAULT_PITCH, DEFAULT_ROLL os.environ["CUDA_VISIBLE_DEVICES"] = "-1" D123_MAPS_ROOT = Path(os.environ.get("D123_MAPS_ROOT")) diff --git a/d123/dataset/maps/abstract_map.py b/d123/dataset/maps/abstract_map.py index f8c547e7..be9eefeb 100644 --- a/d123/dataset/maps/abstract_map.py +++ b/d123/dataset/maps/abstract_map.py @@ -7,7 +7,7 @@ from d123.dataset.maps.abstract_map_objects import AbstractMapObject from d123.dataset.maps.map_datatypes import MapLayer -from d123.geometry.base import Point2D +from d123.geometry import Point2D # TODO: # - add docstrings diff --git a/d123/dataset/maps/abstract_map_objects.py b/d123/dataset/maps/abstract_map_objects.py index a1696009..14e85539 100644 --- a/d123/dataset/maps/abstract_map_objects.py +++ b/d123/dataset/maps/abstract_map_objects.py @@ -7,7 +7,7 @@ import trimesh from d123.dataset.maps.map_datatypes import MapLayer, RoadEdgeType, RoadLineType -from d123.geometry.line.polylines import Polyline2D, Polyline3D, PolylineSE2 +from d123.geometry import Polyline2D, Polyline3D, PolylineSE2 class AbstractMapObject(abc.ABC): diff --git a/d123/dataset/maps/gpkg/gpkg_map.py b/d123/dataset/maps/gpkg/gpkg_map.py index f1fead93..d466857e 100644 --- a/d123/dataset/maps/gpkg/gpkg_map.py +++ b/d123/dataset/maps/gpkg/gpkg_map.py @@ -26,7 +26,7 @@ ) from d123.dataset.maps.gpkg.utils import load_gdf_with_geometry_columns from d123.dataset.maps.map_datatypes import MapLayer -from d123.geometry.base import Point2D +from d123.geometry import Point2D USE_ARROW: bool = True diff --git a/d123/dataset/maps/gpkg/gpkg_map_objects.py b/d123/dataset/maps/gpkg/gpkg_map_objects.py index 4641cd97..1a274a53 100644 --- a/d123/dataset/maps/gpkg/gpkg_map_objects.py +++ b/d123/dataset/maps/gpkg/gpkg_map_objects.py @@ -26,8 +26,7 @@ ) from d123.dataset.maps.gpkg.utils import get_row_with_value from d123.dataset.maps.map_datatypes import RoadEdgeType, RoadLineType -from d123.geometry.base import Point3DIndex -from d123.geometry.line.polylines import Polyline3D +from d123.geometry import Point3DIndex, Polyline3D class GPKGSurfaceObject(AbstractSurfaceMapObject): diff --git a/d123/geometry/__init__.py b/d123/geometry/__init__.py index e69de29b..ca210677 100644 --- a/d123/geometry/__init__.py +++ b/d123/geometry/__init__.py @@ -0,0 +1,18 @@ +from d123.geometry.bounding_box import BoundingBoxSE2, BoundingBoxSE3 +from d123.geometry.geometry_index import ( + BoundingBoxSE2Index, + BoundingBoxSE3Index, + Corners2DIndex, + Corners3DIndex, + Point2DIndex, + Point3DIndex, + StateSE2Index, + StateSE3Index, + Vector2DIndex, + Vector3DIndex, +) +from d123.geometry.occupancy_map import OccupancyMap2D +from d123.geometry.point import Point2D, 
Point3D +from d123.geometry.polyline import Polyline2D, Polyline3D, PolylineSE2 +from d123.geometry.se import StateSE2, StateSE3 +from d123.geometry.vector import Vector2D, Vector3D
diff --git a/d123/geometry/base_index.py b/d123/geometry/base_index.py deleted file mode 100644 index cb258618..00000000 --- a/d123/geometry/base_index.py +++ /dev/null @@ -1 +0,0 @@ -# TODO: Move base index here to avoid circular imports.
diff --git a/d123/geometry/bounding_box/bounding_box.py b/d123/geometry/bounding_box.py similarity index 92% rename from d123/geometry/bounding_box/bounding_box.py rename to d123/geometry/bounding_box.py index ddf7ec15..b71c4fae 100644 --- a/d123/geometry/bounding_box/bounding_box.py +++ b/d123/geometry/bounding_box.py @@ -7,9 +7,9 @@ import numpy.typing as npt import shapely -from d123.geometry.base import StateSE2, StateSE3 -from d123.geometry.bounding_box.bounding_box_index import BoundingBoxSE2Index, BoundingBoxSE3Index -from d123.geometry.bounding_box.utils import bbse2_array_to_corners_array +from d123.geometry.geometry_index import BoundingBoxSE2Index, BoundingBoxSE3Index +from d123.geometry.se import StateSE2, StateSE3 +from d123.geometry.utils.bounding_box_utils import bbse2_array_to_corners_array # TODO: Reconsider naming SE2 and SE3 hierarchies. E.g. would inheritance be a better approach?
diff --git a/d123/geometry/bounding_box/bounding_box_index.py b/d123/geometry/geometry_index.py similarity index 59% rename from d123/geometry/bounding_box/bounding_box_index.py rename to d123/geometry/geometry_index.py index c282b07e..a8652b37 100644 --- a/d123/geometry/bounding_box/bounding_box_index.py +++ b/d123/geometry/geometry_index.py @@ -3,6 +3,69 @@ from d123.common.utils.enums import classproperty +class Point2DIndex(IntEnum): + X = 0 + Y = 1 + + @classproperty + def XY(cls) -> slice: + return slice(cls.X, cls.Y + 1) + + +class Vector2DIndex(IntEnum): + X = 0 + Y = 1 + + +class StateSE2Index(IntEnum): + X = 0 + Y = 1 + YAW = 2 + + @classproperty + def XY(cls) -> slice: + return slice(cls.X, cls.Y + 1) + + +class Point3DIndex(IntEnum): + + X = 0 + Y = 1 + Z = 2 + + @classproperty + def XY(cls) -> slice: + return slice(cls.X, cls.Y + 1) + + +class Vector3DIndex(IntEnum): + X = 0 + Y = 1 + Z = 2 + + +class StateSE3Index(IntEnum): + + X = 0 + Y = 1 + Z = 2 + ROLL = 3 + PITCH = 4 + YAW = 5 + + @classproperty + def XY(cls) -> slice: + return slice(cls.X, cls.Y + 1) + + @classproperty + def XYZ(cls) -> slice: + return slice(cls.X, cls.Z + 1) + + @classproperty + def ROTATION_XYZ(cls) -> slice: + return slice(cls.ROLL, cls.YAW + 1) + + class BoundingBoxSE2Index(IntEnum): X = 0 Y = 1
diff --git a/d123/geometry/line/__init__.py b/d123/geometry/line/__init__.py deleted file mode 100644 index e69de29b..00000000
diff --git a/d123/geometry/point.py b/d123/geometry/point.py new file mode 100644 index 00000000..c12cc22e --- /dev/null +++ b/d123/geometry/point.py @@ -0,0 +1,98 @@ +from __future__ import annotations + +from dataclasses import dataclass +from typing import Iterable + +import numpy as np +import numpy.typing as npt +import shapely.geometry as geom + +from d123.geometry.geometry_index import Point2DIndex, Point3DIndex + + +@dataclass +class Point2D: + """Class to represent 2D points.""" + + x: float # [m] location + y: float # [m] location + __slots__ = "x", "y" + + @classmethod + def from_array(cls, array: npt.NDArray[np.float64]) -> Point2D: + assert array.ndim == 1 + assert array.shape[0] == len(Point2DIndex) + return Point2D(array[Point2DIndex.X], array[Point2DIndex.Y]) + + @property + def array(self) -> npt.NDArray[np.float64]: + """ + Convert point to array + :return: array containing [x, y] + """ + array = np.zeros(len(Point2DIndex), dtype=np.float64) + array[Point2DIndex.X] = self.x + array[Point2DIndex.Y] = self.y + return array + + @property + def shapely_point(self) -> geom.Point: + return geom.Point(self.x, self.y) + + def __iter__(self) -> Iterable[float]: + """ + :return: iterator over the coordinates (x, y) + """ + return iter((self.x, self.y)) + + def __hash__(self) -> int: + """Hash method""" + return hash((self.x, self.y)) + + +@dataclass +class Point3D: + """Class to represent 3D points.""" + + x: float # [m] location + y: float # [m] location + z: float # [m] location + __slots__ = "x", "y", "z" + + @classmethod + def from_array(cls, array: npt.NDArray[np.float64]) -> "Point3D": + assert array.ndim == 1, f"Array must be 1-dimensional, got shape {array.shape}" + assert array.shape[0] == len( + Point3DIndex + ), f"Array must have the same length as Point3DIndex, got shape {array.shape}" + return cls(array[Point3DIndex.X], array[Point3DIndex.Y], array[Point3DIndex.Z]) + + @property + def array(self) -> npt.NDArray[np.float64]: + """ + Convert point to array + :return: array containing [x, y, z] + """ + array = np.zeros(len(Point3DIndex), dtype=np.float64) + array[Point3DIndex.X] = self.x + array[Point3DIndex.Y] = self.y + array[Point3DIndex.Z] = self.z + return array + + @property + def point_2d(self) -> Point2D: + return Point2D(self.x, self.y) + + @property + def shapely_point(self) -> geom.Point: + return geom.Point(self.x, self.y, self.z) + + def __iter__(self) -> Iterable[float]: + """ + :return: iterator over the coordinates (x, y, z) + """ + return iter((self.x, self.y, self.z)) + + def __hash__(self) -> int: + """Hash method""" + return hash((self.x, self.y, self.z))
diff --git a/d123/geometry/line/polylines.py b/d123/geometry/polyline.py similarity index 95% rename from d123/geometry/line/polylines.py rename to d123/geometry/polyline.py index 78a417cd..365b458e 100644 --- a/d123/geometry/line/polylines.py +++ b/d123/geometry/polyline.py @@ -9,10 +9,11 @@ import shapely.geometry as geom from scipy.interpolate import interp1d -from d123.geometry.base import Point2D, Point2DIndex, Point3D, Point3DIndex, StateSE2, StateSE2Index -from d123.geometry.constants import DEFAULT_Z -from d123.geometry.line.helper import get_linestring_yaws, get_path_progress -from d123.geometry.utils import normalize_angle +from d123.geometry.point import Point2D, Point2DIndex, Point3D, Point3DIndex +from d123.geometry.se import StateSE2, StateSE2Index +from d123.geometry.utils.constants import DEFAULT_Z +from d123.geometry.utils.polyline_utils import get_linestring_yaws, get_path_progress +from d123.geometry.utils.rotation_utils import normalize_angle # TODO: Implement PolylineSE3 # TODO: Benchmark interpolation performance and reconsider reliance on LineString
diff --git a/d123/geometry/base.py b/d123/geometry/se.py similarity index 53% rename from d123/geometry/base.py rename to d123/geometry/se.py index b531f499..8abd908a 100644 --- a/d123/geometry/base.py +++ b/d123/geometry/se.py @@ -1,76 +1,14 @@ from __future__ import annotations from dataclasses import dataclass -from enum import IntEnum from typing import Iterable import numpy as np import numpy.typing as npt import shapely.geometry as geom -# from d123.geometry.transform.se3 import get_rotation_matrix -from d123.common.utils.enums import classproperty - -# TODO: Reconsider if 2D/3D or SE2/SE3 structure 
would be better hierarchical, e.g. inheritance or composition. - - -class Point2DIndex(IntEnum): - X = 0 - Y = 1 - - @classproperty - def XY(cls) -> slice: - return slice(cls.X, cls.Y + 1) - - -@dataclass -class Point2D: - """Class to represents 2D points.""" - - x: float # [m] location - y: float # [m] location - __slots__ = "x", "y" - - @classmethod - def from_array(cls, array: npt.NDArray[np.float64]) -> Point2D: - assert array.ndim == 1 - assert array.shape[0] == len(Point2DIndex) - return Point2D(array[Point2DIndex.X], array[Point2DIndex.Y]) - - @property - def array(self) -> npt.NDArray[np.float64]: - """ - Convert vector to array - :return: array containing [x, y] - """ - array = np.zeros(len(Point2DIndex), dtype=np.float64) - array[Point2DIndex.X] = self.x - array[Point2DIndex.Y] = self.y - return array - - @property - def shapely_point(self) -> geom.Point: - return geom.Point(self.x, self.y) - - def __iter__(self) -> Iterable[float]: - """ - :return: iterator of tuples (x, y) - """ - return iter((self.x, self.y)) - - def __hash__(self) -> int: - """Hash method""" - return hash((self.x, self.y)) - - -class StateSE2Index(IntEnum): - X = 0 - Y = 1 - YAW = 2 - - @classproperty - def XY(cls) -> slice: - return slice(cls.X, cls.Y + 1) +from d123.geometry.geometry_index import StateSE2Index, StateSE3Index +from d123.geometry.point import Point2D, Point3D @dataclass @@ -123,87 +61,6 @@ def __hash__(self) -> int: return hash((self.x, self.y)) -class Point3DIndex(IntEnum): - - X = 0 - Y = 1 - Z = 2 - - @classproperty - def XY(cls) -> slice: - return slice(cls.X, cls.Y + 1) - - -@dataclass -class Point3D: - """Class to represents 2D points.""" - - x: float # [m] location - y: float # [m] location - z: float # [m] location - __slots__ = "x", "y", "z" - - @classmethod - def from_array(cls, array: npt.NDArray[np.float64]) -> "Point3D": - assert array.ndim == 1, f"Array must be 1-dimensional, got shape {array.shape}" - assert array.shape[0] == len( - Point3DIndex - ), f"Array must have the same length as Point3DIndex, got shape {array.shape}" - return cls(array[Point3DIndex.X], array[Point3DIndex.Y], array[Point3DIndex.Z]) - - @property - def array(self) -> npt.NDArray[np.float64]: - """ - Convert vector to array - :return: array containing [x, y] - """ - array = np.zeros(len(Point3DIndex), dtype=np.float64) - array[Point3DIndex.X] = self.x - array[Point3DIndex.Y] = self.y - array[Point3DIndex.Z] = self.z - return array - - @property - def point_2d(self) -> Point2D: - return Point2D(self.x, self.y) - - @property - def shapely_point(self) -> geom.Point: - return geom.Point(self.x, self.y, self.z) - - def __iter__(self) -> Iterable[float]: - """ - :return: iterator of tuples (x, y) - """ - return iter((self.x, self.y, self.z)) - - def __hash__(self) -> int: - """Hash method""" - return hash((self.x, self.y, self.z)) - - -class StateSE3Index(IntEnum): - - X = 0 - Y = 1 - Z = 2 - ROLL = 3 - PITCH = 4 - YAW = 5 - - @classproperty - def XY(cls) -> slice: - return slice(cls.X, cls.Y + 1) - - @classproperty - def XYZ(cls) -> slice: - return slice(cls.X, cls.Z + 1) - - @classproperty - def ROTATION_XYZ(cls) -> slice: - return slice(cls.ROLL, cls.YAW + 1) - - @dataclass class StateSE3: """Class to represents 2D points.""" diff --git a/d123/geometry/transform/se2_array.py b/d123/geometry/transform/se2_array.py index 5112ea34..97ff8bee 100644 --- a/d123/geometry/transform/se2_array.py +++ b/d123/geometry/transform/se2_array.py @@ -3,8 +3,8 @@ import numpy as np import numpy.typing as npt -from 
d123.geometry.base import StateSE2, StateSE2Index -from d123.geometry.line.polylines import normalize_angle +from d123.geometry.se import StateSE2, StateSE2Index +from d123.geometry.utils.rotation_utils import normalize_angle # TODO: Refactor 2D and 3D transform functions in a more consistent and general way. diff --git a/d123/geometry/transform/se3.py b/d123/geometry/transform/se3.py index 42fae1c2..6b4219d0 100644 --- a/d123/geometry/transform/se3.py +++ b/d123/geometry/transform/se3.py @@ -1,8 +1,7 @@ import numpy as np import numpy.typing as npt -from d123.geometry.base import Point3DIndex, StateSE3, StateSE3Index -from d123.geometry.vector import Vector3D +from d123.geometry import Point3DIndex, StateSE3, StateSE3Index, Vector3D # def get_rotation_matrix(state_se3: StateSE3) -> npt.NDArray[np.float64]: # R_x = np.array( diff --git a/d123/geometry/transform/tranform_2d.py b/d123/geometry/transform/tranform_2d.py index 18998709..b85e598b 100644 --- a/d123/geometry/transform/tranform_2d.py +++ b/d123/geometry/transform/tranform_2d.py @@ -1,7 +1,7 @@ import numpy as np import numpy.typing as npt -from d123.geometry.base import StateSE2 +from d123.geometry.se import StateSE2 from d123.geometry.vector import Vector2D # TODO: Refactor 2D and 3D transform functions in a more consistent and general way. diff --git a/d123/geometry/bounding_box/__init__.py b/d123/geometry/utils/__init__.py similarity index 100% rename from d123/geometry/bounding_box/__init__.py rename to d123/geometry/utils/__init__.py diff --git a/d123/geometry/utils/bounding_box_utils.py b/d123/geometry/utils/bounding_box_utils.py new file mode 100644 index 00000000..b1ce46b2 --- /dev/null +++ b/d123/geometry/utils/bounding_box_utils.py @@ -0,0 +1,63 @@ +import numpy as np +import numpy.typing as npt +import shapely + +from d123.geometry.geometry_index import BoundingBoxSE2Index, Corners2DIndex, Point2DIndex + + +def bbse2_array_to_corners_array(bbse2: npt.NDArray[np.float64]) -> npt.NDArray[np.float64]: + """ + Converts an array of BoundingBoxSE2 objects to a coordinates array. + :param bbse2: Array of BoundingBoxSE2 objects. + :return: Coordinates array of shape (n, 5, 2) where n is the number of bounding boxes. 
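+    The four corners are written in Corners2DIndex order (FRONT_LEFT, FRONT_RIGHT, BACK_RIGHT, BACK_LEFT), each offset from the center by +/- length/2 along the yaw direction and +/- width/2 laterally.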
+ """ + assert bbse2.shape[-1] == len(BoundingBoxSE2Index) + + ndim_one: bool = bbse2.ndim == 1 + if ndim_one: + bbse2 = bbse2[None, :] + + corners_array = np.zeros((*bbse2.shape[:-1], len(Corners2DIndex), len(Point2DIndex)), dtype=np.float64) + + centers = bbse2[..., BoundingBoxSE2Index.XY] + yaws = bbse2[..., BoundingBoxSE2Index.YAW] + half_length = bbse2[..., BoundingBoxSE2Index.LENGTH] / 2.0 + half_width = bbse2[..., BoundingBoxSE2Index.WIDTH] / 2.0 + + corners_array[..., Corners2DIndex.FRONT_LEFT, :] = translate_along_yaw_array(centers, yaws, half_length, half_width) + corners_array[..., Corners2DIndex.FRONT_RIGHT, :] = translate_along_yaw_array( + centers, yaws, half_length, -half_width + ) + corners_array[..., Corners2DIndex.BACK_RIGHT, :] = translate_along_yaw_array( + centers, yaws, -half_length, -half_width + ) + corners_array[..., Corners2DIndex.BACK_LEFT, :] = translate_along_yaw_array(centers, yaws, -half_length, half_width) + + return corners_array.squeeze(axis=0) if ndim_one else corners_array + + +def corners_array_to_polygon_array(corners_array: npt.NDArray[np.float64]) -> npt.NDArray[np.object_]: + polygons = shapely.creation.polygons(corners_array) + return polygons + + +def bbse2_array_to_polygon_array(bbse2: npt.NDArray[np.float64]) -> npt.NDArray[np.float64]: + return corners_array_to_polygon_array(bbse2_array_to_corners_array(bbse2)) + + +def translate_along_yaw_array( + points_2d: npt.NDArray[np.float64], + headings: npt.NDArray[np.float64], + lon: npt.NDArray[np.float64], + lat: npt.NDArray[np.float64], +) -> npt.NDArray[np.float64]: + assert points_2d.shape[-1] == len(Point2DIndex) + half_pi = np.pi / 2.0 + translation: npt.NDArray[np.float64] = np.stack( + [ + (lat * np.cos(headings + half_pi)) + (lon * np.cos(headings)), + (lat * np.sin(headings + half_pi)) + (lon * np.sin(headings)), + ], + axis=-1, + ) + return points_2d + translation diff --git a/d123/geometry/constants.py b/d123/geometry/utils/constants.py similarity index 100% rename from d123/geometry/constants.py rename to d123/geometry/utils/constants.py diff --git a/d123/geometry/line/helper.py b/d123/geometry/utils/polyline_utils.py similarity index 96% rename from d123/geometry/line/helper.py rename to d123/geometry/utils/polyline_utils.py index c3fc0f1a..5e82ea9f 100644 --- a/d123/geometry/line/helper.py +++ b/d123/geometry/utils/polyline_utils.py @@ -2,7 +2,7 @@ import numpy.typing as npt from shapely.geometry import LineString -from d123.geometry.base import Point2DIndex, StateSE2Index +from d123.geometry.geometry_index import Point2DIndex, StateSE2Index def get_linestring_yaws(linestring: LineString) -> npt.NDArray[np.float64]: diff --git a/d123/geometry/utils.py b/d123/geometry/utils/rotation_utils.py similarity index 100% rename from d123/geometry/utils.py rename to d123/geometry/utils/rotation_utils.py diff --git a/d123/geometry/units.py b/d123/geometry/utils/units.py similarity index 100% rename from d123/geometry/units.py rename to d123/geometry/utils/units.py diff --git a/d123/geometry/bounding_box/utils.py b/d123/geometry/utils/utils.py similarity index 94% rename from d123/geometry/bounding_box/utils.py rename to d123/geometry/utils/utils.py index 5da3530f..b1ce46b2 100644 --- a/d123/geometry/bounding_box/utils.py +++ b/d123/geometry/utils/utils.py @@ -2,8 +2,7 @@ import numpy.typing as npt import shapely -from d123.geometry.base import Point2DIndex -from d123.geometry.bounding_box.bounding_box_index import BoundingBoxSE2Index, Corners2DIndex +from d123.geometry.geometry_index import 
BoundingBoxSE2Index, Corners2DIndex, Point2DIndex def bbse2_array_to_corners_array(bbse2: npt.NDArray[np.float64]) -> npt.NDArray[np.float64]: diff --git a/d123/geometry/vector.py b/d123/geometry/vector.py index dad25329..a3b8631d 100644 --- a/d123/geometry/vector.py +++ b/d123/geometry/vector.py @@ -1,16 +1,9 @@ from __future__ import annotations -from enum import IntEnum - import numpy as np import numpy.typing as npt -from d123.geometry.base import Point2D, Point3D, Point3DIndex - - -class Vector2DIndex(IntEnum): - X = 0 - Y = 1 +from d123.geometry.point import Point2D, Point3D, Point3DIndex class Vector2D(Point2D): @@ -35,12 +28,6 @@ def vector_2d(self) -> Vector2D: return self -class Vector3DIndex(IntEnum): - X = 0 - Y = 1 - Z = 2 - - class Vector3D(Point3D): @classmethod diff --git a/d123/simulation/agents/constant_velocity_agents.py b/d123/simulation/agents/constant_velocity_agents.py index b9a4b587..7d1a58d0 100644 --- a/d123/simulation/agents/constant_velocity_agents.py +++ b/d123/simulation/agents/constant_velocity_agents.py @@ -5,8 +5,8 @@ from d123.common.datatypes.detection.detection import BoxDetection, BoxDetectionSE2 from d123.dataset.maps.abstract_map import AbstractMap from d123.dataset.scene.abstract_scene import AbstractScene -from d123.geometry.base import Point2D -from d123.geometry.bounding_box.bounding_box import BoundingBoxSE2 +from d123.geometry.bounding_box import BoundingBoxSE2 +from d123.geometry.point import Point2D from d123.geometry.transform.tranform_2d import translate_along_yaw from d123.simulation.agents.abstract_agents import AbstractAgents diff --git a/d123/simulation/agents/idm_agents.py b/d123/simulation/agents/idm_agents.py index 118355a1..072773e1 100644 --- a/d123/simulation/agents/idm_agents.py +++ b/d123/simulation/agents/idm_agents.py @@ -10,9 +10,10 @@ from d123.dataset.arrow.conversion import BoxDetectionWrapper from d123.dataset.maps.abstract_map import AbstractMap from d123.dataset.scene.abstract_scene import AbstractScene -from d123.geometry.base import Point2D, StateSE2 -from d123.geometry.bounding_box.bounding_box import BoundingBoxSE2 -from d123.geometry.line.polylines import PolylineSE2 +from d123.geometry.bounding_box import BoundingBoxSE2 +from d123.geometry.point import Point2D +from d123.geometry.polyline import PolylineSE2 +from d123.geometry.se import StateSE2 from d123.geometry.transform.tranform_2d import translate_along_yaw from d123.geometry.vector import Vector2D from d123.simulation.agents.abstract_agents import AbstractAgents diff --git a/d123/simulation/agents/path_following.py b/d123/simulation/agents/path_following.py index 357aac41..e4d740c3 100644 --- a/d123/simulation/agents/path_following.py +++ b/d123/simulation/agents/path_following.py @@ -5,9 +5,10 @@ from d123.common.datatypes.detection.detection import BoxDetection, BoxDetectionSE2 from d123.dataset.maps.abstract_map import AbstractMap from d123.dataset.scene.abstract_scene import AbstractScene -from d123.geometry.base import Point2D, StateSE2 -from d123.geometry.bounding_box.bounding_box import BoundingBoxSE2 -from d123.geometry.line.polylines import PolylineSE2 +from d123.geometry.bounding_box import BoundingBoxSE2 +from d123.geometry.point import Point2D +from d123.geometry.polyline import PolylineSE2 +from d123.geometry.se import StateSE2 from d123.geometry.transform.tranform_2d import translate_along_yaw from d123.simulation.agents.abstract_agents import AbstractAgents diff --git a/d123/simulation/agents/smart_agents.py 
b/d123/simulation/agents/smart_agents.py index 2b20a496..3af49624 100644 --- a/d123/simulation/agents/smart_agents.py +++ b/d123/simulation/agents/smart_agents.py @@ -9,10 +9,10 @@ from d123.dataset.arrow.conversion import BoxDetectionWrapper, DetectionType from d123.dataset.maps.abstract_map import AbstractMap from d123.dataset.scene.abstract_scene import AbstractScene -from d123.geometry.base import StateSE2 -from d123.geometry.bounding_box.bounding_box import BoundingBoxSE2 +from d123.geometry.bounding_box import BoundingBoxSE2 +from d123.geometry.se import StateSE2 from d123.geometry.transform.se2_array import convert_relative_to_absolute_point_2d_array -from d123.geometry.utils import normalize_angle +from d123.geometry.utils.rotation_utils import normalize_angle from d123.simulation.agents.abstract_agents import AbstractAgents from d123.training.feature_builder.smart_feature_builder import SMARTFeatureBuilder from d123.training.models.sim_agent.smart.datamodules.target_builder import _numpy_dict_to_torch diff --git a/d123/simulation/controller/motion_model/kinematic_bicycle_model.py b/d123/simulation/controller/motion_model/kinematic_bicycle_model.py index c1f343da..6ef5893c 100644 --- a/d123/simulation/controller/motion_model/kinematic_bicycle_model.py +++ b/d123/simulation/controller/motion_model/kinematic_bicycle_model.py @@ -3,7 +3,7 @@ from d123.common.datatypes.time.time_point import TimeDuration, TimePoint from d123.common.datatypes.vehicle_state.ego_state import DynamicStateSE2, EgoStateSE2 -from d123.geometry.base import StateSE2 +from d123.geometry.se import StateSE2 from d123.geometry.vector import Vector2D from d123.simulation.controller.motion_model.abstract_motion_model import AbstractMotionModel diff --git a/d123/simulation/gym/environment/gym_observation/raster/raster_renderer.py b/d123/simulation/gym/environment/gym_observation/raster/raster_renderer.py index 604626c9..38d661ac 100644 --- a/d123/simulation/gym/environment/gym_observation/raster/raster_renderer.py +++ b/d123/simulation/gym/environment/gym_observation/raster/raster_renderer.py @@ -12,7 +12,7 @@ from shapely.affinity import scale as shapely_scale from d123.common.datatypes.detection.detection import BoxDetectionSE2, TrafficLightStatus -from d123.geometry.base import StateSE2 +from d123.geometry.se import StateSE2 from d123.geometry.transform.se2_array import convert_absolute_to_relative_point_2d_array from d123.geometry.transform.tranform_2d import translate_along_yaw from d123.geometry.vector import Vector2D diff --git a/d123/simulation/gym/environment/helper/environment_area.py b/d123/simulation/gym/environment/helper/environment_area.py index db247458..90f5a0c9 100644 --- a/d123/simulation/gym/environment/helper/environment_area.py +++ b/d123/simulation/gym/environment/helper/environment_area.py @@ -3,7 +3,7 @@ from shapely import Polygon -from d123.geometry.base import StateSE2 +from d123.geometry.se import StateSE2 from d123.geometry.transform.tranform_2d import translate_along_yaw from d123.geometry.vector import Vector2D diff --git a/d123/simulation/gym/environment/helper/environment_cache.py b/d123/simulation/gym/environment/helper/environment_cache.py index 70a3dcdf..4ab65f6a 100644 --- a/d123/simulation/gym/environment/helper/environment_cache.py +++ b/d123/simulation/gym/environment/helper/environment_cache.py @@ -24,8 +24,8 @@ AbstractStopLine, ) from d123.dataset.maps.map_datatypes import MapLayer -from d123.geometry.base import StateSE2 from d123.geometry.occupancy_map import 
OccupancyMap2D +from d123.geometry.se import StateSE2 from d123.simulation.gym.environment.helper.environment_area import AbstractEnvironmentArea from d123.simulation.planning.abstract_planner import PlannerInitialization, PlannerInput diff --git a/d123/simulation/metrics/sim_agents/interaction_based.py b/d123/simulation/metrics/sim_agents/interaction_based.py index 05bae92e..7cbd9b92 100644 --- a/d123/simulation/metrics/sim_agents/interaction_based.py +++ b/d123/simulation/metrics/sim_agents/interaction_based.py @@ -4,8 +4,8 @@ import numpy.typing as npt from d123.dataset.arrow.conversion import BoxDetectionWrapper -from d123.geometry.bounding_box.bounding_box_index import BoundingBoxSE2Index -from d123.geometry.bounding_box.utils import bbse2_array_to_polygon_array +from d123.geometry.geometry_index import BoundingBoxSE2Index +from d123.geometry.utils.bounding_box_utils import bbse2_array_to_polygon_array MAX_OBJECT_DISTANCE: Final[float] = 50.0 diff --git a/d123/simulation/metrics/sim_agents/map_based.py b/d123/simulation/metrics/sim_agents/map_based.py index 5342be6e..134a3c5e 100644 --- a/d123/simulation/metrics/sim_agents/map_based.py +++ b/d123/simulation/metrics/sim_agents/map_based.py @@ -7,10 +7,10 @@ from d123.dataset.maps.abstract_map import AbstractMap from d123.dataset.maps.abstract_map_objects import AbstractLane from d123.dataset.maps.map_datatypes import MapLayer -from d123.geometry.base import StateSE2, StateSE2Index -from d123.geometry.bounding_box.bounding_box_index import BoundingBoxSE2Index -from d123.geometry.bounding_box.utils import Corners2DIndex, bbse2_array_to_corners_array -from d123.geometry.utils import normalize_angle +from d123.geometry.geometry_index import BoundingBoxSE2Index, Corners2DIndex, StateSE2Index +from d123.geometry.se import StateSE2 +from d123.geometry.utils.bounding_box_utils import bbse2_array_to_corners_array +from d123.geometry.utils.rotation_utils import normalize_angle MAX_LANE_CENTER_DISTANCE: Final[float] = 10.0 diff --git a/d123/simulation/metrics/sim_agents/sim_agents.py b/d123/simulation/metrics/sim_agents/sim_agents.py index 7f9f372e..36033e70 100644 --- a/d123/simulation/metrics/sim_agents/sim_agents.py +++ b/d123/simulation/metrics/sim_agents/sim_agents.py @@ -7,7 +7,7 @@ from d123.common.datatypes.detection.detection import BoxDetection, BoxDetectionWrapper, DetectionType from d123.dataset.maps.abstract_map import AbstractMap from d123.dataset.scene.abstract_scene import AbstractScene -from d123.geometry.bounding_box.bounding_box_index import BoundingBoxSE2Index +from d123.geometry.geometry_index import BoundingBoxSE2Index from d123.simulation.metrics.sim_agents.histogram_metric import ( BinaryHistogramIntersectionMetric, HistogramIntersectionMetric, diff --git a/d123/simulation/metrics/sim_agents/utils.py b/d123/simulation/metrics/sim_agents/utils.py index 2923038a..c9d2bb3b 100644 --- a/d123/simulation/metrics/sim_agents/utils.py +++ b/d123/simulation/metrics/sim_agents/utils.py @@ -5,7 +5,7 @@ from d123.common.datatypes.detection.detection import BoxDetectionWrapper from d123.dataset.scene.abstract_scene import AbstractScene -from d123.geometry.bounding_box.bounding_box_index import BoundingBoxSE2Index +from d123.geometry.geometry_index import BoundingBoxSE2Index def _get_log_agents_array( diff --git a/d123/training/feature_builder/smart_feature_builder.py b/d123/training/feature_builder/smart_feature_builder.py index 806350bf..4286b990 100644 --- a/d123/training/feature_builder/smart_feature_builder.py +++ 
b/d123/training/feature_builder/smart_feature_builder.py @@ -16,9 +16,8 @@ AbstractLaneGroup, ) from d123.dataset.scene.abstract_scene import AbstractScene -from d123.geometry.base import StateSE2, StateSE2Index -from d123.geometry.bounding_box.bounding_box import BoundingBoxSE2 -from d123.geometry.line.polylines import PolylineSE2 +from d123.geometry import BoundingBoxSE2, PolylineSE2, StateSE2 +from d123.geometry.geometry_index import StateSE2Index from d123.geometry.transform.se2_array import convert_absolute_to_relative_se2_array # TODO: Hind feature builder behind abstraction. diff --git a/notebooks/av2/delete_me.ipynb b/notebooks/av2/delete_me.ipynb index 85226114..434287f0 100644 --- a/notebooks/av2/delete_me.ipynb +++ b/notebooks/av2/delete_me.ipynb @@ -386,7 +386,7 @@ "from pyquaternion import Quaternion\n", "from d123.common.datatypes.detection.detection_types import DetectionType\n", "from d123.geometry.base import StateSE2\n", - "from d123.geometry.bounding_box.bounding_box import BoundingBoxSE2\n", + "from d123.geometry.bounding_box import BoundingBoxSE2\n", "from d123.common.visualization.color.config import PlotConfig\n", "from d123.common.visualization.color.default import BOX_DETECTION_CONFIG\n", "from d123.common.visualization.matplotlib.utils import add_shapely_polygon_to_ax\n", diff --git a/notebooks/deprecated/test_scene_builder.ipynb b/notebooks/deprecated/test_scene_builder.ipynb index 607cd184..e41ba4dd 100644 --- a/notebooks/deprecated/test_scene_builder.ipynb +++ b/notebooks/deprecated/test_scene_builder.ipynb @@ -72,8 +72,8 @@ "# import matplotlib.pyplot as plt\n", "# from tqdm import tqdm\n", "\n", - "# from d123.geometry.base import Point2D, StateSE2\n", - "# from d123.geometry.bounding_box.bounding_box import BoundingBoxSE2\n", + "# from d123.geometry import Point2D, StateSE2\n", + "# from d123.geometry.bounding_box import BoundingBoxSE2\n", "# from d123.common.visualization.color.default import EGO_VEHICLE_CONFIG\n", "# from d123.common.visualization.matplotlib.observation import (\n", "# add_bounding_box_to_ax,\n", diff --git a/notebooks/gym/test_gym.ipynb b/notebooks/gym/test_gym.ipynb index 3fbb3a94..c49f505c 100644 --- a/notebooks/gym/test_gym.ipynb +++ b/notebooks/gym/test_gym.ipynb @@ -78,7 +78,7 @@ "\n", "from d123.common.datatypes.vehicle_state.ego_state import EgoStateSE2\n", "from d123.geometry.base import Point2D, StateSE2\n", - "from d123.geometry.bounding_box.bounding_box import BoundingBoxSE2\n", + "from d123.geometry.bounding_box import BoundingBoxSE2\n", "from d123.common.visualization.color.default import EGO_VEHICLE_CONFIG\n", "from d123.common.visualization.matplotlib.observation import (\n", " add_bounding_box_to_ax,\n", diff --git a/notebooks/scene_rendering.ipynb b/notebooks/scene_rendering.ipynb index 0a50c5ed..4436e7ea 100644 --- a/notebooks/scene_rendering.ipynb +++ b/notebooks/scene_rendering.ipynb @@ -63,7 +63,7 @@ "from typing import Tuple\n", "from d123.common.datatypes.detection.detection import BoxDetection\n", "from d123.common.datatypes.detection.detection_types import DYNAMIC_DETECTION_TYPES, STATIC_DETECTION_TYPES\n", - "from d123.geometry.base import StateSE2\n", + "from d123.geometry import StateSE2\n", "from d123.geometry.transform.tranform_2d import translate_along_yaw\n", "from d123.geometry.vector import Vector2D\n", "from d123.common.visualization.matplotlib.observation import (\n", diff --git a/notebooks/viz/bev_matplotlib.ipynb b/notebooks/viz/bev_matplotlib.ipynb index 3755c860..f8fe8c69 100644 --- 
a/notebooks/viz/bev_matplotlib.ipynb +++ b/notebooks/viz/bev_matplotlib.ipynb @@ -63,7 +63,7 @@ "from typing import List, Optional, Tuple\n", "import matplotlib.pyplot as plt\n", "import numpy as np\n", - "from d123.geometry.base import Point2D\n", + "from d123.geometry import Point2D\n", "from d123.common.visualization.color.color import BLACK, DARK_GREY, DARKER_GREY, LIGHT_GREY, NEW_TAB_10, TAB_10\n", "from d123.common.visualization.color.config import PlotConfig\n", "from d123.common.visualization.color.default import CENTERLINE_CONFIG, MAP_SURFACE_CONFIG, ROUTE_CONFIG\n", @@ -236,17 +236,17 @@ " return fig, ax\n", "\n", "\n", - "scene_index = 9\n", + "scene_index = 1\n", "iteration = 99\n", "fig, ax = plot_scene_at_iteration(scenes[scene_index], iteration=iteration, radius=60)\n", "plt.show()\n", "\n", - "camera = scenes[scene_index].get_camera_at_iteration(\n", - " iteration=iteration, camera_type=CameraType.CAM_F0\n", - ")\n", + "# camera = scenes[scene_index].get_camera_at_iteration(\n", + "# iteration=iteration, camera_type=CameraType.CAM_F0\n", + "# )\n", "\n", - "plt.imshow(camera.image, cmap=\"gray\", vmin=0, vmax=255)\n", - "# # fig.savefig(f\"/home/daniel/scene_{scene_index}_iteration_1.pdf\", dpi=300, bbox_inches=\"tight\")\n", + "# plt.imshow(camera.image, cmap=\"gray\", vmin=0, vmax=255)\n", + "# # # fig.savefig(f\"/home/daniel/scene_{scene_index}_iteration_1.pdf\", dpi=300, bbox_inches=\"tight\")\n", "\n", "# scenes[scene_index].log_name" ] diff --git a/notebooks/waymo_perception/lidar_testing.ipynb b/notebooks/waymo_perception/lidar_testing.ipynb index dfa0e65e..fa6a92a6 100644 --- a/notebooks/waymo_perception/lidar_testing.ipynb +++ b/notebooks/waymo_perception/lidar_testing.ipynb @@ -54,8 +54,8 @@ "import io\n", "from pyquaternion import Quaternion\n", "\n", - "from d123.geometry.base import StateSE3\n", - "from d123.geometry.bounding_box.bounding_box import BoundingBoxSE3\n", + "from d123.geometry import StateSE3\n", + "from d123.geometry.bounding_box import BoundingBoxSE3\n", "\n", "from waymo_open_dataset.utils import frame_utils\n", "\n", diff --git a/notebooks/waymo_perception/map_testing.ipynb b/notebooks/waymo_perception/map_testing.ipynb index a06d8f7c..d7694c2d 100644 --- a/notebooks/waymo_perception/map_testing.ipynb +++ b/notebooks/waymo_perception/map_testing.ipynb @@ -54,8 +54,8 @@ "import io\n", "from pyquaternion import Quaternion\n", "\n", - "from d123.geometry.base import StateSE3\n", - "from d123.geometry.bounding_box.bounding_box import BoundingBoxSE3\n", + "from d123.geometry import StateSE3\n", + "from d123.geometry.bounding_box import BoundingBoxSE3\n", "\n", "from waymo_open_dataset.utils import frame_utils\n", "\n", @@ -139,7 +139,7 @@ "source": [ "from collections import defaultdict\n", "\n", - "from d123.geometry.units import mph_to_mps\n", + "from d123.geometry.utils.units import mph_to_mps\n", "\n", "\n", "dataset = tf.data.TFRecordDataset(pathname, compression_type=\"\")\n", @@ -382,7 +382,7 @@ "metadata": {}, "outputs": [], "source": [ - "from d123.geometry.line.polylines import Polyline3D\n", + "from d123.geometry.polyline import Polyline3D\n", "import numpy as np\n", "\n", "\n", @@ -627,78 +627,6 @@ "ax.set_aspect(\"equal\")\n", "# lanes" ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "15", - "metadata": {}, - "outputs": [], - "source": [ - "from d123.geometry.base import StateSE2\n", - "# from d123.geometry.line.polylines import PolylineSE2\n", - "# from d123.geometry.transform.tranform_2d import 
translate_along_yaw\n", - "# from d123.geometry.vector import Vector2D\n", - "\n", - "# size = 30\n", - "# fig, ax = plt.subplots(figsize=(size, size))\n", - "\n", - "# lane_id = 114\n", - "# lane_ = lanes[lane_id]\n", - "\n", - "# lane_polyline_se2 = PolylineSE2.from_array(lane_[:, :2])\n", - "# BOUNDARY_STEP_SIZE = 0.5\n", - "\n", - "\n", - "# distances_se2 = np.linspace(\n", - "# 0, lane_polyline_se2.length, int(lane_polyline_se2.length / BOUNDARY_STEP_SIZE) + 1, endpoint=True\n", - "# )\n", - "# lane_query_se2 = lane_polyline_se2.interpolate(distances_se2)\n", - "\n", - "\n", - "# left_boundary = left_boundaries[lane_id].array\n", - "# right_boundary = right_boundaries[lane_id].array\n", - "\n", - "# assert len(left_boundary) > 0 and len(right_boundary) > 0\n", - "# ax.plot(left_boundary[:, 0], left_boundary[:, 1], color=\"lime\")\n", - "# ax.plot(right_boundary[:, 0], right_boundary[:, 1], color=\"red\")\n", - "\n", - "# ax.scatter(lane_query_se2[:, 0], lane_query_se2[:, 1], c=lane_query_se2[:, 2] / np.pi, cmap=\"viridis\")\n", - "\n", - "\n", - "# MAX_LANE_WIDTH = 25\n", - "# for state_se2_array in lane_query_se2:\n", - "# for sign in [1.0, -1.0]:\n", - "# perp_start_point = translate_along_yaw(StateSE2.from_array(state_se2_array), Vector2D(0.0, sign * 0.1))\n", - "# perp_end_point = translate_along_yaw(\n", - "# StateSE2.from_array(state_se2_array), Vector2D(0.0, sign * MAX_LANE_WIDTH / 2.0)\n", - "# )\n", - "# ax.plot(\n", - "# [perp_start_point.x, perp_end_point.x],\n", - "# [perp_start_point.y, perp_end_point.y],\n", - "# color=\"lime\" if sign > 0 else \"red\",\n", - "# linestyle=\"--\",\n", - "# alpha=0.5,\n", - "# )\n", - "\n", - "\n", - "# for road_edge in road_edges.values():\n", - "# # print(len(driveway))\n", - "# ax.plot(road_edge[:, 0], road_edge[:, 1], color=\"black\", label=\"road_edge\", linestyle=\"dashdot\")\n", - "\n", - "\n", - "\n", - "# for road_line in road_lines.values():\n", - "# # print(len(driveway))\n", - "# ax.plot(road_line[:, 0], road_line[:, 1], color=\"orange\", label=\"road_line\")\n", - "\n", - "# ax.set_aspect(\"equal\")\n", - "\n", - "# ax.set_xlim(lane_[:, 0].min() - 10, lane_[:, 0].max() + 10)\n", - "# ax.set_ylim(lane_[:, 1].min() - 10, lane_[:, 1].max() + 10)\n", - "\n", - "# lanes_map_features[lane_id]" - ] } ], "metadata": { diff --git a/notebooks/waymo_perception/testing.ipynb b/notebooks/waymo_perception/testing.ipynb index 9adc8510..d03d7d93 100644 --- a/notebooks/waymo_perception/testing.ipynb +++ b/notebooks/waymo_perception/testing.ipynb @@ -51,8 +51,8 @@ "import io\n", "from pyquaternion import Quaternion\n", "\n", - "from d123.geometry.base import StateSE3\n", - "from d123.geometry.bounding_box.bounding_box import BoundingBoxSE3\n", + "from d123.geometry import StateSE3\n", + "from d123.geometry.bounding_box import BoundingBoxSE3\n", "\n", "from waymo_open_dataset.utils import frame_utils\n", "\n", From 62654f3f13146a5f299aedf25e623c97caab9907 Mon Sep 17 00:00:00 2001 From: jbwang <1159270049@qq.com> Date: Mon, 25 Aug 2025 14:31:07 +0800 Subject: [PATCH 014/145] fix ego_yaw_pitch_roll and get good results in lidar viser --- .../kitti_360/kitti_360_data_converter.py | 16 +- .../kitti_360/kitti_360_helper.py | 14 +- jbwang_test2.py | 216 ++++++++++++------ 3 files changed, 158 insertions(+), 88 deletions(-) diff --git a/d123/dataset/dataset_specific/kitti_360/kitti_360_data_converter.py b/d123/dataset/dataset_specific/kitti_360/kitti_360_data_converter.py index 77f3fff0..6433ca89 100644 --- 
a/d123/dataset/dataset_specific/kitti_360/kitti_360_data_converter.py +++ b/d123/dataset/dataset_specific/kitti_360/kitti_360_data_converter.py @@ -16,6 +16,7 @@ import pyarrow as pa from PIL import Image import logging +from pyquaternion import Quaternion from nuplan.planning.utils.multithreading.worker_utils import WorkerPool, worker_map @@ -455,15 +456,24 @@ def _extract_ego_state_all(log_name: str) -> List[List[float]]: oxts_path_file = oxts_path / f"{int(idx):010d}.txt" oxts_data = np.loadtxt(oxts_path_file) - #TODO check roll, pitch, yaw again - roll, pitch, yaw = oxts_data[3:6] vehicle_parameters = get_kitti360_station_wagon_parameters() while pose_idx + 1 < poses_time_len and poses_time[pose_idx + 1] < idx: pose_idx += 1 pos = pose_idx - # pos = np.searchsorted(pwwwoses_time, idx, side='right') - 1 + # pos = np.searchsorted(poses_time, idx, side='right') - 1 + # NOTE you can use oxts_data[3:6] as roll, pitch, yaw for simplicity + #roll, pitch, yaw = oxts_data[3:6] + r00, r01, r02 = poses[pos, 1:4] + r10, r11, r12 = poses[pos, 5:8] + r20, r21, r22 = poses[pos, 9:12] + R_mat = np.array([[r00, r01, r02], + [r10, r11, r12], + [r20, r21, r22]], dtype=np.float64) + R_mat_cali = R_mat @ KITTI3602NUPLAN_IMU_CALIBRATION[:3,:3] + yaw, pitch, roll = Quaternion(matrix=R_mat_cali[:3, :3]).yaw_pitch_roll + rear_axle_pose = StateSE3( x=poses[pos, 4], y=poses[pos, 8], diff --git a/d123/dataset/dataset_specific/kitti_360/kitti_360_helper.py b/d123/dataset/dataset_specific/kitti_360/kitti_360_helper.py index 5c69264f..7edcd6af 100644 --- a/d123/dataset/dataset_specific/kitti_360/kitti_360_helper.py +++ b/d123/dataset/dataset_specific/kitti_360/kitti_360_helper.py @@ -13,17 +13,6 @@ DEFAULT_ROLL = 0.0 DEFAULT_PITCH = 0.0 -addtional_calibration = get_rotation_matrix( - StateSE3( - x=0.0, - y=0.0, - z=0.0, - roll=np.deg2rad(1.0), - pitch=np.deg2rad(1.0), - yaw=np.deg2rad(0.0), - ) - ) - kitti3602nuplan_imu_calibration_ideal = np.array([ [1, 0, 0, 0], [0, -1, 0, 0], @@ -31,8 +20,7 @@ [0, 0, 0, 1], ], dtype=np.float64) -KITTI3602NUPLAN_IMU_CALIBRATION = np.eye(4, dtype=np.float64) -KITTI3602NUPLAN_IMU_CALIBRATION[:3, :3] = addtional_calibration @ kitti3602nuplan_imu_calibration_ideal[:3, :3] +KITTI3602NUPLAN_IMU_CALIBRATION = kitti3602nuplan_imu_calibration_ideal MAX_N = 1000 def local2global(semanticId, instanceId): diff --git a/jbwang_test2.py b/jbwang_test2.py index 7128a636..f9748db5 100644 --- a/jbwang_test2.py +++ b/jbwang_test2.py @@ -76,77 +76,149 @@ # print(a[10000:10010,:3]) -import gc -import json -import os -from dataclasses import asdict -from functools import partial -from pathlib import Path -from typing import Any, Dict, Final, List, Optional, Tuple, Union -import numpy as np -from collections import defaultdict -import datetime -import hashlib -import xml.etree.ElementTree as ET -import pyarrow as pa -from PIL import Image -import logging - -from d123.common.datatypes.detection.detection_types import DetectionType -from d123.dataset.dataset_specific.kitti_360.kitti_360_helper import KITTI360Bbox3D - - -#TODO train and train_full -bbox_3d_path = Path("/nas/datasets/KITTI-360/data_3d_bboxes/train/2013_05_28_drive_0000_sync.xml") - -tree = ET.parse(bbox_3d_path) -root = tree.getroot() - -KIITI360_DETECTION_NAME_DICT = { - "truck": DetectionType.VEHICLE, - "bus": DetectionType.VEHICLE, - "car": DetectionType.VEHICLE, - "motorcycle": DetectionType.BICYCLE, - "bicycle": DetectionType.BICYCLE, - "pedestrian": DetectionType.PEDESTRIAN, -} -# x,y,z = 881.2268115,3247.493293,115.239219 -# x,y,z = 
867.715474,3229.630439,115.189221 # ego vehicle
-# x,y,z = 873.533508, 3227.16235, 115.185341 # the pedestrian we are looking for
-x,y,z = 874.233508, 3231.56235, 115.185341 # the car we are looking for
-CENTER_REF = np.array([x, y, z], dtype=np.float64)
-objs_name = []
-lable_name = []
-for child in root:
-    label = child.find('label').text
-    # if child.find('transform') is None or label not in KIITI360_DETECTION_NAME_DICT.keys():
-    #     continue
+    
+
+
+# import gc
+# import json
+# import os
+# from dataclasses import asdict
+# from functools import partial
+# from pathlib import Path
+# from typing import Any, Dict, Final, List, Optional, Tuple, Union
+
+# import numpy as np
+# from collections import defaultdict
+# import datetime
+# import hashlib
+# import xml.etree.ElementTree as ET
+# import pyarrow as pa
+# from PIL import Image
+# import logging
+
+# from d123.common.datatypes.detection.detection_types import DetectionType
+# from d123.dataset.dataset_specific.kitti_360.kitti_360_helper import KITTI360Bbox3D
+
+
+# #TODO train and train_full
+# bbox_3d_path = Path("/nas/datasets/KITTI-360/data_3d_bboxes/train/2013_05_28_drive_0000_sync.xml")
+
+# tree = ET.parse(bbox_3d_path)
+# root = tree.getroot()
+
+# KIITI360_DETECTION_NAME_DICT = {
+#     "truck": DetectionType.VEHICLE,
+#     "bus": DetectionType.VEHICLE,
+#     "car": DetectionType.VEHICLE,
+#     "motorcycle": DetectionType.BICYCLE,
+#     "bicycle": DetectionType.BICYCLE,
+#     "pedestrian": DetectionType.PEDESTRIAN,
+# }
+# # x,y,z = 881.2268115,3247.493293,115.239219
+# # x,y,z = 867.715474,3229.630439,115.189221 # ego vehicle
+# # x,y,z = 873.533508, 3227.16235, 115.185341 # the pedestrian we are looking for
+# x,y,z = 874.233508, 3231.56235, 115.185341 # the car we are looking for
+# CENTER_REF = np.array([x, y, z], dtype=np.float64)
+# objs_name = []
+# lable_name = []
+# for child in root:
+#     label = child.find('label').text
+#     # if child.find('transform') is None or label not in KIITI360_DETECTION_NAME_DICT.keys():
+#     #     continue
+    
+
+#     if child.find('transform') is None:
+#         continue
+#     print("this label is ",label)
+#     print("!!!!!!!!!!!!!!!!!!!")
+#     obj = KITTI360Bbox3D()
+#     obj.parseBbox(child)
+#     # obj.parseVertices(child)
+#     name = child.find('label').text
+#     lable_name.append(name)
+#     # if obj.start_frame < 10030 and obj.end_frame > 10030:
+#     center = np.array(obj.T, dtype=np.float64)
+#     dist = np.linalg.norm(center - CENTER_REF)
+#     if dist < 7:
+#         print(f"Object ID: {obj.name}, Start Frame: {obj.start_frame}, End Frame: {obj.end_frame},self.annotationId: {obj.annotationId},{obj.timestamp},{obj.T}")
+#         objs_name.append(obj.name)
+# print(len(objs_name))
+# print(set(objs_name))
+# print(set(lable_name))
+#     # print(obj.Rm)
+#     # print(Sigma)
+# names = []
+# for child in root:
+# 
    label = child.find('label').text
+#     if child.find('transform') is None:
+#         continue
+#     names.append(label)
+# print(set(names))

from scipy.spatial.transform import Rotation as R
import numpy as np
from pathlib import Path as PATH

def get_rotation_matrix(roll,pitch,yaw):
    # Intrinsic Z-Y'-X'' rotation: R = R_x(roll) @ R_y(pitch) @ R_z(yaw)
    R_x = np.array(
        [
            [1, 0, 0],
            [0, np.cos(roll), -np.sin(roll)],
            [0, np.sin(roll), np.cos(roll)],
        ],
        dtype=np.float64,
    )
    R_y = np.array(
        [
            [np.cos(pitch), 0, np.sin(pitch)],
            [0, 1, 0],
            [-np.sin(pitch), 0, np.cos(pitch)],
        ],
        dtype=np.float64,
    )
    R_z = np.array(
        [
            [np.cos(yaw), -np.sin(yaw), 0],
            [np.sin(yaw), np.cos(yaw), 0],
            [0, 0, 1],
        ],
        dtype=np.float64,
    )
    return R_x @ R_y @ R_z

oxts_path = PATH("/data/jbwang/d123/data_poses/2013_05_28_drive_0000_sync/oxts/data/" )
pose_file = PATH("/nas/datasets/KITTI-360/data_poses/2013_05_28_drive_0000_sync/poses.txt")
poses = np.loadtxt(pose_file)
poses_time = poses[:, 0] - 1 # Adjusting time to start from 0

pose_idx = 0
poses_time_len = len(poses_time)

from pyquaternion import Quaternion

for idx in range(len(list(oxts_path.glob("*.txt")))):
    oxts_path_file = oxts_path / f"{int(idx):010d}.txt"
    oxts_data = np.loadtxt(oxts_path_file)
    while pose_idx + 1 < poses_time_len and poses_time[pose_idx + 1] < idx:
        pose_idx += 1
    pos = pose_idx

    r00, r01, r02 = poses[pos, 1:4]
    r10, r11, r12 = poses[pos, 5:8]
    r20, r21, r22 = poses[pos, 9:12]
    R_mat = np.array([[r00, r01, r02],
                    [r10, r11, r12],
                    [r20, r21, r22]], dtype=np.float64)
    calib = np.array([[1.0, 0.0, 0.0],
                    [0.0, -1.0, 0.0],
                    [0.0, 0.0, -1.0]], dtype=np.float64)
    R_mat = R_mat @ calib
    if idx <= 300:
        # print("R_mat",R_mat)
        new_yaw, new_pitch, new_roll = Quaternion(matrix=R_mat[:3, :3]).yaw_pitch_roll
        # new_yaw,new_pitch,new_roll = R.from_matrix(R_mat).as_euler('yxz', degrees=False)
        print("new",new_roll,new_pitch,new_yaw)
        print("roll,pitch,yaw",oxts_data[3:6]) # the first 6 elements are position and velocity
        roll, pitch, yaw = oxts_data[3:6]
        # print("true",get_rotation_matrix(roll,pitch,yaw))
        # print("new",roll,pitch,yaw)
\ No newline at end of file

From c3f4053b0f750d5eda690d6c53c85d89ff623ee9 Mon Sep 17 00:00:00 2001
From: DanielDauner
Date: Mon, 25 Aug 2025 10:39:38 +0200
Subject: [PATCH 015/145] Refactor `geometry` functionality and add to docs.
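
The refactored geometry types share the new `ArrayMixin` base, so every
entity exposes a cached `.array` view and can be passed straight to numpy.
A rough usage sketch of the public API introduced in this patch (the values
are illustrative only, not taken from a dataset):

    import numpy as np
    from d123.geometry import BoundingBoxSE2, PolylineSE2, StateSE2

    box = BoundingBoxSE2(center=StateSE2(1.0, 2.0, 0.5), length=4.0, width=2.0)
    corners = box.corners_array              # (4, 2) array, ordered by Corners2DIndex
    path = PolylineSE2.from_array(corners)   # yaw angles are recovered from the vertices
    mid = path.interpolate(0.5, normalized=True)  # StateSE2 halfway along the path
    dist = path.project(mid.point_2d)        # distance along the path to the projected point
    np.asarray(box)                          # ArrayMixin makes this equivalent to box.array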
---
 d123/common/utils/mixin.py | 49 ++++
 d123/geometry/bounding_box.py | 164 ++++++++--
 d123/geometry/geometry_index.py | 54 ++++
 d123/geometry/occupancy_map.py | 93 ++++---
 d123/geometry/point.py | 64 +++--
 d123/geometry/polyline.py | 233 +++++++++++++++---
 d123/geometry/se.py | 163 +++++++++---
 .../av2 => geometry/torch}/.gitkeep | 0
 d123/geometry/transform/se3.py | 27 --
 d123/geometry/utils/bounding_box_utils.py | 68 ++++-
 d123/geometry/vector.py | 194 +++++++++++++--
 .../agents/constant_velocity_agents.py | 2 +-
 d123/simulation/agents/idm_agents.py | 6 +-
 d123/simulation/agents/path_following.py | 2 +-
 .../gym_observation/raster/raster_renderer.py | 10 +-
 .../components/time_to_collision.py | 6 +-
 .../reward_builder/default_reward_builder.py | 2 +-
 docs/conf.py | 4 +-
 docs/geometry.rst | 54 +++-
 notebooks/viz/bev_matplotlib.ipynb | 34 ++-
 20 files changed, 1005 insertions(+), 224 deletions(-)
 create mode 100644 d123/common/utils/mixin.py
 rename d123/{dataset/dataset_specific/av2 => geometry/torch}/.gitkeep (100%)

diff --git a/d123/common/utils/mixin.py b/d123/common/utils/mixin.py
new file mode 100644
index 00000000..5e7ecc0e
--- /dev/null
+++ b/d123/common/utils/mixin.py
@@ -0,0 +1,49 @@
+from __future__ import annotations
+
+import abc
+import copy as pycopy
+from functools import cached_property
+
+import numpy as np
+import numpy.typing as npt
+
+
+class ArrayMixin(abc.ABC):
+    """Abstract base class for geometric entities."""
+
+    @cached_property
+    @abc.abstractmethod
+    def array(self) -> npt.NDArray[np.float64]:
+        """The array representation of the geometric entity."""
+
+    def __array__(self, dtype: npt.DTypeLike = None, copy: bool = False) -> npt.NDArray:
+        array = self.array
+        return array if dtype is None else array.astype(dtype=dtype, copy=copy)
+
+    def __len__(self) -> int:
+        """Return the length of the array."""
+        return len(self.array)
+
+    def __getitem__(self, key):
+        """Allow indexing into the array."""
+        return self.array[key]
+
+    def __eq__(self, other) -> bool:
+        """Equality comparison based on array values."""
+        if isinstance(other, ArrayMixin):
+            return np.array_equal(self.array, other.array)
+        return False
+
+    def shape(self) -> tuple:
+        """Return the shape of the array."""
+        return self.array.shape
+
+    def tolist(self) -> list:
+        """Convert the array to a Python list."""
+        return self.array.tolist()
+
+    def copy(self) -> ArrayMixin:
+        """Return a copy of the object with a copied array."""
+        obj = pycopy.copy(self)
+        obj.array = self.array.copy()
+        return obj
diff --git a/d123/geometry/bounding_box.py b/d123/geometry/bounding_box.py
index b71c4fae..6b612c4e 100644
--- a/d123/geometry/bounding_box.py
+++ b/d123/geometry/bounding_box.py
@@ -1,32 +1,60 @@
 from __future__ import annotations
 
+from typing import Dict
 from dataclasses import dataclass
 from functools import cached_property
 
 import numpy as np
 import numpy.typing as npt
-import shapely
+import shapely.geometry as geom
 
-from d123.geometry.geometry_index import BoundingBoxSE2Index, BoundingBoxSE3Index
+from d123.common.utils.mixin import ArrayMixin
+from d123.geometry.geometry_index import BoundingBoxSE2Index, BoundingBoxSE3Index, Corners2DIndex, Corners3DIndex
+from d123.geometry.point import Point2D, Point3D
 from d123.geometry.se import StateSE2, StateSE3
-from d123.geometry.utils.bounding_box_utils import bbse2_array_to_corners_array
-
-# TODO: Reconsider naming SE2 and SE3 hierarchies. E.g. would inheritance be a better approach? 
+from d123.geometry.utils.bounding_box_utils import bbse2_array_to_corners_array, bbse3_array_to_corners_array
 
 
 @dataclass
-class BoundingBoxSE2:
+class BoundingBoxSE2(ArrayMixin):
+    """
+    Rotated bounding box in 2D defined by center (StateSE2), length and width.
+
+    Example:
+        >>> from d123.geometry import StateSE2
+        >>> bbox = BoundingBoxSE2(center=StateSE2(1.0, 2.0, 0.5), length=4.0, width=2.0)
+        >>> bbox.array
+        array([1. , 2. , 0.5, 4. , 2. ])
+        >>> bbox.corners_array.shape
+        (4, 2)
+        >>> bbox.shapely_polygon.area
+        8.0
+    """
 
     center: StateSE2
     length: float
    width: float
 
-    @cached_property
-    def shapely_polygon(self) -> shapely.geometry.Polygon:
-        return shapely.geometry.Polygon(self.corners_array)
+    @classmethod
+    def from_array(cls, array: npt.NDArray[np.float64]) -> BoundingBoxSE2:
+        """Create a BoundingBoxSE2 from a numpy array, indexed by :class:`~d123.geometry.BoundingBoxSE2Index`.
 
-    @property
+        :param array: A 1D numpy array containing the bounding box parameters.
+        :return: A BoundingBoxSE2 instance.
+        """
+        assert array.ndim == 1 and array.shape[-1] == len(BoundingBoxSE2Index)
+        return BoundingBoxSE2(
+            center=StateSE2.from_array(array[BoundingBoxSE2Index.SE2]),
+            length=array[BoundingBoxSE2Index.LENGTH],
+            width=array[BoundingBoxSE2Index.WIDTH],
+        )
+
+    @cached_property
     def array(self) -> npt.NDArray[np.float64]:
+        """Converts the BoundingBoxSE2 instance to a numpy array, indexed by :class:`~d123.geometry.BoundingBoxSE2Index`.
+
+        :return: A numpy array of shape (5,) containing the bounding box parameters [x, y, yaw, length, width].
+        """
         array = np.zeros(len(BoundingBoxSE2Index), dtype=np.float64)
         array[BoundingBoxSE2Index.X] = self.center.x
         array[BoundingBoxSE2Index.Y] = self.center.y
@@ -35,17 +63,56 @@ def array(self) -> npt.NDArray[np.float64]:
         array[BoundingBoxSE2Index.WIDTH] = self.width
         return array
 
+    @cached_property
+    def shapely_polygon(self) -> geom.Polygon:
+        """Return a Shapely polygon representation of the bounding box.
+
+        :return: A Shapely polygon representing the bounding box.
+        """
+        return geom.Polygon(self.corners_array)
+
     @property
     def bounding_box_se2(self) -> BoundingBoxSE2:
+        """Returns bounding box itself for polymorphism.
+
+        :return: A BoundingBoxSE2 instance representing the 2D bounding box.
+        """
         return self
 
-    @property
+    @cached_property
     def corners_array(self) -> npt.NDArray[np.float64]:
+        """Returns the corner points of the bounding box as a numpy array.
+
+        :return: A numpy array of shape (4, 2) containing the corner points of the bounding box, \
+            indexed by :class:`~d123.geometry.Corners2DIndex` and :class:`~d123.geometry.Point2DIndex`.
+        """
         return bbse2_array_to_corners_array(self.array)
 
+    @property
+    def corners_dict(self) -> Dict[Corners2DIndex, Point2D]:
+        """Returns the corner points of the bounding box as a dictionary.
+
+        :return: A dictionary mapping :class:`~d123.geometry.Corners2DIndex` to :class:`~d123.geometry.Point2D` instances.
+        """
+        corners_array = self.corners_array
+        return {index: Point2D.from_array(corners_array[index]) for index in Corners2DIndex}
+
 
 @dataclass
-class BoundingBoxSE3:
+class BoundingBoxSE3(ArrayMixin):
+    """
+    Rotated bounding box in 3D defined by center (StateSE3), length, width and height.
+
+    Example:
+        >>> from d123.geometry import StateSE3
+        >>> bbox = BoundingBoxSE3(center=StateSE3(1.0, 2.0, 3.0, 0.1, 0.2, 0.3), length=4.0, width=2.0, height=1.5)
+        >>> bbox.array
+        array([1. , 2. , 3. , 0.1, 0.2, 0.3, 4. , 2. , 1.5])
+        >>> bbox.bounding_box_se2.array
+        array([1. , 2. , 0.3, 4. , 2. 
]) + >>> bbox.shapely_polygon.area + 8.0 + """ center: StateSE3 length: float @@ -54,27 +121,27 @@ class BoundingBoxSE3: @classmethod def from_array(cls, array: npt.NDArray[np.float64]) -> BoundingBoxSE3: - return cls( + """Create a BoundingBoxSE3 from a numpy array. + + :param array: A 1D numpy array containing the bounding box parameters, indexed by \ + :class:`~d123.geometry.BoundingBoxSE3Index`. + :return: A BoundingBoxSE3 instance. + """ + assert array.ndim == 1 and array.shape[-1] == len(BoundingBoxSE3Index) + return BoundingBoxSE3( center=StateSE3.from_array(array[BoundingBoxSE3Index.STATE_SE3]), length=array[BoundingBoxSE3Index.LENGTH], width=array[BoundingBoxSE3Index.WIDTH], height=array[BoundingBoxSE3Index.HEIGHT], ) - @property - def bounding_box_se2(self) -> BoundingBoxSE2: - return BoundingBoxSE2( - center=StateSE2(self.center.x, self.center.y, self.center.yaw), - length=self.length, - width=self.width, - ) - - @property - def center_se3(self) -> StateSE3: - return self.center - - @property + @cached_property def array(self) -> npt.NDArray[np.float64]: + """Convert the BoundingBoxSE3 instance to a numpy array. + + :return: A 1D numpy array containing the bounding box parameters, indexed by \ + :class:`~d123.geometry.BoundingBoxSE3Index`. + """ array = np.zeros(len(BoundingBoxSE3Index), dtype=np.float64) array[BoundingBoxSE3Index.X] = self.center.x array[BoundingBoxSE3Index.Y] = self.center.y @@ -88,8 +155,51 @@ def array(self) -> npt.NDArray[np.float64]: return array @property - def shapely_polygon(self) -> shapely.geometry.Polygon: + def bounding_box_se2(self) -> BoundingBoxSE2: + """Converts the 3D bounding box to a 2D bounding box by dropping the z, roll and pitch components. + + :return: A BoundingBoxSE2 instance. + """ + return BoundingBoxSE2( + center=StateSE2(self.center.x, self.center.y, self.center.yaw), + length=self.length, + width=self.width, + ) + + @property + def center_se3(self) -> StateSE3: + """Returns the center of the bounding box as a StateSE3 instance. + + :return: The center of the bounding box as a StateSE3 instance. + """ + return self.center + + @property + def shapely_polygon(self) -> geom.Polygon: + """Return a Shapely polygon representation of the 2D projection of the bounding box. + + :return: A shapely polygon representing the 2D bounding box. + """ return self.bounding_box_se2.shapely_polygon + @cached_property + def corners_array(self) -> npt.NDArray[np.float64]: + """Returns the corner points of the bounding box as a numpy array, shape (8, 3). + + :return: A numpy array of shape (8, 3) containing the corner points of the bounding box, \ + indexed by :class:`~d123.geometry.Corners3DIndex` and :class:`~d123.geometry.Point3DIndex`. + """ + return bbse3_array_to_corners_array(self.array) + + @cached_property + def corners_dict(self) -> Dict[Corners3DIndex, Point3D]: + """Returns the corner points of the bounding box as a dictionary. + + :return: A dictionary mapping :class:`~d123.geometry.Corners3DIndex` to \ + :class:`~d123.geometry.Point3D` instances. + """ + corners_array = self.corners_array + return {index: Point3D.from_array(corners_array[index]) for index in Corners3DIndex} + BoundingBox = BoundingBoxSE2 | BoundingBoxSE3 diff --git a/d123/geometry/geometry_index.py b/d123/geometry/geometry_index.py index a8652b37..f77951c2 100644 --- a/d123/geometry/geometry_index.py +++ b/d123/geometry/geometry_index.py @@ -4,6 +4,10 @@ class Point2DIndex(IntEnum): + """ + Indexes array-like representations of 2D points (x,y). 
+ """ + X = 0 Y = 1 @@ -13,11 +17,23 @@ def XY(cls) -> slice: class Vector2DIndex(IntEnum): + """ + Indexes array-like representations of 2D vectors (x,y). + """ + X = 0 Y = 1 + @classproperty + def XY(cls) -> slice: + return slice(cls.X, cls.Y + 1) + class StateSE2Index(IntEnum): + """ + Indexes array-like representations of SE2 states (x,y,yaw). + """ + X = 0 Y = 1 YAW = 2 @@ -28,6 +44,9 @@ def XY(cls) -> slice: class Point3DIndex(IntEnum): + """ + Indexes array-like representations of 3D points (x,y,z). + """ X = 0 Y = 1 @@ -37,14 +56,26 @@ class Point3DIndex(IntEnum): def XY(cls) -> slice: return slice(cls.X, cls.Y + 1) + @classproperty + def XYZ(cls) -> slice: + return slice(cls.X, cls.Z + 1) + class Vector3DIndex(IntEnum): + """ + Indexes array-like representations of 3D vectors (x,y,z). + """ + X = 0 Y = 1 Z = 2 class StateSE3Index(IntEnum): + """ + Indexes array-like representations of SE3 states (x,y,z,roll,pitch,yaw). + TODO: Use quaternions for rotation. + """ X = 0 Y = 1 @@ -67,6 +98,10 @@ def ROTATION_XYZ(cls) -> slice: class BoundingBoxSE2Index(IntEnum): + """ + Indexes array-like representations of rotated 2D bounding boxes (x,y,yaw,length,width). + """ + X = 0 Y = 1 YAW = 2 @@ -83,6 +118,10 @@ def SE2(cls) -> slice: class Corners2DIndex(IntEnum): + """ + Indexes the corners of a BoundingBoxSE2 in the order: front-left, front-right, back-right, back-left. + """ + FRONT_LEFT = 0 FRONT_RIGHT = 1 BACK_RIGHT = 2 @@ -90,6 +129,11 @@ class Corners2DIndex(IntEnum): class BoundingBoxSE3Index(IntEnum): + """ + Indexes array-like representations of rotated 3D bounding boxes (x,y,z,roll,pitch,yaw,length,width,height). + TODO: Use quaternions for rotation. + """ + X = 0 Y = 1 Z = 2 @@ -112,8 +156,18 @@ def STATE_SE3(cls) -> slice: def ROTATION_XYZ(cls) -> slice: return slice(cls.ROLL, cls.YAW + 1) + @classproperty + def EXTENT(cls) -> slice: + return slice(cls.LENGTH, cls.HEIGHT + 1) + class Corners3DIndex(IntEnum): + """ + Indexes the corners of a BoundingBoxSE3 in the order: + front-left-bottom, front-right-bottom, back-right-bottom, back-left-bottom, + front-left-top, front-right-top, back-right-top, back-left-top. + """ + FRONT_LEFT_BOTTOM = 0 FRONT_RIGHT_BOTTOM = 1 BACK_RIGHT_BOTTOM = 2 diff --git a/d123/geometry/occupancy_map.py b/d123/geometry/occupancy_map.py index 2a8085d6..d8df5a1d 100644 --- a/d123/geometry/occupancy_map.py +++ b/d123/geometry/occupancy_map.py @@ -1,6 +1,6 @@ from __future__ import annotations -from typing import Dict, List, Literal, Optional, Sequence, Union +from typing import Dict, List, Literal, Optional, Sequence, Tuple, Union import numpy as np import numpy.typing as npt @@ -8,7 +8,7 @@ from shapely.geometry.base import BaseGeometry from shapely.strtree import STRtree -# TODO: Figure out if a 3D equivalent is needed. +from d123.geometry.geometry_index import Point2DIndex class OccupancyMap2D: @@ -18,14 +18,14 @@ def __init__( ids: Optional[Union[List[str], List[int]]] = None, node_capacity: int = 10, ): - """ - Constructor of PDMOccupancyMap - :param geometries: list/array of polygons - :param ids: optional list of geometry identifiers + """Constructs a 2D occupancy map of shapely geometries using an str-tree for efficient spatial queries. 
+ + :param geometries: list/array of shapely geometries + :param ids: optional list of geometry identifiers, either strings or integers :param node_capacity: max number of child nodes in str-tree, defaults to 10 """ + assert ids is None or len(ids) == len(geometries), "Length of ids must match length of geometries" - # assert len(tokens) == len(geometries) self._ids: Union[List[str], List[int]] = ( ids if ids is not None else [str(idx) for idx in range(len(geometries))] @@ -38,51 +38,59 @@ def __init__( @classmethod def from_dict(cls, geometry_dict: Dict[Union[str, int], BaseGeometry], node_capacity: int = 10) -> OccupancyMap2D: + """Constructs a 2D occupancy map from a dictionary of geometries. + + :param geometry_dict: Dictionary mapping geometry identifiers to shapely geometries + :param node_capacity: Max number of child nodes in str-tree, defaults to 10 + :return: OccupancyMap2D instance + """ ids = list(geometry_dict.keys()) geometries = list(geometry_dict.values()) return cls(geometries=geometries, ids=ids, node_capacity=node_capacity) def __getitem__(self, id: Union[str, int]) -> BaseGeometry: - """ - Retrieves geometry of token. - :param token: geometry identifier - :return: Geometry of token + """Retrieves geometry given an ID. + + :param id: geometry identifier + :return: Geometry of ID. """ return self._geometries[self._id_to_idx[id]] def __len__(self) -> int: """ - Number of geometries in the occupancy map - :return: int + :return: Number of geometries in the occupancy map. """ return len(self._ids) @property def ids(self) -> Union[List[str], List[int]]: - """ - Getter for track tokens in occupancy map - :return: list of strings + """Getter for geometry IDs in occupancy map + + :return: list of IDs """ return self._ids @property def geometries(self) -> Sequence[BaseGeometry]: + """Getter for geometries in occupancy map. + :return: list of geometries + """ return self._geometries @property def token_to_idx(self) -> Dict[Union[int, str], int]: - """ - Getter for track tokens in occupancy map - :return: dictionary of tokens and indices + """Mapping from geometry IDs to indices in the occupancy map. + + :return: dictionary of IDs and indices """ return self._id_to_idx def intersects(self, geometry: BaseGeometry) -> Union[List[str], List[int]]: - """ - Searches for intersecting geometries in the occupancy map + """Searches for intersecting geometries in the occupancy map. + :param geometry: geometries to query - :return: list of tokens for intersecting geometries + :return: list of IDs for intersecting geometries """ indices = self.query(geometry, predicate="intersects") return [self._ids[idx] for idx in indices] @@ -95,11 +103,17 @@ def query( ] = None, distance: Optional[float] = None, ) -> npt.NDArray[np.int64]: - """ - Function to directly calls shapely's query function on str-tree - :param geometry: geometries to query - :param predicate: see shapely, defaults to None - :return: query output + """Queries the str-tree for geometries that match the given predicate with the input geometry. + + :param geometry: Geometry or array_like + :param predicate: {None, 'intersects', 'within', 'contains', 'overlaps', 'crosses', 'touches', 'covers', \ + 'covered_by', 'contains_properly', 'dwithin'}, defaults to None + :param distance: number or array_like, defaults to None. + :return: ndarray with shape (n,) if geometry is a scalar. + Contains tree geometry indices. + :return: ndarray with shape (2, n) if geometry is an array_like + The first subarray contains input geometry indices. 
+            The second subarray contains tree geometry indices.
         """
         return self._str_tree.query(geometry, predicate=predicate, distance=distance)
 
@@ -110,7 +124,16 @@
     def query_nearest(
         self,
         geometry,
         max_distance: Optional[float] = None,
         return_distance: bool = False,
         exclusive: bool = False,
         all_matches: bool = True,
-    ):
+    ) -> Union[npt.NDArray[np.int64], Tuple[npt.NDArray[np.int64], npt.NDArray[np.float64]]]:
+        """Queries the str-tree for the nearest geometry to the input geometry.
+
+        :param geometry: The input geometry to query.
+        :param max_distance: The maximum distance to consider, defaults to None.
+        :param return_distance: Whether to return the distance to the nearest geometry, defaults to False.
+        :param exclusive: Whether to exclude the input geometry from the results, defaults to False.
+        :param all_matches: Whether to return all matching geometries, defaults to True.
+        :return: The nearest geometry or geometries.
+        """
         return self._str_tree.query_nearest(
             geometry,
             max_distance=max_distance,
@@ -119,14 +142,16 @@
             all_matches=all_matches,
         )
 
-    def points_in_polygons(self, points: npt.NDArray[np.float64]) -> npt.NDArray[np.bool_]:
-        """
-        Determines wether input-points are in polygons of the occupancy map
-        :param points: input-points
+    def contains_vectorized(self, points: npt.NDArray[np.float64]) -> npt.NDArray[np.bool_]:
+        """Determines whether input-points are in geometries (i.e. polygons) of the occupancy map.
+        NOTE: This function can be significantly faster than using the str-tree, if the number of geometries is
+        relatively small compared to the number of input-points.
+
+        :param points: array of shape (num_points, 2), indexed by :class:`~d123.geometry.Point2DIndex`.
         :return: boolean array of shape (polygons, input-points)
         """
         output = np.zeros((len(self._geometries), len(points)), dtype=bool)
-        for i, polygon in enumerate(self._geometries):
-            output[i] = shapely.vectorized.contains(polygon, points[:, 0], points[:, 1])
+        for i, geometry in enumerate(self._geometries):
+            output[i] = shapely.vectorized.contains(geometry, points[..., Point2DIndex.X], points[..., Point2DIndex.Y])
         return output
diff --git a/d123/geometry/point.py b/d123/geometry/point.py
index c12cc22e..5641cf17 100644
--- a/d123/geometry/point.py
+++ b/d123/geometry/point.py
@@ -1,18 +1,23 @@
 from __future__ import annotations
 
 from dataclasses import dataclass
+from functools import cached_property
 from typing import Iterable
 
 import numpy as np
 import numpy.typing as npt
 import shapely.geometry as geom
 
+from d123.common.utils.mixin import ArrayMixin
 from d123.geometry.geometry_index import Point2DIndex, Point3DIndex
 
 
 @dataclass
-class Point2D:
-    """Class to represents 2D points."""
+class Point2D(ArrayMixin):
+    """Class to represent 2D points.
+
+    :return: A Point2D instance.
+    """
 
     x: float # [m] location
     y: float # [m] location
 
     @classmethod
     def from_array(cls, array: npt.NDArray[np.float64]) -> Point2D:
+        """Constructs a Point2D from a numpy array.
+
+        :param array: Array of shape (2,) representing the point coordinates [x, y], indexed by \
+            :class:`~d123.geometry.Point2DIndex`.
+        :return: A Point2D instance.
+        """
+
         assert array.ndim == 1
         assert array.shape[0] == len(Point2DIndex)
         return Point2D(array[Point2DIndex.X], array[Point2DIndex.Y])
 
-    @property
+    @cached_property
     def array(self) -> npt.NDArray[np.float64]:
-        """
-        Convert vector to array
-        :return: array containing [x, y]
+        """The array representation of the point. 
+
+        :return: A numpy array of shape (2,) containing the point coordinates [x, y], indexed by \
+            :class:`~d123.geometry.Point2DIndex`.
         """
         array = np.zeros(len(Point2DIndex), dtype=np.float64)
         array[Point2DIndex.X] = self.x
@@ -37,12 +50,14 @@
         return array
 
     @property
     def shapely_point(self) -> geom.Point:
+        """The Shapely Point representation of the 2D point.
+
+        :return: A Shapely Point representation of the 2D point.
+        """
         return geom.Point(self.x, self.y)
 
     def __iter__(self) -> Iterable[float]:
-        """
-        :return: iterator of tuples (x, y)
-        """
+        """Iterator over point coordinates."""
         return iter((self.x, self.y))
 
     def __hash__(self) -> int:
@@ -51,8 +66,8 @@
 
 
 @dataclass
-class Point3D:
-    """Class to represents 2D points."""
+class Point3D(ArrayMixin):
+    """Class to represent 3D points."""
 
     x: float # [m] location
     y: float # [m] location
    z: float # [m] location
 
    @classmethod
    def from_array(cls, array: npt.NDArray[np.float64]) -> "Point3D":
+        """Constructs a Point3D from a numpy array.
+
+        :param array: Array of shape (3,) representing the point coordinates [x, y, z], indexed by \
+            :class:`~d123.geometry.Point3DIndex`.
+        :return: A Point3D instance.
+        """
        assert array.ndim == 1, f"Array must be 1-dimensional, got shape {array.shape}"
        assert array.shape[0] == len(
            Point3DIndex
        ), f"Array must have the same length as Point3DIndex, got shape {array.shape}"
        return cls(array[Point3DIndex.X], array[Point3DIndex.Y], array[Point3DIndex.Z])
 
-    @property
+    @cached_property
    def array(self) -> npt.NDArray[np.float64]:
-        """
-        Convert vector to array
-        :return: array containing [x, y]
+        """Converts the Point3D instance to a numpy array, indexed by :class:`~d123.geometry.Point3DIndex`.
+
+        :return: A numpy array of shape (3,) containing the point coordinates [x, y, z].
         """
        array = np.zeros(len(Point3DIndex), dtype=np.float64)
        array[Point3DIndex.X] = self.x
@@ -81,16 +102,23 @@
        return array
 
    @property
    def point_2d(self) -> Point2D:
+        """The 2D projection of the 3D point.
+
+        :return: A Point2D instance representing the 2D projection of the 3D point.
+        """
        return Point2D(self.x, self.y)
 
    @property
    def shapely_point(self) -> geom.Point:
+        """The Shapely Point representation of the 3D point. \
+            This geometry contains the z-coordinate, but many Shapely operations ignore it.
+
+        :return: A Shapely Point representation of the 3D point. 
+        """
        return geom.Point(self.x, self.y, self.z)
 
    def __iter__(self) -> Iterable[float]:
-        """
-        :return: iterator of tuples (x, y)
-        """
+        """Iterator over the point coordinates (x, y, z)."""
        return iter((self.x, self.y, self.z))
 
    def __hash__(self) -> int:
diff --git a/d123/geometry/polyline.py b/d123/geometry/polyline.py
index 365b458e..3fee304d 100644
--- a/d123/geometry/polyline.py
+++ b/d123/geometry/polyline.py
@@ -9,8 +9,10 @@
 import shapely.geometry as geom
 from scipy.interpolate import interp1d
 
-from d123.geometry.point import Point2D, Point2DIndex, Point3D, Point3DIndex
-from d123.geometry.se import StateSE2, StateSE2Index
+from d123.common.utils.mixin import ArrayMixin
+from d123.geometry.geometry_index import Point2DIndex, Point3DIndex, StateSE2Index
+from d123.geometry.point import Point2D, Point3D
+from d123.geometry.se import StateSE2
 from d123.geometry.utils.constants import DEFAULT_Z
 from d123.geometry.utils.polyline_utils import get_linestring_yaws, get_path_progress
 from d123.geometry.utils.rotation_utils import normalize_angle
@@ -20,12 +22,18 @@
 
 
 @dataclass
-class Polyline2D:
+class Polyline2D(ArrayMixin):
+    """Represents an interpolatable 2D polyline."""
 
     linestring: geom.LineString
 
     @classmethod
     def from_linestring(cls, linestring: geom.LineString) -> Polyline2D:
+        """Creates a Polyline2D from a Shapely LineString. If the LineString has Z-coordinates, they are ignored.
+
+        :param linestring: A Shapely LineString object.
+        :return: A Polyline2D instance.
+        """
         if linestring.has_z:
             linestring_ = geom_creation.linestrings(*linestring.xy)
         else:
 
    @classmethod
    def from_array(cls, polyline_array: npt.NDArray[np.float32]) -> Polyline2D:
+        """Creates a Polyline2D from a numpy array.
+
+        :param polyline_array: A numpy array of shape (N, 2) or (N, 3), e.g. indexed by \
+            :class:`~d123.geometry.Point2DIndex` or :class:`~d123.geometry.Point3DIndex`.
+        :raises ValueError: If the input array is not of the expected shape.
+        :return: A Polyline2D instance.
+        """
        assert polyline_array.ndim == 2
        linestring: Optional[geom.LineString] = None
        if polyline_array.shape[-1] == len(Point2DIndex):
@@ -44,37 +59,88 @@
             raise ValueError("Array must have shape (N, 2) or (N, 3) for Point2D or Point3D respectively.")
         return Polyline2D(linestring)
 
+    @classmethod
+    def from_discrete_points(cls, discrete_points: List[Point2D]) -> Polyline2D:
+        """Creates a Polyline2D from a list of discrete 2D points.
+
+        :param discrete_points: A list of Point2D instances.
+        :return: A Polyline2D instance.
+        """
+        return Polyline2D.from_array(np.array(discrete_points, dtype=np.float64))
+
     @property
     def array(self) -> npt.NDArray[np.float64]:
-        return np.array(self.linestring.coords, dtype=np.float64)
+        """Converts the polyline to a numpy array, indexed by :class:`~d123.geometry.Point2DIndex`.
+
+        :return: A numpy array of shape (N, 2) representing the polyline.
+        """
+        x, y = self.linestring.xy
+        array = np.zeros((len(x), len(Point2DIndex)), dtype=np.float64)
+        array[:, Point2DIndex.X] = x
+        array[:, Point2DIndex.Y] = y
+        return array
 
     @property
     def polyline_se2(self) -> Polyline3D:
+        """Converts the 2D polyline to a 2D SE(2) polyline and retrieves the yaw angles.
+
+        :return: A PolylineSE2 instance representing the 2D polyline. 
+        """
         return PolylineSE2.from_linestring(self.linestring)
 
     @property
     def length(self) -> float:
+        """Returns the length of the polyline.
+
+        :return: The length of the polyline.
+        """
         return self.linestring.length
 
-    def interpolate(self, distances: Union[float, npt.NDArray[np.float64]]) -> Union[Point2D, npt.NDArray[np.float64]]:
+    def interpolate(
+        self,
+        distances: Union[float, npt.NDArray[np.float64]],
+        normalized: bool = False,
+    ) -> Union[Point2D, npt.NDArray[np.float64]]:
+        """Interpolates the polyline at the given distances.
+
+        :param distances: The distances at which to interpolate the polyline.
+        :return: The interpolated point(s) on the polyline.
+        """
+        distances_ = distances * self.length if normalized else distances
+
         if isinstance(distances, float) or isinstance(distances, int):
-            point = self.linestring.interpolate(distances)
+            point = self.linestring.interpolate(distances_)
             return Point2D(point.x, point.y)
         else:
-            distances = np.asarray(distances, dtype=np.float64)
-            points = self.linestring.interpolate(distances)
+            distances_ = np.asarray(distances_, dtype=np.float64)
+            points = self.linestring.interpolate(distances_)
             return np.array([[p.x, p.y] for p in points], dtype=np.float64)
 
-    def project(self, point: Union[Point2D, npt.NDArray[np.float64]]) -> Union[Point2D, npt.NDArray[np.float64]]:
+    def project(
+        self,
+        point: Union[geom.Point, Point2D, StateSE2, npt.NDArray[np.float64]],
+        normalized: bool = False,
+    ) -> npt.NDArray[np.float64]:
+        """Projects a point onto the polyline and returns the distance along the polyline to the closest point.
+
+        :param point: The point to project onto the polyline.
+        :param normalized: Whether to return the normalized distance, defaults to False.
+        :return: The distance along the polyline to the closest point.
+        """
         if isinstance(point, Point2D):
             point_ = point.array
+        elif isinstance(point, StateSE2):
+            point_ = point.array[StateSE2Index.XY]
+        elif isinstance(point, geom.Point):
+            point_ = np.array(point.coords[0], dtype=np.float64)
         else:
             point_ = np.array(point, dtype=np.float64)
-        return self.linestring.project(point_)
+        return self.linestring.project(geom.Point(point_), normalized=normalized)
 
 
 @dataclass
-class PolylineSE2:
+class PolylineSE2(ArrayMixin):
+    """Represents an interpolatable SE2 polyline."""
 
     se2_array: npt.NDArray[np.float64]
     linestring: Optional[geom.LineString] = None
@@ -94,6 +159,11 @@ def __post_init__(self):
 
     @classmethod
     def from_linestring(cls, linestring: geom.LineString) -> PolylineSE2:
+        """Creates a PolylineSE2 from a LineString. This requires computing the yaw angles along the path.
+
+        :param linestring: The LineString to convert.
+        :return: A PolylineSE2 representing the same path as the LineString.
+        """
         points_2d = np.array(linestring.coords, dtype=np.float64)[..., StateSE2Index.XY]
         se2_array = np.zeros((len(points_2d), len(StateSE2Index)), dtype=np.float64)
         se2_array[:, StateSE2Index.XY] = points_2d
@@ -102,6 +172,13 @@
 
     @classmethod
     def from_array(cls, polyline_array: npt.NDArray[np.float32]) -> PolylineSE2:
+        """Creates a PolylineSE2 from a numpy array.
+
+        :param polyline_array: The input numpy array, indexed either by \
+            :class:`~d123.geometry.Point2DIndex` or :class:`~d123.geometry.StateSE2Index`.
+        :raises ValueError: If the input array is not of the expected shape.
+        :return: A PolylineSE2 representing the same path as the input array. 
+        """
         assert polyline_array.ndim == 2
         if polyline_array.shape[-1] == len(Point2DIndex):
             se2_array = np.zeros((len(polyline_array), len(StateSE2Index)), dtype=np.float64)
@@ -110,19 +187,41 @@
         elif polyline_array.shape[-1] == len(StateSE2Index):
             se2_array = np.array(polyline_array, dtype=np.float64)
         else:
-            raise ValueError
+            raise ValueError("Invalid polyline array shape.")
         return PolylineSE2(se2_array)
 
     @classmethod
     def from_discrete_se2(cls, discrete_se2: List[StateSE2]) -> PolylineSE2:
-        return PolylineSE2(np.array([se2.array for se2 in discrete_se2], dtype=np.float64))
+        """Creates a PolylineSE2 from a list of discrete SE2 states.
+
+        :param discrete_se2: The list of discrete SE2 states.
+        :return: A PolylineSE2 representing the same path as the discrete SE2 states.
+        """
+        return PolylineSE2.from_array(np.array(discrete_se2, dtype=np.float64))
 
     @property
     def length(self) -> float:
+        """Returns the length of the polyline.
+
+        :return: The length of the polyline.
+        """
         return float(self._progress[-1])
 
-    def interpolate(self, distances: Union[float, npt.NDArray[np.float64]]) -> Union[StateSE2, npt.NDArray[np.float64]]:
-        clipped_distances = np.clip(distances, 1e-8, self.length)
+    def interpolate(
+        self,
+        distances: Union[float, npt.NDArray[np.float64]],
+        normalized: bool = False,
+    ) -> Union[StateSE2, npt.NDArray[np.float64]]:
+        """Interpolates the polyline at the given distances.
+
+        :param distances: The distances along the polyline to interpolate.
+        :param normalized: Whether the distances are normalized (0 to 1), defaults to False
+        :return: The interpolated StateSE2 or an array of interpolated states, according to the input type.
+        """
+
+        distances_ = distances * self.length if normalized else distances
+        clipped_distances = np.clip(distances_, 1e-8, self.length)
+
         interpolated_se2_array = self._interpolator(clipped_distances)
         interpolated_se2_array[..., StateSE2Index.YAW] = normalize_angle(interpolated_se2_array[..., StateSE2Index.YAW])
@@ -132,27 +231,41 @@
         return interpolated_se2_array
 
     def project(
-        self, point: Union[geom.Point, Point2D, npt.NDArray[np.float64]]
-    ) -> Union[Point2D, npt.NDArray[np.float64]]:
+        self,
+        point: Union[geom.Point, Point2D, StateSE2, npt.NDArray[np.float64]],
+        normalized: bool = False,
+    ) -> npt.NDArray[np.float64]:
+        """Projects a point onto the polyline and returns the distance along the polyline to the closest point.
+
+        :param point: The point to project onto the polyline.
+        :param normalized: Whether to return the normalized distance, defaults to False.
+        :return: The distance along the polyline to the closest point. 
+        """
         if isinstance(point, Point2D):
-            point_ = geom.Point(point.x, point.y)
-        elif isinstance(point, np.ndarray) and point.shape[-1] == 2:
-            point_ = geom_creation.points(point)
+            point_ = point.array
+        elif isinstance(point, StateSE2):
+            point_ = point.array[StateSE2Index.XY]
         elif isinstance(point, geom.Point):
-            point_ = point
+            point_ = np.array(point.coords[0], dtype=np.float64)
         else:
-            raise ValueError("Point must be a Point2D, geom.Point, or a 2D numpy array.")
-
-        return self.linestring.project(point_)
+            point_ = np.array(point, dtype=np.float64)
+        return self.linestring.project(geom.Point(point_), normalized=normalized)
 
 
 @dataclass
-class Polyline3D:
+class Polyline3D(ArrayMixin):
+    """Represents an interpolatable 3D polyline."""
 
     linestring: geom.LineString
 
     @classmethod
     def from_linestring(cls, linestring: geom.LineString) -> Polyline3D:
+        """Creates a Polyline3D from a Shapely LineString. If the LineString does not have Z-coordinates, \
+            a default Z-value is added.
+
+        :param linestring: The input LineString.
+        :return: A Polyline3D instance.
+        """
         return (
             Polyline3D(linestring)
             if linestring.has_z
@@ -161,27 +274,61 @@
 
     @classmethod
     def from_array(cls, array: npt.NDArray[np.float64]) -> Polyline3D:
-        assert array.ndim == 2 and array.shape[1] == 3, "Array must be 2D with shape (N, 3)"
+        """Creates a Polyline3D from a numpy array.
+
+        :param array: A numpy array of shape (N, 3) representing 3D points, e.g. indexed by \
+            :class:`~d123.geometry.Point3DIndex`.
+        :return: A Polyline3D instance.
+        """
+        assert array.ndim == 2 and array.shape[1] == 3, "Array must have shape (N, 3)"
         linestring = geom_creation.linestrings(*array.T)
         return Polyline3D(linestring)
 
     @property
     def polyline_2d(self) -> Polyline2D:
+        """Converts the 3D polyline to a 2D polyline by dropping the Z-coordinates.
+
+        :return: A Polyline2D instance.
+        """
         return Polyline2D(geom_creation.linestrings(*self.linestring.xy))
 
     @property
     def polyline_se2(self) -> PolylineSE2:
+        """Converts the 3D polyline to a 2D SE(2) polyline.
+
+        :return: A PolylineSE2 instance.
+        """
         return PolylineSE2.from_linestring(self.linestring)
 
     @property
     def array(self) -> Polyline2D:
+        """Converts the 3D polyline to the discrete 3D points.
+
+        :return: A numpy array of shape (N, 3), indexed by :class:`~d123.geometry.Point3DIndex`.
+        """
         return np.array(self.linestring.coords, dtype=np.float64)
 
     @property
     def length(self) -> float:
+        """Returns the length of the 3D polyline.
+
+        :return: The length of the polyline.
+        """
         return self.linestring.length
 
-    def interpolate(self, distances: Union[float, npt.NDArray[np.float64]]) -> Union[Point3D, npt.NDArray[np.float64]]:
+    def interpolate(
+        self,
+        distances: Union[float, npt.NDArray[np.float64]],
+        normalized: bool = False,
+    ) -> Union[Point3D, npt.NDArray[np.float64]]:
+        """Interpolates the 3D polyline at the given distances.
+
+        :param distances: A float or numpy array of distances along the polyline.
+        :param normalized: Whether to interpret the distances as fractions of the length.
+        :return: A Point3D instance or a numpy array of shape (N, 3) representing the interpolated points. 
+        """
+        distances = distances * self.length if normalized else distances
+
        if isinstance(distances, float) or isinstance(distances, int):
            point = self.linestring.interpolate(distances)
            return Point3D(point.x, point.y, point.z)
        else:
            distances = np.asarray(distances, dtype=np.float64)
            points = self.linestring.interpolate(distances)
            return np.array([[p.x, p.y, p.z] for p in points], dtype=np.float64)
 
+    def project(
+        self,
+        point: Union[geom.Point, Point2D, Point3D, npt.NDArray[np.float64]],
+        normalized: bool = False,
+    ) -> npt.NDArray[np.float64]:
+        """Projects a point onto the 3D polyline and returns the distance along the polyline to the closest point.
+
+        :param point: The point to project.
+        :param normalized: Whether to return normalized distances, defaults to False.
+        :return: The distance along the polyline to the closest point.
+        """
+        if isinstance(point, Point2D):
+            point_ = point.array
+        elif isinstance(point, StateSE2):
+            point_ = point.array[StateSE2Index.XY]
+        elif isinstance(point, Point3D):
+            point_ = point.array[Point3DIndex.XYZ]
+        elif isinstance(point, geom.Point):
+            point_ = np.array(point.coords[0], dtype=np.float64)
+        else:
+            point_ = np.array(point, dtype=np.float64)
+        return self.linestring.project(geom.Point(point_), normalized=normalized)
+
 
 @dataclass
 class PolylineSE3:
-    # TODO: implement this class
+    # TODO: Implement PolylineSE3 once quaternions are used in StateSE3
+    # Interpolating along SE3 states (i.e., 3D position + orientation) is meaningful,
+    # but more complex than SE2 due to 3D rotations (quaternions or rotation matrices).
+    # Linear interpolation of positions is straightforward, but orientation interpolation
+    # should use SLERP (spherical linear interpolation) for quaternions.
+    # This is commonly needed in robotics, animation, and path planning.
     pass
diff --git a/d123/geometry/se.py b/d123/geometry/se.py
index 8abd908a..9c73e341 100644
--- a/d123/geometry/se.py
+++ b/d123/geometry/se.py
@@ -13,24 +13,31 @@
 
 @dataclass
 class StateSE2:
-    """Class to represents 2D points."""
+    """Class to represent a 2D pose as SE2 (x, y, yaw)."""
 
-    x: float # [m] location
-    y: float # [m] location
-    yaw: float # [m] location
+    x: float # [m] x-location
+    y: float # [m] y-location
+    yaw: float # [rad] yaw/heading
     __slots__ = "x", "y", "yaw"
 
     @classmethod
     def from_array(cls, array: npt.NDArray[np.float64]) -> StateSE2:
+        """Constructs a StateSE2 from a numpy array.
+
+        :param array: Array of shape (3,) representing the state [x, y, yaw], indexed by \
+            :class:`~d123.geometry.geometry_index.StateSE2Index`.
+        :return: A StateSE2 instance.
+        """
         assert array.ndim == 1
         assert array.shape[0] == len(StateSE2Index)
         return StateSE2(array[StateSE2Index.X], array[StateSE2Index.Y], array[StateSE2Index.YAW])
 
     @property
     def array(self) -> npt.NDArray[np.float64]:
-        """
-        Convert vector to array
-        :return: array containing [x, y]
+        """Converts the StateSE2 instance to a numpy array, indexed by \
+            :class:`~d123.geometry.geometry_index.StateSE2Index`.
+
+        :return: A numpy array of shape (3,) containing the state [x, y, yaw].
         """
         array = np.zeros(len(StateSE2Index), dtype=np.float64)
         array[StateSE2Index.X] = self.x
@@ -39,10 +46,18 @@
         return array
 
     @property
-    def point_2d(self) -> Point2D:
+    def state_se2(self) -> StateSE2:
+        """The 2D pose itself. Helpful for polymorphism.
+
+        :return: A StateSE2 instance representing the 2D pose. 
""" - Convert SE2 state to 2D point (drops heading) - :return: Point2D dataclass + return self + + @property + def point_2d(self) -> Point2D: + """The 2D projection of the 2D pose. + + :return: A Point2D instance representing the 2D projection of the 2D pose. """ return Point2D(self.x, self.y) @@ -51,30 +66,37 @@ def shapely_point(self) -> geom.Point: return geom.Point(self.x, self.y) def __iter__(self) -> Iterable[float]: - """ - :return: iterator of tuples (x, y) - """ - return iter((self.x, self.y)) + """Iterator over the state coordinates (x, y, yaw).""" + return iter((self.x, self.y, self.yaw)) def __hash__(self) -> int: """Hash method""" - return hash((self.x, self.y)) + return hash((self.x, self.y, self.yaw)) @dataclass class StateSE3: - """Class to represents 2D points.""" - - x: float # [m] location - y: float # [m] location - z: float # [m] location - roll: float - pitch: float - yaw: float + """ + Class to represents a 3D pose as SE3 (x, y, z, roll, pitch, yaw). + TODO: Use quaternions for rotation representation. + """ + + x: float # [m] x-location + y: float # [m] y-location + z: float # [m] z-location + roll: float # [rad] roll + pitch: float # [rad] pitch + yaw: float # [rad] yaw __slots__ = "x", "y", "z", "roll", "pitch", "yaw" @classmethod def from_array(cls, array: npt.NDArray[np.float64]) -> StateSE3: + """Constructs a StateSE3 from a numpy array. + + :param array: Array of shape (6,) representing the state [x, y, z, roll, pitch, yaw], indexed by \ + :class:`~d123.geometry.StateSE3Index`. + :return: A StateSE3 instance. + """ assert array.ndim == 1 assert array.shape[0] == len(StateSE3Index) return StateSE3( @@ -87,7 +109,12 @@ def from_array(cls, array: npt.NDArray[np.float64]) -> StateSE3: ) @classmethod - def from_matrix(cls, array: npt.NDArray[np.float64]) -> StateSE3: + def from_transformation_matrix(cls, array: npt.NDArray[np.float64]) -> StateSE3: + """Constructs a StateSE3 from a 4x4 transformation matrix. + + :param array: A 4x4 numpy array representing the transformation matrix. + :return: A StateSE3 instance. + """ assert array.ndim == 2 assert array.shape == (4, 4) translation = array[:3, 3] @@ -103,6 +130,10 @@ def from_matrix(cls, array: npt.NDArray[np.float64]) -> StateSE3: @property def array(self) -> npt.NDArray[np.float64]: + """Converts the StateSE3 instance to a numpy array, indexed by StateSE3Index. + + :return: A numpy array of shape (6,) containing the state [x, y, z, roll, pitch, yaw]. + """ array = np.zeros(len(StateSE3Index), dtype=np.float64) array[StateSE3Index.X] = self.x array[StateSE3Index.Y] = self.y @@ -112,28 +143,90 @@ def array(self) -> npt.NDArray[np.float64]: array[StateSE3Index.YAW] = self.yaw return array - # @property - # def matrix(self) -> npt.NDArray[np.float64]: - # """Convert SE3 state to 4x4 transformation matrix.""" - # R = get_rotation_matrix(self) - # translation = np.array([self.x, self.y, self.z], dtype=np.float64) - # matrix = np.eye(4, dtype=np.float64) - # matrix[:3, :3] = R - # matrix[:3, 3] = translation - # return matrix - @property def state_se2(self) -> StateSE2: + """Returns the 3D state as a 2D state by ignoring the z-axis. + + :return: A StateSE2 instance representing the 2D projection of the 3D state. + """ return StateSE2(self.x, self.y, self.yaw) @property def point_3d(self) -> Point3D: + """Returns the 3D point representation of the state. + + :return: A Point3D instance representing the 3D point. 
+ """ return Point3D(self.x, self.y, self.z) @property def point_2d(self) -> Point2D: + """Returns the 2D point representation of the state. + + :return: A Point2D instance representing the 2D point. + """ return Point2D(self.x, self.y) @property def shapely_point(self) -> geom.Point: - return geom.Point(self.x, self.y, self.z) + """Returns the Shapely point representation of the state. + + :return: A Shapely Point instance representing the 3D point. + """ + return self.point_3d.shapely_point + + @property + def transformation_matrix(self) -> npt.NDArray[np.float64]: + """Returns the 4x4 transformation matrix representation of the state. + + :return: A 4x4 numpy array representing the transformation matrix. + """ + raise NotImplementedError("Transformation matrix conversion not implemented yet.") + + @property + def rotation_matrix(self) -> npt.NDArray[np.float64]: + """Returns the 3x3 rotation matrix representation of the state's orientation. + + :return: A 3x3 numpy array representing the rotation matrix. + """ + raise NotImplementedError("Rotation matrix conversion not implemented yet.") + + @property + def quaternion(self) -> npt.NDArray[np.float64]: + """Returns the quaternion (w, x, y, z) representation of the state's orientation. + + :return: A numpy array of shape (4,) representing the quaternion. + """ + raise NotImplementedError("Quaternion conversion not implemented yet.") + + def __iter__(self) -> Iterable[float]: + """Iterator over the state coordinates (x, y, z, roll, pitch, yaw).""" + return iter((self.x, self.y, self.z, self.roll, self.pitch, self.yaw)) + + def __hash__(self) -> int: + """Hash method""" + return hash((self.x, self.y, self.z, self.roll, self.pitch, self.yaw)) + + def __matmul__(self, other: StateSE3) -> StateSE3: + """Combines two SE3 states by applying the transformation of the other state to this state. + + :param other: Another StateSE3 instance representing the transformation to apply. + :return: A new StateSE3 instance representing the combined transformation. + """ + return StateSE3.from_transformation_matrix(self.transformation_matrix @ other.transformation_matrix) + + +@dataclass +class QuaternionSE3: + """Class representing a quaternion in SE3 space. + + TODO: Implement and replace StateSE3. 
+    """
+
+    x: float
+    y: float
+    z: float
+    qw: float
+    qx: float
+    qy: float
+    qz: float
diff --git a/d123/dataset/dataset_specific/av2/.gitkeep b/d123/geometry/torch/.gitkeep
similarity index 100%
rename from d123/dataset/dataset_specific/av2/.gitkeep
rename to d123/geometry/torch/.gitkeep
diff --git a/d123/geometry/transform/se3.py b/d123/geometry/transform/se3.py
index 6b4219d0..02be57df 100644
--- a/d123/geometry/transform/se3.py
+++ b/d123/geometry/transform/se3.py
@@ -3,33 +3,6 @@
 from d123.geometry import Point3DIndex, StateSE3, StateSE3Index, Vector3D

-# def get_rotation_matrix(state_se3: StateSE3) -> npt.NDArray[np.float64]:
-#     R_x = np.array(
-#         [
-#             [1, 0, 0],
-#             [0, np.cos(state_se3.roll), -np.sin(state_se3.roll)],
-#             [0, np.sin(state_se3.roll), np.cos(state_se3.roll)],
-#         ],
-#         dtype=np.float64,
-#     )
-#     R_y = np.array(
-#         [
-#             [np.cos(state_se3.pitch), 0, np.sin(state_se3.pitch)],
-#             [0, 1, 0],
-#             [-np.sin(state_se3.pitch), 0, np.cos(state_se3.pitch)],
-#         ],
-#         dtype=np.float64,
-#     )
-#     R_z = np.array(
-#         [
-#             [np.cos(state_se3.yaw), -np.sin(state_se3.yaw), 0],
-#             [np.sin(state_se3.yaw), np.cos(state_se3.yaw), 0],
-#             [0, 0, 1],
-#         ],
-#         dtype=np.float64,
-#     )
-#     return R_z @ R_y @ R_x
-

 def get_rotation_matrix(state_se3: StateSE3) -> npt.NDArray[np.float64]:
     # Intrinsic Z-Y'-X'' rotation: R = R_x(roll) @ R_y(pitch) @ R_z(yaw)
diff --git a/d123/geometry/utils/bounding_box_utils.py b/d123/geometry/utils/bounding_box_utils.py
index b1ce46b2..a9c40077 100644
--- a/d123/geometry/utils/bounding_box_utils.py
+++ b/d123/geometry/utils/bounding_box_utils.py
@@ -2,7 +2,12 @@
 import numpy.typing as npt
 import shapely

-from d123.geometry.geometry_index import BoundingBoxSE2Index, Corners2DIndex, Point2DIndex
+from d123.geometry.geometry_index import (
+    BoundingBoxSE2Index,
+    BoundingBoxSE3Index,
+    Corners2DIndex,
+    Point2DIndex,
+)


 def bbse2_array_to_corners_array(bbse2: npt.NDArray[np.float64]) -> npt.NDArray[np.float64]:
@@ -36,13 +41,13 @@ def bbse2_array_to_corners_array(bbse2: npt.NDArray[np.float64]) -> npt.NDArray[
     return corners_array.squeeze(axis=0) if ndim_one else corners_array


-def corners_array_to_polygon_array(corners_array: npt.NDArray[np.float64]) -> npt.NDArray[np.object_]:
+def corners_2d_array_to_polygon_array(corners_array: npt.NDArray[np.float64]) -> npt.NDArray[np.object_]:
     polygons = shapely.creation.polygons(corners_array)
     return polygons


 def bbse2_array_to_polygon_array(bbse2: npt.NDArray[np.float64]) -> npt.NDArray[np.float64]:
-    return corners_array_to_polygon_array(bbse2_array_to_corners_array(bbse2))
+    return corners_2d_array_to_polygon_array(bbse2_array_to_corners_array(bbse2))


 def translate_along_yaw_array(
@@ -51,6 +56,7 @@ def translate_along_yaw_array(
     lon: npt.NDArray[np.float64],
     lat: npt.NDArray[np.float64],
 ) -> npt.NDArray[np.float64]:
+    # TODO: move somewhere else
     assert points_2d.shape[-1] == len(Point2DIndex)
     half_pi = np.pi / 2.0
     translation: npt.NDArray[np.float64] = np.stack(
@@ -61,3 +67,59 @@ def translate_along_yaw_array(
         axis=-1,
     )
     return points_2d + translation
+
+
+def bbse3_array_to_corners_array(bbse3_array: npt.NDArray[np.float64]) -> npt.NDArray[np.float64]:
+    """Converts an array of BoundingBoxSE3 parameters to a corner coordinates array.
+    TODO: Fix this function
+
+    :param bbse3_array: Array of BoundingBoxSE3 objects, shape (..., 9) [x, y, z, yaw, pitch, roll, length, width, height].
+    :return: Coordinates array of shape (..., 8, 3) where 8 is the number of corners.
+    """
+    assert bbse3_array.shape[-1] == len(BoundingBoxSE3Index)
+
+    ndim_one: bool = bbse3_array.ndim == 1
+    if ndim_one:
+        bbse3_array = bbse3_array[None, :]
+
+    # Extract parameters
+    centers = bbse3_array[..., BoundingBoxSE3Index.XYZ]  # (..., 3)
+    yaws = bbse3_array[..., BoundingBoxSE3Index.YAW]  # (...,)
+    pitches = bbse3_array[..., BoundingBoxSE3Index.PITCH]  # (...,)
+    rolls = bbse3_array[..., BoundingBoxSE3Index.ROLL]  # (...,)
+
+    # Corner factors: (x, y, z) in box frame
+    factors = np.array(
+        [
+            [+0.5, -0.5, -0.5],  # FRONT_LEFT_BOTTOM
+            [+0.5, +0.5, -0.5],  # FRONT_RIGHT_BOTTOM
+            [-0.5, +0.5, -0.5],  # BACK_RIGHT_BOTTOM
+            [-0.5, -0.5, -0.5],  # BACK_LEFT_BOTTOM
+            [+0.5, -0.5, +0.5],  # FRONT_LEFT_TOP
+            [+0.5, +0.5, +0.5],  # FRONT_RIGHT_TOP
+            [-0.5, +0.5, +0.5],  # BACK_RIGHT_TOP
+            [-0.5, -0.5, +0.5],  # BACK_LEFT_TOP
+        ],
+        dtype=np.float64,
+    )  # (8, 3)
+
+    # Box extents, broadcast over the corner axis
+    extents = bbse3_array[..., BoundingBoxSE3Index.EXTENT]  # (..., 3)
+    corners_local = extents[..., None, :] * factors  # (..., 8, 3)
+
+    # Rotation matrices (yaw, pitch, roll)
+    def rotation_matrix(yaw, pitch, roll):
+        cy, sy = np.cos(yaw), np.sin(yaw)
+        cp, sp = np.cos(pitch), np.sin(pitch)
+        cr, sr = np.cos(roll), np.sin(roll)
+        Rz = np.array([[cy, -sy, 0], [sy, cy, 0], [0, 0, 1]])
+        Ry = np.array([[cp, 0, sp], [0, 1, 0], [-sp, 0, cp]])
+        Rx = np.array([[1, 0, 0], [0, cr, -sr], [0, sr, cr]])
+        return Rz @ Ry @ Rx
+
+    corners_world = np.empty((*bbse3_array.shape[:-1], 8, 3), dtype=np.float64)
+    for idx in np.ndindex(bbse3_array.shape[:-1]):
+        R = rotation_matrix(yaws[idx], pitches[idx], rolls[idx])
+        corners_world[idx] = centers[idx] + (corners_local[idx] @ R.T)
+
+    return corners_world.squeeze(axis=0) if ndim_one else corners_world
diff --git a/d123/geometry/vector.py b/d123/geometry/vector.py
index a3b8631d..3ab2ef51 100644
--- a/d123/geometry/vector.py
+++ b/d123/geometry/vector.py
@@ -1,57 +1,215 @@
 from __future__ import annotations

+from dataclasses import dataclass
+from typing import Iterable
+
 import numpy as np
 import numpy.typing as npt

-from d123.geometry.point import Point2D, Point3D, Point3DIndex
+from d123.geometry.geometry_index import Vector2DIndex, Vector3DIndex
+
+
+@dataclass
+class Vector2D:
+    """
+    Class to represent 2D vectors, in x, y direction.
+
+    Example:
+        >>> v1 = Vector2D(3.0, 4.0)
+        >>> v2 = Vector2D(1.0, 2.0)
+        >>> v3 = v1 + v2
+        >>> v3
+        Vector2D(x=4.0, y=6.0)
+        >>> v1.array
+        array([3., 4.])
+        >>> v1.magnitude
+        5.0
+    """
+
+    x: float  # [m] x-component of the vector
+    y: float  # [m] y-component of the vector
+    __slots__ = "x", "y"
+
+    @classmethod
+    def from_array(cls, array: npt.NDArray[np.float64]) -> Vector2D:
+        """Constructs a Vector2D from a numpy array.
+
+        :param array: Array of shape (2,) representing the vector components [x, y], indexed by \
+            :class:`~d123.geometry.Vector2DIndex`.
+        :return: A Vector2D instance.
+        """
+        assert array.ndim == 1
+        assert array.shape[0] == len(Vector2DIndex)
+        return Vector2D(array[Vector2DIndex.X], array[Vector2DIndex.Y])
+
+    @property
+    def array(self) -> npt.NDArray[np.float64]:
+        """The array representation of the 2D vector.
+
+        :return: A numpy array of shape (2,) containing the vector components [x, y], indexed by \
+            :class:`~d123.geometry.Vector2DIndex`.
+        """
+        array = np.zeros(len(Vector2DIndex), dtype=np.float64)
+        array[Vector2DIndex.X] = self.x
+        array[Vector2DIndex.Y] = self.y
+        return array
+
+    @property
+    def magnitude(self) -> float:
+        """Calculates the magnitude (length) of the 2D vector.
+
+        :return: The magnitude of the vector.
+        """
+        return float(np.linalg.norm(self.array))
+
+    @property
+    def vector_2d(self) -> Vector2D:
+        """The 2D vector itself. Handy for polymorphism.
+
+        :return: A Vector2D instance representing the 2D vector.
+        """
+        return self
+

-class Vector2D(Point2D):
     def __add__(self, other: Vector2D) -> Vector2D:
+        """Adds two 2D vectors.
+
+        :param other: The other vector to add.
+        :return: A new Vector2D instance representing the sum.
+        """
         return Vector2D(self.x + other.x, self.y + other.y)

     def __sub__(self, other: Vector2D) -> Vector2D:
+        """Subtracts two 2D vectors.
+
+        :param other: The other vector to subtract.
+        :return: A new Vector2D instance representing the difference.
+        """
         return Vector2D(self.x - other.x, self.y - other.y)

     def __mul__(self, scalar: float) -> Vector2D:
+        """Multiplies the 2D vector by a scalar.
+
+        :param scalar: The scalar value to multiply with.
+        :return: A new Vector2D instance representing the scaled vector.
+        """
         return Vector2D(self.x * scalar, self.y * scalar)

     def __truediv__(self, scalar: float) -> Vector2D:
+        """Divides the 2D vector by a scalar.
+
+        :param scalar: The scalar value to divide by.
+        :return: A new Vector2D instance representing the divided vector.
+        """
         return Vector2D(self.x / scalar, self.y / scalar)

+    def __iter__(self) -> Iterable[float]:
+        """Iterator over vector components."""
+        return iter((self.x, self.y))
+
+    def __hash__(self) -> int:
+        """Hash method"""
+        return hash((self.x, self.y))
+
+
+@dataclass
+class Vector3D:
+    """
+    Class to represent 3D vectors, in x, y, z direction.
+
+    Example:
+        >>> v1 = Vector3D(1.0, 2.0, 3.0)
+        >>> v2 = Vector3D(4.0, 5.0, 6.0)
+        >>> v3 = v1 + v2
+        >>> v3
+        Vector3D(x=5.0, y=7.0, z=9.0)
+        >>> v1.array
+        array([1., 2., 3.])
+        >>> v1.magnitude
+        3.7416573867739413
+    """
+
+    x: float  # [m] x-component of the vector
+    y: float  # [m] y-component of the vector
+    z: float  # [m] z-component of the vector
+    __slots__ = "x", "y", "z"
+
+    @classmethod
+    def from_array(cls, array: npt.NDArray[np.float64]) -> Vector3D:
+        """Constructs a Vector3D from a numpy array.
+
+        :param array: Array of shape (3,), indexed by :class:`~d123.geometry.geometry_index.Vector3DIndex`.
+        :return: A Vector3D instance.
+        """
+        assert array.ndim == 1
+        assert array.shape[0] == len(Vector3DIndex)
+        return Vector3D(array[Vector3DIndex.X], array[Vector3DIndex.Y], array[Vector3DIndex.Z])
+
+    @property
+    def array(self) -> npt.NDArray[np.float64]:
+        """
+        Returns the vector components as a numpy array.
+
+        :return: A numpy array representing the vector components [x, y, z], indexed by \
+            :class:`~d123.geometry.geometry_index.Vector3DIndex`.
+        """
+        array = np.zeros(len(Vector3DIndex), dtype=np.float64)
+        array[Vector3DIndex.X] = self.x
+        array[Vector3DIndex.Y] = self.y
+        array[Vector3DIndex.Z] = self.z
+        return array
+
+    @property
     def magnitude(self) -> float:
-        """Calculate the magnitude of the vector."""
+        """Calculates the magnitude (length) of the 3D vector.
+
+        :return: The magnitude of the vector.
+        """
         return float(np.linalg.norm(self.array))

     @property
     def vector_2d(self) -> Vector2D:
-        return self
+        """Returns the 2D vector projection (x, y) of the 3D vector.

-
-class Vector3D(Point3D):
-
-    @classmethod
-    def from_array(cls, array: npt.NDArray[np.float64]) -> Vector3D:
-        assert array.ndim == 1
-        assert array.shape[0] == len(Point3DIndex)
-        return cls(array[Point3DIndex.X], array[Point3DIndex.Y], array[Point3DIndex.Z])
+        :return: A Vector2D instance representing the 2D projection.
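+
+        Example (a doctest-style sketch, assuming the dataclass repr):
+            >>> Vector3D(1.0, 2.0, 3.0).vector_2d
+            Vector2D(x=1.0, y=2.0)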
+        """
+        return Vector2D(self.x, self.y)

     def __add__(self, other: Vector3D) -> Vector3D:
+        """Adds two 3D vectors.
+
+        :param other: The other vector to add.
+        :return: A new Vector3D instance representing the sum.
+        """
         return Vector3D(self.x + other.x, self.y + other.y, self.z + other.z)

     def __sub__(self, other: Vector3D) -> Vector3D:
+        """Subtracts two 3D vectors.
+
+        :param other: The other vector to subtract.
+        :return: A new Vector3D instance representing the difference.
+        """
         return Vector3D(self.x - other.x, self.y - other.y, self.z - other.z)

     def __mul__(self, scalar: float) -> Vector3D:
+        """Multiplies the 3D vector by a scalar.
+
+        :param scalar: The scalar value to multiply with.
+        :return: A new Vector3D instance representing the scaled vector.
+        """
         return Vector3D(self.x * scalar, self.y * scalar, self.z * scalar)

     def __truediv__(self, scalar: float) -> Vector3D:
+        """Divides the 3D vector by a scalar.
+
+        :param scalar: The scalar value to divide by.
+        :return: A new Vector3D instance representing the divided vector.
+        """
         return Vector3D(self.x / scalar, self.y / scalar, self.z / scalar)

-    def magnitude(self) -> float:
-        """Calculate the magnitude of the vector."""
-        return float(np.linalg.norm(self.array))
+    def __iter__(self) -> Iterable[float]:
+        """Iterator over vector components."""
+        return iter((self.x, self.y, self.z))

-    @property
-    def vector_2d(self) -> Vector2D:
-        return Vector2D(self.x, self.y)
+    def __hash__(self) -> int:
+        """Hash method"""
+        return hash((self.x, self.y, self.z))
diff --git a/d123/simulation/agents/constant_velocity_agents.py b/d123/simulation/agents/constant_velocity_agents.py
index 7d1a58d0..5201e768 100644
--- a/d123/simulation/agents/constant_velocity_agents.py
+++ b/d123/simulation/agents/constant_velocity_agents.py
@@ -49,7 +49,7 @@ def step(self, non_target_agents: List[BoxDetection]):
         time_delta_s = self._timestep_s * self._current_iteration
         current_target_agents = []
         for initial_agent in self._initial_target_agents:
-            speed: float = float(initial_agent.velocity.vector_2d.magnitude())
+            speed: float = float(initial_agent.velocity.vector_2d.magnitude)

             propagated_center = translate_along_yaw(initial_agent.center, Point2D(speed * time_delta_s, 0.0))
             propagated_bounding_box = BoundingBoxSE2(
diff --git a/d123/simulation/agents/idm_agents.py b/d123/simulation/agents/idm_agents.py
index 072773e1..ef8437cf 100644
--- a/d123/simulation/agents/idm_agents.py
+++ b/d123/simulation/agents/idm_agents.py
@@ -91,7 +91,7 @@ def reset(
             self._agent_paths_buffer[agent.metadata.track_token] = polyline_se2.linestring.buffer(
                 agent.bounding_box_se2.width / 2, cap_style=CAP_STYLE.square
             )
-            self._agent_initial_vel[agent.metadata.track_token] = float(agent.velocity.vector_2d.magnitude())
+            self._agent_initial_vel[agent.metadata.track_token] = float(agent.velocity.vector_2d.magnitude)

         self._past_target_agents = self._initial_target_agents
         return self._initial_target_agents
@@ -105,7 +105,7 @@ def step(self, non_target_agents: List[BoxDetection]):
         # time_delta_s = self._timestep_s * self._current_iteration
         current_target_agents = []
         for past_agent in self._past_target_agents:
-            agent_velocity: float = float(past_agent.velocity.vector_2d.magnitude())
+            agent_velocity: float = float(past_agent.velocity.vector_2d.magnitude)

             agent_path = self._agent_paths[past_agent.metadata.track_token]
             agent_path_buffer = self._agent_paths_buffer[past_agent.metadata.track_token]
@@ -133,7 +133,7 @@ def step(self, non_target_agents: List[BoxDetection]):
             if leading_agent is not
None: distance_to_lead_agent = past_agent.shapely_polygon.distance(leading_agent.shapely_polygon) - lead_agent_velocity = float(leading_agent.velocity.vector_2d.magnitude()) + lead_agent_velocity = float(leading_agent.velocity.vector_2d.magnitude) else: distance_to_lead_agent = float( np.clip(agent_path.length - agent_distance_on_path, a_min=0.0, a_max=None) diff --git a/d123/simulation/agents/path_following.py b/d123/simulation/agents/path_following.py index e4d740c3..960486d0 100644 --- a/d123/simulation/agents/path_following.py +++ b/d123/simulation/agents/path_following.py @@ -72,7 +72,7 @@ def step(self, non_target_agents: List[BoxDetection]): time_delta_s = self._timestep_s * self._current_iteration current_target_agents = [] for initial_agent in self._initial_target_agents: - speed: float = float(initial_agent.velocity.vector_2d.magnitude()) + speed: float = float(initial_agent.velocity.vector_2d.magnitude) propagate_distance = speed * time_delta_s propagated_center = self._agent_paths[initial_agent.metadata.track_token].interpolate(propagate_distance) diff --git a/d123/simulation/gym/environment/gym_observation/raster/raster_renderer.py b/d123/simulation/gym/environment/gym_observation/raster/raster_renderer.py index 38d661ac..ae8c1136 100644 --- a/d123/simulation/gym/environment/gym_observation/raster/raster_renderer.py +++ b/d123/simulation/gym/environment/gym_observation/raster/raster_renderer.py @@ -318,11 +318,11 @@ def _render_speed_line( :param agent: Agent object containing the state and velocity. :param color: Integer value of color """ - if box_detection.velocity.magnitude() > self._meter_per_pixel: + if box_detection.velocity.magnitude > self._meter_per_pixel: future = translate_along_yaw( pose=box_detection.center, translation=Vector2D( - x=box_detection.bounding_box_se2.half_length + box_detection.velocity.magnitude(), + x=box_detection.bounding_box_se2.half_length + box_detection.velocity.magnitude, y=0.0, ), ) @@ -445,7 +445,7 @@ def _render_detections_from_cache(self, box_detection_cache: BoxDetectionCache) polygon: Polygon = self._scale_polygon(vehicle.bounding_box_se2.shapely_polygon, self._vehicle_scaling) self._render_convex_polygons(mask, box_detection_cache.origin, [polygon], color=MAX_VALUE) vehicles_raster[mask > 0] = self._scale_to_color( - vehicle.velocity.magnitude(), + vehicle.velocity.magnitude, self._max_vehicle_speed, ) mask.fill(0) @@ -460,7 +460,7 @@ def _render_detections_from_cache(self, box_detection_cache: BoxDetectionCache) ) self._render_convex_polygons(mask, box_detection_cache.origin, [polygon], color=MAX_VALUE) pedestrians_raster[mask > 0] = self._scale_to_color( - pedestrian.velocity.magnitude(), + pedestrian.velocity.magnitude, self._max_pedestrian_speed, ) mask.fill(0) @@ -484,7 +484,7 @@ def _render_detections_from_cache(self, box_detection_cache: BoxDetectionCache) ego_polygon: Polygon = self._scale_polygon(ego_detection.shapely_polygon, self._vehicle_scaling) self._render_convex_polygons(mask, box_detection_cache.origin, [ego_polygon], color=MAX_VALUE) - ego_raster[mask > 0] = self._scale_to_color(ego_detection.velocity.magnitude(), self._max_vehicle_speed) + ego_raster[mask > 0] = self._scale_to_color(ego_detection.velocity.magnitude, self._max_vehicle_speed) mask.fill(0) return [vehicles_raster, pedestrians_raster, ego_raster] diff --git a/d123/simulation/gym/environment/reward_builder/components/time_to_collision.py b/d123/simulation/gym/environment/reward_builder/components/time_to_collision.py index 9a5fd2d1..7c3e0883 100644 --- 
a/d123/simulation/gym/environment/reward_builder/components/time_to_collision.py +++ b/d123/simulation/gym/environment/reward_builder/components/time_to_collision.py @@ -84,7 +84,7 @@ def calculate_ttc_v1(simulation_wrapper: SimulationWrapper, resolution: int = 2) ) = simulation_wrapper.current_planner_input.history.current_state assert isinstance(observation, DetectionsTracks) tracked_objects = observation.tracked_objects - ego_speed = ego_state.dynamic_car_state.center_velocity_2d.magnitude() + ego_speed = ego_state.dynamic_car_state.center_velocity_2d.magnitude if len(tracked_objects) == 0 or ego_speed < STOPPED_SPEED_THRESHOLD: return SUCCESS_TTC @@ -119,7 +119,7 @@ def _add_object(tracked_object: TrackedObject) -> bool: if _add_object(agent): agent_tokens.append(agent.track_token) agent_coords_list.append(_get_coords_array(agent.box)) - agent_dxy.append(_get_dxy(agent.box.center.heading, agent.velocity.magnitude())) + agent_dxy.append(_get_dxy(agent.box.center.heading, agent.velocity.magnitude)) agent_coords_array = np.array(agent_coords_list, dtype=np.float64) # (num_agents, 5, 2) agent_dxy = np.array(agent_dxy, dtype=np.float64) # (num_agents, 2) if len(agent_tokens) == 0: @@ -227,7 +227,7 @@ def _extract_tracks_info_excluding_collided_tracks( tracks_poses: List[List[float]] = [[*tracked_object.center] for tracked_object in tracked_objects] tracks_speed: List[float] = [ - tracked_object.velocity.magnitude() if isinstance(tracked_object, Agent) else 0 + tracked_object.velocity.magnitude if isinstance(tracked_object, Agent) else 0 for tracked_object in tracked_objects ] tracks_boxes: List[OrientedBox] = [tracked_object.box for tracked_object in tracked_objects] diff --git a/d123/simulation/gym/environment/reward_builder/default_reward_builder.py b/d123/simulation/gym/environment/reward_builder/default_reward_builder.py index 527d4e71..940b1f47 100644 --- a/d123/simulation/gym/environment/reward_builder/default_reward_builder.py +++ b/d123/simulation/gym/environment/reward_builder/default_reward_builder.py @@ -613,7 +613,7 @@ def _calculate_off_road( [[point.x, point.y] for point in ego_state.agent.box.all_corners()], dtype=np.float64, ) - corner_in_polygons = drivable_area_map.points_in_polygons(ego_corners) # (geom, 4) + corner_in_polygons = drivable_area_map.contains_vectorized(ego_corners) # (geom, 4) polygon_indices = np.where(corner_in_polygons.sum(axis=-1) > 0)[0] corners_dwithin_polygons = corner_in_polygons.sum(axis=0) > 0 diff --git a/docs/conf.py b/docs/conf.py index f114d331..b14d3fea 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -75,7 +75,7 @@ "members": True, "member-order": "bysource", "undoc-members": True, - "inherited-members": True, - "exclude-members": "__init__, __post_init__", + "inherited-members": False, + "exclude-members": "__init__, __post_init__, __new__", "imported-members": True, } diff --git a/docs/geometry.rst b/docs/geometry.rst index 263b7db0..957db541 100644 --- a/docs/geometry.rst +++ b/docs/geometry.rst @@ -2,12 +2,56 @@ Geometry ======== -.. autoclass:: d123.common.geometry.base.Point2D() +Geometric Primitives +-------------------- -.. autoclass:: d123.common.geometry.base.Point3D() +Points +~~~~~~ +.. autoclass:: d123.geometry.Point2D() -.. autoclass:: d123.common.geometry.base.StateSE2() +.. autoclass:: d123.geometry.Point3D() -.. autoclass:: d123.common.geometry.base.StateSE3() +Vectors +~~~~~~~ +.. autoclass:: d123.geometry.Vector2D() -.. autoclass:: d123.dataset.maps.abstract_map.AbstractMap() +.. 
autoclass:: d123.geometry.Vector3D()
+
+Special Euclidean States
+~~~~~~~~~~~~~~~~~~~~~~~~
+.. autoclass:: d123.geometry.StateSE2()
+
+.. autoclass:: d123.geometry.StateSE3()
+
+Bounding Boxes
+~~~~~~~~~~~~~~
+.. autoclass:: d123.geometry.BoundingBoxSE2()
+
+.. autoclass:: d123.geometry.BoundingBoxSE3()
+
+Indexing Enums
+~~~~~~~~~~~~~~
+.. autoclass:: d123.geometry.Point2DIndex()
+
+.. autoclass:: d123.geometry.Point3DIndex()
+
+.. autoclass:: d123.geometry.Vector2DIndex()
+
+.. autoclass:: d123.geometry.Vector3DIndex()
+
+.. autoclass:: d123.geometry.StateSE2Index()
+
+.. autoclass:: d123.geometry.StateSE3Index()
+
+.. autoclass:: d123.geometry.BoundingBoxSE2Index()
+
+.. autoclass:: d123.geometry.BoundingBoxSE3Index()
+
+.. autoclass:: d123.geometry.Corners2DIndex()
+
+.. autoclass:: d123.geometry.Corners3DIndex()
+
+
+Occupancy Map
+-------------
+.. autoclass:: d123.geometry.OccupancyMap2D()
diff --git a/notebooks/viz/bev_matplotlib.ipynb b/notebooks/viz/bev_matplotlib.ipynb
index f8fe8c69..9052f79b 100644
--- a/notebooks/viz/bev_matplotlib.ipynb
+++ b/notebooks/viz/bev_matplotlib.ipynb
@@ -20,6 +20,21 @@
    "id": "1",
    "metadata": {},
    "outputs": [],
+   "source": [
+    "from d123.geometry import Point2D\n",
+    "import numpy as np\n",
+    "\n",
+    "import torch\n",
+    "\n",
+    "from d123.geometry.polyline import Polyline2D"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "2",
+   "metadata": {},
+   "outputs": [],
    "source": [
     "# split = \"nuplan_private_test\"\n",
     "# log_names = [\"2021.09.29.17.35.58_veh-44_00066_00432\"]\n",
@@ -56,7 +71,7 @@
  {
   "cell_type": "code",
   "execution_count": null,
-  "id": "2",
+  "id": "3",
   "metadata": {},
   "outputs": [],
   "source": [
@@ -175,7 +190,6 @@
    "    map_object: AbstractLaneGroup\n",
    "    add_shapely_polygon_to_ax(ax, map_object.shapely_polygon, MAP_SURFACE_CONFIG[layer])\n",
    "\n",
-    "\n",
    "    if layer in [MapLayer.LANE]:\n",
    "        add_shapely_linestring_to_ax(ax, map_object.centerline.linestring, CENTERLINE_CONFIG)\n",
    "\n",
@@ -236,7 +250,7 @@
    "    return fig, ax\n",
    "\n",
    "\n",
-    "scene_index = 1\n",
+    "scene_index = 3\n",
    "iteration = 99\n",
    "fig, ax = plot_scene_at_iteration(scenes[scene_index], iteration=iteration, radius=60)\n",
    "plt.show()\n",
@@ -254,14 +268,10 @@
  {
   "cell_type": "code",
   "execution_count": null,
-  "id": "3",
+  "id": "4",
   "metadata": {},
   "outputs": [],
   "source": [
-    "scene = scenes[scene_index]\n",
-    "\n",
-    "\n",
-    "scene.get_camera_at_iteration(camera_type=CameraType.CAM_F0, iteration=0)\n",
    "\n",
    "\n",
    "\n"
   ]
  },
@@ -270,7 +280,7 @@
  {
   "cell_type": "code",
   "execution_count": null,
-  "id": "4",
+  "id": "5",
   "metadata": {},
   "outputs": [],
   "source": [
@@ -309,7 +319,7 @@
  {
   "cell_type": "code",
   "execution_count": null,
-  "id": "5",
+  "id": "6",
   "metadata": {},
   "outputs": [],
   "source": []
@@ -317,7 +327,7 @@
  {
   "cell_type": "code",
   "execution_count": null,
-  "id": "6",
+  "id": "7",
   "metadata": {},
   "outputs": [],
   "source": []
@@ -325,7 +335,7 @@
  {
   "cell_type": "code",
   "execution_count": null,
-  "id": "7",
+  "id": "8",
   "metadata": {},
   "outputs": [],
   "source": []

From 0773efd82c9549ed8d9fe23cc9963a25b91451ee Mon Sep 17 00:00:00 2001
From: DanielDauner
Date: Mon, 25 Aug 2025 15:27:03 +0200
Subject: [PATCH 016/145] Make most `geometry` types array-based. Add tests for some `geometry` types.
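
A minimal usage sketch of the array-backed types introduced in this patch
(an illustration added for review, assuming the package-level exports from
`d123.geometry`; not part of the committed files):

    import numpy as np
    from d123.geometry import Point2D, StateSE2

    point = Point2D(1.0, 2.0)
    print(np.asarray(point))  # ArrayMixin makes every geometry type array-like

    # from_array(copy=False) wraps the caller's buffer without copying.
    pose = StateSE2.from_array(np.array([1.0, 2.0, 0.5]), copy=False)
    print(float(pose.yaw))  # 0.5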
--- d123/common/utils/mixin.py | 9 +- d123/geometry/bounding_box.py | 3 +- d123/geometry/geometry_index.py | 47 +++ d123/geometry/point.py | 111 ++++--- d123/geometry/polyline.py | 38 +-- d123/geometry/rotation.py | 241 +++++++++++++++ d123/geometry/se.py | 360 +++++++++++++++++----- d123/geometry/test/__init__.py | 0 d123/geometry/test/test_point.py | 203 ++++++++++++ d123/geometry/test/test_polyline.py | 326 ++++++++++++++++++++ d123/geometry/test/test_rotation.py | 240 +++++++++++++++ d123/geometry/test/test_vector.py | 172 +++++++++++ d123/geometry/utils/bounding_box_utils.py | 24 +- d123/geometry/utils/rotation_utils.py | 51 ++- d123/geometry/vector.py | 90 ++++-- 15 files changed, 1745 insertions(+), 170 deletions(-) create mode 100644 d123/geometry/rotation.py create mode 100644 d123/geometry/test/__init__.py create mode 100644 d123/geometry/test/test_point.py create mode 100644 d123/geometry/test/test_polyline.py create mode 100644 d123/geometry/test/test_rotation.py create mode 100644 d123/geometry/test/test_vector.py diff --git a/d123/common/utils/mixin.py b/d123/common/utils/mixin.py index 5e7ecc0e..9290242b 100644 --- a/d123/common/utils/mixin.py +++ b/d123/common/utils/mixin.py @@ -1,20 +1,18 @@ from __future__ import annotations -import abc import copy as pycopy -from functools import cached_property import numpy as np import numpy.typing as npt -class ArrayMixin(abc.ABC): +class ArrayMixin: """Abstract base class for geometric entities.""" - @cached_property - @abc.abstractmethod + @property def array(self) -> npt.NDArray[np.float64]: """The array representation of the geometric entity.""" + raise NotImplementedError def __array__(self, dtype: npt.DtypeLike = None, copy: bool = False) -> npt.NDArray: array = self.array @@ -34,6 +32,7 @@ def __eq__(self, other) -> bool: return np.array_equal(self.array, other.array) return False + @property def shape(self) -> tuple: """Return the shape of the array.""" return self.array.shape diff --git a/d123/geometry/bounding_box.py b/d123/geometry/bounding_box.py index 6b612c4e..3ea196e3 100644 --- a/d123/geometry/bounding_box.py +++ b/d123/geometry/bounding_box.py @@ -3,6 +3,7 @@ from ast import Dict from dataclasses import dataclass from functools import cached_property +from typing import Union import numpy as np import numpy.typing as npt @@ -202,4 +203,4 @@ def corners_dict(self) -> Dict[Corners3DIndex, Point3D]: return {index: Point3D.from_array(corners_array[index]) for index in Corners3DIndex} -BoundingBox = BoundingBoxSE2 | BoundingBoxSE3 +BoundingBox = Union[BoundingBoxSE2, BoundingBoxSE3] diff --git a/d123/geometry/geometry_index.py b/d123/geometry/geometry_index.py index f77951c2..3f658838 100644 --- a/d123/geometry/geometry_index.py +++ b/d123/geometry/geometry_index.py @@ -71,6 +71,27 @@ class Vector3DIndex(IntEnum): Z = 2 +class EulerAnglesIndex(IntEnum): + """ + Indexes array-like representations of Euler angles (roll,pitch,yaw). + """ + + ROLL = 0 + PITCH = 1 + YAW = 2 + + +class QuaternionIndex(IntEnum): + """ + Indexes array-like representations of quaternions (qw,qx,qy,qz). + """ + + QW = 0 + QX = 1 + QY = 2 + QZ = 3 + + class StateSE3Index(IntEnum): """ Indexes array-like representations of SE3 states (x,y,z,roll,pitch,yaw). @@ -97,6 +118,32 @@ def ROTATION_XYZ(cls) -> slice: return slice(cls.ROLL, cls.YAW + 1) +class QuaternionSE3Index(IntEnum): + """ + Indexes array-like representations of SE3 states with quaternions (x,y,z,qw,qx,qy,qz). 
+    """
+
+    X = 0
+    Y = 1
+    Z = 2
+    QW = 3
+    QX = 4
+    QY = 5
+    QZ = 6
+
+    @classproperty
+    def XY(cls) -> slice:
+        return slice(cls.X, cls.Y + 1)
+
+    @classproperty
+    def XYZ(cls) -> slice:
+        return slice(cls.X, cls.Z + 1)
+
+    @classproperty
+    def QUATERNION(cls) -> slice:
+        return slice(cls.QW, cls.QZ + 1)
+
+
 class BoundingBoxSE2Index(IntEnum):
     """
     Indexes array-like representations of rotated 2D bounding boxes (x,y,yaw,length,width).
diff --git a/d123/geometry/point.py b/d123/geometry/point.py
index 5641cf17..b3daa3c8 100644
--- a/d123/geometry/point.py
+++ b/d123/geometry/point.py
@@ -1,6 +1,5 @@
 from __future__ import annotations

-from dataclasses import dataclass
 from functools import cached_property
 from typing import Iterable

@@ -12,41 +11,57 @@
 from d123.geometry.geometry_index import Point2DIndex, Point3DIndex


-@dataclass
 class Point2D(ArrayMixin):
-    """Class to represents 2D points.
+    """Class to represent 2D points."""

-    :return: A Point2D instance.
-    """
+    _array: npt.NDArray[np.float64]

-    x: float  # [m] location
-    y: float  # [m] location
-    __slots__ = "x", "y"
+    def __init__(self, x: float, y: float):
+        """Initialize Point2D with x, y coordinates."""
+        array = np.zeros(len(Point2DIndex), dtype=np.float64)
+        array[Point2DIndex.X] = x
+        array[Point2DIndex.Y] = y
+        object.__setattr__(self, "_array", array)

     @classmethod
-    def from_array(cls, array: npt.NDArray[np.float64]) -> Point2D:
+    def from_array(cls, array: npt.NDArray[np.float64], copy: bool = True) -> Point2D:
         """Constructs a Point2D from a numpy array.

         :param array: Array of shape (2,) representing the point coordinates [x, y], indexed by \
             :class:`~d123.geometry.Point2DIndex`.
+        :param copy: Whether to copy the input array. Defaults to True.
         :return: A Point2D instance.
         """
-        assert array.ndim == 1
         assert array.shape[0] == len(Point2DIndex)
-        return Point2D(array[Point2DIndex.X], array[Point2DIndex.Y])
+        instance = object.__new__(cls)
+        object.__setattr__(instance, "_array", array.copy() if copy else array)
+        return instance

-    @cached_property
+    @property
+    def x(self) -> float:
+        """The x coordinate of the point.
+
+        :return: The x coordinate of the point.
+        """
+        return self._array[Point2DIndex.X]
+
+    @property
+    def y(self) -> float:
+        """The y coordinate of the point.
+
+        :return: The y coordinate of the point.
+        """
+        return self._array[Point2DIndex.Y]
+
+    @property
     def array(self) -> npt.NDArray[np.float64]:
         """The array representation of the point.

         :return: A numpy array of shape (2,) containing the point coordinates [x, y], indexed by \
             :class:`~d123.geometry.Point2DIndex`.
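+
+        Example (a doctest-style sketch):
+            >>> Point2D(1.0, 2.0).array
+            array([1., 2.])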
""" - array = np.zeros(len(Point2DIndex), dtype=np.float64) - array[Point2DIndex.X] = self.x - array[Point2DIndex.Y] = self.y - return array + return self._array @property def shapely_point(self) -> geom.Point: @@ -65,40 +80,66 @@ def __hash__(self) -> int: return hash((self.x, self.y)) -@dataclass class Point3D(ArrayMixin): """Class to represents 3D points.""" - x: float # [m] location - y: float # [m] location - z: float # [m] location - __slots__ = "x", "y", "z" + _array: npt.NDArray[np.float64] + + def __init__(self, x: float, y: float, z: float): + """Initialize Point3D with x, y, z coordinates.""" + array = np.zeros(len(Point3DIndex), dtype=np.float64) + array[Point3DIndex.X] = x + array[Point3DIndex.Y] = y + array[Point3DIndex.Z] = z + object.__setattr__(self, "_array", array) @classmethod - def from_array(cls, array: npt.NDArray[np.float64]) -> "Point3D": + def from_array(cls, array: npt.NDArray[np.float64], copy: bool = True) -> Point3D: """Constructs a Point3D from a numpy array. :param array: Array of shape (3,) representing the point coordinates [x, y, z], indexed by \ :class:`~d123.geometry.Point3DIndex`. + :param copy: Whether to copy the input array. Defaults to True. :return: A Point3D instance. """ - assert array.ndim == 1, f"Array must be 1-dimensional, got shape {array.shape}" - assert array.shape[0] == len( - Point3DIndex - ), f"Array must have the same length as Point3DIndex, got shape {array.shape}" - return cls(array[Point3DIndex.X], array[Point3DIndex.Y], array[Point3DIndex.Z]) + assert array.ndim == 1 + assert array.shape[0] == len(Point3DIndex) + instance = object.__new__(cls) + object.__setattr__(instance, "_array", array.copy() if copy else array) + return instance @cached_property def array(self) -> npt.NDArray[np.float64]: - """Converts the Point3D instance to a numpy array, indexed by :class:`~d123.geometry.Point3DIndex`. + """The array representation of the point. - :return: A numpy array of shape (3,) containing the point coordinates [x, y, z]. + :return: A numpy array of shape (3,) containing the point coordinates [x, y, z], indexed by \ + :class:`~d123.geometry.Point3DIndex`. """ - array = np.zeros(len(Point3DIndex), dtype=np.float64) - array[Point3DIndex.X] = self.x - array[Point3DIndex.Y] = self.y - array[Point3DIndex.Z] = self.z - return array + return self._array + + @property + def x(self) -> float: + """The x coordinate of the point. + + :return: The x coordinate of the point. + """ + return self._array[Point3DIndex.X] + + @property + def y(self) -> float: + """The y coordinate of the point. + + :return: The y coordinate of the point. + """ + return self._array[Point3DIndex.Y] + + @property + def z(self) -> float: + """The z coordinate of the point. + + :return: The z coordinate of the point. + """ + return self._array[Point3DIndex.Z] @property def point_2d(self) -> Point2D: @@ -106,7 +147,7 @@ def point_2d(self) -> Point2D: :return: A Point2D instance representing the 2D projection of the 3D point. """ - return Point2D(self.x, self.y) + return Point2D.from_array(self.array[Point3DIndex.XY], copy=False) @property def shapely_point(self) -> geom.Point: diff --git a/d123/geometry/polyline.py b/d123/geometry/polyline.py index 3fee304d..51df68b5 100644 --- a/d123/geometry/polyline.py +++ b/d123/geometry/polyline.py @@ -105,14 +105,13 @@ def interpolate( :param distances: The distances at which to interpolate the polyline. :return: The interpolated point(s) on the polyline. 
""" - distances_ = distances * self.length if normalized else distances if isinstance(distances, float) or isinstance(distances, int): - point = self.linestring.interpolate(distances_, normalized=normalized) + point = self.linestring.interpolate(distances, normalized=normalized) return Point2D(point.x, point.y) else: - distances = np.asarray(distances_, dtype=np.float64) - points = self.linestring.interpolate(distances_, normalized=normalized) + distances_ = np.asarray(distances, dtype=np.float64) + points = self.linestring.interpolate(distances, normalized=normalized) return np.array([[p.x, p.y] for p in points], dtype=np.float64) def project( @@ -126,12 +125,10 @@ def project( :param normalized: Whether to return the normalized distance, defaults to False. :return: The distance along the polyline to the closest point. """ - if isinstance(point, Point2D): - point_ = point.array - elif isinstance(point, StateSE2): - point_ = point.array[StateSE2Index.XY] + if isinstance(point, Point2D) or isinstance(point, StateSE2): + point_ = point.shapely_point elif isinstance(point, geom.Point): - point_ = np.array(point.coords[0], dtype=np.float64) + point_ = point else: point_ = np.array(point, dtype=np.float64) return self.linestring.project(point_, normalized=normalized) @@ -241,12 +238,10 @@ def project( :param normalized: Whether to return the normalized distance, defaults to False. :return: The distance along the polyline to the closest point. """ - if isinstance(point, Point2D): - point_ = point.array - elif isinstance(point, StateSE2): - point_ = point.array[StateSE2Index.XY] + if isinstance(point, Point2D) or isinstance(point, StateSE2): + point_ = point.shapely_point elif isinstance(point, geom.Point): - point_ = np.array(point.coords[0], dtype=np.float64) + point_ = point else: point_ = np.array(point, dtype=np.float64) return self.linestring.project(point_, normalized=normalized) @@ -327,14 +322,13 @@ def interpolate( :param normalized: Whether to interpret the distances as fractions of the length. :return: A Point3D instance or a numpy array of shape (N, 3) representing the interpolated points. """ - distances * self.length if normalized else distances if isinstance(distances, float) or isinstance(distances, int): - point = self.linestring.interpolate(distances) + point = self.linestring.interpolate(distances, normalized=normalized) return Point3D(point.x, point.y, point.z) else: distances = np.asarray(distances, dtype=np.float64) - points = self.linestring.interpolate(distances) + points = self.linestring.interpolate(distances, normalized=normalized) return np.array([[p.x, p.y, p.z] for p in points], dtype=np.float64) def project( @@ -348,14 +342,10 @@ def project( :param normalized: Whether to return normalized distances, defaults to False. :return: The distance along the polyline to the closest point. 
""" - if isinstance(point, Point2D): - point_ = point.array - elif isinstance(point, StateSE2): - point_ = point.array[StateSE2Index.XY] - elif isinstance(point, Point3D): - point_ = point.array[Point3DIndex.XYZ] + if isinstance(point, Point2D) or isinstance(point, StateSE2) or isinstance(point, Point3D): + point_ = point.shapely_point elif isinstance(point, geom.Point): - point_ = np.array(point.coords[0], dtype=np.float64) + point_ = point else: point_ = np.array(point, dtype=np.float64) return self.linestring.project(point_, normalized=normalized) diff --git a/d123/geometry/rotation.py b/d123/geometry/rotation.py new file mode 100644 index 00000000..f98a601d --- /dev/null +++ b/d123/geometry/rotation.py @@ -0,0 +1,241 @@ +from __future__ import annotations + +from functools import cached_property + +import numpy as np +import numpy.typing as npt +import pyquaternion + +from d123.common.utils.mixin import ArrayMixin +from d123.geometry.geometry_index import EulerAnglesIndex, QuaternionIndex +from d123.geometry.utils.rotation_utils import get_rotation_matrix_from_euler_array + + +class EulerAngles(ArrayMixin): + """Class to represent 3D rotation using Euler angles (roll, pitch, yaw) in radians. + NOTE: The rotation order is intrinsic Z-Y'-X'' (yaw-pitch-roll). + See https://en.wikipedia.org/wiki/Euler_angles for more details. + """ + + _array: npt.NDArray[np.float64] + + def __init__(self, roll: float, pitch: float, yaw: float): + """Initialize EulerAngles with roll, pitch, yaw coordinates.""" + array = np.zeros(len(EulerAnglesIndex), dtype=np.float64) + array[EulerAnglesIndex.ROLL] = roll + array[EulerAnglesIndex.PITCH] = pitch + array[EulerAnglesIndex.YAW] = yaw + object.__setattr__(self, "_array", array) + + @classmethod + def from_array(cls, array: npt.NDArray[np.float64], copy: bool = True) -> EulerAngles: + """Constructs a EulerAngles from a numpy array. + + :param array: Array of shape (3,) representing the euler angles [roll, pitch, yaw], indexed by \ + :class:`~d123.geometry.EulerAnglesIndex`. + :param copy: Whether to copy the input array. Defaults to True. + :return: A EulerAngles instance. + """ + assert array.ndim == 1 + assert array.shape[0] == len(EulerAnglesIndex) + instance = object.__new__(cls) + object.__setattr__(instance, "_array", array.copy() if copy else array) + return instance + + @classmethod + def from_rotation_matrix(cls, rotation_matrix: npt.NDArray[np.float64]) -> EulerAngles: + """Constructs a EulerAngles from a 3x3 rotation matrix. + NOTE: The rotation order is intrinsic Z-Y'-X'' (yaw-pitch-roll). + + :param rotation_matrix: A 3x3 numpy array representing the rotation matrix. + :return: A EulerAngles instance. + """ + assert rotation_matrix.ndim == 2 + assert rotation_matrix.shape == (3, 3) + quaternion = pyquaternion.Quaternion(matrix=rotation_matrix) + yaw, pitch, roll = quaternion.yaw_pitch_roll + return EulerAngles(roll=roll, pitch=pitch, yaw=yaw) + + @property + def roll(self) -> float: + """The roll (x-axis rotation) angle in radians. + + :return: The roll angle in radians. + """ + return self._array[EulerAnglesIndex.ROLL] + + @property + def pitch(self) -> float: + """The pitch (y-axis rotation) angle in radians. + + :return: The pitch angle in radians. + """ + return self._array[EulerAnglesIndex.PITCH] + + @property + def yaw(self) -> float: + """The yaw (z-axis rotation) angle in radians. + + :return: The yaw angle in radians. 
+ """ + return self._array[EulerAnglesIndex.YAW] + + @cached_property + def array(self) -> npt.NDArray[np.float64]: + """Converts the EulerAngles instance to a numpy array. + + :return: A numpy array of shape (3,) containing the Euler angles [roll, pitch, yaw], indexed by \ + :class:`~d123.geometry.EulerAnglesIndex`. + """ + array = np.zeros(len(EulerAnglesIndex), dtype=np.float64) + array[EulerAnglesIndex.ROLL] = self.roll + array[EulerAnglesIndex.PITCH] = self.pitch + array[EulerAnglesIndex.YAW] = self.yaw + return array + + @property + def rotation_matrix(self) -> npt.NDArray[np.float64]: + """Returns the 3x3 rotation matrix representation of the Euler angles. + NOTE: The rotation order is intrinsic Z-Y'-X'' (yaw-pitch-roll). + + :return: A 3x3 numpy array representing the rotation matrix. + """ + return get_rotation_matrix_from_euler_array(self.array) + + def __iter__(self): + """Iterator over euler angles.""" + return iter((self.roll, self.pitch, self.yaw)) + + def __hash__(self): + """Hash function for euler angles.""" + return hash((self.roll, self.pitch, self.yaw)) + + +class Quaternion(ArrayMixin): + """ + Represents a quaternion for 3D rotations. + NOTE: This class uses the pyquaternion library for internal computations. + """ + + _array: npt.NDArray[np.float64] + + def __init__(self, qw: float, qx: float, qy: float, qz: float): + """Initialize Quaternion with qw, qx, qy, qz components.""" + array = np.zeros(len(QuaternionIndex), dtype=np.float64) + array[QuaternionIndex.QW] = qw + array[QuaternionIndex.QX] = qx + array[QuaternionIndex.QY] = qy + array[QuaternionIndex.QZ] = qz + object.__setattr__(self, "_array", array) + + @classmethod + def from_array(cls, arr: npt.NDArray[np.float64], copy: bool = True) -> Quaternion: + """Constructs a Quaternion from a numpy array. + + :param arr: A 1D numpy array of shape (4,) containing the quaternion components [qw, qx, qy, qz]. + :param copy: Whether to copy the array data, defaults to True. + :return: A Quaternion instance. + """ + assert arr.ndim == 1 + assert arr.shape[0] == len(QuaternionIndex) + instance = object.__new__(cls) + object.__setattr__(instance, "_array", arr.copy() if copy else arr) + return instance + + @classmethod + def from_rotation_matrix(cls, rotation_matrix: npt.NDArray[np.float64]) -> Quaternion: + """Constructs a Quaternion from a 3x3 rotation matrix. + + :param rotation_matrix: A 3x3 numpy array representing the rotation matrix. + :return: A Quaternion instance. + """ + assert rotation_matrix.ndim == 2 + assert rotation_matrix.shape == (3, 3) + quaternion = pyquaternion.Quaternion(matrix=rotation_matrix) + return Quaternion(qw=quaternion.w, qx=quaternion.x, qy=quaternion.y, qz=quaternion.z) + + @classmethod + def from_euler_angles(cls, euler_angles: EulerAngles) -> Quaternion: + """Constructs a Quaternion from Euler angles. + NOTE: The rotation order is intrinsic Z-Y'-X'' (yaw-pitch-roll). + + :param euler_angles: An EulerAngles instance representing the Euler angles. + :return: A Quaternion instance. + """ + rotation_matrix = euler_angles.rotation_matrix + return Quaternion.from_rotation_matrix(rotation_matrix) + + @property + def qw(self) -> float: + """The scalar part of the quaternion. + + :return: The qw component. + """ + return self._array[QuaternionIndex.QW] + + @property + def qx(self) -> float: + """The x component of the quaternion. + + :return: The qx component. + """ + return self._array[QuaternionIndex.QX] + + @property + def qy(self) -> float: + """The y component of the quaternion. 
+
+        :return: The qy component.
+        """
+        return self._array[QuaternionIndex.QY]
+
+    @property
+    def qz(self) -> float:
+        """The z component of the quaternion.
+
+        :return: The qz component.
+        """
+        return self._array[QuaternionIndex.QZ]
+
+    @property
+    def array(self) -> npt.NDArray[np.float64]:
+        """Converts the Quaternion instance to a numpy array.
+
+        :return: A numpy array of shape (4,) containing the quaternion [qw, qx, qy, qz], indexed by \
+            :class:`~d123.geometry.QuaternionIndex`.
+        """
+        return self._array
+
+    @property
+    def pyquaternion(self) -> pyquaternion.Quaternion:
+        """Returns the pyquaternion.Quaternion representation of the quaternion.
+
+        :return: A pyquaternion.Quaternion representation of the quaternion.
+        """
+        return pyquaternion.Quaternion(array=self.array)
+
+    @cached_property
+    def euler_angles(self) -> EulerAngles:
+        """Returns the Euler angles (roll, pitch, yaw) representation of the quaternion.
+        NOTE: The rotation order is intrinsic Z-Y'-X'' (yaw-pitch-roll).
+
+        :return: An EulerAngles instance representing the Euler angles.
+        """
+        yaw, pitch, roll = self.pyquaternion.yaw_pitch_roll
+        return EulerAngles(roll=roll, pitch=pitch, yaw=yaw)
+
+    @cached_property
+    def rotation_matrix(self) -> npt.NDArray[np.float64]:
+        """Returns the 3x3 rotation matrix representation of the quaternion.
+
+        :return: A 3x3 numpy array representing the rotation matrix.
+        """
+        return self.pyquaternion.rotation_matrix
+
+    def __iter__(self):
+        """Iterator over quaternion components."""
+        return iter((self.qw, self.qx, self.qy, self.qz))
+
+    def __hash__(self):
+        """Hash function for quaternion."""
+        return hash((self.qw, self.qx, self.qy, self.qz))
diff --git a/d123/geometry/se.py b/d123/geometry/se.py
index 9c73e341..b2d8db2c 100644
--- a/d123/geometry/se.py
+++ b/d123/geometry/se.py
@@ -1,49 +1,66 @@
 from __future__ import annotations

-from dataclasses import dataclass
+from functools import cached_property
 from typing import Iterable

 import numpy as np
 import numpy.typing as npt
 import shapely.geometry as geom

-from d123.geometry.geometry_index import StateSE2Index, StateSE3Index
+from d123.common.utils.mixin import ArrayMixin
+from d123.geometry.geometry_index import Point3DIndex, QuaternionSE3Index, StateSE2Index, StateSE3Index
 from d123.geometry.point import Point2D, Point3D
+from d123.geometry.rotation import EulerAngles, Quaternion


-@dataclass
-class StateSE2:
+class StateSE2(ArrayMixin):
     """Class to represents a 2D pose as SE2 (x, y, yaw)."""

-    x: float  # [m] x-location
-    y: float  # [m] y-location
-    yaw: float  # [rad] yaw/heading
-    __slots__ = "x", "y", "yaw"
+    _array: npt.NDArray[np.float64]
+
+    def __init__(self, x: float, y: float, yaw: float):
+        """Initialize StateSE2 with x, y, yaw coordinates."""
+        array = np.zeros(len(StateSE2Index), dtype=np.float64)
+        array[StateSE2Index.X] = x
+        array[StateSE2Index.Y] = y
+        array[StateSE2Index.YAW] = yaw
+        object.__setattr__(self, "_array", array)

     @classmethod
-    def from_array(cls, array: npt.NDArray[np.float64]) -> StateSE2:
+    def from_array(cls, array: npt.NDArray[np.float64], copy: bool = True) -> StateSE2:
         """Constructs a StateSE2 from a numpy array.

         :param array: Array of shape (3,) representing the state [x, y, yaw], indexed by \
             :class:`~d123.geometry.geometry_index.StateSE2Index`.
+        :param copy: Whether to copy the input array. Defaults to True.
         :return: A StateSE2 instance.
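+
+        Example (a doctest-style sketch):
+            >>> import numpy as np
+            >>> pose = StateSE2.from_array(np.array([1.0, 2.0, 0.5]))
+            >>> float(pose.yaw)
+            0.5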
""" assert array.ndim == 1 assert array.shape[0] == len(StateSE2Index) - return StateSE2(array[StateSE2Index.X], array[StateSE2Index.Y], array[StateSE2Index.YAW]) + instance = object.__new__(cls) + object.__setattr__(instance, "_array", array.copy() if copy else array) + return instance + + @property + def x(self) -> float: + return self._array[StateSE2Index.X] + + @property + def y(self) -> float: + return self._array[StateSE2Index.Y] + + @property + def yaw(self) -> float: + return self._array[StateSE2Index.YAW] @property def array(self) -> npt.NDArray[np.float64]: - """Converts the StateSE2 instance to a numpy array, indexed by \ - :class:`~d123.geometry.geometry_index.StateSE2Index`. + """Converts the StateSE2 instance to a numpy array - :return: A numpy array of shape (3,) containing the state [x, y, yaw]. + :return: A numpy array of shape (3,) containing the state, indexed by \ + :class:`~d123.geometry.geometry_index.StateSE2Index`. """ - array = np.zeros(len(StateSE2Index), dtype=np.float64) - array[StateSE2Index.X] = self.x - array[StateSE2Index.Y] = self.y - array[StateSE2Index.YAW] = self.yaw - return array + return self._array @property def state_se2(self) -> StateSE2: @@ -59,54 +76,68 @@ def point_2d(self) -> Point2D: :return: A Point2D instance representing the 2D projection of the 2D pose. """ - return Point2D(self.x, self.y) + return Point2D.from_array(self.array[StateSE2Index.XY]) @property - def shapely_point(self) -> geom.Point: - return geom.Point(self.x, self.y) + def rotation_matrix(self) -> npt.NDArray[np.float64]: + """Returns the 2x2 rotation matrix representation of the state's orientation. - def __iter__(self) -> Iterable[float]: - """Iterator over the state coordinates (x, y, yaw).""" - return iter((self.x, self.y, self.yaw)) + :return: A 2x2 numpy array representing the rotation matrix. + """ + cos_yaw = np.cos(self.yaw) + sin_yaw = np.sin(self.yaw) + return np.array([[cos_yaw, -sin_yaw], [sin_yaw, cos_yaw]], dtype=np.float64) - def __hash__(self) -> int: - """Hash method""" - return hash((self.x, self.y, self.yaw)) + @property + def transformation_matrix(self) -> npt.NDArray[np.float64]: + """Returns the 3x3 transformation matrix representation of the state. + + :return: A 3x3 numpy array representing the transformation matrix. + """ + matrix = np.zeros((3, 3), dtype=np.float64) + matrix[:2, :2] = self.rotation_matrix + matrix[0, 2] = self.x + matrix[1, 2] = self.y + return matrix + + @property + def shapely_point(self) -> geom.Point: + return geom.Point(self.x, self.y) -@dataclass -class StateSE3: +class StateSE3(ArrayMixin): """ Class to represents a 3D pose as SE3 (x, y, z, roll, pitch, yaw). TODO: Use quaternions for rotation representation. 
""" - x: float # [m] x-location - y: float # [m] y-location - z: float # [m] z-location - roll: float # [rad] roll - pitch: float # [rad] pitch - yaw: float # [rad] yaw - __slots__ = "x", "y", "z", "roll", "pitch", "yaw" + _array: npt.NDArray[np.float64] + + def __init__(self, x: float, y: float, z: float, roll: float, pitch: float, yaw: float): + """Initialize StateSE3 with x, y, z, roll, pitch, yaw coordinates.""" + array = np.zeros(len(StateSE3Index), dtype=np.float64) + array[StateSE3Index.X] = x + array[StateSE3Index.Y] = y + array[StateSE3Index.Z] = z + array[StateSE3Index.ROLL] = roll + array[StateSE3Index.PITCH] = pitch + array[StateSE3Index.YAW] = yaw + object.__setattr__(self, "_array", array) @classmethod - def from_array(cls, array: npt.NDArray[np.float64]) -> StateSE3: + def from_array(cls, array: npt.NDArray[np.float64], copy: bool = True) -> StateSE3: """Constructs a StateSE3 from a numpy array. :param array: Array of shape (6,) representing the state [x, y, z, roll, pitch, yaw], indexed by \ - :class:`~d123.geometry.StateSE3Index`. + :class:`~d123.geometry.geometry_index.StateSE3Index`. + :param copy: Whether to copy the input array. Defaults to True. :return: A StateSE3 instance. """ assert array.ndim == 1 assert array.shape[0] == len(StateSE3Index) - return StateSE3( - array[StateSE3Index.X], - array[StateSE3Index.Y], - array[StateSE3Index.Z], - array[StateSE3Index.ROLL], - array[StateSE3Index.PITCH], - array[StateSE3Index.YAW], - ) + instance = object.__new__(cls) + object.__setattr__(instance, "_array", array.copy() if copy else array) + return instance @classmethod def from_transformation_matrix(cls, array: npt.NDArray[np.float64]) -> StateSE3: @@ -119,29 +150,73 @@ def from_transformation_matrix(cls, array: npt.NDArray[np.float64]) -> StateSE3: assert array.shape == (4, 4) translation = array[:3, 3] rotation = array[:3, :3] + roll, pitch, yaw = EulerAngles.from_rotation_matrix(rotation) + return StateSE3( - x=translation[0], - y=translation[1], - z=translation[2], - roll=np.arctan2(rotation[2, 1], rotation[2, 2]), - pitch=np.arctan2(-rotation[2, 0], np.sqrt(rotation[2, 1] ** 2 + rotation[2, 2] ** 2)), - yaw=np.arctan2(rotation[1, 0], rotation[0, 0]), + x=translation[Point3DIndex.X], + y=translation[Point3DIndex.Y], + z=translation[Point3DIndex.Z], + roll=roll, + pitch=pitch, + yaw=yaw, ) + @property + def x(self) -> float: + """Returns the x-coordinate of the 3D state. + + :return: The x-coordinate. + """ + return self._array[StateSE3Index.X] + + @property + def y(self) -> float: + """Returns the y-coordinate of the 3D state. + + :return: The y-coordinate. + """ + return self._array[StateSE3Index.Y] + + @property + def z(self) -> float: + """Returns the z-coordinate of the 3D state. + + :return: The z-coordinate. + """ + return self._array[StateSE3Index.Z] + + @property + def roll(self) -> float: + """Returns the roll (x-axis rotation) of the 3D state. + + :return: The roll angle. + """ + return self._array[StateSE3Index.ROLL] + + @property + def pitch(self) -> float: + """Returns the pitch (y-axis rotation) of the 3D state. + + :return: The pitch angle. + """ + return self._array[StateSE3Index.PITCH] + + @property + def yaw(self) -> float: + """Returns the yaw (z-axis rotation) of the 3D state. + + :return: The yaw angle. + """ + return self._array[StateSE3Index.YAW] + @property def array(self) -> npt.NDArray[np.float64]: - """Converts the StateSE3 instance to a numpy array, indexed by StateSE3Index. + """Returns the StateSE3 instance as a numpy array. 
-        :return: A numpy array of shape (6,) containing the state [x, y, z, roll, pitch, yaw].
+        :return: A numpy array of shape (6,), indexed by \
+            :class:`~d123.geometry.geometry_index.StateSE3Index`.
         """
-        array = np.zeros(len(StateSE3Index), dtype=np.float64)
-        array[StateSE3Index.X] = self.x
-        array[StateSE3Index.Y] = self.y
-        array[StateSE3Index.Z] = self.z
-        array[StateSE3Index.ROLL] = self.roll
-        array[StateSE3Index.PITCH] = self.pitch
-        array[StateSE3Index.YAW] = self.yaw
-        return array
+        return self._array

     @property
     def state_se2(self) -> StateSE2:
@@ -175,14 +250,6 @@ def shapely_point(self) -> geom.Point:
         """
         return self.point_3d.shapely_point

-    @property
-    def transformation_matrix(self) -> npt.NDArray[np.float64]:
-        """Returns the 4x4 transformation matrix representation of the state.
-
-        :return: A 4x4 numpy array representing the transformation matrix.
-        """
-        raise NotImplementedError("Transformation matrix conversion not implemented yet.")
-
     @property
     def rotation_matrix(self) -> npt.NDArray[np.float64]:
         """Returns the 3x3 rotation matrix representation of the state's orientation.
@@ -191,6 +258,18 @@ def rotation_matrix(self) -> npt.NDArray[np.float64]:
         """
         raise NotImplementedError("Rotation matrix conversion not implemented yet.")

+    @property
+    def transformation_matrix(self) -> npt.NDArray[np.float64]:
+        """Returns the 4x4 transformation matrix representation of the state.
+
+        :return: A 4x4 numpy array representing the transformation matrix.
+        """
+        rotation_matrix = self.rotation_matrix
+        transformation_matrix = np.eye(4, dtype=np.float64)
+        transformation_matrix[:3, :3] = rotation_matrix
+        transformation_matrix[:3, 3] = self.array[StateSE3Index.XYZ]  # translation in the last column
+        return transformation_matrix
+
     @property
     def quaternion(self) -> npt.NDArray[np.float64]:
         """Returns the quaternion (w, x, y, z) representation of the state's orientation.
@@ -216,17 +295,142 @@ def __matmul__(self, other: StateSE3) -> StateSE3:
         return StateSE3.from_transformation_matrix(self.transformation_matrix @ other.transformation_matrix)


-@dataclass
 class QuaternionSE3:
     """Class representing a quaternion in SE3 space.

     TODO: Implement and replace StateSE3.
     """

-    x: float
-    y: float
-    z: float
-    qw: float
-    qx: float
-    qy: float
-    qz: float
+    _array: npt.NDArray[np.float64]
+
+    def __init__(self, x: float, y: float, z: float, qw: float, qx: float, qy: float, qz: float):
+        """Initialize QuaternionSE3 with x, y, z, qw, qx, qy, qz coordinates."""
+        array = np.zeros(len(QuaternionSE3Index), dtype=np.float64)
+        array[QuaternionSE3Index.X] = x
+        array[QuaternionSE3Index.Y] = y
+        array[QuaternionSE3Index.Z] = z
+        array[QuaternionSE3Index.QW] = qw
+        array[QuaternionSE3Index.QX] = qx
+        array[QuaternionSE3Index.QY] = qy
+        array[QuaternionSE3Index.QZ] = qz
+        object.__setattr__(self, "_array", array)
+
+    @classmethod
+    def from_array(cls, array: npt.NDArray[np.float64], copy: bool = True) -> QuaternionSE3:
+        """Constructs a QuaternionSE3 from a numpy array.
+
+        :param array: Array of shape (7,), indexed by :class:`~d123.geometry.geometry_index.QuaternionSE3Index`.
+        :param copy: Whether to copy the input array. Defaults to True.
+        :return: A QuaternionSE3 instance.
+        """
+        assert array.ndim == 1
+        assert array.shape[0] == len(QuaternionSE3Index)
+        instance = object.__new__(cls)
+        object.__setattr__(instance, "_array", array.copy() if copy else array)
+        return instance
+
+    @property
+    def x(self) -> float:
+        """Returns the x-coordinate of the quaternion.
+
+        :return: The x-coordinate.
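+
+        Example (a doctest-style sketch):
+            >>> float(QuaternionSE3(1.0, 2.0, 3.0, 1.0, 0.0, 0.0, 0.0).x)
+            1.0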
+ """ + return self._array[QuaternionSE3Index.X] + + @property + def y(self) -> float: + """Returns the y-coordinate of the quaternion. + + :return: The y-coordinate. + """ + return self._array[QuaternionSE3Index.Y] + + @property + def z(self) -> float: + """Returns the z-coordinate of the quaternion. + + :return: The z-coordinate. + """ + return self._array[QuaternionSE3Index.Z] + + @property + def qw(self) -> float: + """Returns the w-coordinate of the quaternion. + + :return: The w-coordinate. + """ + return self._array[QuaternionSE3Index.QW] + + @property + def qx(self) -> float: + """Returns the x-coordinate of the quaternion. + + :return: The x-coordinate. + """ + return self._array[QuaternionSE3Index.QX] + + @property + def qy(self) -> float: + """Returns the y-coordinate of the quaternion. + + :return: The y-coordinate. + """ + return self._array[QuaternionSE3Index.QY] + + @property + def qz(self) -> float: + """Returns the z-coordinate of the quaternion. + + :return: The z-coordinate. + """ + return self._array[QuaternionSE3Index.QZ] + + @property + def array(self) -> npt.NDArray[np.float64]: + """Converts the QuaternionSE3 instance to a numpy array. + + :return: A numpy array of shape (7,), indexed by :class:`~d123.geometry.geometry_index.QuaternionSE3Index`. + """ + return self._array + + @property + def state_se2(self) -> StateSE2: + """Returns the quaternion state as a 2D state by ignoring the z-axis. + + :return: A StateSE2 instance representing the 2D projection of the 3D state. + """ + # Convert quaternion to yaw angle + yaw = self.quaternion.euler_angles.yaw + return StateSE2(self.x, self.y, yaw) + + @property + def point_3d(self) -> Point3D: + """Returns the 3D point representation of the state. + + :return: A Point3D instance representing the 3D point. + """ + return Point3D(self.x, self.y, self.z) + + @property + def point_2d(self) -> Point2D: + """Returns the 2D point representation of the state. + + :return: A Point2D instance representing the 2D point. + """ + return Point2D(self.x, self.y) + + @property + def shapely_point(self) -> geom.Point: + """Returns the Shapely point representation of the state. + + :return: A Shapely Point instance representing the 3D point. + """ + return self.point_3d.shapely_point + + @cached_property + def quaternion(self) -> Quaternion: + """Returns the quaternion (w, x, y, z) representation of the state's orientation. + + :return: A Quaternion instance representing the quaternion. 
+ """ + return Quaternion.from_array(self.array[QuaternionSE3Index.QUATERNION]) diff --git a/d123/geometry/test/__init__.py b/d123/geometry/test/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/d123/geometry/test/test_point.py b/d123/geometry/test/test_point.py new file mode 100644 index 00000000..d3141bbb --- /dev/null +++ b/d123/geometry/test/test_point.py @@ -0,0 +1,203 @@ +import unittest +from unittest.mock import MagicMock, patch + +import numpy as np + +from d123.geometry import Point2D, Point2DIndex +from d123.geometry.geometry_index import Point3DIndex +from d123.geometry.point import Point3D + +# Point3D, Point3DIndex + + +class TestPoint2D(unittest.TestCase): + """Unit tests for Point2D class.""" + + def setUp(self): + """Set up test fixtures.""" + self.x_coord = 3.5 + self.y_coord = 4.2 + self.point = Point2D(x=self.x_coord, y=self.y_coord) + self.test_array = np.zeros([2], dtype=np.float64) + self.test_array[Point2DIndex.X] = self.x_coord + self.test_array[Point2DIndex.Y] = self.y_coord + + def test_init(self): + """Test Point2D initialization.""" + point = Point2D(1.0, 2.0) + self.assertEqual(point.x, 1.0) + self.assertEqual(point.y, 2.0) + + def test_from_array_valid(self): + """Test from_array class method with valid input.""" + # Mock Point2DIndex enum values + point = Point2D.from_array(self.test_array) + self.assertEqual(point.x, self.x_coord) + self.assertEqual(point.y, self.y_coord) + + def test_from_array_invalid_dimensions(self): + """Test from_array with invalid array dimensions.""" + # 2D array should raise assertion error + array_2d = np.array([[1.0, 2.0], [3.0, 4.0]], dtype=np.float64) + with self.assertRaises(AssertionError): + Point2D.from_array(array_2d) + + # 3D array should raise assertion error + array_3d = np.array([[[1.0]]], dtype=np.float64) + with self.assertRaises(AssertionError): + Point2D.from_array(array_3d) + + def test_from_array_invalid_shape(self): + """Test from_array with invalid array shape.""" + + array_wrong_length = np.array([1.0, 2.0, 3.0], dtype=np.float64) + with self.assertRaises(AssertionError): + Point2D.from_array(array_wrong_length) + + # Empty array + empty_array = np.array([], dtype=np.float64) + with self.assertRaises(AssertionError): + Point2D.from_array(empty_array) + + def test_array_property(self): + """Test the array property.""" + expected_array = np.array([self.x_coord, self.y_coord], dtype=np.float64) + np.testing.assert_array_equal(self.point.array, expected_array) + self.assertEqual(self.point.array.dtype, np.float64) + self.assertEqual(self.point.array.shape, (2,)) + + def test_array_like(self): + """Test the __array__ behavior.""" + expected_array = np.array([self.x_coord, self.y_coord], dtype=np.float32) + output_array = np.array(self.point, dtype=np.float32) + np.testing.assert_array_equal(output_array, expected_array) + self.assertEqual(output_array.dtype, np.float32) + self.assertEqual(output_array.shape, (2,)) + + def test_shapely_point_property(self): + """Test the shapely_point property.""" + with patch("shapely.geometry.Point") as mock_point: + mock_point_instance = MagicMock() + mock_point.return_value = mock_point_instance + + result = self.point.shapely_point + + mock_point.assert_called_once_with(self.x_coord, self.y_coord) + self.assertEqual(result, mock_point_instance) + + def test_iter(self): + """Test the __iter__ method.""" + coords = list(self.point) + self.assertEqual(coords, [self.x_coord, self.y_coord]) + + # Test that it's actually iterable + x, y = self.point + 
self.assertEqual(x, self.x_coord) + self.assertEqual(y, self.y_coord) + + def test_hash(self): + """Test the __hash__ method.""" + point_dict = {self.point: "test"} + self.assertIn(self.point, point_dict) + self.assertEqual(point_dict[self.point], "test") + + +class TestPoint3D(unittest.TestCase): + """Unit tests for Point3D class.""" + + def setUp(self): + """Set up test fixtures.""" + self.x_coord = 3.5 + self.y_coord = 4.2 + self.z_coord = 5.1 + self.point = Point3D(self.x_coord, self.y_coord, self.z_coord) + self.test_array = np.zeros((3,), dtype=np.float64) + self.test_array[Point3DIndex.X] = self.x_coord + self.test_array[Point3DIndex.Y] = self.y_coord + self.test_array[Point3DIndex.Z] = self.z_coord + + def test_init(self): + """Test Point3D initialization.""" + point = Point3D(1.0, 2.0, 3.0) + self.assertEqual(point.x, 1.0) + self.assertEqual(point.y, 2.0) + self.assertEqual(point.z, 3.0) + + def test_from_array_valid(self): + """Test from_array class method with valid input.""" + # Mock Point3DIndex enum values + point = Point3D.from_array(self.test_array) + self.assertEqual(point.x, self.x_coord) + self.assertEqual(point.y, self.y_coord) + self.assertEqual(point.z, self.z_coord) + + def test_from_array_invalid_dimensions(self): + """Test from_array with invalid array dimensions.""" + # 2D array should raise assertion error + array_2d = np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]], dtype=np.float64) + with self.assertRaises(AssertionError): + Point3D.from_array(array_2d) + + # 3D array should raise assertion error + array_3d = np.array([[[1.0]]], dtype=np.float64) + with self.assertRaises(AssertionError): + Point3D.from_array(array_3d) + + def test_from_array_invalid_shape(self): + """Test from_array with invalid array shape.""" + + array_wrong_length = np.array([1.0, 2.0], dtype=np.float64) + with self.assertRaises(AssertionError): + Point3D.from_array(array_wrong_length) + + # Empty array + empty_array = np.array([], dtype=np.float64) + with self.assertRaises(AssertionError): + Point3D.from_array(empty_array) + + def test_array_property(self): + """Test the array property.""" + expected_array = np.array([self.x_coord, self.y_coord, self.z_coord], dtype=np.float64) + np.testing.assert_array_equal(self.point.array, expected_array) + self.assertEqual(self.point.array.dtype, np.float64) + self.assertEqual(self.point.array.shape, (3,)) + + def test_array_like(self): + """Test the __array__ behavior.""" + expected_array = np.array([self.x_coord, self.y_coord, self.z_coord], dtype=np.float32) + output_array = np.array(self.point, dtype=np.float32) + np.testing.assert_array_equal(output_array, expected_array) + self.assertEqual(output_array.dtype, np.float32) + self.assertEqual(output_array.shape, (3,)) + + def test_shapely_point_property(self): + """Test the shapely_point property.""" + with patch("shapely.geometry.Point") as mock_point: + mock_point_instance = MagicMock() + mock_point.return_value = mock_point_instance + + result = self.point.shapely_point + + mock_point.assert_called_once_with(self.x_coord, self.y_coord, self.z_coord) + self.assertEqual(result, mock_point_instance) + + def test_iter(self): + """Test the __iter__ method.""" + coords = list(self.point) + self.assertEqual(coords, [self.x_coord, self.y_coord, self.z_coord]) + + # Test that it's actually iterable + x, y, z = self.point + self.assertEqual(x, self.x_coord) + self.assertEqual(y, self.y_coord) + self.assertEqual(z, self.z_coord) + + def test_hash(self): + """Test the __hash__ method.""" + point_dict = 
{self.point: "test"} + self.assertIn(self.point, point_dict) + self.assertEqual(point_dict[self.point], "test") + + +if __name__ == "__main__": + unittest.main() diff --git a/d123/geometry/test/test_polyline.py b/d123/geometry/test/test_polyline.py new file mode 100644 index 00000000..e4103364 --- /dev/null +++ b/d123/geometry/test/test_polyline.py @@ -0,0 +1,326 @@ +import unittest + +import numpy as np +import shapely.geometry as geom + +from d123.geometry.point import Point2D, Point3D +from d123.geometry.polyline import Polyline2D, Polyline3D, PolylineSE2 +from d123.geometry.se import StateSE2 + + +class TestPolyline2D(unittest.TestCase): + """Test class for Polyline2D.""" + + def test_from_linestring(self): + """Test creating Polyline2D from LineString.""" + coords = [(0.0, 0.0), (1.0, 1.0), (2.0, 0.0)] + linestring = geom.LineString(coords) + polyline = Polyline2D.from_linestring(linestring) + self.assertIsInstance(polyline, Polyline2D) + self.assertTrue(polyline.linestring.equals(linestring)) + + def test_from_linestring_with_z(self): + """Test creating Polyline2D from LineString with Z coordinates.""" + coords = [(0.0, 0.0, 1.0), (1.0, 1.0, 2.0), (2.0, 0.0, 3.0)] + linestring = geom.LineString(coords) + polyline = Polyline2D.from_linestring(linestring) + self.assertIsInstance(polyline, Polyline2D) + self.assertFalse(polyline.linestring.has_z) + + def test_from_array_2d(self): + """Test creating Polyline2D from 2D array.""" + array = np.array([[0.0, 0.0], [1.0, 1.0], [2.0, 0.0]], dtype=np.float32) + polyline = Polyline2D.from_array(array) + self.assertIsInstance(polyline, Polyline2D) + np.testing.assert_array_almost_equal(polyline.array, array) + + def test_from_array_3d(self): + """Test creating Polyline2D from 3D array.""" + array = np.array([[0.0, 0.0, 1.0], [1.0, 1.0, 2.0], [2.0, 0.0, 3.0]], dtype=np.float32) + polyline = Polyline2D.from_array(array) + self.assertIsInstance(polyline, Polyline2D) + expected = array[:, :2] + np.testing.assert_array_almost_equal(polyline.array, expected) + + def test_from_array_invalid_shape(self): + """Test creating Polyline2D from invalid array shape.""" + array = np.array([[0.0], [1.0], [2.0]], dtype=np.float32) + with self.assertRaises(ValueError): + Polyline2D.from_array(array) + + def test_array_property(self): + """Test array property.""" + coords = [(0.0, 0.0), (1.0, 1.0), (2.0, 0.0)] + linestring = geom.LineString(coords) + polyline = Polyline2D.from_linestring(linestring) + array = polyline.array + self.assertEqual(array.shape, (3, 2)) + self.assertEqual(array.dtype, np.float64) + np.testing.assert_array_almost_equal(array, coords) + + def test_length_property(self): + """Test length property.""" + coords = [(0.0, 0.0), (1.0, 0.0), (2.0, 0.0)] + linestring = geom.LineString(coords) + polyline = Polyline2D.from_linestring(linestring) + self.assertEqual(polyline.length, 2.0) + + def test_interpolate_single_distance(self): + """Test interpolation with single distance.""" + coords = [(0.0, 0.0), (2.0, 0.0)] + linestring = geom.LineString(coords) + polyline = Polyline2D.from_linestring(linestring) + point = polyline.interpolate(1.0) + self.assertIsInstance(point, Point2D) + self.assertEqual(point.x, 1.0) + self.assertEqual(point.y, 0.0) + + def test_interpolate_multiple_distances(self): + """Test interpolation with multiple distances.""" + coords = [(0.0, 0.0), (2.0, 0.0)] + linestring = geom.LineString(coords) + polyline = Polyline2D.from_linestring(linestring) + points = polyline.interpolate(np.array([0.0, 1.0, 2.0])) + 
self.assertIsInstance(points, np.ndarray) + self.assertEqual(points.shape, (3, 2)) + expected = np.array([[0.0, 0.0], [1.0, 0.0], [2.0, 0.0]]) + np.testing.assert_array_almost_equal(points, expected) + + def test_interpolate_normalized(self): + """Test normalized interpolation.""" + coords = [(0.0, 0.0), (2.0, 0.0)] + linestring = geom.LineString(coords) + polyline = Polyline2D.from_linestring(linestring) + point = polyline.interpolate(0.5, normalized=True) + self.assertIsInstance(point, Point2D) + self.assertEqual(point.x, 1.0) + self.assertEqual(point.y, 0.0) + + def test_project_point2d(self): + """Test projecting Point2D onto polyline.""" + coords = [(0.0, 0.0), (2.0, 0.0)] + linestring = geom.LineString(coords) + polyline = Polyline2D.from_linestring(linestring) + point = Point2D(1.0, 1.0) + distance = polyline.project(point) + self.assertEqual(distance, 1.0) + + def test_project_statese2(self): + """Test projecting StateSE2 onto polyline.""" + coords = [(0.0, 0.0), (2.0, 0.0)] + linestring = geom.LineString(coords) + polyline = Polyline2D.from_linestring(linestring) + state = StateSE2(1.0, 1.0, 0.0) + distance = polyline.project(state) + self.assertEqual(distance, 1.0) + + def test_polyline_se2_property(self): + """Test polyline_se2 property.""" + coords = [(0.0, 0.0), (1.0, 0.0), (2.0, 0.0)] + linestring = geom.LineString(coords) + polyline = Polyline2D.from_linestring(linestring) + polyline_se2 = polyline.polyline_se2 + self.assertIsInstance(polyline_se2, PolylineSE2) + + +class TestPolylineSE2(unittest.TestCase): + """Test class for PolylineSE2.""" + + def test_from_linestring(self): + """Test creating PolylineSE2 from LineString.""" + coords = [(0.0, 0.0), (1.0, 0.0), (2.0, 0.0)] + linestring = geom.LineString(coords) + polyline = PolylineSE2.from_linestring(linestring) + self.assertIsInstance(polyline, PolylineSE2) + self.assertEqual(polyline.se2_array.shape, (3, 3)) + + def test_from_array_2d(self): + """Test creating PolylineSE2 from 2D array.""" + array = np.array([[0.0, 0.0], [1.0, 0.0], [2.0, 0.0]], dtype=np.float32) + polyline = PolylineSE2.from_array(array) + self.assertIsInstance(polyline, PolylineSE2) + self.assertEqual(polyline.se2_array.shape, (3, 3)) + + def test_from_array_se2(self): + """Test creating PolylineSE2 from SE2 array.""" + array = np.array([[0.0, 0.0, 0.0], [1.0, 0.0, 0.0], [2.0, 0.0, 0.0]], dtype=np.float32) + polyline = PolylineSE2.from_array(array) + self.assertIsInstance(polyline, PolylineSE2) + np.testing.assert_array_almost_equal(polyline.se2_array, array) + + def test_from_array_invalid_shape(self): + """Test creating PolylineSE2 from invalid array shape.""" + array = np.array([[0.0], [1.0], [2.0]], dtype=np.float32) + with self.assertRaises(ValueError): + PolylineSE2.from_array(array) + + def test_from_discrete_se2(self): + """Test creating PolylineSE2 from discrete SE2 states.""" + states = [StateSE2(0.0, 0.0, 0.0), StateSE2(1.0, 0.0, 0.0), StateSE2(2.0, 0.0, 0.0)] + polyline = PolylineSE2.from_discrete_se2(states) + self.assertIsInstance(polyline, PolylineSE2) + self.assertEqual(polyline.se2_array.shape, (3, 3)) + + def test_length_property(self): + """Test length property.""" + array = np.array([[0.0, 0.0, 0.0], [1.0, 0.0, 0.0], [2.0, 0.0, 0.0]], dtype=np.float64) + polyline = PolylineSE2.from_array(array) + self.assertEqual(polyline.length, 2.0) + + def test_interpolate_single_distance(self): + """Test interpolation with single distance.""" + array = np.array([[0.0, 0.0, 0.0], [2.0, 0.0, 0.0]], dtype=np.float64) + polyline = 
PolylineSE2.from_array(array) + state = polyline.interpolate(1.0) + self.assertIsInstance(state, StateSE2) + self.assertEqual(state.x, 1.0) + self.assertEqual(state.y, 0.0) + + def test_interpolate_multiple_distances(self): + """Test interpolation with multiple distances.""" + array = np.array([[0.0, 0.0, 0.0], [2.0, 0.0, 0.0]], dtype=np.float64) + polyline = PolylineSE2.from_array(array) + states = polyline.interpolate(np.array([0.0, 1.0, 2.0])) + self.assertIsInstance(states, np.ndarray) + self.assertEqual(states.shape, (3, 3)) + + def test_interpolate_normalized(self): + """Test normalized interpolation.""" + array = np.array([[0.0, 0.0, 0.0], [2.0, 0.0, 0.0]], dtype=np.float64) + polyline = PolylineSE2.from_array(array) + state = polyline.interpolate(0.5, normalized=True) + self.assertIsInstance(state, StateSE2) + self.assertEqual(state.x, 1.0) + self.assertEqual(state.y, 0.0) + + def test_project_point2d(self): + """Test projecting Point2D onto SE2 polyline.""" + array = np.array([[0.0, 0.0, 0.0], [2.0, 0.0, 0.0]], dtype=np.float64) + polyline = PolylineSE2.from_array(array) + point = Point2D(1.0, 1.0) + distance = polyline.project(point) + self.assertEqual(distance, 1.0) + + def test_project_statese2(self): + """Test projecting StateSE2 onto SE2 polyline.""" + array = np.array([[0.0, 0.0, 0.0], [2.0, 0.0, 0.0]], dtype=np.float64) + polyline = PolylineSE2.from_array(array) + state = StateSE2(1.0, 1.0, 0.0) + distance = polyline.project(state) + self.assertEqual(distance, 1.0) + + +class TestPolyline3D(unittest.TestCase): + """Test class for Polyline3D.""" + + def test_from_linestring_with_z(self): + """Test creating Polyline3D from LineString with Z coordinates.""" + coords = [(0.0, 0.0, 1.0), (1.0, 1.0, 2.0), (2.0, 0.0, 3.0)] + linestring = geom.LineString(coords) + polyline = Polyline3D.from_linestring(linestring) + self.assertIsInstance(polyline, Polyline3D) + self.assertTrue(polyline.linestring.has_z) + + def test_from_linestring_without_z(self): + """Test creating Polyline3D from LineString without Z coordinates.""" + coords = [(0.0, 0.0), (1.0, 1.0), (2.0, 0.0)] + linestring = geom.LineString(coords) + polyline = Polyline3D.from_linestring(linestring) + self.assertIsInstance(polyline, Polyline3D) + self.assertTrue(polyline.linestring.has_z) + + def test_from_array(self): + """Test creating Polyline3D from 3D array.""" + array = np.array([[0.0, 0.0, 1.0], [1.0, 1.0, 2.0], [2.0, 0.0, 3.0]], dtype=np.float64) + polyline = Polyline3D.from_array(array) + self.assertIsInstance(polyline, Polyline3D) + np.testing.assert_array_almost_equal(polyline.array, array) + + def test_from_array_invalid_shape(self): + """Test creating Polyline3D from invalid array shape.""" + array = np.array([[0.0, 0.0], [1.0, 1.0]], dtype=np.float64) + with self.assertRaises(AssertionError): + Polyline3D.from_array(array) + + def test_array_property(self): + """Test array property.""" + coords = [(0.0, 0.0, 1.0), (1.0, 1.0, 2.0), (2.0, 0.0, 3.0)] + linestring = geom.LineString(coords) + polyline = Polyline3D.from_linestring(linestring) + array = polyline.array + self.assertEqual(array.shape, (3, 3)) + self.assertEqual(array.dtype, np.float64) + np.testing.assert_array_almost_equal(array, coords) + + def test_polyline_2d_property(self): + """Test polyline_2d property.""" + coords = [(0.0, 0.0, 1.0), (1.0, 1.0, 2.0), (2.0, 0.0, 3.0)] + linestring = geom.LineString(coords) + polyline = Polyline3D.from_linestring(linestring) + polyline_2d = polyline.polyline_2d + self.assertIsInstance(polyline_2d, Polyline2D) + 
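+        # The 2D projection must drop the z coordinate from the linestring.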
self.assertFalse(polyline_2d.linestring.has_z) + + def test_polyline_se2_property(self): + """Test polyline_se2 property.""" + coords = [(0.0, 0.0, 1.0), (1.0, 0.0, 2.0), (2.0, 0.0, 3.0)] + linestring = geom.LineString(coords) + polyline = Polyline3D.from_linestring(linestring) + polyline_se2 = polyline.polyline_se2 + self.assertIsInstance(polyline_se2, PolylineSE2) + + def test_length_property(self): + """Test length property.""" + coords = [(0.0, 0.0, 0.0), (1.0, 0.0, 0.0), (2.0, 0.0, 0.0)] + linestring = geom.LineString(coords) + polyline = Polyline3D.from_linestring(linestring) + self.assertEqual(polyline.length, 2.0) + + def test_interpolate_single_distance(self): + """Test interpolation with single distance.""" + coords = [(0.0, 0.0, 0.0), (2.0, 0.0, 2.0)] + linestring = geom.LineString(coords) + polyline = Polyline3D.from_linestring(linestring) + point = polyline.interpolate(1.0) + self.assertIsInstance(point, Point3D) + self.assertEqual(point.x, 1.0) + self.assertEqual(point.y, 0.0) + self.assertEqual(point.z, 1.0) + + def test_interpolate_multiple_distances(self): + """Test interpolation with multiple distances.""" + coords = [(0.0, 0.0, 0.0), (2.0, 0.0, 2.0)] + linestring = geom.LineString(coords) + polyline = Polyline3D.from_linestring(linestring) + points = polyline.interpolate(np.array([0.0, 1.0, 2.0])) + self.assertIsInstance(points, np.ndarray) + self.assertEqual(points.shape, (3, 3)) + + def test_interpolate_normalized(self): + """Test normalized interpolation.""" + coords = [(0.0, 0.0, 0.0), (2.0, 0.0, 2.0)] + linestring = geom.LineString(coords) + polyline = Polyline3D.from_linestring(linestring) + point = polyline.interpolate(0.5, normalized=True) + self.assertIsInstance(point, Point3D) + self.assertEqual(point.x, 1.0) + self.assertEqual(point.y, 0.0) + self.assertEqual(point.z, 1.0) + + def test_project_point2d(self): + """Test projecting Point2D onto 3D polyline.""" + coords = [(0.0, 0.0, 0.0), (2.0, 0.0, 0.0)] + linestring = geom.LineString(coords) + polyline = Polyline3D.from_linestring(linestring) + point = Point2D(1.0, 1.0) + distance = polyline.project(point) + self.assertEqual(distance, 1.0) + + def test_project_point3d(self): + """Test projecting Point3D onto 3D polyline.""" + coords = [(0.0, 0.0, 0.0), (2.0, 0.0, 0.0)] + linestring = geom.LineString(coords) + polyline = Polyline3D.from_linestring(linestring) + point = Point3D(1.0, 1.0, 1.0) + distance = polyline.project(point) + self.assertEqual(distance, 1.0) diff --git a/d123/geometry/test/test_rotation.py b/d123/geometry/test/test_rotation.py new file mode 100644 index 00000000..65f902d9 --- /dev/null +++ b/d123/geometry/test/test_rotation.py @@ -0,0 +1,240 @@ +import unittest + +import numpy as np + +from d123.geometry.geometry_index import EulerAnglesIndex, QuaternionIndex +from d123.geometry.rotation import EulerAngles, Quaternion + + +class TestEulerAngles(unittest.TestCase): + """Unit tests for EulerAngles class.""" + + def setUp(self): + """Set up test fixtures.""" + self.roll = 0.1 + self.pitch = 0.2 + self.yaw = 0.3 + self.euler_angles = EulerAngles(self.roll, self.pitch, self.yaw) + self.test_array = np.zeros([3], dtype=np.float64) + self.test_array[EulerAnglesIndex.ROLL] = self.roll + self.test_array[EulerAnglesIndex.PITCH] = self.pitch + self.test_array[EulerAnglesIndex.YAW] = self.yaw + + def test_init(self): + """Test EulerAngles initialization.""" + euler = EulerAngles(roll=0.1, pitch=0.2, yaw=0.3) + self.assertEqual(euler.roll, 0.1) + self.assertEqual(euler.pitch, 0.2) + 
self.assertEqual(euler.yaw, 0.3) + + def test_from_array_valid(self): + """Test from_array class method with valid input.""" + euler = EulerAngles.from_array(self.test_array) + self.assertIsInstance(euler, EulerAngles) + self.assertAlmostEqual(euler.roll, self.roll) + self.assertAlmostEqual(euler.pitch, self.pitch) + self.assertAlmostEqual(euler.yaw, self.yaw) + + def test_from_array_invalid_shape(self): + """Test from_array with invalid array shape.""" + with self.assertRaises(AssertionError): + EulerAngles.from_array(np.array([1, 2])) + with self.assertRaises(AssertionError): + EulerAngles.from_array(np.array([[1, 2, 3]])) + + def test_from_array_copy(self): + """Test from_array with copy parameter.""" + original_array = self.test_array.copy() + euler_copy = EulerAngles.from_array(original_array, copy=True) + euler_no_copy = EulerAngles.from_array(original_array, copy=False) + + original_array[0] = 999.0 + self.assertNotEqual(euler_copy.roll, 999.0) + self.assertEqual(euler_no_copy.roll, 999.0) + + def test_from_rotation_matrix(self): + """Test from_rotation_matrix class method.""" + identity_matrix = np.eye(3) + euler = EulerAngles.from_rotation_matrix(identity_matrix) + self.assertAlmostEqual(euler.roll, 0.0, places=10) + self.assertAlmostEqual(euler.pitch, 0.0, places=10) + self.assertAlmostEqual(euler.yaw, 0.0, places=10) + + def test_from_rotation_matrix_invalid(self): + """Test from_rotation_matrix with invalid input.""" + with self.assertRaises(AssertionError): + EulerAngles.from_rotation_matrix(np.array([[1, 2]])) + with self.assertRaises(AssertionError): + EulerAngles.from_rotation_matrix(np.array([1, 2, 3])) + + def test_array_property(self): + """Test array property.""" + array = self.euler_angles.array + self.assertEqual(array.shape, (3,)) + self.assertEqual(array[EulerAnglesIndex.ROLL], self.roll) + self.assertEqual(array[EulerAnglesIndex.PITCH], self.pitch) + self.assertEqual(array[EulerAnglesIndex.YAW], self.yaw) + + def test_iterator(self): + """Test iterator functionality.""" + values = list(self.euler_angles) + self.assertEqual(values, [self.roll, self.pitch, self.yaw]) + + def test_hash(self): + """Test hash functionality.""" + euler1 = EulerAngles(0.1, 0.2, 0.3) + euler2 = EulerAngles(0.1, 0.2, 0.3) + euler3 = EulerAngles(0.1, 0.2, 0.4) + + self.assertEqual(hash(euler1), hash(euler2)) + self.assertNotEqual(hash(euler1), hash(euler3)) + + +class TestQuaternion(unittest.TestCase): + """Unit tests for Quaternion class.""" + + def setUp(self): + """Set up test fixtures.""" + self.qw = 1.0 + self.qx = 0.0 + self.qy = 0.0 + self.qz = 0.0 + self.quaternion = Quaternion(self.qw, self.qx, self.qy, self.qz) + self.test_array = np.zeros([4], dtype=np.float64) + self.test_array[QuaternionIndex.QW] = self.qw + self.test_array[QuaternionIndex.QX] = self.qx + self.test_array[QuaternionIndex.QY] = self.qy + self.test_array[QuaternionIndex.QZ] = self.qz + + def test_init(self): + """Test Quaternion initialization.""" + quat = Quaternion(1.0, 0.0, 0.0, 0.0) + self.assertEqual(quat.qw, 1.0) + self.assertEqual(quat.qx, 0.0) + self.assertEqual(quat.qy, 0.0) + self.assertEqual(quat.qz, 0.0) + + def test_from_array_valid(self): + """Test from_array class method with valid input.""" + quat = Quaternion.from_array(self.test_array) + self.assertAlmostEqual(quat.qw, self.qw) + self.assertAlmostEqual(quat.qx, self.qx) + self.assertAlmostEqual(quat.qy, self.qy) + self.assertAlmostEqual(quat.qz, self.qz) + + def test_from_array_invalid_shape(self): + """Test from_array with invalid array shape.""" 
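+        # A valid input is a flat array with the four components (qw, qx, qy, qz).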
+ with self.assertRaises(AssertionError): + Quaternion.from_array(np.array([1, 2, 3])) + with self.assertRaises(AssertionError): + Quaternion.from_array(np.array([[1, 2, 3, 4]])) + + def test_from_array_copy(self): + """Test from_array with copy parameter.""" + original_array = self.test_array.copy() + quat_copy = Quaternion.from_array(original_array, copy=True) + quat_no_copy = Quaternion.from_array(original_array, copy=False) + + original_array[0] = 999.0 + self.assertNotEqual(quat_copy.qw, 999.0) + self.assertEqual(quat_no_copy.qw, 999.0) + + def test_from_rotation_matrix(self): + """Test from_rotation_matrix class method.""" + identity_matrix = np.eye(3) + quat = Quaternion.from_rotation_matrix(identity_matrix) + self.assertAlmostEqual(quat.qw, 1.0, places=10) + self.assertAlmostEqual(quat.qx, 0.0, places=10) + self.assertAlmostEqual(quat.qy, 0.0, places=10) + self.assertAlmostEqual(quat.qz, 0.0, places=10) + + def test_from_rotation_matrix_invalid(self): + """Test from_rotation_matrix with invalid input.""" + with self.assertRaises(AssertionError): + Quaternion.from_rotation_matrix(np.array([[1, 2]])) + with self.assertRaises(AssertionError): + Quaternion.from_rotation_matrix(np.array([1, 2, 3])) + + def test_from_euler_angles(self): + """Test from_euler_angles class method.""" + euler = EulerAngles(0.0, 0.0, 0.0) + quat = Quaternion.from_euler_angles(euler) + self.assertAlmostEqual(quat.qw, 1.0, places=10) + self.assertAlmostEqual(quat.qx, 0.0, places=10) + self.assertAlmostEqual(quat.qy, 0.0, places=10) + self.assertAlmostEqual(quat.qz, 0.0, places=10) + + def test_array_property(self): + """Test array property.""" + array = self.quaternion.array + self.assertEqual(array.shape, (4,)) + np.testing.assert_array_equal(array, self.test_array) + + def test_pyquaternion_property(self): + """Test pyquaternion property.""" + pyquat = self.quaternion.pyquaternion + self.assertEqual(pyquat.w, self.qw) + self.assertEqual(pyquat.x, self.qx) + self.assertEqual(pyquat.y, self.qy) + self.assertEqual(pyquat.z, self.qz) + + def test_euler_angles_property(self): + """Test euler_angles property.""" + euler = self.quaternion.euler_angles + self.assertIsInstance(euler, EulerAngles) + self.assertAlmostEqual(euler.roll, 0.0, places=10) + self.assertAlmostEqual(euler.pitch, 0.0, places=10) + self.assertAlmostEqual(euler.yaw, 0.0, places=10) + + def test_rotation_matrix_property(self): + """Test rotation_matrix property.""" + rot_matrix = self.quaternion.rotation_matrix + self.assertEqual(rot_matrix.shape, (3, 3)) + np.testing.assert_array_almost_equal(rot_matrix, np.eye(3)) + + def test_iterator(self): + """Test iterator functionality.""" + values = list(self.quaternion) + self.assertEqual(values, [self.qw, self.qx, self.qy, self.qz]) + + def test_hash(self): + """Test hash functionality.""" + quat1 = Quaternion(1.0, 0.0, 0.0, 0.0) + quat2 = Quaternion(1.0, 0.0, 0.0, 0.0) + quat3 = Quaternion(0.0, 1.0, 0.0, 0.0) + + self.assertEqual(hash(quat1), hash(quat2)) + self.assertNotEqual(hash(quat1), hash(quat3)) + + +class TestRotationConversions(unittest.TestCase): + """Test conversions between EulerAngles and Quaternion.""" + + def test_euler_to_quaternion_to_euler(self): + """Test round-trip conversion from Euler to Quaternion and back.""" + original_euler = EulerAngles(0.1, 0.2, 0.3) + quaternion = Quaternion.from_euler_angles(original_euler) + converted_euler = quaternion.euler_angles + + self.assertAlmostEqual(original_euler.roll, converted_euler.roll, places=10) + 
self.assertAlmostEqual(original_euler.pitch, converted_euler.pitch, places=10) + self.assertAlmostEqual(original_euler.yaw, converted_euler.yaw, places=10) + + def test_rotation_matrix_consistency(self): + """Test that rotation matrix conversions are consistent.""" + euler = EulerAngles(0.1, 0.2, 0.3) + quat = Quaternion.from_euler_angles(euler) + + euler_from_matrix = EulerAngles.from_rotation_matrix(euler.rotation_matrix) + quat_from_matrix = Quaternion.from_rotation_matrix(quat.rotation_matrix) + self.assertAlmostEqual(euler.roll, euler_from_matrix.roll, places=10) + self.assertAlmostEqual(euler.pitch, euler_from_matrix.pitch, places=10) + self.assertAlmostEqual(euler.yaw, euler_from_matrix.yaw, places=10) + self.assertAlmostEqual(quat.qw, quat_from_matrix.qw, places=10) + self.assertAlmostEqual(quat.qx, quat_from_matrix.qx, places=10) + self.assertAlmostEqual(quat.qy, quat_from_matrix.qy, places=10) + self.assertAlmostEqual(quat.qz, quat_from_matrix.qz, places=10) + + +if __name__ == "__main__": + unittest.main() diff --git a/d123/geometry/test/test_vector.py b/d123/geometry/test/test_vector.py new file mode 100644 index 00000000..526a2104 --- /dev/null +++ b/d123/geometry/test/test_vector.py @@ -0,0 +1,172 @@ +import unittest + +import numpy as np + +from d123.geometry import Vector2D, Vector2DIndex, Vector3D, Vector3DIndex + + +class TestVector2D(unittest.TestCase): + """Unit tests for Vector2D class.""" + + def setUp(self): + """Set up test fixtures.""" + self.x_coord = 3.5 + self.y_coord = 4.2 + self.vector = Vector2D(x=self.x_coord, y=self.y_coord) + self.test_array = np.zeros([2], dtype=np.float64) + self.test_array[Vector2DIndex.X] = self.x_coord + self.test_array[Vector2DIndex.Y] = self.y_coord + + def test_init(self): + """Test Vector2D initialization.""" + vector = Vector2D(1.0, 2.0) + self.assertEqual(vector.x, 1.0) + self.assertEqual(vector.y, 2.0) + + def test_from_array_valid(self): + """Test from_array class method with valid input.""" + vector = Vector2D.from_array(self.test_array) + self.assertEqual(vector.x, self.x_coord) + self.assertEqual(vector.y, self.y_coord) + + def test_from_array_invalid_dimensions(self): + """Test from_array with invalid array dimensions.""" + # 2D array should raise assertion error + array_2d = np.array([[1.0, 2.0], [3.0, 4.0]], dtype=np.float64) + with self.assertRaises(AssertionError): + Vector2D.from_array(array_2d) + + # 3D array should raise assertion error + array_3d = np.array([[[1.0]]], dtype=np.float64) + with self.assertRaises(AssertionError): + Vector2D.from_array(array_3d) + + def test_from_array_invalid_shape(self): + """Test from_array with invalid array shape.""" + array_wrong_length = np.array([1.0, 2.0, 3.0], dtype=np.float64) + with self.assertRaises(AssertionError): + Vector2D.from_array(array_wrong_length) + + # Empty array + empty_array = np.array([], dtype=np.float64) + with self.assertRaises(AssertionError): + Vector2D.from_array(empty_array) + + def test_array_property(self): + """Test the array property.""" + expected_array = np.array([self.x_coord, self.y_coord], dtype=np.float64) + np.testing.assert_array_equal(self.vector.array, expected_array) + self.assertEqual(self.vector.array.dtype, np.float64) + self.assertEqual(self.vector.array.shape, (2,)) + + def test_array_like(self): + """Test the __array__ behavior.""" + expected_array = np.array([self.x_coord, self.y_coord], dtype=np.float32) + output_array = np.array(self.vector, dtype=np.float32) + np.testing.assert_array_equal(output_array, expected_array) + 
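+        # np.array(vector, dtype=...) dispatches to __array__, which must honor the requested dtype.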
self.assertEqual(output_array.dtype, np.float32) + self.assertEqual(output_array.shape, (2,)) + + def test_iter(self): + """Test the __iter__ method.""" + coords = list(self.vector) + self.assertEqual(coords, [self.x_coord, self.y_coord]) + + # Test that it's actually iterable + x, y = self.vector + self.assertEqual(x, self.x_coord) + self.assertEqual(y, self.y_coord) + + def test_hash(self): + """Test the __hash__ method.""" + vector_dict = {self.vector: "test"} + self.assertIn(self.vector, vector_dict) + self.assertEqual(vector_dict[self.vector], "test") + + +class TestVector3D(unittest.TestCase): + """Unit tests for Vector3D class.""" + + def setUp(self): + """Set up test fixtures.""" + self.x_coord = 3.5 + self.y_coord = 4.2 + self.z_coord = 5.1 + self.vector = Vector3D(self.x_coord, self.y_coord, self.z_coord) + self.test_array = np.zeros((3,), dtype=np.float64) + self.test_array[Vector3DIndex.X] = self.x_coord + self.test_array[Vector3DIndex.Y] = self.y_coord + self.test_array[Vector3DIndex.Z] = self.z_coord + + def test_init(self): + """Test Vector3D initialization.""" + vector = Vector3D(1.0, 2.0, 3.0) + self.assertEqual(vector.x, 1.0) + self.assertEqual(vector.y, 2.0) + self.assertEqual(vector.z, 3.0) + + def test_from_array_valid(self): + """Test from_array class method with valid input.""" + vector = Vector3D.from_array(self.test_array) + self.assertEqual(vector.x, self.x_coord) + self.assertEqual(vector.y, self.y_coord) + self.assertEqual(vector.z, self.z_coord) + + def test_from_array_invalid_dimensions(self): + """Test from_array with invalid array dimensions.""" + # 2D array should raise assertion error + array_2d = np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]], dtype=np.float64) + with self.assertRaises(AssertionError): + Vector3D.from_array(array_2d) + + # 3D array should raise assertion error + array_3d = np.array([[[1.0]]], dtype=np.float64) + with self.assertRaises(AssertionError): + Vector3D.from_array(array_3d) + + def test_from_array_invalid_shape(self): + """Test from_array with invalid array shape.""" + array_wrong_length = np.array([1.0, 2.0], dtype=np.float64) + with self.assertRaises(AssertionError): + Vector3D.from_array(array_wrong_length) + + # Empty array + empty_array = np.array([], dtype=np.float64) + with self.assertRaises(AssertionError): + Vector3D.from_array(empty_array) + + def test_array_property(self): + """Test the array property.""" + expected_array = np.array([self.x_coord, self.y_coord, self.z_coord], dtype=np.float64) + np.testing.assert_array_equal(self.vector.array, expected_array) + self.assertEqual(self.vector.array.dtype, np.float64) + self.assertEqual(self.vector.array.shape, (3,)) + + def test_array_like(self): + """Test the __array__ behavior.""" + expected_array = np.array([self.x_coord, self.y_coord, self.z_coord], dtype=np.float32) + output_array = np.array(self.vector, dtype=np.float32) + np.testing.assert_array_equal(output_array, expected_array) + self.assertEqual(output_array.dtype, np.float32) + self.assertEqual(output_array.shape, (3,)) + + def test_iter(self): + """Test the __iter__ method.""" + coords = list(self.vector) + self.assertEqual(coords, [self.x_coord, self.y_coord, self.z_coord]) + + # Test that it's actually iterable + x, y, z = self.vector + self.assertEqual(x, self.x_coord) + self.assertEqual(y, self.y_coord) + self.assertEqual(z, self.z_coord) + + def test_hash(self): + """Test the __hash__ method.""" + vector_dict = {self.vector: "test"} + self.assertIn(self.vector, vector_dict) + 
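+        # Lookup by an equal, hashable key must return the stored value.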
self.assertEqual(vector_dict[self.vector], "test") + + +if __name__ == "__main__": + unittest.main() diff --git a/d123/geometry/utils/bounding_box_utils.py b/d123/geometry/utils/bounding_box_utils.py index a9c40077..ff8bb237 100644 --- a/d123/geometry/utils/bounding_box_utils.py +++ b/d123/geometry/utils/bounding_box_utils.py @@ -29,14 +29,30 @@ def bbse2_array_to_corners_array(bbse2: npt.NDArray[np.float64]) -> npt.NDArray[ half_length = bbse2[..., BoundingBoxSE2Index.LENGTH] / 2.0 half_width = bbse2[..., BoundingBoxSE2Index.WIDTH] / 2.0 - corners_array[..., Corners2DIndex.FRONT_LEFT, :] = translate_along_yaw_array(centers, yaws, half_length, half_width) + corners_array[..., Corners2DIndex.FRONT_LEFT, :] = translate_along_yaw_array( + centers, + yaws, + half_length, + half_width, + ) corners_array[..., Corners2DIndex.FRONT_RIGHT, :] = translate_along_yaw_array( - centers, yaws, half_length, -half_width + centers, + yaws, + half_length, + -half_width, ) corners_array[..., Corners2DIndex.BACK_RIGHT, :] = translate_along_yaw_array( - centers, yaws, -half_length, -half_width + centers, + yaws, + -half_length, + -half_width, + ) + corners_array[..., Corners2DIndex.BACK_LEFT, :] = translate_along_yaw_array( + centers, + yaws, + -half_length, + half_width, ) - corners_array[..., Corners2DIndex.BACK_LEFT, :] = translate_along_yaw_array(centers, yaws, -half_length, half_width) return corners_array.squeeze(axis=0) if ndim_one else corners_array diff --git a/d123/geometry/utils/rotation_utils.py b/d123/geometry/utils/rotation_utils.py index 4fa2e458..de12fc0f 100644 --- a/d123/geometry/utils/rotation_utils.py +++ b/d123/geometry/utils/rotation_utils.py @@ -1,14 +1,57 @@ +from typing import Union + import numpy as np +import numpy.typing as npt + +from d123.geometry.geometry_index import EulerAnglesIndex # TODO: move this somewhere else # TODO: Maybe rename wrap angle? # TODO: Add implementation for torch, jax, or whatever else is needed. 
-def normalize_angle(angle):
+def normalize_angle(angle: Union[float, npt.NDArray[np.float64]]) -> Union[float, npt.NDArray[np.float64]]:
     """
-    Map a angle in range [-π, π]
-    :param angle: any angle as float
-    :return: normalized angle
+    Map an angle into the range [-π, π).
+
+    :param angle: any angle as float or array of floats
+    :return: normalized angle or array of normalized angles
     """
-    return np.arctan2(np.sin(angle), np.cos(angle))
+    # Unlike arctan2(sin, cos), the modulo form maps +π to -π, i.e. the interval is half-open.
+    return ((angle + np.pi) % (2 * np.pi)) - np.pi
+
+
+def get_rotation_matrices_from_euler_array(euler_angles_array: npt.NDArray[np.float64]) -> npt.NDArray[np.float64]:
+    """Builds a batch of rotation matrices (R_x @ R_y @ R_z) from an (N, 3) array of roll/pitch/yaw angles.
+
+    :param euler_angles_array: Array of shape (N, 3), indexed by EulerAnglesIndex.
+    :return: Array of shape (N, 3, 3) containing one rotation matrix per input row.
+    """
+    assert euler_angles_array.ndim == 2 and euler_angles_array.shape[1] == len(EulerAnglesIndex)
+
+    # Extract roll, pitch, yaw for all samples at once
+    roll = euler_angles_array[:, EulerAnglesIndex.ROLL]
+    pitch = euler_angles_array[:, EulerAnglesIndex.PITCH]
+    yaw = euler_angles_array[:, EulerAnglesIndex.YAW]
+
+    # Compute sin/cos for all angles at once
+    cos_roll, sin_roll = np.cos(roll), np.sin(roll)
+    cos_pitch, sin_pitch = np.cos(pitch), np.sin(pitch)
+    cos_yaw, sin_yaw = np.cos(yaw), np.sin(yaw)
+
+    # Build rotation matrices for entire batch
+    batch_size = euler_angles_array.shape[0]
+    rotation_matrices = np.zeros((batch_size, 3, 3), dtype=np.float64)
+
+    # R_x @ R_y @ R_z components
+    rotation_matrices[:, 0, 0] = cos_pitch * cos_yaw
+    rotation_matrices[:, 0, 1] = -cos_pitch * sin_yaw
+    rotation_matrices[:, 0, 2] = sin_pitch
+
+    rotation_matrices[:, 1, 0] = sin_roll * sin_pitch * cos_yaw + cos_roll * sin_yaw
+    rotation_matrices[:, 1, 1] = -sin_roll * sin_pitch * sin_yaw + cos_roll * cos_yaw
+    rotation_matrices[:, 1, 2] = -sin_roll * cos_pitch
+
+    rotation_matrices[:, 2, 0] = -cos_roll * sin_pitch * cos_yaw + sin_roll * sin_yaw
+    rotation_matrices[:, 2, 1] = cos_roll * sin_pitch * sin_yaw + sin_roll * cos_yaw
+    rotation_matrices[:, 2, 2] = cos_roll * cos_pitch
+
+    return rotation_matrices
+
+
+def get_rotation_matrix_from_euler_array(euler_angles: npt.NDArray[np.float64]) -> npt.NDArray[np.float64]:
+    """Builds a single 3x3 rotation matrix from a (3,) roll/pitch/yaw array.
+
+    :param euler_angles: Array of shape (3,), indexed by EulerAnglesIndex.
+    :return: A 3x3 rotation matrix.
+    """
+    assert euler_angles.ndim == 1 and euler_angles.shape[0] == len(EulerAnglesIndex)
+    return get_rotation_matrices_from_euler_array(euler_angles[None, :])[0]
diff --git a/d123/geometry/vector.py b/d123/geometry/vector.py
index 3ab2ef51..77a3f67f 100644
--- a/d123/geometry/vector.py
+++ b/d123/geometry/vector.py
@@ -6,11 +6,11 @@
 import numpy as np
 import numpy.typing as npt

+from d123.common.utils.mixin import ArrayMixin
 from d123.geometry.geometry_index import Vector2DIndex, Vector3DIndex


-@dataclass
-class Vector2D:
+class Vector2D(ArrayMixin):
     """
     Class to represent 2D vectors, in x, y direction.
@@ -26,21 +26,45 @@ class Vector2D:
     5.0
     """

-    x: float  # [m] x-component of the vector
-    y: float  # [m] y-component of the vector
-    __slots__ = "x", "y"
+    _array: npt.NDArray[np.float64]
+
+    def __init__(self, x: float, y: float):
+        """Initialize Vector2D with x, y components."""
+        array = np.zeros(len(Vector2DIndex), dtype=np.float64)
+        array[Vector2DIndex.X] = x
+        array[Vector2DIndex.Y] = y
+        object.__setattr__(self, "_array", array)

     @classmethod
-    def from_array(cls, array: npt.NDArray[np.float64]) -> Vector2D:
+    def from_array(cls, array: npt.NDArray[np.float64], copy: bool = True) -> Vector2D:
         """Constructs a Vector2D from a numpy array.

         :param array: Array of shape (2,) representing the vector components [x, y], indexed by \
             :class:`~d123.geometry.Vector2DIndex`.
+        :param copy: Whether to copy the input array. Defaults to True.
         :return: A Vector2D instance.
""" assert array.ndim == 1 assert array.shape[0] == len(Vector2DIndex) - return Vector2D(array[Vector2DIndex.X], array[Vector2DIndex.Y]) + instance = object.__new__(cls) + object.__setattr__(instance, "_array", array.copy() if copy else array) + return instance + + @property + def x(self) -> float: + """The x component of the vector. + + :return: The x component of the vector. + """ + return self._array[Vector2DIndex.X] + + @property + def y(self) -> float: + """The y component of the vector. + + :return: The y component of the vector. + """ + return self._array[Vector2DIndex.Y] @property def array(self) -> npt.NDArray[np.float64]: @@ -112,7 +136,7 @@ def __hash__(self) -> int: @dataclass -class Vector3D: +class Vector3D(ArrayMixin): """ Class to represents 3D vectors, in x, y, z direction. @@ -128,21 +152,53 @@ class Vector3D: 3.7416573867739413 """ - x: float # [m] x-component of the vector - y: float # [m] y-component of the vector - z: float # [m] z-component of the vector - __slots__ = "x", "y", "z" + _array: npt.NDArray[np.float64] + + def __init__(self, x: float, y: float, z: float): + """Initialize Vector3D with x, y, z components.""" + array = np.zeros(len(Vector3DIndex), dtype=np.float64) + array[Vector3DIndex.X] = x + array[Vector3DIndex.Y] = y + array[Vector3DIndex.Z] = z + object.__setattr__(self, "_array", array) @classmethod - def from_array(cls, array: npt.NDArray[np.float64]) -> Vector3D: + def from_array(cls, array: npt.NDArray[np.float64], copy: bool = True) -> Vector3D: """Constructs a Vector3D from a numpy array. :param array: Array of shape (3,), indexed by :class:`~d123.geometry.geometry_index.Vector3DIndex`. + :param copy: Whether to copy the input array. Defaults to True. :return: A Vector3D instance. """ assert array.ndim == 1 assert array.shape[0] == len(Vector3DIndex) - return Vector3D(array[Vector3DIndex.X], array[Vector3DIndex.Y], array[Vector3DIndex.Z]) + instance = object.__new__(cls) + object.__setattr__(instance, "_array", array.copy() if copy else array) + return instance + + @property + def x(self) -> float: + """The x component of the vector. + + :return: The x component of the vector. + """ + return self._array[Vector3DIndex.X] + + @property + def y(self) -> float: + """The y component of the vector. + + :return: The y component of the vector. + """ + return self._array[Vector3DIndex.Y] + + @property + def z(self) -> float: + """The z component of the vector. + + :return: The z component of the vector. + """ + return self._array[Vector3DIndex.Z] @property def array(self) -> npt.NDArray[np.float64]: @@ -152,11 +208,7 @@ def array(self) -> npt.NDArray[np.float64]: :return: A numpy array representing the vector components [x, y, z], indexed by \ :class:`~d123.geometry.geometry_index.Vector3DIndex`. """ - array = np.zeros(len(Vector3DIndex), dtype=np.float64) - array[Vector3DIndex.X] = self.x - array[Vector3DIndex.Y] = self.y - array[Vector3DIndex.Z] = self.z - return array + return self._array @property def magnitude(self) -> float: From 7e5d0da7300700ba16b00602db28a450ba98c90c Mon Sep 17 00:00:00 2001 From: DanielDauner Date: Mon, 25 Aug 2025 17:43:34 +0200 Subject: [PATCH 017/145] Make bounding box array-like. 
Add tests for bounding boxes and Occupancy Map (#44) --- .../waymo_map_utils/womp_boundary_utils.py | 4 +- d123/geometry/bounding_box.py | 179 +++++++---- d123/geometry/occupancy_map.py | 2 +- d123/geometry/test/test_bounding_box.py | 215 +++++++++++++ d123/geometry/test/test_occupancy_map.py | 285 ++++++++++++++++++ notebooks/viz/bev_matplotlib.ipynb | 2 +- notebooks/viz/viser_testing_v2_scene.ipynb | 6 +- 7 files changed, 631 insertions(+), 62 deletions(-) create mode 100644 d123/geometry/test/test_bounding_box.py create mode 100644 d123/geometry/test/test_occupancy_map.py diff --git a/d123/dataset/dataset_specific/wopd/waymo_map_utils/womp_boundary_utils.py b/d123/dataset/dataset_specific/wopd/waymo_map_utils/womp_boundary_utils.py index 5831ea2a..269b09b6 100644 --- a/d123/dataset/dataset_specific/wopd/waymo_map_utils/womp_boundary_utils.py +++ b/d123/dataset/dataset_specific/wopd/waymo_map_utils/womp_boundary_utils.py @@ -67,7 +67,7 @@ def _collect_perpendicular_hits( perp_end_point = translate_along_yaw(lane_query_se2, Vector2D(0.0, sign * MAX_LANE_WIDTH / 2.0)) perp_linestring = geom.LineString([[perp_start_point.x, perp_start_point.y], [perp_end_point.x, perp_end_point.y]]) - lane_linestring = occupancy_2d.geometries[occupancy_2d.token_to_idx[lane_token]] + lane_linestring = occupancy_2d.geometries[occupancy_2d.id_to_idx[lane_token]] # 1. find intersecting lines, compute 3D distance intersecting_tokens = occupancy_2d.intersects(perp_linestring) @@ -75,7 +75,7 @@ def _collect_perpendicular_hits( perpendicular_hits: List[PerpendicularHit] = [] for intersecting_token in intersecting_tokens: intersecting_polyline_3d = get_polyline_from_token(polyline_dict, intersecting_token) - intersecting_linestring = occupancy_2d.geometries[occupancy_2d.token_to_idx[intersecting_token]] + intersecting_linestring = occupancy_2d.geometries[occupancy_2d.id_to_idx[intersecting_token]] centerline_hit_crossing: bool = ( lane_linestring.intersects(intersecting_linestring) if intersecting_token.startswith("lane_") else False ) diff --git a/d123/geometry/bounding_box.py b/d123/geometry/bounding_box.py index 3ea196e3..dd211ca0 100644 --- a/d123/geometry/bounding_box.py +++ b/d123/geometry/bounding_box.py @@ -1,7 +1,6 @@ from __future__ import annotations from ast import Dict -from dataclasses import dataclass from functools import cached_property from typing import Union @@ -16,7 +15,6 @@ from d123.geometry.utils.bounding_box_utils import bbse2_array_to_corners_array, bbse3_array_to_corners_array -@dataclass class BoundingBoxSE2(ArrayMixin): """ Rotated bounding box in 2D defined by center (StateSE2), length and width. @@ -32,23 +30,67 @@ class BoundingBoxSE2(ArrayMixin): 8.0 """ - center: StateSE2 - length: float - width: float + _array: npt.NDArray[np.float64] + + def __init__(self, center: StateSE2, length: float, width: float): + """Initialize BoundingBoxSE2 with center (StateSE2), length and width. + + :param center: Center of the bounding box as a StateSE2 instance. + :param length: Length of the bounding box along the x-axis in the local frame. + :param width: Width of the bounding box along the y-axis in the local frame. 
+ """ + array = np.zeros(len(BoundingBoxSE2Index), dtype=np.float64) + array[BoundingBoxSE2Index.SE2] = center.array + array[BoundingBoxSE2Index.LENGTH] = length + array[BoundingBoxSE2Index.WIDTH] = width + object.__setattr__(self, "_array", array) @classmethod - def from_array(cls, array: npt.NDArray[np.float64]) -> BoundingBoxSE2: - """Create a BoundingBoxSE2 from a numpy array, index by :class:`~d123.geometry.BoundingBoxSE2Index`. + def from_array(cls, array: npt.NDArray[np.float64], copy: bool = True) -> BoundingBoxSE2: + """Create a BoundingBoxSE2 from a numpy array. - :param array: A 1D numpy array containing the bounding box parameters. + :param array: A 1D numpy array containing the bounding box parameters, indexed by \ + :class:`~d123.geometry.BoundingBoxSE2Index`. + :param copy: Whether to copy the input array. Defaults to True. :return: A BoundingBoxSE2 instance. """ - assert array.ndim == 1 and array.shape[-1] == len(BoundingBoxSE2Index) - return BoundingBoxSE2( - center=StateSE2.from_array(array[BoundingBoxSE2Index.SE2]), - length=array[BoundingBoxSE2Index.LENGTH], - width=array[BoundingBoxSE2Index.WIDTH], - ) + assert array.ndim == 1 + assert array.shape[-1] == len(BoundingBoxSE2Index) + instance = object.__new__(cls) + object.__setattr__(instance, "_array", array.copy() if copy else array) + return instance + + @property + def center(self) -> StateSE2: + """The center of the bounding box as a StateSE2 instance. + + :return: The center of the bounding box as a StateSE2 instance. + """ + return StateSE2.from_array(self._array[BoundingBoxSE2Index.SE2]) + + @property + def center_se2(self) -> StateSE2: + """The center of the bounding box as a StateSE2 instance. + + :return: The center of the bounding box as a StateSE2 instance. + """ + return self.center + + @property + def length(self) -> float: + """The length of the bounding box along the x-axis in the local frame. + + :return: The length of the bounding box. + """ + return self._array[BoundingBoxSE2Index.LENGTH] + + @property + def width(self) -> float: + """The width of the bounding box along the y-axis in the local frame. + + :return: The width of the bounding box. + """ + return self._array[BoundingBoxSE2Index.WIDTH] @cached_property def array(self) -> npt.NDArray[np.float64]: @@ -56,13 +98,7 @@ def array(self) -> npt.NDArray[np.float64]: :return: A numpy array of shape (5,) containing the bounding box parameters [x, y, yaw, length, width]. """ - array = np.zeros(len(BoundingBoxSE2Index), dtype=np.float64) - array[BoundingBoxSE2Index.X] = self.center.x - array[BoundingBoxSE2Index.Y] = self.center.y - array[BoundingBoxSE2Index.YAW] = self.center.yaw - array[BoundingBoxSE2Index.LENGTH] = self.length - array[BoundingBoxSE2Index.WIDTH] = self.width - return array + return self._array @cached_property def shapely_polygon(self) -> geom.Polygon: @@ -99,7 +135,6 @@ def corners_dict(self) -> Dict[Corners2DIndex, Point2D]: return {index: Point2D.from_array(corners_array[index]) for index in Corners2DIndex} -@dataclass class BoundingBoxSE3(ArrayMixin): """ Rotated bounding box in 3D defined by center (StateSE3), length, width and height. @@ -115,45 +150,86 @@ class BoundingBoxSE3(ArrayMixin): 8.0 """ - center: StateSE3 - length: float - width: float - height: float + _array: npt.NDArray[np.float64] + + def __init__(self, center: StateSE3, length: float, width: float, height: float): + """Initialize BoundingBoxSE3 with center (StateSE3), length, width and height. + + :param center: Center of the bounding box as a StateSE3 instance. 
+ :param length: Length of the bounding box along the x-axis in the local frame. + :param width: Width of the bounding box along the y-axis in the local frame. + :param height: Height of the bounding box along the z-axis in the local frame. + """ + array = np.zeros(len(BoundingBoxSE3Index), dtype=np.float64) + array[BoundingBoxSE3Index.STATE_SE3] = center.array + array[BoundingBoxSE3Index.LENGTH] = length + array[BoundingBoxSE3Index.WIDTH] = width + array[BoundingBoxSE3Index.HEIGHT] = height + object.__setattr__(self, "_array", array) @classmethod - def from_array(cls, array: npt.NDArray[np.float64]) -> BoundingBoxSE3: + def from_array(cls, array: npt.NDArray[np.float64], copy: bool = True) -> BoundingBoxSE3: """Create a BoundingBoxSE3 from a numpy array. :param array: A 1D numpy array containing the bounding box parameters, indexed by \ :class:`~d123.geometry.BoundingBoxSE3Index`. + :param copy: Whether to copy the input array. Defaults to True. :return: A BoundingBoxSE3 instance. """ - assert array.ndim == 1 and array.shape[-1] == len(BoundingBoxSE3Index) - return BoundingBoxSE3( - center=StateSE3.from_array(array[BoundingBoxSE3Index.STATE_SE3]), - length=array[BoundingBoxSE3Index.LENGTH], - width=array[BoundingBoxSE3Index.WIDTH], - height=array[BoundingBoxSE3Index.HEIGHT], - ) + assert array.ndim == 1 + assert array.shape[-1] == len(BoundingBoxSE3Index) + instance = object.__new__(cls) + object.__setattr__(instance, "_array", array.copy() if copy else array) + return instance - @cached_property + @property + def center(self) -> StateSE3: + """The center of the bounding box as a StateSE3 instance. + + :return: The center of the bounding box as a StateSE3 instance. + """ + return StateSE3.from_array(self._array[BoundingBoxSE3Index.STATE_SE3]) + + @property + def center_se3(self) -> StateSE3: + """The center of the bounding box as a StateSE3 instance. + + :return: The center of the bounding box as a StateSE3 instance. + """ + return self.center + + @property + def length(self) -> float: + """The length of the bounding box along the x-axis in the local frame. + + :return: The length of the bounding box. + """ + return self._array[BoundingBoxSE3Index.LENGTH] + + @property + def width(self) -> float: + """The width of the bounding box along the y-axis in the local frame. + + :return: The width of the bounding box. + """ + return self._array[BoundingBoxSE3Index.WIDTH] + + @property + def height(self) -> float: + """The height of the bounding box along the z-axis in the local frame. + + :return: The height of the bounding box. + """ + return self._array[BoundingBoxSE3Index.HEIGHT] + + @property def array(self) -> npt.NDArray[np.float64]: """Convert the BoundingBoxSE3 instance to a numpy array. :return: A 1D numpy array containing the bounding box parameters, indexed by \ :class:`~d123.geometry.BoundingBoxSE3Index`. 
""" - array = np.zeros(len(BoundingBoxSE3Index), dtype=np.float64) - array[BoundingBoxSE3Index.X] = self.center.x - array[BoundingBoxSE3Index.Y] = self.center.y - array[BoundingBoxSE3Index.Z] = self.center.z - array[BoundingBoxSE3Index.ROLL] = self.center.roll - array[BoundingBoxSE3Index.PITCH] = self.center.pitch - array[BoundingBoxSE3Index.YAW] = self.center.yaw - array[BoundingBoxSE3Index.LENGTH] = self.length - array[BoundingBoxSE3Index.WIDTH] = self.width - array[BoundingBoxSE3Index.HEIGHT] = self.height - return array + return self._array @property def bounding_box_se2(self) -> BoundingBoxSE2: @@ -161,20 +237,13 @@ def bounding_box_se2(self) -> BoundingBoxSE2: :return: A BoundingBoxSE2 instance. """ + center_se3 = self.center_se3 return BoundingBoxSE2( - center=StateSE2(self.center.x, self.center.y, self.center.yaw), + center=StateSE2(center_se3.x, center_se3.y, center_se3.yaw), length=self.length, width=self.width, ) - @property - def center_se3(self) -> StateSE3: - """Returns the center of the bounding box as a StateSE3 instance. - - :return: The center of the bounding box as a StateSE3 instance. - """ - return self.center - @property def shapely_polygon(self) -> geom.Polygon: """Return a Shapely polygon representation of the 2D projection of the bounding box. diff --git a/d123/geometry/occupancy_map.py b/d123/geometry/occupancy_map.py index d8df5a1d..c9886a14 100644 --- a/d123/geometry/occupancy_map.py +++ b/d123/geometry/occupancy_map.py @@ -79,7 +79,7 @@ def geometries(self) -> Sequence[BaseGeometry]: return self._geometries @property - def token_to_idx(self) -> Dict[Union[int, str], int]: + def id_to_idx(self) -> Dict[Union[int, str], int]: """Mapping from geometry IDs to indices in the occupancy map. :return: dictionary of IDs and indices diff --git a/d123/geometry/test/test_bounding_box.py b/d123/geometry/test/test_bounding_box.py new file mode 100644 index 00000000..545c511c --- /dev/null +++ b/d123/geometry/test/test_bounding_box.py @@ -0,0 +1,215 @@ +import unittest + +import numpy as np +import shapely.geometry as geom + +from d123.common.utils.mixin import ArrayMixin +from d123.geometry import BoundingBoxSE2, BoundingBoxSE3, Point2D, Point3D, StateSE2, StateSE3 +from d123.geometry.geometry_index import ( + BoundingBoxSE2Index, + BoundingBoxSE3Index, + Corners2DIndex, + Corners3DIndex, + Point2DIndex, +) + + +class TestBoundingBoxSE2(unittest.TestCase): + """Unit tests for BoundingBoxSE2 class.""" + + def setUp(self): + """Set up test fixtures.""" + self.center = StateSE2(1.0, 2.0, 0.5) + self.length = 4.0 + self.width = 2.0 + self.bbox = BoundingBoxSE2(self.center, self.length, self.width) + + def test_init(self): + """Test BoundingBoxSE2 initialization.""" + bbox = BoundingBoxSE2(self.center, self.length, self.width) + self.assertEqual(bbox.length, self.length) + self.assertEqual(bbox.width, self.width) + np.testing.assert_array_equal(bbox.center.array, self.center.array) + + def test_from_array(self): + """Test BoundingBoxSE2.from_array method.""" + array = np.array([1.0, 2.0, 0.5, 4.0, 2.0]) + bbox = BoundingBoxSE2.from_array(array) + np.testing.assert_array_equal(bbox.array, array) + + def test_from_array_copy(self): + """Test BoundingBoxSE2.from_array with copy parameter.""" + array = np.array([1.0, 2.0, 0.5, 4.0, 2.0]) + bbox_copy = BoundingBoxSE2.from_array(array, copy=True) + bbox_no_copy = BoundingBoxSE2.from_array(array, copy=False) + + array[0] = 999.0 + self.assertNotEqual(bbox_copy.array[0], 999.0) + self.assertEqual(bbox_no_copy.array[0], 999.0) + + def 
test_properties(self): + """Test BoundingBoxSE2 properties.""" + self.assertEqual(self.bbox.length, self.length) + self.assertEqual(self.bbox.width, self.width) + np.testing.assert_array_equal(self.bbox.center.array, self.center.array) + np.testing.assert_array_equal(self.bbox.center_se2.array, self.center.array) + + def test_array_property(self): + """Test array property.""" + expected = np.array([1.0, 2.0, 0.5, 4.0, 2.0]) + np.testing.assert_array_equal(self.bbox.array, expected) + + def test_array_mixin(self): + """Test that BoundingBoxSE2 is an instance of ArrayMixin.""" + self.assertIsInstance(self.bbox, ArrayMixin) + + expected = np.array([1.0, 2.0, 0.5, 4.0, 2.0], dtype=np.float16) + output_array = np.array(self.bbox, dtype=np.float16) + np.testing.assert_array_equal(output_array, expected) + self.assertEqual(output_array.dtype, np.float16) + self.assertEqual(output_array.shape, (len(BoundingBoxSE2Index),)) + + def test_bounding_box_se2_property(self): + """Test bounding_box_se2 property returns self.""" + self.assertIs(self.bbox.bounding_box_se2, self.bbox) + + def test_corners_array(self): + """Test corners_array property.""" + corners = self.bbox.corners_array + self.assertEqual(corners.shape, (len(Corners2DIndex), len(Point2DIndex))) + self.assertIsInstance(corners, np.ndarray) + + def test_corners_dict(self): + """Test corners_dict property.""" + corners_dict = self.bbox.corners_dict + self.assertEqual(len(corners_dict), len(Corners2DIndex)) + for index in Corners2DIndex: + self.assertIn(index, corners_dict) + self.assertIsInstance(corners_dict[index], Point2D) + + def test_shapely_polygon(self): + """Test shapely_polygon property.""" + polygon = self.bbox.shapely_polygon + self.assertIsInstance(polygon, geom.Polygon) + self.assertAlmostEqual(polygon.area, self.length * self.width) + + def test_array_assertions(self): + """Test array assertions in from_array.""" + # Test 2D array + with self.assertRaises(AssertionError): + BoundingBoxSE2.from_array(np.array([[1, 2, 3, 4, 5]])) + + # Test wrong size + with self.assertRaises(AssertionError): + BoundingBoxSE2.from_array(np.array([1, 2, 3, 4])) + + +class TestBoundingBoxSE3(unittest.TestCase): + """Unit tests for BoundingBoxSE3 class.""" + + def setUp(self): + """Set up test fixtures.""" + self.center = StateSE3(1.0, 2.0, 3.0, 0.1, 0.2, 0.3) + self.length = 4.0 + self.width = 2.0 + self.height = 1.5 + self.bbox = BoundingBoxSE3(self.center, self.length, self.width, self.height) + + def test_init(self): + """Test BoundingBoxSE3 initialization.""" + bbox = BoundingBoxSE3(self.center, self.length, self.width, self.height) + self.assertEqual(bbox.length, self.length) + self.assertEqual(bbox.width, self.width) + self.assertEqual(bbox.height, self.height) + np.testing.assert_array_equal(bbox.center.array, self.center.array) + + def test_from_array(self): + """Test BoundingBoxSE3.from_array method.""" + array = np.array([1.0, 2.0, 3.0, 0.1, 0.2, 0.3, 4.0, 2.0, 1.5]) + bbox = BoundingBoxSE3.from_array(array) + np.testing.assert_array_equal(bbox.array, array) + + def test_from_array_copy(self): + """Test BoundingBoxSE3.from_array with copy parameter.""" + array = np.array([1.0, 2.0, 3.0, 0.1, 0.2, 0.3, 4.0, 2.0, 1.5]) + bbox_copy = BoundingBoxSE3.from_array(array, copy=True) + bbox_no_copy = BoundingBoxSE3.from_array(array, copy=False) + + array[0] = 999.0 + self.assertNotEqual(bbox_copy.array[0], 999.0) + self.assertEqual(bbox_no_copy.array[0], 999.0) + + def test_properties(self): + """Test BoundingBoxSE3 properties.""" + 
self.assertEqual(self.bbox.length, self.length) + self.assertEqual(self.bbox.width, self.width) + self.assertEqual(self.bbox.height, self.height) + np.testing.assert_array_equal(self.bbox.center.array, self.center.array) + np.testing.assert_array_equal(self.bbox.center_se3.array, self.center.array) + + def test_array_property(self): + """Test array property.""" + expected = np.array([1.0, 2.0, 3.0, 0.1, 0.2, 0.3, 4.0, 2.0, 1.5]) + np.testing.assert_array_equal(self.bbox.array, expected) + + def test_array_mixin(self): + """Test that BoundingBoxSE3 is an instance of ArrayMixin.""" + self.assertIsInstance(self.bbox, ArrayMixin) + + expected = np.array([1.0, 2.0, 3.0, 0.1, 0.2, 0.3, 4.0, 2.0, 1.5], dtype=np.float16) + output_array = np.array(self.bbox, dtype=np.float16) + np.testing.assert_array_equal(output_array, expected) + self.assertEqual(output_array.dtype, np.float16) + self.assertEqual(output_array.shape, (len(BoundingBoxSE3Index),)) + + def test_bounding_box_se2_property(self): + """Test bounding_box_se2 property.""" + bbox_2d = self.bbox.bounding_box_se2 + self.assertIsInstance(bbox_2d, BoundingBoxSE2) + self.assertEqual(bbox_2d.length, self.length) + self.assertEqual(bbox_2d.width, self.width) + self.assertEqual(bbox_2d.center.x, self.center.x) + self.assertEqual(bbox_2d.center.y, self.center.y) + self.assertEqual(bbox_2d.center.yaw, self.center.yaw) + + def test_corners_array(self): + """Test corners_array property.""" + corners = self.bbox.corners_array + self.assertEqual(corners.shape, (8, 3)) + self.assertIsInstance(corners, np.ndarray) + + def test_corners_dict(self): + """Test corners_dict property.""" + corners_dict = self.bbox.corners_dict + self.assertEqual(len(corners_dict), 8) + for index in Corners3DIndex: + self.assertIn(index, corners_dict) + self.assertIsInstance(corners_dict[index], Point3D) + + def test_shapely_polygon(self): + """Test shapely_polygon property.""" + polygon = self.bbox.shapely_polygon + self.assertIsInstance(polygon, geom.Polygon) + self.assertAlmostEqual(polygon.area, self.length * self.width) + + def test_array_assertions(self): + """Test array assertions in from_array.""" + # Test 2D array + with self.assertRaises(AssertionError): + BoundingBoxSE3.from_array(np.array([[1, 2, 3, 4, 5, 6, 7, 8, 9]])) + + # Test wrong size + with self.assertRaises(AssertionError): + BoundingBoxSE3.from_array(np.array([1, 2, 3, 4, 5, 6, 7, 8])) + + def test_zero_dimensions(self): + """Test bounding box with zero dimensions.""" + center = StateSE3(0.0, 0.0, 0.0, 0.0, 0.0, 0.0) + bbox = BoundingBoxSE3(center, 0.0, 0.0, 0.0) + self.assertEqual(bbox.length, 0.0) + self.assertEqual(bbox.width, 0.0) + self.assertEqual(bbox.height, 0.0) + + +if __name__ == "__main__": + unittest.main() diff --git a/d123/geometry/test/test_occupancy_map.py b/d123/geometry/test/test_occupancy_map.py new file mode 100644 index 00000000..7344f4de --- /dev/null +++ b/d123/geometry/test/test_occupancy_map.py @@ -0,0 +1,285 @@ +import unittest + +import numpy as np +import shapely.geometry as geom + +from d123.geometry import OccupancyMap2D + + +class TestOccupancyMap2D(unittest.TestCase): + """Unit tests for OccupancyMap2D class.""" + + def setUp(self): + """Set up test fixtures with various geometries.""" + self.square1 = geom.Polygon([(0, 0), (2, 0), (2, 2), (0, 2)]) + self.square2 = geom.Polygon([(3, 3), (5, 3), (5, 5), (3, 5)]) + self.circle = geom.Point(1, 1).buffer(0.5) + self.line = geom.LineString([(0, 0), (1, 1)]) + + self.geometries = [self.square1, self.square2, self.circle, 
self.line] + self.string_ids = ["square1", "square2", "circle", "line"] + self.int_ids = [1, 2, 3, 4] + + def test_init_with_default_ids(self): + """Test initialization with default string IDs.""" + occ_map = OccupancyMap2D(self.geometries) + + self.assertEqual(len(occ_map), 4) + self.assertEqual(occ_map.ids, ["0", "1", "2", "3"]) + self.assertEqual(len(occ_map.geometries), 4) + + def test_init_with_string_ids(self): + """Test initialization with custom string IDs.""" + occ_map = OccupancyMap2D(self.geometries, ids=self.string_ids) + + self.assertEqual(len(occ_map), 4) + self.assertEqual(occ_map.ids, self.string_ids) + self.assertEqual(occ_map["square1"], self.square1) + + def test_init_with_int_ids(self): + """Test initialization with integer IDs.""" + occ_map = OccupancyMap2D(self.geometries, ids=self.int_ids) + + self.assertEqual(len(occ_map), 4) + self.assertEqual(occ_map.ids, self.int_ids) + self.assertEqual(occ_map[1], self.square1) + + def test_init_with_mismatched_ids_length(self): + """Test that initialization fails with mismatched IDs length.""" + with self.assertRaises(AssertionError): + OccupancyMap2D(self.geometries, ids=["id1", "id2"]) + + def test_init_with_custom_node_capacity(self): + """Test initialization with custom node capacity.""" + occ_map = OccupancyMap2D(self.geometries, node_capacity=5) + self.assertEqual(occ_map._node_capacity, 5) + + def test_from_dict_constructor(self): + """Test construction from dictionary.""" + geometry_dict = {"square": self.square1, "circle": self.circle, "line": self.line} + + occ_map = OccupancyMap2D.from_dict(geometry_dict) + + self.assertEqual(len(occ_map), 3) + self.assertEqual(set(occ_map.ids), set(["square", "circle", "line"])) + self.assertEqual(occ_map["square"], self.square1) + + def test_getitem_string_id(self): + """Test geometry retrieval by string ID.""" + occ_map = OccupancyMap2D(self.geometries, ids=self.string_ids) + + self.assertEqual(occ_map["square1"], self.square1) + self.assertEqual(occ_map["circle"], self.circle) + + def test_getitem_int_id(self): + """Test geometry retrieval by integer ID.""" + occ_map = OccupancyMap2D(self.geometries, ids=self.int_ids) + + self.assertEqual(occ_map[1], self.square1) + self.assertEqual(occ_map[3], self.circle) + + def test_getitem_invalid_id(self): + """Test that invalid ID raises KeyError.""" + occ_map = OccupancyMap2D(self.geometries, ids=self.string_ids) + + with self.assertRaises(KeyError): + _ = occ_map["nonexistent"] + + def test_len(self): + """Test length property.""" + occ_map = OccupancyMap2D(self.geometries) + self.assertEqual(len(occ_map), 4) + + empty_map = OccupancyMap2D([]) + self.assertEqual(len(empty_map), 0) + + def test_ids_property(self): + """Test IDs property getter.""" + occ_map = OccupancyMap2D(self.geometries, ids=self.string_ids) + self.assertEqual(occ_map.ids, self.string_ids) + + def test_geometries_property(self): + """Test geometries property getter.""" + occ_map = OccupancyMap2D(self.geometries) + self.assertEqual(list(occ_map.geometries), self.geometries) + + def test_id_to_idx_property(self): + """Test id_to_idx property.""" + occ_map = OccupancyMap2D(self.geometries, ids=self.string_ids) + expected_mapping = {"square1": 0, "square2": 1, "circle": 2, "line": 3} + self.assertEqual(occ_map.id_to_idx, expected_mapping) + + def test_intersects_with_overlapping_geometry(self): + """Test intersects method with overlapping geometry.""" + occ_map = OccupancyMap2D(self.geometries, ids=self.string_ids) + + # Create a geometry that intersects with square1 and 
circle + query_geom = geom.Polygon([(0.5, 0.5), (1.5, 0.5), (1.5, 1.5), (0.5, 1.5)]) + intersecting_ids = occ_map.intersects(query_geom) + + # NOTE: square2 does not intersect with the query geometry, the rest does. + self.assertIn("square1", intersecting_ids) + self.assertIn("circle", intersecting_ids) + self.assertIn("line", intersecting_ids) + self.assertEqual(len(intersecting_ids), 3) + + def test_intersects_with_non_overlapping_geometry(self): + """Test intersects method with non-overlapping geometry.""" + occ_map = OccupancyMap2D(self.geometries, ids=self.string_ids) + + # Create a geometry that doesn't intersect with any + query_geom = geom.Polygon([(10, 10), (12, 10), (12, 12), (10, 12)]) + intersecting_ids = occ_map.intersects(query_geom) + + self.assertEqual(len(intersecting_ids), 0) + + def test_query_with_intersects_predicate(self): + """Test query method with intersects predicate.""" + occ_map = OccupancyMap2D(self.geometries, ids=self.string_ids) + + query_geom = geom.Point(1, 1) + indices = occ_map.query(query_geom, predicate="intersects") + self.assertIsInstance(indices, np.ndarray) + self.assertIn(occ_map.id_to_idx["square1"], indices) + self.assertIn(occ_map.id_to_idx["circle"], indices) + self.assertIn(occ_map.id_to_idx["line"], indices) + self.assertNotIn(occ_map.id_to_idx["square2"], indices) + + def test_query_with_contains_predicate(self): + """Test query method with contains predicate.""" + occ_map = OccupancyMap2D(self.geometries, ids=self.string_ids) + + query_geom = geom.Point(4, 4) + indices = occ_map.query(query_geom, predicate="within") + + self.assertIsInstance(indices, np.ndarray) + self.assertIn(occ_map.id_to_idx["square2"], indices) + self.assertNotIn(occ_map.id_to_idx["square1"], indices) + self.assertNotIn(occ_map.id_to_idx["circle"], indices) + self.assertNotIn(occ_map.id_to_idx["line"], indices) + + def test_query_with_distance(self): + """Test query method with distance parameter.""" + occ_map = OccupancyMap2D(self.geometries, ids=self.string_ids) + + query_geom = geom.Point(4, 4) + indices = occ_map.query(query_geom, predicate="dwithin", distance=3.0) + + self.assertIsInstance(indices, np.ndarray) + self.assertIn(occ_map.id_to_idx["square2"], indices) + self.assertIn(occ_map.id_to_idx["square1"], indices) + self.assertNotIn(occ_map.id_to_idx["circle"], indices) + self.assertNotIn(occ_map.id_to_idx["line"], indices) + + def test_query_nearest_basic(self): + """Test query_nearest method basic functionality.""" + occ_map = OccupancyMap2D(self.geometries, ids=self.string_ids) + + query_geom = geom.Point(4, 4) + nearest_indices = occ_map.query_nearest(query_geom) + + self.assertIsInstance(nearest_indices, np.ndarray) + + def test_query_nearest_with_distance(self): + """Test query_nearest method with return_distance=True.""" + occ_map = OccupancyMap2D(self.geometries) + + query_geom = geom.Point(1, 1) + result = occ_map.query_nearest(query_geom, return_distance=True) + + self.assertIsInstance(result, tuple) + self.assertEqual(len(result), 2) + indices, distances = result + self.assertIsInstance(indices, np.ndarray) + self.assertIsInstance(distances, np.ndarray) + + def test_query_nearest_with_max_distance(self): + """Test query_nearest method with max_distance.""" + occ_map = OccupancyMap2D(self.geometries) + + query_geom = geom.Point(10, 10) + + nearest_indices = occ_map.query_nearest(query_geom, max_distance=1.0) + self.assertIsInstance(nearest_indices, np.ndarray) + self.assertEqual(len(nearest_indices), 0) + + nearest_indices = 
occ_map.query_nearest(query_geom, max_distance=10.0) + self.assertIsInstance(nearest_indices, np.ndarray) + self.assertTrue(len(nearest_indices) > 0) + + def test_contains_vectorized_single_point(self): + """Test contains_vectorized with a single point.""" + occ_map = OccupancyMap2D(self.geometries) + + points = np.array([[1.0, 1.0]]) # Point inside square1 and circle + result = occ_map.contains_vectorized(points) + + self.assertEqual(result.shape, (4, 1)) + self.assertIsInstance(result, np.ndarray) + self.assertEqual(result.dtype, bool) + + def test_contains_vectorized_multiple_points(self): + """Test contains_vectorized with multiple points.""" + occ_map = OccupancyMap2D(self.geometries) + + points = np.array( + [ + [1.0, 1.0], # Inside square1 and circle + [4.0, 4.0], # Inside square2 + [10.0, 10.0], # Outside all geometries + ] + ) + result = occ_map.contains_vectorized(points) + + self.assertEqual(result.shape, (4, 3)) + self.assertIsInstance(result, np.ndarray) + self.assertEqual(result.dtype, bool) + + # Check specific containment results + # Point [1.0, 1.0] should be in square1 (index 0) and circle (index 2) + self.assertTrue(result[0, 0]) # square1 contains point 0 + self.assertFalse(result[1, 0]) # square2 does not contain point 0 + self.assertTrue(result[2, 0]) # circle contains point 0 + self.assertFalse(result[3, 0]) # line does not contain point 0 + + # Point [4.0, 4.0] should be in square2 (index 1) only + self.assertFalse(result[0, 1]) # square1 does not contain point 1 + self.assertTrue(result[1, 1]) # square2 contains point 1 + self.assertFalse(result[2, 1]) # circle does not contain point 1 + self.assertFalse(result[3, 1]) # line does not contain point 1 + + # Point [10.0, 10.0] should not be in any geometry + self.assertFalse(result[0, 2]) # square1 does not contain point 2 + self.assertFalse(result[1, 2]) # square2 does not contain point 2 + self.assertFalse(result[2, 2]) # circle does not contain point 2 + self.assertFalse(result[3, 2]) # line does not contain point 2 + + def test_contains_vectorized_empty_points(self): + """Test contains_vectorized with empty points array.""" + occ_map = OccupancyMap2D(self.geometries) + + points = np.empty((0, 2)) + result = occ_map.contains_vectorized(points) + + self.assertEqual(result.shape, (4, 0)) + + def test_empty_occupancy_map(self): + """Test behavior with empty geometry list.""" + occ_map = OccupancyMap2D([]) + + self.assertEqual(len(occ_map), 0) + self.assertEqual(occ_map.ids, []) + self.assertEqual(len(occ_map.geometries), 0) + + def test_single_geometry_map(self): + """Test behavior with single geometry.""" + occ_map = OccupancyMap2D([self.square1], ids=["single"]) + + self.assertEqual(len(occ_map), 1) + self.assertEqual(occ_map.ids, ["single"]) + self.assertEqual(occ_map["single"], self.square1) + + +if __name__ == "__main__": + + unittest.main() diff --git a/notebooks/viz/bev_matplotlib.ipynb b/notebooks/viz/bev_matplotlib.ipynb index 9052f79b..32328cbf 100644 --- a/notebooks/viz/bev_matplotlib.ipynb +++ b/notebooks/viz/bev_matplotlib.ipynb @@ -250,7 +250,7 @@ " return fig, ax\n", "\n", "\n", - "scene_index = 3\n", + "scene_index = 0\n", "iteration = 99\n", "fig, ax = plot_scene_at_iteration(scenes[scene_index], iteration=iteration, radius=60)\n", "plt.show()\n", diff --git a/notebooks/viz/viser_testing_v2_scene.ipynb b/notebooks/viz/viser_testing_v2_scene.ipynb index 07d91d1d..f2371058 100644 --- a/notebooks/viz/viser_testing_v2_scene.ipynb +++ b/notebooks/viz/viser_testing_v2_scene.ipynb @@ -28,9 +28,9 @@ "\n", 
"\n", "# splits = [\"nuplan_private_test\"]\n", - "splits = [\"carla\"]\n", + "# splits = [\"carla\"]\n", "# splits = [\"wopd_train\"]\n", - "# splits = [\"av2-sensor-mini_train\"]\n", + "splits = [\"av2-sensor-mini_train\"]\n", "log_names = None\n", "\n", "scene_tokens = None\n", @@ -99,7 +99,7 @@ ], "metadata": { "kernelspec": { - "display_name": "d123", + "display_name": "d123_dev", "language": "python", "name": "python3" }, From 7a9775da4e5007aa011fcd27b9a454a47cd22053 Mon Sep 17 00:00:00 2001 From: DanielDauner Date: Mon, 25 Aug 2025 18:44:10 +0200 Subject: [PATCH 018/145] Refactor both transform se2 and se3. NOTE: Both are untested --- .../vehicle_state/vehicle_parameters.py | 8 +- .../visualization/matplotlib/camera copy.py | 329 ------------------ .../common/visualization/matplotlib/camera.py | 2 +- .../visualization/matplotlib/observation.py | 9 +- d123/common/visualization/viser/utils.py | 7 +- d123/common/visualization/viser/utils_v2.py | 2 +- .../wopd/wopd_data_converter.py | 2 +- d123/geometry/geometry_index.py | 8 + d123/geometry/se.py | 2 +- d123/geometry/transform/rotation.py | 27 -- d123/geometry/transform/se3.py | 179 ---------- d123/geometry/transform/tranform_2d.py | 22 -- .../{se2_array.py => transform_se2.py} | 91 ++++- d123/geometry/transform/transform_se3.py | 177 ++++++++++ d123/simulation/agents/smart_agents.py | 2 +- .../gym_observation/raster/raster_renderer.py | 2 +- .../feature_builder/smart_feature_builder.py | 2 +- 17 files changed, 285 insertions(+), 586 deletions(-) delete mode 100644 d123/common/visualization/matplotlib/camera copy.py delete mode 100644 d123/geometry/transform/rotation.py delete mode 100644 d123/geometry/transform/se3.py delete mode 100644 d123/geometry/transform/tranform_2d.py rename d123/geometry/transform/{se2_array.py => transform_se2.py} (51%) create mode 100644 d123/geometry/transform/transform_se3.py diff --git a/d123/common/datatypes/vehicle_state/vehicle_parameters.py b/d123/common/datatypes/vehicle_state/vehicle_parameters.py index c8a84828..19ff334b 100644 --- a/d123/common/datatypes/vehicle_state/vehicle_parameters.py +++ b/d123/common/datatypes/vehicle_state/vehicle_parameters.py @@ -1,8 +1,8 @@ from dataclasses import dataclass from d123.geometry import StateSE2, StateSE3, Vector2D -from d123.geometry.transform.se3 import translate_se3_along_x, translate_se3_along_z -from d123.geometry.transform.tranform_2d import translate_along_yaw +from d123.geometry.transform.transform_se2 import translate_se2_along_yaw +from d123.geometry.transform.transform_se3 import translate_se3_along_x, translate_se3_along_z # TODO: Add more vehicle parameters, potentially extend the parameters. @@ -115,7 +115,7 @@ def center_se2_to_rear_axle_se2(center_se2: StateSE2, vehicle_parameters: Vehicl :param vehicle_parameters: The vehicle parameters. :return: The rear axle state in 2D. """ - return translate_along_yaw(center_se2, Vector2D(-vehicle_parameters.rear_axle_to_center_longitudinal, 0)) + return translate_se2_along_yaw(center_se2, Vector2D(-vehicle_parameters.rear_axle_to_center_longitudinal, 0)) def rear_axle_se2_to_center_se2(rear_axle_se2: StateSE2, vehicle_parameters: VehicleParameters) -> StateSE2: @@ -125,4 +125,4 @@ def rear_axle_se2_to_center_se2(rear_axle_se2: StateSE2, vehicle_parameters: Veh :param vehicle_parameters: The vehicle parameters. :return: The center state in 2D. 
""" - return translate_along_yaw(rear_axle_se2, Vector2D(vehicle_parameters.rear_axle_to_center_longitudinal, 0)) + return translate_se2_along_yaw(rear_axle_se2, Vector2D(vehicle_parameters.rear_axle_to_center_longitudinal, 0)) diff --git a/d123/common/visualization/matplotlib/camera copy.py b/d123/common/visualization/matplotlib/camera copy.py deleted file mode 100644 index ed3a7d2a..00000000 --- a/d123/common/visualization/matplotlib/camera copy.py +++ /dev/null @@ -1,329 +0,0 @@ -# from typing import List, Optional, Tuple - -from typing import List, Optional, Tuple - -import cv2 -import matplotlib.pyplot as plt -import numpy as np -import numpy.typing as npt - -# from PIL import ImageColor -from pyquaternion import Quaternion - -from d123.common.datatypes.detection.detection import BoxDetectionSE3, BoxDetectionWrapper -from d123.common.datatypes.detection.detection_types import DetectionType -from d123.common.datatypes.sensor.camera import Camera -from d123.common.datatypes.vehicle_state.ego_state import EgoStateSE3 -from d123.common.visualization.color.default import BOX_DETECTION_CONFIG -from d123.geometry import BoundingBoxSE3Index, Corners3DIndex, StateSE3 -from d123.geometry.transform.se3 import convert_absolute_to_relative_se3_array, get_rotation_matrix - -# from navsim.common.dataclasses import Annotations, Camera, Lidar -# from navsim.common.enums import BoundingBoxIndex, LidarIndex -# from navsim.planning.scenario_builder.navsim_scenario_utils import tracked_object_types -# from navsim.visualization.config import AGENT_CONFIG -# from navsim.visualization.lidar import filter_lidar_pc, get_lidar_pc_color - - -def add_camera_ax(ax: plt.Axes, camera: Camera) -> plt.Axes: - """ - Adds camera image to matplotlib ax object - :param ax: matplotlib ax object - :param camera: navsim camera dataclass - :return: ax object with image - """ - ax.imshow(camera.image) - return ax - - -# FIXME: -# def add_lidar_to_camera_ax(ax: plt.Axes, camera: Camera, lidar: Lidar) -> plt.Axes: -# """ -# Adds camera image with lidar point cloud on matplotlib ax object -# :param ax: matplotlib ax object -# :param camera: navsim camera dataclass -# :param lidar: navsim lidar dataclass -# :return: ax object with image -# """ - -# image, lidar_pc = camera.image.copy(), lidar.lidar_pc.copy() -# image_height, image_width = image.shape[:2] - -# lidar_pc = filter_lidar_pc(lidar_pc) -# lidar_pc_colors = np.array(get_lidar_pc_color(lidar_pc)) - -# pc_in_cam, pc_in_fov_mask = _transform_pcs_to_images( -# lidar_pc, -# camera.sensor2lidar_rotation, -# camera.sensor2lidar_translation, -# camera.intrinsics, -# img_shape=(image_height, image_width), -# ) - -# for (x, y), color in zip(pc_in_cam[pc_in_fov_mask], lidar_pc_colors[pc_in_fov_mask]): -# color = (int(color[0]), int(color[1]), int(color[2])) -# cv2.circle(image, (int(x), int(y)), 5, color, -1) - -# ax.imshow(image) -# return ax - - -def add_box_detections_to_camera_ax( - ax: plt.Axes, - camera: Camera, - box_detections: BoxDetectionWrapper, - ego_state_se3: EgoStateSE3, -) -> plt.Axes: - - box_detection_array = np.zeros((len(box_detections.box_detections), len(BoundingBoxSE3Index)), dtype=np.float64) - detection_types = np.array( - [detection.metadata.detection_type for detection in box_detections.box_detections], dtype=object - ) - for idx, box_detection in enumerate(box_detections.box_detections): - assert isinstance( - box_detection, BoxDetectionSE3 - ), f"Box detection must be of type BoxDetectionSE3, got {type(box_detection)}" - box_detection_array[idx] = 
box_detection.bounding_box_se3.array - - box_detection_array[..., BoundingBoxSE3Index.STATE_SE3] = convert_absolute_to_relative_se3_array( - ego_state_se3.rear_axle_se3, box_detection_array[..., BoundingBoxSE3Index.STATE_SE3] - ) - detection_positions, detection_extents, detection_yaws = _transform_annotations_to_camera( - box_detection_array, camera.extrinsic - ) - - corners_norm = np.stack(np.unravel_index(np.arange(len(Corners3DIndex)), [2] * 3), axis=1) - corners_norm = corners_norm[[0, 1, 3, 2, 4, 5, 7, 6]] - corners_norm = corners_norm - np.array([0.5, 0.5, 0.5]) - corners = detection_extents.reshape([-1, 1, 3]) * corners_norm.reshape([1, 8, 3]) - - corners = _rotation_3d_in_axis(corners, detection_yaws, axis=1) - corners += detection_positions.reshape(-1, 1, 3) - - # Then draw project corners to image. - box_corners, corners_pc_in_fov = _transform_points_to_image(corners.reshape(-1, 3), camera.metadata.intrinsic) - box_corners = box_corners.reshape(-1, 8, 2) - corners_pc_in_fov = corners_pc_in_fov.reshape(-1, 8) - valid_corners = corners_pc_in_fov.any(-1) - - box_corners, detection_types = box_corners[valid_corners], detection_types[valid_corners] - image = _plot_rect_3d_on_img(camera.image.copy(), box_corners, detection_types) - - ax.imshow(image) - return ax - - -def _transform_annotations_to_camera( - boxes: npt.NDArray[np.float32], extrinsic: npt.NDArray[np.float64] -) -> npt.NDArray[np.float32]: - """ - Helper function to transform bounding boxes into camera frame - TODO: Refactor - :param boxes: array representation of bounding boxes - :param sensor2lidar_rotation: camera rotation - :param sensor2lidar_translation: camera translation - :return: bounding boxes in camera coordinates - """ - sensor2ego_rotation = extrinsic[:3, :3] - sensor2ego_translation = extrinsic[:3, 3] - - locs, rots = ( - boxes[:, BoundingBoxSE3Index.XYZ], - boxes[:, BoundingBoxSE3Index.YAW], - ) - dims_cam = boxes[ - :, [BoundingBoxSE3Index.LENGTH, BoundingBoxSE3Index.HEIGHT, BoundingBoxSE3Index.WIDTH] - ] # l, w, h -> l, h, w - - rots_cam = np.zeros_like(rots) - for idx, state_se3_array in enumerate(boxes[:, BoundingBoxSE3Index.STATE_SE3]): - rot = Quaternion(matrix=get_rotation_matrix(StateSE3.from_array(state_se3_array))) - rot = Quaternion(matrix=sensor2ego_rotation).inverse * rot - rots_cam[idx] = -rot.yaw_pitch_roll[0] - - lidar2cam_r = np.linalg.inv(sensor2ego_rotation) - lidar2cam_t = sensor2ego_translation @ lidar2cam_r.T - lidar2cam_rt = np.eye(4) - lidar2cam_rt[:3, :3] = lidar2cam_r.T - lidar2cam_rt[3, :3] = -lidar2cam_t - - locs_cam = np.concatenate([locs, np.ones_like(locs)[:, :1]], -1) # -1, 4 - locs_cam = lidar2cam_rt.T @ locs_cam.T - locs_cam = locs_cam.T - locs_cam = locs_cam[:, :-1] - return locs_cam, dims_cam, rots_cam - - -def _rotation_3d_in_axis(points: npt.NDArray[np.float32], angles: npt.NDArray[np.float32], axis: int = 0): - """ - Rotate 3D points by angles according to axis. 
- TODO: Refactor - :param points: array of points - :param angles: array of angles - :param axis: axis to perform rotation, defaults to 0 - :raises value: _description_ - :raises ValueError: if axis invalid - :return: rotated points - """ - rot_sin = np.sin(angles) - rot_cos = np.cos(angles) - ones = np.ones_like(rot_cos) - zeros = np.zeros_like(rot_cos) - if axis == 1: - rot_mat_T = np.stack( - [ - np.stack([rot_cos, zeros, -rot_sin]), - np.stack([zeros, ones, zeros]), - np.stack([rot_sin, zeros, rot_cos]), - ] - ) - elif axis == 2 or axis == -1: - rot_mat_T = np.stack( - [ - np.stack([rot_cos, -rot_sin, zeros]), - np.stack([rot_sin, rot_cos, zeros]), - np.stack([zeros, zeros, ones]), - ] - ) - elif axis == 0: - rot_mat_T = np.stack( - [ - np.stack([zeros, rot_cos, -rot_sin]), - np.stack([zeros, rot_sin, rot_cos]), - np.stack([ones, zeros, zeros]), - ] - ) - else: - raise ValueError(f"axis should in range [0, 1, 2], got {axis}") - return np.einsum("aij,jka->aik", points, rot_mat_T) - - -def _plot_rect_3d_on_img( - image: npt.NDArray[np.float32], - box_corners: npt.NDArray[np.float32], - detection_types: List[DetectionType], - thickness: int = 1, -) -> npt.NDArray[np.uint8]: - """ - Plot the boundary lines of 3D rectangular on 2D images. - TODO: refactor - :param image: The numpy array of image. - :param box_corners: Coordinates of the corners of 3D, shape of [N, 8, 2]. - :param box_labels: labels of boxes for coloring - :param thickness: pixel width of liens, defaults to 3 - :return: image with 3D bounding boxes - """ - line_indices = ( - (0, 1), - (0, 3), - (0, 4), - (1, 2), - (1, 5), - (3, 2), - (3, 7), - (4, 5), - (4, 7), - (2, 6), - (5, 6), - (6, 7), - ) - for i in range(len(box_corners)): - color = BOX_DETECTION_CONFIG[detection_types[i]].fill_color.rgb - corners = box_corners[i].astype(np.int64) - for start, end in line_indices: - cv2.line( - image, - (corners[start, 0], corners[start, 1]), - (corners[end, 0], corners[end, 1]), - color, - thickness, - cv2.LINE_AA, - ) - return image.astype(np.uint8) - - -def _transform_points_to_image( - points: npt.NDArray[np.float32], - intrinsic: npt.NDArray[np.float32], - image_shape: Optional[Tuple[int, int]] = None, - eps: float = 1e-3, -) -> Tuple[npt.NDArray[np.float32], npt.NDArray[np.bool_]]: - """ - Transforms points in camera frame to image pixel coordinates - TODO: refactor - :param points: points in camera frame - :param intrinsic: camera intrinsics - :param image_shape: shape of image in pixel - :param eps: lower threshold of points, defaults to 1e-3 - :return: points in pixel coordinates, mask of values in frame - """ - points = points[:, :3] - - viewpad = np.eye(4) - viewpad[: intrinsic.shape[0], : intrinsic.shape[1]] = intrinsic - - pc_img = np.concatenate([points, np.ones_like(points)[:, :1]], -1) - pc_img = viewpad @ pc_img.T - pc_img = pc_img.T - - cur_pc_in_fov = pc_img[:, 2] > eps - pc_img = pc_img[..., 0:2] / np.maximum(pc_img[..., 2:3], np.ones_like(pc_img[..., 2:3]) * eps) - if image_shape is not None: - img_h, img_w = image_shape - cur_pc_in_fov = ( - cur_pc_in_fov - & (pc_img[:, 0] < (img_w - 1)) - & (pc_img[:, 0] > 0) - & (pc_img[:, 1] < (img_h - 1)) - & (pc_img[:, 1] > 0) - ) - return pc_img, cur_pc_in_fov - - -# def _transform_pcs_to_images( -# lidar_pc: npt.NDArray[np.float32], -# sensor2lidar_rotation: npt.NDArray[np.float32], -# sensor2lidar_translation: npt.NDArray[np.float32], -# intrinsic: npt.NDArray[np.float32], -# img_shape: Optional[Tuple[int, int]] = None, -# eps: float = 1e-3, -# ) -> 
Tuple[npt.NDArray[np.float32], npt.NDArray[np.bool_]]: -# """ -# Transforms points in camera frame to image pixel coordinates -# TODO: refactor -# :param lidar_pc: lidar point cloud -# :param sensor2lidar_rotation: camera rotation -# :param sensor2lidar_translation: camera translation -# :param intrinsic: camera intrinsics -# :param img_shape: image shape in pixels, defaults to None -# :param eps: threshold for lidar pc height, defaults to 1e-3 -# :return: lidar pc in pixel coordinates, mask of values in frame -# """ -# pc_xyz = lidar_pc[LidarIndex.POSITION, :].T - -# lidar2cam_r = np.linalg.inv(sensor2lidar_rotation) -# lidar2cam_t = sensor2lidar_translation @ lidar2cam_r.T -# lidar2cam_rt = np.eye(4) -# lidar2cam_rt[:3, :3] = lidar2cam_r.T -# lidar2cam_rt[3, :3] = -lidar2cam_t - -# viewpad = np.eye(4) -# viewpad[: intrinsic.shape[0], : intrinsic.shape[1]] = intrinsic -# lidar2img_rt = viewpad @ lidar2cam_rt.T - -# cur_pc_xyz = np.concatenate([pc_xyz, np.ones_like(pc_xyz)[:, :1]], -1) -# cur_pc_cam = lidar2img_rt @ cur_pc_xyz.T -# cur_pc_cam = cur_pc_cam.T -# cur_pc_in_fov = cur_pc_cam[:, 2] > eps -# cur_pc_cam = cur_pc_cam[..., 0:2] / np.maximum(cur_pc_cam[..., 2:3], np.ones_like(cur_pc_cam[..., 2:3]) * eps) - -# if img_shape is not None: -# img_h, img_w = img_shape -# cur_pc_in_fov = ( -# cur_pc_in_fov -# & (cur_pc_cam[:, 0] < (img_w - 1)) -# & (cur_pc_cam[:, 0] > 0) -# & (cur_pc_cam[:, 1] < (img_h - 1)) -# & (cur_pc_cam[:, 1] > 0) -# ) -# return cur_pc_cam, cur_pc_in_fov diff --git a/d123/common/visualization/matplotlib/camera.py b/d123/common/visualization/matplotlib/camera.py index 2b8ecce4..071a5284 100644 --- a/d123/common/visualization/matplotlib/camera.py +++ b/d123/common/visualization/matplotlib/camera.py @@ -16,7 +16,7 @@ from d123.common.datatypes.vehicle_state.ego_state import EgoStateSE3 from d123.common.visualization.color.default import BOX_DETECTION_CONFIG from d123.geometry import BoundingBoxSE3Index, Corners3DIndex -from d123.geometry.transform.se3 import convert_absolute_to_relative_se3_array +from d123.geometry.transform.transform_se3 import convert_absolute_to_relative_se3_array # from navsim.common.dataclasses import Annotations, Camera, Lidar # from navsim.common.enums import BoundingBoxIndex, LidarIndex diff --git a/d123/common/visualization/matplotlib/observation.py b/d123/common/visualization/matplotlib/observation.py index ecddaa02..6814f600 100644 --- a/d123/common/visualization/matplotlib/observation.py +++ b/d123/common/visualization/matplotlib/observation.py @@ -27,7 +27,9 @@ from d123.dataset.maps.map_datatypes import MapLayer from d123.dataset.scene.abstract_scene import AbstractScene from d123.geometry import BoundingBoxSE2, BoundingBoxSE3, Point2D -from d123.geometry.transform.tranform_2d import translate_along_yaw +from d123.geometry.geometry_index import StateSE2Index +from d123.geometry.transform.transform_se2 import translate_se2_along_yaw +from d123.geometry.vector import Vector2D def add_default_map_on_ax( @@ -156,7 +158,10 @@ def add_bounding_box_to_ax( ) arrow = np.zeros((2, 2), dtype=np.float64) arrow[0] = center_se2.point_2d.array - arrow[1] = translate_along_yaw(center_se2, Point2D(bounding_box.length / 2.0 + 0.5, 0.0)).point_2d.array + arrow[1] = translate_se2_along_yaw( + center_se2, + Vector2D(bounding_box.length / 2.0 + 0.5, 0.0), + ).array[StateSE2Index.XY] ax.plot( arrow[:, 0], arrow[:, 1], diff --git a/d123/common/visualization/viser/utils.py b/d123/common/visualization/viser/utils.py index afa5ea2e..f72fa666 100644 --- 
a/d123/common/visualization/viser/utils.py +++ b/d123/common/visualization/viser/utils.py @@ -16,7 +16,7 @@ from d123.dataset.maps.abstract_map_objects import AbstractLane, AbstractSurfaceMapObject from d123.dataset.scene.abstract_scene import AbstractScene from d123.geometry import BoundingBoxSE3, Point3D, Polyline3D, StateSE3 -from d123.geometry.transform.se3 import convert_relative_to_absolute_points_3d_array +from d123.geometry.transform.transform_se3 import convert_relative_to_absolute_points_3d_array # TODO: Refactor this file. # TODO: Add general utilities for 3D primitives and mesh support. @@ -231,11 +231,8 @@ def get_camera_values( camera_to_ego = camera.extrinsic # 4x4 transformation from camera to ego frame # Get the rotation matrix of the rear axle pose - from d123.geometry.transform.se3 import get_rotation_matrix - ego_transform = np.eye(4, dtype=np.float64) - ego_transform[:3, :3] = get_rotation_matrix(rear_axle) - ego_transform[:3, 3] = rear_axle.point_3d.array + ego_transform = rear_axle.transformation_matrix camera_transform = ego_transform @ camera_to_ego diff --git a/d123/common/visualization/viser/utils_v2.py b/d123/common/visualization/viser/utils_v2.py index 44831f3e..c88faf3e 100644 --- a/d123/common/visualization/viser/utils_v2.py +++ b/d123/common/visualization/viser/utils_v2.py @@ -7,7 +7,7 @@ # from d123.common.datatypes.sensor.camera_parameters import get_nuplan_camera_parameters from d123.geometry import BoundingBoxSE3, Corners3DIndex, Point3D, Point3DIndex, Vector3D -from d123.geometry.transform.se3 import translate_body_frame +from d123.geometry.transform.transform_se3 import translate_body_frame # TODO: Refactor this file. # TODO: Add general utilities for 3D primitives and mesh support. diff --git a/d123/dataset/dataset_specific/wopd/wopd_data_converter.py b/d123/dataset/dataset_specific/wopd/wopd_data_converter.py index a6371421..c1d1698f 100644 --- a/d123/dataset/dataset_specific/wopd/wopd_data_converter.py +++ b/d123/dataset/dataset_specific/wopd/wopd_data_converter.py @@ -28,7 +28,7 @@ from d123.dataset.dataset_specific.wopd.wopd_utils import parse_range_image_and_camera_projection from d123.dataset.logs.log_metadata import LogMetadata from d123.geometry import BoundingBoxSE3Index, Point3D, StateSE3, Vector3D, Vector3DIndex -from d123.geometry.transform.se3 import convert_relative_to_absolute_se3_array, get_rotation_matrix +from d123.geometry.transform.transform_se3 import convert_relative_to_absolute_se3_array, get_rotation_matrix from d123.geometry.utils.constants import DEFAULT_PITCH, DEFAULT_ROLL os.environ["CUDA_VISIBLE_DEVICES"] = "-1" diff --git a/d123/geometry/geometry_index.py b/d123/geometry/geometry_index.py index 3f658838..e5c61154 100644 --- a/d123/geometry/geometry_index.py +++ b/d123/geometry/geometry_index.py @@ -70,6 +70,10 @@ class Vector3DIndex(IntEnum): Y = 1 Z = 2 + @classproperty + def XYZ(cls) -> slice: + return slice(cls.X, cls.Z + 1) + class EulerAnglesIndex(IntEnum): """ @@ -117,6 +121,10 @@ def XYZ(cls) -> slice: def ROTATION_XYZ(cls) -> slice: return slice(cls.ROLL, cls.YAW + 1) + @classproperty + def EULER_ANGLES(cls) -> slice: + return slice(cls.ROLL, cls.YAW + 1) + class QuaternionSE3Index(IntEnum): """ diff --git a/d123/geometry/se.py b/d123/geometry/se.py index b2d8db2c..5ab13283 100644 --- a/d123/geometry/se.py +++ b/d123/geometry/se.py @@ -256,7 +256,7 @@ def rotation_matrix(self) -> npt.NDArray[np.float64]: :return: A 3x3 numpy array representing the rotation matrix. 
""" - raise NotImplementedError("Rotation matrix conversion not implemented yet.") + return EulerAngles.from_array(self.array[StateSE3Index.EULER_ANGLES]).rotation_matrix @property def transformation_matrix(self) -> npt.NDArray[np.float64]: diff --git a/d123/geometry/transform/rotation.py b/d123/geometry/transform/rotation.py deleted file mode 100644 index a99a2fc5..00000000 --- a/d123/geometry/transform/rotation.py +++ /dev/null @@ -1,27 +0,0 @@ -# import numpy as np -# import numpy.typing as npt - -# from d123.geometry.base import Point3DIndex, StateSE3, StateSE3Index -# from d123.geometry.vector import Vector3D - - -# def get_roll_pitch_yaw_from_rotation_matrix( -# rotation_matrix: npt.NDArray[np.float64], -# ) -> Vector3D: -# """Extract roll, pitch, and yaw angles from a rotation matrix.""" -# assert rotation_matrix.shape == (3, 3), "Rotation matrix must be 3x3." - -# sy = np.sqrt(rotation_matrix[0, 0] ** 2 + rotation_matrix[1, 0] ** 2) - -# singular = sy < 1e-6 - -# if not singular: -# x = np.arctan2(rotation_matrix[2, 1], rotation_matrix[2, 2]) -# y = np.arctan2(-rotation_matrix[2, 0], sy) -# z = np.arctan2(rotation_matrix[1, 0], rotation_matrix[0, 0]) -# else: -# x = np.arctan2(-rotation_matrix[1, 2], rotation_matrix[1, 1]) -# y = np.arctan2(-rotation_matrix[2, 0], sy) -# z = 0.0 - -# return Vector3D(x=x, y=y, z=z) diff --git a/d123/geometry/transform/se3.py b/d123/geometry/transform/se3.py deleted file mode 100644 index 02be57df..00000000 --- a/d123/geometry/transform/se3.py +++ /dev/null @@ -1,179 +0,0 @@ -import numpy as np -import numpy.typing as npt - -from d123.geometry import Point3DIndex, StateSE3, StateSE3Index, Vector3D - - -def get_rotation_matrix(state_se3: StateSE3) -> npt.NDArray[np.float64]: - # Intrinsic Z-Y'-X'' rotation: R = R_x(roll) @ R_y(pitch) @ R_z(yaw) - R_x = np.array( - [ - [1, 0, 0], - [0, np.cos(state_se3.roll), -np.sin(state_se3.roll)], - [0, np.sin(state_se3.roll), np.cos(state_se3.roll)], - ], - dtype=np.float64, - ) - R_y = np.array( - [ - [np.cos(state_se3.pitch), 0, np.sin(state_se3.pitch)], - [0, 1, 0], - [-np.sin(state_se3.pitch), 0, np.cos(state_se3.pitch)], - ], - dtype=np.float64, - ) - R_z = np.array( - [ - [np.cos(state_se3.yaw), -np.sin(state_se3.yaw), 0], - [np.sin(state_se3.yaw), np.cos(state_se3.yaw), 0], - [0, 0, 1], - ], - dtype=np.float64, - ) - return R_x @ R_y @ R_z - - -def translate_se3_along_z(state_se3: StateSE3, distance: float) -> StateSE3: - - R = get_rotation_matrix(state_se3) - z_axis = R[:, 2] - - new_x = state_se3.x + distance * z_axis[0] - new_y = state_se3.y + distance * z_axis[1] - new_z = state_se3.z + distance * z_axis[2] - - return StateSE3(new_x, new_y, new_z, state_se3.roll, state_se3.pitch, state_se3.yaw) - - -def translate_se3_along_y(state_se3: StateSE3, distance: float) -> StateSE3: - - R = get_rotation_matrix(state_se3) - y_axis = R[:, 1] - - new_x = state_se3.x + distance * y_axis[0] - new_y = state_se3.y + distance * y_axis[1] - new_z = state_se3.z + distance * y_axis[2] - - return StateSE3(new_x, new_y, new_z, state_se3.roll, state_se3.pitch, state_se3.yaw) - - -def translate_se3_along_x(state_se3: StateSE3, distance: float) -> StateSE3: - - R = get_rotation_matrix(state_se3) - x_axis = R[:, 0] - - new_x = state_se3.x + distance * x_axis[0] - new_y = state_se3.y + distance * x_axis[1] - new_z = state_se3.z + distance * x_axis[2] - - return StateSE3(new_x, new_y, new_z, state_se3.roll, state_se3.pitch, state_se3.yaw) - - -def translate_body_frame(state_se3: StateSE3, vector_3d: Vector3D) -> StateSE3: - R = 
get_rotation_matrix(state_se3) - - body_translation = vector_3d.array - - # Transform to world frame - world_translation = R @ body_translation - - return StateSE3( - state_se3.x + world_translation[0], - state_se3.y + world_translation[1], - state_se3.z + world_translation[2], - state_se3.roll, - state_se3.pitch, - state_se3.yaw, - ) - - -def convert_relative_to_absolute_points_3d_array( - origin: StateSE3, points_3d_array: npt.NDArray[np.float64] -) -> npt.NDArray[np.float64]: - - # TODO: implement function for origin as np.ndarray - - R = get_rotation_matrix(origin) - absolute_points = points_3d_array @ R.T + origin.point_3d.array - return absolute_points - - -def convert_absolute_to_relative_se3_array( - origin: StateSE3, se3_array: npt.NDArray[np.float64] -) -> npt.NDArray[np.float64]: - assert se3_array.shape[-1] == len(StateSE3Index) - # TODO: remove transform for-loop, use vectorized operations - - # Extract rotation and translation of origin - R_origin = get_rotation_matrix(origin) - t_origin = origin.point_3d.array - - # Prepare output array - rel_se3_array = np.empty_like(se3_array) - - # For each SE3 in the array - for i in range(se3_array.shape[0]): - abs_se3 = se3_array[i] - abs_pos = abs_se3[StateSE3Index.XYZ] - abs_rpy = abs_se3[StateSE3Index.ROLL : StateSE3Index.YAW + 1] - - # Relative position: rotate and translate - rel_pos = R_origin.T @ (abs_pos - t_origin) - - # Relative orientation: subtract origin's rpy - rel_rpy = abs_rpy - np.array([origin.roll, origin.pitch, origin.yaw], dtype=np.float64) - - rel_se3_array[i, StateSE3Index.X : StateSE3Index.Z + 1] = rel_pos - rel_se3_array[i, StateSE3Index.ROLL : StateSE3Index.YAW + 1] = rel_rpy - - return rel_se3_array - - -def convert_relative_to_absolute_se3_array( - origin: StateSE3, se3_array: npt.NDArray[np.float64] -) -> npt.NDArray[np.float64]: - assert se3_array.shape[-1] == len(StateSE3Index) - # TODO: remove transform for-loop, use vectorized operations - - # Extract rotation and translation of origin - R_origin = get_rotation_matrix(origin) - t_origin = origin.point_3d.array - - # Prepare output array - abs_se3_array = np.empty_like(se3_array) - - # For each SE3 in the array - for i in range(se3_array.shape[0]): - rel_se3 = se3_array[i] - rel_pos = rel_se3[StateSE3Index.XYZ] - rel_rpy = rel_se3[StateSE3Index.ROLL : StateSE3Index.YAW + 1] - - # Absolute position: rotate and translate - abs_pos = R_origin @ rel_pos + t_origin - - # Absolute orientation: add origin's rpy - abs_rpy = rel_rpy + np.array([origin.roll, origin.pitch, origin.yaw], dtype=np.float64) - - abs_se3_array[i, StateSE3Index.X : StateSE3Index.Z + 1] = abs_pos - abs_se3_array[i, StateSE3Index.ROLL : StateSE3Index.YAW + 1] = abs_rpy - - return abs_se3_array - - -def translate_points_3d_along_z( - state_se3: StateSE3, - points_3d: npt.NDArray[np.float64], - distance: float, -) -> npt.NDArray[np.float64]: - assert points_3d.shape[-1] == len(Point3DIndex) - - R = get_rotation_matrix(state_se3) - z_axis = R[:, 2] - - translated_points = np.zeros_like(points_3d) - - translated_points[..., Point3DIndex.X] = points_3d[..., Point3DIndex.X] + distance * z_axis[0] - translated_points[..., Point3DIndex.Y] = points_3d[..., Point3DIndex.Y] + distance * z_axis[1] - translated_points[..., Point3DIndex.Z] = points_3d[..., Point3DIndex.Z] + distance * z_axis[2] - - return translated_points diff --git a/d123/geometry/transform/tranform_2d.py b/d123/geometry/transform/tranform_2d.py deleted file mode 100644 index b85e598b..00000000 --- 
a/d123/geometry/transform/tranform_2d.py
+++ /dev/null
@@ -1,22 +0,0 @@
-import numpy as np
-import numpy.typing as npt
-
-from d123.geometry.se import StateSE2
-from d123.geometry.vector import Vector2D
-
-# TODO: Refactor 2D and 3D transform functions in a more consistent and general way.
-
-
-def translate(pose: StateSE2, translation: Vector2D) -> StateSE2:
-    return StateSE2(pose.x + translation.x, pose.y + translation.y, pose.yaw)
-
-
-def translate_along_yaw(pose: StateSE2, translation: Vector2D) -> StateSE2:
-    half_pi = np.pi / 2.0
-    translation: npt.NDArray[np.float64] = np.array(
-        [
-            (translation.y * np.cos(pose.yaw + half_pi)) + (translation.x * np.cos(pose.yaw)),
-            (translation.y * np.sin(pose.yaw + half_pi)) + (translation.x * np.sin(pose.yaw)),
-        ]
-    )
-    return translate(pose, Vector2D.from_array(translation))
diff --git a/d123/geometry/transform/se2_array.py b/d123/geometry/transform/transform_se2.py
similarity index 51%
rename from d123/geometry/transform/se2_array.py
rename to d123/geometry/transform/transform_se2.py
index 97ff8bee..c1f33cd4 100644
--- a/d123/geometry/transform/se2_array.py
+++ b/d123/geometry/transform/transform_se2.py
@@ -3,8 +3,10 @@
 import numpy as np
 import numpy.typing as npt
 
+from d123.geometry.geometry_index import Vector2DIndex
 from d123.geometry.se import StateSE2, StateSE2Index
 from d123.geometry.utils.rotation_utils import normalize_angle
+from d123.geometry.vector import Vector2D
 
 # TODO: Refactor 2D and 3D transform functions in a more consistent and general way.
 
@@ -12,11 +14,13 @@
 def convert_absolute_to_relative_se2_array(
     origin: Union[StateSE2, npt.NDArray[np.float64]], state_se2_array: npt.NDArray[np.float64]
 ) -> npt.NDArray[np.float64]:
-    """
-    Converts an StateSE2 array from global to relative coordinates.
+    """Converts a StateSE2 array from global to relative coordinates.
+
     :param origin: origin pose of relative coords system
-    :param state_se2_array: array of SE2 states with (x,y,θ) in last dim
-    :return: SE2 coords array in relative coordinates
+    :param state_se2_array: array of SE2 states with (x,y,yaw), indexed by \
+        :class:`~d123.geometry.geometry_index.StateSE2Index`, in last dim
+    :return: SE2 array, indexed by \
+        :class:`~d123.geometry.geometry_index.StateSE2Index`, in last dim
     """
     if isinstance(origin, StateSE2):
         origin_array = origin.array
@@ -28,11 +32,11 @@
     rotate_rad = -origin_array[StateSE2Index.YAW]
 
     cos, sin = np.cos(rotate_rad), np.sin(rotate_rad)
-    R = np.array([[cos, -sin], [sin, cos]])
+    R_inv = np.array([[cos, -sin], [sin, cos]])
 
     state_se2_rel = state_se2_array - origin_array
-    state_se2_rel[..., :2] = state_se2_rel[..., :2] @ R.T
-    state_se2_rel[..., 2] = normalize_angle(state_se2_rel[..., 2])
+    state_se2_rel[..., StateSE2Index.XY] = state_se2_rel[..., StateSE2Index.XY] @ R_inv.T
+    state_se2_rel[..., StateSE2Index.YAW] = normalize_angle(state_se2_rel[..., StateSE2Index.YAW])
 
     return state_se2_rel
 
@@ -40,8 +44,8 @@
 def convert_absolute_to_relative_point_2d_array(
     origin: Union[StateSE2, npt.NDArray[np.float64]], point_2d_array: npt.NDArray[np.float64]
 ) -> npt.NDArray[np.float64]:
-    """
-    Converts an absolute 2D point array from global to relative coordinates.
+ :param origin: origin pose of relative coords system :param point_2d_array: array of 2D points with (x,y) in last dim :return: 2D points array in relative coordinates @@ -86,8 +90,8 @@ def convert_relative_to_absolute_se2_array( R = np.array([[cos, -sin], [sin, cos]]) state_se2_rel = state_se2_array + origin_array - state_se2_rel[..., :2] = state_se2_rel[..., :2] @ R.T - state_se2_rel[..., 2] = normalize_angle(state_se2_rel[..., 2]) + state_se2_rel[..., StateSE2Index.XY] = state_se2_rel[..., StateSE2Index.XY] @ R.T + state_se2_rel[..., StateSE2Index.YAW] = normalize_angle(state_se2_rel[..., StateSE2Index.YAW]) return state_se2_rel @@ -112,3 +116,68 @@ def convert_relative_to_absolute_point_2d_array( point_2d_abs = point_2d_abs + origin_array[..., StateSE2Index.XY] return point_2d_abs + + +def translate_se2(state_se2: StateSE2, translation: Vector2D) -> StateSE2: + """Translate a single SE2 state by a 2D vector. + + :param state_se2: SE2 state to translate + :param translation: 2D translation vector + :return: translated SE2 state + """ + translated_xy = state_se2.array[StateSE2Index.XY] + translation.array[Vector2DIndex.XY] + return StateSE2(translated_xy[0], translated_xy[1], state_se2.array[StateSE2Index.YAW]) + + +def translate_se2_array(state_se2_array: npt.NDArray[np.float64], translation: Vector2D) -> npt.NDArray[np.float64]: + """Translate an array of SE2 states by a 2D vector. + + :param state_se2_array: array of SE2 states, indexed by \ + :class:`~d123.geometry.geometry_index.StateSE2Index`, in last dim + :param translation: 2D translation vector + :return: translated SE2 array + """ + result = state_se2_array.copy() + result[..., StateSE2Index.XY] += translation.array[Vector2DIndex.XY] + return result + + +def translate_se2_along_yaw(state_se2: StateSE2, translation: Vector2D) -> StateSE2: + """Translate a single SE2 state along its local coordinate frame. + + :param state_se2: SE2 state to translate + :param translation: 2D translation in local frame (x: forward, y: left) + :return: translated SE2 state + """ + yaw = state_se2.array[StateSE2Index.YAW] + cos_yaw, sin_yaw = np.cos(yaw), np.sin(yaw) + + # Transform translation from local to global frame + global_translation = np.array( + [translation.x * cos_yaw - translation.y * sin_yaw, translation.x * sin_yaw + translation.y * cos_yaw] + ) + + return translate_se2(state_se2, Vector2D.from_array(global_translation)) + + +def translate_se2_array_along_yaw( + state_se2_array: npt.NDArray[np.float64], translation: Vector2D +) -> npt.NDArray[np.float64]: + """Translate an array of SE2 states along their respective local coordinate frames. 
+ + :param state_se2_array: array of SE2 states with (x,y,yaw) in last dim + :param translation: 2D translation in local frame (x: forward, y: left) + :return: translated SE2 array + """ + result = state_se2_array.copy() + yaws = state_se2_array[..., StateSE2Index.YAW] + cos_yaws, sin_yaws = np.cos(yaws), np.sin(yaws) + + # Transform translation from local to global frame for each state + global_translation_x = translation.x * cos_yaws - translation.y * sin_yaws + global_translation_y = translation.x * sin_yaws + translation.y * cos_yaws + + result[..., StateSE2Index.X] += global_translation_x + result[..., StateSE2Index.Y] += global_translation_y + + return result diff --git a/d123/geometry/transform/transform_se3.py b/d123/geometry/transform/transform_se3.py new file mode 100644 index 00000000..67edd050 --- /dev/null +++ b/d123/geometry/transform/transform_se3.py @@ -0,0 +1,177 @@ +from typing import Union + +import numpy as np +import numpy.typing as npt + +from d123.geometry import StateSE3, StateSE3Index, Vector3D +from d123.geometry.geometry_index import Vector3DIndex +from d123.geometry.rotation import EulerAngles +from d123.geometry.utils.rotation_utils import ( + get_rotation_matrices_from_euler_array, + get_rotation_matrix_from_euler_array, + normalize_angle, +) + + +def translate_se3_along_z(state_se3: StateSE3, distance: float) -> StateSE3: + + R = state_se3.rotation_matrix + z_axis = R[:, 2] + + state_se3_array = state_se3.array.copy() + state_se3_array[StateSE3Index.X] += distance * z_axis[0] + state_se3_array[StateSE3Index.Y] += distance * z_axis[1] + state_se3_array[StateSE3Index.Z] += distance * z_axis[2] + return StateSE3.from_array(state_se3_array) + + +def translate_se3_along_y(state_se3: StateSE3, distance: float) -> StateSE3: + + R = state_se3.rotation_matrix + y_axis = R[:, 1] + + state_se3_array = state_se3.array.copy() + state_se3_array[StateSE3Index.X] += distance * y_axis[0] + state_se3_array[StateSE3Index.Y] += distance * y_axis[1] + state_se3_array[StateSE3Index.Z] += distance * y_axis[2] + return StateSE3.from_array(state_se3_array) + + +def translate_se3_along_x(state_se3: StateSE3, distance: float) -> StateSE3: + + R = state_se3.rotation_matrix + x_axis = R[:, 0] + + state_se3_array = state_se3.array.copy() + state_se3_array[StateSE3Index.X] += distance * x_axis[0] + state_se3_array[StateSE3Index.Y] += distance * x_axis[1] + state_se3_array[StateSE3Index.Z] += distance * x_axis[2] + + return StateSE3.from_array(state_se3_array) + + +def translate_body_frame(state_se3: StateSE3, vector_3d: Vector3D) -> StateSE3: + R = state_se3.rotation_matrix + + # Transform to world frame + world_translation = R @ vector_3d.array + + state_se3_array = state_se3.array.copy() + state_se3_array[StateSE3Index.XYZ] += world_translation[Vector3DIndex.XYZ] + + return StateSE3.from_array(state_se3_array) + + +def convert_relative_to_absolute_points_3d_array( + origin: Union[StateSE3, npt.NDArray[np.float64]], points_3d_array: npt.NDArray[np.float64] +) -> npt.NDArray[np.float64]: + + # TODO: implement function for origin as np.ndarray + if isinstance(origin, StateSE3): + origin_array = origin.array + elif isinstance(origin, np.ndarray): + assert origin.ndim == 1 and origin.shape[-1] == len(StateSE3Index) + origin_array = origin + else: + raise TypeError(f"Expected StateSE3 or np.ndarray, got {type(origin)}") + + R = EulerAngles.from_array(origin_array[StateSE3Index.EULER_ANGLES]).rotation_matrix + absolute_points = points_3d_array @ R.T + origin.point_3d.array + return 
absolute_points
+
+
+def convert_absolute_to_relative_se3_array(
+    origin: Union[StateSE3, npt.NDArray[np.float64]], se3_array: npt.NDArray[np.float64]
+) -> npt.NDArray[np.float64]:
+    if isinstance(origin, StateSE3):
+        origin_array = origin.array
+        t_origin = origin.point_3d.array
+        R_origin = origin.rotation_matrix
+    elif isinstance(origin, np.ndarray):
+        assert origin.ndim == 1 and origin.shape[-1] == len(StateSE3Index)
+        origin_array = origin
+        t_origin = origin_array[StateSE3Index.XYZ]
+        R_origin = get_rotation_matrix_from_euler_array(origin_array[StateSE3Index.EULER_ANGLES])
+    else:
+        raise TypeError(f"Expected StateSE3 or np.ndarray, got {type(origin)}")
+
+    assert se3_array.shape[-1] == len(StateSE3Index)
+
+    # Extract positions and orientations from se3_array
+    abs_positions = se3_array[..., StateSE3Index.XYZ]
+    abs_euler_angles = se3_array[..., StateSE3Index.EULER_ANGLES]
+
+    # Vectorized relative position calculation
+    rel_positions = (abs_positions - t_origin) @ R_origin
+
+    # Get rotation matrices for all absolute orientations
+    R_abs = get_rotation_matrices_from_euler_array(abs_euler_angles)
+
+    # Compute relative rotations: R_rel = R_origin^T @ R_abs
+    R_rel = np.transpose(R_origin) @ R_abs
+
+    # Converting R_rel back to Euler angles still needs a dedicated helper; R_rel is unused until then.
+    # For now, using simple subtraction as approximation (this is incorrect for general rotations)
+    origin_euler = origin_array[StateSE3Index.EULER_ANGLES]
+    rel_euler_angles = abs_euler_angles - origin_euler
+
+    # Prepare output array
+    rel_se3_array = se3_array.copy()
+    rel_se3_array[..., StateSE3Index.XYZ] = rel_positions
+    rel_se3_array[..., StateSE3Index.EULER_ANGLES] = normalize_angle(rel_euler_angles)
+
+    return rel_se3_array
+
+
+def convert_relative_to_absolute_se3_array(
+    origin: Union[StateSE3, npt.NDArray[np.float64]], se3_array: npt.NDArray[np.float64]
+) -> npt.NDArray[np.float64]:
+    if isinstance(origin, StateSE3):
+        origin_array = origin.array
+        t_origin = origin.point_3d.array
+        R_origin = origin.rotation_matrix
+    elif isinstance(origin, np.ndarray):
+        assert origin.ndim == 1 and origin.shape[-1] == len(StateSE3Index)
+        origin_array = origin
+        t_origin = origin_array[StateSE3Index.XYZ]
+        R_origin = get_rotation_matrix_from_euler_array(origin_array[StateSE3Index.EULER_ANGLES])
+    else:
+        raise TypeError(f"Expected StateSE3 or np.ndarray, got {type(origin)}")
+    assert se3_array.shape[-1] == len(StateSE3Index)
+
+    # Extract relative positions and orientations
+    rel_positions = se3_array[..., StateSE3Index.XYZ]
+    rel_euler_angles = se3_array[..., StateSE3Index.EULER_ANGLES]
+
+    # Vectorized absolute position calculation: rotate and translate
+    abs_positions = (R_origin @ rel_positions.T).T + t_origin
+
+    # Vectorized absolute orientation: add origin's euler angles
+    origin_euler = origin_array[StateSE3Index.EULER_ANGLES]
+    abs_euler_angles = rel_euler_angles + origin_euler
+
+    # Prepare output array
+    abs_se3_array = se3_array.copy()
+    abs_se3_array[..., StateSE3Index.XYZ] = abs_positions
+    abs_se3_array[..., StateSE3Index.EULER_ANGLES] = normalize_angle(abs_euler_angles)
+
+    return abs_se3_array
+
+
+def convert_absolute_to_relative_points_3d_array(
+    origin: Union[StateSE3, npt.NDArray[np.float64]], points_3d_array: npt.NDArray[np.float64]
+) -> npt.NDArray[np.float64]:
+
+    if isinstance(origin, StateSE3):
+        t_origin = origin.point_3d.array
+        R_origin = origin.rotation_matrix
+    elif isinstance(origin, np.ndarray):
+        assert origin.ndim == 1 and origin.shape[-1] == len(StateSE3Index)
+        t_origin = origin[StateSE3Index.XYZ]
+ R_origin = get_rotation_matrix_from_euler_array(origin[StateSE3Index.EULER_ANGLES]) + else: + raise TypeError(f"Expected StateSE3 or np.ndarray, got {type(origin)}") + + # Translate points to origin frame, then rotate to body frame + relative_points = (points_3d_array - t_origin) @ R_origin + return relative_points diff --git a/d123/simulation/agents/smart_agents.py b/d123/simulation/agents/smart_agents.py index 3af49624..9d2e2140 100644 --- a/d123/simulation/agents/smart_agents.py +++ b/d123/simulation/agents/smart_agents.py @@ -11,7 +11,7 @@ from d123.dataset.scene.abstract_scene import AbstractScene from d123.geometry.bounding_box import BoundingBoxSE2 from d123.geometry.se import StateSE2 -from d123.geometry.transform.se2_array import convert_relative_to_absolute_point_2d_array +from d123.geometry.transform.transform_se2 import convert_relative_to_absolute_point_2d_array from d123.geometry.utils.rotation_utils import normalize_angle from d123.simulation.agents.abstract_agents import AbstractAgents from d123.training.feature_builder.smart_feature_builder import SMARTFeatureBuilder diff --git a/d123/simulation/gym/environment/gym_observation/raster/raster_renderer.py b/d123/simulation/gym/environment/gym_observation/raster/raster_renderer.py index ae8c1136..52da337c 100644 --- a/d123/simulation/gym/environment/gym_observation/raster/raster_renderer.py +++ b/d123/simulation/gym/environment/gym_observation/raster/raster_renderer.py @@ -13,8 +13,8 @@ from d123.common.datatypes.detection.detection import BoxDetectionSE2, TrafficLightStatus from d123.geometry.se import StateSE2 -from d123.geometry.transform.se2_array import convert_absolute_to_relative_point_2d_array from d123.geometry.transform.tranform_2d import translate_along_yaw +from d123.geometry.transform.transform_se2 import convert_absolute_to_relative_point_2d_array from d123.geometry.vector import Vector2D from d123.simulation.gym.environment.helper.environment_area import AbstractEnvironmentArea, RectangleEnvironmentArea from d123.simulation.gym.environment.helper.environment_cache import BoxDetectionCache, MapCache diff --git a/d123/training/feature_builder/smart_feature_builder.py b/d123/training/feature_builder/smart_feature_builder.py index 4286b990..9ea931bf 100644 --- a/d123/training/feature_builder/smart_feature_builder.py +++ b/d123/training/feature_builder/smart_feature_builder.py @@ -18,7 +18,7 @@ from d123.dataset.scene.abstract_scene import AbstractScene from d123.geometry import BoundingBoxSE2, PolylineSE2, StateSE2 from d123.geometry.geometry_index import StateSE2Index -from d123.geometry.transform.se2_array import convert_absolute_to_relative_se2_array +from d123.geometry.transform.transform_se2 import convert_absolute_to_relative_se2_array # TODO: Hind feature builder behind abstraction. From ca194846608358df6de3e09d4bf5110527392c68 Mon Sep 17 00:00:00 2001 From: Daniel Dauner Date: Mon, 25 Aug 2025 19:42:00 +0200 Subject: [PATCH 019/145] Fix wrong transformation matrix in SE3. 
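
The translation of a homogeneous SE(3) transform belongs in the last column,
not in the bottom row: the old code wrote the position into the projective
row and left the translation column at zero. A minimal sketch of the intended
layout (plain NumPy; R and t are placeholders for any rotation matrix and
translation vector):

    import numpy as np

    R = np.eye(3)                  # placeholder rotation block
    t = np.array([1.0, 2.0, 0.5])  # placeholder translation
    T = np.eye(4, dtype=np.float64)
    T[:3, :3] = R                  # rotation in the upper-left 3x3 block
    T[:3, 3] = t                   # translation in the last column
    assert np.allclose(T @ np.array([0.0, 0.0, 0.0, 1.0]), [*t, 1.0])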
--- d123/common/visualization/viser/utils.py | 20 -------------------- d123/geometry/se.py | 2 +- 2 files changed, 1 insertion(+), 21 deletions(-) diff --git a/d123/common/visualization/viser/utils.py b/d123/common/visualization/viser/utils.py index f72fa666..ad49b88a 100644 --- a/d123/common/visualization/viser/utils.py +++ b/d123/common/visualization/viser/utils.py @@ -243,26 +243,6 @@ def get_camera_values( return camera_position, camera_rotation, camera -def _get_ego_frame_pose(scene: AbstractScene, iteration: int) -> StateSE3: - - initial_point_3d = scene.get_ego_state_at_iteration(0).center_se3.point_3d - state_se3 = scene.get_ego_state_at_iteration(iteration).center_se3 - - state_se3.x = state_se3.x - initial_point_3d.x - state_se3.y = state_se3.y - initial_point_3d.y - state_se3.z = state_se3.z - initial_point_3d.z - - return state_se3 - - -def euler_to_quaternion_scipy(roll: float, pitch: float, yaw: float) -> npt.NDArray[np.float64]: - from scipy.spatial.transform import Rotation - - r = Rotation.from_euler("xyz", [roll, pitch, yaw], degrees=False) - quat = r.as_quat(scalar_first=True) - return quat - - def get_lidar_points( scene: AbstractScene, iteration: int, lidar_types: List[LiDARType] ) -> Tuple[npt.NDArray[np.float32], npt.NDArray[np.float32]]: diff --git a/d123/geometry/se.py b/d123/geometry/se.py index 5ab13283..378128ca 100644 --- a/d123/geometry/se.py +++ b/d123/geometry/se.py @@ -267,7 +267,7 @@ def transformation_matrix(self) -> npt.NDArray[np.float64]: rotation_matrix = self.rotation_matrix transformation_matrix = np.eye(4, dtype=np.float64) transformation_matrix[:3, :3] = rotation_matrix - transformation_matrix[3, :3] = self.array[StateSE3Index.XYZ] + transformation_matrix[:3, 3] = self.array[StateSE3Index.XYZ] return transformation_matrix @property From 89565afcdb8f690d5790945fc1de6438d229908e Mon Sep 17 00:00:00 2001 From: Daniel Dauner Date: Mon, 25 Aug 2025 20:48:37 +0200 Subject: [PATCH 020/145] Fix some stuff in the WOPD data converter. NOTE: It's significantly faster to flatten lists in arrow. --- d123/common/visualization/viser/server.py | 2 +- d123/dataset/arrow/conversion.py | 6 ++- .../waymo_map_utils/womp_boundary_utils.py | 8 ++-- .../wopd/wopd_data_converter.py | 37 ++++++++----------- .../default_dataset_conversion.yaml | 4 +- 5 files changed, 27 insertions(+), 30 deletions(-) diff --git a/d123/common/visualization/viser/server.py b/d123/common/visualization/viser/server.py index 087a7e8b..686d3746 100644 --- a/d123/common/visualization/viser/server.py +++ b/d123/common/visualization/viser/server.py @@ -49,7 +49,7 @@ CAMERA_SCALE: float = 1.0 # Lidar config: -LIDAR_AVAILABLE: bool = False +LIDAR_AVAILABLE: bool = True LIDAR_TYPES: List[LiDARType] = [ LiDARType.LIDAR_MERGED, diff --git a/d123/dataset/arrow/conversion.py b/d123/dataset/arrow/conversion.py index ab078545..c961ab7d 100644 --- a/d123/dataset/arrow/conversion.py +++ b/d123/dataset/arrow/conversion.py @@ -157,10 +157,12 @@ def get_lidar_from_arrow_table( raise NotImplementedError(f"Loading LiDAR data for dataset {log_metadata.dataset} is not implemented.") else: + # FIXME: This is a temporary fix for WOPD dataset. The lidar data is stored as a flattened array of float32. + # Ideally the lidar index should handle the dimension. But for now we hardcode it here. 
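+        # Assumed layout (not guaranteed by the schema): a flat [x0, y0, z0, x1, y1, z1, ...]
+        # float32 buffer that is reshaped to (N, 3); the zero columns prepended below stand in
+        # for the polar features (presumably range, intensity, elongation) dropped on conversion.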
+ lidar_data = np.array(lidar_data, dtype=np.float32).reshape(-1, 3) + lidar_data = np.concatenate([np.zeros_like(lidar_data), lidar_data], axis=-1) if log_metadata.dataset == "wopd": - lidar_data = np.array(lidar_data, dtype=np.float64) lidar = LiDAR(metadata=lidar_metadata, point_cloud=lidar_data.T) else: raise NotImplementedError("Only string file paths for lidar data are supported.") - return lidar diff --git a/d123/dataset/dataset_specific/wopd/waymo_map_utils/womp_boundary_utils.py b/d123/dataset/dataset_specific/wopd/waymo_map_utils/womp_boundary_utils.py index 269b09b6..99e63192 100644 --- a/d123/dataset/dataset_specific/wopd/waymo_map_utils/womp_boundary_utils.py +++ b/d123/dataset/dataset_specific/wopd/waymo_map_utils/womp_boundary_utils.py @@ -6,7 +6,7 @@ import shapely.geometry as geom from d123.geometry import OccupancyMap2D, Point3D, Polyline3D, PolylineSE2, StateSE2, Vector2D -from d123.geometry.transform.tranform_2d import translate_along_yaw +from d123.geometry.transform.transform_se2 import translate_se2_along_yaw from d123.geometry.utils.rotation_utils import normalize_angle MAX_LANE_WIDTH = 25.0 # meters @@ -64,7 +64,7 @@ def _collect_perpendicular_hits( assert sign in [1.0, -1.0], "Sign must be either 1.0 (left) or -1.0 (right)" # perp_start_point = translate_along_yaw(lane_query_se2, Vector2D(0.0, sign * PERP_START_OFFSET)) perp_start_point = lane_query_se2 - perp_end_point = translate_along_yaw(lane_query_se2, Vector2D(0.0, sign * MAX_LANE_WIDTH / 2.0)) + perp_end_point = translate_se2_along_yaw(lane_query_se2, Vector2D(0.0, sign * MAX_LANE_WIDTH / 2.0)) perp_linestring = geom.LineString([[perp_start_point.x, perp_start_point.y], [perp_end_point.x, perp_end_point.y]]) lane_linestring = occupancy_2d.geometries[occupancy_2d.id_to_idx[lane_token]] @@ -261,7 +261,9 @@ def _get_default_boundary_point_3d( lane_query_se2: StateSE2, lane_query_3d: Point3D, sign: float ) -> Point3D: perp_boundary_distance = DEFAULT_LANE_WIDTH / 2.0 - boundary_point_se2 = translate_along_yaw(lane_query_se2, Vector2D(0.0, sign * perp_boundary_distance)) + boundary_point_se2 = translate_se2_along_yaw( + lane_query_se2, Vector2D(0.0, sign * perp_boundary_distance) + ) return Point3D(boundary_point_se2.x, boundary_point_se2.y, lane_query_3d.z) if no_boundary_ratio > 0.8: diff --git a/d123/dataset/dataset_specific/wopd/wopd_data_converter.py b/d123/dataset/dataset_specific/wopd/wopd_data_converter.py index c1d1698f..812d40c6 100644 --- a/d123/dataset/dataset_specific/wopd/wopd_data_converter.py +++ b/d123/dataset/dataset_specific/wopd/wopd_data_converter.py @@ -14,7 +14,6 @@ from pyquaternion import Quaternion from waymo_open_dataset import dataset_pb2 -from d123.common.datatypes.detection.detection import TrafficLightStatus from d123.common.datatypes.detection.detection_types import DetectionType from d123.common.datatypes.sensor.camera import CameraMetadata, CameraType, camera_metadata_dict_to_json from d123.common.datatypes.sensor.lidar import LiDARMetadata, LiDARType, lidar_metadata_dict_to_json @@ -28,20 +27,18 @@ from d123.dataset.dataset_specific.wopd.wopd_utils import parse_range_image_and_camera_projection from d123.dataset.logs.log_metadata import LogMetadata from d123.geometry import BoundingBoxSE3Index, Point3D, StateSE3, Vector3D, Vector3DIndex -from d123.geometry.transform.transform_se3 import convert_relative_to_absolute_se3_array, get_rotation_matrix +from d123.geometry.transform.transform_se3 import convert_relative_to_absolute_se3_array from d123.geometry.utils.constants import 
DEFAULT_PITCH, DEFAULT_ROLL +# TODO: Make keep_polar_features an optional argument. +# With polar features, the lidar loading time is SIGNIFICANTLY higher. + os.environ["CUDA_VISIBLE_DEVICES"] = "-1" D123_MAPS_ROOT = Path(os.environ.get("D123_MAPS_ROOT")) TARGET_DT: Final[float] = 0.1 SORT_BY_TIMESTAMP: Final[bool] = False -NUPLAN_TRAFFIC_STATUS_DICT: Final[Dict[str, TrafficLightStatus]] = { - "green": TrafficLightStatus.GREEN, - "red": TrafficLightStatus.RED, - "unknown": TrafficLightStatus.UNKNOWN, -} # https://github.com/waymo-research/waymo-open-dataset/blob/master/src/waymo_open_dataset/label.proto#L63 WOPD_DETECTION_NAME_DICT: Dict[int, DetectionType] = { @@ -248,9 +245,7 @@ def convert_wopd_tfrecord_log_to_arrow( if data_converter_config.lidar_store_option == "path": raise NotImplementedError("Filepath lidar storage is not implemented.") elif data_converter_config.lidar_store_option == "binary": - schema_column_list.append( - (lidar_type.serialize(), pa.list_(pa.list_(pa.float32(), len(WopdLidarIndex)))) - ) + schema_column_list.append((lidar_type.serialize(), (pa.list_(pa.float32())))) recording_schema = pa.schema(schema_column_list) recording_schema = recording_schema.with_metadata( @@ -491,16 +486,14 @@ def _extract_camera( transform = np.array(calibration.extrinsic.transform).reshape(4, 4) # FIXME: This is an ugly hack to convert to uniform camera convention. - flip_camera = get_rotation_matrix( - StateSE3( - x=0.0, - y=0.0, - z=0.0, - roll=np.deg2rad(0.0), - pitch=np.deg2rad(90.0), - yaw=np.deg2rad(-90.0), - ) - ) + flip_camera = StateSE3( + x=0.0, + y=0.0, + z=0.0, + roll=np.deg2rad(0.0), + pitch=np.deg2rad(90.0), + yaw=np.deg2rad(-90.0), + ).rotation_matrix transform[:3, :3] = transform[:3, :3] @ flip_camera context_extrinsic[camera_type] = transform @@ -533,12 +526,12 @@ def _extract_lidar( range_images=range_images, camera_projections=camera_projections, range_image_top_pose=range_image_top_pose, - keep_polar_features=True, + keep_polar_features=False, ) lidar_data: Dict[LiDARType, npt.NDArray[np.float32]] = {} for lidar_idx, frame_lidar in enumerate(frame.lasers): lidar_type = WOPD_LIDAR_TYPES[frame_lidar.name] - lidar_data[lidar_type] = np.array(points[lidar_idx], dtype=np.float32) + lidar_data[lidar_type] = np.array(points[lidar_idx], dtype=np.float32).flatten() return lidar_data diff --git a/d123/script/config/dataset_conversion/default_dataset_conversion.yaml b/d123/script/config/dataset_conversion/default_dataset_conversion.yaml index 0a4544da..b46e7c9d 100644 --- a/d123/script/config/dataset_conversion/default_dataset_conversion.yaml +++ b/d123/script/config/dataset_conversion/default_dataset_conversion.yaml @@ -17,8 +17,8 @@ defaults: - datasets: # - nuplan_private_dataset # - carla_dataset - # - wopd_dataset - - av2_sensor_dataset + - wopd_dataset + # - av2_sensor_dataset force_log_conversion: True force_map_conversion: True From 9b2899578ac3f49753b413fbb934aed9d922a438 Mon Sep 17 00:00:00 2001 From: Daniel Dauner Date: Mon, 25 Aug 2025 20:59:20 +0200 Subject: [PATCH 021/145] Update `contains_vectorized` from OccupancyMap2D to avoid deprecated function. 
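
Shapely 2.x deprecates the `shapely.vectorized` module; the top-level
`shapely.contains_xy(geom, x, y)` performs the same vectorized
point-in-geometry test. A minimal sketch of the replacement call, with an
illustrative geometry and query points:

    import numpy as np
    import shapely
    from shapely.geometry import box

    geom = box(0.0, 0.0, 1.0, 1.0)  # unit square
    mask = shapely.contains_xy(geom, np.array([0.5, 2.0]), np.array([0.5, 2.0]))
    # mask == array([ True, False])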
---
 d123/geometry/occupancy_map.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/d123/geometry/occupancy_map.py b/d123/geometry/occupancy_map.py
index c9886a14..b28348e7 100644
--- a/d123/geometry/occupancy_map.py
+++ b/d123/geometry/occupancy_map.py
@@ -152,6 +152,6 @@ def contains_vectorized(self, points: npt.NDArray[np.float64]) -> npt.NDArray[np
         """
         output = np.zeros((len(self._geometries), len(points)), dtype=bool)
         for i, geometry in enumerate(self._geometries):
-            output[i] = shapely.vectorized.contains(geometry, points[..., Point2DIndex.X], points[..., Point2DIndex.Y])
+            output[i] = shapely.contains_xy(geometry, points[..., Point2DIndex.X], points[..., Point2DIndex.Y])
         return output
 
From 5c1365c1bdbcb2a49ae603e259155873b532ecf8 Mon Sep 17 00:00:00 2001
From: Daniel Dauner
Date: Mon, 25 Aug 2025 21:23:21 +0200
Subject: [PATCH 022/145] Add docstrings and (faulty) tests for transform functions (#44)

---
 d123/geometry/test/test_transform.py | 239 +++++++++++++++++++++++
 d123/geometry/transform/transform_se3.py | 98 ++++++++--
 2 files changed, 318 insertions(+), 19 deletions(-)
 create mode 100644 d123/geometry/test/test_transform.py

diff --git a/d123/geometry/test/test_transform.py b/d123/geometry/test/test_transform.py
new file mode 100644
index 00000000..7377bc41
--- /dev/null
+++ b/d123/geometry/test/test_transform.py
@@ -0,0 +1,239 @@
+# import unittest
+# import numpy as np
+# import numpy.typing as npt
+
+# from d123.geometry.se import StateSE2, StateSE3
+# from d123.geometry.transform.transform_se2 import (
+# convert_absolute_to_relative_se2_array,
+# convert_absolute_to_relative_point_2d_array,
+# convert_relative_to_absolute_se2_array,
+# convert_relative_to_absolute_point_2d_array,
+# translate_se2,
+# translate_se2_array,
+# translate_se2_along_yaw,
+# translate_se2_array_along_yaw,
+# )
+# from d123.geometry.transform.transform_se3 import (
+# translate_se3_along_z,
+# translate_se3_along_y,
+# translate_se3_along_x,
+# translate_body_frame,
+# convert_absolute_to_relative_se3_array,
+# convert_relative_to_absolute_se3_array,
+# convert_absolute_to_relative_points_3d_array,
+# convert_relative_to_absolute_points_3d_array,
+# )
+# from d123.geometry.vector import Vector2D
+
+
+# class TestTransformSE2(unittest.TestCase):
+# def test_translate_se2(self) -> None:
+# pose: StateSE2 = StateSE2.from_array(np.array([1.0, 2.0, 0.0], dtype=float))
+# translation: Vector2D = Vector2D(1.0, 1.0)
+# result: StateSE2 = translate_se2(pose, translation)
+# expected: StateSE2 = StateSE2.from_array(np.array([2.0, 3.0, 0.0], dtype=float))
+# np.testing.assert_array_almost_equal(result.array, expected.array)
+
+# def test_translate_se2_array(self) -> None:
+# poses: npt.NDArray[np.float64] = np.array(
+# [[1.0, 2.0, 0.0], [0.0, 0.0, np.pi / 2]], dtype=float
+# )
+# translation: Vector2D = Vector2D(1.0, 1.0)
+# result: npt.NDArray[np.float64] = translate_se2_array(poses, translation)
+# expected: npt.NDArray[np.float64] = np.array(
+# [[2.0, 3.0, 0.0], [1.0, 1.0, np.pi / 2]], dtype=float
+# )
+# np.testing.assert_array_almost_equal(result, expected)
+
+# def test_translate_se2_along_yaw(self) -> None:
+# # Move 1 unit forward in the direction of yaw (pi/2 = 90 degrees = +Y direction)
+# pose: npt.NDArray[np.float64] = np.array([0.0, 0.0, np.pi / 2], dtype=float)
+# distance: float = 1.0
+# result: npt.NDArray[np.float64] = translate_se2_along_yaw(pose, distance)
+# expected: npt.NDArray[np.float64] = np.array([0.0, 1.0, np.pi / 2], dtype=float)
+#
np.testing.assert_array_almost_equal(result, expected) + +# def test_translate_se2_array_along_yaw(self) -> None: +# poses: npt.NDArray[np.float64] = np.array( +# [[0.0, 0.0, 0.0], [0.0, 0.0, np.pi / 2]], dtype=float +# ) +# distance: float = 1.0 +# result: npt.NDArray[np.float64] = translate_se2_array_along_yaw(poses, distance) +# expected: npt.NDArray[np.float64] = np.array( +# [[1.0, 0.0, 0.0], [0.0, 1.0, np.pi / 2]], dtype=float +# ) +# np.testing.assert_array_almost_equal(result, expected) + +# def test_convert_absolute_to_relative_se2_array(self) -> None: +# reference: npt.NDArray[np.float64] = np.array([1.0, 1.0, 0.0], dtype=float) +# absolute_poses: npt.NDArray[np.float64] = np.array( +# [[2.0, 2.0, 0.0], [0.0, 1.0, np.pi / 2]], dtype=float +# ) +# result: npt.NDArray[np.float64] = convert_absolute_to_relative_se2_array( +# reference, absolute_poses +# ) +# expected: npt.NDArray[np.float64] = np.array( +# [[1.0, 1.0, 0.0], [-1.0, 0.0, np.pi / 2]], dtype=float +# ) +# np.testing.assert_array_almost_equal(result, expected) + +# def test_convert_relative_to_absolute_se2_array(self) -> None: +# reference: npt.NDArray[np.float64] = np.array([1.0, 1.0, 0.0], dtype=float) +# relative_poses: npt.NDArray[np.float64] = np.array( +# [[1.0, 1.0, 0.0], [-1.0, 0.0, np.pi / 2]], dtype=float +# ) +# result: npt.NDArray[np.float64] = convert_relative_to_absolute_se2_array( +# reference, relative_poses +# ) +# expected: npt.NDArray[np.float64] = np.array( +# [[2.0, 2.0, 0.0], [0.0, 1.0, np.pi / 2]], dtype=float +# ) +# np.testing.assert_array_almost_equal(result, expected) + +# def test_convert_absolute_to_relative_point_2d_array(self) -> None: +# reference: npt.NDArray[np.float64] = np.array([1.0, 1.0, 0.0], dtype=float) +# absolute_points: npt.NDArray[np.float64] = np.array( +# [[2.0, 2.0], [0.0, 1.0]], dtype=float +# ) +# result: npt.NDArray[np.float64] = convert_absolute_to_relative_point_2d_array( +# reference, absolute_points +# ) +# expected: npt.NDArray[np.float64] = np.array([[1.0, 1.0], [-1.0, 0.0]], dtype=float) +# np.testing.assert_array_almost_equal(result, expected) + +# def test_convert_relative_to_absolute_point_2d_array(self) -> None: +# reference: npt.NDArray[np.float64] = np.array([1.0, 1.0, 0.0], dtype=float) +# relative_points: npt.NDArray[np.float64] = np.array( +# [[1.0, 1.0], [-1.0, 0.0]], dtype=float +# ) +# result: npt.NDArray[np.float64] = convert_relative_to_absolute_point_2d_array( +# reference, relative_points +# ) +# expected: npt.NDArray[np.float64] = np.array([[2.0, 2.0], [0.0, 1.0]], dtype=float) +# np.testing.assert_array_almost_equal(result, expected) + + +# class TestTransformSE3(unittest.TestCase): +# def test_translate_se3_along_x(self) -> None: +# pose: npt.NDArray[np.float64] = np.array( +# [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0], dtype=float +# ) +# distance: float = 1.0 +# result: npt.NDArray[np.float64] = translate_se3_along_x(pose, distance) +# expected: npt.NDArray[np.float64] = np.array( +# [1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0], dtype=float +# ) +# np.testing.assert_array_almost_equal(result, expected) + +# def test_translate_se3_along_y(self) -> None: +# pose: npt.NDArray[np.float64] = np.array( +# [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0], dtype=float +# ) +# distance: float = 1.0 +# result: npt.NDArray[np.float64] = translate_se3_along_y(pose, distance) +# expected: npt.NDArray[np.float64] = np.array( +# [0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0], dtype=float +# ) +# np.testing.assert_array_almost_equal(result, expected) + +# def test_translate_se3_along_z(self) -> 
None: +# pose: npt.NDArray[np.float64] = np.array( +# [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0], dtype=float +# ) +# distance: float = 1.0 +# result: npt.NDArray[np.float64] = translate_se3_along_z(pose, distance) +# expected: npt.NDArray[np.float64] = np.array( +# [0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0], dtype=float +# ) +# np.testing.assert_array_almost_equal(result, expected) + +# def test_translate_body_frame(self) -> None: +# pose: npt.NDArray[np.float64] = np.array( +# [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0], dtype=float +# ) +# translation: npt.NDArray[np.float64] = np.array([1.0, 0.0, 0.0], dtype=float) +# result: npt.NDArray[np.float64] = translate_body_frame(pose, translation) +# expected: npt.NDArray[np.float64] = np.array( +# [1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0], dtype=float +# ) +# np.testing.assert_array_almost_equal(result, expected) + +# def test_convert_absolute_to_relative_se3_array(self) -> None: +# reference: npt.NDArray[np.float64] = np.array( +# [1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 1.0], dtype=float +# ) +# absolute_poses: npt.NDArray[np.float64] = np.array( +# [ +# [2.0, 2.0, 2.0, 0.0, 0.0, 0.0, 1.0], +# [0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0], +# ], +# dtype=float, +# ) +# result: npt.NDArray[np.float64] = convert_absolute_to_relative_se3_array( +# reference, absolute_poses +# ) +# expected: npt.NDArray[np.float64] = np.array( +# [ +# [1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 1.0], +# [-1.0, 0.0, -1.0, 0.0, 0.0, 0.0, 1.0], +# ], +# dtype=float, +# ) +# np.testing.assert_array_almost_equal(result, expected) + +# def test_convert_relative_to_absolute_se3_array(self) -> None: +# reference: npt.NDArray[np.float64] = np.array( +# [1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 1.0], dtype=float +# ) +# relative_poses: npt.NDArray[np.float64] = np.array( +# [ +# [1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 1.0], +# [-1.0, 0.0, -1.0, 0.0, 0.0, 0.0, 1.0], +# ], +# dtype=float, +# ) +# result: npt.NDArray[np.float64] = convert_relative_to_absolute_se3_array( +# reference, relative_poses +# ) +# expected: npt.NDArray[np.float64] = np.array( +# [ +# [2.0, 2.0, 2.0, 0.0, 0.0, 0.0, 1.0], +# [0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0], +# ], +# dtype=float, +# ) +# np.testing.assert_array_almost_equal(result, expected) + +# def test_convert_absolute_to_relative_points_3d_array(self) -> None: +# reference: npt.NDArray[np.float64] = np.array( +# [1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 1.0], dtype=float +# ) +# absolute_points: npt.NDArray[np.float64] = np.array( +# [[2.0, 2.0, 2.0], [0.0, 1.0, 0.0]], dtype=float +# ) +# result: npt.NDArray[np.float64] = convert_absolute_to_relative_points_3d_array( +# reference, absolute_points +# ) +# expected: npt.NDArray[np.float64] = np.array( +# [[1.0, 1.0, 1.0], [-1.0, 0.0, -1.0]], dtype=float +# ) +# np.testing.assert_array_almost_equal(result, expected) + +# def test_convert_relative_to_absolute_points_3d_array(self) -> None: +# reference: npt.NDArray[np.float64] = np.array( +# [1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 1.0], dtype=float +# ) +# relative_points: npt.NDArray[np.float64] = np.array( +# [[1.0, 1.0, 1.0], [-1.0, 0.0, -1.0]], dtype=float +# ) +# result: npt.NDArray[np.float64] = convert_relative_to_absolute_points_3d_array( +# reference, relative_points +# ) +# expected: npt.NDArray[np.float64] = np.array( +# [[2.0, 2.0, 2.0], [0.0, 1.0, 0.0]], dtype=float +# ) +# np.testing.assert_array_almost_equal(result, expected) + + +# if __name__ == "__main__": +# unittest.main() diff --git a/d123/geometry/transform/transform_se3.py b/d123/geometry/transform/transform_se3.py index 67edd050..3a04b5b2 100644 --- 
a/d123/geometry/transform/transform_se3.py +++ b/d123/geometry/transform/transform_se3.py @@ -4,7 +4,7 @@ import numpy.typing as npt from d123.geometry import StateSE3, StateSE3Index, Vector3D -from d123.geometry.geometry_index import Vector3DIndex +from d123.geometry.geometry_index import Point3DIndex, Vector3DIndex from d123.geometry.rotation import EulerAngles from d123.geometry.utils.rotation_utils import ( get_rotation_matrices_from_euler_array, @@ -14,6 +14,12 @@ def translate_se3_along_z(state_se3: StateSE3, distance: float) -> StateSE3: + """Translates a SE3 state along the Z-axis. + + :param state_se3: The SE3 state to translate. + :param distance: The distance to translate along the Z-axis. + :return: The translated SE3 state. + """ R = state_se3.rotation_matrix z_axis = R[:, 2] @@ -26,6 +32,12 @@ def translate_se3_along_z(state_se3: StateSE3, distance: float) -> StateSE3: def translate_se3_along_y(state_se3: StateSE3, distance: float) -> StateSE3: + """Translates a SE3 state along the Y-axis. + + :param state_se3: The SE3 state to translate. + :param distance: The distance to translate along the Y-axis. + :return: The translated SE3 state. + """ R = state_se3.rotation_matrix y_axis = R[:, 1] @@ -38,6 +50,12 @@ def translate_se3_along_y(state_se3: StateSE3, distance: float) -> StateSE3: def translate_se3_along_x(state_se3: StateSE3, distance: float) -> StateSE3: + """Translates a SE3 state along the X-axis. + + :param state_se3: The SE3 state to translate. + :param distance: The distance to translate along the X-axis. + :return: The translated SE3 state. + """ R = state_se3.rotation_matrix x_axis = R[:, 0] @@ -51,6 +69,13 @@ def translate_se3_along_x(state_se3: StateSE3, distance: float) -> StateSE3: def translate_body_frame(state_se3: StateSE3, vector_3d: Vector3D) -> StateSE3: + """Translates a SE3 state along a vector in the body frame. + + :param state_se3: The SE3 state to translate. + :param vector_3d: The vector to translate along in the body frame. + :return: The translated SE3 state. + """ + R = state_se3.rotation_matrix # Transform to world frame @@ -62,27 +87,16 @@ def translate_body_frame(state_se3: StateSE3, vector_3d: Vector3D) -> StateSE3: return StateSE3.from_array(state_se3_array) -def convert_relative_to_absolute_points_3d_array( - origin: Union[StateSE3, npt.NDArray[np.float64]], points_3d_array: npt.NDArray[np.float64] -) -> npt.NDArray[np.float64]: - - # TODO: implement function for origin as np.ndarray - if isinstance(origin, StateSE3): - origin_array = origin.array - elif isinstance(origin, np.ndarray): - assert origin.ndim == 1 and origin.shape[-1] == len(StateSE3Index) - origin_array = origin - else: - raise TypeError(f"Expected StateSE3 or np.ndarray, got {type(origin)}") - - R = EulerAngles.from_array(origin_array[StateSE3Index.EULER_ANGLES]).rotation_matrix - absolute_points = points_3d_array @ R.T + origin.point_3d.array - return absolute_points - - def convert_absolute_to_relative_se3_array( origin: Union[StateSE3, npt.NDArray[np.float64]], se3_array: npt.NDArray[np.float64] ) -> npt.NDArray[np.float64]: + """Converts an SE3 array from the absolute frame to the relative frame. + + :param origin: The origin state in the absolute frame, as a StateSE3 or np.ndarray. + :param se3_array: The SE3 array in the absolute frame. + :raises TypeError: If the origin is not a StateSE3 or np.ndarray. + :return: The SE3 array in the relative frame, indexed by :class:`~d123.geometry.StateSE3Index`. 
+    """
     if isinstance(origin, StateSE3):
         origin_array = origin.array
         t_origin = origin.point_3d.array
@@ -95,6 +109,7 @@ def convert_absolute_to_relative_se3_array(
     else:
         raise TypeError(f"Expected StateSE3 or np.ndarray, got {type(origin)}")
 
+    assert se3_array.ndim >= 1
     assert se3_array.shape[-1] == len(StateSE3Index)
 
     # Extract positions and orientations from se3_array
@@ -126,6 +141,14 @@ def convert_absolute_to_relative_se3_array(
 def convert_relative_to_absolute_se3_array(
     origin: StateSE3, se3_array: npt.NDArray[np.float64]
 ) -> npt.NDArray[np.float64]:
+    """Converts an SE3 array from the relative frame to the absolute frame.
+
+    :param origin: The origin state in the absolute frame, as a StateSE3 or np.ndarray.
+    :param se3_array: The SE3 array in the relative frame.
+    :raises TypeError: If the origin is not a StateSE3 or np.ndarray.
+    :return: The SE3 array in the absolute frame, indexed by :class:`~d123.geometry.StateSE3Index`.
+    """
+
     if isinstance(origin, StateSE3):
         origin_array = origin.array
         t_origin = origin.point_3d.array
@@ -137,6 +160,8 @@ def convert_relative_to_absolute_se3_array(
         R_origin = get_rotation_matrix_from_euler_array(origin_array[StateSE3Index.EULER_ANGLES])
     else:
         raise TypeError(f"Expected StateSE3 or np.ndarray, got {type(origin)}")
+
+    assert se3_array.ndim >= 1
     assert se3_array.shape[-1] == len(StateSE3Index)
 
     # Extract relative positions and orientations
@@ -161,6 +186,13 @@ def convert_relative_to_absolute_se3_array(
 def convert_absolute_to_relative_points_3d_array(
     origin: Union[StateSE3, npt.NDArray[np.float64]], points_3d_array: npt.NDArray[np.float64]
 ) -> npt.NDArray[np.float64]:
+    """Converts 3D points from the absolute frame to the relative frame.
+
+    :param origin: The origin state in the absolute frame, as a StateSE3 or np.ndarray.
+    :param points_3d_array: The 3D points in the absolute frame.
+    :raises TypeError: If the origin is not a StateSE3 or np.ndarray.
+    :return: The 3D points in the relative frame, indexed by :class:`~d123.geometry.Point3DIndex`.
+    """
 
     if isinstance(origin, StateSE3):
         t_origin = origin.point_3d.array
@@ -172,6 +204,34 @@ def convert_absolute_to_relative_points_3d_array(
     else:
         raise TypeError(f"Expected StateSE3 or np.ndarray, got {type(origin)}")
 
+    assert points_3d_array.ndim >= 1
+    assert points_3d_array.shape[-1] == len(Point3DIndex)
+
     # Translate points to origin frame, then rotate to body frame
     relative_points = (points_3d_array - t_origin) @ R_origin
     return relative_points
+
+
+def convert_relative_to_absolute_points_3d_array(
+    origin: Union[StateSE3, npt.NDArray[np.float64]], points_3d_array: npt.NDArray[np.float64]
+) -> npt.NDArray[np.float64]:
+    """Converts 3D points from the relative frame to the absolute frame.
+
+    :param origin: The origin state in the absolute frame, as a StateSE3 or np.ndarray.
+    :param points_3d_array: The 3D points in the relative frame, indexed by :class:`~d123.geometry.Point3DIndex`.
+    :raises TypeError: If the origin is not a StateSE3 or np.ndarray.
+    :return: The 3D points in the absolute frame, indexed by :class:`~d123.geometry.Point3DIndex`.
+    """
+    if isinstance(origin, StateSE3):
+        origin_array = origin.array
+    elif isinstance(origin, np.ndarray):
+        assert origin.ndim == 1 and origin.shape[-1] == len(StateSE3Index)
+        origin_array = origin
+    else:
+        raise TypeError(f"Expected StateSE3 or np.ndarray, got {type(origin)}")
+
+    assert points_3d_array.shape[-1] == len(Point3DIndex)
+
+    R = EulerAngles.from_array(origin_array[StateSE3Index.EULER_ANGLES]).rotation_matrix
+    # Rotate into the world frame, then translate; origin_array also covers np.ndarray origins.
+    absolute_points = points_3d_array @ R.T + origin_array[StateSE3Index.XYZ]
+    return absolute_points

From be6629dcf48fee58f7de2d357a836bad7a947da0 Mon Sep 17 00:00:00 2001
From: Daniel Dauner
Date: Mon, 25 Aug 2025 21:25:07 +0200
Subject: [PATCH 023/145] Fix wrong import of shapely.

---
 d123/geometry/occupancy_map.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/d123/geometry/occupancy_map.py b/d123/geometry/occupancy_map.py
index b28348e7..097709b9 100644
--- a/d123/geometry/occupancy_map.py
+++ b/d123/geometry/occupancy_map.py
@@ -4,7 +4,7 @@
 
 import numpy as np
 import numpy.typing as npt
-import shapely.vectorized
+import shapely
 from shapely.geometry.base import BaseGeometry
 from shapely.strtree import STRtree
 
From 91fd900bc9bc3551c5f97b3b2a75018664045f35 Mon Sep 17 00:00:00 2001
From: DanielDauner
Date: Tue, 26 Aug 2025 14:32:04 +0200
Subject: [PATCH 024/145] Add some tests for transform functions (#44)

---
 .../av2/av2_data_converter.py | 2 +-
 d123/geometry/test/test_point.py | 2 -
 d123/geometry/test/test_transform.py | 530 ++++++++++--------
 3 files changed, 292 insertions(+), 242 deletions(-)

diff --git a/d123/dataset/dataset_specific/av2/av2_data_converter.py b/d123/dataset/dataset_specific/av2/av2_data_converter.py
index d57b4e52..f5e5e44a 100644
--- a/d123/dataset/dataset_specific/av2/av2_data_converter.py
+++ b/d123/dataset/dataset_specific/av2/av2_data_converter.py
@@ -35,7 +35,7 @@
 from d123.dataset.dataset_specific.raw_data_converter import DataConverterConfig, RawDataConverter
 from d123.dataset.logs.log_metadata import LogMetadata
 from d123.geometry import BoundingBoxSE3Index, StateSE3, Vector3D, Vector3DIndex
-from d123.geometry.transform.se3 import convert_relative_to_absolute_se3_array, get_rotation_matrix
+from d123.geometry.transform.transform_se3 import convert_relative_to_absolute_se3_array, get_rotation_matrix
 from d123.geometry.utils.constants import DEFAULT_PITCH, DEFAULT_ROLL
 
 
diff --git a/d123/geometry/test/test_point.py b/d123/geometry/test/test_point.py
index d3141bbb..94162e93 100644
--- a/d123/geometry/test/test_point.py
+++ b/d123/geometry/test/test_point.py
@@ -7,8 +7,6 @@
 from d123.geometry.geometry_index import Point3DIndex
 from d123.geometry.point import Point3D
 
-# Point3D, Point3DIndex
-
 
 class TestPoint2D(unittest.TestCase):
     """Unit tests for Point2D class."""
diff --git a/d123/geometry/test/test_transform.py b/d123/geometry/test/test_transform.py
index 7377bc41..2151317d 100644
--- a/d123/geometry/test/test_transform.py
+++ b/d123/geometry/test/test_transform.py
@@ -1,239 +1,291 @@
-# import unittest
-# import numpy as np
-# import numpy.typing as npt
-
-# from d123.geometry.se import StateSE2, StateSE3
-# from d123.geometry.transform.transform_se2 import (
-# convert_absolute_to_relative_se2_array,
-# convert_absolute_to_relative_point_2d_array,
-# convert_relative_to_absolute_se2_array,
-# convert_relative_to_absolute_point_2d_array,
-# translate_se2,
-# translate_se2_array,
-# translate_se2_along_yaw,
-# translate_se2_array_along_yaw,
-# )
-# from d123.geometry.transform.transform_se3 import (
-#
translate_se3_along_z, -# translate_se3_along_y, -# translate_se3_along_x, -# translate_body_frame, -# convert_absolute_to_relative_se3_array, -# convert_relative_to_absolute_se3_array, -# convert_absolute_to_relative_points_3d_array, -# convert_relative_to_absolute_points_3d_array, -# ) -# from d123.geometry.vector import Vector2D - - -# class TestTransformSE2(unittest.TestCase): -# def test_translate_se2(self) -> None: -# pose: StateSE2 = StateSE2.from_array(np.array([1.0, 2.0, 0.0], dtype=float)) -# translation: Vector2D = Vector2D(1.0, 1.0) -# result: StateSE2 = translate_se2(pose, translation) -# expected: StateSE2 = StateSE2.from_array(np.array([2.0, 3.0, 0.0], dtype=float)) -# np.testing.assert_array_almost_equal(result.array, expected.array) - -# def test_translate_se2_array(self) -> None: -# poses: npt.NDArray[np.float64] = np.array( -# [[1.0, 2.0, 0.0], [0.0, 0.0, np.pi / 2]], dtype=float -# ) -# translation: Vector2D = Vector2D(1.0, 1.0) -# result: npt.NDArray[np.float64] = translate_se2_array(poses, translation) -# expected: npt.NDArray[np.float64] = np.array( -# [[2.0, 3.0, 0.0], [1.0, 1.0, np.pi / 2]], dtype=float -# ) -# np.testing.assert_array_almost_equal(result, expected) - -# def test_translate_se2_along_yaw(self) -> None: -# # Move 1 unit forward in the direction of yaw (pi/2 = 90 degrees = +Y direction) -# pose: npt.NDArray[np.float64] = np.array([0.0, 0.0, np.pi / 2], dtype=float) -# distance: float = 1.0 -# result: npt.NDArray[np.float64] = translate_se2_along_yaw(pose, distance) -# expected: npt.NDArray[np.float64] = np.array([0.0, 1.0, np.pi / 2], dtype=float) -# np.testing.assert_array_almost_equal(result, expected) - -# def test_translate_se2_array_along_yaw(self) -> None: -# poses: npt.NDArray[np.float64] = np.array( -# [[0.0, 0.0, 0.0], [0.0, 0.0, np.pi / 2]], dtype=float -# ) -# distance: float = 1.0 -# result: npt.NDArray[np.float64] = translate_se2_array_along_yaw(poses, distance) -# expected: npt.NDArray[np.float64] = np.array( -# [[1.0, 0.0, 0.0], [0.0, 1.0, np.pi / 2]], dtype=float -# ) -# np.testing.assert_array_almost_equal(result, expected) - -# def test_convert_absolute_to_relative_se2_array(self) -> None: -# reference: npt.NDArray[np.float64] = np.array([1.0, 1.0, 0.0], dtype=float) -# absolute_poses: npt.NDArray[np.float64] = np.array( -# [[2.0, 2.0, 0.0], [0.0, 1.0, np.pi / 2]], dtype=float -# ) -# result: npt.NDArray[np.float64] = convert_absolute_to_relative_se2_array( -# reference, absolute_poses -# ) -# expected: npt.NDArray[np.float64] = np.array( -# [[1.0, 1.0, 0.0], [-1.0, 0.0, np.pi / 2]], dtype=float -# ) -# np.testing.assert_array_almost_equal(result, expected) - -# def test_convert_relative_to_absolute_se2_array(self) -> None: -# reference: npt.NDArray[np.float64] = np.array([1.0, 1.0, 0.0], dtype=float) -# relative_poses: npt.NDArray[np.float64] = np.array( -# [[1.0, 1.0, 0.0], [-1.0, 0.0, np.pi / 2]], dtype=float -# ) -# result: npt.NDArray[np.float64] = convert_relative_to_absolute_se2_array( -# reference, relative_poses -# ) -# expected: npt.NDArray[np.float64] = np.array( -# [[2.0, 2.0, 0.0], [0.0, 1.0, np.pi / 2]], dtype=float -# ) -# np.testing.assert_array_almost_equal(result, expected) - -# def test_convert_absolute_to_relative_point_2d_array(self) -> None: -# reference: npt.NDArray[np.float64] = np.array([1.0, 1.0, 0.0], dtype=float) -# absolute_points: npt.NDArray[np.float64] = np.array( -# [[2.0, 2.0], [0.0, 1.0]], dtype=float -# ) -# result: npt.NDArray[np.float64] = convert_absolute_to_relative_point_2d_array( -# reference, 
absolute_points -# ) -# expected: npt.NDArray[np.float64] = np.array([[1.0, 1.0], [-1.0, 0.0]], dtype=float) -# np.testing.assert_array_almost_equal(result, expected) - -# def test_convert_relative_to_absolute_point_2d_array(self) -> None: -# reference: npt.NDArray[np.float64] = np.array([1.0, 1.0, 0.0], dtype=float) -# relative_points: npt.NDArray[np.float64] = np.array( -# [[1.0, 1.0], [-1.0, 0.0]], dtype=float -# ) -# result: npt.NDArray[np.float64] = convert_relative_to_absolute_point_2d_array( -# reference, relative_points -# ) -# expected: npt.NDArray[np.float64] = np.array([[2.0, 2.0], [0.0, 1.0]], dtype=float) -# np.testing.assert_array_almost_equal(result, expected) - - -# class TestTransformSE3(unittest.TestCase): -# def test_translate_se3_along_x(self) -> None: -# pose: npt.NDArray[np.float64] = np.array( -# [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0], dtype=float -# ) -# distance: float = 1.0 -# result: npt.NDArray[np.float64] = translate_se3_along_x(pose, distance) -# expected: npt.NDArray[np.float64] = np.array( -# [1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0], dtype=float -# ) -# np.testing.assert_array_almost_equal(result, expected) - -# def test_translate_se3_along_y(self) -> None: -# pose: npt.NDArray[np.float64] = np.array( -# [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0], dtype=float -# ) -# distance: float = 1.0 -# result: npt.NDArray[np.float64] = translate_se3_along_y(pose, distance) -# expected: npt.NDArray[np.float64] = np.array( -# [0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0], dtype=float -# ) -# np.testing.assert_array_almost_equal(result, expected) - -# def test_translate_se3_along_z(self) -> None: -# pose: npt.NDArray[np.float64] = np.array( -# [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0], dtype=float -# ) -# distance: float = 1.0 -# result: npt.NDArray[np.float64] = translate_se3_along_z(pose, distance) -# expected: npt.NDArray[np.float64] = np.array( -# [0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0], dtype=float -# ) -# np.testing.assert_array_almost_equal(result, expected) - -# def test_translate_body_frame(self) -> None: -# pose: npt.NDArray[np.float64] = np.array( -# [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0], dtype=float -# ) -# translation: npt.NDArray[np.float64] = np.array([1.0, 0.0, 0.0], dtype=float) -# result: npt.NDArray[np.float64] = translate_body_frame(pose, translation) -# expected: npt.NDArray[np.float64] = np.array( -# [1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0], dtype=float -# ) -# np.testing.assert_array_almost_equal(result, expected) - -# def test_convert_absolute_to_relative_se3_array(self) -> None: -# reference: npt.NDArray[np.float64] = np.array( -# [1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 1.0], dtype=float -# ) -# absolute_poses: npt.NDArray[np.float64] = np.array( -# [ -# [2.0, 2.0, 2.0, 0.0, 0.0, 0.0, 1.0], -# [0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0], -# ], -# dtype=float, -# ) -# result: npt.NDArray[np.float64] = convert_absolute_to_relative_se3_array( -# reference, absolute_poses -# ) -# expected: npt.NDArray[np.float64] = np.array( -# [ -# [1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 1.0], -# [-1.0, 0.0, -1.0, 0.0, 0.0, 0.0, 1.0], -# ], -# dtype=float, -# ) -# np.testing.assert_array_almost_equal(result, expected) - -# def test_convert_relative_to_absolute_se3_array(self) -> None: -# reference: npt.NDArray[np.float64] = np.array( -# [1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 1.0], dtype=float -# ) -# relative_poses: npt.NDArray[np.float64] = np.array( -# [ -# [1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 1.0], -# [-1.0, 0.0, -1.0, 0.0, 0.0, 0.0, 1.0], -# ], -# dtype=float, -# ) -# result: npt.NDArray[np.float64] = convert_relative_to_absolute_se3_array( -# 
reference, relative_poses -# ) -# expected: npt.NDArray[np.float64] = np.array( -# [ -# [2.0, 2.0, 2.0, 0.0, 0.0, 0.0, 1.0], -# [0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0], -# ], -# dtype=float, -# ) -# np.testing.assert_array_almost_equal(result, expected) - -# def test_convert_absolute_to_relative_points_3d_array(self) -> None: -# reference: npt.NDArray[np.float64] = np.array( -# [1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 1.0], dtype=float -# ) -# absolute_points: npt.NDArray[np.float64] = np.array( -# [[2.0, 2.0, 2.0], [0.0, 1.0, 0.0]], dtype=float -# ) -# result: npt.NDArray[np.float64] = convert_absolute_to_relative_points_3d_array( -# reference, absolute_points -# ) -# expected: npt.NDArray[np.float64] = np.array( -# [[1.0, 1.0, 1.0], [-1.0, 0.0, -1.0]], dtype=float -# ) -# np.testing.assert_array_almost_equal(result, expected) - -# def test_convert_relative_to_absolute_points_3d_array(self) -> None: -# reference: npt.NDArray[np.float64] = np.array( -# [1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 1.0], dtype=float -# ) -# relative_points: npt.NDArray[np.float64] = np.array( -# [[1.0, 1.0, 1.0], [-1.0, 0.0, -1.0]], dtype=float -# ) -# result: npt.NDArray[np.float64] = convert_relative_to_absolute_points_3d_array( -# reference, relative_points -# ) -# expected: npt.NDArray[np.float64] = np.array( -# [[2.0, 2.0, 2.0], [0.0, 1.0, 0.0]], dtype=float -# ) -# np.testing.assert_array_almost_equal(result, expected) - - -# if __name__ == "__main__": -# unittest.main() +import unittest + +import numpy as np +import numpy.typing as npt + +from d123.geometry.se import StateSE2, StateSE3 +from d123.geometry.transform.transform_se2 import ( + convert_absolute_to_relative_point_2d_array, + convert_absolute_to_relative_se2_array, + convert_relative_to_absolute_point_2d_array, + convert_relative_to_absolute_se2_array, + translate_se2, + translate_se2_along_yaw, + translate_se2_array, + translate_se2_array_along_yaw, +) +from d123.geometry.transform.transform_se3 import ( + convert_absolute_to_relative_points_3d_array, + convert_absolute_to_relative_se3_array, + convert_relative_to_absolute_points_3d_array, + convert_relative_to_absolute_se3_array, + translate_body_frame, + translate_se3_along_x, + translate_se3_along_y, + translate_se3_along_z, +) +from d123.geometry.vector import Vector2D, Vector3D + + +class TestTransformSE2(unittest.TestCase): + + def setUp(self): + self.decimal = 6 # Decimal places for np.testing.assert_array_almost_equal + + def test_translate_se2(self) -> None: + pose: StateSE2 = StateSE2.from_array(np.array([1.0, 2.0, 0.0], dtype=np.float64)) + translation: Vector2D = Vector2D(1.0, 1.0) + + result: StateSE2 = translate_se2(pose, translation) + expected: StateSE2 = StateSE2.from_array(np.array([2.0, 3.0, 0.0], dtype=np.float64)) + np.testing.assert_array_almost_equal(result.array, expected.array, decimal=self.decimal) + + def test_translate_se2_negative_translation(self) -> None: + pose: StateSE2 = StateSE2.from_array(np.array([1.0, 2.0, 0.0], dtype=np.float64)) + translation: Vector2D = Vector2D(-0.5, -1.5) + result: StateSE2 = translate_se2(pose, translation) + expected: StateSE2 = StateSE2.from_array(np.array([0.5, 0.5, 0.0], dtype=np.float64)) + np.testing.assert_array_almost_equal(result.array, expected.array, decimal=self.decimal) + + def test_translate_se2_with_rotation(self) -> None: + pose: StateSE2 = StateSE2.from_array(np.array([0.0, 0.0, np.pi / 4], dtype=np.float64)) + translation: Vector2D = Vector2D(1.0, 0.0) + result: StateSE2 = translate_se2(pose, translation) + expected: StateSE2 = 
StateSE2.from_array(np.array([1.0, 0.0, np.pi / 4], dtype=np.float64))
+        np.testing.assert_array_almost_equal(result.array, expected.array, decimal=self.decimal)
+
+    def test_translate_se2_array(self) -> None:
+        poses: npt.NDArray[np.float64] = np.array([[1.0, 2.0, 0.0], [0.0, 0.0, np.pi / 2]], dtype=np.float64)
+        translation: Vector2D = Vector2D(1.0, 1.0)
+        result: npt.NDArray[np.float64] = translate_se2_array(poses, translation)
+        expected: npt.NDArray[np.float64] = np.array([[2.0, 3.0, 0.0], [1.0, 1.0, np.pi / 2]], dtype=np.float64)
+        np.testing.assert_array_almost_equal(result, expected)
+
+    def test_translate_se2_array_zero_translation(self) -> None:
+        poses: npt.NDArray[np.float64] = np.array([[1.0, 2.0, 0.0], [0.0, 0.0, np.pi / 2]], dtype=np.float64)
+        translation: Vector2D = Vector2D(0.0, 0.0)
+        result: npt.NDArray[np.float64] = translate_se2_array(poses, translation)
+        expected: npt.NDArray[np.float64] = poses.copy()
+        np.testing.assert_array_almost_equal(result, expected)
+
+    def test_translate_se2_along_yaw(self) -> None:
+        # Move 1 unit forward in the direction of yaw (pi/2 = 90 degrees = +Y direction)
+        pose: StateSE2 = StateSE2.from_array(np.array([0.0, 0.0, np.deg2rad(90)], dtype=np.float64))
+        vector: Vector2D = Vector2D(1.0, 0.0)
+        result: StateSE2 = translate_se2_along_yaw(pose, vector)
+        expected: StateSE2 = StateSE2.from_array(np.array([0.0, 1.0, np.deg2rad(90)], dtype=np.float64))
+        np.testing.assert_array_almost_equal(result.array, expected.array, decimal=self.decimal)
+
+    def test_translate_se2_along_yaw_backward(self) -> None:
+        pose: StateSE2 = StateSE2.from_array(np.array([0.0, 0.0, 0.0], dtype=np.float64))
+        vector: Vector2D = Vector2D(-1.0, 0.0)
+        result: StateSE2 = translate_se2_along_yaw(pose, vector)
+        expected: StateSE2 = StateSE2.from_array(np.array([-1.0, 0.0, 0.0], dtype=np.float64))
+        np.testing.assert_array_almost_equal(result.array, expected.array, decimal=self.decimal)
+
+    def test_translate_se2_along_yaw_diagonal(self) -> None:
+        pose: StateSE2 = StateSE2.from_array(np.array([1.0, 0.0, np.deg2rad(45)], dtype=np.float64))
+        vector: Vector2D = Vector2D(1.0, 0.0)
+        result: StateSE2 = translate_se2_along_yaw(pose, vector)
+        expected: StateSE2 = StateSE2.from_array(
+            np.array([1.0 + np.sqrt(2.0) / 2, 0.0 + np.sqrt(2.0) / 2, np.deg2rad(45)], dtype=np.float64)
+        )
+        np.testing.assert_array_almost_equal(result.array, expected.array, decimal=self.decimal)
+
+    def test_translate_se2_array_along_yaw(self) -> None:
+        poses: npt.NDArray[np.float64] = np.array([[0.0, 0.0, 0.0], [0.0, 0.0, np.pi / 2]], dtype=np.float64)
+        distance: Vector2D = Vector2D(1.0, 0.0)
+        result: npt.NDArray[np.float64] = translate_se2_array_along_yaw(poses, distance)
+        expected: npt.NDArray[np.float64] = np.array([[1.0, 0.0, 0.0], [0.0, 1.0, np.pi / 2]], dtype=np.float64)
+        np.testing.assert_array_almost_equal(result, expected)
+
+    def test_translate_se2_array_along_yaw_multiple_distances(self) -> None:
+        poses: npt.NDArray[np.float64] = np.array([[0.0, 0.0, 0.0], [0.0, 0.0, np.pi]], dtype=np.float64)
+        distance: Vector2D = Vector2D(2.0, 0.0)
+        result: npt.NDArray[np.float64] = translate_se2_array_along_yaw(poses, distance)
+        expected: npt.NDArray[np.float64] = np.array([[2.0, 0.0, 0.0], [-2.0, 0.0, np.pi]], dtype=np.float64)
+        np.testing.assert_array_almost_equal(result, expected)
+
+    def test_convert_absolute_to_relative_se2_array(self) -> None:
+        origin: StateSE2 = StateSE2.from_array(np.array([1.0, 1.0, 0.0], dtype=np.float64))
+        absolute_poses: npt.NDArray[np.float64] = np.array([[2.0,
2.0, 0.0], [0.0, 1.0, np.pi / 2]], dtype=np.float64) + result: npt.NDArray[np.float64] = convert_absolute_to_relative_se2_array(origin, absolute_poses) + expected: npt.NDArray[np.float64] = np.array([[1.0, 1.0, 0.0], [-1.0, 0.0, np.pi / 2]], dtype=np.float64) + np.testing.assert_array_almost_equal(result, expected) + + def test_convert_absolute_to_relative_se2_array_with_rotation(self) -> None: + reference: StateSE2 = StateSE2.from_array(np.array([0.0, 0.0, np.pi / 2], dtype=np.float64)) + absolute_poses: npt.NDArray[np.float64] = np.array([[1.0, 0.0, np.pi / 2]], dtype=np.float64) + result: npt.NDArray[np.float64] = convert_absolute_to_relative_se2_array(reference, absolute_poses) + expected: npt.NDArray[np.float64] = np.array([[0.0, -1.0, 0.0]], dtype=np.float64) + np.testing.assert_array_almost_equal(result, expected) + + def test_convert_relative_to_absolute_se2_array(self) -> None: + reference: StateSE2 = StateSE2.from_array(np.array([1.0, 1.0, 0.0], dtype=np.float64)) + relative_poses: npt.NDArray[np.float64] = np.array([[1.0, 1.0, 0.0], [-1.0, 0.0, np.pi / 2]], dtype=np.float64) + result: npt.NDArray[np.float64] = convert_relative_to_absolute_se2_array(reference, relative_poses) + expected: npt.NDArray[np.float64] = np.array([[2.0, 2.0, 0.0], [0.0, 1.0, np.pi / 2]], dtype=np.float64) + np.testing.assert_array_almost_equal(result, expected) + + def test_convert_absolute_to_relative_point_2d_array(self) -> None: + reference: StateSE2 = StateSE2.from_array(np.array([1.0, 1.0, 0.0], dtype=np.float64)) + absolute_points: npt.NDArray[np.float64] = np.array([[2.0, 2.0], [0.0, 1.0]], dtype=np.float64) + result: npt.NDArray[np.float64] = convert_absolute_to_relative_point_2d_array(reference, absolute_points) + expected: npt.NDArray[np.float64] = np.array([[1.0, 1.0], [-1.0, 0.0]], dtype=np.float64) + np.testing.assert_array_almost_equal(result, expected) + + def test_convert_absolute_to_relative_point_2d_array_with_rotation(self) -> None: + reference: StateSE2 = StateSE2.from_array(np.array([0.0, 0.0, np.pi / 2], dtype=np.float64)) + absolute_points: npt.NDArray[np.float64] = np.array([[0.0, 1.0], [1.0, 0.0]], dtype=np.float64) + result: npt.NDArray[np.float64] = convert_absolute_to_relative_point_2d_array(reference, absolute_points) + expected: npt.NDArray[np.float64] = np.array([[1.0, 0.0], [0.0, -1.0]], dtype=np.float64) + np.testing.assert_array_almost_equal(result, expected) + + def test_convert_relative_to_absolute_point_2d_array(self) -> None: + reference: StateSE2 = StateSE2.from_array(np.array([1.0, 1.0, 0.0], dtype=np.float64)) + relative_points: npt.NDArray[np.float64] = np.array([[1.0, 1.0], [-1.0, 0.0]], dtype=np.float64) + result: npt.NDArray[np.float64] = convert_relative_to_absolute_point_2d_array(reference, relative_points) + expected: npt.NDArray[np.float64] = np.array([[2.0, 2.0], [0.0, 1.0]], dtype=np.float64) + np.testing.assert_array_almost_equal(result, expected) + + +class TestTransformSE3(unittest.TestCase): + def test_translate_se3_along_x(self) -> None: + pose: StateSE3 = StateSE3.from_array(np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0], dtype=np.float64)) + distance: float = 1.0 + result: StateSE3 = translate_se3_along_x(pose, distance) + expected: StateSE3 = StateSE3.from_array(np.array([1.0, 0.0, 0.0, 0.0, 0.0, 0.0], dtype=np.float64)) + np.testing.assert_array_almost_equal(result.array, expected.array) + + def test_translate_se3_along_x_negative(self) -> None: + pose: StateSE3 = StateSE3.from_array(np.array([1.0, 2.0, 3.0, 0.0, 0.0, 0.0], dtype=np.float64)) + 
distance: float = -0.5 + result: StateSE3 = translate_se3_along_x(pose, distance) + expected: StateSE3 = StateSE3.from_array(np.array([0.5, 2.0, 3.0, 0.0, 0.0, 0.0], dtype=np.float64)) + np.testing.assert_array_almost_equal(result.array, expected.array) + + def test_translate_se3_along_y(self) -> None: + pose: StateSE3 = StateSE3.from_array(np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0], dtype=np.float64)) + distance: float = 1.0 + result: StateSE3 = translate_se3_along_y(pose, distance) + expected: StateSE3 = StateSE3.from_array(np.array([0.0, 1.0, 0.0, 0.0, 0.0, 0.0], dtype=np.float64)) + np.testing.assert_array_almost_equal(result.array, expected.array) + + def test_translate_se3_along_y_with_existing_position(self) -> None: + pose: StateSE3 = StateSE3.from_array(np.array([1.0, 2.0, 3.0, 0.0, 0.0, 0.0], dtype=np.float64)) + distance: float = 2.5 + result: StateSE3 = translate_se3_along_y(pose, distance) + expected: StateSE3 = StateSE3.from_array(np.array([1.0, 4.5, 3.0, 0.0, 0.0, 0.0], dtype=np.float64)) + np.testing.assert_array_almost_equal(result.array, expected.array) + + def test_translate_se3_along_z(self) -> None: + pose: StateSE3 = StateSE3.from_array(np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0], dtype=np.float64)) + distance: float = 1.0 + result: StateSE3 = translate_se3_along_z(pose, distance) + expected: StateSE3 = StateSE3.from_array(np.array([0.0, 0.0, 1.0, 0.0, 0.0, 0.0], dtype=np.float64)) + np.testing.assert_array_almost_equal(result.array, expected.array) + + def test_translate_se3_along_z_large_distance(self) -> None: + pose: StateSE3 = StateSE3.from_array(np.array([0.0, 0.0, 5.0, 0.0, 0.0, 0.0], dtype=np.float64)) + distance: float = 10.0 + result: StateSE3 = translate_se3_along_z(pose, distance) + expected: StateSE3 = StateSE3.from_array(np.array([0.0, 0.0, 15.0, 0.0, 0.0, 0.0], dtype=np.float64)) + np.testing.assert_array_almost_equal(result.array, expected.array) + + def test_translate_body_frame(self) -> None: + pose: StateSE3 = StateSE3.from_array(np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0], dtype=np.float64)) + translation: Vector3D = Vector3D.from_array(np.array([1.0, 0.0, 0.0], dtype=np.float64)) + result: StateSE3 = translate_body_frame(pose, translation) + expected: StateSE3 = StateSE3.from_array(np.array([1.0, 0.0, 0.0, 0.0, 0.0, 0.0], dtype=np.float64)) + np.testing.assert_array_almost_equal(result.array, expected.array) + + def test_translate_body_frame_multiple_axes(self) -> None: + pose: StateSE3 = StateSE3.from_array(np.array([1.0, 2.0, 3.0, 0.0, 0.0, 0.0], dtype=np.float64)) + translation: Vector3D = Vector3D.from_array(np.array([0.5, -1.0, 2.0], dtype=np.float64)) + result: StateSE3 = translate_body_frame(pose, translation) + expected: StateSE3 = StateSE3.from_array(np.array([1.5, 1.0, 5.0, 0.0, 0.0, 0.0], dtype=np.float64)) + np.testing.assert_array_almost_equal(result.array, expected.array) + + def test_translate_body_frame_zero_translation(self) -> None: + pose: StateSE3 = StateSE3.from_array(np.array([1.0, 2.0, 3.0, 0.0, 0.0, 0.0], dtype=np.float64)) + translation: Vector3D = Vector3D.from_array(np.array([0.0, 0.0, 0.0], dtype=np.float64)) + result: StateSE3 = translate_body_frame(pose, translation) + expected: StateSE3 = StateSE3.from_array(np.array([1.0, 2.0, 3.0, 0.0, 0.0, 0.0], dtype=np.float64)) + np.testing.assert_array_almost_equal(result.array, expected.array) + + def test_convert_absolute_to_relative_se3_array(self) -> None: + reference: StateSE3 = StateSE3.from_array(np.array([1.0, 1.0, 1.0, 0.0, 0.0, 0.0], dtype=np.float64)) + absolute_poses: 
npt.NDArray[np.float64] = np.array( + [ + [2.0, 2.0, 2.0, 0.0, 0.0, 0.0], + [0.0, 1.0, 0.0, 0.0, 0.0, 0.0], + ], + dtype=np.float64, + ) + result: npt.NDArray[np.float64] = convert_absolute_to_relative_se3_array(reference, absolute_poses) + expected: npt.NDArray[np.float64] = np.array( + [ + [1.0, 1.0, 1.0, 0.0, 0.0, 0.0], + [-1.0, 0.0, -1.0, 0.0, 0.0, 0.0], + ], + dtype=np.float64, + ) + np.testing.assert_array_almost_equal(result, expected) + + def test_convert_absolute_to_relative_se3_array_single_pose(self) -> None: + reference: StateSE3 = StateSE3.from_array(np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0], dtype=np.float64)) + absolute_poses: npt.NDArray[np.float64] = np.array([[1.0, 2.0, 3.0, 0.0, 0.0, 0.0]], dtype=np.float64) + result: npt.NDArray[np.float64] = convert_absolute_to_relative_se3_array(reference, absolute_poses) + expected: npt.NDArray[np.float64] = np.array([[1.0, 2.0, 3.0, 0.0, 0.0, 0.0]], dtype=np.float64) + np.testing.assert_array_almost_equal(result, expected) + + def test_convert_relative_to_absolute_se3_array(self) -> None: + reference: StateSE3 = StateSE3.from_array(np.array([1.0, 1.0, 1.0, 0.0, 0.0, 0.0], dtype=np.float64)) + relative_poses: npt.NDArray[np.float64] = np.array( + [ + [1.0, 1.0, 1.0, 0.0, 0.0, 0.0], + [-1.0, 0.0, -1.0, 0.0, 0.0, 0.0], + ], + dtype=np.float64, + ) + result: npt.NDArray[np.float64] = convert_relative_to_absolute_se3_array(reference, relative_poses) + expected: npt.NDArray[np.float64] = np.array( + [ + [2.0, 2.0, 2.0, 0.0, 0.0, 0.0], + [0.0, 1.0, 0.0, 0.0, 0.0, 0.0], + ], + dtype=np.float64, + ) + np.testing.assert_array_almost_equal(result, expected) + + def test_convert_absolute_to_relative_points_3d_array(self) -> None: + reference: StateSE3 = StateSE3.from_array(np.array([1.0, 1.0, 1.0, 0.0, 0.0, 0.0], dtype=np.float64)) + absolute_points: npt.NDArray[np.float64] = np.array([[2.0, 2.0, 2.0], [0.0, 1.0, 0.0]], dtype=np.float64) + result: npt.NDArray[np.float64] = convert_absolute_to_relative_points_3d_array(reference, absolute_points) + expected: npt.NDArray[np.float64] = np.array([[1.0, 1.0, 1.0], [-1.0, 0.0, -1.0]], dtype=np.float64) + np.testing.assert_array_almost_equal(result, expected) + + def test_convert_absolute_to_relative_points_3d_array_origin_reference(self) -> None: + reference: StateSE3 = StateSE3.from_array(np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0], dtype=np.float64)) + absolute_points: npt.NDArray[np.float64] = np.array([[1.0, 2.0, 3.0], [-1.0, -2.0, -3.0]], dtype=np.float64) + result: npt.NDArray[np.float64] = convert_absolute_to_relative_points_3d_array(reference, absolute_points) + expected: npt.NDArray[np.float64] = np.array([[1.0, 2.0, 3.0], [-1.0, -2.0, -3.0]], dtype=np.float64) + np.testing.assert_array_almost_equal(result, expected) + + def test_convert_relative_to_absolute_points_3d_array(self) -> None: + reference: StateSE3 = StateSE3.from_array(np.array([1.0, 1.0, 1.0, 0.0, 0.0, 0.0], dtype=np.float64)) + relative_points: npt.NDArray[np.float64] = np.array([[1.0, 1.0, 1.0], [-1.0, 0.0, -1.0]], dtype=np.float64) + result: npt.NDArray[np.float64] = convert_relative_to_absolute_points_3d_array(reference, relative_points) + expected: npt.NDArray[np.float64] = np.array([[2.0, 2.0, 2.0], [0.0, 1.0, 0.0]], dtype=np.float64) + np.testing.assert_array_almost_equal(result, expected) + + def test_convert_relative_to_absolute_points_3d_array_empty(self) -> None: + reference: StateSE3 = StateSE3.from_array(np.array([1.0, 1.0, 1.0, 0.0, 0.0, 0.0], dtype=np.float64)) + relative_points: npt.NDArray[np.float64] = np.array([], 
dtype=np.float64).reshape(0, 3) + result: npt.NDArray[np.float64] = convert_relative_to_absolute_points_3d_array(reference, relative_points) + expected: npt.NDArray[np.float64] = np.array([], dtype=np.float64).reshape(0, 3) + np.testing.assert_array_almost_equal(result, expected) + + +if __name__ == "__main__": + unittest.main() From 87f48bda37fbd97225f8b27649463ac7c88e5743 Mon Sep 17 00:00:00 2001 From: Daniel Dauner Date: Tue, 26 Aug 2025 16:01:35 +0200 Subject: [PATCH 025/145] Refactor transform se2 and se3. Rename important functions for a consistent 2D/3D scheme. (#44) --- .../vehicle_state/vehicle_parameters.py | 30 +- .../visualization/matplotlib/observation.py | 8 +- d123/common/visualization/viser/utils_v2.py | 4 +- .../waymo_map_utils/womp_boundary_utils.py | 6 +- d123/geometry/bounding_box.py | 8 + d123/geometry/occupancy_map.py | 11 +- d123/geometry/rotation.py | 12 +- d123/geometry/se.py | 6 +- d123/geometry/test/test_transform.py | 328 +++++++++++++++++- d123/geometry/transform/transform_se2.py | 96 ++--- d123/geometry/transform/transform_se3.py | 26 +- notebooks/viz/bev_matplotlib.ipynb | 2 +- notebooks/viz/video_example.ipynb | 319 +++++++++++++++++ 13 files changed, 747 insertions(+), 109 deletions(-) create mode 100644 notebooks/viz/video_example.ipynb diff --git a/d123/common/datatypes/vehicle_state/vehicle_parameters.py b/d123/common/datatypes/vehicle_state/vehicle_parameters.py index 19ff334b..152b9382 100644 --- a/d123/common/datatypes/vehicle_state/vehicle_parameters.py +++ b/d123/common/datatypes/vehicle_state/vehicle_parameters.py @@ -1,8 +1,8 @@ from dataclasses import dataclass -from d123.geometry import StateSE2, StateSE3, Vector2D -from d123.geometry.transform.transform_se2 import translate_se2_along_yaw -from d123.geometry.transform.transform_se3 import translate_se3_along_x, translate_se3_along_z +from d123.geometry import StateSE2, StateSE3, Vector2D, Vector3D +from d123.geometry.transform.transform_se2 import translate_se2_along_body_frame +from d123.geometry.transform.transform_se3 import translate_se3_along_body_frame # TODO: Add more vehicle parameters, potentially extend the parameters. @@ -83,12 +83,13 @@ def center_se3_to_rear_axle_se3(center_se3: StateSE3, vehicle_parameters: Vehicl :param vehicle_parameters: The vehicle parameters. :return: The rear axle state. """ - return translate_se3_along_z( - translate_se3_along_x( - center_se3, + return translate_se3_along_body_frame( + center_se3, + Vector3D( -vehicle_parameters.rear_axle_to_center_longitudinal, + 0, + -vehicle_parameters.rear_axle_to_center_vertical, ), - -vehicle_parameters.rear_axle_to_center_vertical, ) @@ -99,12 +100,13 @@ def rear_axle_se3_to_center_se3(rear_axle_se3: StateSE3, vehicle_parameters: Veh :param vehicle_parameters: The vehicle parameters. :return: The center state. """ - return translate_se3_along_x( - translate_se3_along_z( - rear_axle_se3, + return translate_se3_along_body_frame( + rear_axle_se3, + Vector3D( + vehicle_parameters.rear_axle_to_center_longitudinal, + 0, vehicle_parameters.rear_axle_to_center_vertical, ), - vehicle_parameters.rear_axle_to_center_longitudinal, ) @@ -115,7 +117,7 @@ def center_se2_to_rear_axle_se2(center_se2: StateSE2, vehicle_parameters: Vehicl :param vehicle_parameters: The vehicle parameters. :return: The rear axle state in 2D. 
""" - return translate_se2_along_yaw(center_se2, Vector2D(-vehicle_parameters.rear_axle_to_center_longitudinal, 0)) + return translate_se2_along_body_frame(center_se2, Vector2D(-vehicle_parameters.rear_axle_to_center_longitudinal, 0)) def rear_axle_se2_to_center_se2(rear_axle_se2: StateSE2, vehicle_parameters: VehicleParameters) -> StateSE2: @@ -125,4 +127,6 @@ def rear_axle_se2_to_center_se2(rear_axle_se2: StateSE2, vehicle_parameters: Veh :param vehicle_parameters: The vehicle parameters. :return: The center state in 2D. """ - return translate_se2_along_yaw(rear_axle_se2, Vector2D(vehicle_parameters.rear_axle_to_center_longitudinal, 0)) + return translate_se2_along_body_frame( + rear_axle_se2, Vector2D(vehicle_parameters.rear_axle_to_center_longitudinal, 0) + ) diff --git a/d123/common/visualization/matplotlib/observation.py b/d123/common/visualization/matplotlib/observation.py index 6814f600..27a2fff1 100644 --- a/d123/common/visualization/matplotlib/observation.py +++ b/d123/common/visualization/matplotlib/observation.py @@ -28,7 +28,7 @@ from d123.dataset.scene.abstract_scene import AbstractScene from d123.geometry import BoundingBoxSE2, BoundingBoxSE3, Point2D from d123.geometry.geometry_index import StateSE2Index -from d123.geometry.transform.transform_se2 import translate_se2_along_yaw +from d123.geometry.transform.transform_se2 import translate_se2_along_body_frame from d123.geometry.vector import Vector2D @@ -153,12 +153,10 @@ def add_bounding_box_to_ax( if plot_config.marker_style is not None: assert plot_config.marker_style in ["-", "^"], f"Unknown marker style: {plot_config.marker_style}" if plot_config.marker_style == "-": - center_se2 = ( - bounding_box.center if isinstance(bounding_box, BoundingBoxSE2) else bounding_box.center.state_se2 - ) + center_se2 = bounding_box.center_se2 arrow = np.zeros((2, 2), dtype=np.float64) arrow[0] = center_se2.point_2d.array - arrow[1] = translate_se2_along_yaw( + arrow[1] = translate_se2_along_body_frame( center_se2, Vector2D(bounding_box.length / 2.0 + 0.5, 0.0), ).array[StateSE2Index.XY] diff --git a/d123/common/visualization/viser/utils_v2.py b/d123/common/visualization/viser/utils_v2.py index c88faf3e..6a747698 100644 --- a/d123/common/visualization/viser/utils_v2.py +++ b/d123/common/visualization/viser/utils_v2.py @@ -7,7 +7,7 @@ # from d123.common.datatypes.sensor.camera_parameters import get_nuplan_camera_parameters from d123.geometry import BoundingBoxSE3, Corners3DIndex, Point3D, Point3DIndex, Vector3D -from d123.geometry.transform.transform_se3 import translate_body_frame +from d123.geometry.transform.transform_se3 import translate_se3_along_body_frame # TODO: Refactor this file. # TODO: Add general utilities for 3D primitives and mesh support. 
@@ -31,7 +31,7 @@ def _get_bounding_box_corners(bounding_box: BoundingBoxSE3) -> npt.NDArray[np.fl bounding_box_extent = np.array([bounding_box.length, bounding_box.width, bounding_box.height], dtype=np.float64) for idx, vec in corner_extent_factors.items(): vector_3d = Vector3D.from_array(bounding_box_extent * vec.array) - corners[idx] = translate_body_frame(bounding_box.center, vector_3d).point_3d.array + corners[idx] = translate_se3_along_body_frame(bounding_box.center, vector_3d).point_3d.array return corners diff --git a/d123/dataset/dataset_specific/wopd/waymo_map_utils/womp_boundary_utils.py b/d123/dataset/dataset_specific/wopd/waymo_map_utils/womp_boundary_utils.py index 99e63192..84fe34ed 100644 --- a/d123/dataset/dataset_specific/wopd/waymo_map_utils/womp_boundary_utils.py +++ b/d123/dataset/dataset_specific/wopd/waymo_map_utils/womp_boundary_utils.py @@ -6,7 +6,7 @@ import shapely.geometry as geom from d123.geometry import OccupancyMap2D, Point3D, Polyline3D, PolylineSE2, StateSE2, Vector2D -from d123.geometry.transform.transform_se2 import translate_se2_along_yaw +from d123.geometry.transform.transform_se2 import translate_se2_along_body_frame from d123.geometry.utils.rotation_utils import normalize_angle MAX_LANE_WIDTH = 25.0 # meters @@ -64,7 +64,7 @@ def _collect_perpendicular_hits( assert sign in [1.0, -1.0], "Sign must be either 1.0 (left) or -1.0 (right)" # perp_start_point = translate_along_yaw(lane_query_se2, Vector2D(0.0, sign * PERP_START_OFFSET)) perp_start_point = lane_query_se2 - perp_end_point = translate_se2_along_yaw(lane_query_se2, Vector2D(0.0, sign * MAX_LANE_WIDTH / 2.0)) + perp_end_point = translate_se2_along_body_frame(lane_query_se2, Vector2D(0.0, sign * MAX_LANE_WIDTH / 2.0)) perp_linestring = geom.LineString([[perp_start_point.x, perp_start_point.y], [perp_end_point.x, perp_end_point.y]]) lane_linestring = occupancy_2d.geometries[occupancy_2d.id_to_idx[lane_token]] @@ -261,7 +261,7 @@ def _get_default_boundary_point_3d( lane_query_se2: StateSE2, lane_query_3d: Point3D, sign: float ) -> Point3D: perp_boundary_distance = DEFAULT_LANE_WIDTH / 2.0 - boundary_point_se2 = translate_se2_along_yaw( + boundary_point_se2 = translate_se2_along_body_frame( lane_query_se2, Vector2D(0.0, sign * perp_boundary_distance) ) return Point3D(boundary_point_se2.x, boundary_point_se2.y, lane_query_3d.z) diff --git a/d123/geometry/bounding_box.py b/d123/geometry/bounding_box.py index dd211ca0..459e56bf 100644 --- a/d123/geometry/bounding_box.py +++ b/d123/geometry/bounding_box.py @@ -198,6 +198,14 @@ def center_se3(self) -> StateSE3: """ return self.center + @property + def center_se2(self) -> StateSE2: + """The center of the bounding box as a StateSE2 instance. + + :return: The center of the bounding box as a StateSE2 instance. + """ + return self.center_se3.state_se2 + @property def length(self) -> float: """The length of the bounding box along the x-axis in the local frame. 
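A minimal sketch of the renamed body-frame helpers in use (an illustrative aside, not part of the patch; it assumes the d123 import paths touched above and the [x, y, yaw] / [x, y, z, roll, pitch, yaw] array layouts exercised by the tests in this patch):

import numpy as np

from d123.geometry import StateSE2, StateSE3, Vector2D, Vector3D
from d123.geometry.transform.transform_se2 import translate_se2_along_body_frame
from d123.geometry.transform.transform_se3 import translate_se3_along_body_frame

# Facing +90 deg, one meter "forward" in the body frame moves the pose +1 m along world y.
pose_2d = StateSE2.from_array(np.array([0.0, 0.0, np.deg2rad(90)], dtype=np.float64))
moved_2d = translate_se2_along_body_frame(pose_2d, Vector2D(1.0, 0.0))  # ~[0.0, 1.0, pi/2]

# The SE3 variant takes a Vector3D and agrees with the SE2 result for a yaw-only pose.
pose_3d = StateSE3.from_array(np.array([0.0, 0.0, 0.0, 0.0, 0.0, np.deg2rad(90)], dtype=np.float64))
moved_3d = translate_se3_along_body_frame(pose_3d, Vector3D(1.0, 0.0, 0.0))  # ~[0.0, 1.0, 0.0, 0.0, 0.0, pi/2]
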
diff --git a/d123/geometry/occupancy_map.py b/d123/geometry/occupancy_map.py index 097709b9..648ea91a 100644 --- a/d123/geometry/occupancy_map.py +++ b/d123/geometry/occupancy_map.py @@ -99,7 +99,16 @@ def query( self, geometry: Union[BaseGeometry, np.ndarray], predicate: Optional[ - Literal["intersects", "within", "contains", "overlaps", "crosses", "touches", "covers", "covered_by"] + Literal[ + "intersects", + "within", + "contains", + "overlaps", + "crosses", + "touches", + "covers", + "covered_by", + ] ] = None, distance: Optional[float] = None, ) -> npt.NDArray[np.int64]: diff --git a/d123/geometry/rotation.py b/d123/geometry/rotation.py index f98a601d..3fc4afa9 100644 --- a/d123/geometry/rotation.py +++ b/d123/geometry/rotation.py @@ -80,20 +80,16 @@ def yaw(self) -> float: """ return self._array[EulerAnglesIndex.YAW] - @cached_property + @property def array(self) -> npt.NDArray[np.float64]: """Converts the EulerAngles instance to a numpy array. :return: A numpy array of shape (3,) containing the Euler angles [roll, pitch, yaw], indexed by \ :class:`~d123.geometry.EulerAnglesIndex`. """ - array = np.zeros(len(EulerAnglesIndex), dtype=np.float64) - array[EulerAnglesIndex.ROLL] = self.roll - array[EulerAnglesIndex.PITCH] = self.pitch - array[EulerAnglesIndex.YAW] = self.yaw - return array + return self._array - @property + @cached_property def rotation_matrix(self) -> npt.NDArray[np.float64]: """Returns the 3x3 rotation matrix representation of the Euler angles. NOTE: The rotation order is intrinsic Z-Y'-X'' (yaw-pitch-roll). @@ -206,7 +202,7 @@ def array(self) -> npt.NDArray[np.float64]: """ return self._array - @property + @cached_property def pyquaternion(self) -> pyquaternion.Quaternion: """Returns the pyquaternion.Quaternion representation of the quaternion. diff --git a/d123/geometry/se.py b/d123/geometry/se.py index 378128ca..1eb021c6 100644 --- a/d123/geometry/se.py +++ b/d123/geometry/se.py @@ -256,7 +256,7 @@ def rotation_matrix(self) -> npt.NDArray[np.float64]: :return: A 3x3 numpy array representing the rotation matrix. """ - return EulerAngles.from_array(self.array[StateSE3Index.EULER_ANGLES]).rotation_matrix + return self.euler_angles.rotation_matrix @property def transformation_matrix(self) -> npt.NDArray[np.float64]: @@ -270,6 +270,10 @@ def transformation_matrix(self) -> npt.NDArray[np.float64]: transformation_matrix[:3, 3] = self.array[StateSE3Index.XYZ] return transformation_matrix + @cached_property + def euler_angles(self) -> EulerAngles: + return EulerAngles.from_array(self.array[StateSE3Index.EULER_ANGLES]) + @property def quaternion(self) -> npt.NDArray[np.float64]: """Returns the quaternion (w, x, y, z) representation of the state's orientation. 
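The property/cached_property swaps in rotation.py and se.py above follow one rule: handing back the stored array is trivial, so array becomes a plain property, while real conversions (rotation_matrix, pyquaternion, the new euler_angles) are computed once and memoized. A small self-contained sketch of that pattern (a toy stand-in under stated assumptions: angles immutable after construction, scipy available; not the actual d123 class):

from functools import cached_property

import numpy as np
from scipy.spatial.transform import Rotation


class EulerAnglesSketch:
    """Toy stand-in illustrating the cheap-property vs. cached-conversion split."""

    def __init__(self, roll: float, pitch: float, yaw: float) -> None:
        self._array = np.array([roll, pitch, yaw], dtype=np.float64)

    @property
    def array(self) -> np.ndarray:
        # Cheap accessor: returns the backing array directly, so caching it buys nothing.
        return self._array

    @cached_property
    def rotation_matrix(self) -> np.ndarray:
        # Expensive conversion: runs once on first access and is memoized on the instance.
        # Intrinsic Z-Y'-X'' (yaw-pitch-roll) order, matching the docstring in rotation.py;
        # uppercase axis letters select intrinsic rotations in scipy.
        roll, pitch, yaw = self._array
        return Rotation.from_euler("ZYX", [yaw, pitch, roll]).as_matrix()

The memoization is only safe because the angles are never mutated after construction; a setter on array would have to invalidate the cached attributes.
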
diff --git a/d123/geometry/test/test_transform.py b/d123/geometry/test/test_transform.py index 2151317d..f126e33a 100644 --- a/d123/geometry/test/test_transform.py +++ b/d123/geometry/test/test_transform.py @@ -3,6 +3,7 @@ import numpy as np import numpy.typing as npt +from d123.geometry.geometry_index import EulerAnglesIndex, Point2DIndex, Point3DIndex, StateSE2Index, StateSE3Index from d123.geometry.se import StateSE2, StateSE3 from d123.geometry.transform.transform_se2 import ( convert_absolute_to_relative_point_2d_array, @@ -10,16 +11,16 @@ convert_relative_to_absolute_point_2d_array, convert_relative_to_absolute_se2_array, translate_se2, - translate_se2_along_yaw, + translate_se2_along_body_frame, translate_se2_array, - translate_se2_array_along_yaw, + translate_se2_array_along_body_frame, ) from d123.geometry.transform.transform_se3 import ( convert_absolute_to_relative_points_3d_array, convert_absolute_to_relative_se3_array, convert_relative_to_absolute_points_3d_array, convert_relative_to_absolute_se3_array, - translate_body_frame, + translate_se3_along_body_frame, translate_se3_along_x, translate_se3_along_y, translate_se3_along_z, @@ -72,21 +73,21 @@ def test_translate_se2_along_yaw(self) -> None: # Move 1 unit forward in the direction of yaw (pi/2 = 90 degrees = +Y direction) pose: StateSE2 = StateSE2.from_array(np.array([0.0, 0.0, np.deg2rad(90)], dtype=np.float64)) vector: Vector2D = Vector2D(1.0, 0.0) - result: StateSE2 = translate_se2_along_yaw(pose, vector) + result: StateSE2 = translate_se2_along_body_frame(pose, vector) expected: StateSE2 = StateSE2.from_array(np.array([0.0, 1.0, np.deg2rad(90)], dtype=np.float64)) np.testing.assert_array_almost_equal(result.array, expected.array, decimal=self.decimal) def test_translate_se2_along_yaw_backward(self) -> None: pose: StateSE2 = StateSE2.from_array(np.array([0.0, 0.0, 0.0], dtype=np.float64)) vector: Vector2D = Vector2D(-1.0, 0.0) - result: StateSE2 = translate_se2_along_yaw(pose, vector) + result: StateSE2 = translate_se2_along_body_frame(pose, vector) expected: StateSE2 = StateSE2.from_array(np.array([-1.0, 0.0, 0.0], dtype=np.float64)) np.testing.assert_array_almost_equal(result.array, expected.array, decimal=self.decimal) def test_translate_se2_along_yaw_diagonal(self) -> None: pose: StateSE2 = StateSE2.from_array(np.array([1.0, 0.0, np.deg2rad(45)], dtype=np.float64)) vector: Vector2D = Vector2D(1.0, 0.0) - result: StateSE2 = translate_se2_along_yaw(pose, vector) + result: StateSE2 = translate_se2_along_body_frame(pose, vector) expected: StateSE2 = StateSE2.from_array( np.array([1.0 + np.sqrt(2.0) / 2, 0.0 + np.sqrt(2.0) / 2, np.deg2rad(45)], dtype=np.float64) ) @@ -95,14 +96,14 @@ def test_translate_se2_along_yaw_diagonal(self) -> None: def test_translate_se2_array_along_yaw(self) -> None: poses: npt.NDArray[np.float64] = np.array([[0.0, 0.0, 0.0], [0.0, 0.0, np.pi / 2]], dtype=np.float64) distance: float = Vector2D(1.0, 0.0) - result: npt.NDArray[np.float64] = translate_se2_array_along_yaw(poses, distance) + result: npt.NDArray[np.float64] = translate_se2_array_along_body_frame(poses, distance) expected: npt.NDArray[np.float64] = np.array([[1.0, 0.0, 0.0], [0.0, 1.0, np.pi / 2]], dtype=np.float64) np.testing.assert_array_almost_equal(result, expected) def test_translate_se2_array_along_yaw_multiple_distances(self) -> None: poses: npt.NDArray[np.float64] = np.array([[0.0, 0.0, 0.0], [0.0, 0.0, np.pi]], dtype=np.float64) distance: float = Vector2D(2.0, 0.0) - result: npt.NDArray[np.float64] = 
translate_se2_array_along_yaw(poses, distance) + result: npt.NDArray[np.float64] = translate_se2_array_along_body_frame(poses, distance) expected: npt.NDArray[np.float64] = np.array([[2.0, 0.0, 0.0], [-2.0, 0.0, np.pi]], dtype=np.float64) np.testing.assert_array_almost_equal(result, expected) @@ -150,6 +151,11 @@ def test_convert_relative_to_absolute_point_2d_array(self) -> None: class TestTransformSE3(unittest.TestCase): + + def setUp(self): + self.decimal = 6 # Decimal places for np.testing.assert_array_almost_equal + self.num_consistency_tests = 10 # Number of random test cases for consistency checks + def test_translate_se3_along_x(self) -> None: pose: StateSE3 = StateSE3.from_array(np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0], dtype=np.float64)) distance: float = 1.0 @@ -195,24 +201,84 @@ def test_translate_se3_along_z_large_distance(self) -> None: def test_translate_body_frame(self) -> None: pose: StateSE3 = StateSE3.from_array(np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0], dtype=np.float64)) translation: Vector3D = Vector3D.from_array(np.array([1.0, 0.0, 0.0], dtype=np.float64)) - result: StateSE3 = translate_body_frame(pose, translation) + result: StateSE3 = translate_se3_along_body_frame(pose, translation) expected: StateSE3 = StateSE3.from_array(np.array([1.0, 0.0, 0.0, 0.0, 0.0, 0.0], dtype=np.float64)) np.testing.assert_array_almost_equal(result.array, expected.array) def test_translate_body_frame_multiple_axes(self) -> None: pose: StateSE3 = StateSE3.from_array(np.array([1.0, 2.0, 3.0, 0.0, 0.0, 0.0], dtype=np.float64)) translation: Vector3D = Vector3D.from_array(np.array([0.5, -1.0, 2.0], dtype=np.float64)) - result: StateSE3 = translate_body_frame(pose, translation) + result: StateSE3 = translate_se3_along_body_frame(pose, translation) expected: StateSE3 = StateSE3.from_array(np.array([1.5, 1.0, 5.0, 0.0, 0.0, 0.0], dtype=np.float64)) np.testing.assert_array_almost_equal(result.array, expected.array) def test_translate_body_frame_zero_translation(self) -> None: pose: StateSE3 = StateSE3.from_array(np.array([1.0, 2.0, 3.0, 0.0, 0.0, 0.0], dtype=np.float64)) translation: Vector3D = Vector3D.from_array(np.array([0.0, 0.0, 0.0], dtype=np.float64)) - result: StateSE3 = translate_body_frame(pose, translation) + result: StateSE3 = translate_se3_along_body_frame(pose, translation) expected: StateSE3 = StateSE3.from_array(np.array([1.0, 2.0, 3.0, 0.0, 0.0, 0.0], dtype=np.float64)) np.testing.assert_array_almost_equal(result.array, expected.array) + def test_translate_body_frame_consistency(self) -> None: + + for _ in range(self.num_consistency_tests): + # Generate random parameters + x_distance: float = np.random.uniform(-10.0, 10.0) + y_distance: float = np.random.uniform(-10.0, 10.0) + z_distance: float = np.random.uniform(-10.0, 10.0) + + start_x: float = np.random.uniform(-5.0, 5.0) + start_y: float = np.random.uniform(-5.0, 5.0) + start_z: float = np.random.uniform(-5.0, 5.0) + + start_roll: float = np.random.uniform(-np.pi, np.pi) + start_pitch: float = np.random.uniform(-np.pi, np.pi) + start_yaw: float = np.random.uniform(-np.pi, np.pi) + + original_pose: StateSE3 = StateSE3.from_array( + np.array( + [ + start_x, + start_y, + start_z, + start_roll, + start_pitch, + start_yaw, + ], + dtype=np.float64, + ) + ) + + # x-axis translation + translation_x: Vector3D = Vector3D.from_array(np.array([x_distance, 0.0, 0.0], dtype=np.float64)) + result_body_frame_x: StateSE3 = translate_se3_along_body_frame(original_pose, translation_x) + result_axis_x: StateSE3 = 
translate_se3_along_x(original_pose, x_distance)
+            np.testing.assert_array_almost_equal(result_body_frame_x.array, result_axis_x.array, decimal=self.decimal)
+
+            # y-axis translation
+            translation_y: Vector3D = Vector3D.from_array(np.array([0.0, y_distance, 0.0], dtype=np.float64))
+            result_body_frame_y: StateSE3 = translate_se3_along_body_frame(original_pose, translation_y)
+            result_axis_y: StateSE3 = translate_se3_along_y(original_pose, y_distance)
+            np.testing.assert_array_almost_equal(result_body_frame_y.array, result_axis_y.array, decimal=self.decimal)
+
+            # z-axis translation
+            translation_z: Vector3D = Vector3D.from_array(np.array([0.0, 0.0, z_distance], dtype=np.float64))
+            result_body_frame_z: StateSE3 = translate_se3_along_body_frame(original_pose, translation_z)
+            result_axis_z: StateSE3 = translate_se3_along_z(original_pose, z_distance)
+            np.testing.assert_array_almost_equal(result_body_frame_z.array, result_axis_z.array, decimal=self.decimal)
+
+            # all axes translation
+            translation_all: Vector3D = Vector3D.from_array(
+                np.array([x_distance, y_distance, z_distance], dtype=np.float64)
+            )
+            result_body_frame_all: StateSE3 = translate_se3_along_body_frame(original_pose, translation_all)
+            intermediate_pose: StateSE3 = translate_se3_along_x(original_pose, x_distance)
+            intermediate_pose = translate_se3_along_y(intermediate_pose, y_distance)
+            result_axis_all: StateSE3 = translate_se3_along_z(intermediate_pose, z_distance)
+            np.testing.assert_array_almost_equal(
+                result_body_frame_all.array, result_axis_all.array, decimal=self.decimal
+            )
+
     def test_convert_absolute_to_relative_se3_array(self) -> None:
         reference: StateSE3 = StateSE3.from_array(np.array([1.0, 1.0, 1.0, 0.0, 0.0, 0.0], dtype=np.float64))
         absolute_poses: npt.NDArray[np.float64] = np.array(
@@ -287,5 +353,245 @@ def test_convert_relative_to_absolute_points_3d_array_empty(self) -> None:
         np.testing.assert_array_almost_equal(result, expected)

+
+class TestTransformConsistency(unittest.TestCase):
+    def setUp(self):
+        self.decimal = 6  # Decimal places for np.testing.assert_array_almost_equal
+        self.num_consistency_tests = 10  # Number of random test cases for consistency checks
+
+        self.max_pose_xyz = 100.0
+        self.min_random_poses = 1
+        self.max_random_poses = 20
+
+    def _get_random_se2_array(self, size: int) -> npt.NDArray[np.float64]:
+        """Generate random SE2 poses"""
+        random_se2_array = np.random.uniform(-self.max_pose_xyz, self.max_pose_xyz, (size, len(StateSE2Index)))
+        random_se2_array[:, StateSE2Index.YAW] = np.random.uniform(-np.pi, np.pi, size)  # yaw angles
+        return random_se2_array
+
+    def _get_random_se3_array(self, size: int) -> npt.NDArray[np.float64]:
+        """Generate random SE3 poses"""
+        random_se3_array = np.zeros((size, len(StateSE3Index)), dtype=np.float64)
+        random_se3_array[:, StateSE3Index.XYZ] = np.random.uniform(-self.max_pose_xyz, self.max_pose_xyz, (size, 3))
+        random_se3_array[:, StateSE3Index.EULER_ANGLES] = np.random.uniform(
+            -np.pi, np.pi, (size, len(EulerAnglesIndex))
+        )
+        return random_se3_array
+
+    def test_se2_absolute_relative_conversion_consistency(self) -> None:
+        """Test that converting absolute->relative->absolute returns original poses"""
+        for _ in range(self.num_consistency_tests):
+            # Generate random reference pose
+            reference = StateSE2.from_array(self._get_random_se2_array(1)[0])
+
+            # Generate random absolute poses
+            num_poses = np.random.randint(self.min_random_poses, self.max_random_poses)
+            absolute_poses = self._get_random_se2_array(num_poses)
+
+            # Convert absolute -> 
relative -> absolute + relative_poses = convert_absolute_to_relative_se2_array(reference, absolute_poses) + recovered_absolute = convert_relative_to_absolute_se2_array(reference, relative_poses) + + np.testing.assert_array_almost_equal(absolute_poses, recovered_absolute, decimal=self.decimal) + + def test_se2_points_absolute_relative_conversion_consistency(self) -> None: + """Test that converting absolute->relative->absolute returns original points""" + for _ in range(self.num_consistency_tests): + # Generate random reference pose + reference = StateSE2.from_array(self._get_random_se2_array(1)[0]) + + # Generate random absolute points + num_points = np.random.randint(self.min_random_poses, self.max_random_poses) + absolute_points = self._get_random_se2_array(num_points)[:, StateSE2Index.XY] + + # Convert absolute -> relative -> absolute + relative_points = convert_absolute_to_relative_point_2d_array(reference, absolute_points) + recovered_absolute = convert_relative_to_absolute_point_2d_array(reference, relative_points) + + np.testing.assert_array_almost_equal(absolute_points, recovered_absolute, decimal=self.decimal) + + def test_se2_points_consistency(self) -> None: + """Test whether SE2 point and pose conversions are consistent""" + for _ in range(self.num_consistency_tests): + # Generate random reference pose + reference = StateSE2.from_array(self._get_random_se2_array(1)[0]) + + # Generate random absolute points + num_poses = np.random.randint(self.min_random_poses, self.max_random_poses) + absolute_se2 = self._get_random_se2_array(num_poses) + + # Convert absolute -> relative -> absolute + relative_se2 = convert_absolute_to_relative_se2_array(reference, absolute_se2) + relative_points = convert_absolute_to_relative_point_2d_array( + reference, absolute_se2[..., StateSE2Index.XY] + ) + np.testing.assert_array_almost_equal( + relative_se2[..., StateSE2Index.XY], relative_points, decimal=self.decimal + ) + + recovered_absolute_se2 = convert_relative_to_absolute_se2_array(reference, relative_se2) + absolute_points = convert_relative_to_absolute_point_2d_array(reference, relative_points) + np.testing.assert_array_almost_equal( + recovered_absolute_se2[..., StateSE2Index.XY], absolute_points, decimal=self.decimal + ) + + def test_se3_absolute_relative_conversion_consistency(self) -> None: + """Test that converting absolute->relative->absolute returns original poses""" + for _ in range(self.num_consistency_tests): + # Generate random reference pose + reference = StateSE3.from_array(self._get_random_se3_array(1)[0]) + + # Generate random absolute poses + num_poses = np.random.randint(self.min_random_poses, self.max_random_poses) + absolute_poses = self._get_random_se3_array(num_poses) + + # Convert absolute -> relative -> absolute + relative_poses = convert_absolute_to_relative_se3_array(reference, absolute_poses) + recovered_absolute = convert_relative_to_absolute_se3_array(reference, relative_poses) + + np.testing.assert_array_almost_equal(absolute_poses, recovered_absolute, decimal=self.decimal) + + def test_se3_points_absolute_relative_conversion_consistency(self) -> None: + """Test that converting absolute->relative->absolute returns original points""" + for _ in range(self.num_consistency_tests): + # Generate random reference pose + reference = StateSE3.from_array(self._get_random_se3_array(1)[0]) + + # Generate random absolute points + num_points = np.random.randint(self.min_random_poses, self.max_random_poses) + absolute_points = self._get_random_se3_array(num_points)[:, 
StateSE3Index.XYZ] + + # Convert absolute -> relative -> absolute + relative_points = convert_absolute_to_relative_points_3d_array(reference, absolute_points) + recovered_absolute = convert_relative_to_absolute_points_3d_array(reference, relative_points) + + np.testing.assert_array_almost_equal(absolute_points, recovered_absolute, decimal=self.decimal) + + def test_se3_points_consistency(self) -> None: + """Test whether SE3 point and pose conversions are consistent""" + for _ in range(self.num_consistency_tests): + # Generate random reference pose + reference = StateSE3.from_array(self._get_random_se3_array(1)[0]) + + # Generate random absolute points + num_poses = np.random.randint(self.min_random_poses, self.max_random_poses) + absolute_se3 = self._get_random_se3_array(num_poses) + + # Convert absolute -> relative -> absolute + relative_se3 = convert_absolute_to_relative_se3_array(reference, absolute_se3) + relative_points = convert_absolute_to_relative_points_3d_array( + reference, absolute_se3[..., StateSE3Index.XYZ] + ) + np.testing.assert_array_almost_equal( + relative_se3[..., StateSE3Index.XYZ], relative_points, decimal=self.decimal + ) + + recovered_absolute_se3 = convert_relative_to_absolute_se3_array(reference, relative_se3) + absolute_points = convert_relative_to_absolute_points_3d_array(reference, relative_points) + np.testing.assert_array_almost_equal( + recovered_absolute_se3[..., StateSE3Index.XYZ], absolute_points, decimal=self.decimal + ) + + def test_se2_se3_translation_along_body_consistency(self) -> None: + """Test that SE2 and SE3 translations are consistent when SE3 has no z-component or rotation""" + for _ in range(self.num_consistency_tests): + # Create equivalent SE2 and SE3 poses (SE3 with z=0 and no rotations except yaw) + + pose_se2 = StateSE2.from_array(self._get_random_se2_array(1)[0]) + pose_se3 = StateSE3.from_array( + np.array([pose_se2.x, pose_se2.y, 0.0, 0.0, 0.0, pose_se2.yaw], dtype=np.float64) + ) + + # Test translation along x-axis + dx = np.random.uniform(-5.0, 5.0) + translated_se2_x = translate_se2_along_body_frame(pose_se2, Vector2D(dx, 0.0)) + translated_se3_x = translate_se3_along_x(pose_se3, dx) + + np.testing.assert_array_almost_equal( + translated_se2_x.array[StateSE2Index.XY], translated_se3_x.array[StateSE3Index.XY], decimal=self.decimal + ) + np.testing.assert_almost_equal( + translated_se2_x.array[StateSE2Index.YAW], + translated_se3_x.array[StateSE3Index.YAW], + decimal=self.decimal, + ) + + # Test translation along y-axis + dy = np.random.uniform(-5.0, 5.0) + translated_se2_y = translate_se2_along_body_frame(pose_se2, Vector2D(0.0, dy)) + translated_se3_y = translate_se3_along_y(pose_se3, dy) + + np.testing.assert_array_almost_equal( + translated_se2_y.array[StateSE2Index.XY], translated_se3_y.array[StateSE3Index.XY], decimal=self.decimal + ) + np.testing.assert_almost_equal( + translated_se2_y.array[StateSE2Index.YAW], + translated_se3_y.array[StateSE3Index.YAW], + decimal=self.decimal, + ) + + # Test translation along x- and y-axis + dx = np.random.uniform(-5.0, 5.0) + dy = np.random.uniform(-5.0, 5.0) + translated_se2_xy = translate_se2_along_body_frame(pose_se2, Vector2D(dx, dy)) + translated_se3_xy = translate_se3_along_body_frame(pose_se3, Vector3D(dx, dy, 0.0)) + np.testing.assert_array_almost_equal( + translated_se2_xy.array[StateSE2Index.XY], + translated_se3_xy.array[StateSE3Index.XY], + decimal=self.decimal, + ) + np.testing.assert_almost_equal( + translated_se2_xy.array[StateSE2Index.YAW], + 
translated_se3_xy.array[StateSE3Index.YAW],
+                decimal=self.decimal,
+            )
+
+    def test_se2_se3_point_conversion_consistency(self) -> None:
+        """Test that SE2 and SE3 point conversions are consistent for 2D points embedded in 3D"""
+        for _ in range(self.num_consistency_tests):
+            # Create equivalent SE2 and SE3 reference poses
+            x = np.random.uniform(-10.0, 10.0)
+            y = np.random.uniform(-10.0, 10.0)
+            yaw = np.random.uniform(-np.pi, np.pi)
+
+            reference_se2 = StateSE2.from_array(np.array([x, y, yaw], dtype=np.float64))
+            reference_se3 = StateSE3.from_array(np.array([x, y, 0.0, 0.0, 0.0, yaw], dtype=np.float64))
+
+            # Generate 2D points and embed them in 3D with z=0
+            num_points = np.random.randint(1, 8)
+            points_2d = np.random.uniform(-20.0, 20.0, (num_points, len(Point2DIndex)))
+            points_3d = np.column_stack([points_2d, np.zeros(num_points)])
+
+            # Convert using SE2 functions
+            relative_2d = convert_absolute_to_relative_point_2d_array(reference_se2, points_2d)
+            absolute_2d_recovered = convert_relative_to_absolute_point_2d_array(reference_se2, relative_2d)
+
+            # Convert using SE3 functions
+            relative_3d = convert_absolute_to_relative_points_3d_array(reference_se3, points_3d)
+            absolute_3d_recovered = convert_relative_to_absolute_points_3d_array(reference_se3, relative_3d)
+
+            # Check that SE2 and SE3 results are consistent (ignoring z-component)
+            np.testing.assert_array_almost_equal(
+                relative_2d,
+                relative_3d[..., Point3DIndex.XY],
+                decimal=self.decimal,
+            )
+            np.testing.assert_array_almost_equal(
+                absolute_2d_recovered,
+                absolute_3d_recovered[..., Point3DIndex.XY],
+                decimal=self.decimal,
+            )
+            # Z-component should remain zero
+            np.testing.assert_array_almost_equal(
+                relative_3d[..., Point3DIndex.Z],
+                np.zeros(num_points),
+                decimal=self.decimal,
+            )
+            np.testing.assert_array_almost_equal(
+                absolute_3d_recovered[..., Point3DIndex.Z],
+                np.zeros(num_points),
+                decimal=self.decimal,
+            )
+
+
 if __name__ == "__main__":
     unittest.main()
diff --git a/d123/geometry/transform/transform_se2.py b/d123/geometry/transform/transform_se2.py
index c1f33cd4..6331bd16 100644
--- a/d123/geometry/transform/transform_se2.py
+++ b/d123/geometry/transform/transform_se2.py
@@ -30,6 +30,8 @@ def convert_absolute_to_relative_se2_array(
     else:
         raise TypeError(f"Expected StateSE2 or np.ndarray, got {type(origin)}")

+    assert len(StateSE2Index) == state_se2_array.shape[-1]
+
     rotate_rad = -origin_array[StateSE2Index.YAW]
     cos, sin = np.cos(rotate_rad), np.sin(rotate_rad)
     R_inv = np.array([[cos, -sin], [sin, cos]])
@@ -41,14 +43,14 @@ def convert_absolute_to_relative_se2_array(
     return state_se2_rel


-def convert_absolute_to_relative_point_2d_array(
-    origin: Union[StateSE2, npt.NDArray[np.float64]], point_2d_array: npt.NDArray[np.float64]
+def convert_relative_to_absolute_se2_array(
+    origin: Union[StateSE2, npt.NDArray[np.float64]], state_se2_array: npt.NDArray[np.float64]
 ) -> npt.NDArray[np.float64]:
-    """Converts an absolute 2D point array from global to relative coordinates.
-
+    """
+    Converts a StateSE2 array from relative to global coordinates.
     :param origin: origin pose of relative coords system
-    :param point_2d_array: array of 2D points with (x,y) in last dim
-    :return: 2D points array in relative coordinates
+    :param state_se2_array: array of SE2 states with (x,y,θ) in last dim
+    :return: SE2 coords array in global coordinates
     """
     if isinstance(origin, StateSE2):
         origin_array = origin.array
@@ -58,24 +60,30 @@
     else:
         raise TypeError(f"Expected StateSE2 or np.ndarray, got {type(origin)}")

-    rotate_rad = -origin_array[StateSE2Index.YAW]
+    assert len(StateSE2Index) == state_se2_array.shape[-1]
+
+    rotate_rad = origin_array[StateSE2Index.YAW]
     cos, sin = np.cos(rotate_rad), np.sin(rotate_rad)
     R = np.array([[cos, -sin], [sin, cos]])

-    point_2d_rel = point_2d_array - origin_array[..., StateSE2Index.XY]
-    point_2d_rel = point_2d_rel @ R.T
+    state_se2_abs = np.zeros_like(state_se2_array, dtype=np.float64)
+    state_se2_abs[..., StateSE2Index.XY] = state_se2_array[..., StateSE2Index.XY] @ R.T
+    state_se2_abs[..., StateSE2Index.XY] += origin_array[..., StateSE2Index.XY]
+    state_se2_abs[..., StateSE2Index.YAW] = normalize_angle(
+        state_se2_array[..., StateSE2Index.YAW] + origin_array[..., StateSE2Index.YAW]
+    )

-    return point_2d_rel
+    return state_se2_abs


-def convert_relative_to_absolute_se2_array(
-    origin: Union[StateSE2, npt.NDArray[np.float64]], state_se2_array: npt.NDArray[np.float64]
+def convert_absolute_to_relative_point_2d_array(
+    origin: Union[StateSE2, npt.NDArray[np.float64]], point_2d_array: npt.NDArray[np.float64]
 ) -> npt.NDArray[np.float64]:
-    """
-    Converts an StateSE2 array from global to relative coordinates.
+    """Converts an absolute 2D point array from global to relative coordinates.
+
     :param origin: origin pose of relative coords system
-    :param state_se2_array: array of SE2 states with (x,y,θ) in last dim
-    :return: SE2 coords array in relative coordinates
+    :param point_2d_array: array of 2D points with (x,y) in last dim
+    :return: 2D points array in relative coordinates
     """
     if isinstance(origin, StateSE2):
         origin_array = origin.array
@@ -85,15 +93,14 @@ def convert_relative_to_absolute_se2_array(
     else:
         raise TypeError(f"Expected StateSE2 or np.ndarray, got {type(origin)}")

-    rotate_rad = origin_array[StateSE2Index.YAW]
+    rotate_rad = -origin_array[StateSE2Index.YAW]
     cos, sin = np.cos(rotate_rad), np.sin(rotate_rad)
-    R = np.array([[cos, -sin], [sin, cos]])
+    R = np.array([[cos, -sin], [sin, cos]], dtype=np.float64)

-    state_se2_rel = state_se2_array + origin_array
-    state_se2_rel[..., StateSE2Index.XY] = state_se2_rel[..., StateSE2Index.XY] @ R.T
-    state_se2_rel[..., StateSE2Index.YAW] = normalize_angle(state_se2_rel[..., StateSE2Index.YAW])
+    point_2d_rel = point_2d_array - origin_array[..., StateSE2Index.XY]
+    point_2d_rel = point_2d_rel @ R.T

-    return state_se2_rel
+    return point_2d_rel


 def convert_relative_to_absolute_point_2d_array(
@@ -110,7 +117,7 @@ def convert_relative_to_absolute_point_2d_array(

     rotate_rad = origin_array[StateSE2Index.YAW]
     cos, sin = np.cos(rotate_rad), np.sin(rotate_rad)
-    R = np.array([[cos, -sin], [sin, cos]])
+    R = np.array([[cos, -sin], [sin, cos]], dtype=np.float64)

     point_2d_abs = point_2d_array @ R.T
     point_2d_abs = point_2d_abs + origin_array[..., StateSE2Index.XY]
@@ -137,30 +144,13 @@ def translate_se2_array(state_se2_array: npt.NDArray[np.float64], translation: V
     :param translation: 2D translation vector
     :return: translated SE2 array
     """
+    assert len(StateSE2Index) == state_se2_array.shape[-1]
     result = state_se2_array.copy()
     result[..., StateSE2Index.XY] += translation.array[Vector2DIndex.XY]

     return result


-def translate_se2_along_yaw(state_se2: StateSE2, translation: Vector2D) -> StateSE2:
-    """Translate a single SE2 state along its local coordinate frame.
-
-    :param state_se2: SE2 state to translate
-    :param translation: 2D translation in local frame (x: forward, y: left)
-    :return: translated SE2 state
-    """
-    yaw = state_se2.array[StateSE2Index.YAW]
-    cos_yaw, sin_yaw = np.cos(yaw), np.sin(yaw)
-
-    # Transform translation from local to global frame
-    global_translation = np.array(
-        [translation.x * cos_yaw - translation.y * sin_yaw, translation.x * sin_yaw + translation.y * cos_yaw]
-    )
-
-    return translate_se2(state_se2, Vector2D.from_array(global_translation))
-
-
-def translate_se2_array_along_yaw(
+def translate_se2_array_along_body_frame(
     state_se2_array: npt.NDArray[np.float64], translation: Vector2D
 ) -> npt.NDArray[np.float64]:
     """Translate an array of SE2 states along their respective local coordinate frames.
@@ -169,15 +159,29 @@ def translate_se2_array_along_yaw(
     :param translation: 2D translation in local frame (x: forward, y: left)
     :return: translated SE2 array
     """
+    assert len(StateSE2Index) == state_se2_array.shape[-1]
     result = state_se2_array.copy()

     yaws = state_se2_array[..., StateSE2Index.YAW]
     cos_yaws, sin_yaws = np.cos(yaws), np.sin(yaws)

     # Transform translation from local to global frame for each state
-    global_translation_x = translation.x * cos_yaws - translation.y * sin_yaws
-    global_translation_y = translation.x * sin_yaws + translation.y * cos_yaws
+    # Create rotation matrices for each state
+    R = np.stack([cos_yaws, -sin_yaws, sin_yaws, cos_yaws], axis=-1).reshape(*cos_yaws.shape, 2, 2)

-    result[..., StateSE2Index.X] += global_translation_x
-    result[..., StateSE2Index.Y] += global_translation_y
+    # Transform translation vector from local to global frame
+    translation_vector = translation.array[Vector2DIndex.XY]  # [x, y]
+    global_translation = np.einsum("...ij,...j->...i", R, translation_vector)
+
+    result[..., StateSE2Index.XY] += global_translation

     return result
+
+
+def translate_se2_along_body_frame(state_se2: StateSE2, translation: Vector2D) -> StateSE2:
+    """Translate a single SE2 state along its local coordinate frame.
+ + :param state_se2: SE2 state to translate + :param translation: 2D translation in local frame (x: forward, y: left) + :return: translated SE2 state + """ + return StateSE2.from_array(translate_se2_array_along_body_frame(state_se2.array, translation), copy=False) diff --git a/d123/geometry/transform/transform_se3.py b/d123/geometry/transform/transform_se3.py index 3a04b5b2..1412d61a 100644 --- a/d123/geometry/transform/transform_se3.py +++ b/d123/geometry/transform/transform_se3.py @@ -25,10 +25,8 @@ def translate_se3_along_z(state_se3: StateSE3, distance: float) -> StateSE3: z_axis = R[:, 2] state_se3_array = state_se3.array.copy() - state_se3_array[StateSE3Index.X] += distance * z_axis[0] - state_se3_array[StateSE3Index.Y] += distance * z_axis[1] - state_se3_array[StateSE3Index.Z] += distance * z_axis[2] - return StateSE3.from_array(state_se3_array) + state_se3_array[StateSE3Index.XYZ] += distance * z_axis[Vector3DIndex.XYZ] + return StateSE3.from_array(state_se3_array, copy=False) def translate_se3_along_y(state_se3: StateSE3, distance: float) -> StateSE3: @@ -43,10 +41,8 @@ def translate_se3_along_y(state_se3: StateSE3, distance: float) -> StateSE3: y_axis = R[:, 1] state_se3_array = state_se3.array.copy() - state_se3_array[StateSE3Index.X] += distance * y_axis[0] - state_se3_array[StateSE3Index.Y] += distance * y_axis[1] - state_se3_array[StateSE3Index.Z] += distance * y_axis[2] - return StateSE3.from_array(state_se3_array) + state_se3_array[StateSE3Index.XYZ] += distance * y_axis[Vector3DIndex.XYZ] + return StateSE3.from_array(state_se3_array, copy=False) def translate_se3_along_x(state_se3: StateSE3, distance: float) -> StateSE3: @@ -61,14 +57,11 @@ def translate_se3_along_x(state_se3: StateSE3, distance: float) -> StateSE3: x_axis = R[:, 0] state_se3_array = state_se3.array.copy() - state_se3_array[StateSE3Index.X] += distance * x_axis[0] - state_se3_array[StateSE3Index.Y] += distance * x_axis[1] - state_se3_array[StateSE3Index.Z] += distance * x_axis[2] + state_se3_array[StateSE3Index.XYZ] += distance * x_axis[Vector3DIndex.XYZ] + return StateSE3.from_array(state_se3_array, copy=False) - return StateSE3.from_array(state_se3_array) - -def translate_body_frame(state_se3: StateSE3, vector_3d: Vector3D) -> StateSE3: +def translate_se3_along_body_frame(state_se3: StateSE3, vector_3d: Vector3D) -> StateSE3: """Translates a SE3 state along a vector in the body frame. :param state_se3: The SE3 state to translate. 
@@ -77,14 +70,11 @@ def translate_body_frame(state_se3: StateSE3, vector_3d: Vector3D) -> StateSE3: """ R = state_se3.rotation_matrix - - # Transform to world frame world_translation = R @ vector_3d.array state_se3_array = state_se3.array.copy() state_se3_array[StateSE3Index.XYZ] += world_translation[Vector3DIndex.XYZ] - - return StateSE3.from_array(state_se3_array) + return StateSE3.from_array(state_se3_array, copy=False) def convert_absolute_to_relative_se3_array( diff --git a/notebooks/viz/bev_matplotlib.ipynb b/notebooks/viz/bev_matplotlib.ipynb index 32328cbf..3627a511 100644 --- a/notebooks/viz/bev_matplotlib.ipynb +++ b/notebooks/viz/bev_matplotlib.ipynb @@ -343,7 +343,7 @@ ], "metadata": { "kernelspec": { - "display_name": "d123_dev", + "display_name": "d123", "language": "python", "name": "python3" }, diff --git a/notebooks/viz/video_example.ipynb b/notebooks/viz/video_example.ipynb new file mode 100644 index 00000000..f9202dd7 --- /dev/null +++ b/notebooks/viz/video_example.ipynb @@ -0,0 +1,319 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "id": "0", + "metadata": {}, + "outputs": [], + "source": [ + "from d123.dataset.scene.scene_builder import ArrowSceneBuilder\n", + "from d123.dataset.scene.scene_filter import SceneFilter\n", + "\n", + "from d123.common.multithreading.worker_sequential import Sequential\n", + "from d123.common.datatypes.sensor.camera import CameraType" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "1", + "metadata": {}, + "outputs": [], + "source": [ + "from d123.geometry import Point2D\n", + "import numpy as np\n", + "\n", + "import torch\n", + "\n", + "from d123.geometry.polyline import Polyline2D" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "2", + "metadata": {}, + "outputs": [], + "source": [ + "# split = \"nuplan_private_test\"\n", + "# log_names = [\"2021.09.29.17.35.58_veh-44_00066_00432\"]\n", + "\n", + "\n", + "# splits = [\"wopd_train\"]\n", + "# splits = [\"carla\"]\n", + "splits = [\"nuplan_private_test\"]\n", + "# splits = [\"av2-sensor-mini_train\"]\n", + "# log_names = None\n", + "\n", + "\n", + "log_names = None\n", + "scene_tokens = None\n", + "\n", + "scene_filter = SceneFilter(\n", + " split_names=splits,\n", + " log_names=log_names,\n", + " scene_tokens=scene_tokens,\n", + " duration_s=20,\n", + " history_s=0.0,\n", + " timestamp_threshold_s=20,\n", + " shuffle=True,\n", + " camera_types=[CameraType.CAM_F0],\n", + ")\n", + "scene_builder = ArrowSceneBuilder(\"/home/daniel/d123_workspace/data\")\n", + "worker = Sequential()\n", + "# worker = RayDistributed()\n", + "scenes = scene_builder.get_scenes(scene_filter, worker)\n", + "\n", + "print(f\"Found {len(scenes)} scenes\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "3", + "metadata": {}, + "outputs": [], + "source": [ + "from typing import List, Optional, Tuple\n", + "import matplotlib.pyplot as plt\n", + "import numpy as np\n", + "from d123.common.visualization.matplotlib.camera import add_camera_ax\n", + "from d123.geometry import Point2D\n", + "from d123.common.visualization.color.color import BLACK, DARK_GREY, DARKER_GREY, LIGHT_GREY, NEW_TAB_10, TAB_10\n", + "from d123.common.visualization.color.config import PlotConfig\n", + "from d123.common.visualization.color.default import CENTERLINE_CONFIG, MAP_SURFACE_CONFIG, ROUTE_CONFIG\n", + "from d123.common.visualization.matplotlib.observation import (\n", + " add_box_detections_to_ax,\n", + " add_default_map_on_ax,\n", + " 
add_ego_vehicle_to_ax,\n", + " add_traffic_lights_to_ax,\n", + ")\n", + "from d123.common.visualization.matplotlib.utils import add_shapely_linestring_to_ax, add_shapely_polygon_to_ax\n", + "from d123.dataset.maps.abstract_map import AbstractMap\n", + "from d123.dataset.maps.abstract_map_objects import AbstractLane, AbstractLaneGroup\n", + "from d123.dataset.maps.gpkg.gpkg_map_objects import GPKGIntersection\n", + "from d123.dataset.maps.map_datatypes import MapLayer\n", + "from d123.dataset.scene.abstract_scene import AbstractScene\n", + "\n", + "\n", + "import shapely.geometry as geom\n", + "\n", + "\n", + "def _plot_scene_on_ax(ax: plt.Axes, scene: AbstractScene, iteration: int = 0, radius: float = 80) -> plt.Axes:\n", + "\n", + " ego_vehicle_state = scene.get_ego_state_at_iteration(iteration)\n", + " box_detections = scene.get_box_detections_at_iteration(iteration)\n", + "\n", + " point_2d = ego_vehicle_state.bounding_box.center.state_se2.point_2d\n", + " # add_debug_map_on_ax(ax, scene.map_api, point_2d, radius=radius, route_lane_group_ids=None)\n", + " add_default_map_on_ax(ax, scene.map_api, point_2d, radius=radius, route_lane_group_ids=None)\n", + " # add_traffic_lights_to_ax(ax, traffic_light_detections, scene.map_api)\n", + "\n", + " add_box_detections_to_ax(ax, box_detections)\n", + " add_ego_vehicle_to_ax(ax, ego_vehicle_state)\n", + "\n", + " zoom = 1.0\n", + " ax.set_xlim(point_2d.x - radius * zoom, point_2d.x + radius * zoom)\n", + " ax.set_ylim(point_2d.y - radius * zoom, point_2d.y + radius * zoom)\n", + "\n", + " ax.set_aspect(\"equal\", adjustable=\"box\")\n", + " return ax\n", + "\n", + "\n", + "def plot_scene_at_iteration(\n", + " scene: AbstractScene, iteration: int = 0, radius: float = 80\n", + ") -> Tuple[plt.Figure, plt.Axes]:\n", + "\n", + " fig, ax = plt.subplots(1, 2, figsize=(18, 5))\n", + "\n", + " camera = scene.get_camera_at_iteration(iteration=iteration, camera_type=CameraType.CAM_F0)\n", + " add_camera_ax(ax[0], camera)\n", + "\n", + " _plot_scene_on_ax(ax[1], scene, iteration, radius)\n", + " for ax_ in ax:\n", + " ax_.set_xticks([])\n", + " ax_.set_yticks([])\n", + " fig.subplots_adjust(wspace=-0.5) # Make the border between axes super small\n", + " fig.tight_layout()\n", + " return fig, ax\n", + "\n", + "\n", + "scene_index = 6\n", + "\n", + "iteration = 99\n", + "fig, ax = plot_scene_at_iteration(scenes[scene_index], iteration=iteration, radius=35)\n", + "plt.show()\n", + "\n", + "camera = scenes[scene_index].get_camera_at_iteration(\n", + " iteration=iteration, camera_type=CameraType.CAM_F0\n", + ")\n", + "\n", + "plt.imshow(camera.image, cmap=\"gray\", vmin=0, vmax=255)\n", + "# # fig.savefig(f\"/home/daniel/scene_{scene_index}_iteration_1.pdf\", dpi=300, bbox_inches=\"tight\")\n", + "\n", + "scenes[scene_index].log_name" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "4", + "metadata": {}, + "outputs": [], + "source": [ + "from pathlib import Path\n", + "from typing import Union\n", + "\n", + "from matplotlib import animation\n", + "from tqdm import tqdm\n", + "\n", + "\n", + "def render_scene_animation(\n", + " scene: AbstractScene,\n", + " output_path: Union[str, Path],\n", + " start_idx: int = 0,\n", + " end_idx: Optional[int] = None,\n", + " step: int = 5,\n", + " fps: float = 2.0,\n", + " dpi: int = 300,\n", + " format: str = \"gif\",\n", + " radius: float = 35,\n", + ") -> None:\n", + " assert format in [\"mp4\", \"gif\"], \"Format must be either 'mp4' or 'gif'.\"\n", + " output_path = Path(output_path)\n", + " 
output_path.mkdir(parents=True, exist_ok=True)\n", + "\n", + " scene.open()\n", + "\n", + " if end_idx is None:\n", + " end_idx = scene.get_number_of_iterations()\n", + " end_idx = min(end_idx, scene.get_number_of_iterations())\n", + " fig, ax = plt.subplots(1, 2, figsize=(18, 5))\n", + " gs = fig.add_gridspec(1, 2, width_ratios=[6, 1])\n", + " ax[0].set_position(gs[0].get_position(fig))\n", + " ax[1].set_position(gs[1].get_position(fig))\n", + "\n", + " def update(i):\n", + " ax[0].clear()\n", + " ax[1].clear()\n", + " for ax_ in ax:\n", + " ax_.set_xticks([])\n", + " ax_.set_yticks([])\n", + " _plot_scene_on_ax(ax[1], scene, i, radius)\n", + " camera = scene.get_camera_at_iteration(iteration=i, camera_type=CameraType.CAM_F0)\n", + " add_camera_ax(ax[0], camera)\n", + " fig.subplots_adjust(wspace=-0.33, hspace=0.0)\n", + " plt.subplots_adjust(left=0.01, right=0.99, top=0.99, bottom=0.01) # Remove all margins\n", + " pbar.update(1)\n", + "\n", + " frames = list(range(start_idx, end_idx, step))\n", + " pbar = tqdm(total=len(frames), desc=f\"Rendering {scene.log_name} as {format}\")\n", + " ani = animation.FuncAnimation(fig, update, frames=frames, repeat=False)\n", + "\n", + " ani.save(output_path / f\"{scene.log_name}_{scene.token}.{format}\", writer=\"ffmpeg\", fps=fps, dpi=dpi)\n", + " plt.close(fig)\n", + " scene.close()\n", + "\n", + "\n", + "render_scene_animation(scenes[scene_index], output_path=\"/home/daniel/scene_renders\", format=\"gif\", dpi=100, end_idx=200)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "5", + "metadata": {}, + "outputs": [], + "source": [ + "\n", + "\n", + "\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "6", + "metadata": {}, + "outputs": [], + "source": [ + "from d123.dataset.conversion.map.road_edge.road_edge_2d_utils import get_road_edge_linear_rings\n", + "from d123.dataset.maps.gpkg.gpkg_map import GPKGMap\n", + "\n", + "\n", + "map_api: GPKGMap = scenes[scene_index].map_api\n", + "\n", + "drivable_polygons = map_api._gpd_dataframes[MapLayer.LANE]\n", + "\n", + "\n", + "\n", + "linear_rings = get_road_edge_linear_rings(drivable_polygons.geometry.tolist())\n", + "rings_lengths = [ring.length for ring in linear_rings]\n", + "max_length_idx = np.argmax(rings_lengths)\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "size = 16\n", + "fig, ax = plt.subplots(figsize=(size, size))\n", + "\n", + "for idx, ring in enumerate(linear_rings):\n", + " if idx == max_length_idx:\n", + " ax.plot(*ring.xy, color=\"black\", linewidth=2, label=\"Longest Road Edge\")\n", + " else:\n", + " ax.plot(*ring.xy)\n", + "\n", + "\n", + "ax.set_aspect(\"equal\", adjustable=\"box\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "7", + "metadata": {}, + "outputs": [], + "source": [] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "8", + "metadata": {}, + "outputs": [], + "source": [] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "9", + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "d123", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.12.11" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} From 48b11a96113a73b703d1a2fa52200864f374bdff Mon Sep 17 00:00:00 2001 
From: jbwang <1159270049@qq.com> Date: Wed, 27 Aug 2025 16:31:55 +0800 Subject: [PATCH 026/145] finish preprocess detection script --- .../kitti_360/kitti_360_data_converter.py | 63 +++--- .../kitti_360/kitti_360_helper.py | 41 ++-- .../kitti_360/preprocess_detection.py | 189 ++++++++++++++++++ .../default_dataset_conversion.yaml | 2 +- jbwang_test2.py | 11 +- 5 files changed, 253 insertions(+), 53 deletions(-) create mode 100644 d123/dataset/dataset_specific/kitti_360/preprocess_detection.py diff --git a/d123/dataset/dataset_specific/kitti_360/kitti_360_data_converter.py b/d123/dataset/dataset_specific/kitti_360/kitti_360_data_converter.py index 6433ca89..03e5bd37 100644 --- a/d123/dataset/dataset_specific/kitti_360/kitti_360_data_converter.py +++ b/d123/dataset/dataset_specific/kitti_360/kitti_360_data_converter.py @@ -9,6 +9,7 @@ from typing import Any, Dict, Final, List, Optional, Tuple, Union import numpy as np +import pickle from collections import defaultdict import datetime import hashlib @@ -27,14 +28,12 @@ from d123.common.datatypes.time.time_point import TimePoint from d123.common.datatypes.vehicle_state.ego_state import DynamicStateSE3, EgoStateSE3, EgoStateSE3Index from d123.common.datatypes.vehicle_state.vehicle_parameters import get_kitti360_station_wagon_parameters,rear_axle_se3_to_center_se3 -from d123.common.geometry.base import StateSE3 -from d123.common.geometry.bounding_box.bounding_box import BoundingBoxSE3Index -from d123.common.geometry.vector import Vector3D, Vector3DIndex from d123.dataset.arrow.helper import open_arrow_table, write_arrow_table from d123.dataset.dataset_specific.raw_data_converter import DataConverterConfig, RawDataConverter from d123.dataset.logs.log_metadata import LogMetadata from d123.dataset.dataset_specific.kitti_360.kitti_360_helper import KITTI360Bbox3D, KITTI3602NUPLAN_IMU_CALIBRATION from d123.dataset.dataset_specific.kitti_360.labels import KIITI360_DETECTION_NAME_DICT,kittiId2label +from d123.geometry import BoundingBoxSE3, BoundingBoxSE3Index, StateSE3, Vector3D, Vector3DIndex KITTI360_DT: Final[float] = 0.1 SORT_BY_TIMESTAMP: Final[bool] = True @@ -74,6 +73,9 @@ DIR_3D_BBOX: PATH_3D_BBOX_ROOT / "train", } +D123_DEVKIT_ROOT = Path(os.environ["D123_DEVKIT_ROOT"]) +PREPOCESS_DETECTION_DIR = D123_DEVKIT_ROOT / "d123" / "dataset" / "dataset_specific" / "kitti_360" / "detection_preprocess" + def create_token(input_data: str) -> str: # TODO: Refactor this function. # TODO: Add a general function to create tokens from arbitrary data. 
@@ -316,7 +318,15 @@ def _readYAMLFile(fileName): def get_kitti360_lidar_metadata() -> Dict[LiDARType, LiDARMetadata]: metadata: Dict[LiDARType, LiDARMetadata] = {} + extrinsic = get_lidar_extrinsic() + metadata[LiDARType.LIDAR_TOP] = LiDARMetadata( + lidar_type=LiDARType.LIDAR_TOP, + lidar_index=Kitti360LidarIndex, + extrinsic=extrinsic, + ) + return metadata +def get_lidar_extrinsic() -> np.ndarray: cam2pose_txt = PATH_CALIB_ROOT / "calib_cam_to_pose.txt" if not cam2pose_txt.exists(): raise FileNotFoundError(f"calib_cam_to_pose.txt file not found: {cam2pose_txt}") @@ -336,13 +346,7 @@ def get_kitti360_lidar_metadata() -> Dict[LiDARType, LiDARMetadata]: cam2velo = np.concatenate((np.loadtxt(cam2velo_txt).reshape(3,4), lastrow)) extrinsic = cam2pose @ np.linalg.inv(cam2velo) - - metadata[LiDARType.LIDAR_TOP] = LiDARMetadata( - lidar_type=LiDARType.LIDAR_TOP, - lidar_index=Kitti360LidarIndex, - extrinsic=extrinsic, - ) - return metadata + return extrinsic def _write_recording_table( log_name: str, @@ -405,11 +409,10 @@ def _write_recording_table( #TODO Synchronization all other sequences) def _read_timestamps(log_name: str) -> Optional[List[TimePoint]]: # unix - # default using velodyne timestamps,if not available, use camera timestamps ts_files = [ - PATH_3D_RAW_ROOT / log_name / "velodyne_points" / "timestamps.txt", PATH_2D_RAW_ROOT / log_name / "image_00" / "timestamps.txt", PATH_2D_RAW_ROOT / log_name / "image_01" / "timestamps.txt", + PATH_3D_RAW_ROOT / log_name / "velodyne_points" / "timestamps.txt", ] for ts_file in ts_files: if ts_file.exists(): @@ -531,16 +534,13 @@ def _extract_detections( dynamic_groups: Dict[int, List[KITTI360Bbox3D]] = defaultdict(list) - - # lidra_data_all = [] - # for index in range(ts_len): - # lidar_full_path = PATH_3D_RAW_ROOT / log_name / "velodyne_points" / "data" / f"{index:010d}.bin" - # if not lidar_full_path.exists(): - # logging.warning(f"LiDAR file not found for frame {index}: {lidar_full_path}") - # continue - # lidar_data = np.fromfile(lidar_full_path, dtype=np.float32) - # lidar_data = lidar_data.reshape(-1, 4)[:, :3] # Keep only x, y, z coordinates - # lidra_data_all.append(lidar_data) + detection_preprocess_path = PREPOCESS_DETECTION_DIR / f"{log_name}_detection_preprocessed.pkl" + if detection_preprocess_path.exists(): + with open(detection_preprocess_path, "rb") as f: + detection_preprocess_result = pickle.load(f) + records_dict = {record_item["global_id"]: record_item for record_item in detection_preprocess_result["records"]} + else: + detection_preprocess_result = None for child in root: semanticIdKITTI = int(child.find('semanticId').text) @@ -552,14 +552,12 @@ def _extract_detections( #static object if obj.timestamp == -1: - # first filter by radius - obj.filter_by_radius(ego_states_xyz,radius=50.0) - # then filter by pointcloud - for frame in obj.valid_radius_frames: - # TODO in the future, now is too slow because cpu in the server is not free - # or using config? 
- # lidar_data = lidra_data_all[frame] - # if obj.box_visible_in_point_cloud(lidar_data): + if detection_preprocess_result is None: + obj.filter_by_radius(ego_states_xyz,radius=50.0) + else: + obj.load_detection_preprocess(records_dict) + for record in obj.valid_frames["records"]: + frame = record["timestamp"] detections_states[frame].append(obj.get_state_array()) detections_velocity[frame].append([0.0, 0.0, 0.0]) detections_tokens[frame].append(str(obj.globalID)) @@ -606,6 +604,11 @@ def _extract_detections( #TODO lidar extraction now only velo def _extract_lidar(log_name: str, idx: int, data_converter_config: DataConverterConfig) -> Dict[LiDARType, Optional[str]]: + + #NOTE special case for sequence 2013_05_28_drive_0002_sync which has no lidar data before frame 4391 + if log_name == "2013_05_28_drive_0002_sync" and idx <= 4390: + return {LiDARType.LIDAR_TOP: None} + lidar: Optional[str] = None lidar_full_path = PATH_3D_RAW_ROOT / log_name / "velodyne_points" / "data" / f"{idx:010d}.bin" if lidar_full_path.exists(): diff --git a/d123/dataset/dataset_specific/kitti_360/kitti_360_helper.py b/d123/dataset/dataset_specific/kitti_360/kitti_360_helper.py index 7edcd6af..76e3c9e0 100644 --- a/d123/dataset/dataset_specific/kitti_360/kitti_360_helper.py +++ b/d123/dataset/dataset_specific/kitti_360/kitti_360_helper.py @@ -1,13 +1,11 @@ import numpy as np from collections import defaultdict - +from typing import Dict, Optional, Any, List from scipy.linalg import polar from scipy.spatial.transform import Rotation as R -from d123.common.geometry.base import StateSE3 -from d123.common.geometry.bounding_box.bounding_box import BoundingBoxSE3 -from d123.common.geometry.transform.se3 import get_rotation_matrix +from d123.geometry import BoundingBoxSE3, StateSE3 from d123.dataset.dataset_specific.kitti_360.labels import kittiId2label DEFAULT_ROLL = 0.0 @@ -51,7 +49,6 @@ def __init__(self): # the window that contains the bbox self.start_frame = -1 self.end_frame = -1 - self.valid_radius_frames = [] # timestamp of the bbox (-1 if statis) self.timestamp = -1 @@ -92,6 +89,9 @@ def parseBbox(self, child): self.label = child.find('label').text self.globalID = local2global(self.semanticId, self.instanceId) + + self.valid_frames = {"global_id": self.globalID, "records": []} + self.parseVertices(child) self.parse_scale_rotation() @@ -119,11 +119,6 @@ def parse_scale_rotation(self): self.yaw = yaw self.pitch = pitch self.roll = roll - - # self.pose = np.eye(4, dtype=np.float64) - # self.pose[:3, :3] = self.Rm - # self.pose[:3, 3] = self.T - # self.w2e = np.linalg.inv(self.pose) def get_state_array(self): center = StateSE3( @@ -140,16 +135,17 @@ def get_state_array(self): return bounding_box_se3.array def filter_by_radius(self,ego_state_xyz,radius=50.0): - # first stage of detection, used to filter out detections by radius - - for index in range(len(ego_state_xyz)): - ego_state = ego_state_xyz[index] - distance = np.linalg.norm(ego_state[:3] - self.T) - if distance <= radius: - self.valid_radius_frames.append(index) + ''' first stage of detection, used to filter out detections by radius ''' + d = np.linalg.norm(ego_state_xyz - self.T[None, :], axis=1) + idxs = np.where(d <= radius)[0] + for idx in idxs: + self.valid_frames["records"].append({ + "timestamp": idx, + "points_in_box": None, + }) def box_visible_in_point_cloud(self, points): - # points: (N,3) , box: (8,3) + ''' points: (N,3) , box: (8,3) ''' box = self.vertices O, A, B, C = box[0], box[1], box[2], box[5] OA = A - O @@ -159,4 +155,11 @@ def 
box_visible_in_point_cloud(self, points):
         mask = (np.dot(O, OA) < POA) & (POA < np.dot(A, OA)) & \
                (np.dot(O, OB) < POB) & (POB < np.dot(B, OB)) & \
                (np.dot(O, OC) < POC) & (POC < np.dot(C, OC))
-        return True if np.sum(mask) > 100 else False
\ No newline at end of file
+
+        points_in_box = np.sum(mask)
+        visible = bool(points_in_box > 50)
+        return visible, points_in_box
+
+    def load_detection_preprocess(self, records_dict: Dict[int, Any]):
+        if self.globalID in records_dict:
+            self.valid_frames["records"] = records_dict[self.globalID]["records"]
\ No newline at end of file
diff --git a/d123/dataset/dataset_specific/kitti_360/preprocess_detection.py b/d123/dataset/dataset_specific/kitti_360/preprocess_detection.py
new file mode 100644
index 00000000..e45e76d9
--- /dev/null
+++ b/d123/dataset/dataset_specific/kitti_360/preprocess_detection.py
@@ -0,0 +1,189 @@
+"""
+This script precomputes static detection records for KITTI-360:
+ - Stage 1: radius filtering using ego positions (from poses.txt).
+ - Stage 2: LiDAR visibility check to fill per-frame point counts.
+It writes a pickle containing, for each static object, all feasible frames and
+their point counts to avoid recomputation in later pipelines.
+We have precomputed and saved the pickle for all training logs; you can either
+download them or run this script to generate them yourself.
+"""
+
+from __future__ import annotations
+import os
+import pickle
+import logging
+from pathlib import Path
+from typing import Dict, List, Tuple, Optional, Any
+from collections import defaultdict
+
+import numpy as np
+import numpy.typing as npt
+import xml.etree.ElementTree as ET
+
+KITTI360_DATA_ROOT = Path(os.environ["KITTI360_DATA_ROOT"])
+DIR_3D_RAW = "data_3d_raw"
+DIR_3D_BBOX = "data_3d_bboxes"
+DIR_POSES = "data_poses"
+
+PATH_3D_RAW_ROOT = KITTI360_DATA_ROOT / DIR_3D_RAW
+PATH_3D_BBOX_ROOT = KITTI360_DATA_ROOT / DIR_3D_BBOX
+PATH_POSES_ROOT = KITTI360_DATA_ROOT / DIR_POSES
+
+from d123.dataset.dataset_specific.kitti_360.kitti_360_helper import KITTI360Bbox3D, KITTI3602NUPLAN_IMU_CALIBRATION
+from d123.dataset.dataset_specific.kitti_360.labels import KIITI360_DETECTION_NAME_DICT, kittiId2label
+from d123.dataset.dataset_specific.kitti_360.kitti_360_data_converter import get_lidar_extrinsic
+
+def _bbox_xml_path(log_name: str) -> Path:
+    return PATH_3D_BBOX_ROOT / "train" / f"{log_name}.xml"
+
+def _lidar_frame_path(log_name: str, frame_idx: int) -> Path:
+    return PATH_3D_RAW_ROOT / log_name / "velodyne_points" / "data" / f"{frame_idx:010d}.bin"
+
+def _load_lidar_xyz(filepath: Path) -> np.ndarray:
+    """Load one LiDAR frame and return Nx3 xyz."""
+    arr = np.fromfile(filepath, dtype=np.float32)
+    return arr.reshape(-1, 4)[:, :3]
+
+def _collect_static_objects(log_name: str) -> List[KITTI360Bbox3D]:
+    """Parse XML and collect static objects with valid class names."""
+    xml_path = _bbox_xml_path(log_name)
+    if not xml_path.exists():
+        raise FileNotFoundError(f"BBox 3D file not found: {xml_path}")
+    tree = ET.parse(xml_path)
+    root = tree.getroot()
+    objs: List[KITTI360Bbox3D] = []
+    for child in root:
+        sem_id = int(child.find("semanticId").text)
+        name = kittiId2label[sem_id].name
+        timestamp = int(child.find('timestamp').text)  # -1 for static objects
+        if child.find("transform") is None or name not in KIITI360_DETECTION_NAME_DICT or timestamp != -1:
+            continue
+        obj = KITTI360Bbox3D()
+        obj.parseBbox(child)
+        objs.append(obj)
+    return objs
+
+def _collect_ego_states(log_name: str, length: int) -> npt.NDArray[np.float64]:
+    """Load ego states from
poses.txt.""" + + pose_file = PATH_POSES_ROOT / log_name / "poses.txt" + if not pose_file.exists(): + raise FileNotFoundError(f"Pose file not found: {pose_file}") + + poses = np.loadtxt(pose_file) + poses_time = poses[:, 0] - 1 # Adjusting time to start from 0 + + pose_idx = 0 + poses_time_len = len(poses_time) + + ego_states = [] + + for time_idx in range(length): + while pose_idx + 1 < poses_time_len and poses_time[pose_idx + 1] < time_idx: + pose_idx += 1 + pos = pose_idx + state_item = np.eye(4) + r00, r01, r02 = poses[pos, 1:4] + r10, r11, r12 = poses[pos, 5:8] + r20, r21, r22 = poses[pos, 9:12] + R_mat = np.array([[r00, r01, r02], + [r10, r11, r12], + [r20, r21, r22]], dtype=np.float64) + R_mat_cali = R_mat @ KITTI3602NUPLAN_IMU_CALIBRATION[:3,:3] + ego_state_xyz = np.array([ + poses[pos, 4], + poses[pos, 8], + poses[pos, 12], + ]) + + state_item[:3, :3] = R_mat_cali + state_item[:3, 3] = ego_state_xyz + ego_states.append(state_item) + + return np.array(ego_states) # [N,4,4] + + +def process_detection( + log_name: str, + radius_m: float = 50.0, + output_dir: Optional[Path] = None, +) -> None: + """ + Precompute static detections filtering: + 1) filter by ego-centered radius over all frames + 2) filter by LiDAR point cloud visibility + Save per-frame static detections to a pickle to avoid recomputation. + """ + + lidar_dir = PATH_3D_RAW_ROOT / log_name / "velodyne_points" / "data" + if not lidar_dir.exists(): + raise FileNotFoundError(f"LiDAR data folder not found: {lidar_dir}") + ts_len = len(list(lidar_dir.glob("*.bin"))) + logging.info(f"[preprocess] {log_name}: found {ts_len} lidar frames") + + # 1) Parse static objects from XML + static_objs = _collect_static_objects(log_name) + logging.info(f"[preprocess] {log_name}: static objects = {len(static_objs)}") + + # 2) Filter by ego-centered radius + ego_states = _collect_ego_states(log_name,ts_len) + logging.info(f"[preprocess] {log_name}: ego states = {len(ego_states)}") + for obj in static_objs: + obj.filter_by_radius(ego_states[:, :3, 3], radius_m) + + # 3) Filter by LiDAR point cloud visibility + lidar_extrinsic = get_lidar_extrinsic() + for time_idx in range(ts_len): + logging.info(f"[preprocess] {log_name}: t={time_idx}") + lidar_path = _lidar_frame_path(log_name, time_idx) + lidar_xyz = _load_lidar_xyz(lidar_path) + + # lidar to pose + lidar_h = np.concatenate((lidar_xyz, np.ones((lidar_xyz.shape[0], 1), dtype=lidar_xyz.dtype)), axis=1) + lidar_in_imu = lidar_h @ lidar_extrinsic.T + lidar_in_imu = lidar_in_imu[:,:3] + + # pose to world + lidar_in_world = lidar_in_imu @ ego_states[time_idx][:3,:3].T + ego_states[time_idx][:3,3] + + for obj in static_objs: + if not any(record["timestamp"] == time_idx for record in obj.valid_frames["records"]): + continue + visible, points_in_box = obj.box_visible_in_point_cloud(lidar_in_world) + if not visible: + obj.valid_frames["records"] = [record for record in obj.valid_frames["records"] if record["timestamp"] != time_idx] + else: + for record in obj.valid_frames["records"]: + if record["timestamp"] == time_idx: + record["points_in_box"] = points_in_box + break + + # 4) Save pickle + records: List[Dict[str, Any]] = [] + for obj in static_objs: + records.append(obj.valid_frames) + if output_dir is None: + output_dir = PATH_3D_BBOX_ROOT / "preprocessed" + output_dir.mkdir(parents=True, exist_ok=True) + out_path = output_dir / f"{log_name}_detection_preprocessed.pkl" + payload = { + "log_name": log_name, + "records": records + } + with open(out_path, "wb") as f: + pickle.dump(payload, f) + 
logging.info(f"[preprocess] saved: {out_path}") + +if __name__ == "__main__": + import argparse + logging.basicConfig(level=logging.INFO) + parser = argparse.ArgumentParser(description="Precompute KITTI-360 static detections filters") + parser.add_argument("--log_name", default="2013_05_28_drive_0000_sync") + parser.add_argument("--radius", type=float, default=60.0) + parser.add_argument("--out", type=Path, default=None, help="output directory for pkl") + args = parser.parse_args() + process_detection( + log_name=args.log_name, + radius_m=args.radius, + output_dir=args.out, + ) diff --git a/d123/script/config/dataset_conversion/default_dataset_conversion.yaml b/d123/script/config/dataset_conversion/default_dataset_conversion.yaml index 2c474fe8..52915f13 100644 --- a/d123/script/config/dataset_conversion/default_dataset_conversion.yaml +++ b/d123/script/config/dataset_conversion/default_dataset_conversion.yaml @@ -22,4 +22,4 @@ defaults: - kitti360_dataset force_log_conversion: True -force_map_conversion: True +force_map_conversion: False diff --git a/jbwang_test2.py b/jbwang_test2.py index f9748db5..183df813 100644 --- a/jbwang_test2.py +++ b/jbwang_test2.py @@ -213,12 +213,17 @@ def get_rotation_matrix(roll,pitch,yaw): [0.0, -1.0, 0.0], [0.0, 0.0, -1.0]], dtype=np.float64) R_mat = R_mat @ calib + from d123.geometry.rotation import EulerAngles if idx <= 300: # print("R_mat",R_mat) + new_yaw, new_pitch, new_roll = Quaternion(matrix=R_mat[:3, :3]).yaw_pitch_roll + R = EulerAngles.from_array(np.array([new_roll, new_pitch, new_yaw])).rotation_matrix + # print("R from yaw_pitch_roll",R) + print(R_mat - R) # new_yaw,new_pitch,new_roll = R.from_matrix(R_mat).as_euler('yxz', degrees=False) - print("new",new_roll,new_pitch,new_yaw) - print("roll,pitch,yaw",oxts_data[3:6]) # 前6个元素是位置和速度 - roll, pitch, yaw = oxts_data[3:6] + # print("new",new_roll,new_pitch,new_yaw) + # print("roll,pitch,yaw",oxts_data[3:6]) # 前6个元素是位置和速度 + # roll, pitch, yaw = oxts_data[3:6] # print("true",get_rotation_matrix(roll,pitch,yaw)) # print("new",roll,pitch,yaw) \ No newline at end of file From 6dd5de2494b0afc38d30fe324efbdf46d2298a92 Mon Sep 17 00:00:00 2001 From: Daniel Dauner Date: Wed, 27 Aug 2025 20:38:45 +0200 Subject: [PATCH 027/145] A few changes for refactoring and readability. 
--- d123/__init__.py | 2 +- d123/common/utils/mixin.py | 11 +++-- .../map/opendrive/opendrive_converter.py | 0 .../map/opendrive/parser/reference.py | 7 ++- .../map/opendrive/utils/lane_helper.py | 6 +-- d123/geometry/geometry_index.py | 6 +-- d123/geometry/se.py | 22 ++++++++- d123/geometry/transform/__init__.py | 15 ++++++ d123/geometry/transform/transform_se2.py | 49 +++++++++---------- d123/geometry/transform/transform_se3.py | 7 --- docs/geometry.rst | 34 ++++++++++++- pyproject.toml | 6 ++- 12 files changed, 112 insertions(+), 53 deletions(-) delete mode 100644 d123/dataset/conversion/map/opendrive/opendrive_converter.py diff --git a/d123/__init__.py b/d123/__init__.py index b1a19e32..6526deb4 100644 --- a/d123/__init__.py +++ b/d123/__init__.py @@ -1 +1 @@ -__version__ = "0.0.5" +__version__ = "0.0.7" diff --git a/d123/common/utils/mixin.py b/d123/common/utils/mixin.py index 9290242b..252a4eee 100644 --- a/d123/common/utils/mixin.py +++ b/d123/common/utils/mixin.py @@ -1,7 +1,5 @@ from __future__ import annotations -import copy as pycopy - import numpy as np import numpy.typing as npt @@ -9,6 +7,11 @@ class ArrayMixin: """Abstract base class for geometric entities.""" + @classmethod + def from_array(cls, array: npt.NDArray[np.float64], copy: bool = True) -> ArrayMixin: + """Create an instance from a NumPy array.""" + raise NotImplementedError + @property def array(self) -> npt.NDArray[np.float64]: """The array representation of the geometric entity.""" @@ -43,6 +46,4 @@ def tolist(self) -> list: def copy(self) -> ArrayMixin: """Return a copy of the object with a copied array.""" - obj = pycopy.copy(self) - obj.array = self.array.copy() - return obj + return self.__class__.from_array(self.array, copy=True) diff --git a/d123/dataset/conversion/map/opendrive/opendrive_converter.py b/d123/dataset/conversion/map/opendrive/opendrive_converter.py deleted file mode 100644 index e69de29b..00000000 diff --git a/d123/dataset/conversion/map/opendrive/parser/reference.py b/d123/dataset/conversion/map/opendrive/parser/reference.py index 5fe9211b..bc13537d 100644 --- a/d123/dataset/conversion/map/opendrive/parser/reference.py +++ b/d123/dataset/conversion/map/opendrive/parser/reference.py @@ -60,7 +60,7 @@ def interpolate_se2(self, s: float, t: float = 0.0, lane_section_end: bool = Fal s = self.length else: raise ValueError( - f"s={s} is beyond the end of the plan view (length={self.length}) with tolerance={TOLERANCE}." + f"PlanView: s={s} is beyond the end of the plan view (length={self.length}) with tolerance={TOLERANCE}." 
) # Find the geometry segment containing s @@ -131,6 +131,11 @@ def _find_polynomial(s: float, polynomials: List[Polynomial], lane_section_end: out_polynomial = polynomial break + # s_values = np.array([poly.s for poly in polynomials]) + # side = "left" if lane_section_end else "right" + # poly_idx = np.searchsorted(s_values, s, side=side) - 1 + # poly_idx = int(np.clip(poly_idx, 0, len(polynomials) - 1)) + # return polynomials[poly_idx] return out_polynomial def interpolate_se2(self, s: float, t: float = 0.0, lane_section_end: bool = False) -> npt.NDArray[np.float64]: diff --git a/d123/dataset/conversion/map/opendrive/utils/lane_helper.py b/d123/dataset/conversion/map/opendrive/utils/lane_helper.py index edd05423..a21bd625 100644 --- a/d123/dataset/conversion/map/opendrive/utils/lane_helper.py +++ b/d123/dataset/conversion/map/opendrive/utils/lane_helper.py @@ -50,9 +50,9 @@ def type(self) -> str: def _s_positions(self) -> npt.NDArray[np.float64]: length = self.s_range[1] - self.s_range[0] _s_positions = np.linspace( - self.s_range[0], - self.s_range[1], - int(np.ceil(length / self.interpolation_step_size)) + 1, + start=self.s_range[0], + stop=self.s_range[1], + num=int(np.ceil(length / self.interpolation_step_size)) + 1, endpoint=True, dtype=np.float64, ) diff --git a/d123/geometry/geometry_index.py b/d123/geometry/geometry_index.py index e5c61154..5f56abd9 100644 --- a/d123/geometry/geometry_index.py +++ b/d123/geometry/geometry_index.py @@ -117,10 +117,6 @@ def XY(cls) -> slice: def XYZ(cls) -> slice: return slice(cls.X, cls.Z + 1) - @classproperty - def ROTATION_XYZ(cls) -> slice: - return slice(cls.ROLL, cls.YAW + 1) - @classproperty def EULER_ANGLES(cls) -> slice: return slice(cls.ROLL, cls.YAW + 1) @@ -208,7 +204,7 @@ def STATE_SE3(cls) -> slice: return slice(cls.X, cls.YAW + 1) @classproperty - def ROTATION_XYZ(cls) -> slice: + def EULER_ANGLES(cls) -> slice: return slice(cls.ROLL, cls.YAW + 1) @classproperty diff --git a/d123/geometry/se.py b/d123/geometry/se.py index 1eb021c6..2bc56da9 100644 --- a/d123/geometry/se.py +++ b/d123/geometry/se.py @@ -274,6 +274,18 @@ def transformation_matrix(self) -> npt.NDArray[np.float64]: def euler_angles(self) -> EulerAngles: return EulerAngles.from_array(self.array[StateSE3Index.EULER_ANGLES]) + @property + def quaternion_se3(self) -> QuaternionSE3: + """Returns the QuaternionSE3 representation of the state. + + :return: A QuaternionSE3 instance representing the quaternion. + """ + quaternion_se3_array = np.zeros(len(QuaternionSE3Index), dtype=np.float64) + quaternion_se3_array[QuaternionSE3Index.XYZ] = self.array[StateSE3Index.XYZ] + quaternion_se3_array[QuaternionSE3Index.QUATERNION] = Quaternion.from_euler_angles(self.euler_angles) + + return QuaternionSE3.from_array(quaternion_se3_array) + @property def quaternion(self) -> npt.NDArray[np.float64]: """Returns the quaternion (w, x, y, z) representation of the state's orientation. @@ -299,7 +311,7 @@ def __matmul__(self, other: StateSE3) -> StateSE3: return StateSE3.from_transformation_matrix(self.transformation_matrix @ other.transformation_matrix) -class QuaternionSE3: +class QuaternionSE3(ArrayMixin): """Class representing a quaternion in SE3 space. TODO: Implement and replace StateSE3. @@ -438,3 +450,11 @@ def quaternion(self) -> Quaternion: :return: A Quaternion instance representing the quaternion. 
""" return Quaternion.from_array(self.array[QuaternionSE3Index.QUATERNION]) + + @property + def rotation_matrix(self) -> npt.NDArray[np.float64]: + """Returns the 3x3 rotation matrix representation of the state's orientation. + + :return: A 3x3 numpy array representing the rotation matrix. + """ + return self.quaternion.rotation_matrix diff --git a/d123/geometry/transform/__init__.py b/d123/geometry/transform/__init__.py index e69de29b..96088028 100644 --- a/d123/geometry/transform/__init__.py +++ b/d123/geometry/transform/__init__.py @@ -0,0 +1,15 @@ +from d123.geometry.transform.transform_se2 import ( + convert_absolute_to_relative_se2_array, + convert_relative_to_absolute_se2_array, + translate_se2_along_body_frame, + translate_se2_along_x, + translate_se2_along_y, +) +from d123.geometry.transform.transform_se3 import ( + convert_absolute_to_relative_se3_array, + convert_relative_to_absolute_se3_array, + translate_se3_along_body_frame, + translate_se3_along_x, + translate_se3_along_y, + translate_se3_along_z, +) diff --git a/d123/geometry/transform/transform_se2.py b/d123/geometry/transform/transform_se2.py index 6331bd16..faaa3587 100644 --- a/d123/geometry/transform/transform_se2.py +++ b/d123/geometry/transform/transform_se2.py @@ -8,8 +8,6 @@ from d123.geometry.utils.rotation_utils import normalize_angle from d123.geometry.vector import Vector2D -# TODO: Refactor 2D and 3D transform functions in a more consistent and general way. - def convert_absolute_to_relative_se2_array( origin: Union[StateSE2, npt.NDArray[np.float64]], state_se2_array: npt.NDArray[np.float64] @@ -125,31 +123,6 @@ def convert_relative_to_absolute_point_2d_array( return point_2d_abs -def translate_se2(state_se2: StateSE2, translation: Vector2D) -> StateSE2: - """Translate a single SE2 state by a 2D vector. - - :param state_se2: SE2 state to translate - :param translation: 2D translation vector - :return: translated SE2 state - """ - translated_xy = state_se2.array[StateSE2Index.XY] + translation.array[Vector2DIndex.XY] - return StateSE2(translated_xy[0], translated_xy[1], state_se2.array[StateSE2Index.YAW]) - - -def translate_se2_array(state_se2_array: npt.NDArray[np.float64], translation: Vector2D) -> npt.NDArray[np.float64]: - """Translate an array of SE2 states by a 2D vector. - - :param state_se2_array: array of SE2 states, indexed by \ - :class:`~d123.geometry.geometry_index.StateSE2Index`, in last dim - :param translation: 2D translation vector - :return: translated SE2 array - """ - assert len(StateSE2Index) == state_se2_array.shape[-1] - result = state_se2_array.copy() - result[..., StateSE2Index.XY] += translation.array[Vector2DIndex.XY] - return result - - def translate_se2_array_along_body_frame( state_se2_array: npt.NDArray[np.float64], translation: Vector2D ) -> npt.NDArray[np.float64]: @@ -185,3 +158,25 @@ def translate_se2_along_body_frame(state_se2: StateSE2, translation: Vector2D) - :return: translated SE2 state """ return StateSE2.from_array(translate_se2_array_along_body_frame(state_se2.array, translation), copy=False) + + +def translate_se2_along_x(state_se2: StateSE2, distance: float) -> StateSE2: + """Translate a single SE2 state along its local X-axis. 
+ + :param state_se2: SE2 state to translate + :param distance: distance to translate along the local X-axis + :return: translated SE2 state + """ + translation = Vector2D.from_array(np.array([distance, 0.0], dtype=np.float64)) + return StateSE2.from_array(translate_se2_array_along_body_frame(state_se2.array, translation), copy=False) + + +def translate_se2_along_y(state_se2: StateSE2, distance: float) -> StateSE2: + """Translate a single SE2 state along its local Y-axis. + + :param state_se2: SE2 state to translate + :param distance: distance to translate along the local Y-axis + :return: translated SE2 state + """ + translation = Vector2D.from_array(np.array([0.0, distance], dtype=np.float64)) + return StateSE2.from_array(translate_se2_array_along_body_frame(state_se2.array, translation), copy=False) diff --git a/d123/geometry/transform/transform_se3.py b/d123/geometry/transform/transform_se3.py index 1412d61a..abed7552 100644 --- a/d123/geometry/transform/transform_se3.py +++ b/d123/geometry/transform/transform_se3.py @@ -7,7 +7,6 @@ from d123.geometry.geometry_index import Point3DIndex, Vector3DIndex from d123.geometry.rotation import EulerAngles from d123.geometry.utils.rotation_utils import ( - get_rotation_matrices_from_euler_array, get_rotation_matrix_from_euler_array, normalize_angle, ) @@ -109,12 +108,6 @@ def convert_absolute_to_relative_se3_array( # Vectorized relative position calculation rel_positions = (abs_positions - t_origin) @ R_origin - # Get rotation matrices for all absolute orientations - R_abs = get_rotation_matrices_from_euler_array(abs_euler_angles) - - # Compute relative rotations: R_rel = R_origin^T @ R_abs - np.transpose(R_origin) @ R_abs - # Convert back to Euler angles (this may need a custom function) # For now, using simple subtraction as approximation (this is incorrect for general rotations) origin_euler = origin_array[StateSE3Index.EULER_ANGLES] diff --git a/docs/geometry.rst b/docs/geometry.rst index 957db541..61b3f65d 100644 --- a/docs/geometry.rst +++ b/docs/geometry.rst @@ -17,8 +17,8 @@ Vectors .. autoclass:: d123.geometry.Vector3D() -Super Euclidean States -~~~~~~~~~~~~~~~~~~~~~~ +Special Euclidean Group +~~~~~~~~~~~~~~~~~~~~~~~ .. autoclass:: d123.geometry.StateSE2() .. autoclass:: d123.geometry.StateSE3() @@ -52,6 +52,36 @@ Indexing Enums .. autoclass:: d123.geometry.Corners3DIndex() +Transformations +--------------- + +Transformations in 2D +~~~~~~~~~~~~~~~~~~~~~ +.. autofunction:: d123.geometry.transform.convert_absolute_to_relative_se2_array + +.. autofunction:: d123.geometry.transform.convert_relative_to_absolute_se2_array + +.. autofunction:: d123.geometry.transform.translate_se2_along_body_frame + +.. autofunction:: d123.geometry.transform.translate_se2_along_x + +.. autofunction:: d123.geometry.transform.translate_se2_along_y + + +Transformations in 3D +~~~~~~~~~~~~~~~~~~~~~ +.. autofunction:: d123.geometry.transform.convert_absolute_to_relative_se3_array + +.. autofunction:: d123.geometry.transform.convert_relative_to_absolute_se3_array + +.. autofunction:: d123.geometry.transform.translate_se3_along_body_frame + +.. autofunction:: d123.geometry.transform.translate_se3_along_x + +.. autofunction:: d123.geometry.transform.translate_se3_along_y + +.. autofunction:: d123.geometry.transform.translate_se3_along_z + Occupancy Map ------------- .. 
autoclass:: d123.geometry.OccupancyMap2D() diff --git a/pyproject.toml b/pyproject.toml index ad02829a..dda67ef7 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -13,7 +13,7 @@ classifiers = [ "License :: OSI Approved :: Apache Software License", ] name = "d123" -version = "v0.0.6" +version = "v0.0.7" authors = [{ name = "Daniel Dauner", email = "daniel.dauner@gmail.com" }] description = "TODO" readme = "README.md" @@ -64,6 +64,10 @@ dependencies = [ "viser", ] +[project.scripts] +d123-viser = "d123.script.run_viser:main" + + [project.optional-dependencies] dev = [ "black", From 6ed982db5553e929d7963d5d08c49d256a8ea4f1 Mon Sep 17 00:00:00 2001 From: Daniel Dauner Date: Wed, 27 Aug 2025 21:00:19 +0200 Subject: [PATCH 028/145] Add more transformation tests. --- d123/geometry/test/test_transform.py | 444 +++++++++++++++++++++++---- 1 file changed, 382 insertions(+), 62 deletions(-) diff --git a/d123/geometry/test/test_transform.py b/d123/geometry/test/test_transform.py index f126e33a..89161153 100644 --- a/d123/geometry/test/test_transform.py +++ b/d123/geometry/test/test_transform.py @@ -10,9 +10,9 @@ convert_absolute_to_relative_se2_array, convert_relative_to_absolute_point_2d_array, convert_relative_to_absolute_se2_array, - translate_se2, translate_se2_along_body_frame, - translate_se2_array, + translate_se2_along_x, + translate_se2_along_y, translate_se2_array_along_body_frame, ) from d123.geometry.transform.transform_se3 import ( @@ -33,58 +33,73 @@ class TestTransformSE2(unittest.TestCase): def setUp(self): self.decimal = 6 # Decimal places for np.testing.assert_array_almost_equal - def test_translate_se2(self) -> None: - pose: StateSE2 = StateSE2.from_array(np.array([1.0, 2.0, 0.0], dtype=np.float64)) - translation: Vector2D = Vector2D(1.0, 1.0) - - result: StateSE2 = translate_se2(pose, translation) - expected: StateSE2 = StateSE2.from_array(np.array([2.0, 3.0, 0.0], dtype=np.float64)) + def test_translate_se2_along_x(self) -> None: + """Tests translating a SE2 state along the X-axis.""" + pose: StateSE2 = StateSE2.from_array(np.array([0.0, 0.0, 0.0], dtype=np.float64)) + distance: float = 1.0 + result: StateSE2 = translate_se2_along_x(pose, distance) + expected: StateSE2 = StateSE2.from_array(np.array([1.0, 0.0, 0.0], dtype=np.float64)) np.testing.assert_array_almost_equal(result.array, expected.array, decimal=self.decimal) - def test_translate_se2_negative_translation(self) -> None: + def test_translate_se2_along_x_negative(self) -> None: + """Tests translating a SE2 state along the X-axis in the negative direction.""" pose: StateSE2 = StateSE2.from_array(np.array([1.0, 2.0, 0.0], dtype=np.float64)) - translation: Vector2D = Vector2D(-0.5, -1.5) - result: StateSE2 = translate_se2(pose, translation) - expected: StateSE2 = StateSE2.from_array(np.array([0.5, 0.5, 0.0], dtype=np.float64)) + distance: float = -0.5 + result: StateSE2 = translate_se2_along_x(pose, distance) + expected: StateSE2 = StateSE2.from_array(np.array([0.5, 2.0, 0.0], dtype=np.float64)) np.testing.assert_array_almost_equal(result.array, expected.array, decimal=self.decimal) - def test_translate_se2_with_rotation(self) -> None: - pose: StateSE2 = StateSE2.from_array(np.array([0.0, 0.0, np.pi / 4], dtype=np.float64)) - translation: Vector2D = Vector2D(1.0, 0.0) - result: StateSE2 = translate_se2(pose, translation) - expected: StateSE2 = StateSE2.from_array(np.array([1.0, 0.0, np.pi / 4], dtype=np.float64)) + def test_translate_se2_along_x_with_rotation(self) -> None: + """Tests translating a SE2 state along the 
X-axis with 90 degree rotation.""" + pose: StateSE2 = StateSE2.from_array(np.array([0.0, 0.0, np.pi / 2], dtype=np.float64)) + distance: float = 1.0 + result: StateSE2 = translate_se2_along_x(pose, distance) + expected: StateSE2 = StateSE2.from_array(np.array([0.0, 1.0, np.pi / 2], dtype=np.float64)) np.testing.assert_array_almost_equal(result.array, expected.array, decimal=self.decimal) - def test_translate_se2_array(self) -> None: - poses: npt.NDArray[np.float64] = np.array([[1.0, 2.0, 0.0], [0.0, 0.0, np.pi / 2]], dtype=np.float64) - translation: Vector2D = Vector2D(1.0, 1.0) - result: npt.NDArray[np.float64] = translate_se2_array(poses, translation) - expected: npt.NDArray[np.float64] = np.array([[2.0, 3.0, 0.0], [1.0, 1.0, np.pi / 2]], dtype=np.float64) - np.testing.assert_array_almost_equal(result, expected) + def test_translate_se2_along_y(self) -> None: + """Tests translating a SE2 state along the Y-axis.""" + pose: StateSE2 = StateSE2.from_array(np.array([0.0, 0.0, 0.0], dtype=np.float64)) + distance: float = 1.0 + result: StateSE2 = translate_se2_along_y(pose, distance) + expected: StateSE2 = StateSE2.from_array(np.array([0.0, 1.0, 0.0], dtype=np.float64)) + np.testing.assert_array_almost_equal(result.array, expected.array, decimal=self.decimal) - def test_translate_se2_array_zero_translation(self) -> None: - poses: npt.NDArray[np.float64] = np.array([[1.0, 2.0, 0.0], [0.0, 0.0, np.pi / 2]], dtype=np.float64) - translation: Vector2D = Vector2D(0.0, 0.0) - result: npt.NDArray[np.float64] = translate_se2_array(poses, translation) - expected: npt.NDArray[np.float64] = poses.copy() - np.testing.assert_array_almost_equal(result, expected) + def test_translate_se2_along_y_negative(self) -> None: + """Tests translating a SE2 state along the Y-axis in the negative direction.""" + pose: StateSE2 = StateSE2.from_array(np.array([1.0, 2.0, 0.0], dtype=np.float64)) + distance: float = -1.5 + result: StateSE2 = translate_se2_along_y(pose, distance) + expected: StateSE2 = StateSE2.from_array(np.array([1.0, 0.5, 0.0], dtype=np.float64)) + np.testing.assert_array_almost_equal(result.array, expected.array, decimal=self.decimal) + + def test_translate_se2_along_y_with_rotation(self) -> None: + """Tests translating a SE2 state along the Y-axis with -90 degree rotation.""" + pose: StateSE2 = StateSE2.from_array(np.array([0.0, 0.0, -np.pi / 2], dtype=np.float64)) + distance: float = 2.0 + result: StateSE2 = translate_se2_along_y(pose, distance) + expected: StateSE2 = StateSE2.from_array(np.array([2.0, 0.0, -np.pi / 2], dtype=np.float64)) + np.testing.assert_array_almost_equal(result.array, expected.array, decimal=self.decimal) - def test_translate_se2_along_yaw(self) -> None: + def test_translate_se2_along_body_frame_forward(self) -> None: + """Tests translating a SE2 state along the body frame forward direction, with 90 degree rotation.""" # Move 1 unit forward in the direction of yaw (pi/2 = 90 degrees = +Y direction) - pose: StateSE2 = StateSE2.from_array(np.array([0.0, 0.0, np.deg2rad(90)], dtype=np.float64)) + pose: StateSE2 = StateSE2.from_array(np.array([0.0, 0.0, np.pi / 2], dtype=np.float64)) vector: Vector2D = Vector2D(1.0, 0.0) result: StateSE2 = translate_se2_along_body_frame(pose, vector) - expected: StateSE2 = StateSE2.from_array(np.array([0.0, 1.0, np.deg2rad(90)], dtype=np.float64)) + expected: StateSE2 = StateSE2.from_array(np.array([0.0, 1.0, np.pi / 2], dtype=np.float64)) np.testing.assert_array_almost_equal(result.array, expected.array, decimal=self.decimal) - def 
test_translate_se2_along_yaw_backward(self) -> None:
+    def test_translate_se2_along_body_frame_backward(self) -> None:
+        """Tests translating a SE2 state along the body frame backward direction."""
         pose: StateSE2 = StateSE2.from_array(np.array([0.0, 0.0, 0.0], dtype=np.float64))
         vector: Vector2D = Vector2D(-1.0, 0.0)
         result: StateSE2 = translate_se2_along_body_frame(pose, vector)
         expected: StateSE2 = StateSE2.from_array(np.array([-1.0, 0.0, 0.0], dtype=np.float64))
         np.testing.assert_array_almost_equal(result.array, expected.array, decimal=self.decimal)
 
-    def test_translate_se2_along_yaw_diagonal(self) -> None:
+    def test_translate_se2_along_body_frame_diagonal(self) -> None:
+        """Tests translating a SE2 state along the body frame diagonal direction."""
         pose: StateSE2 = StateSE2.from_array(np.array([1.0, 0.0, np.deg2rad(45)], dtype=np.float64))
         vector: Vector2D = Vector2D(1.0, 0.0)
         result: StateSE2 = translate_se2_along_body_frame(pose, vector)
@@ -93,61 +108,127 @@ def test_translate_se2_along_yaw_diagonal(self) -> None:
         )
         np.testing.assert_array_almost_equal(result.array, expected.array, decimal=self.decimal)
 
-    def test_translate_se2_array_along_yaw(self) -> None:
+    def test_translate_se2_along_body_frame_lateral(self) -> None:
+        """Tests translating a SE2 state along the body frame lateral direction."""
+        # Move 1 unit to the left (positive y in body frame)
+        pose: StateSE2 = StateSE2.from_array(np.array([0.0, 0.0, 0.0], dtype=np.float64))
+        vector: Vector2D = Vector2D(0.0, 1.0)
+        result: StateSE2 = translate_se2_along_body_frame(pose, vector)
+        expected: StateSE2 = StateSE2.from_array(np.array([0.0, 1.0, 0.0], dtype=np.float64))
+        np.testing.assert_array_almost_equal(result.array, expected.array, decimal=self.decimal)
+
+    def test_translate_se2_along_body_frame_lateral_with_rotation(self) -> None:
+        """Tests translating a SE2 state along the body frame lateral direction with 90 degree rotation."""
+        # Move 1 unit to the left (body +y) when facing 90 degrees
+        pose: StateSE2 = StateSE2.from_array(np.array([0.0, 0.0, np.pi / 2], dtype=np.float64))
+        vector: Vector2D = Vector2D(0.0, 1.0)
+        result: StateSE2 = translate_se2_along_body_frame(pose, vector)
+        expected: StateSE2 = StateSE2.from_array(np.array([-1.0, 0.0, np.pi / 2], dtype=np.float64))
+        np.testing.assert_array_almost_equal(result.array, expected.array, decimal=self.decimal)
+
+    def test_translate_se2_array_along_body_frame_single_distance(self) -> None:
+        """Tests translating a SE2 state array along the body frame forward direction."""
         poses: npt.NDArray[np.float64] = np.array([[0.0, 0.0, 0.0], [0.0, 0.0, np.pi / 2]], dtype=np.float64)
-        distance: float = Vector2D(1.0, 0.0)
+        distance: Vector2D = Vector2D(1.0, 0.0)
         result: npt.NDArray[np.float64] = translate_se2_array_along_body_frame(poses, distance)
         expected: npt.NDArray[np.float64] = np.array([[1.0, 0.0, 0.0], [0.0, 1.0, np.pi / 2]], dtype=np.float64)
-        np.testing.assert_array_almost_equal(result, expected)
+        np.testing.assert_array_almost_equal(result, expected, decimal=self.decimal)
 
-    def test_translate_se2_array_along_yaw_multiple_distances(self) -> None:
+    def test_translate_se2_array_along_body_frame_multiple_distances(self) -> None:
+        """Tests translating a SE2 state array along the body frame forward direction with different distances."""
         poses: npt.NDArray[np.float64] = np.array([[0.0, 0.0, 0.0], [0.0, 0.0, np.pi]], dtype=np.float64)
-        distance: float = Vector2D(2.0, 0.0)
+        distance: Vector2D = Vector2D(2.0, 0.0)
         result: npt.NDArray[np.float64] =
translate_se2_array_along_body_frame(poses, distance) expected: npt.NDArray[np.float64] = np.array([[2.0, 0.0, 0.0], [-2.0, 0.0, np.pi]], dtype=np.float64) - np.testing.assert_array_almost_equal(result, expected) + np.testing.assert_array_almost_equal(result, expected, decimal=self.decimal) + + def test_translate_se2_array_along_body_frame_lateral(self) -> None: + """Tests translating a SE2 state array along the body frame lateral direction with 90 degree rotation.""" + poses: npt.NDArray[np.float64] = np.array([[0.0, 0.0, 0.0], [0.0, 0.0, np.pi / 2]], dtype=np.float64) + distance: Vector2D = Vector2D(0.0, 1.0) + result: npt.NDArray[np.float64] = translate_se2_array_along_body_frame(poses, distance) + expected: npt.NDArray[np.float64] = np.array([[0.0, 1.0, 0.0], [-1.0, 0.0, np.pi / 2]], dtype=np.float64) + np.testing.assert_array_almost_equal(result, expected, decimal=self.decimal) def test_convert_absolute_to_relative_se2_array(self) -> None: + """Tests converting absolute SE2 poses to relative SE2 poses.""" origin: StateSE2 = StateSE2.from_array(np.array([1.0, 1.0, 0.0], dtype=np.float64)) absolute_poses: npt.NDArray[np.float64] = np.array([[2.0, 2.0, 0.0], [0.0, 1.0, np.pi / 2]], dtype=np.float64) result: npt.NDArray[np.float64] = convert_absolute_to_relative_se2_array(origin, absolute_poses) expected: npt.NDArray[np.float64] = np.array([[1.0, 1.0, 0.0], [-1.0, 0.0, np.pi / 2]], dtype=np.float64) - np.testing.assert_array_almost_equal(result, expected) + np.testing.assert_array_almost_equal(result, expected, decimal=self.decimal) def test_convert_absolute_to_relative_se2_array_with_rotation(self) -> None: + """Tests converting absolute SE2 poses to relative SE2 poses with 90 degree rotation.""" reference: StateSE2 = StateSE2.from_array(np.array([0.0, 0.0, np.pi / 2], dtype=np.float64)) absolute_poses: npt.NDArray[np.float64] = np.array([[1.0, 0.0, np.pi / 2]], dtype=np.float64) result: npt.NDArray[np.float64] = convert_absolute_to_relative_se2_array(reference, absolute_poses) expected: npt.NDArray[np.float64] = np.array([[0.0, -1.0, 0.0]], dtype=np.float64) - np.testing.assert_array_almost_equal(result, expected) + np.testing.assert_array_almost_equal(result, expected, decimal=self.decimal) + + def test_convert_absolute_to_relative_se2_array_identity(self) -> None: + """Tests converting absolute SE2 poses to relative SE2 poses with identity transformation.""" + reference: StateSE2 = StateSE2.from_array(np.array([0.0, 0.0, 0.0], dtype=np.float64)) + absolute_poses: npt.NDArray[np.float64] = np.array([[1.0, 2.0, np.pi / 4]], dtype=np.float64) + result: npt.NDArray[np.float64] = convert_absolute_to_relative_se2_array(reference, absolute_poses) + expected: npt.NDArray[np.float64] = np.array([[1.0, 2.0, np.pi / 4]], dtype=np.float64) + np.testing.assert_array_almost_equal(result, expected, decimal=self.decimal) def test_convert_relative_to_absolute_se2_array(self) -> None: + """Tests converting relative SE2 poses to absolute SE2 poses.""" reference: StateSE2 = StateSE2.from_array(np.array([1.0, 1.0, 0.0], dtype=np.float64)) relative_poses: npt.NDArray[np.float64] = np.array([[1.0, 1.0, 0.0], [-1.0, 0.0, np.pi / 2]], dtype=np.float64) result: npt.NDArray[np.float64] = convert_relative_to_absolute_se2_array(reference, relative_poses) expected: npt.NDArray[np.float64] = np.array([[2.0, 2.0, 0.0], [0.0, 1.0, np.pi / 2]], dtype=np.float64) - np.testing.assert_array_almost_equal(result, expected) + np.testing.assert_array_almost_equal(result, expected, decimal=self.decimal) + + def 
test_convert_relative_to_absolute_se2_array_with_rotation(self) -> None: + """Tests converting relative SE2 poses to absolute SE2 poses with rotation.""" + reference: StateSE2 = StateSE2.from_array(np.array([1.0, 0.0, np.pi / 2], dtype=np.float64)) + relative_poses: npt.NDArray[np.float64] = np.array([[1.0, 0.0, 0.0]], dtype=np.float64) + result: npt.NDArray[np.float64] = convert_relative_to_absolute_se2_array(reference, relative_poses) + expected: npt.NDArray[np.float64] = np.array([[1.0, 1.0, np.pi / 2]], dtype=np.float64) + np.testing.assert_array_almost_equal(result, expected, decimal=self.decimal) def test_convert_absolute_to_relative_point_2d_array(self) -> None: + """Tests converting absolute 2D points to relative 2D points.""" reference: StateSE2 = StateSE2.from_array(np.array([1.0, 1.0, 0.0], dtype=np.float64)) absolute_points: npt.NDArray[np.float64] = np.array([[2.0, 2.0], [0.0, 1.0]], dtype=np.float64) result: npt.NDArray[np.float64] = convert_absolute_to_relative_point_2d_array(reference, absolute_points) expected: npt.NDArray[np.float64] = np.array([[1.0, 1.0], [-1.0, 0.0]], dtype=np.float64) - np.testing.assert_array_almost_equal(result, expected) + np.testing.assert_array_almost_equal(result, expected, decimal=self.decimal) def test_convert_absolute_to_relative_point_2d_array_with_rotation(self) -> None: + """Tests converting absolute 2D points to relative 2D points with 90 degree rotation.""" reference: StateSE2 = StateSE2.from_array(np.array([0.0, 0.0, np.pi / 2], dtype=np.float64)) absolute_points: npt.NDArray[np.float64] = np.array([[0.0, 1.0], [1.0, 0.0]], dtype=np.float64) result: npt.NDArray[np.float64] = convert_absolute_to_relative_point_2d_array(reference, absolute_points) expected: npt.NDArray[np.float64] = np.array([[1.0, 0.0], [0.0, -1.0]], dtype=np.float64) - np.testing.assert_array_almost_equal(result, expected) + np.testing.assert_array_almost_equal(result, expected, decimal=self.decimal) + + def test_convert_absolute_to_relative_point_2d_array_empty(self) -> None: + """Tests converting an empty array of absolute 2D points to relative 2D points.""" + reference: StateSE2 = StateSE2.from_array(np.array([1.0, 1.0, 0.0], dtype=np.float64)) + absolute_points: npt.NDArray[np.float64] = np.array([], dtype=np.float64).reshape(0, 2) + result: npt.NDArray[np.float64] = convert_absolute_to_relative_point_2d_array(reference, absolute_points) + expected: npt.NDArray[np.float64] = np.array([], dtype=np.float64).reshape(0, 2) + np.testing.assert_array_almost_equal(result, expected, decimal=self.decimal) def test_convert_relative_to_absolute_point_2d_array(self) -> None: + """Tests converting relative 2D points to absolute 2D points.""" reference: StateSE2 = StateSE2.from_array(np.array([1.0, 1.0, 0.0], dtype=np.float64)) relative_points: npt.NDArray[np.float64] = np.array([[1.0, 1.0], [-1.0, 0.0]], dtype=np.float64) result: npt.NDArray[np.float64] = convert_relative_to_absolute_point_2d_array(reference, relative_points) expected: npt.NDArray[np.float64] = np.array([[2.0, 2.0], [0.0, 1.0]], dtype=np.float64) - np.testing.assert_array_almost_equal(result, expected) + np.testing.assert_array_almost_equal(result, expected, decimal=self.decimal) + + def test_convert_relative_to_absolute_point_2d_array_with_rotation(self) -> None: + """Tests converting relative 2D points to absolute 2D points with 90 degree rotation.""" + reference: StateSE2 = StateSE2.from_array(np.array([1.0, 0.0, np.pi / 2], dtype=np.float64)) + relative_points: npt.NDArray[np.float64] = np.array([[1.0, 0.0], 
[0.0, 1.0]], dtype=np.float64) + result: npt.NDArray[np.float64] = convert_relative_to_absolute_point_2d_array(reference, relative_points) + expected: npt.NDArray[np.float64] = np.array([[1.0, 1.0], [0.0, 0.0]], dtype=np.float64) + np.testing.assert_array_almost_equal(result, expected, decimal=self.decimal) class TestTransformSE3(unittest.TestCase): @@ -157,6 +238,7 @@ def setUp(self): self.num_consistency_tests = 10 # Number of random test cases for consistency checks def test_translate_se3_along_x(self) -> None: + """Tests translating a SE3 state along the body frame forward direction.""" pose: StateSE3 = StateSE3.from_array(np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0], dtype=np.float64)) distance: float = 1.0 result: StateSE3 = translate_se3_along_x(pose, distance) @@ -164,13 +246,23 @@ def test_translate_se3_along_x(self) -> None: np.testing.assert_array_almost_equal(result.array, expected.array) def test_translate_se3_along_x_negative(self) -> None: + """Tests translating a SE3 state along the body frame backward direction.""" pose: StateSE3 = StateSE3.from_array(np.array([1.0, 2.0, 3.0, 0.0, 0.0, 0.0], dtype=np.float64)) distance: float = -0.5 result: StateSE3 = translate_se3_along_x(pose, distance) expected: StateSE3 = StateSE3.from_array(np.array([0.5, 2.0, 3.0, 0.0, 0.0, 0.0], dtype=np.float64)) np.testing.assert_array_almost_equal(result.array, expected.array) + def test_translate_se3_along_x_with_rotation(self) -> None: + """Tests translating a SE3 state along the body frame forward direction with yaw rotation.""" + pose: StateSE3 = StateSE3.from_array(np.array([0.0, 0.0, 0.0, 0.0, 0.0, np.pi / 2], dtype=np.float64)) + distance: float = 2.5 + result: StateSE3 = translate_se3_along_x(pose, distance) + expected: StateSE3 = StateSE3.from_array(np.array([0.0, 2.5, 0.0, 0.0, 0.0, np.pi / 2], dtype=np.float64)) + np.testing.assert_array_almost_equal(result.array, expected.array) + def test_translate_se3_along_y(self) -> None: + """Tests translating a SE3 state along the body frame lateral direction.""" pose: StateSE3 = StateSE3.from_array(np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0], dtype=np.float64)) distance: float = 1.0 result: StateSE3 = translate_se3_along_y(pose, distance) @@ -178,13 +270,31 @@ def test_translate_se3_along_y(self) -> None: np.testing.assert_array_almost_equal(result.array, expected.array) def test_translate_se3_along_y_with_existing_position(self) -> None: + """Tests translating a SE3 state along the body frame lateral direction with existing position.""" pose: StateSE3 = StateSE3.from_array(np.array([1.0, 2.0, 3.0, 0.0, 0.0, 0.0], dtype=np.float64)) distance: float = 2.5 result: StateSE3 = translate_se3_along_y(pose, distance) expected: StateSE3 = StateSE3.from_array(np.array([1.0, 4.5, 3.0, 0.0, 0.0, 0.0], dtype=np.float64)) np.testing.assert_array_almost_equal(result.array, expected.array) + def test_translate_se3_along_y_negative(self) -> None: + """Tests translating a SE3 state along the body frame lateral direction in the negative direction.""" + pose: StateSE3 = StateSE3.from_array(np.array([1.0, 2.0, 3.0, 0.0, 0.0, 0.0], dtype=np.float64)) + distance: float = -1.0 + result: StateSE3 = translate_se3_along_y(pose, distance) + expected: StateSE3 = StateSE3.from_array(np.array([1.0, 1.0, 3.0, 0.0, 0.0, 0.0], dtype=np.float64)) + np.testing.assert_array_almost_equal(result.array, expected.array) + + def test_translate_se3_along_y_with_rotation(self) -> None: + """Tests translating a SE3 state along the body frame lateral direction with roll rotation.""" + pose: 
StateSE3 = StateSE3.from_array(np.array([1.0, 2.0, 3.0, np.pi / 2, 0.0, 0.0], dtype=np.float64)) + distance: float = -1.0 + result: StateSE3 = translate_se3_along_y(pose, distance) + expected: StateSE3 = StateSE3.from_array(np.array([1.0, 2.0, 2.0, np.pi / 2, 0.0, 0.0], dtype=np.float64)) + np.testing.assert_array_almost_equal(result.array, expected.array) + def test_translate_se3_along_z(self) -> None: + """Tests translating a SE3 state along the body frame vertical direction.""" pose: StateSE3 = StateSE3.from_array(np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0], dtype=np.float64)) distance: float = 1.0 result: StateSE3 = translate_se3_along_z(pose, distance) @@ -192,34 +302,65 @@ def test_translate_se3_along_z(self) -> None: np.testing.assert_array_almost_equal(result.array, expected.array) def test_translate_se3_along_z_large_distance(self) -> None: + """Tests translating a SE3 state along the body frame vertical direction with a large distance.""" pose: StateSE3 = StateSE3.from_array(np.array([0.0, 0.0, 5.0, 0.0, 0.0, 0.0], dtype=np.float64)) distance: float = 10.0 result: StateSE3 = translate_se3_along_z(pose, distance) expected: StateSE3 = StateSE3.from_array(np.array([0.0, 0.0, 15.0, 0.0, 0.0, 0.0], dtype=np.float64)) np.testing.assert_array_almost_equal(result.array, expected.array) - def test_translate_body_frame(self) -> None: + def test_translate_se3_along_z_negative(self) -> None: + """Tests translating a SE3 state along the body frame vertical direction in the negative direction.""" + pose: StateSE3 = StateSE3.from_array(np.array([1.0, 2.0, 5.0, 0.0, 0.0, 0.0], dtype=np.float64)) + distance: float = -2.0 + result: StateSE3 = translate_se3_along_z(pose, distance) + expected: StateSE3 = StateSE3.from_array(np.array([1.0, 2.0, 3.0, 0.0, 0.0, 0.0], dtype=np.float64)) + np.testing.assert_array_almost_equal(result.array, expected.array) + + def test_translate_se3_along_z_with_rotation(self) -> None: + """Tests translating a SE3 state along the body frame vertical direction with pitch rotation.""" + pose: StateSE3 = StateSE3.from_array(np.array([1.0, 2.0, 3.0, 0.0, np.pi / 2, 0.0], dtype=np.float64)) + distance: float = 2.0 + result: StateSE3 = translate_se3_along_z(pose, distance) + expected: StateSE3 = StateSE3.from_array(np.array([3.0, 2.0, 3.0, 0.0, np.pi / 2, 0.0], dtype=np.float64)) + np.testing.assert_array_almost_equal(result.array, expected.array) + + def test_translate_se3_along_body_frame(self) -> None: + """Tests translating a SE3 state along the body frame forward direction.""" pose: StateSE3 = StateSE3.from_array(np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0], dtype=np.float64)) translation: Vector3D = Vector3D.from_array(np.array([1.0, 0.0, 0.0], dtype=np.float64)) result: StateSE3 = translate_se3_along_body_frame(pose, translation) expected: StateSE3 = StateSE3.from_array(np.array([1.0, 0.0, 0.0, 0.0, 0.0, 0.0], dtype=np.float64)) np.testing.assert_array_almost_equal(result.array, expected.array) - def test_translate_body_frame_multiple_axes(self) -> None: + def test_translate_se3_along_body_frame_multiple_axes(self) -> None: + """Tests translating a SE3 state along the body frame in multiple axes.""" pose: StateSE3 = StateSE3.from_array(np.array([1.0, 2.0, 3.0, 0.0, 0.0, 0.0], dtype=np.float64)) translation: Vector3D = Vector3D.from_array(np.array([0.5, -1.0, 2.0], dtype=np.float64)) result: StateSE3 = translate_se3_along_body_frame(pose, translation) expected: StateSE3 = StateSE3.from_array(np.array([1.5, 1.0, 5.0, 0.0, 0.0, 0.0], dtype=np.float64)) 
         np.testing.assert_array_almost_equal(result.array, expected.array)
 
-    def test_translate_body_frame_zero_translation(self) -> None:
+    def test_translate_se3_along_body_frame_zero_translation(self) -> None:
+        """Tests translating a SE3 state along the body frame with zero translation."""
         pose: StateSE3 = StateSE3.from_array(np.array([1.0, 2.0, 3.0, 0.0, 0.0, 0.0], dtype=np.float64))
         translation: Vector3D = Vector3D.from_array(np.array([0.0, 0.0, 0.0], dtype=np.float64))
         result: StateSE3 = translate_se3_along_body_frame(pose, translation)
         expected: StateSE3 = StateSE3.from_array(np.array([1.0, 2.0, 3.0, 0.0, 0.0, 0.0], dtype=np.float64))
         np.testing.assert_array_almost_equal(result.array, expected.array)
 
-    def test_translate_body_frame_consistency(self) -> None:
+    def test_translate_se3_along_body_frame_with_rotation(self) -> None:
+        """Tests translating a SE3 state along the body frame forward direction with yaw rotation."""
+        # Rotate 90 degrees around z-axis, then translate 1 unit along body x-axis
+        pose: StateSE3 = StateSE3.from_array(np.array([0.0, 0.0, 0.0, 0.0, 0.0, np.pi / 2], dtype=np.float64))
+        translation: Vector3D = Vector3D.from_array(np.array([1.0, 0.0, 0.0], dtype=np.float64))
+        result: StateSE3 = translate_se3_along_body_frame(pose, translation)
+        # Should move in +Y direction in world frame
+        expected: StateSE3 = StateSE3.from_array(np.array([0.0, 1.0, 0.0, 0.0, 0.0, np.pi / 2], dtype=np.float64))
+        np.testing.assert_array_almost_equal(result.array, expected.array, decimal=self.decimal)
+
+    def test_translate_se3_along_body_frame_consistency(self) -> None:
+        """Tests consistency between translate_se3_along_body_frame and axis-specific translation functions."""
         for _ in range(self.num_consistency_tests):
             # Generate random parameters
@@ -280,6 +421,7 @@ def test_translate_body_frame_consistency(self) -> None:
         )
 
     def test_convert_absolute_to_relative_se3_array(self) -> None:
+        """Tests converting absolute SE3 poses to relative SE3 poses."""
         reference: StateSE3 = StateSE3.from_array(np.array([1.0, 1.0, 1.0, 0.0, 0.0, 0.0], dtype=np.float64))
         absolute_poses: npt.NDArray[np.float64] = np.array(
             [
@@ -299,13 +441,23 @@ def test_convert_absolute_to_relative_se3_array(self) -> None:
         np.testing.assert_array_almost_equal(result, expected)
 
     def test_convert_absolute_to_relative_se3_array_single_pose(self) -> None:
+        """Tests converting a single absolute SE3 pose to a relative SE3 pose."""
         reference: StateSE3 = StateSE3.from_array(np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0], dtype=np.float64))
         absolute_poses: npt.NDArray[np.float64] = np.array([[1.0, 2.0, 3.0, 0.0, 0.0, 0.0]], dtype=np.float64)
         result: npt.NDArray[np.float64] = convert_absolute_to_relative_se3_array(reference, absolute_poses)
         expected: npt.NDArray[np.float64] = np.array([[1.0, 2.0, 3.0, 0.0, 0.0, 0.0]], dtype=np.float64)
         np.testing.assert_array_almost_equal(result, expected)
 
+    def test_convert_absolute_to_relative_se3_array_with_rotation(self) -> None:
+        """Tests converting absolute SE3 poses to relative SE3 poses with 90 degree yaw rotation."""
+        reference: StateSE3 = StateSE3.from_array(np.array([0.0, 0.0, 0.0, 0.0, 0.0, np.pi / 2], dtype=np.float64))
+        absolute_poses: npt.NDArray[np.float64] = np.array([[1.0, 0.0, 0.0, 0.0, 0.0, 0.0]], dtype=np.float64)
+        result: npt.NDArray[np.float64] = convert_absolute_to_relative_se3_array(reference, absolute_poses)
+        expected: npt.NDArray[np.float64] = np.array([[0.0, -1.0, 0.0, 0.0, 0.0, -np.pi / 2]], dtype=np.float64)
+        np.testing.assert_array_almost_equal(result, expected, decimal=self.decimal)
+
     def test_convert_relative_to_absolute_se3_array(self) -> None:
+        """Tests converting relative SE3 poses to absolute SE3 poses."""
         reference: StateSE3 = StateSE3.from_array(np.array([1.0, 1.0, 1.0, 0.0, 0.0, 0.0], dtype=np.float64))
         relative_poses: npt.NDArray[np.float64] = np.array(
             [
@@ -324,7 +476,16 @@ def test_convert_relative_to_absolute_se3_array(self) -> None:
         )
         np.testing.assert_array_almost_equal(result, expected)
 
+    def test_convert_relative_to_absolute_se3_array_with_rotation(self) -> None:
+        """Tests converting relative SE3 poses to absolute SE3 poses with 90 degree yaw rotation."""
+        reference: StateSE3 = StateSE3.from_array(np.array([1.0, 0.0, 0.0, 0.0, 0.0, np.pi / 2], dtype=np.float64))
+        relative_poses: npt.NDArray[np.float64] = np.array([[1.0, 0.0, 0.0, 0.0, 0.0, 0.0]], dtype=np.float64)
+        result: npt.NDArray[np.float64] = convert_relative_to_absolute_se3_array(reference, relative_poses)
+        expected: npt.NDArray[np.float64] = np.array([[1.0, 1.0, 0.0, 0.0, 0.0, np.pi / 2]], dtype=np.float64)
+        np.testing.assert_array_almost_equal(result, expected, decimal=self.decimal)
+
     def test_convert_absolute_to_relative_points_3d_array(self) -> None:
+        """Tests converting absolute 3D points to relative 3D points."""
         reference: StateSE3 = StateSE3.from_array(np.array([1.0, 1.0, 1.0, 0.0, 0.0, 0.0], dtype=np.float64))
         absolute_points: npt.NDArray[np.float64] = np.array([[2.0, 2.0, 2.0], [0.0, 1.0, 0.0]], dtype=np.float64)
         result: npt.NDArray[np.float64] = convert_absolute_to_relative_points_3d_array(reference, absolute_points)
@@ -332,13 +493,23 @@ def test_convert_absolute_to_relative_points_3d_array(self) -> None:
         np.testing.assert_array_almost_equal(result, expected)
 
     def test_convert_absolute_to_relative_points_3d_array_origin_reference(self) -> None:
+        """Tests converting absolute 3D points to relative 3D points with origin reference."""
         reference: StateSE3 = StateSE3.from_array(np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0], dtype=np.float64))
         absolute_points: npt.NDArray[np.float64] = np.array([[1.0, 2.0, 3.0], [-1.0, -2.0, -3.0]], dtype=np.float64)
         result: npt.NDArray[np.float64] = convert_absolute_to_relative_points_3d_array(reference, absolute_points)
         expected: npt.NDArray[np.float64] = np.array([[1.0, 2.0, 3.0], [-1.0, -2.0, -3.0]], dtype=np.float64)
         np.testing.assert_array_almost_equal(result, expected)
 
+    def test_convert_absolute_to_relative_points_3d_array_with_rotation(self) -> None:
+        """Tests converting absolute 3D points to relative 3D points with 90 degree yaw rotation."""
+        reference: StateSE3 = StateSE3.from_array(np.array([0.0, 0.0, 0.0, 0.0, 0.0, np.pi / 2], dtype=np.float64))
+        absolute_points: npt.NDArray[np.float64] = np.array([[1.0, 0.0, 0.0], [0.0, 1.0, 1.0]], dtype=np.float64)
+        result: npt.NDArray[np.float64] = convert_absolute_to_relative_points_3d_array(reference, absolute_points)
+        expected: npt.NDArray[np.float64] = np.array([[0.0, -1.0, 0.0], [1.0, 0.0, 1.0]], dtype=np.float64)
+        np.testing.assert_array_almost_equal(result, expected, decimal=self.decimal)
+
     def test_convert_relative_to_absolute_points_3d_array(self) -> None:
+        """Tests converting relative 3D points to absolute 3D points."""
         reference: StateSE3 = StateSE3.from_array(np.array([1.0, 1.0, 1.0, 0.0, 0.0, 0.0], dtype=np.float64))
         relative_points: npt.NDArray[np.float64] = np.array([[1.0, 1.0, 1.0], [-1.0, 0.0, -1.0]], dtype=np.float64)
         result: npt.NDArray[np.float64] = convert_relative_to_absolute_points_3d_array(reference, relative_points)
@@ -346,14 +517,25 @@ def test_convert_relative_to_absolute_points_3d_array(self) -> None:
         np.testing.assert_array_almost_equal(result, expected)
 
     def test_convert_relative_to_absolute_points_3d_array_empty(self) -> None:
+        """Tests converting an empty array of relative 3D points to absolute 3D points."""
         reference: StateSE3 = StateSE3.from_array(np.array([1.0, 1.0, 1.0, 0.0, 0.0, 0.0], dtype=np.float64))
         relative_points: npt.NDArray[np.float64] = np.array([], dtype=np.float64).reshape(0, 3)
         result: npt.NDArray[np.float64] = convert_relative_to_absolute_points_3d_array(reference, relative_points)
         expected: npt.NDArray[np.float64] = np.array([], dtype=np.float64).reshape(0, 3)
         np.testing.assert_array_almost_equal(result, expected)
 
+    def test_convert_relative_to_absolute_points_3d_array_with_rotation(self) -> None:
+        """Tests converting relative 3D points to absolute 3D points with 90 degree yaw rotation."""
+        reference: StateSE3 = StateSE3.from_array(np.array([1.0, 0.0, 0.0, 0.0, 0.0, np.pi / 2], dtype=np.float64))
+        relative_points: npt.NDArray[np.float64] = np.array([[1.0, 0.0, 0.0], [0.0, 1.0, 1.0]], dtype=np.float64)
+        result: npt.NDArray[np.float64] = convert_relative_to_absolute_points_3d_array(reference, relative_points)
+        expected: npt.NDArray[np.float64] = np.array([[1.0, 1.0, 0.0], [0.0, 0.0, 1.0]], dtype=np.float64)
+        np.testing.assert_array_almost_equal(result, expected, decimal=self.decimal)
+
+
 class TestTransformConsistency(unittest.TestCase):
+    """Tests to ensure consistency between different transformation functions."""
+
     def setUp(self):
         self.decimal = 6  # Decimal places for np.testing.assert_array_almost_equal
         self.num_consistency_tests = 10  # Number of random test cases for consistency checks
@@ -434,6 +616,31 @@ def test_se2_points_consistency(self) -> None:
             recovered_absolute_se2[..., StateSE2Index.XY], absolute_points, decimal=self.decimal
         )
 
+    def test_se2_translation_consistency(self) -> None:
+        """Test that SE2 translations are consistent between different methods"""
+        for _ in range(self.num_consistency_tests):
+            # Generate random pose
+            pose = StateSE2.from_array(self._get_random_se2_array(1)[0])
+
+            # Generate random distances
+            dx = np.random.uniform(-10.0, 10.0)
+            dy = np.random.uniform(-10.0, 10.0)
+
+            # Test x-translation consistency
+            result_x_direct = translate_se2_along_x(pose, dx)
+            result_x_body = translate_se2_along_body_frame(pose, Vector2D(dx, 0.0))
+            np.testing.assert_array_almost_equal(result_x_direct.array, result_x_body.array, decimal=self.decimal)
+
+            # Test y-translation consistency
+            result_y_direct = translate_se2_along_y(pose, dy)
+            result_y_body = translate_se2_along_body_frame(pose, Vector2D(0.0, dy))
+            np.testing.assert_array_almost_equal(result_y_direct.array, result_y_body.array, decimal=self.decimal)
+
+            # Test combined translation
+            result_xy_body = translate_se2_along_body_frame(pose, Vector2D(dx, dy))
+            result_xy_sequential = translate_se2_along_y(translate_se2_along_x(pose, dx), dy)
+            np.testing.assert_array_almost_equal(result_xy_body.array, result_xy_sequential.array, decimal=self.decimal)
+
     def test_se3_absolute_relative_conversion_consistency(self) -> None:
         """Test that converting absolute->relative->absolute returns original poses"""
         for _ in range(self.num_consistency_tests):
@@ -569,28 +776,141 @@ def test_se2_se3_point_conversion_consistency(self) -> None:
         relative_3d = convert_absolute_to_relative_points_3d_array(reference_se3, points_3d)
         absolute_3d_recovered = convert_relative_to_absolute_points_3d_array(reference_se3, relative_3d)
 
-        # Check that SE2 and SE3 results are consistent (ignoring z-component)
+        # Check that SE2 and SE3 conversions are consistent for the x,y components
+        np.testing.assert_array_almost_equal(relative_2d, relative_3d[:, Point3DIndex.XY], decimal=self.decimal)
         np.testing.assert_array_almost_equal(
-            relative_2d,
-            relative_3d[..., Point3DIndex.XY],
-            decimal=self.decimal,
+            absolute_2d_recovered, absolute_3d_recovered[:, Point3DIndex.XY], decimal=self.decimal
         )
+
+        # Check that z-components remain zero
         np.testing.assert_array_almost_equal(
-            absolute_2d_recovered,
-            absolute_3d_recovered[..., Point3DIndex.XY],
-            decimal=self.decimal,
+            relative_3d[:, Point3DIndex.Z], np.zeros(num_points), decimal=self.decimal
+        )
-        # Z-component should remain zero
         np.testing.assert_array_almost_equal(
-            relative_3d[..., Point3DIndex.Z],
-            np.zeros(num_points),
+            absolute_3d_recovered[:, Point3DIndex.Z], np.zeros(num_points), decimal=self.decimal
+        )
+
+    def test_se2_array_translation_consistency(self) -> None:
+        """Test that SE2 array translation is consistent with single pose translation"""
+        for _ in range(self.num_consistency_tests):
+            # Generate random poses
+            num_poses = np.random.randint(self.min_random_poses, self.max_random_poses)
+            poses_array = self._get_random_se2_array(num_poses)
+
+            # Generate random translation
+            dx = np.random.uniform(-5.0, 5.0)
+            dy = np.random.uniform(-5.0, 5.0)
+            translation = Vector2D(dx, dy)
+
+            # Translate using array function
+            result_array = translate_se2_array_along_body_frame(poses_array, translation)
+
+            # Translate each pose individually
+            result_individual = np.zeros_like(poses_array)
+            for i in range(num_poses):
+                pose = StateSE2.from_array(poses_array[i])
+                translated = translate_se2_along_body_frame(pose, translation)
+                result_individual[i] = translated.array
+
+            np.testing.assert_array_almost_equal(result_array, result_individual, decimal=self.decimal)
+
+    def test_transform_empty_arrays(self) -> None:
+        """Test that transform functions handle empty arrays correctly"""
+        reference_se2 = StateSE2.from_array(np.array([1.0, 2.0, np.pi / 4], dtype=np.float64))
+        reference_se3 = StateSE3.from_array(np.array([1.0, 2.0, 3.0, 0.1, 0.2, 0.3], dtype=np.float64))
+
+        # Test SE2 empty arrays
+        empty_se2_poses = np.array([], dtype=np.float64).reshape(0, len(StateSE2Index))
+        empty_2d_points = np.array([], dtype=np.float64).reshape(0, len(Point2DIndex))
+
+        result_se2_poses = convert_absolute_to_relative_se2_array(reference_se2, empty_se2_poses)
+        result_2d_points = convert_absolute_to_relative_point_2d_array(reference_se2, empty_2d_points)
+
+        self.assertEqual(result_se2_poses.shape, (0, len(StateSE2Index)))
+        self.assertEqual(result_2d_points.shape, (0, len(Point2DIndex)))
+
+        # Test SE3 empty arrays
+        empty_se3_poses = np.array([], dtype=np.float64).reshape(0, len(StateSE3Index))
+        empty_3d_points = np.array([], dtype=np.float64).reshape(0, len(Point3DIndex))
+
+        result_se3_poses = convert_absolute_to_relative_se3_array(reference_se3, empty_se3_poses)
+        result_3d_points = convert_absolute_to_relative_points_3d_array(reference_se3, empty_3d_points)
+
+        self.assertEqual(result_se3_poses.shape, (0, len(StateSE3Index)))
+        self.assertEqual(result_3d_points.shape, (0, len(Point3DIndex)))
+
+    def test_transform_identity_operations(self) -> None:
+        """Test that transforms with identity reference frames work correctly"""
+        # Identity SE2 pose
+        identity_se2 = StateSE2.from_array(np.array([0.0, 0.0, 0.0], dtype=np.float64))
+        identity_se3 = StateSE3.from_array(np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0], dtype=np.float64))
+
+        for _ in range(self.num_consistency_tests):
+            # Test SE2 identity transforms
+            num_poses = np.random.randint(1, 10)
+            se2_poses = self._get_random_se2_array(num_poses)
+            se2_points = se2_poses[:, StateSE2Index.XY]
+
+            relative_se2_poses = convert_absolute_to_relative_se2_array(identity_se2, se2_poses)
+            relative_se2_points = convert_absolute_to_relative_point_2d_array(identity_se2, se2_points)
+
+            np.testing.assert_array_almost_equal(se2_poses, relative_se2_poses, decimal=self.decimal)
+            np.testing.assert_array_almost_equal(se2_points, relative_se2_points, decimal=self.decimal)
+
+            # Test SE3 identity transforms
+            se3_poses = self._get_random_se3_array(num_poses)
+            se3_points = se3_poses[:, StateSE3Index.XYZ]
+
+            relative_se3_poses = convert_absolute_to_relative_se3_array(identity_se3, se3_poses)
+            relative_se3_points = convert_absolute_to_relative_points_3d_array(identity_se3, se3_points)
+
+            np.testing.assert_array_almost_equal(se3_poses, relative_se3_poses, decimal=self.decimal)
+            np.testing.assert_array_almost_equal(se3_points, relative_se3_points, decimal=self.decimal)
+
+    def test_transform_large_rotations(self) -> None:
+        """Test transforms with large rotation angles beyond [-π, π]"""
+        for _ in range(self.num_consistency_tests):
+            # Create poses with large rotation angles
+            large_yaw_se2 = np.random.uniform(-4 * np.pi, 4 * np.pi)
+            large_euler_se3 = np.random.uniform(-4 * np.pi, 4 * np.pi, 3)
+
+            reference_se2 = StateSE2.from_array(np.array([0.0, 0.0, large_yaw_se2], dtype=np.float64))
+            reference_se3 = StateSE3.from_array(
+                np.array([0.0, 0.0, 0.0, large_euler_se3[0], large_euler_se3[1], large_euler_se3[2]], dtype=np.float64)
+            )
+
+            # Generate test poses/points
+            test_se2_poses = self._get_random_se2_array(5)
+            test_se3_poses = self._get_random_se3_array(5)
+            test_2d_points = test_se2_poses[:, StateSE2Index.XY]
+            test_3d_points = test_se3_poses[:, StateSE3Index.XYZ]
+
+            # Test round-trip conversions should still work
+            relative_se2 = convert_absolute_to_relative_se2_array(reference_se2, test_se2_poses)
+            recovered_se2 = convert_relative_to_absolute_se2_array(reference_se2, relative_se2)
+
+            relative_se3 = convert_absolute_to_relative_se3_array(reference_se3, test_se3_poses)
+            recovered_se3 = convert_relative_to_absolute_se3_array(reference_se3, relative_se3)
+
+            relative_2d_points = convert_absolute_to_relative_point_2d_array(reference_se2, test_2d_points)
+            recovered_2d_points = convert_relative_to_absolute_point_2d_array(reference_se2, relative_2d_points)
+
+            relative_3d_points = convert_absolute_to_relative_points_3d_array(reference_se3, test_3d_points)
+            recovered_3d_points = convert_relative_to_absolute_points_3d_array(reference_se3, relative_3d_points)
+
+            # Check consistency (allowing for angle wrapping)
+            np.testing.assert_array_almost_equal(
+                test_se2_poses[:, StateSE2Index.XY],
+                recovered_se2[:, StateSE2Index.XY],
                 decimal=self.decimal,
             )
             np.testing.assert_array_almost_equal(
-                absolute_3d_recovered[..., Point3DIndex.Z],
-                np.zeros(num_points),
+                test_se3_poses[:, StateSE3Index.XYZ],
+                recovered_se3[:, StateSE3Index.XYZ],
                 decimal=self.decimal,
             )
+            np.testing.assert_array_almost_equal(test_2d_points, recovered_2d_points, decimal=self.decimal)
+            np.testing.assert_array_almost_equal(test_3d_points, recovered_3d_points, decimal=self.decimal)
 
 
 if __name__ == "__main__":
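For readers skimming the test changes above: the following is a minimal, self-contained NumPy sketch of the SE(2) analogue of the conventions these tests pin down (body-frame translation and absolute-to-relative conversion). It is illustrative only; the helper names below are hypothetical and not part of the d123 API, whose own implementations (translate_se2_along_body_frame, convert_absolute_to_relative_se2_array, and their SE(3) counterparts) are the functions under test.

import numpy as np

def translate_along_body_frame(pose: np.ndarray, offset: np.ndarray) -> np.ndarray:
    """pose = [x, y, yaw]; offset = [dx, dy] expressed in the body frame."""
    x, y, yaw = pose
    c, s = np.cos(yaw), np.sin(yaw)
    # Rotate the body-frame offset into the world frame, then add it.
    return np.array([x + c * offset[0] - s * offset[1], y + s * offset[0] + c * offset[1], yaw])

def absolute_to_relative(reference: np.ndarray, poses: np.ndarray) -> np.ndarray:
    """Express absolute poses of shape (N, 3) in the frame of `reference`."""
    c, s = np.cos(reference[2]), np.sin(reference[2])
    world_to_body = np.array([[c, s], [-s, c]])  # R(-yaw)
    out = poses.copy()
    out[:, :2] = (poses[:, :2] - reference[:2]) @ world_to_body.T
    out[:, 2] = (poses[:, 2] - reference[2] + np.pi) % (2.0 * np.pi) - np.pi  # wrap to [-pi, pi)
    return out

# A 90-degree yaw turns a +x body-frame step into world +y, matching
# test_translate_se3_along_body_frame_with_rotation; the relative pose of a
# point ahead on the world x-axis is then [0, -1, -pi/2], matching
# test_convert_absolute_to_relative_se3_array_with_rotation.
pose = np.array([0.0, 0.0, np.pi / 2.0])
print(translate_along_body_frame(pose, np.array([1.0, 0.0])))   # [0. 1. 1.5708]
print(absolute_to_relative(pose, np.array([[1.0, 0.0, 0.0]])))  # [[ 0. -1. -1.5708]]

The sign pattern in the expected values of the rotation tests falls directly out of applying R(-yaw) to the position difference, as the example output shows.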
From ac7fc40c90b2661bd2c72b14fa9cc4c5e6d588cb Mon Sep 17 00:00:00 2001
From: jbwang <1159270049@qq.com>
Date: Thu, 28 Aug 2025 11:02:30 +0800
Subject: [PATCH 029/145] stop tracking jbwang_test script

---
 .../dataset_specific/kitti_360/jbwang_test.py | 155 -----
 .../kitti_360/kitti_360_data_converter.py     |   9 +-
 .../code/hydra/config.yaml                    |  60 ---
 .../2025.08.15.14.31.57/code/hydra/hydra.yaml | 177 ------
 .../code/hydra/overrides.yaml                 |   1 -
 exp/kitti360_test/2025.08.15.14.31.57/log.txt |  10 -
 .../code/hydra/config.yaml                    |  60 ---
 .../2025.08.15.14.36.40/code/hydra/hydra.yaml | 177 ------
 .../code/hydra/overrides.yaml                 |   1 -
 exp/kitti360_test/2025.08.15.14.36.40/log.txt |  10 -
 .../code/hydra/config.yaml                    |  60 ---
 .../2025.08.15.14.40.29/code/hydra/hydra.yaml | 177 ------
 .../code/hydra/overrides.yaml                 |   1 -
 exp/kitti_test2/2025.08.15.14.40.29/log.txt   |  10 -
 .../code/hydra/config.yaml                    |  60 ---
 .../2025.08.15.14.43.13/code/hydra/hydra.yaml | 177 ------
 .../code/hydra/overrides.yaml                 |   1 -
 exp/kitti_test2/2025.08.15.14.43.13/log.txt   |  12 -
 .../code/hydra/config.yaml                    |  60 ---
 .../2025.08.15.14.46.49/code/hydra/hydra.yaml | 177 ------
 .../code/hydra/overrides.yaml                 |   1 -
 exp/kitti_test2/2025.08.15.14.46.49/log.txt   |  10 -
 .../code/hydra/config.yaml                    |  60 ---
 .../2025.08.15.14.50.55/code/hydra/hydra.yaml | 177 ------
 .../code/hydra/overrides.yaml                 |   1 -
 exp/kitti_test2/2025.08.15.14.50.55/log.txt   |  11 -
 .../code/hydra/config.yaml                    |  60 ---
 .../2025.08.15.14.52.39/code/hydra/hydra.yaml | 177 ------
 .../code/hydra/overrides.yaml                 |   1 -
 exp/kitti_test2/2025.08.15.14.52.39/log.txt   |  11 -
 .../code/hydra/config.yaml                    |  60 ---
 .../2025.08.11.15.45.36/code/hydra/hydra.yaml | 177 ------
 .../code/hydra/overrides.yaml                 |   1 -
 exp/my_run/2025.08.11.15.45.36/log.txt        |  10 -
 jbwang_test.py                                |  98 -------
 jbwang_test2.py                               | 229 ----------------
 notebooks/dataset/jbwang_test.py              |  94 -------
 notebooks/gym/jbwang_test.py                  | 180 -------------
 notebooks/jbwang_viz_test.py                  | 252 ------------------
 39 files changed, 5 insertions(+), 3000 deletions(-)
 delete mode 100644 d123/dataset/dataset_specific/kitti_360/jbwang_test.py
 delete mode 100644 exp/kitti360_test/2025.08.15.14.31.57/code/hydra/config.yaml
 delete mode 100644 exp/kitti360_test/2025.08.15.14.31.57/code/hydra/hydra.yaml
 delete mode 100644 exp/kitti360_test/2025.08.15.14.31.57/code/hydra/overrides.yaml
 delete mode 100644 exp/kitti360_test/2025.08.15.14.31.57/log.txt
 delete mode 100644 exp/kitti360_test/2025.08.15.14.36.40/code/hydra/config.yaml
 delete mode 100644 exp/kitti360_test/2025.08.15.14.36.40/code/hydra/hydra.yaml
 delete mode 100644 exp/kitti360_test/2025.08.15.14.36.40/code/hydra/overrides.yaml
 delete mode 100644 exp/kitti360_test/2025.08.15.14.36.40/log.txt
 delete mode 100644 exp/kitti_test2/2025.08.15.14.40.29/code/hydra/config.yaml
 delete mode 100644 exp/kitti_test2/2025.08.15.14.40.29/code/hydra/hydra.yaml
 delete mode 100644 exp/kitti_test2/2025.08.15.14.40.29/code/hydra/overrides.yaml
 delete mode 100644 exp/kitti_test2/2025.08.15.14.40.29/log.txt
 delete mode 100644 exp/kitti_test2/2025.08.15.14.43.13/code/hydra/config.yaml
 delete mode 100644 exp/kitti_test2/2025.08.15.14.43.13/code/hydra/hydra.yaml
 delete mode 100644 exp/kitti_test2/2025.08.15.14.43.13/code/hydra/overrides.yaml
 delete mode 100644 exp/kitti_test2/2025.08.15.14.43.13/log.txt
 delete mode 100644 exp/kitti_test2/2025.08.15.14.46.49/code/hydra/config.yaml
 delete mode 100644 exp/kitti_test2/2025.08.15.14.46.49/code/hydra/hydra.yaml
 delete mode 100644 exp/kitti_test2/2025.08.15.14.46.49/code/hydra/overrides.yaml
 delete mode 100644 exp/kitti_test2/2025.08.15.14.46.49/log.txt
 delete mode 100644 exp/kitti_test2/2025.08.15.14.50.55/code/hydra/config.yaml
 delete mode 100644 exp/kitti_test2/2025.08.15.14.50.55/code/hydra/hydra.yaml
 delete mode 100644 exp/kitti_test2/2025.08.15.14.50.55/code/hydra/overrides.yaml
 delete mode 100644 exp/kitti_test2/2025.08.15.14.50.55/log.txt
 delete mode 100644 exp/kitti_test2/2025.08.15.14.52.39/code/hydra/config.yaml
 delete mode 100644 exp/kitti_test2/2025.08.15.14.52.39/code/hydra/hydra.yaml
 delete mode 100644 exp/kitti_test2/2025.08.15.14.52.39/code/hydra/overrides.yaml
 delete mode 100644 exp/kitti_test2/2025.08.15.14.52.39/log.txt
 delete mode 100644 exp/my_run/2025.08.11.15.45.36/code/hydra/config.yaml
 delete mode 100644 exp/my_run/2025.08.11.15.45.36/code/hydra/hydra.yaml
 delete mode 100644 exp/my_run/2025.08.11.15.45.36/code/hydra/overrides.yaml
 delete mode 100644 exp/my_run/2025.08.11.15.45.36/log.txt
 delete mode 100644 jbwang_test.py
 delete mode 100644 jbwang_test2.py
 delete mode 100644 notebooks/dataset/jbwang_test.py
 delete mode 100644 notebooks/gym/jbwang_test.py
 delete mode 100644 notebooks/jbwang_viz_test.py

diff --git a/d123/dataset/dataset_specific/kitti_360/jbwang_test.py b/d123/dataset/dataset_specific/kitti_360/jbwang_test.py
deleted file mode 100644
index e480783e..00000000
--- a/d123/dataset/dataset_specific/kitti_360/jbwang_test.py
+++ /dev/null
@@ -1,155 +0,0 @@
-import gc
-import json
-import os
-import pickle
-from dataclasses import asdict
-from functools import partial
-from pathlib import Path
-from typing import Any, Dict, Final, List, Optional, Tuple, Union
-
-import numpy as np
-import pyarrow as pa
-import yaml
-from nuplan.database.nuplan_db.nuplan_scenario_queries import get_cameras, get_images_from_lidar_tokens
-from nuplan.database.nuplan_db_orm.ego_pose import EgoPose
-from nuplan.database.nuplan_db_orm.lidar_box import LidarBox
-from nuplan.database.nuplan_db_orm.lidar_pc import LidarPc
-from nuplan.database.nuplan_db_orm.nuplandb import NuPlanDB
-from nuplan.planning.simulation.observation.observation_type import CameraChannel
-from nuplan.planning.utils.multithreading.worker_utils import WorkerPool, worker_map
-from pyquaternion import Quaternion
-from sqlalchemy import func
-
-
-from kitti_360_data_converter import _extract_ego_state_all,get_kitti360_lidar_metadata,_extract_cameras,_extract_detections,_read_timestamps
-
-# a = _extract_ego_state_all("2013_05_28_drive_0000_sync")
-# print(a[0])
-# print(a[1])
-# print(a[10])
-from d123.common.datatypes.time.time_point import TimePoint
-from d123.common.datatypes.sensor.camera import CameraMetadata, CameraType, camera_metadata_dict_to_json
-
-NUPLAN_CAMERA_TYPES = {
-    CameraType.CAM_F0: CameraChannel.CAM_F0,
-    CameraType.CAM_B0: CameraChannel.CAM_B0,
-    CameraType.CAM_L0: CameraChannel.CAM_L0,
-    CameraType.CAM_L1: CameraChannel.CAM_L1,
-    CameraType.CAM_L2: CameraChannel.CAM_L2,
-    CameraType.CAM_R0: CameraChannel.CAM_R0,
-    CameraType.CAM_R1: CameraChannel.CAM_R1,
-    CameraType.CAM_R2: CameraChannel.CAM_R2,
-}
-
-NUPLAN_DATA_ROOT = Path(os.environ["NUPLAN_DATA_ROOT"])
-NUPLAN_ROLLING_SHUTTER_S: Final[TimePoint] = TimePoint.from_s(1 / 60)
-
-def _extract_camera(
-    log_db: NuPlanDB,
-    lidar_pc: LidarPc,
-    source_log_path: Path,
-) -> Dict[CameraType, Union[str, bytes]]:
-
-    camera_dict: Dict[str, Union[str, bytes]] = {}
-    sensor_root = NUPLAN_DATA_ROOT / "nuplan-v1.1" / "sensor_blobs"
-
-    log_cam_infos = {camera.token: camera for camera in log_db.log.cameras}
-    for camera_type, camera_channel in NUPLAN_CAMERA_TYPES.items():
-        camera_data: Optional[Union[str, bytes]] = None
-        c2e: Optional[List[float]] = None
-        image_class = list(get_images_from_lidar_tokens(source_log_path, [lidar_pc.token], [str(camera_channel.value)]))
-        # print("image_class",image_class)
-        if len(image_class) != 0:
-            image = image_class[0]
-            filename_jpg = sensor_root / image.filename_jpg
-
-            timestamp = image.timestamp + NUPLAN_ROLLING_SHUTTER_S.time_us
-            img_ego_pose: EgoPose = (
-                log_db.log._session.query(EgoPose).order_by(func.abs(EgoPose.timestamp - timestamp)).first()
-            )
-            img_e2g = img_ego_pose.trans_matrix
-            g2e = lidar_pc.ego_pose.trans_matrix_inv
-            img_e2e = g2e @ img_e2g
-            cam_info = log_cam_infos[image.camera_token]
-            c2img_e = cam_info.trans_matrix
-            c2e = img_e2e @ c2img_e
-            # print(f"Camera {camera_type} found for lidar {lidar_pc.token} at timestamp {timestamp}")
-            print(camera_type,"c2e:", c2e)
-        camera_dict[camera_type] = camera_data
-
-    return camera_dict
-
-
-def get_cam_info_from_lidar_pc(log,log_file, lidar_pc, rolling_shutter_s=1/60):
-
-    retrieved_images = get_images_from_lidar_tokens(
-        log_file, [lidar_pc.token], [str(channel.value) for channel in CameraChannel]
-    )
-
-    # if interp_trans:
-    #     neighbours = []
-    #     ego_poses_dict = {}
-    #     for ego_pose in log.ego_poses:
-    #         ego_poses_dict[ego_pose.token] = ego_pose
-    #         if abs(ego_pose.timestamp - lidar_pc.ego_pose.timestamp) / 1e6 < 0.5:
-    #             neighbours.append(ego_pose)
-    #     timestamps = [pose.timestamp for pose in neighbours]
-    #     translations = [pose.translation_np for pose in neighbours]
-    #     splines = [CubicSpline(timestamps, [translation[i] for translation in translations]) for i in range(2)]
-
-    log_cam_infos = {camera.token : camera for camera in log.camera}
-    cams = {}
-    for img in retrieved_images:
-        channel = img.channel
-        filename = img.filename_jpg
-
-        # if interp_trans:
-        #     img_ego_pose = ego_poses_dict[img.ego_pose_token]
-        #     interpolated_translation = np.array([splines[0](timestamp), splines[1](timestamp), img_ego_pose.z])
-        #     delta = interpolated_translation - lidar_pc.ego_pose.translation_np
-        #     delta = np.dot(lidar_pc.ego_pose.quaternion.rotation_matrix.T, delta)
-        if channel == "CAM_F0":
-            timestamp = img.timestamp + (rolling_shutter_s * 1e6)
-            img_ego_pose = log.session.query(EgoPose).order_by(func.abs(EgoPose.timestamp - timestamp)).first()
-            img_e2g = img_ego_pose.trans_matrix
-            # print("img_e2g:", img_e2g)
-
-            g2e = lidar_pc.ego_pose.trans_matrix_inv
-            # print("g2e:", g2e) #change obviously
-            img_e2e = g2e @ img_e2g
-            # print("img_e2e:", img_e2e)
-            cam_info = log_cam_infos[img.camera_token]
-            c2img_e = cam_info.trans_matrix
-            # print("c2img_e:", c2img_e)
-            c2e = img_e2e @ c2img_e
-            # print("channel:", channel, "c2e:", c2e)
-
-        cams[channel] = dict(
-            data_path = filename,
-            timestamp = img.timestamp,
-            token=img.token,
-            sensor2ego_rotation = Quaternion(matrix=c2e[:3, :3]),
-            sensor2ego_translation = c2e[:3, 3],
-            cam_intrinsic = cam_info.intrinsic_np,
-            distortion = cam_info.distortion_np,
-        )
-
-
-    if len(cams) != 8:
-        return None
-    # print(cams)
-    return cams
-
-if __name__ == "__main__":
-    # Example usage
-    # data_converter_config: DataConverterConfig
-    # log_path = Path("/nas/datasets/nuplan/nuplan-v1.1/splits/mini/2021.10.11.07.12.18_veh-50_00211_00304.db")
-    # log_path = Path("/nas/datasets/nuplan/nuplan-v1.1/splits/mini/2021.09.16.15.12.03_veh-42_01037_01434.db")
-    # log_db = NuPlanDB(NUPLAN_DATA_ROOT, str(log_path), None)
-
-    # for lidar_pc in log_db.lidar_pc:  # Replace with actual token
-    #     # camera_data = _extract_camera(log_db, lidar_pc, log_path)
-    #     camera_data = get_cam_info_from_lidar_pc(log_db,log_path, lidar_pc, rolling_shutter_s=1/60)
-    # print(_extract_cameras("2013_05_28_drive_0000_sync",0))
-    # _extract_detections("2013_05_28_drive_0000_sync", 0)
-    print(_read_timestamps("2013_05_28_drive_0000_sync"))
\ No newline at end of file
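The scratch script deleted above was probing one reusable piece of math: re-expressing nuPlan camera extrinsics in the ego frame of a lidar sweep, after shifting the image timestamp by the rolling-shutter offset (NUPLAN_ROLLING_SHUTTER_S, 1/60 s) to look up the nearest ego pose. A distilled NumPy sketch of that composition follows; the function name is hypothetical, the variable names mirror the script's (c2img_e, img_e2g, g2e, img_e2e, c2e), and the 4x4 inputs would come from the database in practice.

import numpy as np

def camera_to_lidar_ego(
    c2img_e: np.ndarray,    # camera -> ego extrinsics at the image timestamp (4x4)
    img_e2g: np.ndarray,    # image-time ego -> global pose (4x4)
    lidar_e2g: np.ndarray,  # lidar-time ego -> global pose (4x4)
) -> np.ndarray:
    """Chain the camera extrinsics into the ego frame of the lidar sweep."""
    g2e = np.linalg.inv(lidar_e2g)  # global -> lidar-time ego
    img_e2e = g2e @ img_e2g         # image-time ego -> lidar-time ego
    return img_e2e @ c2img_e        # camera -> lidar-time ego ("c2e" in the script)

# When the ego did not move between the two timestamps, the extrinsics pass
# through unchanged.
assert np.allclose(camera_to_lidar_ego(np.eye(4), np.eye(4), np.eye(4)), np.eye(4))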
diff --git a/d123/dataset/dataset_specific/kitti_360/kitti_360_data_converter.py b/d123/dataset/dataset_specific/kitti_360/kitti_360_data_converter.py
index 03e5bd37..2cc40675 100644
--- a/d123/dataset/dataset_specific/kitti_360/kitti_360_data_converter.py
+++ b/d123/dataset/dataset_specific/kitti_360/kitti_360_data_converter.py
@@ -197,7 +197,6 @@ def convert_kitti360_log_to_arrow(
     vehicle_parameters = get_kitti360_station_wagon_parameters()
     camera_metadata = get_kitti360_camera_metadata()
-    #TODO now only velodyne lidar
     lidar_metadata = get_kitti360_lidar_metadata()
 
     schema_column_list = [
@@ -406,14 +405,17 @@ def _write_recording_table(
         recording_table = recording_table.sort_by([("timestamp", "ascending")])
     write_arrow_table(recording_table, log_file_path)
 
-#TODO Synchronization all other sequences)
 def _read_timestamps(log_name: str) -> Optional[List[TimePoint]]:
     # unix
     ts_files = [
+        PATH_3D_RAW_ROOT / log_name / "velodyne_points" / "timestamps.txt",
         PATH_2D_RAW_ROOT / log_name / "image_00" / "timestamps.txt",
         PATH_2D_RAW_ROOT / log_name / "image_01" / "timestamps.txt",
-        PATH_3D_RAW_ROOT / log_name / "velodyne_points" / "timestamps.txt",
     ]
+
+    if log_name == "2013_05_28_drive_0002_sync":
+        ts_files = ts_files[1:]
+
     for ts_file in ts_files:
         if ts_file.exists():
            tps: List[TimePoint] = []
@@ -602,7 +604,6 @@ def _extract_detections(
 
     return detections_states, detections_velocity, detections_tokens, detections_types
 
-#TODO lidar extraction now only velo
 def _extract_lidar(log_name: str, idx: int, data_converter_config: DataConverterConfig) -> Dict[LiDARType, Optional[str]]:
 
     #NOTE special case for sequence 2013_05_28_drive_0002_sync which has no lidar data before frame 4391
diff --git a/exp/kitti360_test/2025.08.15.14.31.57/code/hydra/config.yaml b/exp/kitti360_test/2025.08.15.14.31.57/code/hydra/config.yaml
deleted file mode 100644
index a505c4d2..00000000
--- a/exp/kitti360_test/2025.08.15.14.31.57/code/hydra/config.yaml
+++ /dev/null
@@ -1,60 +0,0 @@
-worker:
-  _target_: nuplan.planning.utils.multithreading.worker_ray.RayDistributed
-  _convert_: all
-  master_node_ip: null
-  threads_per_node: null
-  debug_mode: false
-  log_to_driver: true
-  logs_subdir: logs
-  use_distributed: false
-scene_filter:
-  _target_: d123.dataset.scene.scene_filter.SceneFilter
-  _convert_: all
-  split_types: null
-  split_names: null
-  log_names: null
-  map_names: null
-  scene_tokens: null
-  timestamp_threshold_s: null
-  ego_displacement_minimum_m: null
-  duration_s: 9.2
-  history_s: 3.0
-scene_builder:
-  _target_: d123.dataset.scene.scene_builder.ArrowSceneBuilder
-  _convert_: all
-  dataset_path: ${d123_data_root}
-distributed_timeout_seconds: 7200
-selected_simulation_metrics: null
-verbose: false
-logger_level: info
-logger_format_string: null
-max_number_of_workers: null
-gpu: true
-seed: 42
-d123_devkit_root: ${oc.env:D123_DEVKIT_ROOT}
-d123_maps_root: ${oc.env:D123_MAPS_ROOT}
-d123_data_root: ${oc.env:D123_DATA_ROOT}
-nuplan_devkit_root: ${oc.env:NUPLAN_DEVKIT_ROOT}
-nuplan_maps_root: ${oc.env:NUPLAN_MAPS_ROOT}
-nuplan_data_root: ${oc.env:NUPLAN_DATA_ROOT}
-experiment_name: kitti360_test
-date_format: '%Y.%m.%d.%H.%M.%S'
-experiment_uid: ${now:${date_format}}
-output_dir: ${oc.env:D123_EXP_ROOT}/${experiment_name}/${experiment_uid}
-force_log_conversion: true -force_map_conversion: false -datasets: - nuplan_dataset: - _target_: d123.dataset.dataset_specific.kitti_360.kitti_360_data_converter.Kitti360DataConverter - _convert_: all - splits: - - kitti360 - log_path: ${oc.env:KITTI360_DATA_ROOT} - data_converter_config: - _target_: d123.dataset.dataset_specific.raw_data_converter.DataConverterConfig - _convert_: all - output_path: ${d123_data_root} - force_log_conversion: ${force_log_conversion} - force_map_conversion: ${force_map_conversion} - camera_store_option: path - lidar_store_option: path diff --git a/exp/kitti360_test/2025.08.15.14.31.57/code/hydra/hydra.yaml b/exp/kitti360_test/2025.08.15.14.31.57/code/hydra/hydra.yaml deleted file mode 100644 index 406ccbe7..00000000 --- a/exp/kitti360_test/2025.08.15.14.31.57/code/hydra/hydra.yaml +++ /dev/null @@ -1,177 +0,0 @@ -hydra: - run: - dir: ${output_dir} - sweep: - dir: multirun/${now:%Y-%m-%d}/${now:%H-%M-%S} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: null - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: RUN - searchpath: - - pkg://d123.script.config - - pkg://d123.script.config.common - callbacks: {} - output_subdir: ${output_dir}/code/hydra - overrides: - hydra: - - hydra.mode=RUN - task: - - experiment_name=kitti360_test - job: - name: run_dataset_conversion - chdir: false - override_dirname: experiment_name=kitti360_test - id: ??? - num: ??? 
- config_name: default_dataset_conversion - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/jbwang/d123/d123/script - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: /home/jbwang/d123/d123/script/config/dataset_conversion - schema: file - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: d123.script.config - schema: pkg - provider: hydra.searchpath in main - - path: d123.script.config.common - schema: pkg - provider: hydra.searchpath in main - - path: '' - schema: structured - provider: schema - output_dir: /home/jbwang/d123/exp/kitti360_test/2025.08.15.14.31.57 - choices: - scene_builder: default_scene_builder - scene_filter: all_scenes - worker: ray_distributed - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/exp/kitti360_test/2025.08.15.14.31.57/code/hydra/overrides.yaml b/exp/kitti360_test/2025.08.15.14.31.57/code/hydra/overrides.yaml deleted file mode 100644 index 6c8e6217..00000000 --- a/exp/kitti360_test/2025.08.15.14.31.57/code/hydra/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- experiment_name=kitti360_test diff --git a/exp/kitti360_test/2025.08.15.14.31.57/log.txt b/exp/kitti360_test/2025.08.15.14.31.57/log.txt deleted file mode 100644 index 984f705a..00000000 --- a/exp/kitti360_test/2025.08.15.14.31.57/log.txt +++ /dev/null @@ -1,10 +0,0 @@ -2025-08-15 14:31:57,385 INFO {/home/jbwang/d123/d123/script/builders/worker_pool_builder.py:19} Building WorkerPool... -2025-08-15 14:32:14,105 INFO {/data/jbwang/submodule/nuplan-devkit-master/nuplan/planning/utils/multithreading/worker_ray.py:78} Starting ray local! -2025-08-15 14:32:35,603 INFO {/data/jbwang/submodule/nuplan-devkit-master/nuplan/planning/utils/multithreading/worker_pool.py:101} Worker: RayDistributed -2025-08-15 14:32:35,604 INFO {/data/jbwang/submodule/nuplan-devkit-master/nuplan/planning/utils/multithreading/worker_pool.py:102} Number of nodes: 1 -Number of CPUs per node: 64 -Number of GPUs per node: 8 -Number of threads across all nodes: 64 -2025-08-15 14:32:35,604 INFO {/home/jbwang/d123/d123/script/builders/worker_pool_builder.py:27} Building WorkerPool...DONE! -2025-08-15 14:32:35,604 INFO {/home/jbwang/d123/d123/script/run_dataset_conversion.py:30} Starting Dataset Caching... -2025-08-15 14:32:35,605 INFO {/home/jbwang/d123/d123/script/builders/data_converter_builder.py:14} Building RawDataProcessor... 
diff --git a/exp/kitti360_test/2025.08.15.14.36.40/code/hydra/config.yaml b/exp/kitti360_test/2025.08.15.14.36.40/code/hydra/config.yaml deleted file mode 100644 index 0fd6120d..00000000 --- a/exp/kitti360_test/2025.08.15.14.36.40/code/hydra/config.yaml +++ /dev/null @@ -1,60 +0,0 @@ -worker: - _target_: nuplan.planning.utils.multithreading.worker_ray.RayDistributed - _convert_: all - master_node_ip: null - threads_per_node: null - debug_mode: false - log_to_driver: true - logs_subdir: logs - use_distributed: false -scene_filter: - _target_: d123.dataset.scene.scene_filter.SceneFilter - _convert_: all - split_types: null - split_names: null - log_names: null - map_names: null - scene_tokens: null - timestamp_threshold_s: null - ego_displacement_minimum_m: null - duration_s: 9.2 - history_s: 3.0 -scene_builder: - _target_: d123.dataset.scene.scene_builder.ArrowSceneBuilder - _convert_: all - dataset_path: ${d123_data_root} -distributed_timeout_seconds: 7200 -selected_simulation_metrics: null -verbose: false -logger_level: info -logger_format_string: null -max_number_of_workers: null -gpu: true -seed: 42 -d123_devkit_root: ${oc.env:D123_DEVKIT_ROOT} -d123_maps_root: ${oc.env:D123_MAPS_ROOT} -d123_data_root: ${oc.env:D123_DATA_ROOT} -nuplan_devkit_root: ${oc.env:NUPLAN_DEVKIT_ROOT} -nuplan_maps_root: ${oc.env:NUPLAN_MAPS_ROOT} -nuplan_data_root: ${oc.env:NUPLAN_DATA_ROOT} -experiment_name: kitti360_test -date_format: '%Y.%m.%d.%H.%M.%S' -experiment_uid: ${now:${date_format}} -output_dir: ${oc.env:D123_EXP_ROOT}/${experiment_name}/${experiment_uid} -force_log_conversion: true -force_map_conversion: false -datasets: - kitti360_dataset: - _target_: d123.dataset.dataset_specific.kitti_360.kitti_360_data_converter.Kitti360DataConverter - _convert_: all - splits: - - kitti360 - log_path: ${oc.env:KITTI360_DATA_ROOT} - data_converter_config: - _target_: d123.dataset.dataset_specific.raw_data_converter.DataConverterConfig - _convert_: all - output_path: ${d123_data_root} - force_log_conversion: ${force_log_conversion} - force_map_conversion: ${force_map_conversion} - camera_store_option: path - lidar_store_option: path diff --git a/exp/kitti360_test/2025.08.15.14.36.40/code/hydra/hydra.yaml b/exp/kitti360_test/2025.08.15.14.36.40/code/hydra/hydra.yaml deleted file mode 100644 index 4eee2c65..00000000 --- a/exp/kitti360_test/2025.08.15.14.36.40/code/hydra/hydra.yaml +++ /dev/null @@ -1,177 +0,0 @@ -hydra: - run: - dir: ${output_dir} - sweep: - dir: multirun/${now:%Y-%m-%d}/${now:%H-%M-%S} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: null - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. 
- - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: RUN - searchpath: - - pkg://d123.script.config - - pkg://d123.script.config.common - callbacks: {} - output_subdir: ${output_dir}/code/hydra - overrides: - hydra: - - hydra.mode=RUN - task: - - experiment_name=kitti360_test - job: - name: run_dataset_conversion - chdir: false - override_dirname: experiment_name=kitti360_test - id: ??? - num: ??? - config_name: default_dataset_conversion - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/jbwang/d123/d123/script - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: /home/jbwang/d123/d123/script/config/dataset_conversion - schema: file - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: d123.script.config - schema: pkg - provider: hydra.searchpath in main - - path: d123.script.config.common - schema: pkg - provider: hydra.searchpath in main - - path: '' - schema: structured - provider: schema - output_dir: /home/jbwang/d123/exp/kitti360_test/2025.08.15.14.36.40 - choices: - scene_builder: default_scene_builder - scene_filter: all_scenes - worker: ray_distributed - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/exp/kitti360_test/2025.08.15.14.36.40/code/hydra/overrides.yaml b/exp/kitti360_test/2025.08.15.14.36.40/code/hydra/overrides.yaml deleted file mode 100644 index 6c8e6217..00000000 --- a/exp/kitti360_test/2025.08.15.14.36.40/code/hydra/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- experiment_name=kitti360_test diff --git a/exp/kitti360_test/2025.08.15.14.36.40/log.txt b/exp/kitti360_test/2025.08.15.14.36.40/log.txt deleted file mode 100644 index 5f939dac..00000000 --- a/exp/kitti360_test/2025.08.15.14.36.40/log.txt +++ /dev/null @@ -1,10 +0,0 @@ -2025-08-15 14:36:40,989 INFO {/home/jbwang/d123/d123/script/builders/worker_pool_builder.py:19} Building WorkerPool... 
-2025-08-15 14:36:56,167 INFO {/data/jbwang/submodule/nuplan-devkit-master/nuplan/planning/utils/multithreading/worker_ray.py:78} Starting ray local! -2025-08-15 14:37:18,685 INFO {/data/jbwang/submodule/nuplan-devkit-master/nuplan/planning/utils/multithreading/worker_pool.py:101} Worker: RayDistributed -2025-08-15 14:37:18,686 INFO {/data/jbwang/submodule/nuplan-devkit-master/nuplan/planning/utils/multithreading/worker_pool.py:102} Number of nodes: 1 -Number of CPUs per node: 64 -Number of GPUs per node: 8 -Number of threads across all nodes: 64 -2025-08-15 14:37:18,686 INFO {/home/jbwang/d123/d123/script/builders/worker_pool_builder.py:27} Building WorkerPool...DONE! -2025-08-15 14:37:18,686 INFO {/home/jbwang/d123/d123/script/run_dataset_conversion.py:30} Starting Dataset Caching... -2025-08-15 14:37:18,687 INFO {/home/jbwang/d123/d123/script/builders/data_converter_builder.py:14} Building RawDataProcessor... diff --git a/exp/kitti_test2/2025.08.15.14.40.29/code/hydra/config.yaml b/exp/kitti_test2/2025.08.15.14.40.29/code/hydra/config.yaml deleted file mode 100644 index 5ce47ba9..00000000 --- a/exp/kitti_test2/2025.08.15.14.40.29/code/hydra/config.yaml +++ /dev/null @@ -1,60 +0,0 @@ -worker: - _target_: nuplan.planning.utils.multithreading.worker_ray.RayDistributed - _convert_: all - master_node_ip: null - threads_per_node: null - debug_mode: false - log_to_driver: true - logs_subdir: logs - use_distributed: false -scene_filter: - _target_: d123.dataset.scene.scene_filter.SceneFilter - _convert_: all - split_types: null - split_names: null - log_names: null - map_names: null - scene_tokens: null - timestamp_threshold_s: null - ego_displacement_minimum_m: null - duration_s: 9.2 - history_s: 3.0 -scene_builder: - _target_: d123.dataset.scene.scene_builder.ArrowSceneBuilder - _convert_: all - dataset_path: ${d123_data_root} -distributed_timeout_seconds: 7200 -selected_simulation_metrics: null -verbose: false -logger_level: info -logger_format_string: null -max_number_of_workers: null -gpu: true -seed: 42 -d123_devkit_root: ${oc.env:D123_DEVKIT_ROOT} -d123_maps_root: ${oc.env:D123_MAPS_ROOT} -d123_data_root: ${oc.env:D123_DATA_ROOT} -nuplan_devkit_root: ${oc.env:NUPLAN_DEVKIT_ROOT} -nuplan_maps_root: ${oc.env:NUPLAN_MAPS_ROOT} -nuplan_data_root: ${oc.env:NUPLAN_DATA_ROOT} -experiment_name: kitti_test2 -date_format: '%Y.%m.%d.%H.%M.%S' -experiment_uid: ${now:${date_format}} -output_dir: ${oc.env:D123_EXP_ROOT}/${experiment_name}/${experiment_uid} -force_log_conversion: true -force_map_conversion: false -datasets: - kitti360_dataset: - _target_: d123.dataset.dataset_specific.kitti_360.kitti_360_data_converter.Kitti360DataConverter - _convert_: all - splits: - - kitti360 - log_path: ${oc.env:KITTI360_DATA_ROOT} - data_converter_config: - _target_: d123.dataset.dataset_specific.raw_data_converter.DataConverterConfig - _convert_: all - output_path: ${d123_data_root} - force_log_conversion: ${force_log_conversion} - force_map_conversion: ${force_map_conversion} - camera_store_option: path - lidar_store_option: path diff --git a/exp/kitti_test2/2025.08.15.14.40.29/code/hydra/hydra.yaml b/exp/kitti_test2/2025.08.15.14.40.29/code/hydra/hydra.yaml deleted file mode 100644 index 2d1c615a..00000000 --- a/exp/kitti_test2/2025.08.15.14.40.29/code/hydra/hydra.yaml +++ /dev/null @@ -1,177 +0,0 @@ -hydra: - run: - dir: ${output_dir} - sweep: - dir: multirun/${now:%Y-%m-%d}/${now:%H-%M-%S} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: 
- _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: null - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: RUN - searchpath: - - pkg://d123.script.config - - pkg://d123.script.config.common - callbacks: {} - output_subdir: ${output_dir}/code/hydra - overrides: - hydra: - - hydra.mode=RUN - task: - - experiment_name=kitti_test2 - job: - name: run_dataset_conversion - chdir: false - override_dirname: experiment_name=kitti_test2 - id: ??? - num: ??? 
- config_name: default_dataset_conversion - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/jbwang/d123 - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: /home/jbwang/d123/d123/script/config/dataset_conversion - schema: file - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: d123.script.config - schema: pkg - provider: hydra.searchpath in main - - path: d123.script.config.common - schema: pkg - provider: hydra.searchpath in main - - path: '' - schema: structured - provider: schema - output_dir: /home/jbwang/d123/exp/kitti_test2/2025.08.15.14.40.29 - choices: - scene_builder: default_scene_builder - scene_filter: all_scenes - worker: ray_distributed - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/exp/kitti_test2/2025.08.15.14.40.29/code/hydra/overrides.yaml b/exp/kitti_test2/2025.08.15.14.40.29/code/hydra/overrides.yaml deleted file mode 100644 index 676c1042..00000000 --- a/exp/kitti_test2/2025.08.15.14.40.29/code/hydra/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- experiment_name=kitti_test2 diff --git a/exp/kitti_test2/2025.08.15.14.40.29/log.txt b/exp/kitti_test2/2025.08.15.14.40.29/log.txt deleted file mode 100644 index 8437d38e..00000000 --- a/exp/kitti_test2/2025.08.15.14.40.29/log.txt +++ /dev/null @@ -1,10 +0,0 @@ -2025-08-15 14:40:29,427 INFO {/home/jbwang/d123/d123/script/builders/worker_pool_builder.py:19} Building WorkerPool... -2025-08-15 14:40:42,538 INFO {/data/jbwang/submodule/nuplan-devkit-master/nuplan/planning/utils/multithreading/worker_ray.py:78} Starting ray local! -2025-08-15 14:41:00,324 INFO {/data/jbwang/submodule/nuplan-devkit-master/nuplan/planning/utils/multithreading/worker_pool.py:101} Worker: RayDistributed -2025-08-15 14:41:00,325 INFO {/data/jbwang/submodule/nuplan-devkit-master/nuplan/planning/utils/multithreading/worker_pool.py:102} Number of nodes: 1 -Number of CPUs per node: 64 -Number of GPUs per node: 8 -Number of threads across all nodes: 64 -2025-08-15 14:41:00,325 INFO {/home/jbwang/d123/d123/script/builders/worker_pool_builder.py:27} Building WorkerPool...DONE! -2025-08-15 14:41:00,325 INFO {/home/jbwang/d123/d123/script/run_dataset_conversion.py:30} Starting Dataset Caching... -2025-08-15 14:41:00,326 INFO {/home/jbwang/d123/d123/script/builders/data_converter_builder.py:14} Building RawDataProcessor... 
diff --git a/exp/kitti_test2/2025.08.15.14.43.13/code/hydra/config.yaml b/exp/kitti_test2/2025.08.15.14.43.13/code/hydra/config.yaml deleted file mode 100644 index de70bfa3..00000000 --- a/exp/kitti_test2/2025.08.15.14.43.13/code/hydra/config.yaml +++ /dev/null @@ -1,60 +0,0 @@ -worker: - _target_: nuplan.planning.utils.multithreading.worker_ray.RayDistributed - _convert_: all - master_node_ip: null - threads_per_node: null - debug_mode: false - log_to_driver: true - logs_subdir: logs - use_distributed: false -scene_filter: - _target_: d123.dataset.scene.scene_filter.SceneFilter - _convert_: all - split_types: null - split_names: null - log_names: null - map_names: null - scene_tokens: null - timestamp_threshold_s: null - ego_displacement_minimum_m: null - duration_s: 9.2 - history_s: 3.0 -scene_builder: - _target_: d123.dataset.scene.scene_builder.ArrowSceneBuilder - _convert_: all - dataset_path: ${d123_data_root} -distributed_timeout_seconds: 7200 -selected_simulation_metrics: null -verbose: false -logger_level: info -logger_format_string: null -max_number_of_workers: null -gpu: true -seed: 42 -d123_devkit_root: ${oc.env:D123_DEVKIT_ROOT} -d123_maps_root: ${oc.env:D123_MAPS_ROOT} -d123_data_root: ${oc.env:D123_DATA_ROOT} -nuplan_devkit_root: ${oc.env:NUPLAN_DEVKIT_ROOT} -nuplan_maps_root: ${oc.env:NUPLAN_MAPS_ROOT} -nuplan_data_root: ${oc.env:NUPLAN_DATA_ROOT} -experiment_name: kitti_test2 -date_format: '%Y.%m.%d.%H.%M.%S' -experiment_uid: ${now:${date_format}} -output_dir: ${oc.env:D123_EXP_ROOT}/${experiment_name}/${experiment_uid} -force_log_conversion: true -force_map_conversion: false -datasets: - nuplan_private_dataset: - _target_: d123.dataset.dataset_specific.nuplan.nuplan_data_converter.NuplanDataConverter - _convert_: all - splits: - - nuplan_private_test - log_path: ${oc.env:NUPLAN_DATA_ROOT}/nuplan-v1.1/splits - data_converter_config: - _target_: d123.dataset.dataset_specific.raw_data_converter.DataConverterConfig - _convert_: all - output_path: ${d123_data_root} - force_log_conversion: ${force_log_conversion} - force_map_conversion: ${force_map_conversion} - camera_store_option: path - lidar_store_option: path diff --git a/exp/kitti_test2/2025.08.15.14.43.13/code/hydra/hydra.yaml b/exp/kitti_test2/2025.08.15.14.43.13/code/hydra/hydra.yaml deleted file mode 100644 index cca44d29..00000000 --- a/exp/kitti_test2/2025.08.15.14.43.13/code/hydra/hydra.yaml +++ /dev/null @@ -1,177 +0,0 @@ -hydra: - run: - dir: ${output_dir} - sweep: - dir: multirun/${now:%Y-%m-%d}/${now:%H-%M-%S} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: null - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. 
- - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: RUN - searchpath: - - pkg://d123.script.config - - pkg://d123.script.config.common - callbacks: {} - output_subdir: ${output_dir}/code/hydra - overrides: - hydra: - - hydra.mode=RUN - task: - - experiment_name=kitti_test2 - job: - name: run_dataset_conversion - chdir: false - override_dirname: experiment_name=kitti_test2 - id: ??? - num: ??? - config_name: default_dataset_conversion - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/jbwang/d123 - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: /home/jbwang/d123/d123/script/config/dataset_conversion - schema: file - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: d123.script.config - schema: pkg - provider: hydra.searchpath in main - - path: d123.script.config.common - schema: pkg - provider: hydra.searchpath in main - - path: '' - schema: structured - provider: schema - output_dir: /home/jbwang/d123/exp/kitti_test2/2025.08.15.14.43.13 - choices: - scene_builder: default_scene_builder - scene_filter: all_scenes - worker: ray_distributed - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/exp/kitti_test2/2025.08.15.14.43.13/code/hydra/overrides.yaml b/exp/kitti_test2/2025.08.15.14.43.13/code/hydra/overrides.yaml deleted file mode 100644 index 676c1042..00000000 --- a/exp/kitti_test2/2025.08.15.14.43.13/code/hydra/overrides.yaml +++ /dev/null @@ -1 +0,0 @@ -- experiment_name=kitti_test2 diff --git a/exp/kitti_test2/2025.08.15.14.43.13/log.txt b/exp/kitti_test2/2025.08.15.14.43.13/log.txt deleted file mode 100644 index fec50568..00000000 --- a/exp/kitti_test2/2025.08.15.14.43.13/log.txt +++ /dev/null @@ -1,12 +0,0 @@ -2025-08-15 14:43:13,965 INFO {/home/jbwang/d123/d123/script/builders/worker_pool_builder.py:19} Building WorkerPool... 
-2025-08-15 14:43:24,401 INFO {/data/jbwang/submodule/nuplan-devkit-master/nuplan/planning/utils/multithreading/worker_ray.py:78} Starting ray local! -2025-08-15 14:43:39,643 INFO {/data/jbwang/submodule/nuplan-devkit-master/nuplan/planning/utils/multithreading/worker_pool.py:101} Worker: RayDistributed -2025-08-15 14:43:39,644 INFO {/data/jbwang/submodule/nuplan-devkit-master/nuplan/planning/utils/multithreading/worker_pool.py:102} Number of nodes: 1 -Number of CPUs per node: 64 -Number of GPUs per node: 8 -Number of threads across all nodes: 64 -2025-08-15 14:43:39,644 INFO {/home/jbwang/d123/d123/script/builders/worker_pool_builder.py:27} Building WorkerPool...DONE! -2025-08-15 14:43:39,644 INFO {/home/jbwang/d123/d123/script/run_dataset_conversion.py:30} Starting Dataset Caching... -2025-08-15 14:43:39,645 INFO {/home/jbwang/d123/d123/script/builders/data_converter_builder.py:14} Building RawDataProcessor... -2025-08-15 14:43:44,316 INFO {/home/jbwang/d123/d123/script/builders/data_converter_builder.py:21} Building RawDataProcessor...DONE! -2025-08-15 14:43:44,316 INFO {/home/jbwang/d123/d123/script/run_dataset_conversion.py:34} Processing dataset: NuplanDataConverter diff --git a/exp/kitti_test2/2025.08.15.14.46.49/code/hydra/config.yaml b/exp/kitti_test2/2025.08.15.14.46.49/code/hydra/config.yaml deleted file mode 100644 index 5ce47ba9..00000000 --- a/exp/kitti_test2/2025.08.15.14.46.49/code/hydra/config.yaml +++ /dev/null @@ -1,60 +0,0 @@ -worker: - _target_: nuplan.planning.utils.multithreading.worker_ray.RayDistributed - _convert_: all - master_node_ip: null - threads_per_node: null - debug_mode: false - log_to_driver: true - logs_subdir: logs - use_distributed: false -scene_filter: - _target_: d123.dataset.scene.scene_filter.SceneFilter - _convert_: all - split_types: null - split_names: null - log_names: null - map_names: null - scene_tokens: null - timestamp_threshold_s: null - ego_displacement_minimum_m: null - duration_s: 9.2 - history_s: 3.0 -scene_builder: - _target_: d123.dataset.scene.scene_builder.ArrowSceneBuilder - _convert_: all - dataset_path: ${d123_data_root} -distributed_timeout_seconds: 7200 -selected_simulation_metrics: null -verbose: false -logger_level: info -logger_format_string: null -max_number_of_workers: null -gpu: true -seed: 42 -d123_devkit_root: ${oc.env:D123_DEVKIT_ROOT} -d123_maps_root: ${oc.env:D123_MAPS_ROOT} -d123_data_root: ${oc.env:D123_DATA_ROOT} -nuplan_devkit_root: ${oc.env:NUPLAN_DEVKIT_ROOT} -nuplan_maps_root: ${oc.env:NUPLAN_MAPS_ROOT} -nuplan_data_root: ${oc.env:NUPLAN_DATA_ROOT} -experiment_name: kitti_test2 -date_format: '%Y.%m.%d.%H.%M.%S' -experiment_uid: ${now:${date_format}} -output_dir: ${oc.env:D123_EXP_ROOT}/${experiment_name}/${experiment_uid} -force_log_conversion: true -force_map_conversion: false -datasets: - kitti360_dataset: - _target_: d123.dataset.dataset_specific.kitti_360.kitti_360_data_converter.Kitti360DataConverter - _convert_: all - splits: - - kitti360 - log_path: ${oc.env:KITTI360_DATA_ROOT} - data_converter_config: - _target_: d123.dataset.dataset_specific.raw_data_converter.DataConverterConfig - _convert_: all - output_path: ${d123_data_root} - force_log_conversion: ${force_log_conversion} - force_map_conversion: ${force_map_conversion} - camera_store_option: path - lidar_store_option: path diff --git a/exp/kitti_test2/2025.08.15.14.46.49/code/hydra/hydra.yaml b/exp/kitti_test2/2025.08.15.14.46.49/code/hydra/hydra.yaml deleted file mode 100644 index bd9698a2..00000000 --- 
diff --git a/jbwang_test.py b/jbwang_test.py
deleted file mode 100644
index e42f512a..00000000
--- a/jbwang_test.py
+++ /dev/null
@@ -1,98 +0,0 @@
-# from nuplan.database.nuplan_db_orm.nuplandb import NuPlanDB
-
-# # # Open the database file
-# # db = NuPlanDB(db_path="/nas/datasets/nuplan/nuplan-v1.1/splits/mini/2021.05.12.22.00.38_veh-35_01008_01518.db")
-# NUPLAN_DATA_ROOT = "/nas/datasets/nuplan/nuplan-v1.1/splits/mini"
-# log_path
-# log_db = NuPlanDB(NUPLAN_DATA_ROOT, str(log_path), None)
-
-# # Get the data for frame 1050
-# frame = db.get_frame(1050)
-# img_front = frame.camera_front  # front-view image
-# point_cloud = frame.lidar  # point cloud
-
-# # Get all vehicle states for this log segment
-# status_data = db.get_vehicle_status()  # returns a DataFrame
-# print(status_data)
-
-
-
-# from d123.dataset.dataset_specific.nuplan.nuplan_data_converter import NuplanDataConverter, DataConverterConfig
-# spits = ["nuplan_mini_train"]
-# log_path = "/nas/datasets/nuplan/nuplan-v1.1/splits/mini/"
-# converter = NuplanDataConverter(
-#     log_path=log_path,
-#     splits=spits,
-#     data_converter_config=DataConverterConfig(output_path="data/jbwang/d123"),
-# )
-# # converter.convert_logs()
-from pathlib import Path
-log_paths_per_split = {
-    "nuplan_mini_train": [
-        "2021", "2022"]
-    }
-log_args = [
-    {
-        "log_path": log_path,
-        "split": split,
-    }
-    for split, log_paths in log_paths_per_split.items()
-    for log_path in log_paths
-    ]
-PATH_2D_RAW_ROOT = Path("/nas/datasets/KITTI-360/data_3d_raw/")
-candidates = sorted(p for p in PATH_2D_RAW_ROOT.iterdir() if p.is_dir() and p.name.endswith("_sync"))
-# print(log_args)
-# print(candidates)
-# print(candidates[0].name)
-# print(candidates[0].stem)
-# print(type(candidates[0].name))
-# print(type(candidates[0].stem))
-# PATH_2D_RAW_ROOT_new = PATH_2D_RAW_ROOT/"123"/candidates[0].name
-# print(PATH_2D_RAW_ROOT_new)
-
-
-
-# import hashlib
-# def create_token(input_data: str) -> str:
-#     # TODO: Refactor this function.
-#     # TODO: Add a general function to create tokens from arbitrary data.
-#     if isinstance(input_data, str):
-#         input_data = input_data.encode("utf-8")
-
-#     hash_obj = hashlib.sha256(input_data)
-#     return hash_obj.hexdigest()[:16]
-
-# log_name = "1230_asd_"
-# for i in range(20):
-#     a = create_token(f"{log_name}_{i}")
-#     print(a)
-
-
-# import numpy as np
-# from pathlib import Path
-# a = np.loadtxt("/data/jbwang/d123/data_poses/2013_05_28_drive_0000_sync/oxts/data/0000000000.txt")
-# b = np.loadtxt("/nas/datasets/KITTI-360/data_poses/2013_05_28_drive_0018_sync/poses.txt")
-# data = b
-# ts = data[:, 0].astype(np.int32)
-# poses = np.reshape(data[:, 1:], (-1, 3, 4))
-# poses = np.concatenate((poses, np.tile(np.array([0, 0, 0, 1]).reshape(1,1,4),(poses.shape[0],1,1))), 1)
-# print(a)
-# print(b.shape)
-# print(ts.shape)
-# print(poses.shape)
-
-# ccc = Path("/data/jbwang/d123/data_poses/2013_05_28_drive_0000_sync/oxts/data/")
-# print(len(list(ccc.glob("*.txt"))))
-
-
-
-
-from d123.dataset.dataset_specific.nuplan.nuplan_data_converter import convert_nuplan_map_to_gpkg
-
-from d123.dataset.dataset_specific.raw_data_converter import DataConverterConfig
-
-MAP_LOCATIONS = {"sg-one-north", "us-ma-boston", "us-nv-las-vegas-strip", "us-pa-pittsburgh-hazelwood"}
-maps = list(MAP_LOCATIONS)
-
-data_conveter_config = DataConverterConfig(output_path="/nas/datasets/nuplan/maps")
-convert_nuplan_map_to_gpkg(maps, data_conveter_config)
\ No newline at end of file
diff --git a/jbwang_test2.py b/jbwang_test2.py
deleted file mode 100644
index 183df813..00000000
--- a/jbwang_test2.py
+++ /dev/null
@@ -1,229 +0,0 @@
-# # import numpy as np
-# # import pickle
-
-# # # path = "/nas/datasets/KITTI-360/data_3d_raw/2013_05_28_drive_0000_sync/velodyne_points/data/0000000000.bin"
-# # # a = np.fromfile(path, dtype=np.float32)
-
-# # # print(a.shape)
-# # # print(a[:10])
-
-# # # path2 = "/nas/datasets/KITTI-360/calibration/calib_cam_to_pose.txt"
-# # # c = np.loadtxt(path2)
-# # # print(c)
-
-# # import open3d as o3d
-# # import numpy as np
-
-# # def read_ply_file(file_path):
-# #     # Read the PLY file
-# #     pcd = o3d.io.read_point_cloud(file_path)
-# #     print(len(pcd.points), len(pcd.colors))
-# #     # Extract the vertex information
-# #     points = np.asarray(pcd.points)  # x, y, z
-# #     colors = np.asarray(pcd.colors)  # red, green, blue
-# #     # semantics = np.asarray(pcd.semantic)  # semanticID, instanceID, isVisible, confidence
-
-# #     # Merge all the information into a single array
-# #     vertices = np.hstack((points, colors))
-
-# #     return vertices
-
-# # # Example usage
-# # file_path = '/nas/datasets/KITTI-360/data_3d_semantics/train/2013_05_28_drive_0000_sync/static/0000000002_0000000385.ply'  # replace with the path to your PLY file
-# # vertices = read_ply_file(file_path)
-
-# # # Print the first few vertices
-# # print("Vertex info (first 5 vertices):")
-# # print(vertices[:5])
-
-# import numpy as np
-# from scipy.linalg import polar
-# from scipy.spatial.transform import Rotation as R
-
-# def polar_decompose_rotation_scale(A: np.ndarray):
-#     """
-#     A: 3x3 (rotation + scale + shear)
-#     Returns:
-#         Rm: pure rotation
-#         Sm: symmetric positive definite (scale + shear)
-#         scale: approximate per-axis scale (square roots of Sm's eigenvalues, or its diagonal; treat with care if shear is present)
-#         yaw, pitch, roll: using the ZYX sequence (commonly yaw(Z), pitch(Y), roll(X))
-#     """
-#     Rm, Sm = polar(A)  # A = Rm @ Sm
-#     # Approximate per-axis scale (if there is no shear):
-#     scale = np.diag(Sm)
-#     # Euler angles
-#     yaw, pitch, roll = R.from_matrix(Rm).as_euler('zyx', degrees=False)
-#     return {
-#         "R": Rm,
-#         "S": Sm,
-#         "scale_diag": scale,
-#         "yaw_pitch_roll": (yaw, pitch, roll),
-#     }
-
-# M = np.array([
-#     [-3.97771668e+00, -1.05715942e+00, -2.18206085e-02],
-#     [2.43555284e+00, -1.72707462e+00, -1.03932284e-02],
-#     [-4.41359095e-02, -2.94448305e-02, 1.39303744e+00],
-# ])
-# out = polar_decompose_rotation_scale(M)
-# print(out)
-
-# import numpy as np
-# path = "/nas/datasets/KITTI-360/data_3d_raw/2013_05_28_drive_0000_sync/velodyne_points/data/0000000000.bin"
-# a = np.fromfile(path, dtype=np.float32)
-# a = a.reshape((-1,4))
-# print(a[10000:10010,:3])
-
-
-
-
-# import gc
-# import json
-# import os
-# from dataclasses import asdict
-# from functools import partial
-# from pathlib import Path
-# from typing import Any, Dict, Final, List, Optional, Tuple, Union
-
-# import numpy as np
-# from collections import defaultdict
-# import datetime
-# import hashlib
-# import xml.etree.ElementTree as ET
-# import pyarrow as pa
-# from PIL import Image
-# import logging
-
-# from d123.common.datatypes.detection.detection_types import DetectionType
-# from d123.dataset.dataset_specific.kitti_360.kitti_360_helper import KITTI360Bbox3D
-
-
-# #TODO train and train_full
-# bbox_3d_path = Path("/nas/datasets/KITTI-360/data_3d_bboxes/train/2013_05_28_drive_0000_sync.xml")
-
-# tree = ET.parse(bbox_3d_path)
-# root = tree.getroot()
-
-# KIITI360_DETECTION_NAME_DICT = {
-#     "truck": DetectionType.VEHICLE,
-#     "bus": DetectionType.VEHICLE,
-#     "car": DetectionType.VEHICLE,
-#     "motorcycle": DetectionType.BICYCLE,
-#     "bicycle": DetectionType.BICYCLE,
-#     "pedestrian": DetectionType.PEDESTRIAN,
-# }
-# # x,y,z = 881.2268115,3247.493293,115.239219
-# # x,y,z = 867.715474,3229.630439,115.189221  # the ego vehicle
-# # x,y,z = 873.533508, 3227.16235, 115.185341  # the pedestrian we are looking for
-# x,y,z = 874.233508, 3231.56235, 115.185341  # the car we are looking for
-# CENTER_REF = np.array([x, y, z], dtype=np.float64)
-# objs_name = []
-# lable_name = []
-# for child in root:
-#     label = child.find('label').text
-#     # if child.find('transform') is None or label not in KIITI360_DETECTION_NAME_DICT.keys():
-#     #     continue
-
-#     if child.find('transform') is None:
-#         continue
-#     print("this label is ", label)
-#     print("!!!!!!!!!!!!!!!!!!!")
-#     obj = KITTI360Bbox3D()
-#     obj.parseBbox(child)
-#     # obj.parseVertices(child)
-#     name = child.find('label').text
-#     lable_name.append(name)
-#     # if obj.start_frame < 10030 and obj.end_frame > 10030:
-#     center = np.array(obj.T, dtype=np.float64)
-#     dist = np.linalg.norm(center - CENTER_REF)
-#     if dist < 7:
-#         print(f"Object ID: {obj.name}, Start Frame: {obj.start_frame}, End Frame: {obj.end_frame},self.annotationId: {obj.annotationId},{obj.timestamp},{obj.T}")
-#         objs_name.append(obj.name)
-# print(len(objs_name))
-# print(set(objs_name))
-# print(set(lable_name))
-# # print(obj.Rm)
-# # print(Sigma)
-# names = []
-# for child in root:
-#     label = child.find('label').text
-#     if child.find('transform') is None:
-#         continue
-#     names.append(label)
-# print(set(names))
-
-from scipy.spatial.transform import Rotation as R
-import numpy as np
-from pathlib import Path as PATH
-
-def get_rotation_matrix(roll, pitch, yaw):
-    # Intrinsic Z-Y'-X'' rotation: R = R_x(roll) @ R_y(pitch) @ R_z(yaw)
-    R_x = np.array(
-        [
-            [1, 0, 0],
-            [0, np.cos(roll), -np.sin(roll)],
-            [0, np.sin(roll), np.cos(roll)],
-        ],
-        dtype=np.float64,
-    )
-    R_y = np.array(
-        [
-            [np.cos(pitch), 0, np.sin(pitch)],
-            [0, 1, 0],
-            [-np.sin(pitch), 0, np.cos(pitch)],
-        ],
-        dtype=np.float64,
-    )
-    R_z = np.array(
-        [
-            [np.cos(yaw), -np.sin(yaw), 0],
-            [np.sin(yaw), np.cos(yaw), 0],
-            [0, 0, 1],
-        ],
-        dtype=np.float64,
-    )
-    return R_x @ R_y @ R_z
-
-oxts_path = PATH("/data/jbwang/d123/data_poses/2013_05_28_drive_0000_sync/oxts/data/")
-pose_file = PATH("/nas/datasets/KITTI-360/data_poses/2013_05_28_drive_0000_sync/poses.txt")
-poses = np.loadtxt(pose_file)
-poses_time = poses[:, 0] - 1  # Adjusting time to start from 0
-
-pose_idx = 0
-poses_time_len = len(poses_time)
-
-from pyquaternion import Quaternion
-
-for idx in range(len(list(oxts_path.glob("*.txt")))):
-    oxts_path_file = oxts_path / f"{int(idx):010d}.txt"
-    oxts_data = np.loadtxt(oxts_path_file)
-    while pose_idx + 1 < poses_time_len and poses_time[pose_idx + 1] < idx:
-        pose_idx += 1
-    pos = pose_idx
-
-    r00, r01, r02 = poses[pos, 1:4]
-    r10, r11, r12 = poses[pos, 5:8]
-    r20, r21, r22 = poses[pos, 9:12]
-    R_mat = np.array([[r00, r01, r02],
-                      [r10, r11, r12],
-                      [r20, r21, r22]], dtype=np.float64)
-    calib = np.array([[1.0, 0.0, 0.0],
-                      [0.0, -1.0, 0.0],
-                      [0.0, 0.0, -1.0]], dtype=np.float64)
-    R_mat = R_mat @ calib
-    from d123.geometry.rotation import EulerAngles
-    if idx <= 300:
-        # print("R_mat", R_mat)
-
-        new_yaw, new_pitch, new_roll = Quaternion(matrix=R_mat[:3, :3]).yaw_pitch_roll
-        R = EulerAngles.from_array(np.array([new_roll, new_pitch, new_yaw])).rotation_matrix
-        # print("R from yaw_pitch_roll", R)
-        print(R_mat - R)
-        # new_yaw, new_pitch, new_roll = R.from_matrix(R_mat).as_euler('yxz', degrees=False)
-        # print("new", new_roll, new_pitch, new_yaw)
-        # print("roll,pitch,yaw", oxts_data[3:6])  # the first 6 elements are position and velocity
-        # roll, pitch, yaw = oxts_data[3:6]
-        # print("true", get_rotation_matrix(roll, pitch, yaw))
-        # print("new", roll, pitch, yaw)
\ No newline at end of file
diff --git a/notebooks/dataset/jbwang_test.py b/notebooks/dataset/jbwang_test.py
deleted file mode 100644
index c37d8d40..00000000
--- a/notebooks/dataset/jbwang_test.py
+++ /dev/null
@@ -1,94 +0,0 @@
-# s3_uri = "/data/jbwang/d123/data/nuplan_mini_train/2021.10.11.07.12.18_veh-50_00211_00304.arrow"
-# s3_uri = "/data/jbwang/d123/data/nuplan_private_test/2021.09.22.13.20.34_veh-28_01446_01583.arrow"
-# s3_uri = "/data/jbwang/d123/data/carla/_Rep0_routes_validation1_route0_07_23_14_33_15.arrow"
-# s3_uri = "/data/jbwang/d123/data/nuplan_mini_val/2021.06.07.12.54.00_veh-35_01843_02314.arrow"
-# s3_uri = "/data/jbwang/d123/data2/kitti360_c2e_train/2013_05_28_drive_0000_sync_c2e.arrow"
-s3_uri = "/data/jbwang/d123/data2/kitti360_detection_all_test/2013_05_28_drive_0000_sync.arrow"
-
-
-import pyarrow as pa
-import pyarrow.fs as fs
-import pyarrow.dataset as ds
-
-import os
-
-s3_fs = fs.S3FileSystem()
-from d123.common.utils.timer import Timer
-
-
-timer = Timer()
-timer.start()
-
-dataset = ds.dataset(f"{s3_uri}", format="ipc")
-timer.log("1. Dataset loaded")
-
-# Get all column names and remove the ones you want to drop
-all_columns = dataset.schema.names
-# print("all_columns", all_columns)
-# print("Schema:")
-# print(dataset.schema)
-# columns_to_keep = [col for col in all_columns if col not in ["front_cam_demo", "front_cam_transform"]]
-timer.log("2. Columns filtered")
-
-table = dataset.to_table(columns=all_columns)
-# print("table", table)
-# print(table["token"])
-for col in table.column_names:
-    if col == "lidar":
-        continue
-    print(f"Column : {col}, Type: {table.schema.field(col).type}")
-    tokens = table["detections_velocity"]  # or table.column("token")
-    # tokens = table["detections_type"]
-    # print(tokens)
-    # print(len(tokens))
-    result = tokens.slice(1470, 40).to_pylist()
-    # for item in result:
-    #     print(len(item))
-print(result)
-# print(table["traffic_light_ids"])
-timer.log("3. Table created")
-# Save locally
-# with pa.ipc.new_file("filtered_file.arrow", table.schema) as writer:
-#     writer.write_table(table)
Table saved locally") - -timer.end() -timer.stats(verbose=False) - -# 查看nuplan数据库的表结构和内容 - -# from pathlib import Path -# from nuplan.database.nuplan_db_orm.nuplandb import NuPlanDB -# from nuplan.database.nuplan_db_orm.lidar_pc import LidarPc -# from sqlalchemy import inspect, select -# from sqlalchemy.orm import Session -# from sqlalchemy import func -# from nuplan.database.nuplan_db_orm.ego_pose import EgoPose - -# NUPLAN_DATA_ROOT = Path("/nas/datasets/nuplan/") # 按你实际路径 -# log_path = "/nas/datasets/nuplan/nuplan-v1.1/splits/mini/2021.05.12.22.00.38_veh-35_01008_01518.db" - -# db = NuPlanDB(NUPLAN_DATA_ROOT, log_path, None) -# # print(db.log) -# print(db.log.map_version) -# # print("log.cameras",db.log.cameras) -# # print("Log name:", db.log_name) -# # print("lidar",db.lidar_pc) -# # print("scenario_tags", db.scenario_tag) -# # print(db.log._session.query(EgoPose).order_by(func.abs(EgoPose.timestamp)).first()) - -# # persp = Path("/nas/datasets/KITTI-360/calibration/perspective.txt") -# # with open(persp, "r") as f: -# # lines = [ln.strip() for ln in f if ln.strip()] -# # print(lines) - -# from d123.dataset.dataset_specific.kitti_360.kitti_360_data_converter import get_kitti360_camera_metadata - -# print(get_kitti360_camera_metadata()) - - - -# from d123.dataset.dataset_specific.kitti_360.kitti_360_data_converter import _read_timestamps -# result = _read_timestamps("2013_05_28_drive_0000_sync") -# print(len(result)) -# print([result[0].time_us]) \ No newline at end of file diff --git a/notebooks/gym/jbwang_test.py b/notebooks/gym/jbwang_test.py deleted file mode 100644 index 663e2899..00000000 --- a/notebooks/gym/jbwang_test.py +++ /dev/null @@ -1,180 +0,0 @@ -from d123.dataset.scene.scene_builder import ArrowSceneBuilder -from d123.dataset.scene.scene_filter import SceneFilter - -from d123.common.multithreading.worker_sequential import Sequential -# from d123.common.multithreading.worker_ray import RayDistributed - -import os, psutil - -from pathlib import Path -from typing import Optional, Tuple - -import matplotlib.animation as animation -import matplotlib.pyplot as plt -from tqdm import tqdm - -from d123.common.datatypes.vehicle_state.ego_state import EgoStateSE2 -from d123.common.geometry.base import Point2D, StateSE2 -from d123.common.geometry.bounding_box.bounding_box import BoundingBoxSE2 -from d123.common.visualization.color.default import EGO_VEHICLE_CONFIG -from d123.common.visualization.matplotlib.observation import ( - add_bounding_box_to_ax, - add_box_detections_to_ax, - add_default_map_on_ax, - add_traffic_lights_to_ax, - add_ego_vehicle_to_ax, -) -from d123.dataset.arrow.conversion import TrafficLightDetectionWrapper -from d123.dataset.maps.abstract_map import AbstractMap -from d123.common.datatypes.detection.detection import BoxDetectionWrapper -from d123.dataset.scene.abstract_scene import AbstractScene -import io -from PIL import Image - - - -def _plot_scene_on_ax( - ax: plt.Axes, - map_api: AbstractMap, - ego_state: EgoStateSE2, - initial_ego_state: Optional[EgoStateSE2], - box_detections: BoxDetectionWrapper, - traffic_light_detections: TrafficLightDetectionWrapper, - radius: float = 120, -) -> plt.Axes: - - if initial_ego_state is not None: - point_2d = initial_ego_state.center.point_2d - else: - point_2d = ego_state.center.point_2d - add_default_map_on_ax(ax, map_api, point_2d, radius=radius) - add_traffic_lights_to_ax(ax, traffic_light_detections, map_api) - - add_box_detections_to_ax(ax, box_detections) - add_ego_vehicle_to_ax(ax, ego_state) - - 
ax.set_xlim(point_2d.x - radius, point_2d.x + radius) - ax.set_ylim(point_2d.y - radius, point_2d.y + radius) - - ax.set_aspect("equal", adjustable="box") - return ax - - -def plot_scene_to_image( - map_api: AbstractMap, - ego_state: EgoStateSE2, - initial_ego_state: Optional[EgoStateSE2], - box_detections: BoxDetectionWrapper, - traffic_light_detections: TrafficLightDetectionWrapper, - radius: float = 120, - figsize: Tuple[int, int] = (8, 8), -) -> Image: - - fig, ax = plt.subplots(figsize=figsize) - _plot_scene_on_ax(ax, map_api, ego_state, initial_ego_state, box_detections, traffic_light_detections, radius) - ax.set_aspect("equal", adjustable="box") - plt.subplots_adjust(left=0.05, right=0.95, top=0.95, bottom=0.05) - # plt.tight_layout() - - buf = io.BytesIO() - fig.savefig(buf, format="png", bbox_inches="tight") - plt.close(fig) - buf.seek(0) - img = Image.open(buf) - return img - - -def print_memory_usage(): - process = psutil.Process(os.getpid()) - memory_info = process.memory_info() - print(f"Memory usage: {memory_info.rss / 1024 ** 2:.2f} MB") - - -split = "kitti360_detection_all_and_vel" -scene_tokens = None -log_names = None - -scene_filter = SceneFilter( - split_names=[split], log_names=log_names, scene_tokens=scene_tokens, duration_s=15.1, history_s=1.0 -) -scene_builder = ArrowSceneBuilder("/data/jbwang/d123/data2/") -worker = Sequential() -# worker = RayDistributed() -scenes = scene_builder.get_scenes(scene_filter, worker) - -print(len(scenes)) - -for scene in scenes[:10]: - print(scene.log_name, scene.token) - -from d123.dataset.arrow.conversion import DetectionType -from d123.simulation.gym.gym_env import GymEnvironment -from d123.simulation.observation.agents_observation import _filter_agents_by_type - -import time - -images = [] -agent_rollouts = [] -plot: bool = True -action = [1.0, -0.0] # Placeholder action, replace with actual action logic -env = GymEnvironment(scenes) - -start = time.time() - -map_api, ego_state, detection_observation, current_scene = env.reset(scenes[1460]) -initial_ego_state = ego_state -cars, _, _ = _filter_agents_by_type(detection_observation.box_detections, detection_types=[DetectionType.VEHICLE]) -agent_rollouts.append(BoxDetectionWrapper(cars)) -if plot: - images.append( - plot_scene_to_image( - map_api, - ego_state, - initial_ego_state, - detection_observation.box_detections, - detection_observation.traffic_light_detections, - ) - ) - - -for i in range(160): - ego_state, detection_observation, end = env.step(action) - cars, _, _ = _filter_agents_by_type(detection_observation.box_detections, detection_types=[DetectionType.VEHICLE]) - agent_rollouts.append(BoxDetectionWrapper(cars)) - if plot: - images.append( - plot_scene_to_image( - map_api, - ego_state, - initial_ego_state, - detection_observation.box_detections, - detection_observation.traffic_light_detections, - ) - ) - if end: - print("End of scene reached.") - break - -time_s = time.time() - start -print(time_s) -print(151/ time_s) - -import numpy as np - - -def create_gif(images, output_path, duration=100): - """ - Create a GIF from a list of PIL images. - - Args: - images (list): List of PIL.Image objects. - output_path (str): Path to save the GIF. - duration (int): Duration between frames in milliseconds. 
- """ - if images: - print(len(images)) - images_p = [img.convert("P", palette=Image.ADAPTIVE) for img in images] - images_p[0].save(output_path, save_all=True, append_images=images_p[1:], duration=duration, loop=0) - - -create_gif(images, f"/data/jbwang/d123/data2/{split}_{current_scene.token}.gif", duration=20) \ No newline at end of file diff --git a/notebooks/jbwang_viz_test.py b/notebooks/jbwang_viz_test.py deleted file mode 100644 index 73f05dbf..00000000 --- a/notebooks/jbwang_viz_test.py +++ /dev/null @@ -1,252 +0,0 @@ -# from typing import Tuple - -# import matplotlib.pyplot as plt - -# from nuplan.planning.utils.multithreading.worker_sequential import Sequential - -# from d123.dataset.scene.scene_builder import ArrowSceneBuilder -# from d123.dataset.scene.scene_filter import SceneFilter -# from d123.dataset.scene.abstract_scene import AbstractScene - -# from typing import Dict -# from d123.common.datatypes.sensor.camera import CameraType -# from d123.common.visualization.matplotlib.camera import add_camera_ax -# from d123.common.visualization.matplotlib.camera import add_box_detections_to_camera_ax - -# # split = "nuplan_private_test" -# # log_names = ["2021.09.29.17.35.58_veh-44_00066_00432"] - - - - -# # splits = ["carla"] -# splits = ["nuplan_private_test"] -# # splits = ["wopd_train"] -# # log_names = None - - - -# # splits = ["nuplan_private_test"] -# log_names = None - -# scene_tokens = None - -# scene_filter = SceneFilter( -# split_names=splits, -# log_names=log_names, -# scene_tokens=scene_tokens, -# duration_s=19, -# history_s=0.0, -# timestamp_threshold_s=20, -# shuffle=False, -# camera_types=[CameraType.CAM_F0], -# ) -# scene_builder = ArrowSceneBuilder("/data/jbwang/d123/data/") -# worker = Sequential() -# # worker = RayDistributed() -# scenes = scene_builder.get_scenes(scene_filter, worker) - -# print(f"Found {len(scenes)} scenes") - - -# from typing import List, Optional, Tuple -# import matplotlib.pyplot as plt -# import numpy as np -# from d123.common.geometry.base import Point2D -# from d123.common.visualization.color.color import BLACK, DARK_GREY, DARKER_GREY, LIGHT_GREY, NEW_TAB_10, TAB_10 -# from d123.common.visualization.color.config import PlotConfig -# from d123.common.visualization.color.default import CENTERLINE_CONFIG, MAP_SURFACE_CONFIG, ROUTE_CONFIG -# from d123.common.visualization.matplotlib.observation import ( -# add_box_detections_to_ax, -# add_default_map_on_ax, -# add_ego_vehicle_to_ax, -# add_traffic_lights_to_ax, -# ) -# from d123.common.visualization.matplotlib.utils import add_shapely_linestring_to_ax, add_shapely_polygon_to_ax -# from d123.dataset.maps.abstract_map import AbstractMap -# from d123.dataset.maps.abstract_map_objects import AbstractLane -# from d123.dataset.maps.map_datatypes import MapLayer -# from d123.dataset.scene.abstract_scene import AbstractScene - - -# import shapely.geometry as geom - -# LEFT_CONFIG: PlotConfig = PlotConfig( -# fill_color=TAB_10[2], -# fill_color_alpha=1.0, -# line_color=TAB_10[2], -# line_color_alpha=0.5, -# line_width=1.0, -# line_style="-", -# zorder=3, -# ) - -# RIGHT_CONFIG: PlotConfig = PlotConfig( -# fill_color=TAB_10[3], -# fill_color_alpha=1.0, -# line_color=TAB_10[3], -# line_color_alpha=0.5, -# line_width=1.0, -# line_style="-", -# zorder=3, -# ) - - -# LANE_CONFIG: PlotConfig = PlotConfig( -# fill_color=BLACK, -# fill_color_alpha=1.0, -# line_color=BLACK, -# line_color_alpha=0.0, -# line_width=0.0, -# line_style="-", -# zorder=5, -# ) - -# ROAD_EDGE_CONFIG: PlotConfig = PlotConfig( -# 
fill_color=DARKER_GREY.set_brightness(0.0), -# fill_color_alpha=1.0, -# line_color=DARKER_GREY.set_brightness(0.0), -# line_color_alpha=1.0, -# line_width=1.0, -# line_style="-", -# zorder=3, -# ) - -# ROAD_LINE_CONFIG: PlotConfig = PlotConfig( -# fill_color=DARKER_GREY, -# fill_color_alpha=1.0, -# line_color=NEW_TAB_10[5], -# line_color_alpha=1.0, -# line_width=1.5, -# line_style="-", -# zorder=3, -# ) - - -# def add_debug_map_on_ax( -# ax: plt.Axes, -# map_api: AbstractMap, -# point_2d: Point2D, -# radius: float, -# route_lane_group_ids: Optional[List[int]] = None, -# ) -> None: -# layers: List[MapLayer] = [ -# MapLayer.LANE, -# MapLayer.LANE_GROUP, -# MapLayer.GENERIC_DRIVABLE, -# MapLayer.CARPARK, -# MapLayer.CROSSWALK, -# MapLayer.INTERSECTION, -# MapLayer.WALKWAY, -# MapLayer.ROAD_EDGE, -# MapLayer.ROAD_LINE, -# ] -# x_min, x_max = point_2d.x - radius, point_2d.x + radius -# y_min, y_max = point_2d.y - radius, point_2d.y + radius -# patch = geom.box(x_min, y_min, x_max, y_max) -# map_objects_dict = map_api.query(geometry=patch, layers=layers, predicate="intersects") - -# done = False -# for layer, map_objects in map_objects_dict.items(): -# for map_object in map_objects: -# try: -# if layer in [ -# # MapLayer.GENERIC_DRIVABLE, -# # MapLayer.CARPARK, -# # MapLayer.CROSSWALK, -# # MapLayer.INTERSECTION, -# # MapLayer.WALKWAY, -# ]: -# add_shapely_polygon_to_ax(ax, map_object.shapely_polygon, MAP_SURFACE_CONFIG[layer]) - -# # if layer in [MapLayer.LANE_GROUP]: -# # add_shapely_polygon_to_ax(ax, map_object.shapely_polygon, MAP_SURFACE_CONFIG[layer]) - -# if layer in [MapLayer.LANE]: -# map_object: AbstractLane -# if map_object.right_lane is not None and map_object.left_lane is not None and not done: -# add_shapely_polygon_to_ax(ax, map_object.shapely_polygon, LANE_CONFIG) -# add_shapely_polygon_to_ax(ax, map_object.right_lane.shapely_polygon, RIGHT_CONFIG) -# add_shapely_polygon_to_ax(ax, map_object.left_lane.shapely_polygon, LEFT_CONFIG) -# done = True -# else: -# add_shapely_polygon_to_ax(ax, map_object.shapely_polygon, MAP_SURFACE_CONFIG[layer]) - - -# # add_shapely_linestring_to_ax(ax, map_object.right_boundary.linestring, RIGHT_CONFIG) -# # add_shapely_linestring_to_ax(ax, map_object.left_boundary.linestring, LEFT_CONFIG) -# # add_shapely_polygon_to_ax(ax, map_object.shapely_polygon, LANE_CONFIG) - -# # centroid = map_object.shapely_polygon.centroid -# # ax.text( -# # centroid.x, -# # centroid.y, -# # str(map_object.id), -# # horizontalalignment="center", -# # verticalalignment="center", -# # fontsize=8, -# # bbox=dict(facecolor="white", alpha=0.7, boxstyle="round,pad=0.2"), -# # ) -# # if layer in [MapLayer.ROAD_EDGE]: -# # add_shapely_linestring_to_ax(ax, map_object.polyline_3d.linestring, ROAD_EDGE_CONFIG) -# # edge_lengths.append(map_object.polyline_3d.linestring.length) - -# if layer in [MapLayer.ROAD_LINE]: -# line_type = int(map_object.road_line_type) -# plt_config = PlotConfig( -# fill_color=NEW_TAB_10[line_type % len(NEW_TAB_10)], -# fill_color_alpha=1.0, -# line_color=NEW_TAB_10[line_type % len(NEW_TAB_10)], -# line_color_alpha=1.0, -# line_width=1.5, -# line_style="-", -# zorder=3, -# ) -# add_shapely_linestring_to_ax(ax, map_object.polyline_3d.linestring, plt_config) - -# except Exception: -# import traceback - -# print(f"Error adding map object of type {layer.name} and id {map_object.id}") -# traceback.print_exc() - -# ax.set_title(f"Map: {map_api.map_name}") - - -# def _plot_scene_on_ax(ax: plt.Axes, scene: AbstractScene, iteration: int = 0, radius: float = 80) -> 
-
-#     ego_vehicle_state = scene.get_ego_state_at_iteration(iteration)
-#     box_detections = scene.get_box_detections_at_iteration(iteration)
-
-#     point_2d = ego_vehicle_state.bounding_box.center.state_se2.point_2d
-#     add_debug_map_on_ax(ax, scene.map_api, point_2d, radius=radius, route_lane_group_ids=None)
-#     # add_default_map_on_ax(ax, scene.map_api, point_2d, radius=radius, route_lane_group_ids=None)
-#     # add_traffic_lights_to_ax(ax, traffic_light_detections, scene.map_api)
-
-#     add_box_detections_to_ax(ax, box_detections)
-#     add_ego_vehicle_to_ax(ax, ego_vehicle_state)
-
-#     zoom = 1.0
-#     ax.set_xlim(point_2d.x - radius * zoom, point_2d.x + radius * zoom)
-#     ax.set_ylim(point_2d.y - radius * zoom, point_2d.y + radius * zoom)
-
-#     ax.set_aspect("equal", adjustable="box")
-#     return ax
-
-
-# def plot_scene_at_iteration(
-#     scene: AbstractScene, iteration: int = 0, radius: float = 80
-# ) -> Tuple[plt.Figure, plt.Axes]:
-
-#     size = 15
-
-#     fig, ax = plt.subplots(figsize=(size, size))
-#     _plot_scene_on_ax(ax, scene, iteration, radius)
-#     return fig, ax
-
-
-# scene_index = 1
-# fig, ax = plot_scene_at_iteration(scenes[scene_index], iteration=100, radius=100)
-
-# # fig.savefig(f"/home/daniel/scene_{scene_index}_iteration_1.pdf", dpi=300, bbox_inches="tight")
-

From b4d06bdcac7e58c891728d5289e13df1b345f825 Mon Sep 17 00:00:00 2001
From: jbwang <1159270049@qq.com>
Date: Thu, 28 Aug 2025 15:30:26 +0800
Subject: [PATCH 030/145] ready to push

---
 d123/common/visualization/viser/server.py       |  4 +-
 .../kitti_360/kitti_360_data_converter.py       | 51 ++++++++++---------
 .../kitti_360/preprocess_detection.py           |  2 +-
 3 files changed, 29 insertions(+), 28 deletions(-)

diff --git a/d123/common/visualization/viser/server.py b/d123/common/visualization/viser/server.py
index 990a90dd..6cba5dd5 100644
--- a/d123/common/visualization/viser/server.py
+++ b/d123/common/visualization/viser/server.py
@@ -43,9 +43,9 @@
 # VISUALIZE_CAMERA_FRUSTUM: List[CameraType] = [CameraType.CAM_F0, CameraType.CAM_L0, CameraType.CAM_R0]
 # VISUALIZE_CAMERA_FRUSTUM: List[CameraType] = all_camera_types
-VISUALIZE_CAMERA_FRUSTUM: List[CameraType] = [CameraType.CAM_STEREO_L, CameraType.CAM_STEREO_R]
+VISUALIZE_CAMERA_FRUSTUM: List[CameraType] = [CameraType.CAM_STEREO_L]
 # VISUALIZE_CAMERA_FRUSTUM: List[CameraType] = []
-VISUALIZE_CAMERA_GUI: List[CameraType] = [CameraType.CAM_F0]
+VISUALIZE_CAMERA_GUI: List[CameraType] = [CameraType.CAM_STEREO_L]
 CAMERA_SCALE: float = 1.0

 # Lidar config:
diff --git a/d123/dataset/dataset_specific/kitti_360/kitti_360_data_converter.py b/d123/dataset/dataset_specific/kitti_360/kitti_360_data_converter.py
index 2cc40675..1b967fca 100644
--- a/d123/dataset/dataset_specific/kitti_360/kitti_360_data_converter.py
+++ b/d123/dataset/dataset_specific/kitti_360/kitti_360_data_converter.py
@@ -56,11 +56,11 @@
 DIR_POSES = "data_poses"
 DIR_CALIB = "calibration"

-#TODO PATH_2D_RAW_ROOT
-# PATH_2D_RAW_ROOT: Path = KITTI360_DATA_ROOT / DIR_2D_RAW
-PATH_2D_RAW_ROOT: Path = KITTI360_DATA_ROOT
+PATH_2D_RAW_ROOT: Path = KITTI360_DATA_ROOT / DIR_2D_RAW
+# PATH_2D_RAW_ROOT: Path = KITTI360_DATA_ROOT
 PATH_2D_SMT_ROOT: Path = KITTI360_DATA_ROOT / DIR_2D_SMT
 PATH_3D_RAW_ROOT: Path = KITTI360_DATA_ROOT / DIR_3D_RAW
+# PATH_3D_RAW_ROOT: Path = Path("/data/jbwang/d123/data_3d_raw")
 PATH_3D_SMT_ROOT: Path = KITTI360_DATA_ROOT / DIR_3D_SMT
 PATH_3D_BBOX_ROOT: Path = KITTI360_DATA_ROOT / DIR_3D_BBOX
 PATH_POSES_ROOT: Path = KITTI360_DATA_ROOT / DIR_POSES
 PATH_CALIB_ROOT: Path = KITTI360_DATA_ROOT / DIR_CALIB
@@ -406,7 +406,9 @@
write_arrow_table(recording_table, log_file_path) def _read_timestamps(log_name: str) -> Optional[List[TimePoint]]: - # unix + """ + Read KITTI-360 timestamps for the given sequence and return Unix epoch timestamps. + """ ts_files = [ PATH_3D_RAW_ROOT / log_name / "velodyne_points" / "timestamps.txt", PATH_2D_RAW_ROOT / log_name / "image_00" / "timestamps.txt", @@ -449,10 +451,9 @@ def _extract_ego_state_all(log_name: str) -> List[List[float]]: raise FileNotFoundError(f"Pose file not found: {pose_file}") poses = np.loadtxt(pose_file) poses_time = poses[:, 0] - 1 # Adjusting time to start from 0 - - #TODO - #oxts_path = PATH_POSES_ROOT / log_name / "oxts" / "data" - oxts_path = Path("/data/jbwang/d123/data_poses/") / log_name / "oxts" / "data" + + oxts_path = PATH_POSES_ROOT / log_name / "oxts" / "data" + # oxts_path = Path("/data/jbwang/d123/data_poses/") / log_name / "oxts" / "data" pose_idx = 0 poses_time_len = len(poses_time) @@ -632,29 +633,29 @@ def _extract_cameras( elif cam_dir_name in ["image_02", "image_03"]: img_path_png = PATH_2D_RAW_ROOT / log_name / cam_dir_name / "data_rgb" / f"{idx:010d}.png" - if img_path_png.exists(): - cam2pose_txt = PATH_CALIB_ROOT / "calib_cam_to_pose.txt" - if not cam2pose_txt.exists(): - raise FileNotFoundError(f"calib_cam_to_pose.txt file not found: {cam2pose_txt}") - - lastrow = np.array([0,0,0,1]).reshape(1,4) - - with open(cam2pose_txt, 'r') as f: - for line in f: - parts = line.strip().split() - key = parts[0][:-1] - if key == cam_dir_name: - values = list(map(float, parts[1:])) - matrix = np.array(values).reshape(3, 4) - cam2pose = np.concatenate((matrix, lastrow)) - cam2pose = KITTI3602NUPLAN_IMU_CALIBRATION @ cam2pose + cam2pose_txt = PATH_CALIB_ROOT / "calib_cam_to_pose.txt" + if not cam2pose_txt.exists(): + raise FileNotFoundError(f"calib_cam_to_pose.txt file not found: {cam2pose_txt}") + + lastrow = np.array([0,0,0,1]).reshape(1,4) + with open(cam2pose_txt, 'r') as f: + for line in f: + parts = line.strip().split() + key = parts[0][:-1] + if key == cam_dir_name: + values = list(map(float, parts[1:])) + matrix = np.array(values).reshape(3, 4) + cam2pose = np.concatenate((matrix, lastrow)) + cam2pose = KITTI3602NUPLAN_IMU_CALIBRATION @ cam2pose + if img_path_png.exists(): if data_converter_config.camera_store_option == "path": camera_data = str(img_path_png), cam2pose.flatten().tolist() elif data_converter_config.camera_store_option == "binary": with open(img_path_png, "rb") as f: camera_data = f.read(), cam2pose else: - raise FileNotFoundError(f"Camera image not found: {img_path_png}") + #TODO + camera_data = None, cam2pose.flatten().tolist() camera_dict[camera_type] = camera_data return camera_dict diff --git a/d123/dataset/dataset_specific/kitti_360/preprocess_detection.py b/d123/dataset/dataset_specific/kitti_360/preprocess_detection.py index e45e76d9..8b7c284f 100644 --- a/d123/dataset/dataset_specific/kitti_360/preprocess_detection.py +++ b/d123/dataset/dataset_specific/kitti_360/preprocess_detection.py @@ -163,7 +163,7 @@ def process_detection( for obj in static_objs: records.append(obj.valid_frames) if output_dir is None: - output_dir = PATH_3D_BBOX_ROOT / "preprocessed" + output_dir = PATH_3D_BBOX_ROOT / "preprocess" output_dir.mkdir(parents=True, exist_ok=True) out_path = output_dir / f"{log_name}_detection_preprocessed.pkl" payload = { From 493c02993ec34be3c6cef11c80186061af62deaa Mon Sep 17 00:00:00 2001 From: Daniel Dauner Date: Thu, 28 Aug 2025 10:32:17 +0200 Subject: [PATCH 031/145] Make viser server not crash if cameras or 
lidar is not available. (#49) --- d123/common/visualization/viser/server.py | 45 ++-- d123/common/visualization/viser/utils.py | 19 +- .../transform/transform_quaternion_se3.py | 237 ++++++++++++++++++ d123/geometry/utils/rotation_utils.py | 4 - .../common/scene_filter/all_scenes.yaml | 5 + 5 files changed, 280 insertions(+), 30 deletions(-) create mode 100644 d123/geometry/transform/transform_quaternion_se3.py diff --git a/d123/common/visualization/viser/server.py b/d123/common/visualization/viser/server.py index 686d3746..c9ce3601 100644 --- a/d123/common/visualization/viser/server.py +++ b/d123/common/visualization/viser/server.py @@ -1,6 +1,7 @@ import time from typing import Dict, List, Literal +import numpy as np import trimesh import viser @@ -8,6 +9,7 @@ from d123.common.datatypes.sensor.lidar import LiDARType from d123.common.visualization.viser.utils import ( get_bounding_box_meshes, + get_camera_if_available, get_camera_values, get_lidar_points, get_map_meshes, @@ -186,23 +188,26 @@ def _(_) -> None: current_frame_handle = mew_frame_handle for camera_type in VISUALIZE_CAMERA_GUI: - if camera_type in scene.available_camera_types: - camera_gui_handles[camera_type].image = scene.get_camera_at_iteration( - gui_timestep.value, camera_type - ).image + camera = get_camera_if_available(scene, camera_type, gui_timestep.value) + if camera is not None: + camera_gui_handles[camera_type].image = camera.image for camera_type in VISUALIZE_CAMERA_FRUSTUM: - if camera_type in scene.available_camera_types: - camera_position, camera_quaternion, camera = get_camera_values( - scene, camera_type, gui_timestep.value - ) - + camera = get_camera_if_available(scene, camera_type, gui_timestep.value) + if camera is not None: + camera_position, camera_quaternion = get_camera_values(scene, camera, gui_timestep.value) camera_frustum_handles[camera_type].position = camera_position.array camera_frustum_handles[camera_type].wxyz = camera_quaternion.q camera_frustum_handles[camera_type].image = camera.image if LIDAR_AVAILABLE: - points, colors = get_lidar_points(scene, gui_timestep.value, LIDAR_TYPES) + try: + points, colors = get_lidar_points(scene, gui_timestep.value, LIDAR_TYPES) + except Exception as e: + print(f"Error getting lidar points: {e}") + points = np.zeros((0, 3)) + colors = np.zeros((0, 3)) + gui_lidar.points = points gui_lidar.colors = colors @@ -221,19 +226,19 @@ def _(_) -> None: camera_frustum_handles: Dict[CameraType, viser.CameraFrustumHandle] = {} for camera_type in VISUALIZE_CAMERA_GUI: - if camera_type in scene.available_camera_types: + camera = get_camera_if_available(scene, camera_type, gui_timestep.value) + if camera is not None: with self.server.gui.add_folder(f"Camera {camera_type.serialize()}"): camera_gui_handles[camera_type] = self.server.gui.add_image( - image=scene.get_camera_at_iteration(gui_timestep.value, camera_type).image, + image=camera.image, label=camera_type.serialize(), format="jpeg", ) for camera_type in VISUALIZE_CAMERA_FRUSTUM: - if camera_type in scene.available_camera_types: - camera_position, camera_quaternion, camera = get_camera_values( - scene, camera_type, gui_timestep.value - ) + camera = get_camera_if_available(scene, camera_type, gui_timestep.value) + if camera is not None: + camera_position, camera_quaternion = get_camera_values(scene, camera, gui_timestep.value) camera_frustum_handles[camera_type] = self.server.scene.add_camera_frustum( f"camera_frustum_{camera_type.serialize()}", fov=camera.metadata.fov_y, @@ -245,7 +250,13 @@ def _(_) -> None: ) if 
LIDAR_AVAILABLE: - points, colors = get_lidar_points(scene, gui_timestep.value, LIDAR_TYPES) + try: + points, colors = get_lidar_points(scene, gui_timestep.value, LIDAR_TYPES) + except Exception as e: + print(f"Error getting lidar points: {e}") + points = np.zeros((0, 3)) + colors = np.zeros((0, 3)) + gui_lidar = self.server.scene.add_point_cloud( name="LiDAR", points=points, diff --git a/d123/common/visualization/viser/utils.py b/d123/common/visualization/viser/utils.py index ad49b88a..16717654 100644 --- a/d123/common/visualization/viser/utils.py +++ b/d123/common/visualization/viser/utils.py @@ -1,4 +1,4 @@ -from typing import List, Tuple +from typing import List, Optional, Tuple import numpy as np import numpy.typing as npt @@ -216,22 +216,23 @@ def _create_lane_mesh_from_boundary_arrays( return mesh -def get_camera_values( - scene: AbstractScene, camera_type: CameraType, iteration: int -) -> Tuple[Point3D, Quaternion, Camera]: +def get_camera_if_available(scene: AbstractScene, camera_type: CameraType, iteration: int) -> Optional[Camera]: + camera: Optional[Camera] = None + if camera_type in scene.available_camera_types: + camera: Camera = scene.get_camera_at_iteration(iteration, camera_type) + return camera + + +def get_camera_values(scene: AbstractScene, camera: Camera, iteration: int) -> Tuple[Point3D, Quaternion]: initial_point_3d = scene.get_ego_state_at_iteration(0).center_se3.point_3d rear_axle = scene.get_ego_state_at_iteration(iteration).rear_axle_se3 - camera = scene.get_camera_at_iteration(iteration, camera_type) - rear_axle_array = rear_axle.array rear_axle_array[:3] -= initial_point_3d.array rear_axle = StateSE3.from_array(rear_axle_array) camera_to_ego = camera.extrinsic # 4x4 transformation from camera to ego frame - # Get the rotation matrix of the rear axle pose - ego_transform = rear_axle.transformation_matrix camera_transform = ego_transform @ camera_to_ego @@ -240,7 +241,7 @@ def get_camera_values( camera_position = Point3D(*camera_transform[:3, 3]) camera_rotation = Quaternion(matrix=camera_transform[:3, :3]) - return camera_position, camera_rotation, camera + return camera_position, camera_rotation def get_lidar_points( diff --git a/d123/geometry/transform/transform_quaternion_se3.py b/d123/geometry/transform/transform_quaternion_se3.py new file mode 100644 index 00000000..9a74b601 --- /dev/null +++ b/d123/geometry/transform/transform_quaternion_se3.py @@ -0,0 +1,237 @@ +# TODO: Properly implement and test these functions + +# from typing import Union + +# import numpy as np +# import numpy.typing as npt + +# from d123.geometry import Vector3D +# from d123.geometry.geometry_index import Point3DIndex, QuaternionSE3Index, Vector3DIndex +# from d123.geometry.se import QuaternionSE3 + + +# def translate_qse3_along_z(state_se3: QuaternionSE3, distance: float) -> QuaternionSE3: +# """Translates a QuaternionSE3 state along the Z-axis. + +# :param state_se3: The QuaternionSE3 state to translate. +# :param distance: The distance to translate along the Z-axis. +# :return: The translated QuaternionSE3 state. +# """ +# R = state_se3.rotation_matrix +# z_axis = R[:, 2] + +# state_se3_array = state_se3.array.copy() +# state_se3_array[QuaternionSE3Index.XYZ] += distance * z_axis[Vector3DIndex.XYZ] +# return QuaternionSE3.from_array(state_se3_array, copy=False) + + +# def translate_qse3_along_y(state_se3: QuaternionSE3, distance: float) -> QuaternionSE3: +# """Translates a QuaternionSE3 state along the Y-axis. + +# :param state_se3: The QuaternionSE3 state to translate. 
+# :param distance: The distance to translate along the Y-axis. +# :return: The translated QuaternionSE3 state. +# """ +# R = state_se3.rotation_matrix +# y_axis = R[:, 1] + +# state_se3_array = state_se3.array.copy() +# state_se3_array[QuaternionSE3Index.XYZ] += distance * y_axis[Vector3DIndex.XYZ] +# return QuaternionSE3.from_array(state_se3_array, copy=False) + + +# def translate_qse3_along_x(state_se3: QuaternionSE3, distance: float) -> QuaternionSE3: +# """Translates a QuaternionSE3 state along the X-axis. + +# :param state_se3: The QuaternionSE3 state to translate. +# :param distance: The distance to translate along the X-axis. +# :return: The translated QuaternionSE3 state. +# """ +# R = state_se3.rotation_matrix +# x_axis = R[:, 0] + +# state_se3_array = state_se3.array.copy() +# state_se3_array[QuaternionSE3Index.XYZ] += distance * x_axis[Vector3DIndex.XYZ] +# return QuaternionSE3.from_array(state_se3_array, copy=False) + + +# def translate_qse3_along_body_frame(state_se3: QuaternionSE3, vector_3d: Vector3D) -> QuaternionSE3: +# """Translates a QuaternionSE3 state along a vector in the body frame. + +# :param state_se3: The QuaternionSE3 state to translate. +# :param vector_3d: The vector to translate along in the body frame. +# :return: The translated QuaternionSE3 state. +# """ +# R = state_se3.rotation_matrix +# world_translation = R @ vector_3d.array + +# state_se3_array = state_se3.array.copy() +# state_se3_array[QuaternionSE3Index.XYZ] += world_translation +# return QuaternionSE3.from_array(state_se3_array, copy=False) + + +# def convert_absolute_to_relative_qse3_array( +# origin: Union[QuaternionSE3, npt.NDArray[np.float64]], se3_array: npt.NDArray[np.float64] +# ) -> npt.NDArray[np.float64]: +# """Converts a QuaternionSE3 array from the absolute frame to the relative frame. + +# :param origin: The origin state in the absolute frame, as a QuaternionSE3 or np.ndarray [x,y,z,qw,qx,qy,qz]. +# :param se3_array: The QuaternionSE3 array in the absolute frame [N, 7]. +# :raises TypeError: If the origin is not a QuaternionSE3 or np.ndarray. +# :return: The QuaternionSE3 array in the relative frame [N, 7]. 
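+#     Note: positions map as p_rel = R_origin^T @ (p_abs - t_origin) and orientations
+#     as q_rel = q_origin^-1 * q_abs (for unit quaternions the inverse is the conjugate).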
+#     """
+#     if isinstance(origin, QuaternionSE3):
+#         origin_ = origin
+#     elif isinstance(origin, np.ndarray):
+#         assert origin.ndim == 1 and origin.shape[-1] == len(QuaternionSE3Index)
+#         origin_ = QuaternionSE3.from_array(origin)
+#     else:
+#         raise TypeError(f"Expected QuaternionSE3 or np.ndarray, got {type(origin)}")
+
+#     assert se3_array.ndim >= 1
+#     assert se3_array.shape[-1] == len(QuaternionSE3Index)
+
+#     t_origin = origin_.point_3d.array
+#     R_origin = origin_.rotation_matrix
+
+#     # Extract absolute positions and quaternions
+#     abs_quaternions = se3_array[..., QuaternionSE3Index.QUATERNION]
+#     q_origin = origin_.quaternion
+
+#     # Compute relative quaternions: q_rel = q_origin^-1 * q_abs
+#     # (for a unit quaternion the inverse is the conjugate, i.e. negate the vector part)
+#     q_origin_inv = np.concatenate((q_origin[:1], -q_origin[1:]))
+#     if abs_quaternions.ndim == 1:
+#         rel_quaternions = _quaternion_multiply(q_origin_inv, abs_quaternions)
+#     else:
+#         rel_quaternions = np.array([_quaternion_multiply(q_origin_inv, q) for q in abs_quaternions])
+
+#     # Prepare output array
+#     rel_se3_array = np.zeros_like(se3_array)
+#     rel_se3_array[..., QuaternionSE3Index.XYZ] = (se3_array[..., QuaternionSE3Index.XYZ] - t_origin) @ R_origin
+#     rel_se3_array[..., QuaternionSE3Index.QUATERNION] = rel_quaternions
+
+#     return rel_se3_array
+
+
+# def convert_relative_to_absolute_qse3_array(
+#     origin: Union[QuaternionSE3, npt.NDArray[np.float64]], se3_array: npt.NDArray[np.float64]
+# ) -> npt.NDArray[np.float64]:
+#     """Converts a QuaternionSE3 array from the relative frame to the absolute frame.
+
+#     :param origin: The origin state in the absolute frame, as a QuaternionSE3 or np.ndarray [x,y,z,qw,qx,qy,qz].
+#     :param se3_array: The QuaternionSE3 array in the relative frame [N, 7].
+#     :raises TypeError: If the origin is not a QuaternionSE3 or np.ndarray.
+#     :return: The QuaternionSE3 array in the absolute frame [N, 7].
+#     """
+#     if isinstance(origin, QuaternionSE3):
+#         t_origin = origin.translation
+#         q_origin = origin.quaternion
+#         R_origin = origin.rotation_matrix
+#     elif isinstance(origin, np.ndarray):
+#         assert origin.ndim == 1 and origin.shape[-1] == 7
+#         t_origin = origin[:3]
+#         q_origin = origin[3:]
+#         origin_quat_se3 = QuaternionSE3.from_array(origin)
+#         R_origin = origin_quat_se3.rotation_matrix
+#     else:
+#         raise TypeError(f"Expected QuaternionSE3 or np.ndarray, got {type(origin)}")
+
+#     assert se3_array.ndim >= 1
+#     assert se3_array.shape[-1] == len(QuaternionSE3Index)
+
+#     # Extract relative positions and quaternions
+#     rel_positions = se3_array[..., QuaternionSE3Index.XYZ]
+#     rel_quaternions = se3_array[..., QuaternionSE3Index.QUATERNION]
+
+#     # Compute absolute positions: R_origin @ p_rel + t_origin
+#     abs_positions = (R_origin @ rel_positions.T).T + t_origin
+
+#     # Compute absolute quaternions: q_abs = q_origin * q_rel
+#     if rel_quaternions.ndim == 1:
+#         abs_quaternions = _quaternion_multiply(q_origin, rel_quaternions)
+#     else:
+#         abs_quaternions = np.array([_quaternion_multiply(q_origin, q) for q in rel_quaternions])
+
+#     # Prepare output array
+#     abs_se3_array = se3_array.copy()
+#     abs_se3_array[..., :3] = abs_positions
+#     abs_se3_array[..., 3:] = abs_quaternions
+
+#     return abs_se3_array
+
+
+# def convert_absolute_to_relative_points_q3d_array(
+#     origin: Union[QuaternionSE3, npt.NDArray[np.float64]], points_3d_array: npt.NDArray[np.float64]
+# ) -> npt.NDArray[np.float64]:
+#     """Converts 3D points from the absolute frame to the relative frame.
+
+#     :param origin: The origin state in the absolute frame, as a QuaternionSE3 or np.ndarray [x,y,z,qw,qx,qy,qz]. 
+# :param points_3d_array: The 3D points in the absolute frame [N, 3]. +# :raises TypeError: If the origin is not a QuaternionSE3 or np.ndarray. +# :return: The 3D points in the relative frame [N, 3]. +# """ +# if isinstance(origin, QuaternionSE3): +# t_origin = origin.point_3d.array +# R_origin_inv = origin.rotation_matrix.T +# elif isinstance(origin, np.ndarray): +# assert origin.ndim == 1 and origin.shape[-1] == 7 +# t_origin = origin[:3] +# origin_quat_se3 = QuaternionSE3.from_array(origin) +# R_origin_inv = origin_quat_se3.rotation_matrix.T +# else: +# raise TypeError(f"Expected QuaternionSE3 or np.ndarray, got {type(origin)}") + +# assert points_3d_array.ndim >= 1 +# assert points_3d_array.shape[-1] == len(Point3DIndex) + +# # Transform points: R_origin^T @ (p_abs - t_origin) +# relative_points = (points_3d_array - t_origin) @ R_origin_inv.T +# return relative_points + + +# def convert_relative_to_absolute_points_q3d_array( +# origin: Union[QuaternionSE3, npt.NDArray[np.float64]], points_3d_array: npt.NDArray[np.float64] +# ) -> npt.NDArray[np.float64]: +# """Converts 3D points from the relative frame to the absolute frame. + +# :param origin: The origin state in the absolute frame, as a QuaternionSE3 or np.ndarray [x,y,z,qw,qx,qy,qz]. +# :param points_3d_array: The 3D points in the relative frame [N, 3]. +# :raises TypeError: If the origin is not a QuaternionSE3 or np.ndarray. +# :return: The 3D points in the absolute frame [N, 3]. +# """ +# if isinstance(origin, QuaternionSE3): +# t_origin = origin.point_3d.array +# R_origin = origin.rotation_matrix +# elif isinstance(origin, np.ndarray): +# assert origin.ndim == 1 and origin.shape[-1] == len(QuaternionSE3Index) +# t_origin = origin[QuaternionSE3Index.XYZ] +# origin_quat_se3 = QuaternionSE3.from_array(origin) +# R_origin = origin_quat_se3.rotation_matrix +# else: +# raise TypeError(f"Expected QuaternionSE3 or np.ndarray, got {type(origin)}") + +# assert points_3d_array.shape[-1] == 3 + +# # Transform points: R_origin @ p_rel + t_origin +# absolute_points = (R_origin @ points_3d_array.T).T + t_origin +# return absolute_points + + +# def _quaternion_multiply(q1: npt.NDArray[np.float64], q2: npt.NDArray[np.float64]) -> npt.NDArray[np.float64]: +# """Multiply two quaternions [w, x, y, z]. + +# :param q1: First quaternion [w, x, y, z]. +# :param q2: Second quaternion [w, x, y, z]. +# :return: Product quaternion [w, x, y, z]. +# """ +# w1, x1, y1, z1 = q1 +# w2, x2, y2, z2 = q2 + +# return np.array( +# [ +# w1 * w2 - x1 * x2 - y1 * y2 - z1 * z2, +# w1 * x2 + x1 * w2 + y1 * z2 - z1 * y2, +# w1 * y2 - x1 * z2 + y1 * w2 + z1 * x2, +# w1 * z2 + x1 * y2 - y1 * x2 + z1 * w2, +# ] +# ) diff --git a/d123/geometry/utils/rotation_utils.py b/d123/geometry/utils/rotation_utils.py index de12fc0f..da75c654 100644 --- a/d123/geometry/utils/rotation_utils.py +++ b/d123/geometry/utils/rotation_utils.py @@ -5,10 +5,6 @@ from d123.geometry.geometry_index import EulerAnglesIndex -# TODO: move this somewhere else -# TODO: Maybe rename wrap angle? -# TODO: Add implementation for torch, jax, or whatever else is needed. 
- def normalize_angle(angle: Union[float, npt.NDArray[np.float64]]) -> Union[float, npt.NDArray[np.float64]]: """ diff --git a/d123/script/config/common/scene_filter/all_scenes.yaml b/d123/script/config/common/scene_filter/all_scenes.yaml index d5e1b505..06ac76d5 100644 --- a/d123/script/config/common/scene_filter/all_scenes.yaml +++ b/d123/script/config/common/scene_filter/all_scenes.yaml @@ -13,3 +13,8 @@ ego_displacement_minimum_m: null duration_s: 9.2 history_s: 3.0 + +camera_types: null + +max_num_scenes: null +shuffle: false From 4c12d3d23012d44f7110036461e3939e70845e8f Mon Sep 17 00:00:00 2001 From: jbwang <1159270049@qq.com> Date: Fri, 29 Aug 2025 11:06:06 +0800 Subject: [PATCH 032/145] add fisheyecamera --- d123/common/datatypes/sensor/camera.py | 68 +++++++++++-------- .../kitti_360/kitti_360_data_converter.py | 6 +- d123/dataset/scene/arrow_scene.py | 4 +- 3 files changed, 45 insertions(+), 33 deletions(-) diff --git a/d123/common/datatypes/sensor/camera.py b/d123/common/datatypes/sensor/camera.py index c2a33d9d..e6dc60d6 100644 --- a/d123/common/datatypes/sensor/camera.py +++ b/d123/common/datatypes/sensor/camera.py @@ -2,7 +2,7 @@ import json from dataclasses import dataclass -from typing import Any, Dict +from typing import Any, Dict, Union import numpy as np import numpy.typing as npt @@ -80,31 +80,6 @@ def fov_y(self) -> float: return fov_y_rad -def camera_metadata_dict_to_json(camera_metadata: Dict[CameraType, CameraMetadata]) -> Dict[str, Dict[str, Any]]: - """ - Converts a dictionary of CameraMetadata to a JSON-serializable format. - :param camera_metadata: Dictionary of CameraMetadata. - :return: JSON-serializable dictionary. - """ - camera_metadata_dict = { - camera_type.serialize(): metadata.to_dict() for camera_type, metadata in camera_metadata.items() - } - return json.dumps(camera_metadata_dict) - - -def camera_metadata_dict_from_json(json_dict: Dict[str, Dict[str, Any]]) -> Dict[CameraType, CameraMetadata]: - """ - Converts a JSON-serializable dictionary back to a dictionary of CameraMetadata. - :param json_dict: JSON-serializable dictionary. - :return: Dictionary of CameraMetadata. - """ - camera_metadata_dict = json.loads(json_dict) - return { - CameraType.deserialize(camera_type): CameraMetadata.from_dict(metadata) - for camera_type, metadata in camera_metadata_dict.items() - } - -#TODO Code Refactoring @dataclass class FisheyeMEICameraMetadata: camera_type: CameraType @@ -124,6 +99,18 @@ def to_dict(self) -> Dict[str, Any]: "distortion": self.distortion.tolist() if self.distortion is not None else None, "projection_parameters": self.projection_parameters.tolist() if self.projection_parameters is not None else None, } + + @classmethod + def from_dict(cls, json_dict: Dict[str, Any]) -> CameraMetadata: + # TODO: remove None types. Only a placeholder for now. 
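+        # Inverse of to_dict(): distortion and projection_parameters were serialized
+        # via .tolist() and are rebuilt as np.ndarray below (None is passed through).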
+ return cls( + camera_type=CameraType(json_dict["camera_type"]), + width=json_dict["width"], + height=json_dict["height"], + mirror_parameters=json_dict["mirror_parameters"], + distortion=np.array(json_dict["distortion"]) if json_dict["distortion"] is not None else None, + projection_parameters=np.array(json_dict["projection_parameters"]) if json_dict["projection_parameters"] is not None else None, + ) def cam2image(self, points_3d: npt.NDArray[np.float64]) -> npt.NDArray[np.float64]: ''' camera coordinate to image plane ''' @@ -151,7 +138,34 @@ def cam2image(self, points_3d: npt.NDArray[np.float64]) -> npt.NDArray[np.float6 y = gamma2*y + v0 return x, y, norm * points_3d[:,2] / np.abs(points_3d[:,2]) - + +def camera_metadata_dict_to_json(camera_metadata: Dict[CameraType, CameraMetadata]) -> Dict[str, Dict[str, Any]]: + """ + Converts a dictionary of CameraMetadata to a JSON-serializable format. + :param camera_metadata: Dictionary of CameraMetadata. + :return: JSON-serializable dictionary. + """ + camera_metadata_dict = { + camera_type.serialize(): metadata.to_dict() for camera_type, metadata in camera_metadata.items() + } + return json.dumps(camera_metadata_dict) + + +def camera_metadata_dict_from_json(json_dict: Dict[str, Dict[str, Any]]) -> Dict[CameraType, Union[CameraMetadata, FisheyeMEICameraMetadata]]: + """ + Converts a JSON-serializable dictionary back to a dictionary of CameraMetadata. + :param json_dict: JSON-serializable dictionary. + :return: Dictionary of CameraMetadata. + """ + camera_metadata_dict = json.loads(json_dict) + out: Dict[CameraType, Union[CameraMetadata, FisheyeMEICameraMetadata]] = {} + for camera_type, metadata in camera_metadata_dict.items(): + cam_type = CameraType.deserialize(camera_type) + if isinstance(metadata, dict) and "mirror_parameters" in metadata: + out[cam_type] = FisheyeMEICameraMetadata.from_dict(metadata) + else: + out[cam_type] = CameraMetadata.from_dict(metadata) + return out @dataclass class Camera: diff --git a/d123/dataset/dataset_specific/kitti_360/kitti_360_data_converter.py b/d123/dataset/dataset_specific/kitti_360/kitti_360_data_converter.py index 1b967fca..93a84c9e 100644 --- a/d123/dataset/dataset_specific/kitti_360/kitti_360_data_converter.py +++ b/d123/dataset/dataset_specific/kitti_360/kitti_360_data_converter.py @@ -43,9 +43,8 @@ KITTI360_CAMERA_TYPES = { CameraType.CAM_STEREO_L: "image_00", CameraType.CAM_STEREO_R: "image_01", - # TODO need code refactoring to support fisheye cameras - # CameraType.CAM_L1: "image_02", - # CameraType.CAM_R1: "image_03", + CameraType.CAM_L1: "image_02", + CameraType.CAM_R1: "image_03", } DIR_2D_RAW = "data_2d_raw" @@ -655,7 +654,6 @@ def _extract_cameras( with open(img_path_png, "rb") as f: camera_data = f.read(), cam2pose else: - #TODO camera_data = None, cam2pose.flatten().tolist() camera_dict[camera_type] = camera_data return camera_dict diff --git a/d123/dataset/scene/arrow_scene.py b/d123/dataset/scene/arrow_scene.py index ecd68111..39d90c9c 100644 --- a/d123/dataset/scene/arrow_scene.py +++ b/d123/dataset/scene/arrow_scene.py @@ -6,7 +6,7 @@ from d123.common.datatypes.detection.detection import BoxDetectionWrapper, TrafficLightDetectionWrapper from d123.common.datatypes.recording.detection_recording import DetectionRecording -from d123.common.datatypes.sensor.camera import Camera, CameraMetadata, CameraType, camera_metadata_dict_from_json +from d123.common.datatypes.sensor.camera import Camera, CameraMetadata, FisheyeMEICameraMetadata, CameraType, camera_metadata_dict_from_json from 
d123.common.datatypes.sensor.lidar import LiDAR, LiDARMetadata, LiDARType, lidar_metadata_dict_from_json from d123.common.datatypes.time.time_point import TimePoint from d123.common.datatypes.vehicle_state.ego_state import EgoStateSE3 @@ -70,7 +70,7 @@ def __init__( ) = _get_scene_data(arrow_file_path) self._metadata: LogMetadata = _metadata self._vehicle_parameters: VehicleParameters = _vehicle_parameters - self._camera_metadata: Dict[CameraType, CameraMetadata] = _camera_metadata + self._camera_metadata: Dict[CameraType, Union[CameraMetadata, FisheyeMEICameraMetadata]] = _camera_metadata self._lidar_metadata: Dict[LiDARType, LiDARMetadata] = _lidar_metadata self._map_api: Optional[AbstractMap] = None From 551bed981ce38b7a56084429743b881070f230b7 Mon Sep 17 00:00:00 2001 From: Daniel Dauner Date: Tue, 2 Sep 2025 16:43:14 +0200 Subject: [PATCH 033/145] Move nuplan specific dependencies as optional (#45) --- pyproject.toml | 35 ++++++++++++++++------------------- 1 file changed, 16 insertions(+), 19 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index dda67ef7..44061249 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -20,43 +20,28 @@ readme = "README.md" requires-python = ">=3.9" license = {text = "Apache-2.0"} dependencies = [ - "aioboto3", - "aiofiles", "bokeh", - "casadi", - "control", - "Fiona", "geopandas", - "guppy3", "joblib", "matplotlib", - "nest_asyncio", "numpy", "opencv-python", "pandas", "Pillow", "psutil", "pyarrow", - "pyinstrument", "pyogrio", "pyquaternion", "pytest", "rasterio", "ray", - "retry", "rtree", "scipy", - "selenium", "setuptools", "shapely>=2.0.0", - "SQLAlchemy==1.4.27", - "sympy", - "tornado", "tqdm", - "ujson", "notebook", "pre-commit", - "cachetools", "hydra_colorlog", "hydra-core", "lxml", @@ -84,20 +69,32 @@ docs = [ ] nuplan = [ "nuplan-devkit @ git+https://github.com/motional/nuplan-devkit/@nuplan-devkit-v1.2", + "ujson", + "tornado", + "sympy", + "SQLAlchemy==1.4.27", + "selenium", + "nest_asyncio", + "cachetools", + "aioboto3", + "aiofiles", + "casadi", + "control", + "pyinstrument", + "Fiona", + "guppy3", + "retry", ] waymo = [ "tensorflow==2.11.0", "waymo-open-dataset-tf-2-11-0", "intervaltree", ] -av2 = [ - "av2==0.2.1", -] [tool.setuptools.packages.find] where = ["."] include = ["d123*"] # Only include d123 package -exclude = ["notebooks*", "tests*", "docs*"] # Explicitly exclude notebooks +exclude = ["notebooks*", "docs*"] # Explicitly exclude notebooks [project.urls] "Homepage" = "https://github.com/DanielDauner/d123" From 3bcf0caacea61d263fe1be12d40140af21dc3d8a Mon Sep 17 00:00:00 2001 From: Daniel Dauner Date: Tue, 2 Sep 2025 18:33:58 +0200 Subject: [PATCH 034/145] Add import errors for optional dependencies (#45) --- d123/common/utils/dependencies.py | 18 ++++++ .../dataset_specific/nuplan/load_sensor.py | 12 ++-- .../nuplan/nuplan_data_converter.py | 18 +++--- .../wopd/wopd_data_converter.py | 8 ++- requirements.txt | 58 ------------------- 5 files changed, 39 insertions(+), 75 deletions(-) create mode 100644 d123/common/utils/dependencies.py delete mode 100644 requirements.txt diff --git a/d123/common/utils/dependencies.py b/d123/common/utils/dependencies.py new file mode 100644 index 00000000..d750061e --- /dev/null +++ b/d123/common/utils/dependencies.py @@ -0,0 +1,18 @@ +from typing import List, Union + + +def check_dependencies(modules: Union[str, List[str,]], optional_name: str) -> None: + """ + Checks if the given modules can be imported, otherwise raises an ImportError with a message + :param modules: Module name or 
list of module names to check + :param optional_name: Name of the optional feature + :raises ImportError: If any of the modules cannot be imported + """ + modules = modules if isinstance(modules, list) else [modules] + for module in modules: + try: + __import__(module) + except ImportError: + raise ImportError( + f"Missing '{module}'. Install with: `pip install d123[{optional_name}]` or `pip install -e .[{optional_name}]`" + ) diff --git a/d123/dataset/dataset_specific/nuplan/load_sensor.py b/d123/dataset/dataset_specific/nuplan/load_sensor.py index 0bbdc406..3e0033b3 100644 --- a/d123/dataset/dataset_specific/nuplan/load_sensor.py +++ b/d123/dataset/dataset_specific/nuplan/load_sensor.py @@ -1,10 +1,13 @@ import io from pathlib import Path -from nuplan.database.utils.pointclouds.lidar import LidarPointCloud +from d123.common.utils.dependencies import check_dependencies from d123.common.datatypes.sensor.lidar import LiDAR, LiDARMetadata +check_dependencies(["nuplan"], "nuplan") +from nuplan.database.utils.pointclouds.lidar import LidarPointCloud + def load_nuplan_lidar_from_path(filepath: Path, lidar_metadata: LiDARMetadata) -> LiDAR: assert filepath.exists(), f"LiDAR file not found: {filepath}" @@ -12,10 +15,3 @@ def load_nuplan_lidar_from_path(filepath: Path, lidar_metadata: LiDARMetadata) - buffer = io.BytesIO(fp.read()) return LiDAR(metadata=lidar_metadata, point_cloud=LidarPointCloud.from_buffer(buffer, "pcd").points) - -# def load_camera_from_path(filename: str, metadata: CameraMetadata) -> Camera: -# camera_full_path = NUPLAN_DATA_ROOT / "nuplan-v1.1" / "sensor_blobs" / filename -# assert camera_full_path.exists(), f"Camera file not found: {camera_full_path}" -# img = Image.open(camera_full_path) -# img.load() -# return Camera(metadata=metadata, image=np.asarray(img, dtype=np.uint8)) diff --git a/d123/dataset/dataset_specific/nuplan/nuplan_data_converter.py b/d123/dataset/dataset_specific/nuplan/nuplan_data_converter.py index f57a5f5e..b7b52e0b 100644 --- a/d123/dataset/dataset_specific/nuplan/nuplan_data_converter.py +++ b/d123/dataset/dataset_specific/nuplan/nuplan_data_converter.py @@ -10,14 +10,8 @@ import numpy as np import pyarrow as pa import yaml -from nuplan.database.nuplan_db.nuplan_scenario_queries import get_cameras, get_images_from_lidar_tokens -from nuplan.database.nuplan_db_orm.ego_pose import EgoPose -from nuplan.database.nuplan_db_orm.lidar_box import LidarBox -from nuplan.database.nuplan_db_orm.lidar_pc import LidarPc -from nuplan.database.nuplan_db_orm.nuplandb import NuPlanDB -from nuplan.planning.simulation.observation.observation_type import CameraChannel from pyquaternion import Quaternion -from sqlalchemy import func + import d123.dataset.dataset_specific.nuplan.utils as nuplan_utils from d123.common.datatypes.detection.detection import TrafficLightStatus @@ -32,6 +26,7 @@ rear_axle_se3_to_center_se3, ) from d123.common.multithreading.worker_utils import WorkerPool, worker_map +from d123.common.utils.dependencies import check_dependencies from d123.dataset.arrow.helper import open_arrow_table, write_arrow_table from d123.dataset.dataset_specific.nuplan.nuplan_map_conversion import MAP_LOCATIONS, NuPlanMapConverter from d123.dataset.dataset_specific.raw_data_converter import DataConverterConfig, RawDataConverter @@ -39,6 +34,15 @@ from d123.geometry import BoundingBoxSE3, BoundingBoxSE3Index, StateSE3, Vector3D, Vector3DIndex from d123.geometry.utils.constants import DEFAULT_PITCH, DEFAULT_ROLL +check_dependencies(["nuplan", "sqlalchemy"], "nuplan") +from 
nuplan.database.nuplan_db.nuplan_scenario_queries import get_cameras, get_images_from_lidar_tokens +from nuplan.database.nuplan_db_orm.ego_pose import EgoPose +from nuplan.database.nuplan_db_orm.lidar_box import LidarBox +from nuplan.database.nuplan_db_orm.lidar_pc import LidarPc +from nuplan.database.nuplan_db_orm.nuplandb import NuPlanDB +from nuplan.planning.simulation.observation.observation_type import CameraChannel +from sqlalchemy import func + TARGET_DT: Final[float] = 0.1 NUPLAN_DT: Final[float] = 0.05 SORT_BY_TIMESTAMP: Final[bool] = True diff --git a/d123/dataset/dataset_specific/wopd/wopd_data_converter.py b/d123/dataset/dataset_specific/wopd/wopd_data_converter.py index 812d40c6..3e577a04 100644 --- a/d123/dataset/dataset_specific/wopd/wopd_data_converter.py +++ b/d123/dataset/dataset_specific/wopd/wopd_data_converter.py @@ -10,9 +10,8 @@ import numpy as np import numpy.typing as npt import pyarrow as pa -import tensorflow as tf from pyquaternion import Quaternion -from waymo_open_dataset import dataset_pb2 + from d123.common.datatypes.detection.detection_types import DetectionType from d123.common.datatypes.sensor.camera import CameraMetadata, CameraType, camera_metadata_dict_to_json @@ -21,6 +20,7 @@ from d123.common.datatypes.vehicle_state.ego_state import DynamicStateSE3, EgoStateSE3, EgoStateSE3Index from d123.common.datatypes.vehicle_state.vehicle_parameters import get_wopd_chrysler_pacifica_parameters from d123.common.multithreading.worker_utils import WorkerPool, worker_map +from d123.common.utils.dependencies import check_dependencies from d123.dataset.arrow.helper import open_arrow_table, write_arrow_table from d123.dataset.dataset_specific.raw_data_converter import DataConverterConfig, RawDataConverter from d123.dataset.dataset_specific.wopd.waymo_map_utils.wopd_map_utils import convert_wopd_map @@ -30,6 +30,10 @@ from d123.geometry.transform.transform_se3 import convert_relative_to_absolute_se3_array from d123.geometry.utils.constants import DEFAULT_PITCH, DEFAULT_ROLL +check_dependencies(modules=["tensorflow", "waymo_open_dataset"], optional_name="waymo") +import tensorflow as tf +from waymo_open_dataset import dataset_pb2 + # TODO: Make keep_polar_features an optional argument. # With polar features, the lidar loading time is SIGNIFICANTLY higher. 
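
The guard pattern above is shared by all optional converters: `check_dependencies` runs at
module import time, before the guarded third-party imports, so a missing extra fails fast
with an install hint instead of a bare ModuleNotFoundError deep inside a worker. A minimal
sketch of the resulting behavior (illustrative only; assumes the waymo extras are not installed):

    from d123.common.utils.dependencies import check_dependencies

    check_dependencies("json", optional_name="waymo")  # importable module: returns None
    check_dependencies(["tensorflow", "waymo_open_dataset"], optional_name="waymo")
    # ImportError: Missing 'tensorflow'. Install with: `pip install d123[waymo]` or `pip install -e .[waymo]`
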
diff --git a/requirements.txt b/requirements.txt deleted file mode 100644 index 4c17c209..00000000 --- a/requirements.txt +++ /dev/null @@ -1,58 +0,0 @@ -# nuplan-devkit @ git+https://github.com/motional/nuplan-devkit/@nuplan-devkit-v1.2 - -# # # nuplan requirements -# aioboto3 -# aiofiles -# bokeh -# casadi -# control -# Fiona -# geopandas -# guppy3 -# joblib -# matplotlib -# nest_asyncio -# numpy -# opencv-python -# pandas -# Pillow -# psutil -# pyarrow -# pyinstrument -# pyogrio -# pyquaternion -# pytest -# rasterio -# ray -# retry -# rtree -# scipy -# selenium -# setuptools -# shapely>=2.0.0 -# SQLAlchemy==1.4.27 -# sympy -# tornado -# tqdm -# ujson -# notebook -# pre-commit -# cachetools - -# # hydra -# hydra_colorlog -# hydra-core - -# # d123 -# lxml -# trimesh -# viser -# gym==0.17.2 -# gymnasium==0.26.3 - -# # torch & lighting -# torch==2.6.0 -# torchvision==0.21.0 -# lightning -# tensorboard -# protobuf==4.25.3 From 4241723b677a3c1908b3d0548411b32b5b18d0fd Mon Sep 17 00:00:00 2001 From: jbwang <1159270049@qq.com> Date: Sun, 14 Sep 2025 16:04:11 +0800 Subject: [PATCH 035/145] refactor camera.py, create base CameraMetadata and rename origin into PinholeCameraMetadata --- d123/common/datatypes/sensor/camera.py | 38 +++++++++++------- .../av2/av2_data_converter.py | 8 ++-- .../carla/carla_data_converter.py | 6 +-- .../kitti_360/kitti_360_data_converter.py | 40 +++++-------------- .../kitti_360/kitti_360_helper.py | 32 ++++++++++++++- .../kitti_360/preprocess_detection.py | 10 ++--- .../nuplan/nuplan_data_converter.py | 10 ++--- .../wopd/wopd_data_converter.py | 8 ++-- d123/dataset/scene/arrow_scene.py | 4 +- 9 files changed, 87 insertions(+), 69 deletions(-) diff --git a/d123/common/datatypes/sensor/camera.py b/d123/common/datatypes/sensor/camera.py index e6dc60d6..a9cc209e 100644 --- a/d123/common/datatypes/sensor/camera.py +++ b/d123/common/datatypes/sensor/camera.py @@ -3,6 +3,7 @@ import json from dataclasses import dataclass from typing import Any, Dict, Union +from abc import ABC, abstractmethod import numpy as np import numpy.typing as npt @@ -26,13 +27,24 @@ class CameraType(SerialIntEnum): CAM_STEREO_L = 8 CAM_STEREO_R = 9 - @dataclass -class CameraMetadata: - +class CameraMetadata(ABC): camera_type: CameraType width: int height: int + + @abstractmethod + def to_dict(self) -> Dict[str, Any]: + ... + + @classmethod + @abstractmethod + def from_dict(cls, json_dict: Dict[str, Any]) -> CameraMetadata: + ... + +@dataclass +class PinholeCameraMetadata(CameraMetadata): + intrinsic: npt.NDArray[np.float64] # 3x3 matrix # TODO: don't store matrix but values. distortion: npt.NDArray[np.float64] # 5x1 vector # TODO: don't store matrix but values. @@ -47,7 +59,7 @@ def to_dict(self) -> Dict[str, Any]: } @classmethod - def from_dict(cls, json_dict: Dict[str, Any]) -> CameraMetadata: + def from_dict(cls, json_dict: Dict[str, Any]) -> PinholeCameraMetadata: # TODO: remove None types. Only a placeholder for now. 
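+        # Rebuilds the metadata from the JSON-serializable dict produced by to_dict().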
return cls( camera_type=CameraType(json_dict["camera_type"]), @@ -81,11 +93,9 @@ def fov_y(self) -> float: @dataclass -class FisheyeMEICameraMetadata: - camera_type: CameraType - width: int - height: int - mirror_parameters: int +class FisheyeMEICameraMetadata(CameraMetadata): + + mirror_parameters: float distortion: npt.NDArray[np.float64] # k1,k2,p1,p2 projection_parameters: npt.NDArray[np.float64] #gamma1,gamma2,u0,v0 @@ -101,7 +111,7 @@ def to_dict(self) -> Dict[str, Any]: } @classmethod - def from_dict(cls, json_dict: Dict[str, Any]) -> CameraMetadata: + def from_dict(cls, json_dict: Dict[str, Any]) -> FisheyeMEICameraMetadata: # TODO: remove None types. Only a placeholder for now. return cls( camera_type=CameraType(json_dict["camera_type"]), @@ -151,26 +161,26 @@ def camera_metadata_dict_to_json(camera_metadata: Dict[CameraType, CameraMetadat return json.dumps(camera_metadata_dict) -def camera_metadata_dict_from_json(json_dict: Dict[str, Dict[str, Any]]) -> Dict[CameraType, Union[CameraMetadata, FisheyeMEICameraMetadata]]: +def camera_metadata_dict_from_json(json_dict: Dict[str, Dict[str, Any]]) -> Dict[CameraType, Union[PinholeCameraMetadata, FisheyeMEICameraMetadata]]: """ Converts a JSON-serializable dictionary back to a dictionary of CameraMetadata. :param json_dict: JSON-serializable dictionary. :return: Dictionary of CameraMetadata. """ camera_metadata_dict = json.loads(json_dict) - out: Dict[CameraType, Union[CameraMetadata, FisheyeMEICameraMetadata]] = {} + out: Dict[CameraType, Union[PinholeCameraMetadata, FisheyeMEICameraMetadata]] = {} for camera_type, metadata in camera_metadata_dict.items(): cam_type = CameraType.deserialize(camera_type) if isinstance(metadata, dict) and "mirror_parameters" in metadata: out[cam_type] = FisheyeMEICameraMetadata.from_dict(metadata) else: - out[cam_type] = CameraMetadata.from_dict(metadata) + out[cam_type] = PinholeCameraMetadata.from_dict(metadata) return out @dataclass class Camera: - metadata: CameraMetadata + metadata: PinholeCameraMetadata image: npt.NDArray[np.uint8] extrinsic: npt.NDArray[np.float64] # 4x4 matrix diff --git a/d123/dataset/dataset_specific/av2/av2_data_converter.py b/d123/dataset/dataset_specific/av2/av2_data_converter.py index f5e5e44a..d1dace89 100644 --- a/d123/dataset/dataset_specific/av2/av2_data_converter.py +++ b/d123/dataset/dataset_specific/av2/av2_data_converter.py @@ -11,7 +11,7 @@ import pyarrow as pa from pyquaternion import Quaternion -from d123.common.datatypes.sensor.camera import CameraMetadata, CameraType, camera_metadata_dict_to_json +from d123.common.datatypes.sensor.camera import PinholeCameraMetadata, CameraType, camera_metadata_dict_to_json from d123.common.datatypes.sensor.lidar import LiDARMetadata, LiDARType, lidar_metadata_dict_to_json from d123.common.datatypes.time.time_point import TimePoint from d123.common.datatypes.vehicle_state.ego_state import DynamicStateSE3, EgoStateSE3, EgoStateSE3Index @@ -234,17 +234,17 @@ def convert_av2_log_to_arrow( return [] -def get_av2_camera_metadata(log_path: Path) -> Dict[CameraType, CameraMetadata]: +def get_av2_camera_metadata(log_path: Path) -> Dict[CameraType, PinholeCameraMetadata]: intrinsics_file = log_path / "calibration" / "intrinsics.feather" intrinsics_df = pd.read_feather(intrinsics_file) - camera_metadata: Dict[CameraType, CameraMetadata] = {} + camera_metadata: Dict[CameraType, PinholeCameraMetadata] = {} for _, row in intrinsics_df.iterrows(): row = row.to_dict() camera_type = AV2_CAMERA_TYPE_MAPPING[row["sensor_name"]] - 
camera_metadata[camera_type] = CameraMetadata( + camera_metadata[camera_type] = PinholeCameraMetadata( camera_type=camera_type, width=row["width_px"], height=row["height_px"], diff --git a/d123/dataset/dataset_specific/carla/carla_data_converter.py b/d123/dataset/dataset_specific/carla/carla_data_converter.py index c6ce3622..5dede534 100644 --- a/d123/dataset/dataset_specific/carla/carla_data_converter.py +++ b/d123/dataset/dataset_specific/carla/carla_data_converter.py @@ -11,7 +11,7 @@ import numpy as np import pyarrow as pa -from d123.common.datatypes.sensor.camera import CameraMetadata, CameraType, camera_metadata_dict_to_json +from d123.common.datatypes.sensor.camera import PinholeCameraMetadata, CameraType, camera_metadata_dict_to_json from d123.common.datatypes.sensor.lidar import LiDARMetadata, LiDARType, lidar_metadata_dict_to_json from d123.common.datatypes.sensor.lidar_index import CarlaLidarIndex from d123.common.datatypes.vehicle_state.ego_state import EgoStateSE3Index @@ -247,7 +247,7 @@ def _get_metadata(location: str, log_name: str) -> LogMetadata: ) -def get_carla_camera_metadata(first_log_dict: Dict[str, Any]) -> Dict[CameraType, CameraMetadata]: +def get_carla_camera_metadata(first_log_dict: Dict[str, Any]) -> Dict[CameraType, PinholeCameraMetadata]: # FIXME: This is a placeholder function to return camera metadata. @@ -256,7 +256,7 @@ def get_carla_camera_metadata(first_log_dict: Dict[str, Any]) -> Dict[CameraType dtype=np.float64, ) camera_metadata = { - CameraType.CAM_F0: CameraMetadata( + CameraType.CAM_F0: PinholeCameraMetadata( camera_type=CameraType.CAM_F0, width=1024, height=512, diff --git a/d123/dataset/dataset_specific/kitti_360/kitti_360_data_converter.py b/d123/dataset/dataset_specific/kitti_360/kitti_360_data_converter.py index 93a84c9e..aee14883 100644 --- a/d123/dataset/dataset_specific/kitti_360/kitti_360_data_converter.py +++ b/d123/dataset/dataset_specific/kitti_360/kitti_360_data_converter.py @@ -22,7 +22,7 @@ from nuplan.planning.utils.multithreading.worker_utils import WorkerPool, worker_map from d123.common.datatypes.detection.detection_types import DetectionType -from d123.common.datatypes.sensor.camera import CameraMetadata, FisheyeMEICameraMetadata, CameraType, camera_metadata_dict_to_json +from d123.common.datatypes.sensor.camera import PinholeCameraMetadata, FisheyeMEICameraMetadata, CameraType, camera_metadata_dict_to_json from d123.common.datatypes.sensor.lidar import LiDARMetadata, LiDARType, lidar_metadata_dict_to_json from d123.common.datatypes.sensor.lidar_index import Kitti360LidarIndex from d123.common.datatypes.time.time_point import TimePoint @@ -31,7 +31,7 @@ from d123.dataset.arrow.helper import open_arrow_table, write_arrow_table from d123.dataset.dataset_specific.raw_data_converter import DataConverterConfig, RawDataConverter from d123.dataset.logs.log_metadata import LogMetadata -from d123.dataset.dataset_specific.kitti_360.kitti_360_helper import KITTI360Bbox3D, KITTI3602NUPLAN_IMU_CALIBRATION +from d123.dataset.dataset_specific.kitti_360.kitti_360_helper import KITTI360Bbox3D, KITTI3602NUPLAN_IMU_CALIBRATION, get_lidar_extrinsic from d123.dataset.dataset_specific.kitti_360.labels import KIITI360_DETECTION_NAME_DICT,kittiId2label from d123.geometry import BoundingBoxSE3, BoundingBoxSE3Index, StateSE3, Vector3D, Vector3DIndex @@ -55,8 +55,8 @@ DIR_POSES = "data_poses" DIR_CALIB = "calibration" -PATH_2D_RAW_ROOT: Path = KITTI360_DATA_ROOT / DIR_2D_RAW -# PATH_2D_RAW_ROOT: Path = KITTI360_DATA_ROOT +# PATH_2D_RAW_ROOT: Path = 
KITTI360_DATA_ROOT / DIR_2D_RAW +PATH_2D_RAW_ROOT: Path = KITTI360_DATA_ROOT PATH_2D_SMT_ROOT: Path = KITTI360_DATA_ROOT / DIR_2D_SMT PATH_3D_RAW_ROOT: Path = KITTI360_DATA_ROOT / DIR_3D_RAW # PATH_3D_RAW_ROOT: Path = Path("/data/jbwang/d123/data_3d_raw") @@ -244,7 +244,7 @@ def convert_kitti360_log_to_arrow( return [] -def get_kitti360_camera_metadata() -> Dict[CameraType, Union[CameraMetadata, FisheyeMEICameraMetadata]]: +def get_kitti360_camera_metadata() -> Dict[CameraType, Union[PinholeCameraMetadata, FisheyeMEICameraMetadata]]: persp = PATH_CALIB_ROOT / "perspective.txt" @@ -270,10 +270,10 @@ def get_kitti360_camera_metadata() -> Dict[CameraType, Union[CameraMetadata, Fis fisheye03 = _readYAMLFile(fisheye_camera03_path) fisheye_result = {"image_02": fisheye02, "image_03": fisheye03} - log_cam_infos: Dict[str, Union[CameraMetadata, FisheyeMEICameraMetadata]] = {} + log_cam_infos: Dict[str, Union[PinholeCameraMetadata, FisheyeMEICameraMetadata]] = {} for cam_type, cam_name in KITTI360_CAMERA_TYPES.items(): if cam_name in ["image_00", "image_01"]: - log_cam_infos[cam_type] = CameraMetadata( + log_cam_infos[cam_type] = PinholeCameraMetadata( camera_type=cam_type, width=persp_result[cam_name]["wh"][0], height=persp_result[cam_name]["wh"][1], @@ -324,28 +324,6 @@ def get_kitti360_lidar_metadata() -> Dict[LiDARType, LiDARMetadata]: ) return metadata -def get_lidar_extrinsic() -> np.ndarray: - cam2pose_txt = PATH_CALIB_ROOT / "calib_cam_to_pose.txt" - if not cam2pose_txt.exists(): - raise FileNotFoundError(f"calib_cam_to_pose.txt file not found: {cam2pose_txt}") - - cam2velo_txt = PATH_CALIB_ROOT / "calib_cam_to_velo.txt" - if not cam2velo_txt.exists(): - raise FileNotFoundError(f"calib_cam_to_velo.txt file not found: {cam2velo_txt}") - - lastrow = np.array([0,0,0,1]).reshape(1,4) - - with open(cam2pose_txt, 'r') as f: - image_00 = next(f) - values = list(map(float, image_00.strip().split()[1:])) - matrix = np.array(values).reshape(3, 4) - cam2pose = np.concatenate((matrix, lastrow)) - cam2pose = KITTI3602NUPLAN_IMU_CALIBRATION @ cam2pose - - cam2velo = np.concatenate((np.loadtxt(cam2velo_txt).reshape(3,4), lastrow)) - extrinsic = cam2pose @ np.linalg.inv(cam2velo) - return extrinsic - def _write_recording_table( log_name: str, recording_schema: pa.Schema, @@ -451,8 +429,8 @@ def _extract_ego_state_all(log_name: str) -> List[List[float]]: poses = np.loadtxt(pose_file) poses_time = poses[:, 0] - 1 # Adjusting time to start from 0 - oxts_path = PATH_POSES_ROOT / log_name / "oxts" / "data" - # oxts_path = Path("/data/jbwang/d123/data_poses/") / log_name / "oxts" / "data" + # oxts_path = PATH_POSES_ROOT / log_name / "oxts" / "data" + oxts_path = Path("/data/jbwang/d123/data_poses/") / log_name / "oxts" / "data" pose_idx = 0 poses_time_len = len(poses_time) diff --git a/d123/dataset/dataset_specific/kitti_360/kitti_360_helper.py b/d123/dataset/dataset_specific/kitti_360/kitti_360_helper.py index 76e3c9e0..77217b5d 100644 --- a/d123/dataset/dataset_specific/kitti_360/kitti_360_helper.py +++ b/d123/dataset/dataset_specific/kitti_360/kitti_360_helper.py @@ -8,6 +8,13 @@ from d123.geometry import BoundingBoxSE3, StateSE3 from d123.dataset.dataset_specific.kitti_360.labels import kittiId2label +import os +from pathlib import Path + +KITTI360_DATA_ROOT = Path(os.environ["KITTI360_DATA_ROOT"]) +DIR_CALIB = "calibration" +PATH_CALIB_ROOT: Path = KITTI360_DATA_ROOT / DIR_CALIB + DEFAULT_ROLL = 0.0 DEFAULT_PITCH = 0.0 @@ -162,4 +169,27 @@ def box_visible_in_point_cloud(self, points): def 
load_detection_preprocess(self, records_dict: Dict[int, Any]): if self.globalID in records_dict: - self.valid_frames["records"] = records_dict[self.globalID]["records"] \ No newline at end of file + self.valid_frames["records"] = records_dict[self.globalID]["records"] + + +def get_lidar_extrinsic() -> np.ndarray: + cam2pose_txt = PATH_CALIB_ROOT / "calib_cam_to_pose.txt" + if not cam2pose_txt.exists(): + raise FileNotFoundError(f"calib_cam_to_pose.txt file not found: {cam2pose_txt}") + + cam2velo_txt = PATH_CALIB_ROOT / "calib_cam_to_velo.txt" + if not cam2velo_txt.exists(): + raise FileNotFoundError(f"calib_cam_to_velo.txt file not found: {cam2velo_txt}") + + lastrow = np.array([0,0,0,1]).reshape(1,4) + + with open(cam2pose_txt, 'r') as f: + image_00 = next(f) + values = list(map(float, image_00.strip().split()[1:])) + matrix = np.array(values).reshape(3, 4) + cam2pose = np.concatenate((matrix, lastrow)) + cam2pose = KITTI3602NUPLAN_IMU_CALIBRATION @ cam2pose + + cam2velo = np.concatenate((np.loadtxt(cam2velo_txt).reshape(3,4), lastrow)) + extrinsic = cam2pose @ np.linalg.inv(cam2velo) + return extrinsic \ No newline at end of file diff --git a/d123/dataset/dataset_specific/kitti_360/preprocess_detection.py b/d123/dataset/dataset_specific/kitti_360/preprocess_detection.py index 8b7c284f..5827e779 100644 --- a/d123/dataset/dataset_specific/kitti_360/preprocess_detection.py +++ b/d123/dataset/dataset_specific/kitti_360/preprocess_detection.py @@ -25,13 +25,13 @@ DIR_3D_BBOX = "data_3d_bboxes" DIR_POSES = "data_poses" -PATH_3D_RAW_ROOT = KITTI360_DATA_ROOT / DIR_3D_RAW +# PATH_3D_RAW_ROOT = KITTI360_DATA_ROOT / DIR_3D_RAW +PATH_3D_RAW_ROOT = Path("/data/jbwang/d123/data_3d_raw/") PATH_3D_BBOX_ROOT = KITTI360_DATA_ROOT / DIR_3D_BBOX PATH_POSES_ROOT = KITTI360_DATA_ROOT / DIR_POSES -from d123.dataset.dataset_specific.kitti_360.kitti_360_helper import KITTI360Bbox3D, KITTI3602NUPLAN_IMU_CALIBRATION +from d123.dataset.dataset_specific.kitti_360.kitti_360_helper import KITTI360Bbox3D, KITTI3602NUPLAN_IMU_CALIBRATION, get_lidar_extrinsic from d123.dataset.dataset_specific.kitti_360.labels import KIITI360_DETECTION_NAME_DICT, kittiId2label -from d123.dataset.dataset_specific.kitti_360.kitti_360_data_converter import get_lidar_extrinsic def _bbox_xml_path(log_name: str) -> Path: return PATH_3D_BBOX_ROOT / "train" / f"{log_name}.xml" @@ -178,9 +178,9 @@ def process_detection( import argparse logging.basicConfig(level=logging.INFO) parser = argparse.ArgumentParser(description="Precompute KITTI-360 static detections filters") - parser.add_argument("--log_name", default="2013_05_28_drive_0000_sync") + parser.add_argument("--log_name", default="2013_05_28_drive_0007_sync") parser.add_argument("--radius", type=float, default=60.0) - parser.add_argument("--out", type=Path, default=None, help="output directory for pkl") + parser.add_argument("--out", type=Path, default="detection_preprocess", help="output directory for pkl") args = parser.parse_args() process_detection( log_name=args.log_name, diff --git a/d123/dataset/dataset_specific/nuplan/nuplan_data_converter.py b/d123/dataset/dataset_specific/nuplan/nuplan_data_converter.py index b7b52e0b..47e4bb02 100644 --- a/d123/dataset/dataset_specific/nuplan/nuplan_data_converter.py +++ b/d123/dataset/dataset_specific/nuplan/nuplan_data_converter.py @@ -16,7 +16,7 @@ import d123.dataset.dataset_specific.nuplan.utils as nuplan_utils from d123.common.datatypes.detection.detection import TrafficLightStatus from d123.common.datatypes.detection.detection_types import 
DetectionType -from d123.common.datatypes.sensor.camera import CameraMetadata, CameraType, camera_metadata_dict_to_json +from d123.common.datatypes.sensor.camera import PinholeCameraMetadata, CameraType, camera_metadata_dict_to_json from d123.common.datatypes.sensor.lidar import LiDARMetadata, LiDARType, lidar_metadata_dict_to_json from d123.common.datatypes.sensor.lidar_index import NuplanLidarIndex from d123.common.datatypes.time.time_point import TimePoint @@ -256,15 +256,15 @@ def convert_nuplan_log_to_arrow( return [] -def get_nuplan_camera_metadata(log_path: Path) -> Dict[CameraType, CameraMetadata]: +def get_nuplan_camera_metadata(log_path: Path) -> Dict[CameraType, PinholeCameraMetadata]: - def _get_camera_metadata(camera_type: CameraType) -> CameraMetadata: + def _get_camera_metadata(camera_type: CameraType) -> PinholeCameraMetadata: cam = list(get_cameras(log_path, [str(NUPLAN_CAMERA_TYPES[camera_type].value)]))[0] intrinsic = np.array(pickle.loads(cam.intrinsic)) rotation = np.array(pickle.loads(cam.rotation)) rotation = Quaternion(rotation).rotation_matrix distortion = np.array(pickle.loads(cam.distortion)) - return CameraMetadata( + return PinholeCameraMetadata( camera_type=camera_type, width=cam.width, height=cam.height, @@ -272,7 +272,7 @@ def _get_camera_metadata(camera_type: CameraType) -> CameraMetadata: distortion=distortion, ) - log_cam_infos: Dict[str, CameraMetadata] = {} + log_cam_infos: Dict[str, PinholeCameraMetadata] = {} for camera_type in NUPLAN_CAMERA_TYPES.keys(): log_cam_infos[camera_type] = _get_camera_metadata(camera_type) diff --git a/d123/dataset/dataset_specific/wopd/wopd_data_converter.py b/d123/dataset/dataset_specific/wopd/wopd_data_converter.py index 3e577a04..2a0c6425 100644 --- a/d123/dataset/dataset_specific/wopd/wopd_data_converter.py +++ b/d123/dataset/dataset_specific/wopd/wopd_data_converter.py @@ -14,7 +14,7 @@ from d123.common.datatypes.detection.detection_types import DetectionType -from d123.common.datatypes.sensor.camera import CameraMetadata, CameraType, camera_metadata_dict_to_json +from d123.common.datatypes.sensor.camera import PinholeCameraMetadata, CameraType, camera_metadata_dict_to_json from d123.common.datatypes.sensor.lidar import LiDARMetadata, LiDARType, lidar_metadata_dict_to_json from d123.common.datatypes.sensor.lidar_index import WopdLidarIndex from d123.common.datatypes.vehicle_state.ego_state import DynamicStateSE3, EgoStateSE3, EgoStateSE3Index @@ -275,9 +275,9 @@ def convert_wopd_tfrecord_log_to_arrow( def get_wopd_camera_metadata( initial_frame: dataset_pb2.Frame, data_converter_config: DataConverterConfig -) -> Dict[CameraType, CameraMetadata]: +) -> Dict[CameraType, PinholeCameraMetadata]: - cam_metadatas: Dict[CameraType, CameraMetadata] = {} + cam_metadatas: Dict[CameraType, PinholeCameraMetadata] = {} if data_converter_config.camera_store_option is not None: for calibration in initial_frame.context.camera_calibrations: camera_type = WOPD_CAMERA_TYPES[calibration.name] @@ -289,7 +289,7 @@ def get_wopd_camera_metadata( _distortions = np.array([k1, k2, p1, p2, k3]) if camera_type in WOPD_CAMERA_TYPES.values(): - cam_metadatas[camera_type] = CameraMetadata( + cam_metadatas[camera_type] = PinholeCameraMetadata( camera_type=camera_type, width=calibration.width, height=calibration.height, diff --git a/d123/dataset/scene/arrow_scene.py b/d123/dataset/scene/arrow_scene.py index 39d90c9c..6670f138 100644 --- a/d123/dataset/scene/arrow_scene.py +++ b/d123/dataset/scene/arrow_scene.py @@ -6,7 +6,7 @@ from 
d123.common.datatypes.detection.detection import BoxDetectionWrapper, TrafficLightDetectionWrapper from d123.common.datatypes.recording.detection_recording import DetectionRecording -from d123.common.datatypes.sensor.camera import Camera, CameraMetadata, FisheyeMEICameraMetadata, CameraType, camera_metadata_dict_from_json +from d123.common.datatypes.sensor.camera import Camera, CameraMetadata, PinholeCameraMetadata, FisheyeMEICameraMetadata, CameraType, camera_metadata_dict_from_json from d123.common.datatypes.sensor.lidar import LiDAR, LiDARMetadata, LiDARType, lidar_metadata_dict_from_json from d123.common.datatypes.time.time_point import TimePoint from d123.common.datatypes.vehicle_state.ego_state import EgoStateSE3 @@ -70,7 +70,7 @@ def __init__( ) = _get_scene_data(arrow_file_path) self._metadata: LogMetadata = _metadata self._vehicle_parameters: VehicleParameters = _vehicle_parameters - self._camera_metadata: Dict[CameraType, Union[CameraMetadata, FisheyeMEICameraMetadata]] = _camera_metadata + self._camera_metadata: Dict[CameraType, Union[PinholeCameraMetadata, FisheyeMEICameraMetadata]] = _camera_metadata self._lidar_metadata: Dict[LiDARType, LiDARMetadata] = _lidar_metadata self._map_api: Optional[AbstractMap] = None From c4bab5f9210b91def0e5b2c3cf8677fa01269747 Mon Sep 17 00:00:00 2001 From: Daniel Dauner Date: Sun, 21 Sep 2025 21:25:03 +0200 Subject: [PATCH 036/145] Update transform functions for quaternion se3 case. #43 #44 --- d123/geometry/geometry_index.py | 16 + d123/geometry/test/test_transform.py | 105 ++++- .../test/test_transform_quaternion.py | 193 ++++++++ .../transform/transform_quaternion_se3.py | 444 +++++++++--------- d123/geometry/transform/transform_se3.py | 23 +- d123/geometry/utils/rotation_utils.py | 107 ++++- 6 files changed, 616 insertions(+), 272 deletions(-) create mode 100644 d123/geometry/test/test_transform_quaternion.py diff --git a/d123/geometry/geometry_index.py b/d123/geometry/geometry_index.py index 5f56abd9..93ff4f3a 100644 --- a/d123/geometry/geometry_index.py +++ b/d123/geometry/geometry_index.py @@ -95,6 +95,14 @@ class QuaternionIndex(IntEnum): QY = 2 QZ = 3 + @classproperty + def SCALAR(cls) -> int: + return cls.QW + + @classproperty + def VECTOR(cls) -> slice: + return slice(cls.QX, cls.QZ + 1) + class StateSE3Index(IntEnum): """ @@ -147,6 +155,14 @@ def XYZ(cls) -> slice: def QUATERNION(cls) -> slice: return slice(cls.QW, cls.QZ + 1) + @classproperty + def SCALAR(cls) -> slice: + return cls.QW + + @classproperty + def VECTOR(cls) -> slice: + return slice(cls.QX, cls.QZ + 1) + class BoundingBoxSE2Index(IntEnum): """ diff --git a/d123/geometry/test/test_transform.py b/d123/geometry/test/test_transform.py index 89161153..8c033803 100644 --- a/d123/geometry/test/test_transform.py +++ b/d123/geometry/test/test_transform.py @@ -18,6 +18,7 @@ from d123.geometry.transform.transform_se3 import ( convert_absolute_to_relative_points_3d_array, convert_absolute_to_relative_se3_array, + convert_absolute_to_relative_se3_array, convert_relative_to_absolute_points_3d_array, convert_relative_to_absolute_se3_array, translate_se3_along_body_frame, @@ -537,7 +538,7 @@ class TestTransformConsistency(unittest.TestCase): """Tests to ensure consistency between different transformation functions.""" def setUp(self): - self.decimal = 6 # Decimal places for np.testing.assert_array_almost_equal + self.decimal = 4 # Decimal places for np.testing.assert_array_almost_equal self.num_consistency_tests = 10 # Number of random test cases for consistency checks 
self.max_pose_xyz = 100.0 @@ -554,9 +555,10 @@ def _get_random_se3_array(self, size: int) -> npt.NDArray[np.float64]: """Generate a random SE3 poses""" random_se3_array = np.zeros((size, len(StateSE3Index)), dtype=np.float64) random_se3_array[:, StateSE3Index.XYZ] = np.random.uniform(-self.max_pose_xyz, self.max_pose_xyz, (size, 3)) - random_se3_array[:, StateSE3Index.EULER_ANGLES] = np.random.uniform( - -np.pi, np.pi, (size, len(EulerAnglesIndex)) - ) + random_se3_array[:, StateSE3Index.YAW] = np.random.uniform(-np.pi, np.pi, size) + random_se3_array[:, StateSE3Index.PITCH] = np.random.uniform(-np.pi / 2, np.pi / 2, size) + random_se3_array[:, StateSE3Index.ROLL] = np.random.uniform(-np.pi, np.pi, size) + return random_se3_array def test_se2_absolute_relative_conversion_consistency(self) -> None: @@ -790,6 +792,59 @@ def test_se2_se3_point_conversion_consistency(self) -> None: absolute_3d_recovered[:, Point3DIndex.Z], np.zeros(num_points), decimal=self.decimal ) + def test_se2_se3_pose_conversion_consistency(self) -> None: + """Test that SE2 and SE3 pose conversions are consistent for 2D points embedded in 3D""" + for _ in range(self.num_consistency_tests): + # Create equivalent SE2 and SE3 reference poses + x = np.random.uniform(-10.0, 10.0) + y = np.random.uniform(-10.0, 10.0) + yaw = np.random.uniform(-np.pi, np.pi) + + reference_se2 = StateSE2.from_array(np.array([x, y, yaw], dtype=np.float64)) + reference_se3 = StateSE3.from_array(np.array([x, y, 0.0, 0.0, 0.0, yaw], dtype=np.float64)) + + # Generate 2D poses and embed them in 3D with z=0 and zero roll/pitch + num_poses = np.random.randint(1, 8) + pose_2d = self._get_random_se2_array(num_poses) + pose_3d = np.zeros((num_poses, len(StateSE3Index)), dtype=np.float64) + pose_3d[:, StateSE3Index.XY] = pose_2d[:, StateSE2Index.XY] + pose_3d[:, StateSE3Index.YAW] = pose_2d[:, StateSE2Index.YAW] + + # Convert using SE2 functions + relative_se2 = convert_absolute_to_relative_se2_array(reference_se2, pose_2d) + absolute_se2_recovered = convert_relative_to_absolute_se2_array(reference_se2, relative_se2) + + # Convert using SE3 functions + relative_se3 = convert_absolute_to_relative_se3_array(reference_se3, pose_3d) + absolute_se3_recovered = convert_relative_to_absolute_se3_array(reference_se3, relative_se3) + + # Check that SE2 and SE3 conversions are consistent for the x,y components + np.testing.assert_array_almost_equal( + relative_se2[:, StateSE2Index.XY], relative_se3[:, StateSE3Index.XY], decimal=self.decimal + ) + np.testing.assert_array_almost_equal( + absolute_se2_recovered[:, StateSE2Index.XY], + absolute_se3_recovered[:, StateSE3Index.XY], + decimal=self.decimal, + ) + # Check that SE2 and SE3 conversions are consistent for the yaw component + np.testing.assert_array_almost_equal( + relative_se2[:, StateSE2Index.YAW], relative_se3[:, StateSE3Index.YAW], decimal=self.decimal + ) + np.testing.assert_array_almost_equal( + absolute_se2_recovered[:, StateSE2Index.YAW], + absolute_se3_recovered[:, StateSE3Index.YAW], + decimal=self.decimal, + ) + + # Check that z-components remain zero + np.testing.assert_array_almost_equal( + relative_se3[:, Point3DIndex.Z], np.zeros(num_poses), decimal=self.decimal + ) + np.testing.assert_array_almost_equal( + absolute_se3_recovered[:, Point3DIndex.Z], np.zeros(num_poses), decimal=self.decimal + ) + def test_se2_array_translation_consistency(self) -> None: """Test that SE2 array translation is consistent with single pose translation""" for _ in range(self.num_consistency_tests): @@ -814,30 +869,30 @@ 
def test_se2_array_translation_consistency(self) -> None: np.testing.assert_array_almost_equal(result_array, result_individual, decimal=self.decimal) - def test_transform_empty_arrays(self) -> None: - """Test that transform functions handle empty arrays correctly""" - reference_se2 = StateSE2.from_array(np.array([1.0, 2.0, np.pi / 4], dtype=np.float64)) - reference_se3 = StateSE3.from_array(np.array([1.0, 2.0, 3.0, 0.1, 0.2, 0.3], dtype=np.float64)) + # def test_transform_empty_arrays(self) -> None: + # """Test that transform functions handle empty arrays correctly""" + # reference_se2 = StateSE2.from_array(np.array([1.0, 2.0, np.pi / 4], dtype=np.float64)) + # reference_se3 = StateSE3.from_array(np.array([1.0, 2.0, 3.0, 0.1, 0.2, 0.3], dtype=np.float64)) - # Test SE2 empty arrays - empty_se2_poses = np.array([], dtype=np.float64).reshape(0, len(StateSE2Index)) - empty_2d_points = np.array([], dtype=np.float64).reshape(0, len(Point2DIndex)) + # # Test SE2 empty arrays + # empty_se2_poses = np.array([], dtype=np.float64).reshape(0, len(StateSE2Index)) + # empty_2d_points = np.array([], dtype=np.float64).reshape(0, len(Point2DIndex)) - result_se2_poses = convert_absolute_to_relative_se2_array(reference_se2, empty_se2_poses) - result_2d_points = convert_absolute_to_relative_point_2d_array(reference_se2, empty_2d_points) + # result_se2_poses = convert_absolute_to_relative_se2_array(reference_se2, empty_se2_poses) + # result_2d_points = convert_absolute_to_relative_point_2d_array(reference_se2, empty_2d_points) - self.assertEqual(result_se2_poses.shape, (0, len(StateSE2Index))) - self.assertEqual(result_2d_points.shape, (0, len(Point2DIndex))) + # self.assertEqual(result_se2_poses.shape, (0, len(StateSE2Index))) + # self.assertEqual(result_2d_points.shape, (0, len(Point2DIndex))) - # Test SE3 empty arrays - empty_se3_poses = np.array([], dtype=np.float64).reshape(0, len(StateSE3Index)) - empty_3d_points = np.array([], dtype=np.float64).reshape(0, len(Point3DIndex)) + # # Test SE3 empty arrays + # empty_se3_poses = np.array([], dtype=np.float64).reshape(0, len(StateSE3Index)) + # empty_3d_points = np.array([], dtype=np.float64).reshape(0, len(Point3DIndex)) - result_se3_poses = convert_absolute_to_relative_se3_array(reference_se3, empty_se3_poses) - result_3d_points = convert_absolute_to_relative_points_3d_array(reference_se3, empty_3d_points) + # result_se3_poses = convert_absolute_to_relative_se3_array(reference_se3, empty_se3_poses) + # result_3d_points = convert_absolute_to_relative_points_3d_array(reference_se3, empty_3d_points) - self.assertEqual(result_se3_poses.shape, (0, len(StateSE3Index))) - self.assertEqual(result_3d_points.shape, (0, len(Point3DIndex))) + # self.assertEqual(result_se3_poses.shape, (0, len(StateSE3Index))) + # self.assertEqual(result_3d_points.shape, (0, len(Point3DIndex))) def test_transform_identity_operations(self) -> None: """Test that transforms with identity reference frames work correctly""" @@ -864,7 +919,11 @@ def test_transform_identity_operations(self) -> None: relative_se3_poses = convert_absolute_to_relative_se3_array(identity_se3, se3_poses) relative_se3_points = convert_absolute_to_relative_points_3d_array(identity_se3, se3_points) - np.testing.assert_array_almost_equal(se3_poses, relative_se3_poses, decimal=self.decimal) + np.testing.assert_array_almost_equal( + se3_poses[..., StateSE3Index.EULER_ANGLES], + relative_se3_poses[..., StateSE3Index.EULER_ANGLES], + decimal=self.decimal, + ) np.testing.assert_array_almost_equal(se3_points, 
relative_se3_points, decimal=self.decimal)

     def test_transform_large_rotations(self) -> None:
diff --git a/d123/geometry/test/test_transform_quaternion.py b/d123/geometry/test/test_transform_quaternion.py
new file mode 100644
index 00000000..12c8433e
--- /dev/null
+++ b/d123/geometry/test/test_transform_quaternion.py
@@ -0,0 +1,192 @@
+import unittest
+
+import numpy as np
+import numpy.typing as npt
+
+from d123.geometry.geometry_index import (
+    # EulerAnglesIndex,
+    # Point2DIndex,
+    # Point3DIndex,
+    QuaternionSE3Index,
+    # StateSE2Index,
+    StateSE3Index,
+)
+from d123.geometry.point import Point3D
+from d123.geometry.rotation import EulerAngles, Quaternion
+from d123.geometry.se import StateSE3, QuaternionSE3
+from d123.geometry.transform.transform_quaternion_se3 import (
+    convert_absolute_to_relative_points_3d_array,
+    convert_absolute_to_relative_se3_array,
+    convert_relative_to_absolute_points_3d_array,
+    convert_relative_to_absolute_se3_array,
+    # translate_se3_along_body_frame,
+    # translate_se3_along_x,
+    # translate_se3_along_y,
+    # translate_se3_along_z,
+)
+import d123.geometry.transform.transform_se3 as euler_transform_se3
+from d123.geometry.utils.rotation_utils import (
+    get_rotation_matrices_from_euler_array,
+    get_rotation_matrices_from_quaternion_array,
+)
+
+# from d123.geometry.vector import Vector2D, Vector3D
+
+
+class TestTransformQuaternion(unittest.TestCase):
+
+    def setUp(self):
+        euler_se3_a = StateSE3(
+            x=1.0,
+            y=2.0,
+            z=3.0,
+            roll=np.deg2rad(90),
+            pitch=0.0,
+            yaw=0.0,
+        )
+        euler_se3_b = StateSE3(
+            x=1.0,
+            y=-2.0,
+            z=3.0,
+            roll=0.0,
+            pitch=np.deg2rad(90),
+            yaw=0.0,
+        )
+        euler_se3_c = StateSE3(
+            x=-1.0,
+            y=2.0,
+            z=-3.0,
+            roll=0.0,
+            pitch=0.0,
+            yaw=np.deg2rad(90),
+        )
+
+        quat_se3_a = euler_se3_a.quaternion_se3
+        quat_se3_b = euler_se3_b.quaternion_se3
+        quat_se3_c = euler_se3_c.quaternion_se3
+
+        self.euler_se3 = [euler_se3_a, euler_se3_b, euler_se3_c]
+        self.quat_se3 = [quat_se3_a, quat_se3_b, quat_se3_c]
+
+        self.max_pose_xyz = 100.0
+
+    def _get_random_euler_se3_array(self, size: int) -> npt.NDArray[np.float64]:
+        """Generate random SE3 poses"""
+        random_se3_array = np.zeros((size, len(StateSE3Index)), dtype=np.float64)
+        random_se3_array[:, StateSE3Index.XYZ] = np.random.uniform(-self.max_pose_xyz, self.max_pose_xyz, (size, 3))
+        random_se3_array[:, StateSE3Index.YAW] = np.random.uniform(-np.pi, np.pi, size)
+        random_se3_array[:, StateSE3Index.PITCH] = np.random.uniform(-np.pi / 2, np.pi / 2, size)
+        random_se3_array[:, StateSE3Index.ROLL] = np.random.uniform(-np.pi, np.pi, size)
+
+        return random_se3_array
+
+    def _convert_euler_se3_array_to_quat_se3_array(
+        self, euler_se3_array: npt.NDArray[np.float64]
+    ) -> npt.NDArray[np.float64]:
+        """Convert an array of SE3 poses from Euler angles to Quaternion representation"""
+        quat_se3_array = np.zeros((euler_se3_array.shape[0], len(QuaternionSE3Index)), dtype=np.float64)
+        quat_se3_array[:, QuaternionSE3Index.XYZ] = euler_se3_array[:, StateSE3Index.XYZ]
+        rotation_matrices = get_rotation_matrices_from_euler_array(euler_se3_array[:, StateSE3Index.EULER_ANGLES])
+        for idx, rotation_matrix in enumerate(rotation_matrices):
+            quat = Quaternion.from_rotation_matrix(rotation_matrix)
+            quat_se3_array[idx, QuaternionSE3Index.QUATERNION] = quat.array
+        return quat_se3_array
+
+    def test_sanity(self):
+        for quat_se3, euler_se3 in zip(self.quat_se3, self.euler_se3):
+            np.testing.assert_allclose(
+                quat_se3.point_3d.array,
euler_se3.point_3d.array, + atol=1e-6, + ) + np.testing.assert_allclose( + quat_se3.rotation_matrix, + euler_se3.rotation_matrix, + atol=1e-6, + ) + + def test_random_sanity(self): + for _ in range(10): + random_euler_se3_array = self._get_random_euler_se3_array(np.random.randint(1, 10)) + random_quat_se3_array = self._convert_euler_se3_array_to_quat_se3_array(random_euler_se3_array) + + np.testing.assert_allclose( + random_euler_se3_array[:, StateSE3Index.XYZ], + random_quat_se3_array[:, QuaternionSE3Index.XYZ], + atol=1e-6, + ) + quat_rotation_matrices = get_rotation_matrices_from_quaternion_array( + random_quat_se3_array[:, QuaternionSE3Index.QUATERNION] + ) + euler_rotation_matrices = get_rotation_matrices_from_euler_array( + random_euler_se3_array[:, StateSE3Index.EULER_ANGLES] + ) + np.testing.assert_allclose(euler_rotation_matrices, quat_rotation_matrices, atol=1e-6) + + def test_convert_absolute_to_relative_points_3d_array(self): + + random_points_3d = np.random.rand(10, 3) + for quat_se3, euler_se3 in zip(self.quat_se3, self.euler_se3): + rel_points_quat = convert_absolute_to_relative_points_3d_array(quat_se3, random_points_3d) + rel_points_euler = euler_transform_se3.convert_absolute_to_relative_points_3d_array( + euler_se3, random_points_3d + ) + np.testing.assert_allclose(rel_points_quat, rel_points_euler, atol=1e-6) + + def test_convert_absolute_to_relative_se3_array(self): + + for quat_se3, euler_se3 in zip(self.quat_se3, self.euler_se3): + random_euler_se3_array = self._get_random_euler_se3_array(np.random.randint(1, 10)) + random_quat_se3_array = self._convert_euler_se3_array_to_quat_se3_array(random_euler_se3_array) + + rel_se3_quat = convert_absolute_to_relative_se3_array(quat_se3, random_quat_se3_array) + rel_se3_euler = euler_transform_se3.convert_absolute_to_relative_se3_array( + euler_se3, random_euler_se3_array + ) + np.testing.assert_allclose( + rel_se3_euler[..., StateSE3Index.XYZ], rel_se3_quat[..., QuaternionSE3Index.XYZ], atol=1e-6 + ) + # We compare rotation matrices to avoid issues with quaternion sign ambiguity + quat_rotation_matrices = get_rotation_matrices_from_quaternion_array( + rel_se3_quat[..., QuaternionSE3Index.QUATERNION] + ) + euler_rotation_matrices = get_rotation_matrices_from_euler_array( + rel_se3_euler[..., StateSE3Index.EULER_ANGLES] + ) + np.testing.assert_allclose(quat_rotation_matrices, euler_rotation_matrices, atol=1e-6) + + def test_convert_relative_to_absolute_points_3d_array(self): + + random_points_3d = np.random.rand(10, 3) + for quat_se3, euler_se3 in zip(self.quat_se3, self.euler_se3): + rel_points_quat = convert_relative_to_absolute_points_3d_array(quat_se3, random_points_3d) + rel_points_euler = euler_transform_se3.convert_relative_to_absolute_points_3d_array( + euler_se3, random_points_3d + ) + np.testing.assert_allclose(rel_points_quat, rel_points_euler, atol=1e-6) + + def test_convert_relative_to_absolute_se3_array(self): + + for quat_se3, euler_se3 in zip(self.quat_se3, self.euler_se3): + random_euler_se3_array = self._get_random_euler_se3_array(np.random.randint(1, 10)) + random_quat_se3_array = self._convert_euler_se3_array_to_quat_se3_array(random_euler_se3_array) + + abs_se3_quat = convert_relative_to_absolute_se3_array(quat_se3, random_quat_se3_array) + abs_se3_euler = euler_transform_se3.convert_relative_to_absolute_se3_array( + euler_se3, random_euler_se3_array + ) + np.testing.assert_allclose( + abs_se3_euler[..., StateSE3Index.XYZ], abs_se3_quat[..., QuaternionSE3Index.XYZ], atol=1e-6 + ) + # We compare rotation 
matrices to avoid issues with quaternion sign ambiguity
+            quat_rotation_matrices = get_rotation_matrices_from_quaternion_array(
+                abs_se3_quat[..., QuaternionSE3Index.QUATERNION]
+            )
+            euler_rotation_matrices = get_rotation_matrices_from_euler_array(
+                abs_se3_euler[..., StateSE3Index.EULER_ANGLES]
+            )
+            np.testing.assert_allclose(quat_rotation_matrices, euler_rotation_matrices, atol=1e-6)
+
+
+if __name__ == "__main__":
+    unittest.main()
diff --git a/d123/geometry/transform/transform_quaternion_se3.py b/d123/geometry/transform/transform_quaternion_se3.py
index 9a74b601..32a6858b 100644
--- a/d123/geometry/transform/transform_quaternion_se3.py
+++ b/d123/geometry/transform/transform_quaternion_se3.py
@@ -1,237 +1,213 @@
-# TODO: Properly implement and test these functions
+from typing import Union
+
+import numpy as np
+import numpy.typing as npt
+
+from d123.geometry import Vector3D
+from d123.geometry.geometry_index import Point3DIndex, QuaternionSE3Index, Vector3DIndex
+from d123.geometry.se import QuaternionSE3
+from d123.geometry.utils.rotation_utils import (
+    conjugate_quaternion_array,
+    get_rotation_matrix_from_quaternion_array,
+    multiply_quaternion_arrays,
+)
+
+
+def convert_absolute_to_relative_points_3d_array(
+    origin: Union[QuaternionSE3, npt.NDArray[np.float64]], points_3d_array: npt.NDArray[np.float64]
+) -> npt.NDArray[np.float64]:
+    """Converts 3D points from the absolute frame to the relative frame.
+
+    :param origin: The origin state in the absolute frame, as a QuaternionSE3 or np.ndarray.
+    :param points_3d_array: The 3D points in the absolute frame.
+    :raises TypeError: If the origin is not a QuaternionSE3 or np.ndarray.
+    :return: The 3D points in the relative frame, indexed by :class:`~d123.geometry.Point3DIndex`.
+    """
+
+    if isinstance(origin, QuaternionSE3):
+        t_origin = origin.point_3d.array
+        R_origin = origin.rotation_matrix
+    elif isinstance(origin, np.ndarray):
+        assert origin.ndim == 1 and origin.shape[-1] == len(QuaternionSE3Index)
+        t_origin = origin[QuaternionSE3Index.XYZ]
+        R_origin = get_rotation_matrix_from_quaternion_array(origin[QuaternionSE3Index.QUATERNION])
+    else:
+        raise TypeError(f"Expected QuaternionSE3 or np.ndarray, got {type(origin)}")
+
+    assert points_3d_array.ndim >= 1
+    assert points_3d_array.shape[-1] == len(Point3DIndex)
+
+    # Translate points to origin frame, then rotate to body frame
+    relative_points = (points_3d_array - t_origin) @ R_origin
+    return relative_points
+
+
+def convert_absolute_to_relative_se3_array(
+    origin: Union[QuaternionSE3, npt.NDArray[np.float64]], se3_array: npt.NDArray[np.float64]
+) -> npt.NDArray[np.float64]:
+    """Converts an SE3 array from the absolute frame to the relative frame.
+
+    :param origin: The origin state in the absolute frame, as a QuaternionSE3 or np.ndarray.
+    :param se3_array: The SE3 array in the absolute frame.
+    :raises TypeError: If the origin is not a QuaternionSE3 or np.ndarray.
+    :return: The SE3 array in the relative frame, indexed by :class:`~d123.geometry.QuaternionSE3Index`.
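+
+    A minimal round-trip sketch (names are illustrative; `origin` is any valid
+    origin pose and `abs_poses` a compatible (N, 7) array):
+
+        rel_poses = convert_absolute_to_relative_se3_array(origin, abs_poses)
+        recovered = convert_relative_to_absolute_se3_array(origin, rel_poses)
+        # `recovered` matches `abs_poses` up to floating point error; compare
+        # orientations via rotation matrices to avoid quaternion sign ambiguity.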
+    """
+    if isinstance(origin, QuaternionSE3):
+        origin_array = origin.array
+        t_origin = origin.point_3d.array
+        R_origin = origin.rotation_matrix
+    elif isinstance(origin, np.ndarray):
+        assert origin.ndim == 1 and origin.shape[-1] == len(QuaternionSE3Index)
+        origin_array = origin
+        t_origin = origin_array[QuaternionSE3Index.XYZ]
+        R_origin = get_rotation_matrix_from_quaternion_array(origin_array[QuaternionSE3Index.QUATERNION])
+    else:
+        raise TypeError(f"Expected QuaternionSE3 or np.ndarray, got {type(origin)}")
+
+    assert se3_array.ndim >= 1
+    assert se3_array.shape[-1] == len(QuaternionSE3Index)
+
+    abs_positions = se3_array[..., QuaternionSE3Index.XYZ]
+    abs_quaternions = se3_array[..., QuaternionSE3Index.QUATERNION]
+
+    rel_se3_array = np.zeros_like(se3_array)
+
+    # 1. Vectorized relative position calculation: translate and rotate
+    rel_positions = (abs_positions - t_origin) @ R_origin
+    rel_se3_array[..., QuaternionSE3Index.XYZ] = rel_positions
+
+    # 2. Vectorized relative orientation calculation: quaternion multiplication with conjugate
+    q_origin_conj = conjugate_quaternion_array(origin_array[QuaternionSE3Index.QUATERNION])
+    rel_quaternions = multiply_quaternion_arrays(q_origin_conj, abs_quaternions)
+
+    rel_se3_array[..., QuaternionSE3Index.QUATERNION] = rel_quaternions
+
+    return rel_se3_array
+
+
+def convert_relative_to_absolute_points_3d_array(
+    origin: Union[QuaternionSE3, npt.NDArray[np.float64]], points_3d_array: npt.NDArray[np.float64]
+) -> npt.NDArray[np.float64]:
+    """Converts 3D points from the relative frame to the absolute frame.
+
+    :param origin: The origin state in the absolute frame, as a QuaternionSE3 or np.ndarray.
+    :param points_3d_array: The 3D points in the relative frame, indexed by :class:`~d123.geometry.Point3DIndex`.
+    :raises TypeError: If the origin is not a QuaternionSE3 or np.ndarray.
+    :return: The 3D points in the absolute frame, indexed by :class:`~d123.geometry.Point3DIndex`.
+    """
+    if isinstance(origin, QuaternionSE3):
+        t_origin = origin.point_3d.array
+        R_origin = origin.rotation_matrix
+    elif isinstance(origin, np.ndarray):
+        assert origin.ndim == 1 and origin.shape[-1] == len(QuaternionSE3Index)
+        t_origin = origin[QuaternionSE3Index.XYZ]
+        R_origin = get_rotation_matrix_from_quaternion_array(origin[QuaternionSE3Index.QUATERNION])
+    else:
+        raise TypeError(f"Expected QuaternionSE3 or np.ndarray, got {type(origin)}")
+
+    assert points_3d_array.shape[-1] == len(Point3DIndex)
+
+    absolute_points = points_3d_array @ R_origin.T + t_origin
+    return absolute_points
+
+
+def convert_relative_to_absolute_se3_array(
+    origin: Union[QuaternionSE3, npt.NDArray[np.float64]], se3_array: npt.NDArray[np.float64]
+) -> npt.NDArray[np.float64]:
+    """Converts an SE3 array from the relative frame to the absolute frame.
+
+    :param origin: The origin state in the absolute frame, as a QuaternionSE3 or np.ndarray.
+    :param se3_array: The SE3 array in the relative frame.
+    :raises TypeError: If the origin is not a QuaternionSE3 or np.ndarray.
+    :return: The SE3 array in the absolute frame, indexed by :class:`~d123.geometry.QuaternionSE3Index`.
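+
+    Note: this is the inverse of :func:`convert_absolute_to_relative_se3_array`.
+    It composes p_abs = R_origin @ p_rel + t_origin for the position and the
+    Hamilton product q_abs = q_origin * q_rel for the orientation.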
+ """ + + if isinstance(origin, QuaternionSE3): + origin_array = origin.array + t_origin = origin.point_3d.array + R_origin = origin.rotation_matrix + elif isinstance(origin, np.ndarray): + assert origin.ndim == 1 and origin.shape[-1] == len(QuaternionSE3Index) + origin_array = origin + t_origin = origin_array[QuaternionSE3Index.XYZ] + R_origin = get_rotation_matrix_from_quaternion_array(origin_array[QuaternionSE3Index.QUATERNION]) + else: + raise TypeError(f"Expected QuaternionSE3 or np.ndarray, got {type(origin)}") + + assert se3_array.ndim >= 1 + assert se3_array.shape[-1] == len(QuaternionSE3Index) + + # Extract relative positions and orientations + rel_positions = se3_array[..., QuaternionSE3Index.XYZ] + rel_quaternions = se3_array[..., QuaternionSE3Index.QUATERNION] + + # Vectorized absolute position calculation: rotate and translate + abs_positions = (R_origin @ rel_positions.T).T + t_origin + abs_quaternions = multiply_quaternion_arrays(origin_array[QuaternionSE3Index.QUATERNION], rel_quaternions) + + # Prepare output array + abs_se3_array = se3_array.copy() + abs_se3_array[..., QuaternionSE3Index.XYZ] = abs_positions + abs_se3_array[..., QuaternionSE3Index.QUATERNION] = abs_quaternions + + return abs_se3_array + + +def translate_se3_along_z(state_se3: QuaternionSE3, distance: float) -> QuaternionSE3: + """Translates a QuaternionSE3 state along the Z-axis. + + :param state_se3: The QuaternionSE3 state to translate. + :param distance: The distance to translate along the Z-axis. + :return: The translated QuaternionSE3 state. + """ + R = state_se3.rotation_matrix + z_axis = R[:, 2] -# from typing import Union + state_se3_array = state_se3.array.copy() + state_se3_array[QuaternionSE3Index.XYZ] += distance * z_axis[Vector3DIndex.XYZ] + return QuaternionSE3.from_array(state_se3_array, copy=False) -# import numpy as np -# import numpy.typing as npt -# from d123.geometry import Vector3D -# from d123.geometry.geometry_index import Point3DIndex, QuaternionSE3Index, Vector3DIndex -# from d123.geometry.se import QuaternionSE3 - - -# def translate_qse3_along_z(state_se3: QuaternionSE3, distance: float) -> QuaternionSE3: -# """Translates a QuaternionSE3 state along the Z-axis. - -# :param state_se3: The QuaternionSE3 state to translate. -# :param distance: The distance to translate along the Z-axis. -# :return: The translated QuaternionSE3 state. -# """ -# R = state_se3.rotation_matrix -# z_axis = R[:, 2] - -# state_se3_array = state_se3.array.copy() -# state_se3_array[QuaternionSE3Index.XYZ] += distance * z_axis[Vector3DIndex.XYZ] -# return QuaternionSE3.from_array(state_se3_array, copy=False) - - -# def translate_qse3_along_y(state_se3: QuaternionSE3, distance: float) -> QuaternionSE3: -# """Translates a QuaternionSE3 state along the Y-axis. - -# :param state_se3: The QuaternionSE3 state to translate. -# :param distance: The distance to translate along the Y-axis. -# :return: The translated QuaternionSE3 state. -# """ -# R = state_se3.rotation_matrix -# y_axis = R[:, 1] - -# state_se3_array = state_se3.array.copy() -# state_se3_array[QuaternionSE3Index.XYZ] += distance * y_axis[Vector3DIndex.XYZ] -# return QuaternionSE3.from_array(state_se3_array, copy=False) - - -# def translate_qse3_along_x(state_se3: QuaternionSE3, distance: float) -> QuaternionSE3: -# """Translates a QuaternionSE3 state along the X-axis. - -# :param state_se3: The QuaternionSE3 state to translate. -# :param distance: The distance to translate along the X-axis. -# :return: The translated QuaternionSE3 state. 
-# """ -# R = state_se3.rotation_matrix -# x_axis = R[:, 0] - -# state_se3_array = state_se3.array.copy() -# state_se3_array[QuaternionSE3Index.XYZ] += distance * x_axis[Vector3DIndex.XYZ] -# return QuaternionSE3.from_array(state_se3_array, copy=False) - - -# def translate_qse3_along_body_frame(state_se3: QuaternionSE3, vector_3d: Vector3D) -> QuaternionSE3: -# """Translates a QuaternionSE3 state along a vector in the body frame. - -# :param state_se3: The QuaternionSE3 state to translate. -# :param vector_3d: The vector to translate along in the body frame. -# :return: The translated QuaternionSE3 state. -# """ -# R = state_se3.rotation_matrix -# world_translation = R @ vector_3d.array - -# state_se3_array = state_se3.array.copy() -# state_se3_array[QuaternionSE3Index.XYZ] += world_translation -# return QuaternionSE3.from_array(state_se3_array, copy=False) - - -# def convert_absolute_to_relative_qse3_array( -# origin: Union[QuaternionSE3, npt.NDArray[np.float64]], se3_array: npt.NDArray[np.float64] -# ) -> npt.NDArray[np.float64]: -# """Converts a QuaternionSE3 array from the absolute frame to the relative frame. - -# :param origin: The origin state in the absolute frame, as a QuaternionSE3 or np.ndarray [x,y,z,qw,qx,qy,qz]. -# :param se3_array: The QuaternionSE3 array in the absolute frame [N, 7]. -# :raises TypeError: If the origin is not a QuaternionSE3 or np.ndarray. -# :return: The QuaternionSE3 array in the relative frame [N, 7]. -# """ -# if isinstance(origin, QuaternionSE3): -# origin_ = origin -# elif isinstance(origin, np.ndarray): -# assert origin.ndim == 1 and origin.shape[-1] == len(QuaternionSE3Index) -# origin_ = QuaternionSE3.from_array(origin) -# else: -# raise TypeError(f"Expected QuaternionSE3 or np.ndarray, got {type(origin)}") - -# assert se3_array.ndim >= 1 -# assert se3_array.shape[-1] == len(QuaternionSE3Index) - -# t_origin = origin_.point_3d.array -# R_origin = origin_.rotation_matrix - -# # Extract absolute positions and quaternions -# abs_quaternions = se3_array[..., QuaternionSE3Index.QUATERNION] -# q_origin = origin_.quaternion - -# # Compute relative quaternions: q_rel = q_origin^-1 * q_abs -# if abs_quaternions.ndim == 1: -# rel_quaternions = _quaternion_multiply(_quaternion_multiply(q_origin), abs_quaternions) -# else: -# rel_quaternions = np.array([_quaternion_multiply(_quaternion_multiply(q_origin), q) for q in abs_quaternions]) - - -# # Prepare output array -# rel_se3_array = np.zeros_like(se3_array) -# rel_se3_array[..., QuaternionSE3Index.XYZ] = (se3_array[..., QuaternionSE3Index.XYZ] - t_origin) @ R_origin -# rel_se3_array[..., QuaternionSE3Index.QUATERNION] = rel_quaternions - -# return rel_se3_array - - -# def convert_relative_to_absolute_qse3_array( -# origin: Union[QuaternionSE3, npt.NDArray[np.float64]], se3_array: npt.NDArray[np.float64] -# ) -> npt.NDArray[np.float64]: -# """Converts a QuaternionSE3 array from the relative frame to the absolute frame. - -# :param origin: The origin state in the absolute frame, as a QuaternionSE3 or np.ndarray [x,y,z,qw,qx,qy,qz]. -# :param se3_array: The QuaternionSE3 array in the relative frame [N, 7]. -# :raises TypeError: If the origin is not a QuaternionSE3 or np.ndarray. -# :return: The QuaternionSE3 array in the absolute frame [N, 7]. 
-# """ -# if isinstance(origin, QuaternionSE3): -# t_origin = origin.translation -# q_origin = origin.quaternion -# R_origin = origin.rotation_matrix -# elif isinstance(origin, np.ndarray): -# assert origin.ndim == 1 and origin.shape[-1] == 7 -# t_origin = origin[:3] -# q_origin = origin[3:] -# origin_quat_se3 = QuaternionSE3.from_array(origin) -# R_origin = origin_quat_se3.rotation_matrix -# else: -# raise TypeError(f"Expected QuaternionSE3 or np.ndarray, got {type(origin)}") - -# assert se3_array.ndim >= 1 -# assert se3_array.shape[-1] == len(QuaternionSE3Index) - -# # Extract relative positions and quaternions -# rel_positions = se3_array[..., QuaternionSE3Index.XYZ] -# rel_quaternions = se3_array[..., QuaternionSE3Index.QUATERNION] - -# # Compute absolute positions: R_origin @ p_rel + t_origin -# abs_positions = (R_origin @ rel_positions.T).T + t_origin - -# # Compute absolute quaternions: q_abs = q_origin * q_rel -# if rel_quaternions.ndim == 1: -# abs_quaternions = _quaternion_multiply(q_origin, rel_quaternions) -# else: -# abs_quaternions = np.array([_quaternion_multiply(q_origin, q) for q in rel_quaternions]) - -# # Prepare output array -# abs_se3_array = se3_array.copy() -# abs_se3_array[..., :3] = abs_positions -# abs_se3_array[..., 3:] = abs_quaternions - -# return abs_se3_array - - -# def convert_absolute_to_relative_points_q3d_array( -# origin: Union[QuaternionSE3, npt.NDArray[np.float64]], points_3d_array: npt.NDArray[np.float64] -# ) -> npt.NDArray[np.float64]: -# """Converts 3D points from the absolute frame to the relative frame. - -# :param origin: The origin state in the absolute frame, as a QuaternionSE3 or np.ndarray [x,y,z,qw,qx,qy,qz]. -# :param points_3d_array: The 3D points in the absolute frame [N, 3]. -# :raises TypeError: If the origin is not a QuaternionSE3 or np.ndarray. -# :return: The 3D points in the relative frame [N, 3]. -# """ -# if isinstance(origin, QuaternionSE3): -# t_origin = origin.point_3d.array -# R_origin_inv = origin.rotation_matrix.T -# elif isinstance(origin, np.ndarray): -# assert origin.ndim == 1 and origin.shape[-1] == 7 -# t_origin = origin[:3] -# origin_quat_se3 = QuaternionSE3.from_array(origin) -# R_origin_inv = origin_quat_se3.rotation_matrix.T -# else: -# raise TypeError(f"Expected QuaternionSE3 or np.ndarray, got {type(origin)}") - -# assert points_3d_array.ndim >= 1 -# assert points_3d_array.shape[-1] == len(Point3DIndex) - -# # Transform points: R_origin^T @ (p_abs - t_origin) -# relative_points = (points_3d_array - t_origin) @ R_origin_inv.T -# return relative_points - - -# def convert_relative_to_absolute_points_q3d_array( -# origin: Union[QuaternionSE3, npt.NDArray[np.float64]], points_3d_array: npt.NDArray[np.float64] -# ) -> npt.NDArray[np.float64]: -# """Converts 3D points from the relative frame to the absolute frame. - -# :param origin: The origin state in the absolute frame, as a QuaternionSE3 or np.ndarray [x,y,z,qw,qx,qy,qz]. -# :param points_3d_array: The 3D points in the relative frame [N, 3]. -# :raises TypeError: If the origin is not a QuaternionSE3 or np.ndarray. -# :return: The 3D points in the absolute frame [N, 3]. 
-# """ -# if isinstance(origin, QuaternionSE3): -# t_origin = origin.point_3d.array -# R_origin = origin.rotation_matrix -# elif isinstance(origin, np.ndarray): -# assert origin.ndim == 1 and origin.shape[-1] == len(QuaternionSE3Index) -# t_origin = origin[QuaternionSE3Index.XYZ] -# origin_quat_se3 = QuaternionSE3.from_array(origin) -# R_origin = origin_quat_se3.rotation_matrix -# else: -# raise TypeError(f"Expected QuaternionSE3 or np.ndarray, got {type(origin)}") - -# assert points_3d_array.shape[-1] == 3 - -# # Transform points: R_origin @ p_rel + t_origin -# absolute_points = (R_origin @ points_3d_array.T).T + t_origin -# return absolute_points - - -# def _quaternion_multiply(q1: npt.NDArray[np.float64], q2: npt.NDArray[np.float64]) -> npt.NDArray[np.float64]: -# """Multiply two quaternions [w, x, y, z]. - -# :param q1: First quaternion [w, x, y, z]. -# :param q2: Second quaternion [w, x, y, z]. -# :return: Product quaternion [w, x, y, z]. -# """ -# w1, x1, y1, z1 = q1 -# w2, x2, y2, z2 = q2 - -# return np.array( -# [ -# w1 * w2 - x1 * x2 - y1 * y2 - z1 * z2, -# w1 * x2 + x1 * w2 + y1 * z2 - z1 * y2, -# w1 * y2 - x1 * z2 + y1 * w2 + z1 * x2, -# w1 * z2 + x1 * y2 - y1 * x2 + z1 * w2, -# ] -# ) +def translate_se3_along_y(state_se3: QuaternionSE3, distance: float) -> QuaternionSE3: + """Translates a QuaternionSE3 state along the Y-axis. + + :param state_se3: The QuaternionSE3 state to translate. + :param distance: The distance to translate along the Y-axis. + :return: The translated QuaternionSE3 state. + """ + R = state_se3.rotation_matrix + y_axis = R[:, 1] + + state_se3_array = state_se3.array.copy() + state_se3_array[QuaternionSE3Index.XYZ] += distance * y_axis[Vector3DIndex.XYZ] + return QuaternionSE3.from_array(state_se3_array, copy=False) + + +def translate_se3_along_x(state_se3: QuaternionSE3, distance: float) -> QuaternionSE3: + """Translates a QuaternionSE3 state along the X-axis. + + :param state_se3: The QuaternionSE3 state to translate. + :param distance: The distance to translate along the X-axis. + :return: The translated QuaternionSE3 state. + """ + R = state_se3.rotation_matrix + x_axis = R[:, 0] + + state_se3_array = state_se3.array.copy() + state_se3_array[QuaternionSE3Index.XYZ] += distance * x_axis[Vector3DIndex.XYZ] + return QuaternionSE3.from_array(state_se3_array, copy=False) + + +def translate_se3_along_body_frame(state_se3: QuaternionSE3, vector_3d: Vector3D) -> QuaternionSE3: + """Translates a QuaternionSE3 state along a vector in the body frame. + + :param state_se3: The QuaternionSE3 state to translate. + :param vector_3d: The vector to translate along in the body frame. + :return: The translated QuaternionSE3 state. 
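+
+    For example, translate_se3_along_body_frame(state_se3, Vector3D(d, 0.0, 0.0))
+    matches translate_se3_along_x(state_se3, d), since R @ [d, 0, 0] picks out
+    the body x-axis column of the rotation matrix.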
+ """ + R = state_se3.rotation_matrix + world_translation = R @ vector_3d.array + + state_se3_array = state_se3.array.copy() + state_se3_array[QuaternionSE3Index.XYZ] += world_translation + return QuaternionSE3.from_array(state_se3_array, copy=False) diff --git a/d123/geometry/transform/transform_se3.py b/d123/geometry/transform/transform_se3.py index abed7552..affb70a3 100644 --- a/d123/geometry/transform/transform_se3.py +++ b/d123/geometry/transform/transform_se3.py @@ -7,6 +7,7 @@ from d123.geometry.geometry_index import Point3DIndex, Vector3DIndex from d123.geometry.rotation import EulerAngles from d123.geometry.utils.rotation_utils import ( + get_rotation_matrices_from_euler_array, get_rotation_matrix_from_euler_array, normalize_angle, ) @@ -101,18 +102,16 @@ def convert_absolute_to_relative_se3_array( assert se3_array.ndim >= 1 assert se3_array.shape[-1] == len(StateSE3Index) - # Extract positions and orientations from se3_array abs_positions = se3_array[..., StateSE3Index.XYZ] - abs_euler_angles = se3_array[..., StateSE3Index.EULER_ANGLES] + abs_rotation_matrices = get_rotation_matrices_from_euler_array(se3_array[..., StateSE3Index.EULER_ANGLES]) + + # Convert absolute rotation matrices to relative rotation matrices + rel_rotation_matrices = np.einsum("ij,...jk->...ik", R_origin.T, abs_rotation_matrices) + rel_euler_angles = np.array([EulerAngles.from_rotation_matrix(R).array for R in rel_rotation_matrices]) # Vectorized relative position calculation rel_positions = (abs_positions - t_origin) @ R_origin - # Convert back to Euler angles (this may need a custom function) - # For now, using simple subtraction as approximation (this is incorrect for general rotations) - origin_euler = origin_array[StateSE3Index.EULER_ANGLES] - rel_euler_angles = abs_euler_angles - origin_euler - # Prepare output array rel_se3_array = se3_array.copy() rel_se3_array[..., StateSE3Index.XYZ] = rel_positions @@ -149,14 +148,14 @@ def convert_relative_to_absolute_se3_array( # Extract relative positions and orientations rel_positions = se3_array[..., StateSE3Index.XYZ] - rel_euler_angles = se3_array[..., StateSE3Index.EULER_ANGLES] + rel_rotation_matrices = get_rotation_matrices_from_euler_array(se3_array[..., StateSE3Index.EULER_ANGLES]) # Vectorized absolute position calculation: rotate and translate - abs_positions = (R_origin @ rel_positions.T).T + t_origin + abs_positions = (rel_positions @ R_origin.T) + t_origin - # Vectorized absolute orientation: add origin's euler angles - origin_euler = np.array([origin.roll, origin.pitch, origin.yaw], dtype=np.float64) - abs_euler_angles = rel_euler_angles + origin_euler + # Convert relative rotation matrices to absolute rotation matrices + abs_rotation_matrices = np.einsum("ij,...jk->...ik", R_origin, rel_rotation_matrices) + abs_euler_angles = np.array([EulerAngles.from_rotation_matrix(R).array for R in abs_rotation_matrices]) # Prepare output array abs_se3_array = se3_array.copy() diff --git a/d123/geometry/utils/rotation_utils.py b/d123/geometry/utils/rotation_utils.py index da75c654..1ad2396f 100644 --- a/d123/geometry/utils/rotation_utils.py +++ b/d123/geometry/utils/rotation_utils.py @@ -2,8 +2,9 @@ import numpy as np import numpy.typing as npt +import pyquaternion -from d123.geometry.geometry_index import EulerAnglesIndex +from d123.geometry.geometry_index import EulerAnglesIndex, QuaternionIndex def normalize_angle(angle: Union[float, npt.NDArray[np.float64]]) -> Union[float, npt.NDArray[np.float64]]: @@ -16,6 +17,12 @@ def normalize_angle(angle: 
Union[float, npt.NDArray[np.float64]]) -> Union[float def get_rotation_matrices_from_euler_array(euler_angles_array: npt.NDArray[np.float64]) -> npt.NDArray[np.float64]: + """ + Convert Euler angles to rotation matrices using Tait-Bryan ZYX convention (yaw-pitch-roll). + + Convention: Intrinsic rotations in order Z-Y-X (yaw, pitch, roll) + Equivalent to: R = R_x(roll) @ R_y(pitch) @ R_z(yaw) + """ assert euler_angles_array.ndim == 2 and euler_angles_array.shape[1] == len(EulerAnglesIndex) # Extract roll, pitch, yaw for all samples at once @@ -32,7 +39,7 @@ def get_rotation_matrices_from_euler_array(euler_angles_array: npt.NDArray[np.fl batch_size = euler_angles_array.shape[0] rotation_matrices = np.zeros((batch_size, 3, 3), dtype=np.float64) - # R_x @ R_y @ R_z components + # ZYX Tait-Bryan rotation matrix elements rotation_matrices[:, 0, 0] = cos_pitch * cos_yaw rotation_matrices[:, 0, 1] = -cos_pitch * sin_yaw rotation_matrices[:, 0, 2] = sin_pitch @@ -50,4 +57,98 @@ def get_rotation_matrices_from_euler_array(euler_angles_array: npt.NDArray[np.fl def get_rotation_matrix_from_euler_array(euler_angles: npt.NDArray[np.float64]) -> npt.NDArray[np.float64]: assert euler_angles.ndim == 1 and euler_angles.shape[0] == len(EulerAnglesIndex) - return get_rotation_matrices_from_euler_array(euler_angles[None, :])[0] + return get_rotation_matrices_from_euler_array(euler_angles[None, ...])[0] + + +def get_rotation_matrices_from_quaternion_array(quaternion_array: npt.NDArray[np.float64]) -> npt.NDArray[np.float64]: + assert quaternion_array.ndim == 2 and quaternion_array.shape[1] == len(QuaternionIndex) + # TODO: Optimize this function to avoid the for loop, possibly by using pyquaternion's internal methods directly. + rotation_matrices = np.zeros((quaternion_array.shape[0], 3, 3), dtype=np.float64) + for i, quaternion in enumerate(quaternion_array): + rotation_matrices[i] = pyquaternion.Quaternion(array=quaternion).rotation_matrix + return rotation_matrices + + +def get_rotation_matrix_from_quaternion_array(quaternion: npt.NDArray[np.float64]) -> npt.NDArray[np.float64]: + assert quaternion.ndim == 1 and quaternion.shape[0] == len(QuaternionIndex) + return get_rotation_matrices_from_quaternion_array(quaternion[None, :])[0] + + +def conjugate_quaternion_array(quaternion: npt.NDArray[np.float64]) -> npt.NDArray[np.float64]: + """Computes the conjugate of an array of quaternions. + in the order [qw, qx, qy, qz]. + :param quaternion: Array of quaternions. + :return: Array of conjugated quaternions. + """ + assert quaternion.ndim >= 1 + assert quaternion.shape[-1] == len(QuaternionIndex) + conjugated_quaternions = np.zeros_like(quaternion) + conjugated_quaternions[..., QuaternionIndex.SCALAR] = quaternion[..., QuaternionIndex.SCALAR] + conjugated_quaternions[..., QuaternionIndex.VECTOR] = -quaternion[..., QuaternionIndex.VECTOR] + return conjugated_quaternions + + +def invert_quaternion_array(quaternion: npt.NDArray[np.float64]) -> npt.NDArray[np.float64]: + """Computes the inverse of an array of quaternions. + in the order [qw, qx, qy, qz]. + :param quaternion: Array of quaternions. + :return: Array of inverted quaternions. + """ + assert quaternion.ndim >= 1 + assert quaternion.shape[-1] == len(QuaternionIndex) + norm_squared = np.sum(quaternion**2, axis=-1, keepdims=True) + assert np.all(norm_squared > 0), "Cannot invert a quaternion with zero norm." 
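+    # q^-1 = conj(q) / ||q||^2, so for unit quaternions the inverse reduces to
+    # the conjugate computed below.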
+ conjugated_quaternions = conjugate_quaternion_array(quaternion) + inverted_quaternions = conjugated_quaternions / norm_squared + return inverted_quaternions + + +def normalize_quaternion_array(quaternion: npt.NDArray[np.float64]) -> npt.NDArray[np.float64]: + """Normalizes an array of quaternions. + in the order [qw, qx, qy, qz]. + :param quaternion: Array of quaternions. + :return: Array of normalized quaternions. + """ + assert quaternion.ndim >= 1 + assert quaternion.shape[-1] == len(QuaternionIndex) + norm = np.linalg.norm(quaternion, axis=-1, keepdims=True) + assert np.all(norm > 0), "Cannot normalize a quaternion with zero norm." + normalized_quaternions = quaternion / norm + return normalized_quaternions + + +def multiply_quaternion_arrays(q1: npt.NDArray[np.float64], q2: npt.NDArray[np.float64]) -> npt.NDArray[np.float64]: + """Multiplies two arrays of quaternions element-wise. + in the order [qw, qx, qy, qz]. + :param q1: First array of quaternions. + :param q2: Second array of quaternions. + :return: Array of resulting quaternions after multiplication. + """ + assert q1.ndim >= 1 + assert q2.ndim >= 1 + assert q1.shape[-1] == q2.shape[-1] == len(QuaternionIndex) + + # Vectorized quaternion multiplication + qw1, qx1, qy1, qz1 = ( + q1[..., QuaternionIndex.QW], + q1[..., QuaternionIndex.QX], + q1[..., QuaternionIndex.QY], + q1[..., QuaternionIndex.QZ], + ) + qw2, qx2, qy2, qz2 = ( + q2[..., QuaternionIndex.QW], + q2[..., QuaternionIndex.QX], + q2[..., QuaternionIndex.QY], + q2[..., QuaternionIndex.QZ], + ) + + quaternions = np.stack( + [ + qw1 * qw2 - qx1 * qx2 - qy1 * qy2 - qz1 * qz2, + qw1 * qx2 + qx1 * qw2 + qy1 * qz2 - qz1 * qy2, + qw1 * qy2 - qx1 * qz2 + qy1 * qw2 + qz1 * qx2, + qw1 * qz2 + qx1 * qy2 - qy1 * qx2 + qz1 * qw2, + ], + axis=-1, + ) + return quaternions From 78592a1bfee311e93d1ab31e613ae7adff5a6b34 Mon Sep 17 00:00:00 2001 From: Daniel Dauner Date: Mon, 22 Sep 2025 10:40:10 +0200 Subject: [PATCH 037/145] Expand rotation utils and rely less on pyquaternion lib (#43) --- d123/geometry/rotation.py | 20 ++- d123/geometry/utils/rotation_utils.py | 237 +++++++++++++++++++++++--- 2 files changed, 224 insertions(+), 33 deletions(-) diff --git a/d123/geometry/rotation.py b/d123/geometry/rotation.py index 3fc4afa9..1ee21e25 100644 --- a/d123/geometry/rotation.py +++ b/d123/geometry/rotation.py @@ -8,7 +8,12 @@ from d123.common.utils.mixin import ArrayMixin from d123.geometry.geometry_index import EulerAnglesIndex, QuaternionIndex -from d123.geometry.utils.rotation_utils import get_rotation_matrix_from_euler_array +from d123.geometry.utils.rotation_utils import ( + get_euler_array_from_quaternion_array, + get_quaternion_array_from_rotation_matrix, + get_rotation_matrix_from_euler_array, + get_rotation_matrix_from_quaternion_array, +) class EulerAngles(ArrayMixin): @@ -89,6 +94,10 @@ def array(self) -> npt.NDArray[np.float64]: """ return self._array + @property + def quaternion(self) -> Quaternion: + return Quaternion.from_euler_angles(self) + @cached_property def rotation_matrix(self) -> npt.NDArray[np.float64]: """Returns the 3x3 rotation matrix representation of the Euler angles. @@ -110,7 +119,6 @@ def __hash__(self): class Quaternion(ArrayMixin): """ Represents a quaternion for 3D rotations. - NOTE: This class uses the pyquaternion library for internal computations. 
""" _array: npt.NDArray[np.float64] @@ -147,8 +155,7 @@ def from_rotation_matrix(cls, rotation_matrix: npt.NDArray[np.float64]) -> Quate """ assert rotation_matrix.ndim == 2 assert rotation_matrix.shape == (3, 3) - quaternion = pyquaternion.Quaternion(matrix=rotation_matrix) - return Quaternion(qw=quaternion.w, qx=quaternion.x, qy=quaternion.y, qz=quaternion.z) + return Quaternion.from_array(get_quaternion_array_from_rotation_matrix(rotation_matrix), copy=False) @classmethod def from_euler_angles(cls, euler_angles: EulerAngles) -> Quaternion: @@ -217,8 +224,7 @@ def euler_angles(self) -> EulerAngles: :return: An EulerAngles instance representing the Euler angles. """ - yaw, pitch, roll = self.pyquaternion.yaw_pitch_roll - return EulerAngles(roll=roll, pitch=pitch, yaw=yaw) + return EulerAngles.from_array(get_euler_array_from_quaternion_array(self.array), copy=False) @cached_property def rotation_matrix(self) -> npt.NDArray[np.float64]: @@ -226,7 +232,7 @@ def rotation_matrix(self) -> npt.NDArray[np.float64]: :return: A 3x3 numpy array representing the rotation matrix. """ - return self.pyquaternion.rotation_matrix + return get_rotation_matrix_from_quaternion_array(self.array) def __iter__(self): """Iterator over quaternion components.""" diff --git a/d123/geometry/utils/rotation_utils.py b/d123/geometry/utils/rotation_utils.py index 1ad2396f..871ec29c 100644 --- a/d123/geometry/utils/rotation_utils.py +++ b/d123/geometry/utils/rotation_utils.py @@ -2,11 +2,27 @@ import numpy as np import numpy.typing as npt -import pyquaternion + +# import pyquaternion from d123.geometry.geometry_index import EulerAnglesIndex, QuaternionIndex +def batch_matmul(A: npt.NDArray[np.float64], B: npt.NDArray[np.float64]) -> npt.NDArray[np.float64]: + """Batch matrix multiplication for arrays of matrices. + # TODO: move somewhere else + + :param A: Array of shape (..., M, N) + :param B: Array of shape (..., N, P) + :return: Array of shape (..., M, P) resulting from batch matrix multiplication of A and B. 
+ """ + assert A.ndim >= 2 and B.ndim >= 2 + assert ( + A.shape[-1] == B.shape[-2] + ), f"Inner dimensions must match for matrix multiplication, got {A.shape} and {B.shape}" + return np.einsum("...ij,...jk->...ik", A, B) + + def normalize_angle(angle: Union[float, npt.NDArray[np.float64]]) -> Union[float, npt.NDArray[np.float64]]: """ Map a angle in range [-π, π] @@ -55,65 +71,154 @@ def get_rotation_matrices_from_euler_array(euler_angles_array: npt.NDArray[np.fl return rotation_matrices +def get_euler_array_from_rotation_matrix(rotation_matrix: npt.NDArray[np.float64]) -> npt.NDArray[np.float64]: + raise NotImplementedError + + +def get_quaternion_array_from_rotation_matrices(rotation_matrices: npt.NDArray[np.float64]) -> npt.NDArray[np.float64]: + assert rotation_matrices.ndim == 3 + assert rotation_matrices.shape[-1] == rotation_matrices.shape[-2] == 3 + # http://www.euclideanspace.com/maths/geometry/rotations/conversions/matrixToQuaternion/ + + # TODO: Update with: + # https://d3cw3dd2w32x2b.cloudfront.net/wp-content/uploads/2015/01/matrix-to-quat.pdf + + N = rotation_matrices.shape[0] + quaternions = np.zeros((N, 4), dtype=np.float64) + + # Extract rotation matrix elements for vectorized operations + R = rotation_matrices + + # Compute trace for each matrix + trace = np.trace(R, axis1=1, axis2=2) + + # Case 1: trace > 0 (most common case) + mask1 = trace > 0 + s1 = np.sqrt(trace[mask1] + 1.0) * 2 # s = 4 * qw + quaternions[mask1, QuaternionIndex.QW] = 0.25 * s1 + quaternions[mask1, QuaternionIndex.QX] = (R[mask1, 2, 1] - R[mask1, 1, 2]) / s1 + quaternions[mask1, QuaternionIndex.QY] = (R[mask1, 0, 2] - R[mask1, 2, 0]) / s1 + quaternions[mask1, QuaternionIndex.QZ] = (R[mask1, 1, 0] - R[mask1, 0, 1]) / s1 + + # Case 2: R[0,0] > R[1,1] and R[0,0] > R[2,2] + mask2 = (~mask1) & (R[:, 0, 0] > R[:, 1, 1]) & (R[:, 0, 0] > R[:, 2, 2]) + s2 = np.sqrt(1.0 + R[mask2, 0, 0] - R[mask2, 1, 1] - R[mask2, 2, 2]) * 2 # s = 4 * qx + quaternions[mask2, QuaternionIndex.QW] = (R[mask2, 2, 1] - R[mask2, 1, 2]) / s2 + quaternions[mask2, QuaternionIndex.QX] = 0.25 * s2 # x + quaternions[mask2, QuaternionIndex.QY] = (R[mask2, 0, 1] + R[mask2, 1, 0]) / s2 + quaternions[mask2, QuaternionIndex.QZ] = (R[mask2, 0, 2] + R[mask2, 2, 0]) / s2 + + # Case 3: R[1,1] > R[2,2] + mask3 = (~mask1) & (~mask2) & (R[:, 1, 1] > R[:, 2, 2]) + s3 = np.sqrt(1.0 + R[mask3, 1, 1] - R[mask3, 0, 0] - R[mask3, 2, 2]) * 2 # s = 4 * qy + quaternions[mask3, QuaternionIndex.QW] = (R[mask3, 0, 2] - R[mask3, 2, 0]) / s3 + quaternions[mask3, QuaternionIndex.QX] = (R[mask3, 0, 1] + R[mask3, 1, 0]) / s3 + quaternions[mask3, QuaternionIndex.QY] = 0.25 * s3 # y + quaternions[mask3, QuaternionIndex.QZ] = (R[mask3, 1, 2] + R[mask3, 2, 1]) / s3 + + # Case 4: R[2,2] is largest + mask4 = (~mask1) & (~mask2) & (~mask3) + s4 = np.sqrt(1.0 + R[mask4, 2, 2] - R[mask4, 0, 0] - R[mask4, 1, 1]) * 2 # s = 4 * qz + quaternions[mask4, QuaternionIndex.QW] = (R[mask4, 1, 0] - R[mask4, 0, 1]) / s4 + quaternions[mask4, QuaternionIndex.QX] = (R[mask4, 0, 2] + R[mask4, 2, 0]) / s4 + quaternions[mask4, QuaternionIndex.QY] = (R[mask4, 1, 2] + R[mask4, 2, 1]) / s4 + quaternions[mask4, QuaternionIndex.QZ] = 0.25 * s4 # z + + assert np.all(mask1 | mask2 | mask3 | mask4), "All matrices should fall into one of the four cases." 
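+    # Branching on the largest of (trace, R00, R11, R22) keeps the sqrt
+    # argument, and hence the divisor s, well away from zero, which is what
+    # makes this case split numerically stable.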
+ + return normalize_quaternion_array(quaternions) + + +def get_quaternion_array_from_rotation_matrix(rotation_matrix: npt.NDArray[np.float64]) -> npt.NDArray[np.float64]: + assert rotation_matrix.ndim == 2 and rotation_matrix.shape == (3, 3) + return get_quaternion_array_from_rotation_matrices(rotation_matrix[None, ...])[0] + + def get_rotation_matrix_from_euler_array(euler_angles: npt.NDArray[np.float64]) -> npt.NDArray[np.float64]: assert euler_angles.ndim == 1 and euler_angles.shape[0] == len(EulerAnglesIndex) return get_rotation_matrices_from_euler_array(euler_angles[None, ...])[0] def get_rotation_matrices_from_quaternion_array(quaternion_array: npt.NDArray[np.float64]) -> npt.NDArray[np.float64]: - assert quaternion_array.ndim == 2 and quaternion_array.shape[1] == len(QuaternionIndex) - # TODO: Optimize this function to avoid the for loop, possibly by using pyquaternion's internal methods directly. - rotation_matrices = np.zeros((quaternion_array.shape[0], 3, 3), dtype=np.float64) - for i, quaternion in enumerate(quaternion_array): - rotation_matrices[i] = pyquaternion.Quaternion(array=quaternion).rotation_matrix - return rotation_matrices + assert quaternion_array.ndim == 2 and quaternion_array.shape[-1] == len(QuaternionIndex) + norm_quaternion = normalize_quaternion_array(quaternion_array) + Q_matrices = get_q_matrices(norm_quaternion) + Q_bar_matrices = get_q_bar_matrices(norm_quaternion) + rotation_matrix = batch_matmul(Q_matrices, Q_bar_matrices.conj().swapaxes(-1, -2)) + return rotation_matrix[:, 1:][:, :, 1:] + + +def get_rotation_matrix_from_quaternion_array(quaternion_array: npt.NDArray[np.float64]) -> npt.NDArray[np.float64]: + assert quaternion_array.ndim == 1 and quaternion_array.shape[0] == len(QuaternionIndex) + return get_rotation_matrices_from_quaternion_array(quaternion_array[None, :])[0] + + +def get_euler_array_from_quaternion_array(quaternion_array: npt.NDArray[np.float64]) -> npt.NDArray[np.float64]: + assert quaternion_array.ndim >= 1 and quaternion_array.shape[-1] == len(QuaternionIndex) + norm_quaternion = normalize_quaternion_array(quaternion_array) + QW, QX, QY, QZ = ( + norm_quaternion[..., QuaternionIndex.QW], + norm_quaternion[..., QuaternionIndex.QX], + norm_quaternion[..., QuaternionIndex.QY], + norm_quaternion[..., QuaternionIndex.QZ], + ) + euler_angles = np.zeros_like(quaternion_array[..., :3]) + euler_angles[..., EulerAnglesIndex.YAW] = np.arctan2( + 2 * (QW * QZ - QX * QY), + 1 - 2 * (QY**2 + QZ**2), + ) + euler_angles[..., EulerAnglesIndex.PITCH] = np.arcsin( + np.clip(2 * (QW * QY + QZ * QX), -1.0, 1.0), + ) + euler_angles[..., EulerAnglesIndex.ROLL] = np.arctan2( + 2 * (QW * QX - QY * QZ), + 1 - 2 * (QX**2 + QY**2), + ) -def get_rotation_matrix_from_quaternion_array(quaternion: npt.NDArray[np.float64]) -> npt.NDArray[np.float64]: - assert quaternion.ndim == 1 and quaternion.shape[0] == len(QuaternionIndex) - return get_rotation_matrices_from_quaternion_array(quaternion[None, :])[0] + return euler_angles -def conjugate_quaternion_array(quaternion: npt.NDArray[np.float64]) -> npt.NDArray[np.float64]: +def conjugate_quaternion_array(quaternion_array: npt.NDArray[np.float64]) -> npt.NDArray[np.float64]: """Computes the conjugate of an array of quaternions. in the order [qw, qx, qy, qz]. :param quaternion: Array of quaternions. :return: Array of conjugated quaternions. 
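    For example, the conjugate of [qw, qx, qy, qz] is [qw, -qx, -qy, -qz]; for
    unit quaternions this represents the inverse rotation.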
""" - assert quaternion.ndim >= 1 - assert quaternion.shape[-1] == len(QuaternionIndex) - conjugated_quaternions = np.zeros_like(quaternion) - conjugated_quaternions[..., QuaternionIndex.SCALAR] = quaternion[..., QuaternionIndex.SCALAR] - conjugated_quaternions[..., QuaternionIndex.VECTOR] = -quaternion[..., QuaternionIndex.VECTOR] + assert quaternion_array.ndim >= 1 + assert quaternion_array.shape[-1] == len(QuaternionIndex) + conjugated_quaternions = np.zeros_like(quaternion_array) + conjugated_quaternions[..., QuaternionIndex.SCALAR] = quaternion_array[..., QuaternionIndex.SCALAR] + conjugated_quaternions[..., QuaternionIndex.VECTOR] = -quaternion_array[..., QuaternionIndex.VECTOR] return conjugated_quaternions -def invert_quaternion_array(quaternion: npt.NDArray[np.float64]) -> npt.NDArray[np.float64]: +def invert_quaternion_array(quaternion_array: npt.NDArray[np.float64]) -> npt.NDArray[np.float64]: """Computes the inverse of an array of quaternions. in the order [qw, qx, qy, qz]. :param quaternion: Array of quaternions. :return: Array of inverted quaternions. """ - assert quaternion.ndim >= 1 - assert quaternion.shape[-1] == len(QuaternionIndex) - norm_squared = np.sum(quaternion**2, axis=-1, keepdims=True) + assert quaternion_array.ndim >= 1 + assert quaternion_array.shape[-1] == len(QuaternionIndex) + norm_squared = np.sum(quaternion_array**2, axis=-1, keepdims=True) assert np.all(norm_squared > 0), "Cannot invert a quaternion with zero norm." - conjugated_quaternions = conjugate_quaternion_array(quaternion) + conjugated_quaternions = conjugate_quaternion_array(quaternion_array) inverted_quaternions = conjugated_quaternions / norm_squared return inverted_quaternions -def normalize_quaternion_array(quaternion: npt.NDArray[np.float64]) -> npt.NDArray[np.float64]: +def normalize_quaternion_array(quaternion_array: npt.NDArray[np.float64]) -> npt.NDArray[np.float64]: """Normalizes an array of quaternions. in the order [qw, qx, qy, qz]. :param quaternion: Array of quaternions. :return: Array of normalized quaternions. """ - assert quaternion.ndim >= 1 - assert quaternion.shape[-1] == len(QuaternionIndex) - norm = np.linalg.norm(quaternion, axis=-1, keepdims=True) + assert quaternion_array.ndim >= 1 + assert quaternion_array.shape[-1] == len(QuaternionIndex) + norm = np.linalg.norm(quaternion_array, axis=-1, keepdims=True) assert np.all(norm > 0), "Cannot normalize a quaternion with zero norm." - normalized_quaternions = quaternion / norm + normalized_quaternions = quaternion_array / norm return normalized_quaternions @@ -152,3 +257,83 @@ def multiply_quaternion_arrays(q1: npt.NDArray[np.float64], q2: npt.NDArray[np.f axis=-1, ) return quaternions + + +def get_q_matrices(quaternion_array: npt.NDArray[np.float64]) -> npt.NDArray[np.float64]: + """Computes the Q matrices for an array of quaternions. + in the order [qw, qx, qy, qz]. + :param quaternion: Array of quaternions. + :return: Array of Q matrices. 
+ """ + assert quaternion_array.ndim >= 1 + assert quaternion_array.shape[-1] == len(QuaternionIndex) + + qw = quaternion_array[..., QuaternionIndex.QW] + qx = quaternion_array[..., QuaternionIndex.QX] + qy = quaternion_array[..., QuaternionIndex.QY] + qz = quaternion_array[..., QuaternionIndex.QZ] + + batch_shape = quaternion_array.shape[:-1] + Q_matrices = np.zeros(batch_shape + (4, 4), dtype=np.float64) + + Q_matrices[..., 0, 0] = qw + Q_matrices[..., 0, 1] = -qx + Q_matrices[..., 0, 2] = -qy + Q_matrices[..., 0, 3] = -qz + + Q_matrices[..., 1, 0] = qx + Q_matrices[..., 1, 1] = qw + Q_matrices[..., 1, 2] = -qz + Q_matrices[..., 1, 3] = qy + + Q_matrices[..., 2, 0] = qy + Q_matrices[..., 2, 1] = qz + Q_matrices[..., 2, 2] = qw + Q_matrices[..., 2, 3] = -qx + + Q_matrices[..., 3, 0] = qz + Q_matrices[..., 3, 1] = -qy + Q_matrices[..., 3, 2] = qx + Q_matrices[..., 3, 3] = qw + + return Q_matrices + + +def get_q_bar_matrices(quaternion_array: npt.NDArray[np.float64]) -> npt.NDArray[np.float64]: + """Computes the Q-bar matrices for an array of quaternions. + in the order [qw, qx, qy, qz]. + :param quaternion: Array of quaternions. + :return: Array of Q-bar matrices. + """ + assert quaternion_array.ndim >= 1 + assert quaternion_array.shape[-1] == len(QuaternionIndex) + + qw = quaternion_array[..., QuaternionIndex.QW] + qx = quaternion_array[..., QuaternionIndex.QX] + qy = quaternion_array[..., QuaternionIndex.QY] + qz = quaternion_array[..., QuaternionIndex.QZ] + + batch_shape = quaternion_array.shape[:-1] + Q_bar_matrices = np.zeros(batch_shape + (4, 4), dtype=np.float64) + + Q_bar_matrices[..., 0, 0] = qw + Q_bar_matrices[..., 0, 1] = -qx + Q_bar_matrices[..., 0, 2] = -qy + Q_bar_matrices[..., 0, 3] = -qz + + Q_bar_matrices[..., 1, 0] = qx + Q_bar_matrices[..., 1, 1] = qw + Q_bar_matrices[..., 1, 2] = qz + Q_bar_matrices[..., 1, 3] = -qy + + Q_bar_matrices[..., 2, 0] = qy + Q_bar_matrices[..., 2, 1] = -qz + Q_bar_matrices[..., 2, 2] = qw + Q_bar_matrices[..., 2, 3] = qx + + Q_bar_matrices[..., 3, 0] = qz + Q_bar_matrices[..., 3, 1] = qy + Q_bar_matrices[..., 3, 2] = -qx + Q_bar_matrices[..., 3, 3] = qw + + return Q_bar_matrices From af72fbf11c7c59c670725b63fd463ad0c6d6aebc Mon Sep 17 00:00:00 2001 From: Daniel Dauner Date: Mon, 22 Sep 2025 11:42:33 +0200 Subject: [PATCH 038/145] Renaming to transition se3 to quaternion representation in `geometry` (#43), added tests (#44) --- d123/common/datatypes/detection/detection.py | 6 +- .../datatypes/vehicle_state/ego_state.py | 16 +- .../vehicle_state/vehicle_parameters.py | 12 +- .../common/visualization/matplotlib/camera.py | 4 +- d123/common/visualization/matplotlib/utils.py | 4 +- d123/common/visualization/viser/utils.py | 6 +- d123/common/visualization/viser/utils_v2.py | 4 +- .../av2/av2_data_converter.py | 8 +- .../nuplan/nuplan_data_converter.py | 6 +- .../wopd/wopd_data_converter.py | 12 +- d123/geometry/__init__.py | 4 +- d123/geometry/bounding_box.py | 10 +- d123/geometry/geometry_index.py | 4 +- d123/geometry/se.py | 326 +++--- d123/geometry/test/test_bounding_box.py | 6 +- d123/geometry/test/test_transform.py | 976 ------------------ d123/geometry/transform/__init__.py | 15 - d123/geometry/transform/test/__init__.py | 0 .../test/test_transform_consistency.py | 475 +++++++++ .../test/test_transform_euler_se3.py | 335 ++++++ .../transform/test/test_transform_se2.py | 220 ++++ .../test/test_transform_se3.py} | 129 ++- .../geometry/transform/transform_euler_se3.py | 169 +++ .../transform/transform_quaternion_se3.py | 213 ---- 
From af72fbf11c7c59c670725b63fd463ad0c6d6aebc Mon Sep 17 00:00:00 2001
From: Daniel Dauner
Date: Mon, 22 Sep 2025 11:42:33 +0200
Subject: [PATCH 038/145] Renaming to transition se3 to quaternion
 representation in `geometry` (#43), added tests (#44)

---
 d123/common/datatypes/detection/detection.py  |   6 +-
 .../datatypes/vehicle_state/ego_state.py      |  16 +-
 .../vehicle_state/vehicle_parameters.py       |  12 +-
 .../common/visualization/matplotlib/camera.py |   4 +-
 d123/common/visualization/matplotlib/utils.py |   4 +-
 d123/common/visualization/viser/utils.py      |   6 +-
 d123/common/visualization/viser/utils_v2.py   |   4 +-
 .../av2/av2_data_converter.py                 |   8 +-
 .../nuplan/nuplan_data_converter.py           |   6 +-
 .../wopd/wopd_data_converter.py               |  12 +-
 d123/geometry/__init__.py                     |   4 +-
 d123/geometry/bounding_box.py                 |  10 +-
 d123/geometry/geometry_index.py               |   4 +-
 d123/geometry/se.py                           | 326 +++---
 d123/geometry/test/test_bounding_box.py       |   6 +-
 d123/geometry/test/test_transform.py          | 976 ------------------
 d123/geometry/transform/__init__.py           |  15 -
 d123/geometry/transform/test/__init__.py      |   0
 .../test/test_transform_consistency.py        | 475 +++++++++
 .../test/test_transform_euler_se3.py          | 335 ++++++
 .../transform/test/test_transform_se2.py      | 220 ++++
 .../test/test_transform_se3.py}               | 129 ++-
 .../geometry/transform/transform_euler_se3.py | 169 +++
 .../transform/transform_quaternion_se3.py     | 213 ----
 d123/geometry/transform/transform_se3.py      | 240 +++---
 25 files changed, 1609 insertions(+), 1591 deletions(-)
 delete mode 100644 d123/geometry/test/test_transform.py
 create mode 100644 d123/geometry/transform/test/__init__.py
 create mode 100644 d123/geometry/transform/test/test_transform_consistency.py
 create mode 100644 d123/geometry/transform/test/test_transform_euler_se3.py
 create mode 100644 d123/geometry/transform/test/test_transform_se2.py
 rename d123/geometry/{test/test_transform_quaternion.py => transform/test/test_transform_se3.py} (51%)
 create mode 100644 d123/geometry/transform/transform_euler_se3.py
 delete mode 100644 d123/geometry/transform/transform_quaternion_se3.py

diff --git a/d123/common/datatypes/detection/detection.py b/d123/common/datatypes/detection/detection.py
index 075129b4..ff18a561 100644
--- a/d123/common/datatypes/detection/detection.py
+++ b/d123/common/datatypes/detection/detection.py
@@ -7,7 +7,7 @@
 from d123.common.datatypes.detection.detection_types import DetectionType
 from d123.common.datatypes.time.time_point import TimePoint
 from d123.common.utils.enums import SerialIntEnum
-from d123.geometry import BoundingBoxSE2, BoundingBoxSE3, OccupancyMap2D, StateSE2, StateSE3, Vector2D, Vector3D
+from d123.geometry import BoundingBoxSE2, BoundingBoxSE3, OccupancyMap2D, StateSE2, EulerStateSE3, Vector2D, Vector3D
 
 
 @dataclass
@@ -51,11 +51,11 @@ def shapely_polygon(self) -> shapely.geometry.Polygon:
         return self.bounding_box_se3.shapely_polygon
 
     @property
-    def center(self) -> StateSE3:
+    def center(self) -> EulerStateSE3:
         return self.bounding_box_se3.center
 
     @property
-    def center_se3(self) -> StateSE3:
+    def center_se3(self) -> EulerStateSE3:
         return self.bounding_box_se3.center_se3
 
     @property
diff --git a/d123/common/datatypes/vehicle_state/ego_state.py b/d123/common/datatypes/vehicle_state/ego_state.py
index d0487e60..1d378fcd 100644
--- a/d123/common/datatypes/vehicle_state/ego_state.py
+++ b/d123/common/datatypes/vehicle_state/ego_state.py
@@ -23,7 +23,7 @@
     rear_axle_se3_to_center_se3,
 )
 from d123.common.utils.enums import classproperty
-from d123.geometry import BoundingBoxSE2, BoundingBoxSE3, StateSE2, StateSE3, Vector2D, Vector3D
+from d123.geometry import BoundingBoxSE2, BoundingBoxSE3, StateSE2, EulerStateSE3, Vector2D, Vector3D
 
 # TODO: Find an appropriate way to handle SE2 and SE3 states.
@@ -59,7 +59,7 @@ def DYNAMIC_VEHICLE_STATE(cls) -> slice:
 
 @dataclass
 class EgoStateSE3:
-    center_se3: StateSE3
+    center_se3: EulerStateSE3
     dynamic_state_se3: DynamicStateSE3
     vehicle_parameters: VehicleParameters
     timepoint: Optional[TimePoint] = None
@@ -72,14 +72,14 @@ def from_array(
         vehicle_parameters: VehicleParameters,
         timepoint: Optional[TimePoint] = None,
     ) -> EgoStateSE3:
-        state_se3 = StateSE3.from_array(array[EgoStateSE3Index.SE3])
+        state_se3 = EulerStateSE3.from_array(array[EgoStateSE3Index.SE3])
         dynamic_state = DynamicStateSE3.from_array(array[EgoStateSE3Index.DYNAMIC_VEHICLE_STATE])
         return EgoStateSE3(state_se3, dynamic_state, vehicle_parameters, timepoint)
 
     @classmethod
     def from_rear_axle(
         cls,
-        rear_axle_se3: StateSE3,
+        rear_axle_se3: EulerStateSE3,
         dynamic_state_se3: DynamicStateSE3,
         vehicle_parameters: VehicleParameters,
         time_point: TimePoint,
@@ -100,7 +100,7 @@ def array(self) -> npt.NDArray[np.float64]:
         Convert the EgoVehicleState to an array.
         :return: An array containing the bounding box and dynamic state information.
         """
-        assert isinstance(self.center_se3, StateSE3)
+        assert isinstance(self.center_se3, EulerStateSE3)
         assert isinstance(self.dynamic_state_se3, DynamicStateSE3)
 
         center_array = self.center_se3.array
@@ -109,11 +109,11 @@ def array(self) -> npt.NDArray[np.float64]:
         return np.concatenate((center_array, dynamic_array), axis=0)
 
     @property
-    def center(self) -> StateSE3:
+    def center(self) -> EulerStateSE3:
         return self.center_se3
 
     @property
-    def rear_axle_se3(self) -> StateSE3:
+    def rear_axle_se3(self) -> EulerStateSE3:
         return center_se3_to_rear_axle_se3(center_se3=self.center_se3, vehicle_parameters=self.vehicle_parameters)
 
     @property
@@ -121,7 +121,7 @@ def rear_axle_se2(self) -> StateSE2:
         return self.rear_axle_se3.state_se2
 
     @property
-    def rear_axle(self) -> StateSE3:
+    def rear_axle(self) -> EulerStateSE3:
         return self.rear_axle_se3
 
     @cached_property
diff --git a/d123/common/datatypes/vehicle_state/vehicle_parameters.py b/d123/common/datatypes/vehicle_state/vehicle_parameters.py
index 152b9382..4698206b 100644
--- a/d123/common/datatypes/vehicle_state/vehicle_parameters.py
+++ b/d123/common/datatypes/vehicle_state/vehicle_parameters.py
@@ -1,8 +1,8 @@
 from dataclasses import dataclass
 
-from d123.geometry import StateSE2, StateSE3, Vector2D, Vector3D
+from d123.geometry import StateSE2, EulerStateSE3, Vector2D, Vector3D
 from d123.geometry.transform.transform_se2 import translate_se2_along_body_frame
-from d123.geometry.transform.transform_se3 import translate_se3_along_body_frame
+from d123.geometry.transform.transform_euler_se3 import translate_euler_se3_along_body_frame
 
 # TODO: Add more vehicle parameters, potentially extend the parameters.
@@ -76,14 +76,14 @@ def get_av2_ford_fusion_hybrid_parameters() -> VehicleParameters:
     )
 
 
-def center_se3_to_rear_axle_se3(center_se3: StateSE3, vehicle_parameters: VehicleParameters) -> StateSE3:
+def center_se3_to_rear_axle_se3(center_se3: EulerStateSE3, vehicle_parameters: VehicleParameters) -> EulerStateSE3:
     """
     Converts a center state to a rear axle state.
     :param center_se3: The center state.
     :param vehicle_parameters: The vehicle parameters.
     :return: The rear axle state.
     """
-    return translate_se3_along_body_frame(
+    return translate_euler_se3_along_body_frame(
         center_se3,
         Vector3D(
             -vehicle_parameters.rear_axle_to_center_longitudinal,
@@ -93,14 +93,14 @@ def center_se3_to_rear_axle_se3(center_se3: StateSE3, vehicle_parameters: Vehicl
     )
 
 
-def rear_axle_se3_to_center_se3(rear_axle_se3: StateSE3, vehicle_parameters: VehicleParameters) -> StateSE3:
+def rear_axle_se3_to_center_se3(rear_axle_se3: EulerStateSE3, vehicle_parameters: VehicleParameters) -> EulerStateSE3:
     """
     Converts a rear axle state to a center state.
     :param rear_axle_se3: The rear axle state.
    :param vehicle_parameters: The vehicle parameters.
     :return: The center state.
     """
-    return translate_se3_along_body_frame(
+    return translate_euler_se3_along_body_frame(
         rear_axle_se3,
         Vector3D(
             vehicle_parameters.rear_axle_to_center_longitudinal,
diff --git a/d123/common/visualization/matplotlib/camera.py b/d123/common/visualization/matplotlib/camera.py
index 071a5284..6bdee5f6 100644
--- a/d123/common/visualization/matplotlib/camera.py
+++ b/d123/common/visualization/matplotlib/camera.py
@@ -16,7 +16,7 @@
 from d123.common.datatypes.vehicle_state.ego_state import EgoStateSE3
 from d123.common.visualization.color.default import BOX_DETECTION_CONFIG
 from d123.geometry import BoundingBoxSE3Index, Corners3DIndex
-from d123.geometry.transform.transform_se3 import convert_absolute_to_relative_se3_array
+from d123.geometry.transform.transform_euler_se3 import convert_absolute_to_relative_euler_se3_array
 
 # from navsim.common.dataclasses import Annotations, Camera, Lidar
 # from navsim.common.enums import BoundingBoxIndex, LidarIndex
@@ -98,7 +98,7 @@ def add_box_detections_to_camera_ax(
         box_detection_array[idx] = box_detection.bounding_box_se3.array
 
     # FIXME
-    box_detection_array[..., BoundingBoxSE3Index.STATE_SE3] = convert_absolute_to_relative_se3_array(
+    box_detection_array[..., BoundingBoxSE3Index.STATE_SE3] = convert_absolute_to_relative_euler_se3_array(
         ego_state_se3.rear_axle_se3, box_detection_array[..., BoundingBoxSE3Index.STATE_SE3]
     )
     # box_detection_array[..., BoundingBoxSE3Index.XYZ] -= ego_state_se3.rear_axle_se3.point_3d.array
diff --git a/d123/common/visualization/matplotlib/utils.py b/d123/common/visualization/matplotlib/utils.py
index 9e030b80..34cc2819 100644
--- a/d123/common/visualization/matplotlib/utils.py
+++ b/d123/common/visualization/matplotlib/utils.py
@@ -9,7 +9,7 @@
 from matplotlib.path import Path
 
 from d123.common.visualization.color.config import PlotConfig
-from d123.geometry import StateSE2, StateSE3
+from d123.geometry import StateSE2, EulerStateSE3
 
 
 def add_shapely_polygon_to_ax(
@@ -114,7 +114,7 @@ def get_pose_triangle(size: float) -> geom.Polygon:
 
 
 def shapely_geometry_local_coords(
-    geometry: geom.base.BaseGeometry, origin: Union[StateSE2, StateSE3]
+    geometry: geom.base.BaseGeometry, origin: Union[StateSE2, EulerStateSE3]
 ) -> geom.base.BaseGeometry:
     """Helper for transforming shapely geometry in coord-frame"""
     # TODO: move somewhere else for general use
diff --git a/d123/common/visualization/viser/utils.py b/d123/common/visualization/viser/utils.py
index 16717654..18af79d3 100644
--- a/d123/common/visualization/viser/utils.py
+++ b/d123/common/visualization/viser/utils.py
@@ -15,8 +15,8 @@
 from d123.dataset.maps.abstract_map import MapLayer
 from d123.dataset.maps.abstract_map_objects import AbstractLane, AbstractSurfaceMapObject
 from d123.dataset.scene.abstract_scene import AbstractScene
-from d123.geometry import BoundingBoxSE3, Point3D, Polyline3D, StateSE3
-from d123.geometry.transform.transform_se3 import convert_relative_to_absolute_points_3d_array
+from d123.geometry import BoundingBoxSE3, Point3D, Polyline3D, EulerStateSE3
+from d123.geometry.transform.transform_euler_se3 import convert_relative_to_absolute_points_3d_array
 
 # TODO: Refactor this file.
 # TODO: Add general utilities for 3D primitives and mesh support.
@@ -229,7 +229,7 @@ def get_camera_values(scene: AbstractScene, camera: Camera, iteration: int) -> T
 
     rear_axle_array = rear_axle.array
     rear_axle_array[:3] -= initial_point_3d.array
-    rear_axle = StateSE3.from_array(rear_axle_array)
+    rear_axle = EulerStateSE3.from_array(rear_axle_array)
 
     camera_to_ego = camera.extrinsic  # 4x4 transformation from camera to ego frame
 
diff --git a/d123/common/visualization/viser/utils_v2.py b/d123/common/visualization/viser/utils_v2.py
index 6a747698..54c4eaab 100644
--- a/d123/common/visualization/viser/utils_v2.py
+++ b/d123/common/visualization/viser/utils_v2.py
@@ -7,7 +7,7 @@
 
 # from d123.common.datatypes.sensor.camera_parameters import get_nuplan_camera_parameters
 from d123.geometry import BoundingBoxSE3, Corners3DIndex, Point3D, Point3DIndex, Vector3D
-from d123.geometry.transform.transform_se3 import translate_se3_along_body_frame
+from d123.geometry.transform.transform_euler_se3 import translate_euler_se3_along_body_frame
 
 # TODO: Refactor this file.
 # TODO: Add general utilities for 3D primitives and mesh support.
@@ -31,7 +31,7 @@ def _get_bounding_box_corners(bounding_box: BoundingBoxSE3) -> npt.NDArray[np.fl
     bounding_box_extent = np.array([bounding_box.length, bounding_box.width, bounding_box.height], dtype=np.float64)
     for idx, vec in corner_extent_factors.items():
         vector_3d = Vector3D.from_array(bounding_box_extent * vec.array)
-        corners[idx] = translate_se3_along_body_frame(bounding_box.center, vector_3d).point_3d.array
+        corners[idx] = translate_euler_se3_along_body_frame(bounding_box.center, vector_3d).point_3d.array
 
     return corners
 
diff --git a/d123/dataset/dataset_specific/av2/av2_data_converter.py b/d123/dataset/dataset_specific/av2/av2_data_converter.py
index f5e5e44a..d32e18e5 100644
--- a/d123/dataset/dataset_specific/av2/av2_data_converter.py
+++ b/d123/dataset/dataset_specific/av2/av2_data_converter.py
@@ -34,8 +34,8 @@
 from d123.dataset.dataset_specific.av2.av2_map_conversion import convert_av2_map
 from d123.dataset.dataset_specific.raw_data_converter import DataConverterConfig, RawDataConverter
 from d123.dataset.logs.log_metadata import LogMetadata
-from d123.geometry import BoundingBoxSE3Index, StateSE3, Vector3D, Vector3DIndex
-from d123.geometry.transform.transform_se3 import convert_relative_to_absolute_se3_array, get_rotation_matrix
+from d123.geometry import BoundingBoxSE3Index, EulerStateSE3, Vector3D, Vector3DIndex
+from d123.geometry.transform.transform_euler_se3 import convert_relative_to_absolute_euler_se3_array, get_rotation_matrix
 from d123.geometry.utils.constants import DEFAULT_PITCH, DEFAULT_ROLL
 
 
@@ -399,7 +399,7 @@ def _extract_box_detections(
         av2_detection_type = AV2SensorBoxDetectionType.deserialize(row["category"])
         detections_types.append(int(AV2_TO_DETECTION_TYPE[av2_detection_type]))
 
-    detections_state[:, BoundingBoxSE3Index.STATE_SE3] = convert_relative_to_absolute_se3_array(
+    detections_state[:, BoundingBoxSE3Index.STATE_SE3] = convert_relative_to_absolute_euler_se3_array(
         origin=ego_state_se3.rear_axle_se3, se3_array=detections_state[:, BoundingBoxSE3Index.STATE_SE3]
     )
 
@@ -428,7 +428,7 @@ def _extract_ego_state(city_se3_egovehicle_df: pd.DataFrame, lidar_timestamp_ns:
 
     yaw, pitch, roll = ego_pose_quat.yaw_pitch_roll
 
-    rear_axle_pose = StateSE3(
+    rear_axle_pose = EulerStateSE3(
         x=ego_pose_dict["tx_m"],
         y=ego_pose_dict["ty_m"],
         z=ego_pose_dict["tz_m"],
diff --git a/d123/dataset/dataset_specific/nuplan/nuplan_data_converter.py b/d123/dataset/dataset_specific/nuplan/nuplan_data_converter.py
index b7b52e0b..a87d0c31 100644
--- a/d123/dataset/dataset_specific/nuplan/nuplan_data_converter.py
+++ b/d123/dataset/dataset_specific/nuplan/nuplan_data_converter.py
@@ -31,7 +31,7 @@
 from d123.dataset.dataset_specific.nuplan.nuplan_map_conversion import MAP_LOCATIONS, NuPlanMapConverter
 from d123.dataset.dataset_specific.raw_data_converter import DataConverterConfig, RawDataConverter
 from d123.dataset.logs.log_metadata import LogMetadata
-from d123.geometry import BoundingBoxSE3, BoundingBoxSE3Index, StateSE3, Vector3D, Vector3DIndex
+from d123.geometry import BoundingBoxSE3, BoundingBoxSE3Index, EulerStateSE3, Vector3D, Vector3DIndex
 from d123.geometry.utils.constants import DEFAULT_PITCH, DEFAULT_ROLL
 
 check_dependencies(["nuplan", "sqlalchemy"], "nuplan")
@@ -366,7 +366,7 @@ def _extract_detections(lidar_pc: LidarPc) -> Tuple[List[List[float]], List[List
     for lidar_box in lidar_pc.lidar_boxes:
         lidar_box: LidarBox
 
-        center = StateSE3(
+        center = EulerStateSE3(
             x=lidar_box.x,
             y=lidar_box.y,
             z=lidar_box.z,
@@ -390,7 +390,7 @@ def _extract_ego_state(lidar_pc: LidarPc) -> List[float]:
     vehicle_parameters = get_nuplan_chrysler_pacifica_parameters()
     # vehicle_parameters = get_pacifica_parameters()
 
-    rear_axle_pose = StateSE3(
+    rear_axle_pose = EulerStateSE3(
         x=lidar_pc.ego_pose.x,
         y=lidar_pc.ego_pose.y,
        z=lidar_pc.ego_pose.z,
diff --git a/d123/dataset/dataset_specific/wopd/wopd_data_converter.py b/d123/dataset/dataset_specific/wopd/wopd_data_converter.py
index 3e577a04..1fd26b6b 100644
--- a/d123/dataset/dataset_specific/wopd/wopd_data_converter.py
+++ b/d123/dataset/dataset_specific/wopd/wopd_data_converter.py
@@ -26,8 +26,8 @@
 from d123.dataset.dataset_specific.wopd.waymo_map_utils.wopd_map_utils import convert_wopd_map
 from d123.dataset.dataset_specific.wopd.wopd_utils import parse_range_image_and_camera_projection
 from d123.dataset.logs.log_metadata import LogMetadata
-from d123.geometry import BoundingBoxSE3Index, Point3D, StateSE3, Vector3D, Vector3DIndex
-from d123.geometry.transform.transform_se3 import convert_relative_to_absolute_se3_array
+from d123.geometry import BoundingBoxSE3Index, Point3D, EulerStateSE3, Vector3D, Vector3DIndex
+from d123.geometry.transform.transform_euler_se3 import convert_relative_to_absolute_euler_se3_array
 from d123.geometry.utils.constants import DEFAULT_PITCH, DEFAULT_ROLL
 
 check_dependencies(modules=["tensorflow", "waymo_open_dataset"], optional_name="waymo")
@@ -389,12 +389,12 @@ def _write_recording_table(
     write_arrow_table(recording_table, log_file_path)
 
 
-def _get_ego_pose_se3(frame: dataset_pb2.Frame) -> StateSE3:
+def _get_ego_pose_se3(frame: dataset_pb2.Frame) -> EulerStateSE3:
     ego_pose_matrix = np.array(frame.pose.transform).reshape(4, 4)
     yaw, pitch, roll = Quaternion(matrix=ego_pose_matrix[:3, :3]).yaw_pitch_roll
     ego_point_3d = Point3D.from_array(ego_pose_matrix[:3, 3])
 
-    return StateSE3(x=ego_point_3d.x, y=ego_point_3d.y, z=ego_point_3d.z, roll=roll, pitch=pitch, yaw=yaw)
+    return EulerStateSE3(x=ego_point_3d.x, y=ego_point_3d.y, z=ego_point_3d.z, roll=roll, pitch=pitch, yaw=yaw)
 
 
 def _extract_detections(frame: dataset_pb2.Frame) -> Tuple[List[List[float]], List[List[float]], List[str], List[int]]:
@@ -434,7 +434,7 @@ def _extract_detections(frame: dataset_pb2.Frame) -> Tuple[List[List[float]], Li
         detections_token.append(str(detection.id))
         detections_types.append(int(WOPD_DETECTION_NAME_DICT[detection.type]))
 
-    detections_state[:, BoundingBoxSE3Index.STATE_SE3] = convert_relative_to_absolute_se3_array(
+    detections_state[:, BoundingBoxSE3Index.STATE_SE3] = convert_relative_to_absolute_euler_se3_array(
         origin=ego_rear_axle, se3_array=detections_state[:, BoundingBoxSE3Index.STATE_SE3]
     )
     if DETECTION_ROLL_PITCH == "ego":
@@ -490,7 +490,7 @@ def _extract_camera(
     transform = np.array(calibration.extrinsic.transform).reshape(4, 4)
 
     # FIXME: This is an ugly hack to convert to uniform camera convention.
-    flip_camera = StateSE3(
+    flip_camera = EulerStateSE3(
         x=0.0,
         y=0.0,
         z=0.0,
diff --git a/d123/geometry/__init__.py b/d123/geometry/__init__.py
index ca210677..ec44efd5 100644
--- a/d123/geometry/__init__.py
+++ b/d123/geometry/__init__.py
@@ -7,12 +7,12 @@
     Point2DIndex,
     Point3DIndex,
     StateSE2Index,
-    StateSE3Index,
+    EulerStateSE3Index,
     Vector2DIndex,
     Vector3DIndex,
 )
 from d123.geometry.occupancy_map import OccupancyMap2D
 from d123.geometry.point import Point2D, Point3D
 from d123.geometry.polyline import Polyline2D, Polyline3D, PolylineSE2
-from d123.geometry.se import StateSE2, StateSE3
+from d123.geometry.se import StateSE2, EulerStateSE3
 from d123.geometry.vector import Vector2D, Vector3D
diff --git a/d123/geometry/bounding_box.py b/d123/geometry/bounding_box.py
index 459e56bf..6ace64e7 100644
--- a/d123/geometry/bounding_box.py
+++ b/d123/geometry/bounding_box.py
@@ -11,7 +11,7 @@
 from d123.common.utils.mixin import ArrayMixin
 from d123.geometry.geometry_index import BoundingBoxSE2Index, BoundingBoxSE3Index, Corners2DIndex, Corners3DIndex
 from d123.geometry.point import Point2D, Point3D
-from d123.geometry.se import StateSE2, StateSE3
+from d123.geometry.se import StateSE2, EulerStateSE3
 from d123.geometry.utils.bounding_box_utils import bbse2_array_to_corners_array, bbse3_array_to_corners_array
 
 
@@ -152,7 +152,7 @@ class BoundingBoxSE3(ArrayMixin):
 
     _array: npt.NDArray[np.float64]
 
-    def __init__(self, center: StateSE3, length: float, width: float, height: float):
+    def __init__(self, center: EulerStateSE3, length: float, width: float, height: float):
         """Initialize BoundingBoxSE3 with center (StateSE3), length, width and height.
 
         :param center: Center of the bounding box as a StateSE3 instance.
@@ -183,15 +183,15 @@ def from_array(cls, array: npt.NDArray[np.float64], copy: bool = True) -> Boundi
         return instance
 
     @property
-    def center(self) -> StateSE3:
+    def center(self) -> EulerStateSE3:
         """The center of the bounding box as a StateSE3 instance.
 
         :return: The center of the bounding box as a StateSE3 instance.
         """
-        return StateSE3.from_array(self._array[BoundingBoxSE3Index.STATE_SE3])
+        return EulerStateSE3.from_array(self._array[BoundingBoxSE3Index.STATE_SE3])
 
     @property
-    def center_se3(self) -> StateSE3:
+    def center_se3(self) -> EulerStateSE3:
         """The center of the bounding box as a StateSE3 instance.
 
         :return: The center of the bounding box as a StateSE3 instance.
diff --git a/d123/geometry/geometry_index.py b/d123/geometry/geometry_index.py
index 93ff4f3a..aa0924db 100644
--- a/d123/geometry/geometry_index.py
+++ b/d123/geometry/geometry_index.py
@@ -104,7 +104,7 @@ def VECTOR(cls) -> slice:
         return slice(cls.QX, cls.QZ + 1)
 
 
-class StateSE3Index(IntEnum):
+class EulerStateSE3Index(IntEnum):
     """
     Indexes array-like representations of SE3 states (x,y,z,roll,pitch,yaw).
     TODO: Use quaternions for rotation.
@@ -130,7 +130,7 @@ def EULER_ANGLES(cls) -> slice:
         return slice(cls.ROLL, cls.YAW + 1)
 
 
-class QuaternionSE3Index(IntEnum):
+class StateSE3Index(IntEnum):
     """
     Indexes array-like representations of SE3 states with quaternions (x,y,z,qw,qx,qy,qz).
""" diff --git a/d123/geometry/se.py b/d123/geometry/se.py index 2bc56da9..148868ea 100644 --- a/d123/geometry/se.py +++ b/d123/geometry/se.py @@ -8,7 +8,7 @@ from pyparsing import cached_property from d123.common.utils.mixin import ArrayMixin -from d123.geometry.geometry_index import Point3DIndex, QuaternionSE3Index, StateSE2Index, StateSE3Index +from d123.geometry.geometry_index import Point3DIndex, StateSE3Index, StateSE2Index, EulerStateSE3Index from d123.geometry.point import Point2D, Point3D from d123.geometry.rotation import EulerAngles, Quaternion @@ -106,32 +106,32 @@ def shapely_point(self) -> geom.Point: class StateSE3(ArrayMixin): - """ - Class to represents a 3D pose as SE3 (x, y, z, roll, pitch, yaw). - TODO: Use quaternions for rotation representation. + """Class representing a quaternion in SE3 space. + + TODO: Implement and replace StateSE3. """ _array: npt.NDArray[np.float64] - def __init__(self, x: float, y: float, z: float, roll: float, pitch: float, yaw: float): - """Initialize StateSE3 with x, y, z, roll, pitch, yaw coordinates.""" + def __init__(self, x: float, y: float, z: float, qw: float, qx: float, qy: float, qz: float): + """Initialize QuaternionSE3 with x, y, z, qw, qx, qy, qz coordinates.""" array = np.zeros(len(StateSE3Index), dtype=np.float64) array[StateSE3Index.X] = x array[StateSE3Index.Y] = y array[StateSE3Index.Z] = z - array[StateSE3Index.ROLL] = roll - array[StateSE3Index.PITCH] = pitch - array[StateSE3Index.YAW] = yaw + array[StateSE3Index.QW] = qw + array[StateSE3Index.QX] = qx + array[StateSE3Index.QY] = qy + array[StateSE3Index.QZ] = qz object.__setattr__(self, "_array", array) @classmethod def from_array(cls, array: npt.NDArray[np.float64], copy: bool = True) -> StateSE3: - """Constructs a StateSE3 from a numpy array. + """Constructs a QuaternionSE3 from a numpy array. - :param array: Array of shape (6,) representing the state [x, y, z, roll, pitch, yaw], indexed by \ - :class:`~d123.geometry.geometry_index.StateSE3Index`. + :param array: Array of shape (7,), indexed by :class:`~d123.geometry.geometry_index.QuaternionSE3Index`. :param copy: Whether to copy the input array. Defaults to True. - :return: A StateSE3 instance. + :return: A QuaternionSE3 instance. """ assert array.ndim == 1 assert array.shape[0] == len(StateSE3Index) @@ -139,31 +139,9 @@ def from_array(cls, array: npt.NDArray[np.float64], copy: bool = True) -> StateS object.__setattr__(instance, "_array", array.copy() if copy else array) return instance - @classmethod - def from_transformation_matrix(cls, array: npt.NDArray[np.float64]) -> StateSE3: - """Constructs a StateSE3 from a 4x4 transformation matrix. - - :param array: A 4x4 numpy array representing the transformation matrix. - :return: A StateSE3 instance. - """ - assert array.ndim == 2 - assert array.shape == (4, 4) - translation = array[:3, 3] - rotation = array[:3, :3] - roll, pitch, yaw = EulerAngles.from_rotation_matrix(rotation) - - return StateSE3( - x=translation[Point3DIndex.X], - y=translation[Point3DIndex.Y], - z=translation[Point3DIndex.Z], - roll=roll, - pitch=pitch, - yaw=yaw, - ) - @property def x(self) -> float: - """Returns the x-coordinate of the 3D state. + """Returns the x-coordinate of the quaternion. :return: The x-coordinate. """ @@ -171,7 +149,7 @@ def x(self) -> float: @property def y(self) -> float: - """Returns the y-coordinate of the 3D state. + """Returns the y-coordinate of the quaternion. :return: The y-coordinate. 
""" @@ -179,52 +157,61 @@ def y(self) -> float: @property def z(self) -> float: - """Returns the z-coordinate of the 3D state. + """Returns the z-coordinate of the quaternion. :return: The z-coordinate. """ return self._array[StateSE3Index.Z] @property - def roll(self) -> float: - """Returns the roll (x-axis rotation) of the 3D state. + def qw(self) -> float: + """Returns the w-coordinate of the quaternion. - :return: The roll angle. + :return: The w-coordinate. """ - return self._array[StateSE3Index.ROLL] + return self._array[StateSE3Index.QW] @property - def pitch(self) -> float: - """Returns the pitch (y-axis rotation) of the 3D state. + def qx(self) -> float: + """Returns the x-coordinate of the quaternion. - :return: The pitch angle. + :return: The x-coordinate. """ - return self._array[StateSE3Index.PITCH] + return self._array[StateSE3Index.QX] @property - def yaw(self) -> float: - """Returns the yaw (z-axis rotation) of the 3D state. + def qy(self) -> float: + """Returns the y-coordinate of the quaternion. - :return: The yaw angle. + :return: The y-coordinate. """ - return self._array[StateSE3Index.YAW] + return self._array[StateSE3Index.QY] + + @property + def qz(self) -> float: + """Returns the z-coordinate of the quaternion. + + :return: The z-coordinate. + """ + return self._array[StateSE3Index.QZ] @property def array(self) -> npt.NDArray[np.float64]: - """Returns the StateSE3 instance as a numpy array. + """Converts the QuaternionSE3 instance to a numpy array. - :return: A numpy array of shape (6,), indexed by \ - :class:`~d123.geometry.geometry_index.StateSE3Index`. + :return: A numpy array of shape (7,), indexed by :class:`~d123.geometry.geometry_index.QuaternionSE3Index`. """ return self._array @property def state_se2(self) -> StateSE2: - """Returns the 3D state as a 2D state by ignoring the z-axis. + """Returns the quaternion state as a 2D state by ignoring the z-axis. :return: A StateSE2 instance representing the 2D projection of the 3D state. """ - return StateSE2(self.x, self.y, self.yaw) + # Convert quaternion to yaw angle + yaw = self.quaternion.euler_angles.yaw + return StateSE2(self.x, self.y, yaw) @property def point_3d(self) -> Point3D: @@ -250,174 +237,143 @@ def shapely_point(self) -> geom.Point: """ return self.point_3d.shapely_point - @property - def rotation_matrix(self) -> npt.NDArray[np.float64]: - """Returns the 3x3 rotation matrix representation of the state's orientation. - - :return: A 3x3 numpy array representing the rotation matrix. - """ - return self.euler_angles.rotation_matrix - - @property - def transformation_matrix(self) -> npt.NDArray[np.float64]: - """Returns the 4x4 transformation matrix representation of the state. - - :return: A 4x4 numpy array representing the transformation matrix. - """ - rotation_matrix = self.rotation_matrix - transformation_matrix = np.eye(4, dtype=np.float64) - transformation_matrix[:3, :3] = rotation_matrix - transformation_matrix[:3, 3] = self.array[StateSE3Index.XYZ] - return transformation_matrix - @cached_property - def euler_angles(self) -> EulerAngles: - return EulerAngles.from_array(self.array[StateSE3Index.EULER_ANGLES]) - - @property - def quaternion_se3(self) -> QuaternionSE3: - """Returns the QuaternionSE3 representation of the state. - - :return: A QuaternionSE3 instance representing the quaternion. 
- """ - quaternion_se3_array = np.zeros(len(QuaternionSE3Index), dtype=np.float64) - quaternion_se3_array[QuaternionSE3Index.XYZ] = self.array[StateSE3Index.XYZ] - quaternion_se3_array[QuaternionSE3Index.QUATERNION] = Quaternion.from_euler_angles(self.euler_angles) - - return QuaternionSE3.from_array(quaternion_se3_array) - - @property - def quaternion(self) -> npt.NDArray[np.float64]: + def quaternion(self) -> Quaternion: """Returns the quaternion (w, x, y, z) representation of the state's orientation. - :return: A numpy array of shape (4,) representing the quaternion. + :return: A Quaternion instance representing the quaternion. """ - raise NotImplementedError("Quaternion conversion not implemented yet.") + return Quaternion.from_array(self.array[StateSE3Index.QUATERNION]) - def __iter__(self) -> Iterable[float]: - """Iterator over the state coordinates (x, y, z, roll, pitch, yaw).""" - return iter((self.x, self.y, self.z, self.roll, self.pitch, self.yaw)) - - def __hash__(self) -> int: - """Hash method""" - return hash((self.x, self.y, self.z, self.roll, self.pitch, self.yaw)) - - def __matmul__(self, other: StateSE3) -> StateSE3: - """Combines two SE3 states by applying the transformation of the other state to this state. + @property + def rotation_matrix(self) -> npt.NDArray[np.float64]: + """Returns the 3x3 rotation matrix representation of the state's orientation. - :param other: Another StateSE3 instance representing the transformation to apply. - :return: A new StateSE3 instance representing the combined transformation. + :return: A 3x3 numpy array representing the rotation matrix. """ - return StateSE3.from_transformation_matrix(self.transformation_matrix @ other.transformation_matrix) - + return self.quaternion.rotation_matrix -class QuaternionSE3(ArrayMixin): - """Class representing a quaternion in SE3 space. - TODO: Implement and replace StateSE3. +class EulerStateSE3(ArrayMixin): + """ + Class to represents a 3D pose as SE3 (x, y, z, roll, pitch, yaw). + TODO: Use quaternions for rotation representation. """ _array: npt.NDArray[np.float64] - def __init__(self, x: float, y: float, z: float, qw: float, qx: float, qy: float, qz: float): - """Initialize QuaternionSE3 with x, y, z, qw, qx, qy, qz coordinates.""" - array = np.zeros(len(QuaternionSE3Index), dtype=np.float64) - array[QuaternionSE3Index.X] = x - array[QuaternionSE3Index.Y] = y - array[QuaternionSE3Index.Z] = z - array[QuaternionSE3Index.QW] = qw - array[QuaternionSE3Index.QX] = qx - array[QuaternionSE3Index.QY] = qy - array[QuaternionSE3Index.QZ] = qz + def __init__(self, x: float, y: float, z: float, roll: float, pitch: float, yaw: float): + """Initialize StateSE3 with x, y, z, roll, pitch, yaw coordinates.""" + array = np.zeros(len(EulerStateSE3Index), dtype=np.float64) + array[EulerStateSE3Index.X] = x + array[EulerStateSE3Index.Y] = y + array[EulerStateSE3Index.Z] = z + array[EulerStateSE3Index.ROLL] = roll + array[EulerStateSE3Index.PITCH] = pitch + array[EulerStateSE3Index.YAW] = yaw object.__setattr__(self, "_array", array) @classmethod - def from_array(cls, array: npt.NDArray[np.float64], copy: bool = True) -> QuaternionSE3: - """Constructs a QuaternionSE3 from a numpy array. + def from_array(cls, array: npt.NDArray[np.float64], copy: bool = True) -> EulerStateSE3: + """Constructs a StateSE3 from a numpy array. - :param array: Array of shape (7,), indexed by :class:`~d123.geometry.geometry_index.QuaternionSE3Index`. 
+        :param array: Array of shape (6,) representing the state [x, y, z, roll, pitch, yaw], indexed by \
+            :class:`~d123.geometry.geometry_index.EulerStateSE3Index`.
         :param copy: Whether to copy the input array. Defaults to True.
-        :return: A QuaternionSE3 instance.
+        :return: An EulerStateSE3 instance.
         """
         assert array.ndim == 1
-        assert array.shape[0] == len(QuaternionSE3Index)
+        assert array.shape[0] == len(EulerStateSE3Index)
         instance = object.__new__(cls)
         object.__setattr__(instance, "_array", array.copy() if copy else array)
         return instance
 
+    @classmethod
+    def from_transformation_matrix(cls, array: npt.NDArray[np.float64]) -> EulerStateSE3:
+        """Constructs an EulerStateSE3 from a 4x4 transformation matrix.
+
+        :param array: A 4x4 numpy array representing the transformation matrix.
+        :return: An EulerStateSE3 instance.
+        """
+        assert array.ndim == 2
+        assert array.shape == (4, 4)
+        translation = array[:3, 3]
+        rotation = array[:3, :3]
+        roll, pitch, yaw = EulerAngles.from_rotation_matrix(rotation)
+
+        return EulerStateSE3(
+            x=translation[Point3DIndex.X],
+            y=translation[Point3DIndex.Y],
+            z=translation[Point3DIndex.Z],
+            roll=roll,
+            pitch=pitch,
+            yaw=yaw,
+        )
+
     @property
     def x(self) -> float:
-        """Returns the x-coordinate of the quaternion.
+        """Returns the x-coordinate of the 3D state.
 
         :return: The x-coordinate.
         """
-        return self._array[QuaternionSE3Index.X]
+        return self._array[EulerStateSE3Index.X]
 
     @property
     def y(self) -> float:
-        """Returns the y-coordinate of the quaternion.
+        """Returns the y-coordinate of the 3D state.
 
         :return: The y-coordinate.
         """
-        return self._array[QuaternionSE3Index.Y]
 
     @property
     def z(self) -> float:
-        """Returns the z-coordinate of the quaternion.
+        """Returns the z-coordinate of the 3D state.
 
         :return: The z-coordinate.
         """
-        return self._array[QuaternionSE3Index.Z]
+        return self._array[EulerStateSE3Index.Z]
 
     @property
-    def qw(self) -> float:
-        """Returns the w-coordinate of the quaternion.
+    def roll(self) -> float:
+        """Returns the roll (x-axis rotation) of the 3D state.
 
-        :return: The w-coordinate.
+        :return: The roll angle.
         """
-        return self._array[QuaternionSE3Index.QW]
+        return self._array[EulerStateSE3Index.ROLL]
 
     @property
-    def qx(self) -> float:
-        """Returns the x-coordinate of the quaternion.
+    def pitch(self) -> float:
+        """Returns the pitch (y-axis rotation) of the 3D state.
 
-        :return: The x-coordinate.
+        :return: The pitch angle.
         """
-        return self._array[QuaternionSE3Index.QX]
+        return self._array[EulerStateSE3Index.PITCH]
 
     @property
-    def qy(self) -> float:
-        """Returns the y-coordinate of the quaternion.
+    def yaw(self) -> float:
+        """Returns the yaw (z-axis rotation) of the 3D state.
 
-        :return: The y-coordinate.
+        :return: The yaw angle.
         """
-        return self._array[QuaternionSE3Index.QY]
+        return self._array[EulerStateSE3Index.YAW]
 
     @property
-    def qz(self) -> float:
-        """Returns the z-coordinate of the quaternion.
-
-        :return: The z-coordinate.
-        """
-        return self._array[QuaternionSE3Index.QZ]
-
-    @property
     def array(self) -> npt.NDArray[np.float64]:
-        """Converts the QuaternionSE3 instance to a numpy array.
+        """Returns the EulerStateSE3 instance as a numpy array.
 
-        :return: A numpy array of shape (7,), indexed by :class:`~d123.geometry.geometry_index.QuaternionSE3Index`.
+        :return: A numpy array of shape (6,), indexed by \
+            :class:`~d123.geometry.geometry_index.EulerStateSE3Index`.
         """
         return self._array
 
     @property
     def state_se2(self) -> StateSE2:
-        """Returns the quaternion state as a 2D state by ignoring the z-axis.
+ """Returns the 3D state as a 2D state by ignoring the z-axis. :return: A StateSE2 instance representing the 2D projection of the 3D state. """ - # Convert quaternion to yaw angle - yaw = self.quaternion.euler_angles.yaw - return StateSE2(self.x, self.y, yaw) + return StateSE2(self.x, self.y, self.yaw) @property def point_3d(self) -> Point3D: @@ -443,18 +399,54 @@ def shapely_point(self) -> geom.Point: """ return self.point_3d.shapely_point - @cached_property - def quaternion(self) -> Quaternion: - """Returns the quaternion (w, x, y, z) representation of the state's orientation. - - :return: A Quaternion instance representing the quaternion. - """ - return Quaternion.from_array(self.array[QuaternionSE3Index.QUATERNION]) - @property def rotation_matrix(self) -> npt.NDArray[np.float64]: """Returns the 3x3 rotation matrix representation of the state's orientation. :return: A 3x3 numpy array representing the rotation matrix. """ - return self.quaternion.rotation_matrix + return self.euler_angles.rotation_matrix + + @property + def transformation_matrix(self) -> npt.NDArray[np.float64]: + """Returns the 4x4 transformation matrix representation of the state. + + :return: A 4x4 numpy array representing the transformation matrix. + """ + rotation_matrix = self.rotation_matrix + transformation_matrix = np.eye(4, dtype=np.float64) + transformation_matrix[:3, :3] = rotation_matrix + transformation_matrix[:3, 3] = self.array[EulerStateSE3Index.XYZ] + return transformation_matrix + + @cached_property + def euler_angles(self) -> EulerAngles: + return EulerAngles.from_array(self.array[EulerStateSE3Index.EULER_ANGLES]) + + @property + def quaternion_se3(self) -> StateSE3: + quaternion_se3_array = np.zeros(len(StateSE3Index), dtype=np.float64) + quaternion_se3_array[StateSE3Index.XYZ] = self.array[EulerStateSE3Index.XYZ] + quaternion_se3_array[StateSE3Index.QUATERNION] = Quaternion.from_euler_angles(self.euler_angles) + + return StateSE3.from_array(quaternion_se3_array) + + @property + def quaternion(self) -> Quaternion: + return Quaternion.from_euler_angles(self.euler_angles) + + def __iter__(self) -> Iterable[float]: + """Iterator over the state coordinates (x, y, z, roll, pitch, yaw).""" + return iter((self.x, self.y, self.z, self.roll, self.pitch, self.yaw)) + + def __hash__(self) -> int: + """Hash method""" + return hash((self.x, self.y, self.z, self.roll, self.pitch, self.yaw)) + + def __matmul__(self, other: EulerStateSE3) -> EulerStateSE3: + """Combines two SE3 states by applying the transformation of the other state to this state. + + :param other: Another StateSE3 instance representing the transformation to apply. + :return: A new StateSE3 instance representing the combined transformation. 
+ """ + return EulerStateSE3.from_transformation_matrix(self.transformation_matrix @ other.transformation_matrix) diff --git a/d123/geometry/test/test_bounding_box.py b/d123/geometry/test/test_bounding_box.py index 545c511c..06522d7a 100644 --- a/d123/geometry/test/test_bounding_box.py +++ b/d123/geometry/test/test_bounding_box.py @@ -4,7 +4,7 @@ import shapely.geometry as geom from d123.common.utils.mixin import ArrayMixin -from d123.geometry import BoundingBoxSE2, BoundingBoxSE3, Point2D, Point3D, StateSE2, StateSE3 +from d123.geometry import BoundingBoxSE2, BoundingBoxSE3, Point2D, Point3D, StateSE2, EulerStateSE3 from d123.geometry.geometry_index import ( BoundingBoxSE2Index, BoundingBoxSE3Index, @@ -109,7 +109,7 @@ class TestBoundingBoxSE3(unittest.TestCase): def setUp(self): """Set up test fixtures.""" - self.center = StateSE3(1.0, 2.0, 3.0, 0.1, 0.2, 0.3) + self.center = EulerStateSE3(1.0, 2.0, 3.0, 0.1, 0.2, 0.3) self.length = 4.0 self.width = 2.0 self.height = 1.5 @@ -204,7 +204,7 @@ def test_array_assertions(self): def test_zero_dimensions(self): """Test bounding box with zero dimensions.""" - center = StateSE3(0.0, 0.0, 0.0, 0.0, 0.0, 0.0) + center = EulerStateSE3(0.0, 0.0, 0.0, 0.0, 0.0, 0.0) bbox = BoundingBoxSE3(center, 0.0, 0.0, 0.0) self.assertEqual(bbox.length, 0.0) self.assertEqual(bbox.width, 0.0) diff --git a/d123/geometry/test/test_transform.py b/d123/geometry/test/test_transform.py deleted file mode 100644 index 8c033803..00000000 --- a/d123/geometry/test/test_transform.py +++ /dev/null @@ -1,976 +0,0 @@ -import unittest - -import numpy as np -import numpy.typing as npt - -from d123.geometry.geometry_index import EulerAnglesIndex, Point2DIndex, Point3DIndex, StateSE2Index, StateSE3Index -from d123.geometry.se import StateSE2, StateSE3 -from d123.geometry.transform.transform_se2 import ( - convert_absolute_to_relative_point_2d_array, - convert_absolute_to_relative_se2_array, - convert_relative_to_absolute_point_2d_array, - convert_relative_to_absolute_se2_array, - translate_se2_along_body_frame, - translate_se2_along_x, - translate_se2_along_y, - translate_se2_array_along_body_frame, -) -from d123.geometry.transform.transform_se3 import ( - convert_absolute_to_relative_points_3d_array, - convert_absolute_to_relative_se3_array, - convert_absolute_to_relative_se3_array, - convert_relative_to_absolute_points_3d_array, - convert_relative_to_absolute_se3_array, - translate_se3_along_body_frame, - translate_se3_along_x, - translate_se3_along_y, - translate_se3_along_z, -) -from d123.geometry.vector import Vector2D, Vector3D - - -class TestTransformSE2(unittest.TestCase): - - def setUp(self): - self.decimal = 6 # Decimal places for np.testing.assert_array_almost_equal - - def test_translate_se2_along_x(self) -> None: - """Tests translating a SE2 state along the X-axis.""" - pose: StateSE2 = StateSE2.from_array(np.array([0.0, 0.0, 0.0], dtype=np.float64)) - distance: float = 1.0 - result: StateSE2 = translate_se2_along_x(pose, distance) - expected: StateSE2 = StateSE2.from_array(np.array([1.0, 0.0, 0.0], dtype=np.float64)) - np.testing.assert_array_almost_equal(result.array, expected.array, decimal=self.decimal) - - def test_translate_se2_along_x_negative(self) -> None: - """Tests translating a SE2 state along the X-axis in the negative direction.""" - pose: StateSE2 = StateSE2.from_array(np.array([1.0, 2.0, 0.0], dtype=np.float64)) - distance: float = -0.5 - result: StateSE2 = translate_se2_along_x(pose, distance) - expected: StateSE2 = StateSE2.from_array(np.array([0.5, 2.0, 
0.0], dtype=np.float64)) - np.testing.assert_array_almost_equal(result.array, expected.array, decimal=self.decimal) - - def test_translate_se2_along_x_with_rotation(self) -> None: - """Tests translating a SE2 state along the X-axis with 90 degree rotation.""" - pose: StateSE2 = StateSE2.from_array(np.array([0.0, 0.0, np.pi / 2], dtype=np.float64)) - distance: float = 1.0 - result: StateSE2 = translate_se2_along_x(pose, distance) - expected: StateSE2 = StateSE2.from_array(np.array([0.0, 1.0, np.pi / 2], dtype=np.float64)) - np.testing.assert_array_almost_equal(result.array, expected.array, decimal=self.decimal) - - def test_translate_se2_along_y(self) -> None: - """Tests translating a SE2 state along the Y-axis.""" - pose: StateSE2 = StateSE2.from_array(np.array([0.0, 0.0, 0.0], dtype=np.float64)) - distance: float = 1.0 - result: StateSE2 = translate_se2_along_y(pose, distance) - expected: StateSE2 = StateSE2.from_array(np.array([0.0, 1.0, 0.0], dtype=np.float64)) - np.testing.assert_array_almost_equal(result.array, expected.array, decimal=self.decimal) - - def test_translate_se2_along_y_negative(self) -> None: - """Tests translating a SE2 state along the Y-axis in the negative direction.""" - pose: StateSE2 = StateSE2.from_array(np.array([1.0, 2.0, 0.0], dtype=np.float64)) - distance: float = -1.5 - result: StateSE2 = translate_se2_along_y(pose, distance) - expected: StateSE2 = StateSE2.from_array(np.array([1.0, 0.5, 0.0], dtype=np.float64)) - np.testing.assert_array_almost_equal(result.array, expected.array, decimal=self.decimal) - - def test_translate_se2_along_y_with_rotation(self) -> None: - """Tests translating a SE2 state along the Y-axis with -90 degree rotation.""" - pose: StateSE2 = StateSE2.from_array(np.array([0.0, 0.0, -np.pi / 2], dtype=np.float64)) - distance: float = 2.0 - result: StateSE2 = translate_se2_along_y(pose, distance) - expected: StateSE2 = StateSE2.from_array(np.array([2.0, 0.0, -np.pi / 2], dtype=np.float64)) - np.testing.assert_array_almost_equal(result.array, expected.array, decimal=self.decimal) - - def test_translate_se2_along_body_frame_forward(self) -> None: - """Tests translating a SE2 state along the body frame forward direction, with 90 degree rotation.""" - # Move 1 unit forward in the direction of yaw (pi/2 = 90 degrees = +Y direction) - pose: StateSE2 = StateSE2.from_array(np.array([0.0, 0.0, np.pi / 2], dtype=np.float64)) - vector: Vector2D = Vector2D(1.0, 0.0) - result: StateSE2 = translate_se2_along_body_frame(pose, vector) - expected: StateSE2 = StateSE2.from_array(np.array([0.0, 1.0, np.pi / 2], dtype=np.float64)) - np.testing.assert_array_almost_equal(result.array, expected.array, decimal=self.decimal) - - def test_translate_se2_along_body_frame_backward(self) -> None: - """Tests translating a SE2 state along the body frame backward direction.""" - pose: StateSE2 = StateSE2.from_array(np.array([0.0, 0.0, 0.0], dtype=np.float64)) - vector: Vector2D = Vector2D(-1.0, 0.0) - result: StateSE2 = translate_se2_along_body_frame(pose, vector) - expected: StateSE2 = StateSE2.from_array(np.array([-1.0, 0.0, 0.0], dtype=np.float64)) - np.testing.assert_array_almost_equal(result.array, expected.array, decimal=self.decimal) - - def test_translate_se2_along_body_frame_diagonal(self) -> None: - """Tests translating a SE2 state along the body frame diagonal direction.""" - pose: StateSE2 = StateSE2.from_array(np.array([1.0, 0.0, np.deg2rad(45)], dtype=np.float64)) - vector: Vector2D = Vector2D(1.0, 0.0) - result: StateSE2 = translate_se2_along_body_frame(pose, 
vector) - expected: StateSE2 = StateSE2.from_array( - np.array([1.0 + np.sqrt(2.0) / 2, 0.0 + np.sqrt(2.0) / 2, np.deg2rad(45)], dtype=np.float64) - ) - np.testing.assert_array_almost_equal(result.array, expected.array, decimal=self.decimal) - - def test_translate_se2_along_body_frame_lateral(self) -> None: - """Tests translating a SE2 state along the body frame lateral direction.""" - # Move 1 unit to the right (positive y in body frame) - pose: StateSE2 = StateSE2.from_array(np.array([0.0, 0.0, 0.0], dtype=np.float64)) - vector: Vector2D = Vector2D(0.0, 1.0) - result: StateSE2 = translate_se2_along_body_frame(pose, vector) - expected: StateSE2 = StateSE2.from_array(np.array([0.0, 1.0, 0.0], dtype=np.float64)) - np.testing.assert_array_almost_equal(result.array, expected.array, decimal=self.decimal) - - def test_translate_se2_along_body_frame_lateral_with_rotation(self) -> None: - """Tests translating a SE2 state along the body frame lateral direction with 90 degree rotation.""" - # Move 1 unit to the right when facing 90 degrees - pose: StateSE2 = StateSE2.from_array(np.array([0.0, 0.0, np.pi / 2], dtype=np.float64)) - vector: Vector2D = Vector2D(0.0, 1.0) - result: StateSE2 = translate_se2_along_body_frame(pose, vector) - expected: StateSE2 = StateSE2.from_array(np.array([-1.0, 0.0, np.pi / 2], dtype=np.float64)) - np.testing.assert_array_almost_equal(result.array, expected.array, decimal=self.decimal) - - def test_translate_se2_array_along_body_frame_single_distance(self) -> None: - """Tests translating a SE2 state array along the body frame forward direction.""" - poses: npt.NDArray[np.float64] = np.array([[0.0, 0.0, 0.0], [0.0, 0.0, np.pi / 2]], dtype=np.float64) - distance: Vector2D = Vector2D(1.0, 0.0) - result: npt.NDArray[np.float64] = translate_se2_array_along_body_frame(poses, distance) - expected: npt.NDArray[np.float64] = np.array([[1.0, 0.0, 0.0], [0.0, 1.0, np.pi / 2]], dtype=np.float64) - np.testing.assert_array_almost_equal(result, expected, decimal=self.decimal) - - def test_translate_se2_array_along_body_frame_multiple_distances(self) -> None: - """Tests translating a SE2 state array along the body frame forward direction with different distances.""" - poses: npt.NDArray[np.float64] = np.array([[0.0, 0.0, 0.0], [0.0, 0.0, np.pi]], dtype=np.float64) - distance: Vector2D = Vector2D(2.0, 0.0) - result: npt.NDArray[np.float64] = translate_se2_array_along_body_frame(poses, distance) - expected: npt.NDArray[np.float64] = np.array([[2.0, 0.0, 0.0], [-2.0, 0.0, np.pi]], dtype=np.float64) - np.testing.assert_array_almost_equal(result, expected, decimal=self.decimal) - - def test_translate_se2_array_along_body_frame_lateral(self) -> None: - """Tests translating a SE2 state array along the body frame lateral direction with 90 degree rotation.""" - poses: npt.NDArray[np.float64] = np.array([[0.0, 0.0, 0.0], [0.0, 0.0, np.pi / 2]], dtype=np.float64) - distance: Vector2D = Vector2D(0.0, 1.0) - result: npt.NDArray[np.float64] = translate_se2_array_along_body_frame(poses, distance) - expected: npt.NDArray[np.float64] = np.array([[0.0, 1.0, 0.0], [-1.0, 0.0, np.pi / 2]], dtype=np.float64) - np.testing.assert_array_almost_equal(result, expected, decimal=self.decimal) - - def test_convert_absolute_to_relative_se2_array(self) -> None: - """Tests converting absolute SE2 poses to relative SE2 poses.""" - origin: StateSE2 = StateSE2.from_array(np.array([1.0, 1.0, 0.0], dtype=np.float64)) - absolute_poses: npt.NDArray[np.float64] = np.array([[2.0, 2.0, 0.0], [0.0, 1.0, np.pi / 2]], 
dtype=np.float64) - result: npt.NDArray[np.float64] = convert_absolute_to_relative_se2_array(origin, absolute_poses) - expected: npt.NDArray[np.float64] = np.array([[1.0, 1.0, 0.0], [-1.0, 0.0, np.pi / 2]], dtype=np.float64) - np.testing.assert_array_almost_equal(result, expected, decimal=self.decimal) - - def test_convert_absolute_to_relative_se2_array_with_rotation(self) -> None: - """Tests converting absolute SE2 poses to relative SE2 poses with 90 degree rotation.""" - reference: StateSE2 = StateSE2.from_array(np.array([0.0, 0.0, np.pi / 2], dtype=np.float64)) - absolute_poses: npt.NDArray[np.float64] = np.array([[1.0, 0.0, np.pi / 2]], dtype=np.float64) - result: npt.NDArray[np.float64] = convert_absolute_to_relative_se2_array(reference, absolute_poses) - expected: npt.NDArray[np.float64] = np.array([[0.0, -1.0, 0.0]], dtype=np.float64) - np.testing.assert_array_almost_equal(result, expected, decimal=self.decimal) - - def test_convert_absolute_to_relative_se2_array_identity(self) -> None: - """Tests converting absolute SE2 poses to relative SE2 poses with identity transformation.""" - reference: StateSE2 = StateSE2.from_array(np.array([0.0, 0.0, 0.0], dtype=np.float64)) - absolute_poses: npt.NDArray[np.float64] = np.array([[1.0, 2.0, np.pi / 4]], dtype=np.float64) - result: npt.NDArray[np.float64] = convert_absolute_to_relative_se2_array(reference, absolute_poses) - expected: npt.NDArray[np.float64] = np.array([[1.0, 2.0, np.pi / 4]], dtype=np.float64) - np.testing.assert_array_almost_equal(result, expected, decimal=self.decimal) - - def test_convert_relative_to_absolute_se2_array(self) -> None: - """Tests converting relative SE2 poses to absolute SE2 poses.""" - reference: StateSE2 = StateSE2.from_array(np.array([1.0, 1.0, 0.0], dtype=np.float64)) - relative_poses: npt.NDArray[np.float64] = np.array([[1.0, 1.0, 0.0], [-1.0, 0.0, np.pi / 2]], dtype=np.float64) - result: npt.NDArray[np.float64] = convert_relative_to_absolute_se2_array(reference, relative_poses) - expected: npt.NDArray[np.float64] = np.array([[2.0, 2.0, 0.0], [0.0, 1.0, np.pi / 2]], dtype=np.float64) - np.testing.assert_array_almost_equal(result, expected, decimal=self.decimal) - - def test_convert_relative_to_absolute_se2_array_with_rotation(self) -> None: - """Tests converting relative SE2 poses to absolute SE2 poses with rotation.""" - reference: StateSE2 = StateSE2.from_array(np.array([1.0, 0.0, np.pi / 2], dtype=np.float64)) - relative_poses: npt.NDArray[np.float64] = np.array([[1.0, 0.0, 0.0]], dtype=np.float64) - result: npt.NDArray[np.float64] = convert_relative_to_absolute_se2_array(reference, relative_poses) - expected: npt.NDArray[np.float64] = np.array([[1.0, 1.0, np.pi / 2]], dtype=np.float64) - np.testing.assert_array_almost_equal(result, expected, decimal=self.decimal) - - def test_convert_absolute_to_relative_point_2d_array(self) -> None: - """Tests converting absolute 2D points to relative 2D points.""" - reference: StateSE2 = StateSE2.from_array(np.array([1.0, 1.0, 0.0], dtype=np.float64)) - absolute_points: npt.NDArray[np.float64] = np.array([[2.0, 2.0], [0.0, 1.0]], dtype=np.float64) - result: npt.NDArray[np.float64] = convert_absolute_to_relative_point_2d_array(reference, absolute_points) - expected: npt.NDArray[np.float64] = np.array([[1.0, 1.0], [-1.0, 0.0]], dtype=np.float64) - np.testing.assert_array_almost_equal(result, expected, decimal=self.decimal) - - def test_convert_absolute_to_relative_point_2d_array_with_rotation(self) -> None: - """Tests converting absolute 2D points to relative 2D 
points with 90 degree rotation.""" - reference: StateSE2 = StateSE2.from_array(np.array([0.0, 0.0, np.pi / 2], dtype=np.float64)) - absolute_points: npt.NDArray[np.float64] = np.array([[0.0, 1.0], [1.0, 0.0]], dtype=np.float64) - result: npt.NDArray[np.float64] = convert_absolute_to_relative_point_2d_array(reference, absolute_points) - expected: npt.NDArray[np.float64] = np.array([[1.0, 0.0], [0.0, -1.0]], dtype=np.float64) - np.testing.assert_array_almost_equal(result, expected, decimal=self.decimal) - - def test_convert_absolute_to_relative_point_2d_array_empty(self) -> None: - """Tests converting an empty array of absolute 2D points to relative 2D points.""" - reference: StateSE2 = StateSE2.from_array(np.array([1.0, 1.0, 0.0], dtype=np.float64)) - absolute_points: npt.NDArray[np.float64] = np.array([], dtype=np.float64).reshape(0, 2) - result: npt.NDArray[np.float64] = convert_absolute_to_relative_point_2d_array(reference, absolute_points) - expected: npt.NDArray[np.float64] = np.array([], dtype=np.float64).reshape(0, 2) - np.testing.assert_array_almost_equal(result, expected, decimal=self.decimal) - - def test_convert_relative_to_absolute_point_2d_array(self) -> None: - """Tests converting relative 2D points to absolute 2D points.""" - reference: StateSE2 = StateSE2.from_array(np.array([1.0, 1.0, 0.0], dtype=np.float64)) - relative_points: npt.NDArray[np.float64] = np.array([[1.0, 1.0], [-1.0, 0.0]], dtype=np.float64) - result: npt.NDArray[np.float64] = convert_relative_to_absolute_point_2d_array(reference, relative_points) - expected: npt.NDArray[np.float64] = np.array([[2.0, 2.0], [0.0, 1.0]], dtype=np.float64) - np.testing.assert_array_almost_equal(result, expected, decimal=self.decimal) - - def test_convert_relative_to_absolute_point_2d_array_with_rotation(self) -> None: - """Tests converting relative 2D points to absolute 2D points with 90 degree rotation.""" - reference: StateSE2 = StateSE2.from_array(np.array([1.0, 0.0, np.pi / 2], dtype=np.float64)) - relative_points: npt.NDArray[np.float64] = np.array([[1.0, 0.0], [0.0, 1.0]], dtype=np.float64) - result: npt.NDArray[np.float64] = convert_relative_to_absolute_point_2d_array(reference, relative_points) - expected: npt.NDArray[np.float64] = np.array([[1.0, 1.0], [0.0, 0.0]], dtype=np.float64) - np.testing.assert_array_almost_equal(result, expected, decimal=self.decimal) - - -class TestTransformSE3(unittest.TestCase): - - def setUp(self): - self.decimal = 6 # Decimal places for np.testing.assert_array_almost_equal - self.num_consistency_tests = 10 # Number of random test cases for consistency checks - - def test_translate_se3_along_x(self) -> None: - """Tests translating a SE3 state along the body frame forward direction.""" - pose: StateSE3 = StateSE3.from_array(np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0], dtype=np.float64)) - distance: float = 1.0 - result: StateSE3 = translate_se3_along_x(pose, distance) - expected: StateSE3 = StateSE3.from_array(np.array([1.0, 0.0, 0.0, 0.0, 0.0, 0.0], dtype=np.float64)) - np.testing.assert_array_almost_equal(result.array, expected.array) - - def test_translate_se3_along_x_negative(self) -> None: - """Tests translating a SE3 state along the body frame backward direction.""" - pose: StateSE3 = StateSE3.from_array(np.array([1.0, 2.0, 3.0, 0.0, 0.0, 0.0], dtype=np.float64)) - distance: float = -0.5 - result: StateSE3 = translate_se3_along_x(pose, distance) - expected: StateSE3 = StateSE3.from_array(np.array([0.5, 2.0, 3.0, 0.0, 0.0, 0.0], dtype=np.float64)) - 
np.testing.assert_array_almost_equal(result.array, expected.array)
-
-    def test_translate_se3_along_x_with_rotation(self) -> None:
-        """Tests translating a SE3 state along the body frame forward direction with yaw rotation."""
-        pose: StateSE3 = StateSE3.from_array(np.array([0.0, 0.0, 0.0, 0.0, 0.0, np.pi / 2], dtype=np.float64))
-        distance: float = 2.5
-        result: StateSE3 = translate_se3_along_x(pose, distance)
-        expected: StateSE3 = StateSE3.from_array(np.array([0.0, 2.5, 0.0, 0.0, 0.0, np.pi / 2], dtype=np.float64))
-        np.testing.assert_array_almost_equal(result.array, expected.array)
-
-    def test_translate_se3_along_y(self) -> None:
-        """Tests translating a SE3 state along the body frame lateral direction."""
-        pose: StateSE3 = StateSE3.from_array(np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0], dtype=np.float64))
-        distance: float = 1.0
-        result: StateSE3 = translate_se3_along_y(pose, distance)
-        expected: StateSE3 = StateSE3.from_array(np.array([0.0, 1.0, 0.0, 0.0, 0.0, 0.0], dtype=np.float64))
-        np.testing.assert_array_almost_equal(result.array, expected.array)
-
-    def test_translate_se3_along_y_with_existing_position(self) -> None:
-        """Tests translating a SE3 state along the body frame lateral direction with existing position."""
-        pose: StateSE3 = StateSE3.from_array(np.array([1.0, 2.0, 3.0, 0.0, 0.0, 0.0], dtype=np.float64))
-        distance: float = 2.5
-        result: StateSE3 = translate_se3_along_y(pose, distance)
-        expected: StateSE3 = StateSE3.from_array(np.array([1.0, 4.5, 3.0, 0.0, 0.0, 0.0], dtype=np.float64))
-        np.testing.assert_array_almost_equal(result.array, expected.array)
-
-    def test_translate_se3_along_y_negative(self) -> None:
-        """Tests translating a SE3 state along the body frame lateral direction in the negative direction."""
-        pose: StateSE3 = StateSE3.from_array(np.array([1.0, 2.0, 3.0, 0.0, 0.0, 0.0], dtype=np.float64))
-        distance: float = -1.0
-        result: StateSE3 = translate_se3_along_y(pose, distance)
-        expected: StateSE3 = StateSE3.from_array(np.array([1.0, 1.0, 3.0, 0.0, 0.0, 0.0], dtype=np.float64))
-        np.testing.assert_array_almost_equal(result.array, expected.array)
-
-    def test_translate_se3_along_y_with_rotation(self) -> None:
-        """Tests translating a SE3 state along the body frame lateral direction with roll rotation."""
-        pose: StateSE3 = StateSE3.from_array(np.array([1.0, 2.0, 3.0, np.pi / 2, 0.0, 0.0], dtype=np.float64))
-        distance: float = -1.0
-        result: StateSE3 = translate_se3_along_y(pose, distance)
-        expected: StateSE3 = StateSE3.from_array(np.array([1.0, 2.0, 2.0, np.pi / 2, 0.0, 0.0], dtype=np.float64))
-        np.testing.assert_array_almost_equal(result.array, expected.array)
-
-    def test_translate_se3_along_z(self) -> None:
-        """Tests translating a SE3 state along the body frame vertical direction."""
-        pose: StateSE3 = StateSE3.from_array(np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0], dtype=np.float64))
-        distance: float = 1.0
-        result: StateSE3 = translate_se3_along_z(pose, distance)
-        expected: StateSE3 = StateSE3.from_array(np.array([0.0, 0.0, 1.0, 0.0, 0.0, 0.0], dtype=np.float64))
-        np.testing.assert_array_almost_equal(result.array, expected.array)
-
-    def test_translate_se3_along_z_large_distance(self) -> None:
-        """Tests translating a SE3 state along the body frame vertical direction with a large distance."""
-        pose: StateSE3 = StateSE3.from_array(np.array([0.0, 0.0, 5.0, 0.0, 0.0, 0.0], dtype=np.float64))
-        distance: float = 10.0
-        result: StateSE3 = translate_se3_along_z(pose, distance)
-        expected: StateSE3 = StateSE3.from_array(np.array([0.0, 0.0, 15.0, 0.0, 0.0, 0.0], dtype=np.float64))
-        np.testing.assert_array_almost_equal(result.array, expected.array)
-
-    def test_translate_se3_along_z_negative(self) -> None:
-        """Tests translating a SE3 state along the body frame vertical direction in the negative direction."""
-        pose: StateSE3 = StateSE3.from_array(np.array([1.0, 2.0, 5.0, 0.0, 0.0, 0.0], dtype=np.float64))
-        distance: float = -2.0
-        result: StateSE3 = translate_se3_along_z(pose, distance)
-        expected: StateSE3 = StateSE3.from_array(np.array([1.0, 2.0, 3.0, 0.0, 0.0, 0.0], dtype=np.float64))
-        np.testing.assert_array_almost_equal(result.array, expected.array)
-
-    def test_translate_se3_along_z_with_rotation(self) -> None:
-        """Tests translating a SE3 state along the body frame vertical direction with pitch rotation."""
-        pose: StateSE3 = StateSE3.from_array(np.array([1.0, 2.0, 3.0, 0.0, np.pi / 2, 0.0], dtype=np.float64))
-        distance: float = 2.0
-        result: StateSE3 = translate_se3_along_z(pose, distance)
-        expected: StateSE3 = StateSE3.from_array(np.array([3.0, 2.0, 3.0, 0.0, np.pi / 2, 0.0], dtype=np.float64))
-        np.testing.assert_array_almost_equal(result.array, expected.array)
-
-    def test_translate_se3_along_body_frame(self) -> None:
-        """Tests translating a SE3 state along the body frame forward direction."""
-        pose: StateSE3 = StateSE3.from_array(np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0], dtype=np.float64))
-        translation: Vector3D = Vector3D.from_array(np.array([1.0, 0.0, 0.0], dtype=np.float64))
-        result: StateSE3 = translate_se3_along_body_frame(pose, translation)
-        expected: StateSE3 = StateSE3.from_array(np.array([1.0, 0.0, 0.0, 0.0, 0.0, 0.0], dtype=np.float64))
-        np.testing.assert_array_almost_equal(result.array, expected.array)
-
-    def test_translate_se3_along_body_frame_multiple_axes(self) -> None:
-        """Tests translating a SE3 state along the body frame in multiple axes."""
-        pose: StateSE3 = StateSE3.from_array(np.array([1.0, 2.0, 3.0, 0.0, 0.0, 0.0], dtype=np.float64))
-        translation: Vector3D = Vector3D.from_array(np.array([0.5, -1.0, 2.0], dtype=np.float64))
-        result: StateSE3 = translate_se3_along_body_frame(pose, translation)
-        expected: StateSE3 = StateSE3.from_array(np.array([1.5, 1.0, 5.0, 0.0, 0.0, 0.0], dtype=np.float64))
-        np.testing.assert_array_almost_equal(result.array, expected.array)
-
-    def test_translate_se3_along_body_frame_zero_translation(self) -> None:
-        """Tests translating a SE3 state along the body frame with zero translation."""
-        pose: StateSE3 = StateSE3.from_array(np.array([1.0, 2.0, 3.0, 0.0, 0.0, 0.0], dtype=np.float64))
-        translation: Vector3D = Vector3D.from_array(np.array([0.0, 0.0, 0.0], dtype=np.float64))
-        result: StateSE3 = translate_se3_along_body_frame(pose, translation)
-        expected: StateSE3 = StateSE3.from_array(np.array([1.0, 2.0, 3.0, 0.0, 0.0, 0.0], dtype=np.float64))
-        np.testing.assert_array_almost_equal(result.array, expected.array)
-
-    def test_translate_se3_along_body_frame_with_rotation(self) -> None:
-        """Tests translating a SE3 state along the body frame forward direction with yaw rotation."""
-        # Rotate 90 degrees around z-axis, then translate 1 unit along body x-axis
-        pose: StateSE3 = StateSE3.from_array(np.array([0.0, 0.0, 0.0, 0.0, 0.0, np.pi / 2], dtype=np.float64))
-        translation: Vector3D = Vector3D.from_array(np.array([1.0, 0.0, 0.0], dtype=np.float64))
-        result: StateSE3 = translate_se3_along_body_frame(pose, translation)
-        # Should move in +Y direction in world frame
-        expected: StateSE3 = StateSE3.from_array(np.array([0.0, 1.0, 0.0, 0.0, 0.0, np.pi / 2], dtype=np.float64))
-        np.testing.assert_array_almost_equal(result.array, expected.array, decimal=self.decimal)
-
-    def test_translate_se3_along_body_frame_consistency(self) -> None:
-        """Tests consistency between translate_se3_along_body_frame and axis-specific translation functions."""
-
-        for _ in range(self.num_consistency_tests):
-            # Generate random parameters
-            x_distance: float = np.random.uniform(-10.0, 10.0)
-            y_distance: float = np.random.uniform(-10.0, 10.0)
-            z_distance: float = np.random.uniform(-10.0, 10.0)
-
-            start_x: float = np.random.uniform(-5.0, 5.0)
-            start_y: float = np.random.uniform(-5.0, 5.0)
-            start_z: float = np.random.uniform(-5.0, 5.0)
-
-            start_roll: float = np.random.uniform(-np.pi, np.pi)
-            start_pitch: float = np.random.uniform(-np.pi, np.pi)
-            start_yaw: float = np.random.uniform(-np.pi, np.pi)
-
-            original_pose: StateSE3 = StateSE3.from_array(
-                np.array(
-                    [
-                        start_x,
-                        start_y,
-                        start_z,
-                        start_roll,
-                        start_pitch,
-                        start_yaw,
-                    ],
-                    dtype=np.float64,
-                )
-            )
-
-            # x-axis translation
-            translation_x: Vector3D = Vector3D.from_array(np.array([x_distance, 0.0, 0.0], dtype=np.float64))
-            result_body_frame_x: StateSE3 = translate_se3_along_body_frame(original_pose, translation_x)
-            result_axis_x: StateSE3 = translate_se3_along_x(original_pose, x_distance)
-            np.testing.assert_array_almost_equal(result_body_frame_x.array, result_axis_x.array, decimal=self.decimal)
-
-            # y-axis translation
-            translation_y: Vector3D = Vector3D.from_array(np.array([0.0, y_distance, 0.0], dtype=np.float64))
-            result_body_frame_y: StateSE3 = translate_se3_along_body_frame(original_pose, translation_y)
-            result_axis_y: StateSE3 = translate_se3_along_y(original_pose, y_distance)
-            np.testing.assert_array_almost_equal(result_body_frame_y.array, result_axis_y.array, decimal=self.decimal)
-
-            # z-axis translation
-            translation_z: Vector3D = Vector3D.from_array(np.array([0.0, 0.0, z_distance], dtype=np.float64))
-            result_body_frame_z: StateSE3 = translate_se3_along_body_frame(original_pose, translation_z)
-            result_axis_z: StateSE3 = translate_se3_along_z(original_pose, z_distance)
-            np.testing.assert_array_almost_equal(result_body_frame_z.array, result_axis_z.array, decimal=self.decimal)
-
-            # all axes translation
-            translation_all: Vector3D = Vector3D.from_array(
-                np.array([x_distance, y_distance, z_distance], dtype=np.float64)
-            )
-            result_body_frame_all: StateSE3 = translate_se3_along_body_frame(original_pose, translation_all)
-            intermediate_pose: StateSE3 = translate_se3_along_x(original_pose, x_distance)
-            intermediate_pose = translate_se3_along_y(intermediate_pose, y_distance)
-            result_axis_all: StateSE3 = translate_se3_along_z(intermediate_pose, z_distance)
-            np.testing.assert_array_almost_equal(
-                result_body_frame_all.array, result_axis_all.array, decimal=self.decimal
-            )
-
-    def test_convert_absolute_to_relative_se3_array(self) -> None:
-        """Tests converting absolute SE3 poses to relative SE3 poses."""
-        reference: StateSE3 = StateSE3.from_array(np.array([1.0, 1.0, 1.0, 0.0, 0.0, 0.0], dtype=np.float64))
-        absolute_poses: npt.NDArray[np.float64] = np.array(
-            [
-                [2.0, 2.0, 2.0, 0.0, 0.0, 0.0],
-                [0.0, 1.0, 0.0, 0.0, 0.0, 0.0],
-            ],
-            dtype=np.float64,
-        )
-        result: npt.NDArray[np.float64] = convert_absolute_to_relative_se3_array(reference, absolute_poses)
-        expected: npt.NDArray[np.float64] = np.array(
-            [
-                [1.0, 1.0, 1.0, 0.0, 0.0, 0.0],
-                [-1.0, 0.0, -1.0, 0.0, 0.0, 0.0],
-            ],
-            dtype=np.float64,
-        )
-        np.testing.assert_array_almost_equal(result, expected)
-
-    def test_convert_absolute_to_relative_se3_array_single_pose(self) -> None:
-        """Tests converting a single absolute SE3 pose to a relative SE3 pose."""
-        reference: StateSE3 = StateSE3.from_array(np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0], dtype=np.float64))
-        absolute_poses: npt.NDArray[np.float64] = np.array([[1.0, 2.0, 3.0, 0.0, 0.0, 0.0]], dtype=np.float64)
-        result: npt.NDArray[np.float64] = convert_absolute_to_relative_se3_array(reference, absolute_poses)
-        expected: npt.NDArray[np.float64] = np.array([[1.0, 2.0, 3.0, 0.0, 0.0, 0.0]], dtype=np.float64)
-        np.testing.assert_array_almost_equal(result, expected)
-
-    def test_convert_absolute_to_relative_se3_array_with_rotation(self) -> None:
-        """Tests converting absolute SE3 poses to relative SE3 poses with 90 degree yaw rotation."""
-        reference: StateSE3 = StateSE3.from_array(np.array([0.0, 0.0, 0.0, 0.0, 0.0, np.pi / 2], dtype=np.float64))
-        absolute_poses: npt.NDArray[np.float64] = np.array([[1.0, 0.0, 0.0, 0.0, 0.0, 0.0]], dtype=np.float64)
-        result: npt.NDArray[np.float64] = convert_absolute_to_relative_se3_array(reference, absolute_poses)
-        expected: npt.NDArray[np.float64] = np.array([[0.0, -1.0, 0.0, 0.0, 0.0, -np.pi / 2]], dtype=np.float64)
-        np.testing.assert_array_almost_equal(result, expected, decimal=self.decimal)
-
-    def test_convert_relative_to_absolute_se3_array(self) -> None:
-        """Tests converting relative SE3 poses to absolute SE3 poses."""
-        reference: StateSE3 = StateSE3.from_array(np.array([1.0, 1.0, 1.0, 0.0, 0.0, 0.0], dtype=np.float64))
-        relative_poses: npt.NDArray[np.float64] = np.array(
-            [
-                [1.0, 1.0, 1.0, 0.0, 0.0, 0.0],
-                [-1.0, 0.0, -1.0, 0.0, 0.0, 0.0],
-            ],
-            dtype=np.float64,
-        )
-        result: npt.NDArray[np.float64] = convert_relative_to_absolute_se3_array(reference, relative_poses)
-        expected: npt.NDArray[np.float64] = np.array(
-            [
-                [2.0, 2.0, 2.0, 0.0, 0.0, 0.0],
-                [0.0, 1.0, 0.0, 0.0, 0.0, 0.0],
-            ],
-            dtype=np.float64,
-        )
-        np.testing.assert_array_almost_equal(result, expected)
-
-    def test_convert_relative_to_absolute_se3_array_with_rotation(self) -> None:
-        """Tests converting relative SE3 poses to absolute SE3 poses with 90 degree yaw rotation."""
-        reference: StateSE3 = StateSE3.from_array(np.array([1.0, 0.0, 0.0, 0.0, 0.0, np.pi / 2], dtype=np.float64))
-        relative_poses: npt.NDArray[np.float64] = np.array([[1.0, 0.0, 0.0, 0.0, 0.0, 0.0]], dtype=np.float64)
-        result: npt.NDArray[np.float64] = convert_relative_to_absolute_se3_array(reference, relative_poses)
-        expected: npt.NDArray[np.float64] = np.array([[1.0, 1.0, 0.0, 0.0, 0.0, np.pi / 2]], dtype=np.float64)
-        np.testing.assert_array_almost_equal(result, expected, decimal=self.decimal)
-
-    def test_convert_absolute_to_relative_points_3d_array(self) -> None:
-        """Tests converting absolute 3D points to relative 3D points."""
-        reference: StateSE3 = StateSE3.from_array(np.array([1.0, 1.0, 1.0, 0.0, 0.0, 0.0], dtype=np.float64))
-        absolute_points: npt.NDArray[np.float64] = np.array([[2.0, 2.0, 2.0], [0.0, 1.0, 0.0]], dtype=np.float64)
-        result: npt.NDArray[np.float64] = convert_absolute_to_relative_points_3d_array(reference, absolute_points)
-        expected: npt.NDArray[np.float64] = np.array([[1.0, 1.0, 1.0], [-1.0, 0.0, -1.0]], dtype=np.float64)
-        np.testing.assert_array_almost_equal(result, expected)
-
-    def test_convert_absolute_to_relative_points_3d_array_origin_reference(self) -> None:
-        """Tests converting absolute 3D points to relative 3D points with origin reference."""
-        reference: StateSE3 = StateSE3.from_array(np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0], dtype=np.float64))
-        absolute_points: npt.NDArray[np.float64] = np.array([[1.0, 2.0, 3.0], [-1.0, -2.0, -3.0]], dtype=np.float64)
-        result: npt.NDArray[np.float64] = convert_absolute_to_relative_points_3d_array(reference, absolute_points)
-        expected: npt.NDArray[np.float64] = np.array([[1.0, 2.0, 3.0], [-1.0, -2.0, -3.0]], dtype=np.float64)
-        np.testing.assert_array_almost_equal(result, expected)
-
-    def test_convert_absolute_to_relative_points_3d_array_with_rotation(self) -> None:
-        """Tests converting absolute 3D points to relative 3D points with 90 degree yaw rotation."""
-        reference: StateSE3 = StateSE3.from_array(np.array([0.0, 0.0, 0.0, 0.0, 0.0, np.pi / 2], dtype=np.float64))
-        absolute_points: npt.NDArray[np.float64] = np.array([[1.0, 0.0, 0.0], [0.0, 1.0, 1.0]], dtype=np.float64)
-        result: npt.NDArray[np.float64] = convert_absolute_to_relative_points_3d_array(reference, absolute_points)
-        expected: npt.NDArray[np.float64] = np.array([[0.0, -1.0, 0.0], [1.0, 0.0, 1.0]], dtype=np.float64)
-        np.testing.assert_array_almost_equal(result, expected, decimal=self.decimal)
-
-    def test_convert_relative_to_absolute_points_3d_array(self) -> None:
-        """Tests converting relative 3D points to absolute 3D points."""
-        reference: StateSE3 = StateSE3.from_array(np.array([1.0, 1.0, 1.0, 0.0, 0.0, 0.0], dtype=np.float64))
-        relative_points: npt.NDArray[np.float64] = np.array([[1.0, 1.0, 1.0], [-1.0, 0.0, -1.0]], dtype=np.float64)
-        result: npt.NDArray[np.float64] = convert_relative_to_absolute_points_3d_array(reference, relative_points)
-        expected: npt.NDArray[np.float64] = np.array([[2.0, 2.0, 2.0], [0.0, 1.0, 0.0]], dtype=np.float64)
-        np.testing.assert_array_almost_equal(result, expected)
-
-    def test_convert_relative_to_absolute_points_3d_array_empty(self) -> None:
-        """Tests converting an empty array of relative 3D points to absolute 3D points."""
-        reference: StateSE3 = StateSE3.from_array(np.array([1.0, 1.0, 1.0, 0.0, 0.0, 0.0], dtype=np.float64))
-        relative_points: npt.NDArray[np.float64] = np.array([], dtype=np.float64).reshape(0, 3)
-        result: npt.NDArray[np.float64] = convert_relative_to_absolute_points_3d_array(reference, relative_points)
-        expected: npt.NDArray[np.float64] = np.array([], dtype=np.float64).reshape(0, 3)
-        np.testing.assert_array_almost_equal(result, expected)
-
-    def test_convert_relative_to_absolute_points_3d_array_with_rotation(self) -> None:
-        """Tests converting relative 3D points to absolute 3D points with 90 degree yaw rotation."""
-        reference: StateSE3 = StateSE3.from_array(np.array([1.0, 0.0, 0.0, 0.0, 0.0, np.pi / 2], dtype=np.float64))
-        relative_points: npt.NDArray[np.float64] = np.array([[1.0, 0.0, 0.0], [0.0, 1.0, 1.0]], dtype=np.float64)
-        result: npt.NDArray[np.float64] = convert_relative_to_absolute_points_3d_array(reference, relative_points)
-        expected: npt.NDArray[np.float64] = np.array([[1.0, 1.0, 0.0], [0.0, 0.0, 1.0]], dtype=np.float64)
-        np.testing.assert_array_almost_equal(result, expected, decimal=self.decimal)
-
-
-class TestTransformConsistency(unittest.TestCase):
-    """Tests to ensure consistency between different transformation functions."""
-
-    def setUp(self):
-        self.decimal = 4  # Decimal places for np.testing.assert_array_almost_equal
-        self.num_consistency_tests = 10  # Number of random test cases for consistency checks
-
-        self.max_pose_xyz = 100.0
-        self.min_random_poses = 1
-        self.max_random_poses = 20
-
-    def _get_random_se2_array(self, size: int) -> npt.NDArray[np.float64]:
-        """Generate a random SE2 pose"""
-        random_se2_array = np.random.uniform(-self.max_pose_xyz, self.max_pose_xyz, (size, len(StateSE2Index)))
-        random_se2_array[:, StateSE2Index.YAW] = np.random.uniform(-np.pi, np.pi, size)  # yaw angles
-        return random_se2_array
-
-    def _get_random_se3_array(self, size: int) -> npt.NDArray[np.float64]:
-        """Generate a random SE3 poses"""
-        random_se3_array = np.zeros((size, len(StateSE3Index)), dtype=np.float64)
-        random_se3_array[:, StateSE3Index.XYZ] = np.random.uniform(-self.max_pose_xyz, self.max_pose_xyz, (size, 3))
-        random_se3_array[:, StateSE3Index.YAW] = np.random.uniform(-np.pi, np.pi, size)
-        random_se3_array[:, StateSE3Index.PITCH] = np.random.uniform(-np.pi / 2, np.pi / 2, size)
-        random_se3_array[:, StateSE3Index.ROLL] = np.random.uniform(-np.pi, np.pi, size)
-
-        return random_se3_array
-
-    def test_se2_absolute_relative_conversion_consistency(self) -> None:
-        """Test that converting absolute->relative->absolute returns original poses"""
-        for _ in range(self.num_consistency_tests):
-            # Generate random reference pose
-            reference = StateSE2.from_array(self._get_random_se2_array(1)[0])
-
-            # Generate random absolute poses
-            num_poses = np.random.randint(self.min_random_poses, self.max_random_poses)
-            absolute_poses = self._get_random_se2_array(num_poses)
-
-            # Convert absolute -> relative -> absolute
-            relative_poses = convert_absolute_to_relative_se2_array(reference, absolute_poses)
-            recovered_absolute = convert_relative_to_absolute_se2_array(reference, relative_poses)
-
-            np.testing.assert_array_almost_equal(absolute_poses, recovered_absolute, decimal=self.decimal)
-
-    def test_se2_points_absolute_relative_conversion_consistency(self) -> None:
-        """Test that converting absolute->relative->absolute returns original points"""
-        for _ in range(self.num_consistency_tests):
-            # Generate random reference pose
-            reference = StateSE2.from_array(self._get_random_se2_array(1)[0])
-
-            # Generate random absolute points
-            num_points = np.random.randint(self.min_random_poses, self.max_random_poses)
-            absolute_points = self._get_random_se2_array(num_points)[:, StateSE2Index.XY]
-
-            # Convert absolute -> relative -> absolute
-            relative_points = convert_absolute_to_relative_point_2d_array(reference, absolute_points)
-            recovered_absolute = convert_relative_to_absolute_point_2d_array(reference, relative_points)
-
-            np.testing.assert_array_almost_equal(absolute_points, recovered_absolute, decimal=self.decimal)
-
-    def test_se2_points_consistency(self) -> None:
-        """Test whether SE2 point and pose conversions are consistent"""
-        for _ in range(self.num_consistency_tests):
-            # Generate random reference pose
-            reference = StateSE2.from_array(self._get_random_se2_array(1)[0])
-
-            # Generate random absolute points
-            num_poses = np.random.randint(self.min_random_poses, self.max_random_poses)
-            absolute_se2 = self._get_random_se2_array(num_poses)
-
-            # Convert absolute -> relative -> absolute
-            relative_se2 = convert_absolute_to_relative_se2_array(reference, absolute_se2)
-            relative_points = convert_absolute_to_relative_point_2d_array(
-                reference, absolute_se2[..., StateSE2Index.XY]
-            )
-            np.testing.assert_array_almost_equal(
-                relative_se2[..., StateSE2Index.XY], relative_points, decimal=self.decimal
-            )
-
-            recovered_absolute_se2 = convert_relative_to_absolute_se2_array(reference, relative_se2)
-            absolute_points = convert_relative_to_absolute_point_2d_array(reference, relative_points)
-            np.testing.assert_array_almost_equal(
-                recovered_absolute_se2[..., StateSE2Index.XY], absolute_points, decimal=self.decimal
-            )
-
-    def test_se2_translation_consistency(self) -> None:
-        """Test that SE2 translations are consistent between different methods"""
-        for _ in range(self.num_consistency_tests):
-            # Generate random pose
-            pose = StateSE2.from_array(self._get_random_se2_array(1)[0])
-
-            # Generate random distances
-            dx = np.random.uniform(-10.0, 10.0)
-            dy = np.random.uniform(-10.0, 10.0)
-
-            # Test x-translation consistency
-            result_x_direct = translate_se2_along_x(pose, dx)
-            result_x_body = translate_se2_along_body_frame(pose, Vector2D(dx, 0.0))
-            np.testing.assert_array_almost_equal(result_x_direct.array, result_x_body.array, decimal=self.decimal)
-
-            # Test y-translation consistency
-            result_y_direct = translate_se2_along_y(pose, dy)
-            result_y_body = translate_se2_along_body_frame(pose, Vector2D(0.0, dy))
-            np.testing.assert_array_almost_equal(result_y_direct.array, result_y_body.array, decimal=self.decimal)
-
-            # Test combined translation
-            result_xy_body = translate_se2_along_body_frame(pose, Vector2D(dx, dy))
-            result_xy_sequential = translate_se2_along_y(translate_se2_along_x(pose, dx), dy)
-            np.testing.assert_array_almost_equal(result_xy_body.array, result_xy_sequential.array, decimal=self.decimal)
-
-    def test_se3_absolute_relative_conversion_consistency(self) -> None:
-        """Test that converting absolute->relative->absolute returns original poses"""
-        for _ in range(self.num_consistency_tests):
-            # Generate random reference pose
-            reference = StateSE3.from_array(self._get_random_se3_array(1)[0])
-
-            # Generate random absolute poses
-            num_poses = np.random.randint(self.min_random_poses, self.max_random_poses)
-            absolute_poses = self._get_random_se3_array(num_poses)
-
-            # Convert absolute -> relative -> absolute
-            relative_poses = convert_absolute_to_relative_se3_array(reference, absolute_poses)
-            recovered_absolute = convert_relative_to_absolute_se3_array(reference, relative_poses)
-
-            np.testing.assert_array_almost_equal(absolute_poses, recovered_absolute, decimal=self.decimal)
-
-    def test_se3_points_absolute_relative_conversion_consistency(self) -> None:
-        """Test that converting absolute->relative->absolute returns original points"""
-        for _ in range(self.num_consistency_tests):
-            # Generate random reference pose
-            reference = StateSE3.from_array(self._get_random_se3_array(1)[0])
-
-            # Generate random absolute points
-            num_points = np.random.randint(self.min_random_poses, self.max_random_poses)
-            absolute_points = self._get_random_se3_array(num_points)[:, StateSE3Index.XYZ]
-
-            # Convert absolute -> relative -> absolute
-            relative_points = convert_absolute_to_relative_points_3d_array(reference, absolute_points)
-            recovered_absolute = convert_relative_to_absolute_points_3d_array(reference, relative_points)
-
-            np.testing.assert_array_almost_equal(absolute_points, recovered_absolute, decimal=self.decimal)
-
-    def test_se3_points_consistency(self) -> None:
-        """Test whether SE3 point and pose conversions are consistent"""
-        for _ in range(self.num_consistency_tests):
-            # Generate random reference pose
-            reference = StateSE3.from_array(self._get_random_se3_array(1)[0])
-
-            # Generate random absolute points
-            num_poses = np.random.randint(self.min_random_poses, self.max_random_poses)
-            absolute_se3 = self._get_random_se3_array(num_poses)
-
-            # Convert absolute -> relative -> absolute
-            relative_se3 = convert_absolute_to_relative_se3_array(reference, absolute_se3)
-            relative_points = convert_absolute_to_relative_points_3d_array(
-                reference, absolute_se3[..., StateSE3Index.XYZ]
-            )
-            np.testing.assert_array_almost_equal(
-                relative_se3[..., StateSE3Index.XYZ], relative_points, decimal=self.decimal
-            )
-
-            recovered_absolute_se3 = convert_relative_to_absolute_se3_array(reference, relative_se3)
-            absolute_points = convert_relative_to_absolute_points_3d_array(reference, relative_points)
-            np.testing.assert_array_almost_equal(
-                recovered_absolute_se3[..., StateSE3Index.XYZ], absolute_points, decimal=self.decimal
-            )
-
-    def test_se2_se3_translation_along_body_consistency(self) -> None:
-        """Test that SE2 and SE3 translations are consistent when SE3 has no z-component or rotation"""
-        for _ in range(self.num_consistency_tests):
-            # Create equivalent SE2 and SE3 poses (SE3 with z=0 and no rotations except yaw)
-
-            pose_se2 = StateSE2.from_array(self._get_random_se2_array(1)[0])
-            pose_se3 = StateSE3.from_array(
-                np.array([pose_se2.x, pose_se2.y, 0.0, 0.0, 0.0, pose_se2.yaw], dtype=np.float64)
-            )
-
-            # Test translation along x-axis
-            dx = np.random.uniform(-5.0, 5.0)
-            translated_se2_x = translate_se2_along_body_frame(pose_se2, Vector2D(dx, 0.0))
-            translated_se3_x = translate_se3_along_x(pose_se3, dx)
-
-            np.testing.assert_array_almost_equal(
-                translated_se2_x.array[StateSE2Index.XY], translated_se3_x.array[StateSE3Index.XY], decimal=self.decimal
-            )
-            np.testing.assert_almost_equal(
-                translated_se2_x.array[StateSE2Index.YAW],
-                translated_se3_x.array[StateSE3Index.YAW],
-                decimal=self.decimal,
-            )
-
-            # Test translation along y-axis
-            dy = np.random.uniform(-5.0, 5.0)
-            translated_se2_y = translate_se2_along_body_frame(pose_se2, Vector2D(0.0, dy))
-            translated_se3_y = translate_se3_along_y(pose_se3, dy)
-
-            np.testing.assert_array_almost_equal(
-                translated_se2_y.array[StateSE2Index.XY], translated_se3_y.array[StateSE3Index.XY], decimal=self.decimal
-            )
-            np.testing.assert_almost_equal(
-                translated_se2_y.array[StateSE2Index.YAW],
-                translated_se3_y.array[StateSE3Index.YAW],
-                decimal=self.decimal,
-            )
-
-            # Test translation along x- and y-axis
-            dx = np.random.uniform(-5.0, 5.0)
-            dy = np.random.uniform(-5.0, 5.0)
-            translated_se2_xy = translate_se2_along_body_frame(pose_se2, Vector2D(dx, dy))
-            translated_se3_xy = translate_se3_along_body_frame(pose_se3, Vector3D(dx, dy, 0.0))
-            np.testing.assert_array_almost_equal(
-                translated_se2_xy.array[StateSE2Index.XY],
-                translated_se3_xy.array[StateSE3Index.XY],
-                decimal=self.decimal,
-            )
-            np.testing.assert_almost_equal(
-                translated_se2_xy.array[StateSE2Index.YAW],
-                translated_se3_xy.array[StateSE3Index.YAW],
-                decimal=self.decimal,
-            )
-
-    def test_se2_se3_point_conversion_consistency(self) -> None:
-        """Test that SE2 and SE3 point conversions are consistent for 2D points embedded in 3D"""
-        for _ in range(self.num_consistency_tests):
-            # Create equivalent SE2 and SE3 reference poses
-            x = np.random.uniform(-10.0, 10.0)
-            y = np.random.uniform(-10.0, 10.0)
-            yaw = np.random.uniform(-np.pi, np.pi)
-
-            reference_se2 = StateSE2.from_array(np.array([x, y, yaw], dtype=np.float64))
-            reference_se3 = StateSE3.from_array(np.array([x, y, 0.0, 0.0, 0.0, yaw], dtype=np.float64))
-
-            # Generate 2D points and embed them in 3D with z=0
-            num_points = np.random.randint(1, 8)
-            points_2d = np.random.uniform(-20.0, 20.0, (num_points, len(Point2DIndex)))
-            points_3d = np.column_stack([points_2d, np.zeros(num_points)])
-
-            # Convert using SE2 functions
-            relative_2d = convert_absolute_to_relative_point_2d_array(reference_se2, points_2d)
-            absolute_2d_recovered = convert_relative_to_absolute_point_2d_array(reference_se2, relative_2d)
-
-            # Convert using SE3 functions
-            relative_3d = convert_absolute_to_relative_points_3d_array(reference_se3, points_3d)
-            absolute_3d_recovered = convert_relative_to_absolute_points_3d_array(reference_se3, relative_3d)
-
-            # Check that SE2 and SE3 conversions are consistent for the x,y components
-            np.testing.assert_array_almost_equal(relative_2d, relative_3d[:, Point3DIndex.XY], decimal=self.decimal)
-            np.testing.assert_array_almost_equal(
-                absolute_2d_recovered, absolute_3d_recovered[:, Point3DIndex.XY], decimal=self.decimal
-            )
-
-            # Check that z-components remain zero
-            np.testing.assert_array_almost_equal(
-                relative_3d[:, Point3DIndex.Z], np.zeros(num_points), decimal=self.decimal
-            )
-            np.testing.assert_array_almost_equal(
-                absolute_3d_recovered[:, Point3DIndex.Z], np.zeros(num_points), decimal=self.decimal
-            )
-
-    def test_se2_se3_pose_conversion_consistency(self) -> None:
-        """Test that SE2 and SE3 pose conversions are consistent for 2D points embedded in 3D"""
-        for _ in range(self.num_consistency_tests):
-            # Create equivalent SE2 and SE3 reference poses
-            x = np.random.uniform(-10.0, 10.0)
-            y = np.random.uniform(-10.0, 10.0)
-            yaw = np.random.uniform(-np.pi, np.pi)
-
-            reference_se2 = StateSE2.from_array(np.array([x, y, yaw], dtype=np.float64))
-            reference_se3 = StateSE3.from_array(np.array([x, y, 0.0, 0.0, 0.0, yaw], dtype=np.float64))
-
-            # Generate 2D poses and embed them in 3D with z=0 and zero roll/pitch
-            num_poses = np.random.randint(1, 8)
-            pose_2d = self._get_random_se2_array(num_poses)
-            pose_3d = np.zeros((num_poses, len(StateSE3Index)), dtype=np.float64)
-            pose_3d[:, StateSE3Index.XY] = pose_2d[:, StateSE2Index.XY]
-            pose_3d[:, StateSE3Index.YAW] = pose_2d[:, StateSE2Index.YAW]
-
-            # Convert using SE2 functions
-            relative_se2 = convert_absolute_to_relative_se2_array(reference_se2, pose_2d)
-            absolute_se2_recovered = convert_relative_to_absolute_se2_array(reference_se2, relative_se2)
-
-            # Convert using SE3 functions
-            relative_se3 = convert_absolute_to_relative_se3_array(reference_se3, pose_3d)
-            absolute_se3_recovered = convert_relative_to_absolute_se3_array(reference_se3, relative_se3)
-
-            # Check that SE2 and SE3 conversions are consistent for the x,y components
-            np.testing.assert_array_almost_equal(
-                relative_se2[:, StateSE2Index.XY], relative_se3[:, StateSE3Index.XY], decimal=self.decimal
-            )
-            np.testing.assert_array_almost_equal(
-                absolute_se2_recovered[:, StateSE2Index.XY],
-                absolute_se3_recovered[:, StateSE3Index.XY],
-                decimal=self.decimal,
-            )
-            # Check that SE2 and SE3 conversions are consistent for the yaw component
-            np.testing.assert_array_almost_equal(
-                relative_se2[:, StateSE2Index.YAW], relative_se3[:, StateSE3Index.YAW], decimal=self.decimal
-            )
-            np.testing.assert_array_almost_equal(
-                absolute_se2_recovered[:, StateSE2Index.YAW],
-                absolute_se3_recovered[:, StateSE3Index.YAW],
-                decimal=self.decimal,
-            )
-
-            # Check that z-components remain zero
-            np.testing.assert_array_almost_equal(
-                relative_se3[:, Point3DIndex.Z], np.zeros(num_poses), decimal=self.decimal
-            )
-            np.testing.assert_array_almost_equal(
-                absolute_se3_recovered[:, Point3DIndex.Z], np.zeros(num_poses), decimal=self.decimal
-            )
-
-    def test_se2_array_translation_consistency(self) -> None:
-        """Test that SE2 array translation is consistent with single pose translation"""
-        for _ in range(self.num_consistency_tests):
-            # Generate random poses
-            num_poses = np.random.randint(self.min_random_poses, self.max_random_poses)
-            poses_array = self._get_random_se2_array(num_poses)
-
-            # Generate random translation
-            dx = np.random.uniform(-5.0, 5.0)
-            dy = np.random.uniform(-5.0, 5.0)
-            translation = Vector2D(dx, dy)
-
-            # Translate using array function
-            result_array = translate_se2_array_along_body_frame(poses_array, translation)
-
-            # Translate each pose individually
-            result_individual = np.zeros_like(poses_array)
-            for i in range(num_poses):
-                pose = StateSE2.from_array(poses_array[i])
-                translated = translate_se2_along_body_frame(pose, translation)
-                result_individual[i] = translated.array
-
-            np.testing.assert_array_almost_equal(result_array, result_individual, decimal=self.decimal)
-
-    # def test_transform_empty_arrays(self) -> None:
-    #     """Test that transform functions handle empty arrays correctly"""
-    #     reference_se2 = StateSE2.from_array(np.array([1.0, 2.0, np.pi / 4], dtype=np.float64))
-    #     reference_se3 = StateSE3.from_array(np.array([1.0, 2.0, 3.0, 0.1, 0.2, 0.3], dtype=np.float64))
-
-    #     # Test SE2 empty arrays
-    #     empty_se2_poses = np.array([], dtype=np.float64).reshape(0, len(StateSE2Index))
-    #     empty_2d_points = np.array([], dtype=np.float64).reshape(0, len(Point2DIndex))
-
-    #     result_se2_poses = convert_absolute_to_relative_se2_array(reference_se2, empty_se2_poses)
-    #     result_2d_points = convert_absolute_to_relative_point_2d_array(reference_se2, empty_2d_points)
-
-    #     self.assertEqual(result_se2_poses.shape, (0, len(StateSE2Index)))
-    #     self.assertEqual(result_2d_points.shape, (0, len(Point2DIndex)))
-
-    #     # Test SE3 empty arrays
-    #     empty_se3_poses = np.array([], dtype=np.float64).reshape(0, len(StateSE3Index))
-    #     empty_3d_points = np.array([], dtype=np.float64).reshape(0, len(Point3DIndex))
-
-    #     result_se3_poses = convert_absolute_to_relative_se3_array(reference_se3, empty_se3_poses)
-    #     result_3d_points = convert_absolute_to_relative_points_3d_array(reference_se3, empty_3d_points)
-
-    #     self.assertEqual(result_se3_poses.shape, (0, len(StateSE3Index)))
-    #     self.assertEqual(result_3d_points.shape, (0, len(Point3DIndex)))
-
-    def test_transform_identity_operations(self) -> None:
-        """Test that transforms with identity reference frames work correctly"""
-        # Identity SE2 pose
-        identity_se2 = StateSE2.from_array(np.array([0.0, 0.0, 0.0], dtype=np.float64))
-        identity_se3 = StateSE3.from_array(np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0], dtype=np.float64))
-
-        for _ in range(self.num_consistency_tests):
-            # Test SE2 identity transforms
-            num_poses = np.random.randint(1, 10)
-            se2_poses = self._get_random_se2_array(num_poses)
-            se2_points = se2_poses[:, StateSE2Index.XY]
-
-            relative_se2_poses = convert_absolute_to_relative_se2_array(identity_se2, se2_poses)
-            relative_se2_points = convert_absolute_to_relative_point_2d_array(identity_se2, se2_points)
-
-            np.testing.assert_array_almost_equal(se2_poses, relative_se2_poses, decimal=self.decimal)
-            np.testing.assert_array_almost_equal(se2_points, relative_se2_points, decimal=self.decimal)
-
-            # Test SE3 identity transforms
-            se3_poses = self._get_random_se3_array(num_poses)
-            se3_points = se3_poses[:, StateSE3Index.XYZ]
-
-            relative_se3_poses = convert_absolute_to_relative_se3_array(identity_se3, se3_poses)
-            relative_se3_points = convert_absolute_to_relative_points_3d_array(identity_se3, se3_points)
-
-            np.testing.assert_array_almost_equal(
-                se3_poses[..., StateSE3Index.EULER_ANGLES],
-                relative_se3_poses[..., StateSE3Index.EULER_ANGLES],
-                decimal=self.decimal,
-            )
-            np.testing.assert_array_almost_equal(se3_points, relative_se3_points, decimal=self.decimal)
-
-    def test_transform_large_rotations(self) -> None:
None: - """Test transforms with large rotation angles beyond [-π, π]""" - for _ in range(self.num_consistency_tests): - # Create poses with large rotation angles - large_yaw_se2 = np.random.uniform(-4 * np.pi, 4 * np.pi) - large_euler_se3 = np.random.uniform(-4 * np.pi, 4 * np.pi, 3) - - reference_se2 = StateSE2.from_array(np.array([0.0, 0.0, large_yaw_se2], dtype=np.float64)) - reference_se3 = StateSE3.from_array( - np.array([0.0, 0.0, 0.0, large_euler_se3[0], large_euler_se3[1], large_euler_se3[2]], dtype=np.float64) - ) - - # Generate test poses/points - test_se2_poses = self._get_random_se2_array(5) - test_se3_poses = self._get_random_se3_array(5) - test_2d_points = test_se2_poses[:, StateSE2Index.XY] - test_3d_points = test_se3_poses[:, StateSE3Index.XYZ] - - # Test round-trip conversions should still work - relative_se2 = convert_absolute_to_relative_se2_array(reference_se2, test_se2_poses) - recovered_se2 = convert_relative_to_absolute_se2_array(reference_se2, relative_se2) - - relative_se3 = convert_absolute_to_relative_se3_array(reference_se3, test_se3_poses) - recovered_se3 = convert_relative_to_absolute_se3_array(reference_se3, relative_se3) - - relative_2d_points = convert_absolute_to_relative_point_2d_array(reference_se2, test_2d_points) - recovered_2d_points = convert_relative_to_absolute_point_2d_array(reference_se2, relative_2d_points) - - relative_3d_points = convert_absolute_to_relative_points_3d_array(reference_se3, test_3d_points) - recovered_3d_points = convert_relative_to_absolute_points_3d_array(reference_se3, relative_3d_points) - - # Check consistency (allowing for angle wrapping) - np.testing.assert_array_almost_equal( - test_se2_poses[:, StateSE2Index.XY], - recovered_se2[:, StateSE2Index.XY], - decimal=self.decimal, - ) - np.testing.assert_array_almost_equal( - test_se3_poses[:, StateSE3Index.XYZ], - recovered_se3[:, StateSE3Index.XYZ], - decimal=self.decimal, - ) - np.testing.assert_array_almost_equal(test_2d_points, recovered_2d_points, decimal=self.decimal) - np.testing.assert_array_almost_equal(test_3d_points, recovered_3d_points, decimal=self.decimal) - - -if __name__ == "__main__": - unittest.main() diff --git a/d123/geometry/transform/__init__.py b/d123/geometry/transform/__init__.py index 96088028..e69de29b 100644 --- a/d123/geometry/transform/__init__.py +++ b/d123/geometry/transform/__init__.py @@ -1,15 +0,0 @@ -from d123.geometry.transform.transform_se2 import ( - convert_absolute_to_relative_se2_array, - convert_relative_to_absolute_se2_array, - translate_se2_along_body_frame, - translate_se2_along_x, - translate_se2_along_y, -) -from d123.geometry.transform.transform_se3 import ( - convert_absolute_to_relative_se3_array, - convert_relative_to_absolute_se3_array, - translate_se3_along_body_frame, - translate_se3_along_x, - translate_se3_along_y, - translate_se3_along_z, -) diff --git a/d123/geometry/transform/test/__init__.py b/d123/geometry/transform/test/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/d123/geometry/transform/test/test_transform_consistency.py b/d123/geometry/transform/test/test_transform_consistency.py new file mode 100644 index 00000000..b4f73ab1 --- /dev/null +++ b/d123/geometry/transform/test/test_transform_consistency.py @@ -0,0 +1,475 @@ +import unittest + +import numpy as np +import numpy.typing as npt + +from d123.geometry.geometry_index import Point2DIndex, Point3DIndex, StateSE2Index, EulerStateSE3Index +from d123.geometry.se import StateSE2, EulerStateSE3 +from d123.geometry.transform.transform_se2 
diff --git a/d123/geometry/transform/test/__init__.py b/d123/geometry/transform/test/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/d123/geometry/transform/test/test_transform_consistency.py b/d123/geometry/transform/test/test_transform_consistency.py
new file mode 100644
index 00000000..b4f73ab1
--- /dev/null
+++ b/d123/geometry/transform/test/test_transform_consistency.py
@@ -0,0 +1,475 @@
+import unittest
+
+import numpy as np
+import numpy.typing as npt
+
+from d123.geometry.geometry_index import Point2DIndex, Point3DIndex, StateSE2Index, EulerStateSE3Index
+from d123.geometry.se import StateSE2, EulerStateSE3
+from d123.geometry.transform.transform_se2 import (
+    convert_absolute_to_relative_point_2d_array,
+    convert_absolute_to_relative_se2_array,
+    convert_relative_to_absolute_point_2d_array,
+    convert_relative_to_absolute_se2_array,
+    translate_se2_along_body_frame,
+    translate_se2_along_x,
+    translate_se2_along_y,
+    translate_se2_array_along_body_frame,
+)
+from d123.geometry.transform.transform_euler_se3 import (
+    convert_absolute_to_relative_points_3d_array,
+    convert_absolute_to_relative_euler_se3_array,
+    convert_relative_to_absolute_points_3d_array,
+    convert_relative_to_absolute_euler_se3_array,
+    translate_euler_se3_along_body_frame,
+    translate_euler_se3_along_x,
+    translate_euler_se3_along_y,
+)
+from d123.geometry.vector import Vector2D, Vector3D
+
+
+class TestTransformConsistency(unittest.TestCase):
+    """Tests to ensure consistency between different transformation functions."""
+
+    def setUp(self):
+        self.decimal = 4  # Decimal places for np.testing.assert_array_almost_equal
+        self.num_consistency_tests = 10  # Number of random test cases for consistency checks
+
+        self.max_pose_xyz = 100.0
+        self.min_random_poses = 1
+        self.max_random_poses = 20
+
+    def _get_random_se2_array(self, size: int) -> npt.NDArray[np.float64]:
+        """Generate an array of random SE2 poses."""
+        random_se2_array = np.random.uniform(-self.max_pose_xyz, self.max_pose_xyz, (size, len(StateSE2Index)))
+        random_se2_array[:, StateSE2Index.YAW] = np.random.uniform(-np.pi, np.pi, size)  # yaw angles
+        return random_se2_array
+
+    def _get_random_se3_array(self, size: int) -> npt.NDArray[np.float64]:
+        """Generate an array of random SE3 poses."""
+        random_se3_array = np.zeros((size, len(EulerStateSE3Index)), dtype=np.float64)
+        random_se3_array[:, EulerStateSE3Index.XYZ] = np.random.uniform(
+            -self.max_pose_xyz, self.max_pose_xyz, (size, 3)
+        )
+        random_se3_array[:, EulerStateSE3Index.YAW] = np.random.uniform(-np.pi, np.pi, size)
+        random_se3_array[:, EulerStateSE3Index.PITCH] = np.random.uniform(-np.pi / 2, np.pi / 2, size)
+        random_se3_array[:, EulerStateSE3Index.ROLL] = np.random.uniform(-np.pi, np.pi, size)
+
+        return random_se3_array
+
+    def test_se2_absolute_relative_conversion_consistency(self) -> None:
+        """Test that converting absolute->relative->absolute returns original poses"""
+        for _ in range(self.num_consistency_tests):
+            # Generate random reference pose
+            reference = StateSE2.from_array(self._get_random_se2_array(1)[0])
+
+            # Generate random absolute poses
+            num_poses = np.random.randint(self.min_random_poses, self.max_random_poses)
+            absolute_poses = self._get_random_se2_array(num_poses)
+
+            # Convert absolute -> relative -> absolute
+            relative_poses = convert_absolute_to_relative_se2_array(reference, absolute_poses)
+            recovered_absolute = convert_relative_to_absolute_se2_array(reference, relative_poses)
+
+            np.testing.assert_array_almost_equal(absolute_poses, recovered_absolute, decimal=self.decimal)
+
+    def test_se2_points_absolute_relative_conversion_consistency(self) -> None:
+        """Test that converting absolute->relative->absolute returns original points"""
+        for _ in range(self.num_consistency_tests):
+            # Generate random reference pose
+            reference = StateSE2.from_array(self._get_random_se2_array(1)[0])
+
+            # Generate random absolute points
+            num_points = np.random.randint(self.min_random_poses, self.max_random_poses)
+            absolute_points = self._get_random_se2_array(num_points)[:, StateSE2Index.XY]
+
+            # Convert absolute -> relative -> absolute
+            relative_points = convert_absolute_to_relative_point_2d_array(reference, absolute_points)
+            recovered_absolute = convert_relative_to_absolute_point_2d_array(reference, relative_points)
+
+            np.testing.assert_array_almost_equal(absolute_points, recovered_absolute, decimal=self.decimal)
+
+    def test_se2_points_consistency(self) -> None:
+        """Test whether SE2 point and pose conversions are consistent"""
+        for _ in range(self.num_consistency_tests):
+            # Generate random reference pose
+            reference = StateSE2.from_array(self._get_random_se2_array(1)[0])
+
+            # Generate random absolute poses
+            num_poses = np.random.randint(self.min_random_poses, self.max_random_poses)
+            absolute_se2 = self._get_random_se2_array(num_poses)
+
+            # Convert absolute -> relative -> absolute
+            relative_se2 = convert_absolute_to_relative_se2_array(reference, absolute_se2)
+            relative_points = convert_absolute_to_relative_point_2d_array(
+                reference, absolute_se2[..., StateSE2Index.XY]
+            )
+            np.testing.assert_array_almost_equal(
+                relative_se2[..., StateSE2Index.XY], relative_points, decimal=self.decimal
+            )
+
+            recovered_absolute_se2 = convert_relative_to_absolute_se2_array(reference, relative_se2)
+            absolute_points = convert_relative_to_absolute_point_2d_array(reference, relative_points)
+            np.testing.assert_array_almost_equal(
+                recovered_absolute_se2[..., StateSE2Index.XY], absolute_points, decimal=self.decimal
+            )
+
+    def test_se2_translation_consistency(self) -> None:
+        """Test that SE2 translations are consistent between different methods"""
+        for _ in range(self.num_consistency_tests):
+            # Generate random pose
+            pose = StateSE2.from_array(self._get_random_se2_array(1)[0])
+
+            # Generate random distances
+            dx = np.random.uniform(-10.0, 10.0)
+            dy = np.random.uniform(-10.0, 10.0)
+
+            # Test x-translation consistency
+            result_x_direct = translate_se2_along_x(pose, dx)
+            result_x_body = translate_se2_along_body_frame(pose, Vector2D(dx, 0.0))
+            np.testing.assert_array_almost_equal(result_x_direct.array, result_x_body.array, decimal=self.decimal)
+
+            # Test y-translation consistency
+            result_y_direct = translate_se2_along_y(pose, dy)
+            result_y_body = translate_se2_along_body_frame(pose, Vector2D(0.0, dy))
+            np.testing.assert_array_almost_equal(result_y_direct.array, result_y_body.array, decimal=self.decimal)
+
+            # Test combined translation
+            result_xy_body = translate_se2_along_body_frame(pose, Vector2D(dx, dy))
+            result_xy_sequential = translate_se2_along_y(translate_se2_along_x(pose, dx), dy)
+            np.testing.assert_array_almost_equal(result_xy_body.array, result_xy_sequential.array, decimal=self.decimal)
+
+    def test_se3_absolute_relative_conversion_consistency(self) -> None:
+        """Test that converting absolute->relative->absolute returns original poses"""
+        for _ in range(self.num_consistency_tests):
+            # Generate random reference pose
+            reference = EulerStateSE3.from_array(self._get_random_se3_array(1)[0])
+
+            # Generate random absolute poses
+            num_poses = np.random.randint(self.min_random_poses, self.max_random_poses)
+            absolute_poses = self._get_random_se3_array(num_poses)
+
+            # Convert absolute -> relative -> absolute
+            relative_poses = convert_absolute_to_relative_euler_se3_array(reference, absolute_poses)
+            recovered_absolute = convert_relative_to_absolute_euler_se3_array(reference, relative_poses)
+
+            np.testing.assert_array_almost_equal(absolute_poses, recovered_absolute, decimal=self.decimal)
+
+    def test_se3_points_absolute_relative_conversion_consistency(self) -> None:
+        """Test that converting absolute->relative->absolute returns original points"""
+        for _ in range(self.num_consistency_tests):
+            # Generate random reference pose
+            reference = EulerStateSE3.from_array(self._get_random_se3_array(1)[0])
+
+            # Generate random absolute points
+            num_points = np.random.randint(self.min_random_poses, self.max_random_poses)
+            absolute_points = self._get_random_se3_array(num_points)[:, EulerStateSE3Index.XYZ]
+
+            # Convert absolute -> relative -> absolute
+            relative_points = convert_absolute_to_relative_points_3d_array(reference, absolute_points)
+            recovered_absolute = convert_relative_to_absolute_points_3d_array(reference, relative_points)
+
+            np.testing.assert_array_almost_equal(absolute_points, recovered_absolute, decimal=self.decimal)
+
+    def test_se3_points_consistency(self) -> None:
+        """Test whether SE3 point and pose conversions are consistent"""
+        for _ in range(self.num_consistency_tests):
+            # Generate random reference pose
+            reference = EulerStateSE3.from_array(self._get_random_se3_array(1)[0])
+
+            # Generate random absolute poses
+            num_poses = np.random.randint(self.min_random_poses, self.max_random_poses)
+            absolute_se3 = self._get_random_se3_array(num_poses)
+
+            # Convert absolute -> relative -> absolute
+            relative_se3 = convert_absolute_to_relative_euler_se3_array(reference, absolute_se3)
+            relative_points = convert_absolute_to_relative_points_3d_array(
+                reference, absolute_se3[..., EulerStateSE3Index.XYZ]
+            )
+            np.testing.assert_array_almost_equal(
+                relative_se3[..., EulerStateSE3Index.XYZ], relative_points, decimal=self.decimal
+            )
+
+            recovered_absolute_se3 = convert_relative_to_absolute_euler_se3_array(reference, relative_se3)
+            absolute_points = convert_relative_to_absolute_points_3d_array(reference, relative_points)
+            np.testing.assert_array_almost_equal(
+                recovered_absolute_se3[..., EulerStateSE3Index.XYZ], absolute_points, decimal=self.decimal
+            )
+
+    def test_se2_se3_translation_along_body_consistency(self) -> None:
+        """Test that SE2 and SE3 translations are consistent when SE3 has no z-component or rotation"""
+        for _ in range(self.num_consistency_tests):
+            # Create equivalent SE2 and SE3 poses (SE3 with z=0 and no rotations except yaw)
+
+            pose_se2 = StateSE2.from_array(self._get_random_se2_array(1)[0])
+            pose_se3 = EulerStateSE3.from_array(
+                np.array([pose_se2.x, pose_se2.y, 0.0, 0.0, 0.0, pose_se2.yaw], dtype=np.float64)
+            )
+
+            # Test translation along x-axis
+            dx = np.random.uniform(-5.0, 5.0)
+            translated_se2_x = translate_se2_along_body_frame(pose_se2, Vector2D(dx, 0.0))
+            translated_se3_x = translate_euler_se3_along_x(pose_se3, dx)
+
+            np.testing.assert_array_almost_equal(
+                translated_se2_x.array[StateSE2Index.XY],
+                translated_se3_x.array[EulerStateSE3Index.XY],
+                decimal=self.decimal,
+            )
+            np.testing.assert_almost_equal(
+                translated_se2_x.array[StateSE2Index.YAW],
+                translated_se3_x.array[EulerStateSE3Index.YAW],
+                decimal=self.decimal,
+            )
+
+            # Test translation along y-axis
+            dy = np.random.uniform(-5.0, 5.0)
+            translated_se2_y = translate_se2_along_body_frame(pose_se2, Vector2D(0.0, dy))
+            translated_se3_y = translate_euler_se3_along_y(pose_se3, dy)
+
+            np.testing.assert_array_almost_equal(
+                translated_se2_y.array[StateSE2Index.XY],
+                translated_se3_y.array[EulerStateSE3Index.XY],
+                decimal=self.decimal,
+            )
+            np.testing.assert_almost_equal(
+                translated_se2_y.array[StateSE2Index.YAW],
+                translated_se3_y.array[EulerStateSE3Index.YAW],
+                decimal=self.decimal,
+            )
+
+            # Test translation along x- and y-axis
+            dx = np.random.uniform(-5.0, 5.0)
+            dy = np.random.uniform(-5.0, 5.0)
+            translated_se2_xy = translate_se2_along_body_frame(pose_se2, Vector2D(dx, dy))
+            translated_se3_xy = translate_euler_se3_along_body_frame(pose_se3, Vector3D(dx, dy, 0.0))
+            np.testing.assert_array_almost_equal(
+                translated_se2_xy.array[StateSE2Index.XY],
+                translated_se3_xy.array[EulerStateSE3Index.XY],
+                decimal=self.decimal,
+            )
+            np.testing.assert_almost_equal(
+                translated_se2_xy.array[StateSE2Index.YAW],
+                translated_se3_xy.array[EulerStateSE3Index.YAW],
+                decimal=self.decimal,
+            )
+
+    def test_se2_se3_point_conversion_consistency(self) -> None:
+        """Test that SE2 and SE3 point conversions are consistent for 2D points embedded in 3D"""
+        for _ in range(self.num_consistency_tests):
+            # Create equivalent SE2 and SE3 reference poses
+            x = np.random.uniform(-10.0, 10.0)
+            y = np.random.uniform(-10.0, 10.0)
+            yaw = np.random.uniform(-np.pi, np.pi)
+
+            reference_se2 = StateSE2.from_array(np.array([x, y, yaw], dtype=np.float64))
+            reference_se3 = EulerStateSE3.from_array(np.array([x, y, 0.0, 0.0, 0.0, yaw], dtype=np.float64))
+
+            # Generate 2D points and embed them in 3D with z=0
+            num_points = np.random.randint(1, 8)
+            points_2d = np.random.uniform(-20.0, 20.0, (num_points, len(Point2DIndex)))
+            points_3d = np.column_stack([points_2d, np.zeros(num_points)])
+
+            # Convert using SE2 functions
+            relative_2d = convert_absolute_to_relative_point_2d_array(reference_se2, points_2d)
+            absolute_2d_recovered = convert_relative_to_absolute_point_2d_array(reference_se2, relative_2d)
+
+            # Convert using SE3 functions
+            relative_3d = convert_absolute_to_relative_points_3d_array(reference_se3, points_3d)
+            absolute_3d_recovered = convert_relative_to_absolute_points_3d_array(reference_se3, relative_3d)
+
+            # Check that SE2 and SE3 conversions are consistent for the x,y components
+            np.testing.assert_array_almost_equal(relative_2d, relative_3d[:, Point3DIndex.XY], decimal=self.decimal)
+            np.testing.assert_array_almost_equal(
+                absolute_2d_recovered, absolute_3d_recovered[:, Point3DIndex.XY], decimal=self.decimal
+            )
+
+            # Check that z-components remain zero
+            np.testing.assert_array_almost_equal(
+                relative_3d[:, Point3DIndex.Z], np.zeros(num_points), decimal=self.decimal
+            )
+            np.testing.assert_array_almost_equal(
+                absolute_3d_recovered[:, Point3DIndex.Z], np.zeros(num_points), decimal=self.decimal
+            )
+
+    def test_se2_se3_pose_conversion_consistency(self) -> None:
+        """Test that SE2 and SE3 pose conversions are consistent for 2D poses embedded in 3D"""
+        for _ in range(self.num_consistency_tests):
+            # Create equivalent SE2 and SE3 reference poses
+            x = np.random.uniform(-10.0, 10.0)
+            y = np.random.uniform(-10.0, 10.0)
+            yaw = np.random.uniform(-np.pi, np.pi)
+
+            reference_se2 = StateSE2.from_array(np.array([x, y, yaw], dtype=np.float64))
+            reference_se3 = EulerStateSE3.from_array(np.array([x, y, 0.0, 0.0, 0.0, yaw], dtype=np.float64))
+
+            # Generate 2D poses and embed them in 3D with z=0 and zero roll/pitch
+            num_poses = np.random.randint(1, 8)
+            pose_2d = self._get_random_se2_array(num_poses)
+            pose_3d = np.zeros((num_poses, len(EulerStateSE3Index)), dtype=np.float64)
+            pose_3d[:, EulerStateSE3Index.XY] = pose_2d[:, StateSE2Index.XY]
+            pose_3d[:, EulerStateSE3Index.YAW] = pose_2d[:, StateSE2Index.YAW]
+
+            # Convert using SE2 functions
+            relative_se2 = convert_absolute_to_relative_se2_array(reference_se2, pose_2d)
+            absolute_se2_recovered = convert_relative_to_absolute_se2_array(reference_se2, relative_se2)
+
+            # Convert using SE3 functions
+            relative_se3 = convert_absolute_to_relative_euler_se3_array(reference_se3, pose_3d)
+            absolute_se3_recovered = convert_relative_to_absolute_euler_se3_array(reference_se3, relative_se3)
+
+            # Check that SE2 and SE3 conversions are consistent for the x,y components
+            np.testing.assert_array_almost_equal(
+                relative_se2[:, StateSE2Index.XY], relative_se3[:, EulerStateSE3Index.XY], decimal=self.decimal
+            )
+            np.testing.assert_array_almost_equal(
+                absolute_se2_recovered[:, StateSE2Index.XY],
+                absolute_se3_recovered[:, EulerStateSE3Index.XY],
+                decimal=self.decimal,
+            )
+            # Check that SE2 and SE3 conversions are consistent for the yaw component
+            np.testing.assert_array_almost_equal(
+                relative_se2[:, StateSE2Index.YAW], relative_se3[:, EulerStateSE3Index.YAW], decimal=self.decimal
+            )
+            np.testing.assert_array_almost_equal(
+                absolute_se2_recovered[:, StateSE2Index.YAW],
+                absolute_se3_recovered[:, EulerStateSE3Index.YAW],
+                decimal=self.decimal,
+            )
+
+            # Check that z-components remain zero
+            np.testing.assert_array_almost_equal(
+                relative_se3[:, Point3DIndex.Z], np.zeros(num_poses), decimal=self.decimal
+            )
+            np.testing.assert_array_almost_equal(
+                absolute_se3_recovered[:, Point3DIndex.Z], np.zeros(num_poses), decimal=self.decimal
+            )
+
+    def test_se2_array_translation_consistency(self) -> None:
+        """Test that SE2 array translation is consistent with single pose translation"""
+        for _ in range(self.num_consistency_tests):
+            # Generate random poses
+            num_poses = np.random.randint(self.min_random_poses, self.max_random_poses)
+            poses_array = self._get_random_se2_array(num_poses)
+
+            # Generate random translation
+            dx = np.random.uniform(-5.0, 5.0)
+            dy = np.random.uniform(-5.0, 5.0)
+            translation = Vector2D(dx, dy)
+
+            # Translate using array function
+            result_array = translate_se2_array_along_body_frame(poses_array, translation)
+
+            # Translate each pose individually
+            result_individual = np.zeros_like(poses_array)
+            for i in range(num_poses):
+                pose = StateSE2.from_array(poses_array[i])
+                translated = translate_se2_along_body_frame(pose, translation)
+                result_individual[i] = translated.array
+
+            np.testing.assert_array_almost_equal(result_array, result_individual, decimal=self.decimal)
+
+    def test_transform_empty_arrays(self) -> None:
+        """Test that transform functions handle empty arrays correctly"""
+        reference_se2 = StateSE2.from_array(np.array([1.0, 2.0, np.pi / 4], dtype=np.float64))
+        reference_se3 = EulerStateSE3.from_array(np.array([1.0, 2.0, 3.0, 0.1, 0.2, 0.3], dtype=np.float64))
+
+        # Test SE2 empty arrays
+        empty_se2_poses = np.array([], dtype=np.float64).reshape(0, len(StateSE2Index))
+        empty_2d_points = np.array([], dtype=np.float64).reshape(0, len(Point2DIndex))
+
+        result_se2_poses = convert_absolute_to_relative_se2_array(reference_se2, empty_se2_poses)
+        result_2d_points = convert_absolute_to_relative_point_2d_array(reference_se2, empty_2d_points)
+
+        self.assertEqual(result_se2_poses.shape, (0, len(StateSE2Index)))
+        self.assertEqual(result_2d_points.shape, (0, len(Point2DIndex)))
+
+        # Test SE3 empty arrays
+        empty_se3_poses = np.array([], dtype=np.float64).reshape(0, len(EulerStateSE3Index))
+        empty_3d_points = np.array([], dtype=np.float64).reshape(0, len(Point3DIndex))
+
+        result_se3_poses = convert_absolute_to_relative_euler_se3_array(reference_se3, empty_se3_poses)
+        result_3d_points = convert_absolute_to_relative_points_3d_array(reference_se3, empty_3d_points)
+
+        self.assertEqual(result_se3_poses.shape, (0, len(EulerStateSE3Index)))
+        self.assertEqual(result_3d_points.shape, (0, len(Point3DIndex)))
+
+    def test_transform_identity_operations(self) -> None:
+        """Test that transforms with identity reference frames work correctly"""
+        # Identity SE2 pose
+        identity_se2 = StateSE2.from_array(np.array([0.0, 0.0, 0.0], dtype=np.float64))
+        identity_se3 = EulerStateSE3.from_array(np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0], dtype=np.float64))
+
+        for _ in range(self.num_consistency_tests):
+            # Test SE2 identity transforms
+            num_poses = np.random.randint(1, 10)
+            se2_poses = self._get_random_se2_array(num_poses)
+            se2_points = se2_poses[:, StateSE2Index.XY]
+
+            relative_se2_poses = convert_absolute_to_relative_se2_array(identity_se2, se2_poses)
+            relative_se2_points = convert_absolute_to_relative_point_2d_array(identity_se2, se2_points)
+
+            np.testing.assert_array_almost_equal(se2_poses, relative_se2_poses, decimal=self.decimal)
+            np.testing.assert_array_almost_equal(se2_points, relative_se2_points, decimal=self.decimal)
+
+            # Test SE3 identity transforms
+            se3_poses = self._get_random_se3_array(num_poses)
+            se3_points = se3_poses[:, EulerStateSE3Index.XYZ]
+
+            relative_se3_poses = convert_absolute_to_relative_euler_se3_array(identity_se3, se3_poses)
+            relative_se3_points = convert_absolute_to_relative_points_3d_array(identity_se3, se3_points)
+
+            np.testing.assert_array_almost_equal(
+                se3_poses[..., EulerStateSE3Index.EULER_ANGLES],
+                relative_se3_poses[..., EulerStateSE3Index.EULER_ANGLES],
+                decimal=self.decimal,
+            )
+            np.testing.assert_array_almost_equal(se3_points, relative_se3_points, decimal=self.decimal)
+
+    def test_transform_large_rotations(self) -> None:
+        """Test transforms with large rotation angles beyond [-π, π]"""
+        for _ in range(self.num_consistency_tests):
+            # Create poses with large rotation angles
+            large_yaw_se2 = np.random.uniform(-4 * np.pi, 4 * np.pi)
+            large_euler_se3 = np.random.uniform(-4 * np.pi, 4 * np.pi, 3)
+
+            reference_se2 = StateSE2.from_array(np.array([0.0, 0.0, large_yaw_se2], dtype=np.float64))
+            reference_se3 = EulerStateSE3.from_array(
+                np.array([0.0, 0.0, 0.0, large_euler_se3[0], large_euler_se3[1], large_euler_se3[2]], dtype=np.float64)
+            )
+
+            # Generate test poses/points
+            test_se2_poses = self._get_random_se2_array(5)
+            test_se3_poses = self._get_random_se3_array(5)
+            test_2d_points = test_se2_poses[:, StateSE2Index.XY]
+            test_3d_points = test_se3_poses[:, EulerStateSE3Index.XYZ]
+
+            # Test that round-trip conversions still work
+            relative_se2 = convert_absolute_to_relative_se2_array(reference_se2, test_se2_poses)
+            recovered_se2 = convert_relative_to_absolute_se2_array(reference_se2, relative_se2)
+
+            relative_se3 = convert_absolute_to_relative_euler_se3_array(reference_se3, test_se3_poses)
+            recovered_se3 = convert_relative_to_absolute_euler_se3_array(reference_se3, relative_se3)
+
+            relative_2d_points = convert_absolute_to_relative_point_2d_array(reference_se2, test_2d_points)
+            recovered_2d_points = convert_relative_to_absolute_point_2d_array(reference_se2, relative_2d_points)
+
+            relative_3d_points = convert_absolute_to_relative_points_3d_array(reference_se3, test_3d_points)
+            recovered_3d_points = convert_relative_to_absolute_points_3d_array(reference_se3, relative_3d_points)
+
+            # Check consistency (allowing for angle wrapping)
+            np.testing.assert_array_almost_equal(
+                test_se2_poses[:, StateSE2Index.XY],
+                recovered_se2[:, StateSE2Index.XY],
+                decimal=self.decimal,
+            )
+            np.testing.assert_array_almost_equal(
+                test_se3_poses[:, EulerStateSE3Index.XYZ],
+                recovered_se3[:, EulerStateSE3Index.XYZ],
+                decimal=self.decimal,
+            )
+            np.testing.assert_array_almost_equal(test_2d_points, recovered_2d_points, decimal=self.decimal)
+            np.testing.assert_array_almost_equal(test_3d_points, recovered_3d_points, decimal=self.decimal)
+
+
+if __name__ == "__main__":
+    unittest.main()
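Note: the consistency suite above treats absolute -> relative -> absolute round trips as (near-)identity up to self.decimal places. A minimal standalone sketch of that invariant, assuming the modules added in this patch are importable (the numeric values are illustrative only, not taken from the patch):

    import numpy as np

    from d123.geometry.se import EulerStateSE3
    from d123.geometry.transform.transform_euler_se3 import (
        convert_absolute_to_relative_euler_se3_array,
        convert_relative_to_absolute_euler_se3_array,
    )

    # Express one absolute pose relative to a reference, then map it back.
    reference = EulerStateSE3.from_array(np.array([1.0, 2.0, 0.5, 0.0, 0.0, np.pi / 4], dtype=np.float64))
    absolute = np.array([[3.0, 1.0, 0.0, 0.0, 0.0, 0.0]], dtype=np.float64)
    relative = convert_absolute_to_relative_euler_se3_array(reference, absolute)
    recovered = convert_relative_to_absolute_euler_se3_array(reference, relative)
    np.testing.assert_array_almost_equal(absolute, recovered, decimal=4)  # round trip recovers the input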
EulerStateSE3.from_array(np.array([1.0, 2.0, 3.0, 0.0, 0.0, 0.0], dtype=np.float64)) + distance: float = 2.5 + result: EulerStateSE3 = translate_euler_se3_along_y(pose, distance) + expected: EulerStateSE3 = EulerStateSE3.from_array(np.array([1.0, 4.5, 3.0, 0.0, 0.0, 0.0], dtype=np.float64)) + np.testing.assert_array_almost_equal(result.array, expected.array) + + def test_translate_se3_along_y_negative(self) -> None: + """Tests translating a SE3 state along the body frame lateral direction in the negative direction.""" + pose: EulerStateSE3 = EulerStateSE3.from_array(np.array([1.0, 2.0, 3.0, 0.0, 0.0, 0.0], dtype=np.float64)) + distance: float = -1.0 + result: EulerStateSE3 = translate_euler_se3_along_y(pose, distance) + expected: EulerStateSE3 = EulerStateSE3.from_array(np.array([1.0, 1.0, 3.0, 0.0, 0.0, 0.0], dtype=np.float64)) + np.testing.assert_array_almost_equal(result.array, expected.array) + + def test_translate_se3_along_y_with_rotation(self) -> None: + """Tests translating a SE3 state along the body frame lateral direction with roll rotation.""" + pose: EulerStateSE3 = EulerStateSE3.from_array(np.array([1.0, 2.0, 3.0, np.pi / 2, 0.0, 0.0], dtype=np.float64)) + distance: float = -1.0 + result: EulerStateSE3 = translate_euler_se3_along_y(pose, distance) + expected: EulerStateSE3 = EulerStateSE3.from_array( + np.array([1.0, 2.0, 2.0, np.pi / 2, 0.0, 0.0], dtype=np.float64) + ) + np.testing.assert_array_almost_equal(result.array, expected.array) + + def test_translate_se3_along_z(self) -> None: + """Tests translating a SE3 state along the body frame vertical direction.""" + pose: EulerStateSE3 = EulerStateSE3.from_array(np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0], dtype=np.float64)) + distance: float = 1.0 + result: EulerStateSE3 = translate_euler_se3_along_z(pose, distance) + expected: EulerStateSE3 = EulerStateSE3.from_array(np.array([0.0, 0.0, 1.0, 0.0, 0.0, 0.0], dtype=np.float64)) + np.testing.assert_array_almost_equal(result.array, expected.array) + + def test_translate_se3_along_z_large_distance(self) -> None: + """Tests translating a SE3 state along the body frame vertical direction with a large distance.""" + pose: EulerStateSE3 = EulerStateSE3.from_array(np.array([0.0, 0.0, 5.0, 0.0, 0.0, 0.0], dtype=np.float64)) + distance: float = 10.0 + result: EulerStateSE3 = translate_euler_se3_along_z(pose, distance) + expected: EulerStateSE3 = EulerStateSE3.from_array(np.array([0.0, 0.0, 15.0, 0.0, 0.0, 0.0], dtype=np.float64)) + np.testing.assert_array_almost_equal(result.array, expected.array) + + def test_translate_se3_along_z_negative(self) -> None: + """Tests translating a SE3 state along the body frame vertical direction in the negative direction.""" + pose: EulerStateSE3 = EulerStateSE3.from_array(np.array([1.0, 2.0, 5.0, 0.0, 0.0, 0.0], dtype=np.float64)) + distance: float = -2.0 + result: EulerStateSE3 = translate_euler_se3_along_z(pose, distance) + expected: EulerStateSE3 = EulerStateSE3.from_array(np.array([1.0, 2.0, 3.0, 0.0, 0.0, 0.0], dtype=np.float64)) + np.testing.assert_array_almost_equal(result.array, expected.array) + + def test_translate_se3_along_z_with_rotation(self) -> None: + """Tests translating a SE3 state along the body frame vertical direction with pitch rotation.""" + pose: EulerStateSE3 = EulerStateSE3.from_array(np.array([1.0, 2.0, 3.0, 0.0, np.pi / 2, 0.0], dtype=np.float64)) + distance: float = 2.0 + result: EulerStateSE3 = translate_euler_se3_along_z(pose, distance) + expected: EulerStateSE3 = EulerStateSE3.from_array( + np.array([3.0, 2.0, 3.0, 0.0, 
np.pi / 2, 0.0], dtype=np.float64) + ) + np.testing.assert_array_almost_equal(result.array, expected.array) + + def test_translate_se3_along_body_frame(self) -> None: + """Tests translating a SE3 state along the body frame forward direction.""" + pose: EulerStateSE3 = EulerStateSE3.from_array(np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0], dtype=np.float64)) + translation: Vector3D = Vector3D.from_array(np.array([1.0, 0.0, 0.0], dtype=np.float64)) + result: EulerStateSE3 = translate_euler_se3_along_body_frame(pose, translation) + expected: EulerStateSE3 = EulerStateSE3.from_array(np.array([1.0, 0.0, 0.0, 0.0, 0.0, 0.0], dtype=np.float64)) + np.testing.assert_array_almost_equal(result.array, expected.array) + + def test_translate_se3_along_body_frame_multiple_axes(self) -> None: + """Tests translating a SE3 state along the body frame in multiple axes.""" + pose: EulerStateSE3 = EulerStateSE3.from_array(np.array([1.0, 2.0, 3.0, 0.0, 0.0, 0.0], dtype=np.float64)) + translation: Vector3D = Vector3D.from_array(np.array([0.5, -1.0, 2.0], dtype=np.float64)) + result: EulerStateSE3 = translate_euler_se3_along_body_frame(pose, translation) + expected: EulerStateSE3 = EulerStateSE3.from_array(np.array([1.5, 1.0, 5.0, 0.0, 0.0, 0.0], dtype=np.float64)) + np.testing.assert_array_almost_equal(result.array, expected.array) + + def test_translate_se3_along_body_frame_zero_translation(self) -> None: + """Tests translating a SE3 state along the body frame with zero translation.""" + pose: EulerStateSE3 = EulerStateSE3.from_array(np.array([1.0, 2.0, 3.0, 0.0, 0.0, 0.0], dtype=np.float64)) + translation: Vector3D = Vector3D.from_array(np.array([0.0, 0.0, 0.0], dtype=np.float64)) + result: EulerStateSE3 = translate_euler_se3_along_body_frame(pose, translation) + expected: EulerStateSE3 = EulerStateSE3.from_array(np.array([1.0, 2.0, 3.0, 0.0, 0.0, 0.0], dtype=np.float64)) + np.testing.assert_array_almost_equal(result.array, expected.array) + + def test_translate_se3_along_body_frame_with_rotation(self) -> None: + """Tests translating a SE3 state along the body frame forward direction with yaw rotation.""" + # Rotate 90 degrees around z-axis, then translate 1 unit along body x-axis + pose: EulerStateSE3 = EulerStateSE3.from_array(np.array([0.0, 0.0, 0.0, 0.0, 0.0, np.pi / 2], dtype=np.float64)) + translation: Vector3D = Vector3D.from_array(np.array([1.0, 0.0, 0.0], dtype=np.float64)) + result: EulerStateSE3 = translate_euler_se3_along_body_frame(pose, translation) + # Should move in +Y direction in world frame + expected: EulerStateSE3 = EulerStateSE3.from_array( + np.array([0.0, 1.0, 0.0, 0.0, 0.0, np.pi / 2], dtype=np.float64) + ) + np.testing.assert_array_almost_equal(result.array, expected.array, decimal=self.decimal) + + def test_translate_se3_along_body_frame_consistency(self) -> None: + """Tests consistency between translate_se3_along_body_frame and axis-specific translation functions.""" + + for _ in range(self.num_consistency_tests): + # Generate random parameters + x_distance: float = np.random.uniform(-10.0, 10.0) + y_distance: float = np.random.uniform(-10.0, 10.0) + z_distance: float = np.random.uniform(-10.0, 10.0) + + start_x: float = np.random.uniform(-5.0, 5.0) + start_y: float = np.random.uniform(-5.0, 5.0) + start_z: float = np.random.uniform(-5.0, 5.0) + + start_roll: float = np.random.uniform(-np.pi, np.pi) + start_pitch: float = np.random.uniform(-np.pi, np.pi) + start_yaw: float = np.random.uniform(-np.pi, np.pi) + + original_pose: EulerStateSE3 = EulerStateSE3.from_array( + np.array( + [ + 
start_x, + start_y, + start_z, + start_roll, + start_pitch, + start_yaw, + ], + dtype=np.float64, + ) + ) + + # x-axis translation + translation_x: Vector3D = Vector3D.from_array(np.array([x_distance, 0.0, 0.0], dtype=np.float64)) + result_body_frame_x: EulerStateSE3 = translate_euler_se3_along_body_frame(original_pose, translation_x) + result_axis_x: EulerStateSE3 = translate_euler_se3_along_x(original_pose, x_distance) + np.testing.assert_array_almost_equal(result_body_frame_x.array, result_axis_x.array, decimal=self.decimal) + + # y-axis translation + translation_y: Vector3D = Vector3D.from_array(np.array([0.0, y_distance, 0.0], dtype=np.float64)) + result_body_frame_y: EulerStateSE3 = translate_euler_se3_along_body_frame(original_pose, translation_y) + result_axis_y: EulerStateSE3 = translate_euler_se3_along_y(original_pose, y_distance) + np.testing.assert_array_almost_equal(result_body_frame_y.array, result_axis_y.array, decimal=self.decimal) + + # z-axis translation + translation_z: Vector3D = Vector3D.from_array(np.array([0.0, 0.0, z_distance], dtype=np.float64)) + result_body_frame_z: EulerStateSE3 = translate_euler_se3_along_body_frame(original_pose, translation_z) + result_axis_z: EulerStateSE3 = translate_euler_se3_along_z(original_pose, z_distance) + np.testing.assert_array_almost_equal(result_body_frame_z.array, result_axis_z.array, decimal=self.decimal) + + # all axes translation + translation_all: Vector3D = Vector3D.from_array( + np.array([x_distance, y_distance, z_distance], dtype=np.float64) + ) + result_body_frame_all: EulerStateSE3 = translate_euler_se3_along_body_frame(original_pose, translation_all) + intermediate_pose: EulerStateSE3 = translate_euler_se3_along_x(original_pose, x_distance) + intermediate_pose = translate_euler_se3_along_y(intermediate_pose, y_distance) + result_axis_all: EulerStateSE3 = translate_euler_se3_along_z(intermediate_pose, z_distance) + np.testing.assert_array_almost_equal( + result_body_frame_all.array, result_axis_all.array, decimal=self.decimal + ) + + def test_convert_absolute_to_relative_se3_array(self) -> None: + """Tests converting absolute SE3 poses to relative SE3 poses.""" + reference: EulerStateSE3 = EulerStateSE3.from_array(np.array([1.0, 1.0, 1.0, 0.0, 0.0, 0.0], dtype=np.float64)) + absolute_poses: npt.NDArray[np.float64] = np.array( + [ + [2.0, 2.0, 2.0, 0.0, 0.0, 0.0], + [0.0, 1.0, 0.0, 0.0, 0.0, 0.0], + ], + dtype=np.float64, + ) + result: npt.NDArray[np.float64] = convert_absolute_to_relative_euler_se3_array(reference, absolute_poses) + expected: npt.NDArray[np.float64] = np.array( + [ + [1.0, 1.0, 1.0, 0.0, 0.0, 0.0], + [-1.0, 0.0, -1.0, 0.0, 0.0, 0.0], + ], + dtype=np.float64, + ) + np.testing.assert_array_almost_equal(result, expected) + + def test_convert_absolute_to_relative_se3_array_single_pose(self) -> None: + """Tests converting a single absolute SE3 pose to a relative SE3 pose.""" + reference: EulerStateSE3 = EulerStateSE3.from_array(np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0], dtype=np.float64)) + absolute_poses: npt.NDArray[np.float64] = np.array([[1.0, 2.0, 3.0, 0.0, 0.0, 0.0]], dtype=np.float64) + result: npt.NDArray[np.float64] = convert_absolute_to_relative_euler_se3_array(reference, absolute_poses) + expected: npt.NDArray[np.float64] = np.array([[1.0, 2.0, 3.0, 0.0, 0.0, 0.0]], dtype=np.float64) + np.testing.assert_array_almost_equal(result, expected) + + def test_convert_absolute_to_relative_se3_array_with_rotation(self) -> None: + """Tests converting absolute SE3 poses to relative SE3 poses with 90 degree yaw 
rotation.""" + reference: EulerStateSE3 = EulerStateSE3.from_array( + np.array([0.0, 0.0, 0.0, 0.0, 0.0, np.pi / 2], dtype=np.float64) + ) + absolute_poses: npt.NDArray[np.float64] = np.array([[1.0, 0.0, 0.0, 0.0, 0.0, 0.0]], dtype=np.float64) + result: npt.NDArray[np.float64] = convert_absolute_to_relative_euler_se3_array(reference, absolute_poses) + expected: npt.NDArray[np.float64] = np.array([[0.0, -1.0, 0.0, 0.0, 0.0, -np.pi / 2]], dtype=np.float64) + np.testing.assert_array_almost_equal(result, expected, decimal=self.decimal) + + def test_convert_relative_to_absolute_se3_array(self) -> None: + """Tests converting relative SE3 poses to absolute SE3 poses.""" + reference: EulerStateSE3 = EulerStateSE3.from_array(np.array([1.0, 1.0, 1.0, 0.0, 0.0, 0.0], dtype=np.float64)) + relative_poses: npt.NDArray[np.float64] = np.array( + [ + [1.0, 1.0, 1.0, 0.0, 0.0, 0.0], + [-1.0, 0.0, -1.0, 0.0, 0.0, 0.0], + ], + dtype=np.float64, + ) + result: npt.NDArray[np.float64] = convert_relative_to_absolute_euler_se3_array(reference, relative_poses) + expected: npt.NDArray[np.float64] = np.array( + [ + [2.0, 2.0, 2.0, 0.0, 0.0, 0.0], + [0.0, 1.0, 0.0, 0.0, 0.0, 0.0], + ], + dtype=np.float64, + ) + np.testing.assert_array_almost_equal(result, expected) + + def test_convert_relative_to_absolute_se3_array_with_rotation(self) -> None: + """Tests converting relative SE3 poses to absolute SE3 poses with 90 degree yaw rotation.""" + reference: EulerStateSE3 = EulerStateSE3.from_array( + np.array([1.0, 0.0, 0.0, 0.0, 0.0, np.pi / 2], dtype=np.float64) + ) + relative_poses: npt.NDArray[np.float64] = np.array([[1.0, 0.0, 0.0, 0.0, 0.0, 0.0]], dtype=np.float64) + result: npt.NDArray[np.float64] = convert_relative_to_absolute_euler_se3_array(reference, relative_poses) + expected: npt.NDArray[np.float64] = np.array([[1.0, 1.0, 0.0, 0.0, 0.0, np.pi / 2]], dtype=np.float64) + np.testing.assert_array_almost_equal(result, expected, decimal=self.decimal) + + def test_convert_absolute_to_relative_points_3d_array(self) -> None: + """Tests converting absolute 3D points to relative 3D points.""" + reference: EulerStateSE3 = EulerStateSE3.from_array(np.array([1.0, 1.0, 1.0, 0.0, 0.0, 0.0], dtype=np.float64)) + absolute_points: npt.NDArray[np.float64] = np.array([[2.0, 2.0, 2.0], [0.0, 1.0, 0.0]], dtype=np.float64) + result: npt.NDArray[np.float64] = convert_absolute_to_relative_points_3d_array(reference, absolute_points) + expected: npt.NDArray[np.float64] = np.array([[1.0, 1.0, 1.0], [-1.0, 0.0, -1.0]], dtype=np.float64) + np.testing.assert_array_almost_equal(result, expected) + + def test_convert_absolute_to_relative_points_3d_array_origin_reference(self) -> None: + """Tests converting absolute 3D points to relative 3D points with origin reference.""" + reference: EulerStateSE3 = EulerStateSE3.from_array(np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0], dtype=np.float64)) + absolute_points: npt.NDArray[np.float64] = np.array([[1.0, 2.0, 3.0], [-1.0, -2.0, -3.0]], dtype=np.float64) + result: npt.NDArray[np.float64] = convert_absolute_to_relative_points_3d_array(reference, absolute_points) + expected: npt.NDArray[np.float64] = np.array([[1.0, 2.0, 3.0], [-1.0, -2.0, -3.0]], dtype=np.float64) + np.testing.assert_array_almost_equal(result, expected) + + def test_convert_absolute_to_relative_points_3d_array_with_rotation(self) -> None: + """Tests converting absolute 3D points to relative 3D points with 90 degree yaw rotation.""" + reference: EulerStateSE3 = EulerStateSE3.from_array( + np.array([0.0, 0.0, 0.0, 0.0, 0.0, np.pi / 2], 
dtype=np.float64) + ) + absolute_points: npt.NDArray[np.float64] = np.array([[1.0, 0.0, 0.0], [0.0, 1.0, 1.0]], dtype=np.float64) + result: npt.NDArray[np.float64] = convert_absolute_to_relative_points_3d_array(reference, absolute_points) + expected: npt.NDArray[np.float64] = np.array([[0.0, -1.0, 0.0], [1.0, 0.0, 1.0]], dtype=np.float64) + np.testing.assert_array_almost_equal(result, expected, decimal=self.decimal) + + def test_convert_relative_to_absolute_points_3d_array(self) -> None: + """Tests converting relative 3D points to absolute 3D points.""" + reference: EulerStateSE3 = EulerStateSE3.from_array(np.array([1.0, 1.0, 1.0, 0.0, 0.0, 0.0], dtype=np.float64)) + relative_points: npt.NDArray[np.float64] = np.array([[1.0, 1.0, 1.0], [-1.0, 0.0, -1.0]], dtype=np.float64) + result: npt.NDArray[np.float64] = convert_relative_to_absolute_points_3d_array(reference, relative_points) + expected: npt.NDArray[np.float64] = np.array([[2.0, 2.0, 2.0], [0.0, 1.0, 0.0]], dtype=np.float64) + np.testing.assert_array_almost_equal(result, expected) + + def test_convert_relative_to_absolute_points_3d_array_empty(self) -> None: + """Tests converting an empty array of relative 3D points to absolute 3D points.""" + reference: EulerStateSE3 = EulerStateSE3.from_array(np.array([1.0, 1.0, 1.0, 0.0, 0.0, 0.0], dtype=np.float64)) + relative_points: npt.NDArray[np.float64] = np.array([], dtype=np.float64).reshape(0, 3) + result: npt.NDArray[np.float64] = convert_relative_to_absolute_points_3d_array(reference, relative_points) + expected: npt.NDArray[np.float64] = np.array([], dtype=np.float64).reshape(0, 3) + np.testing.assert_array_almost_equal(result, expected) + + def test_convert_relative_to_absolute_points_3d_array_with_rotation(self) -> None: + """Tests converting relative 3D points to absolute 3D points with 90 degree yaw rotation.""" + reference: EulerStateSE3 = EulerStateSE3.from_array( + np.array([1.0, 0.0, 0.0, 0.0, 0.0, np.pi / 2], dtype=np.float64) + ) + relative_points: npt.NDArray[np.float64] = np.array([[1.0, 0.0, 0.0], [0.0, 1.0, 1.0]], dtype=np.float64) + result: npt.NDArray[np.float64] = convert_relative_to_absolute_points_3d_array(reference, relative_points) + expected: npt.NDArray[np.float64] = np.array([[1.0, 1.0, 0.0], [0.0, 0.0, 1.0]], dtype=np.float64) + np.testing.assert_array_almost_equal(result, expected, decimal=self.decimal) diff --git a/d123/geometry/transform/test/test_transform_se2.py b/d123/geometry/transform/test/test_transform_se2.py new file mode 100644 index 00000000..503d87de --- /dev/null +++ b/d123/geometry/transform/test/test_transform_se2.py @@ -0,0 +1,220 @@ +import unittest + +import numpy as np +import numpy.typing as npt + +from d123.geometry.se import StateSE2 +from d123.geometry.transform.transform_se2 import ( + convert_absolute_to_relative_point_2d_array, + convert_absolute_to_relative_se2_array, + convert_relative_to_absolute_point_2d_array, + convert_relative_to_absolute_se2_array, + translate_se2_along_body_frame, + translate_se2_along_x, + translate_se2_along_y, + translate_se2_array_along_body_frame, +) +from d123.geometry.vector import Vector2D + + +class TestTransformSE2(unittest.TestCase): + + def setUp(self): + self.decimal = 6 # Decimal places for np.testing.assert_array_almost_equal + + def test_translate_se2_along_x(self) -> None: + """Tests translating a SE2 state along the X-axis.""" + pose: StateSE2 = StateSE2.from_array(np.array([0.0, 0.0, 0.0], dtype=np.float64)) + distance: float = 1.0 + result: StateSE2 = translate_se2_along_x(pose, distance) + 
expected: StateSE2 = StateSE2.from_array(np.array([1.0, 0.0, 0.0], dtype=np.float64))
+        np.testing.assert_array_almost_equal(result.array, expected.array, decimal=self.decimal)
+
+    def test_translate_se2_along_x_negative(self) -> None:
+        """Tests translating a SE2 state along the X-axis in the negative direction."""
+        pose: StateSE2 = StateSE2.from_array(np.array([1.0, 2.0, 0.0], dtype=np.float64))
+        distance: float = -0.5
+        result: StateSE2 = translate_se2_along_x(pose, distance)
+        expected: StateSE2 = StateSE2.from_array(np.array([0.5, 2.0, 0.0], dtype=np.float64))
+        np.testing.assert_array_almost_equal(result.array, expected.array, decimal=self.decimal)
+
+    def test_translate_se2_along_x_with_rotation(self) -> None:
+        """Tests translating a SE2 state along the X-axis with 90 degree rotation."""
+        pose: StateSE2 = StateSE2.from_array(np.array([0.0, 0.0, np.pi / 2], dtype=np.float64))
+        distance: float = 1.0
+        result: StateSE2 = translate_se2_along_x(pose, distance)
+        expected: StateSE2 = StateSE2.from_array(np.array([0.0, 1.0, np.pi / 2], dtype=np.float64))
+        np.testing.assert_array_almost_equal(result.array, expected.array, decimal=self.decimal)
+
+    def test_translate_se2_along_y(self) -> None:
+        """Tests translating a SE2 state along the Y-axis."""
+        pose: StateSE2 = StateSE2.from_array(np.array([0.0, 0.0, 0.0], dtype=np.float64))
+        distance: float = 1.0
+        result: StateSE2 = translate_se2_along_y(pose, distance)
+        expected: StateSE2 = StateSE2.from_array(np.array([0.0, 1.0, 0.0], dtype=np.float64))
+        np.testing.assert_array_almost_equal(result.array, expected.array, decimal=self.decimal)
+
+    def test_translate_se2_along_y_negative(self) -> None:
+        """Tests translating a SE2 state along the Y-axis in the negative direction."""
+        pose: StateSE2 = StateSE2.from_array(np.array([1.0, 2.0, 0.0], dtype=np.float64))
+        distance: float = -1.5
+        result: StateSE2 = translate_se2_along_y(pose, distance)
+        expected: StateSE2 = StateSE2.from_array(np.array([1.0, 0.5, 0.0], dtype=np.float64))
+        np.testing.assert_array_almost_equal(result.array, expected.array, decimal=self.decimal)
+
+    def test_translate_se2_along_y_with_rotation(self) -> None:
+        """Tests translating a SE2 state along the Y-axis with -90 degree rotation."""
+        pose: StateSE2 = StateSE2.from_array(np.array([0.0, 0.0, -np.pi / 2], dtype=np.float64))
+        distance: float = 2.0
+        result: StateSE2 = translate_se2_along_y(pose, distance)
+        expected: StateSE2 = StateSE2.from_array(np.array([2.0, 0.0, -np.pi / 2], dtype=np.float64))
+        np.testing.assert_array_almost_equal(result.array, expected.array, decimal=self.decimal)
+
+    def test_translate_se2_along_body_frame_forward(self) -> None:
+        """Tests translating a SE2 state along the body frame forward direction, with 90 degree rotation."""
+        # Move 1 unit forward in the direction of yaw (pi/2 = 90 degrees = +Y direction)
+        pose: StateSE2 = StateSE2.from_array(np.array([0.0, 0.0, np.pi / 2], dtype=np.float64))
+        vector: Vector2D = Vector2D(1.0, 0.0)
+        result: StateSE2 = translate_se2_along_body_frame(pose, vector)
+        expected: StateSE2 = StateSE2.from_array(np.array([0.0, 1.0, np.pi / 2], dtype=np.float64))
+        np.testing.assert_array_almost_equal(result.array, expected.array, decimal=self.decimal)
+
+    def test_translate_se2_along_body_frame_backward(self) -> None:
+        """Tests translating a SE2 state along the body frame backward direction."""
+        pose: StateSE2 = StateSE2.from_array(np.array([0.0, 0.0, 0.0], dtype=np.float64))
+        vector: Vector2D = Vector2D(-1.0, 0.0)
+        result: StateSE2 = translate_se2_along_body_frame(pose, vector)
+        expected: StateSE2 = StateSE2.from_array(np.array([-1.0, 0.0, 0.0], dtype=np.float64))
+        np.testing.assert_array_almost_equal(result.array, expected.array, decimal=self.decimal)
+
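+    # Body-frame translation rotates the body-frame vector by the pose's yaw before
+    # adding it in world coordinates: x' = x + vx * cos(yaw) - vy * sin(yaw) and
+    # y' = y + vx * sin(yaw) + vy * cos(yaw). The sketch below is illustrative and
+    # not part of the original suite; it shows the sign flip at yaw == pi:
+    def test_translate_se2_along_body_frame_reversed_heading(self) -> None:
+        """Sketch: a forward body-frame translation at yaw == pi moves along world -X."""
+        pose: StateSE2 = StateSE2.from_array(np.array([0.0, 0.0, np.pi], dtype=np.float64))
+        vector: Vector2D = Vector2D(1.0, 0.0)
+        result: StateSE2 = translate_se2_along_body_frame(pose, vector)
+        expected: StateSE2 = StateSE2.from_array(np.array([-1.0, 0.0, np.pi], dtype=np.float64))
+        np.testing.assert_array_almost_equal(result.array, expected.array, decimal=self.decimal)
+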
+    def test_translate_se2_along_body_frame_diagonal(self) -> None:
+        """Tests translating a SE2 state along the body frame diagonal direction."""
+        pose: StateSE2 = StateSE2.from_array(np.array([1.0, 0.0, np.deg2rad(45)], dtype=np.float64))
+        vector: Vector2D = Vector2D(1.0, 0.0)
+        result: StateSE2 = translate_se2_along_body_frame(pose, vector)
+        expected: StateSE2 = StateSE2.from_array(
+            np.array([1.0 + np.sqrt(2.0) / 2, 0.0 + np.sqrt(2.0) / 2, np.deg2rad(45)], dtype=np.float64)
+        )
+        np.testing.assert_array_almost_equal(result.array, expected.array, decimal=self.decimal)
+
+    def test_translate_se2_along_body_frame_lateral(self) -> None:
+        """Tests translating a SE2 state along the body frame lateral direction."""
+        # Move 1 unit along +Y in the body frame (to the left of the heading)
+        pose: StateSE2 = StateSE2.from_array(np.array([0.0, 0.0, 0.0], dtype=np.float64))
+        vector: Vector2D = Vector2D(0.0, 1.0)
+        result: StateSE2 = translate_se2_along_body_frame(pose, vector)
+        expected: StateSE2 = StateSE2.from_array(np.array([0.0, 1.0, 0.0], dtype=np.float64))
+        np.testing.assert_array_almost_equal(result.array, expected.array, decimal=self.decimal)
+
+    def test_translate_se2_along_body_frame_lateral_with_rotation(self) -> None:
+        """Tests translating a SE2 state along the body frame lateral direction with 90 degree rotation."""
+        # Move 1 unit along body-frame +Y while facing 90 degrees, i.e. towards world -X
+        pose: StateSE2 = StateSE2.from_array(np.array([0.0, 0.0, np.pi / 2], dtype=np.float64))
+        vector: Vector2D = Vector2D(0.0, 1.0)
+        result: StateSE2 = translate_se2_along_body_frame(pose, vector)
+        expected: StateSE2 = StateSE2.from_array(np.array([-1.0, 0.0, np.pi / 2], dtype=np.float64))
+        np.testing.assert_array_almost_equal(result.array, expected.array, decimal=self.decimal)
+
+    def test_translate_se2_array_along_body_frame_single_distance(self) -> None:
+        """Tests translating a SE2 state array along the body frame forward direction."""
+        poses: npt.NDArray[np.float64] = np.array([[0.0, 0.0, 0.0], [0.0, 0.0, np.pi / 2]], dtype=np.float64)
+        distance: Vector2D = Vector2D(1.0, 0.0)
+        result: npt.NDArray[np.float64] = translate_se2_array_along_body_frame(poses, distance)
+        expected: npt.NDArray[np.float64] = np.array([[1.0, 0.0, 0.0], [0.0, 1.0, np.pi / 2]], dtype=np.float64)
+        np.testing.assert_array_almost_equal(result, expected, decimal=self.decimal)
+
+    def test_translate_se2_array_along_body_frame_multiple_distances(self) -> None:
+        """Tests translating a SE2 state array along the body frame forward direction for poses with different headings."""
+        poses: npt.NDArray[np.float64] = np.array([[0.0, 0.0, 0.0], [0.0, 0.0, np.pi]], dtype=np.float64)
+        distance: Vector2D = Vector2D(2.0, 0.0)
+        result: npt.NDArray[np.float64] = translate_se2_array_along_body_frame(poses, distance)
+        expected: npt.NDArray[np.float64] = np.array([[2.0, 0.0, 0.0], [-2.0, 0.0, np.pi]], dtype=np.float64)
+        np.testing.assert_array_almost_equal(result, expected, decimal=self.decimal)
+
+    def test_translate_se2_array_along_body_frame_lateral(self) -> None:
+        """Tests translating a SE2 state array along the body frame lateral direction with 90 degree rotation."""
+        poses: npt.NDArray[np.float64] = np.array([[0.0, 0.0, 0.0], [0.0, 0.0, np.pi / 2]], dtype=np.float64)
+        distance: Vector2D = Vector2D(0.0, 1.0)
+        result: npt.NDArray[np.float64] = translate_se2_array_along_body_frame(poses, distance)
+        expected: npt.NDArray[np.float64] = np.array([[0.0, 1.0, 0.0], [-1.0, 0.0, np.pi / 2]], dtype=np.float64)
+        np.testing.assert_array_almost_equal(result, expected, decimal=self.decimal)
+
+    def test_convert_absolute_to_relative_se2_array(self) -> None:
+        """Tests converting absolute SE2 poses to relative SE2 poses."""
+        origin: StateSE2 = StateSE2.from_array(np.array([1.0, 1.0, 0.0], dtype=np.float64))
+        absolute_poses: npt.NDArray[np.float64] = np.array([[2.0, 2.0, 0.0], [0.0, 1.0, np.pi / 2]], dtype=np.float64)
+        result: npt.NDArray[np.float64] = convert_absolute_to_relative_se2_array(origin, absolute_poses)
+        expected: npt.NDArray[np.float64] = np.array([[1.0, 1.0, 0.0], [-1.0, 0.0, np.pi / 2]], dtype=np.float64)
+        np.testing.assert_array_almost_equal(result, expected, decimal=self.decimal)
+
+    def test_convert_absolute_to_relative_se2_array_with_rotation(self) -> None:
+        """Tests converting absolute SE2 poses to relative SE2 poses with 90 degree rotation."""
+        reference: StateSE2 = StateSE2.from_array(np.array([0.0, 0.0, np.pi / 2], dtype=np.float64))
+        absolute_poses: npt.NDArray[np.float64] = np.array([[1.0, 0.0, np.pi / 2]], dtype=np.float64)
+        result: npt.NDArray[np.float64] = convert_absolute_to_relative_se2_array(reference, absolute_poses)
+        expected: npt.NDArray[np.float64] = np.array([[0.0, -1.0, 0.0]], dtype=np.float64)
+        np.testing.assert_array_almost_equal(result, expected, decimal=self.decimal)
+
+    def test_convert_absolute_to_relative_se2_array_identity(self) -> None:
+        """Tests converting absolute SE2 poses to relative SE2 poses with identity transformation."""
+        reference: StateSE2 = StateSE2.from_array(np.array([0.0, 0.0, 0.0], dtype=np.float64))
+        absolute_poses: npt.NDArray[np.float64] = np.array([[1.0, 2.0, np.pi / 4]], dtype=np.float64)
+        result: npt.NDArray[np.float64] = convert_absolute_to_relative_se2_array(reference, absolute_poses)
+        expected: npt.NDArray[np.float64] = np.array([[1.0, 2.0, np.pi / 4]], dtype=np.float64)
+        np.testing.assert_array_almost_equal(result, expected, decimal=self.decimal)
+
+    def test_convert_relative_to_absolute_se2_array(self) -> None:
+        """Tests converting relative SE2 poses to absolute SE2 poses."""
+        reference: StateSE2 = StateSE2.from_array(np.array([1.0, 1.0, 0.0], dtype=np.float64))
+        relative_poses: npt.NDArray[np.float64] = np.array([[1.0, 1.0, 0.0], [-1.0, 0.0, np.pi / 2]], dtype=np.float64)
+        result: npt.NDArray[np.float64] = convert_relative_to_absolute_se2_array(reference, relative_poses)
+        expected: npt.NDArray[np.float64] = np.array([[2.0, 2.0, 0.0], [0.0, 1.0, np.pi / 2]], dtype=np.float64)
+        np.testing.assert_array_almost_equal(result, expected, decimal=self.decimal)
+
+    def test_convert_relative_to_absolute_se2_array_with_rotation(self) -> None:
+        """Tests converting relative SE2 poses to absolute SE2 poses with rotation."""
+        reference: StateSE2 = StateSE2.from_array(np.array([1.0, 0.0, np.pi / 2], dtype=np.float64))
+        relative_poses: npt.NDArray[np.float64] = np.array([[1.0, 0.0, 0.0]], dtype=np.float64)
+        result: npt.NDArray[np.float64] = convert_relative_to_absolute_se2_array(reference, relative_poses)
+        expected: npt.NDArray[np.float64] = np.array([[1.0, 1.0, np.pi / 2]], dtype=np.float64)
+        np.testing.assert_array_almost_equal(result, expected, decimal=self.decimal)
+
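+    # For reference, the pose conversions above compose SE2 transforms. Inferred
+    # from the expected values (a sketch of the convention, not copied from the
+    # implementation):
+    #   absolute -> relative: rel_xy = R(-origin.yaw) @ (abs_xy - origin.xy), rel_yaw = abs_yaw - origin.yaw
+    #   relative -> absolute: abs_xy = R(origin.yaw) @ rel_xy + origin.xy,    abs_yaw = rel_yaw + origin.yaw
+    # with R(theta) the 2x2 rotation matrix and yaw angles normalized to [-pi, pi].
+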
+    def test_convert_absolute_to_relative_point_2d_array(self) -> None:
+        """Tests converting absolute 2D points to relative 2D points."""
+        reference: StateSE2 = StateSE2.from_array(np.array([1.0, 1.0, 0.0], dtype=np.float64))
+        absolute_points: npt.NDArray[np.float64] = np.array([[2.0, 2.0], [0.0, 1.0]], dtype=np.float64)
+        result: npt.NDArray[np.float64] = convert_absolute_to_relative_point_2d_array(reference, absolute_points)
+        expected: npt.NDArray[np.float64] = np.array([[1.0, 1.0], [-1.0, 0.0]], dtype=np.float64)
+        np.testing.assert_array_almost_equal(result, expected, decimal=self.decimal)
+
+    def test_convert_absolute_to_relative_point_2d_array_with_rotation(self) -> None:
+        """Tests converting absolute 2D points to relative 2D points with 90 degree rotation."""
+        reference: StateSE2 = StateSE2.from_array(np.array([0.0, 0.0, np.pi / 2], dtype=np.float64))
+        absolute_points: npt.NDArray[np.float64] = np.array([[0.0, 1.0], [1.0, 0.0]], dtype=np.float64)
+        result: npt.NDArray[np.float64] = convert_absolute_to_relative_point_2d_array(reference, absolute_points)
+        expected: npt.NDArray[np.float64] = np.array([[1.0, 0.0], [0.0, -1.0]], dtype=np.float64)
+        np.testing.assert_array_almost_equal(result, expected, decimal=self.decimal)
+
+    def test_convert_absolute_to_relative_point_2d_array_empty(self) -> None:
+        """Tests converting an empty array of absolute 2D points to relative 2D points."""
+        reference: StateSE2 = StateSE2.from_array(np.array([1.0, 1.0, 0.0], dtype=np.float64))
+        absolute_points: npt.NDArray[np.float64] = np.array([], dtype=np.float64).reshape(0, 2)
+        result: npt.NDArray[np.float64] = convert_absolute_to_relative_point_2d_array(reference, absolute_points)
+        expected: npt.NDArray[np.float64] = np.array([], dtype=np.float64).reshape(0, 2)
+        np.testing.assert_array_almost_equal(result, expected, decimal=self.decimal)
+
+    def test_convert_relative_to_absolute_point_2d_array(self) -> None:
+        """Tests converting relative 2D points to absolute 2D points."""
+        reference: StateSE2 = StateSE2.from_array(np.array([1.0, 1.0, 0.0], dtype=np.float64))
+        relative_points: npt.NDArray[np.float64] = np.array([[1.0, 1.0], [-1.0, 0.0]], dtype=np.float64)
+        result: npt.NDArray[np.float64] = convert_relative_to_absolute_point_2d_array(reference, relative_points)
+        expected: npt.NDArray[np.float64] = np.array([[2.0, 2.0], [0.0, 1.0]], dtype=np.float64)
+        np.testing.assert_array_almost_equal(result, expected, decimal=self.decimal)
+
+    def test_convert_relative_to_absolute_point_2d_array_with_rotation(self) -> None:
+        """Tests converting relative 2D points to absolute 2D points with 90 degree rotation."""
+        reference: StateSE2 = StateSE2.from_array(np.array([1.0, 0.0, np.pi / 2], dtype=np.float64))
+        relative_points: npt.NDArray[np.float64] = np.array([[1.0, 0.0], [0.0, 1.0]], dtype=np.float64)
+        result: npt.NDArray[np.float64] = convert_relative_to_absolute_point_2d_array(reference, relative_points)
+        expected: npt.NDArray[np.float64] = np.array([[1.0, 1.0], [0.0, 0.0]], dtype=np.float64)
+        np.testing.assert_array_almost_equal(result, expected, decimal=self.decimal)
diff --git a/d123/geometry/test/test_transform_quaternion.py b/d123/geometry/transform/test/test_transform_se3.py
similarity index 51%
rename from d123/geometry/test/test_transform_quaternion.py
rename to d123/geometry/transform/test/test_transform_se3.py
index 12c8433e..d7c077ea 100644
--- a/d123/geometry/test/test_transform_quaternion.py
+++ b/d123/geometry/transform/test/test_transform_se3.py
@@ -3,40 +3,31 @@
 import numpy as np
 import numpy.typing as npt
 
-from d123.geometry.geometry_index import (
-    # EulerAnglesIndex,
-    # Point2DIndex,
-    # Point3DIndex,
-    QuaternionSE3Index,
-    #
StateSE2Index, - StateSE3Index, -) +from d123.geometry.geometry_index import StateSE3Index, EulerStateSE3Index from d123.geometry.point import Point3D -from d123.geometry.rotation import EulerAngles, Quaternion -from d123.geometry.se import StateSE3, QuaternionSE3 -from d123.geometry.transform.transform_quaternion_se3 import ( +from d123.geometry.rotation import Quaternion +from d123.geometry.se import EulerStateSE3, StateSE3 +from d123.geometry.transform.transform_se3 import ( convert_absolute_to_relative_points_3d_array, convert_absolute_to_relative_se3_array, convert_relative_to_absolute_points_3d_array, convert_relative_to_absolute_se3_array, - # translate_se3_along_body_frame, - # translate_se3_along_x, - # translate_se3_along_y, - # translate_se3_along_z, + translate_se3_along_x, + translate_se3_along_y, + translate_se3_along_z, + translate_se3_along_body_frame, ) -import d123.geometry.transform.transform_se3 as euler_transform_se3 +import d123.geometry.transform.transform_euler_se3 as euler_transform_se3 from d123.geometry.utils.rotation_utils import ( get_rotation_matrices_from_euler_array, get_rotation_matrices_from_quaternion_array, ) -# from d123.geometry.vector import Vector2D, Vector3D - -class TestTransformQuaternion(unittest.TestCase): +class TestTransformSE3(unittest.TestCase): def setUp(self): - euler_se3_a = StateSE3( + euler_se3_a = EulerStateSE3( x=1.0, y=2.0, z=3.0, @@ -44,7 +35,7 @@ def setUp(self): pitch=0.0, yaw=0.0, ) - euler_se3_b = StateSE3( + euler_se3_b = EulerStateSE3( x=1.0, y=-2.0, z=3.0, @@ -52,7 +43,7 @@ def setUp(self): pitch=np.deg2rad(90), yaw=0.0, ) - euler_se3_c = StateSE3( + euler_se3_c = EulerStateSE3( x=-1.0, y=2.0, z=-3.0, @@ -61,9 +52,9 @@ def setUp(self): yaw=np.deg2rad(90), ) - quat_se3_a = euler_se3_a.quaternion_se3 - quat_se3_b = euler_se3_b.quaternion_se3 - quat_se3_c = euler_se3_c.quaternion_se3 + quat_se3_a: StateSE3 = euler_se3_a.quaternion_se3 + quat_se3_b: StateSE3 = euler_se3_b.quaternion_se3 + quat_se3_c: StateSE3 = euler_se3_c.quaternion_se3 self.euler_se3 = [euler_se3_a, euler_se3_b, euler_se3_c] self.quat_se3 = [quat_se3_a, quat_se3_b, quat_se3_c] @@ -72,11 +63,13 @@ def setUp(self): def _get_random_euler_se3_array(self, size: int) -> npt.NDArray[np.float64]: """Generate a random SE3 poses""" - random_se3_array = np.zeros((size, len(StateSE3Index)), dtype=np.float64) - random_se3_array[:, StateSE3Index.XYZ] = np.random.uniform(-self.max_pose_xyz, self.max_pose_xyz, (size, 3)) - random_se3_array[:, StateSE3Index.YAW] = np.random.uniform(-np.pi, np.pi, size) - random_se3_array[:, StateSE3Index.PITCH] = np.random.uniform(-np.pi / 2, np.pi / 2, size) - random_se3_array[:, StateSE3Index.ROLL] = np.random.uniform(-np.pi, np.pi, size) + random_se3_array = np.zeros((size, len(EulerStateSE3Index)), dtype=np.float64) + random_se3_array[:, EulerStateSE3Index.XYZ] = np.random.uniform( + -self.max_pose_xyz, self.max_pose_xyz, (size, 3) + ) + random_se3_array[:, EulerStateSE3Index.YAW] = np.random.uniform(-np.pi, np.pi, size) + random_se3_array[:, EulerStateSE3Index.PITCH] = np.random.uniform(-np.pi / 2, np.pi / 2, size) + random_se3_array[:, EulerStateSE3Index.ROLL] = np.random.uniform(-np.pi, np.pi, size) return random_se3_array @@ -84,12 +77,12 @@ def _convert_euler_se3_array_to_quat_se3_array( self, euler_se3_array: npt.NDArray[np.float64] ) -> npt.NDArray[np.float64]: """Convert an array of SE3 poses from Euler angles to Quaternion representation""" - quat_se3_array = np.zeros((euler_se3_array.shape[0], len(QuaternionSE3Index)), 
dtype=np.float64) - quat_se3_array[:, QuaternionSE3Index.XYZ] = euler_se3_array[:, StateSE3Index.XYZ] - rotation_matrices = get_rotation_matrices_from_euler_array(euler_se3_array[:, StateSE3Index.EULER_ANGLES]) + quat_se3_array = np.zeros((euler_se3_array.shape[0], len(StateSE3Index)), dtype=np.float64) + quat_se3_array[:, StateSE3Index.XYZ] = euler_se3_array[:, EulerStateSE3Index.XYZ] + rotation_matrices = get_rotation_matrices_from_euler_array(euler_se3_array[:, EulerStateSE3Index.EULER_ANGLES]) for idx, rotation_matrix in enumerate(rotation_matrices): quat = Quaternion.from_rotation_matrix(rotation_matrix) - quat_se3_array[idx, QuaternionSE3Index.QUATERNION] = quat.array + quat_se3_array[idx, StateSE3Index.QUATERNION] = quat.array return quat_se3_array def test_sanity(self): @@ -112,15 +105,15 @@ def test_random_sanity(self): random_quat_se3_array = self._convert_euler_se3_array_to_quat_se3_array(random_euler_se3_array) np.testing.assert_allclose( - random_euler_se3_array[:, StateSE3Index.XYZ], - random_quat_se3_array[:, QuaternionSE3Index.XYZ], + random_euler_se3_array[:, EulerStateSE3Index.XYZ], + random_quat_se3_array[:, StateSE3Index.XYZ], atol=1e-6, ) quat_rotation_matrices = get_rotation_matrices_from_quaternion_array( - random_quat_se3_array[:, QuaternionSE3Index.QUATERNION] + random_quat_se3_array[:, StateSE3Index.QUATERNION] ) euler_rotation_matrices = get_rotation_matrices_from_euler_array( - random_euler_se3_array[:, StateSE3Index.EULER_ANGLES] + random_euler_se3_array[:, EulerStateSE3Index.EULER_ANGLES] ) np.testing.assert_allclose(euler_rotation_matrices, quat_rotation_matrices, atol=1e-6) @@ -141,18 +134,18 @@ def test_convert_absolute_to_relative_se3_array(self): random_quat_se3_array = self._convert_euler_se3_array_to_quat_se3_array(random_euler_se3_array) rel_se3_quat = convert_absolute_to_relative_se3_array(quat_se3, random_quat_se3_array) - rel_se3_euler = euler_transform_se3.convert_absolute_to_relative_se3_array( + rel_se3_euler = euler_transform_se3.convert_absolute_to_relative_euler_se3_array( euler_se3, random_euler_se3_array ) np.testing.assert_allclose( - rel_se3_euler[..., StateSE3Index.XYZ], rel_se3_quat[..., QuaternionSE3Index.XYZ], atol=1e-6 + rel_se3_euler[..., EulerStateSE3Index.XYZ], rel_se3_quat[..., StateSE3Index.XYZ], atol=1e-6 ) # We compare rotation matrices to avoid issues with quaternion sign ambiguity quat_rotation_matrices = get_rotation_matrices_from_quaternion_array( - rel_se3_quat[..., QuaternionSE3Index.QUATERNION] + rel_se3_quat[..., StateSE3Index.QUATERNION] ) euler_rotation_matrices = get_rotation_matrices_from_euler_array( - rel_se3_euler[..., StateSE3Index.EULER_ANGLES] + rel_se3_euler[..., EulerStateSE3Index.EULER_ANGLES] ) np.testing.assert_allclose(quat_rotation_matrices, euler_rotation_matrices, atol=1e-6) @@ -173,21 +166,65 @@ def test_convert_relative_to_absolute_se3_array(self): random_quat_se3_array = self._convert_euler_se3_array_to_quat_se3_array(random_euler_se3_array) abs_se3_quat = convert_relative_to_absolute_se3_array(quat_se3, random_quat_se3_array) - abs_se3_euler = euler_transform_se3.convert_relative_to_absolute_se3_array( + abs_se3_euler = euler_transform_se3.convert_relative_to_absolute_euler_se3_array( euler_se3, random_euler_se3_array ) np.testing.assert_allclose( - abs_se3_euler[..., StateSE3Index.XYZ], abs_se3_quat[..., QuaternionSE3Index.XYZ], atol=1e-6 + abs_se3_euler[..., EulerStateSE3Index.XYZ], abs_se3_quat[..., StateSE3Index.XYZ], atol=1e-6 ) # We compare rotation matrices to avoid issues with quaternion 
sign ambiguity quat_rotation_matrices = get_rotation_matrices_from_quaternion_array( - abs_se3_quat[..., QuaternionSE3Index.QUATERNION] + abs_se3_quat[..., StateSE3Index.QUATERNION] ) euler_rotation_matrices = get_rotation_matrices_from_euler_array( - abs_se3_euler[..., StateSE3Index.EULER_ANGLES] + abs_se3_euler[..., EulerStateSE3Index.EULER_ANGLES] ) np.testing.assert_allclose(quat_rotation_matrices, euler_rotation_matrices, atol=1e-6) + def test_translate_se3_along_x(self): + for _ in range(10): + distance = np.random.uniform(-self.max_pose_xyz, self.max_pose_xyz) + for quat_se3, euler_se3 in zip(self.quat_se3, self.euler_se3): + translated_quat = translate_se3_along_x(quat_se3, distance) + translated_euler = euler_transform_se3.translate_euler_se3_along_x(euler_se3, distance) + np.testing.assert_allclose(translated_quat.point_3d.array, translated_euler.point_3d.array, atol=1e-6) + np.testing.assert_allclose(translated_quat.rotation_matrix, translated_euler.rotation_matrix, atol=1e-6) + np.testing.assert_allclose(quat_se3.quaternion.array, translated_quat.quaternion.array, atol=1e-6) + + def test_translate_se3_along_y(self): + for _ in range(10): + distance = np.random.uniform(-self.max_pose_xyz, self.max_pose_xyz) + for quat_se3, euler_se3 in zip(self.quat_se3, self.euler_se3): + translated_quat = translate_se3_along_y(quat_se3, distance) + translated_euler = euler_transform_se3.translate_euler_se3_along_y(euler_se3, distance) + np.testing.assert_allclose(translated_quat.point_3d.array, translated_euler.point_3d.array, atol=1e-6) + np.testing.assert_allclose(translated_quat.rotation_matrix, translated_euler.rotation_matrix, atol=1e-6) + np.testing.assert_allclose(quat_se3.quaternion.array, translated_quat.quaternion.array, atol=1e-6) + + def test_translate_se3_along_z(self): + for _ in range(10): + distance = np.random.uniform(-self.max_pose_xyz, self.max_pose_xyz) + for quat_se3, euler_se3 in zip(self.quat_se3, self.euler_se3): + translated_quat = translate_se3_along_z(quat_se3, distance) + translated_euler = euler_transform_se3.translate_euler_se3_along_z(euler_se3, distance) + np.testing.assert_allclose(translated_quat.point_3d.array, translated_euler.point_3d.array, atol=1e-6) + np.testing.assert_allclose(translated_quat.rotation_matrix, translated_euler.rotation_matrix, atol=1e-6) + np.testing.assert_allclose(quat_se3.quaternion.array, translated_quat.quaternion.array, atol=1e-6) + + def test_translate_se3_along_body_frame(self): + for _ in range(10): + vector_3d = Point3D( + x=np.random.uniform(-self.max_pose_xyz, self.max_pose_xyz), + y=np.random.uniform(-self.max_pose_xyz, self.max_pose_xyz), + z=np.random.uniform(-self.max_pose_xyz, self.max_pose_xyz), + ) + for quat_se3, euler_se3 in zip(self.quat_se3, self.euler_se3): + translated_quat = translate_se3_along_body_frame(quat_se3, vector_3d) + translated_euler = euler_transform_se3.translate_euler_se3_along_body_frame(euler_se3, vector_3d) + np.testing.assert_allclose(translated_quat.point_3d.array, translated_euler.point_3d.array, atol=1e-6) + np.testing.assert_allclose(translated_quat.rotation_matrix, translated_euler.rotation_matrix, atol=1e-6) + np.testing.assert_allclose(quat_se3.quaternion.array, translated_quat.quaternion.array, atol=1e-6) + if __name__ == "__main__": unittest.main() diff --git a/d123/geometry/transform/transform_euler_se3.py b/d123/geometry/transform/transform_euler_se3.py new file mode 100644 index 00000000..c2f897d5 --- /dev/null +++ b/d123/geometry/transform/transform_euler_se3.py @@ -0,0 +1,169 @@ 
+from typing import Union
+
+import numpy as np
+import numpy.typing as npt
+
+from d123.geometry import EulerStateSE3, EulerStateSE3Index, Vector3D
+from d123.geometry.geometry_index import Point3DIndex, Vector3DIndex
+from d123.geometry.rotation import EulerAngles
+from d123.geometry.utils.rotation_utils import (
+    get_rotation_matrices_from_euler_array,
+    get_rotation_matrix_from_euler_array,
+    normalize_angle,
+)
+
+
+def translate_euler_se3_along_z(state_se3: EulerStateSE3, distance: float) -> EulerStateSE3:
+    """Translates an EulerStateSE3 state along its body-frame Z-axis.
+
+    :param state_se3: The SE3 state to translate.
+    :param distance: The distance to translate along the Z-axis.
+    :return: The translated SE3 state.
+    """
+    R = state_se3.rotation_matrix
+    z_axis = R[:, 2]
+
+    state_se3_array = state_se3.array.copy()
+    state_se3_array[EulerStateSE3Index.XYZ] += distance * z_axis[Vector3DIndex.XYZ]
+    return EulerStateSE3.from_array(state_se3_array, copy=False)
+
+
+def translate_euler_se3_along_y(state_se3: EulerStateSE3, distance: float) -> EulerStateSE3:
+    """Translates an EulerStateSE3 state along its body-frame Y-axis.
+
+    :param state_se3: The SE3 state to translate.
+    :param distance: The distance to translate along the Y-axis.
+    :return: The translated SE3 state.
+    """
+    R = state_se3.rotation_matrix
+    y_axis = R[:, 1]
+
+    state_se3_array = state_se3.array.copy()
+    state_se3_array[EulerStateSE3Index.XYZ] += distance * y_axis[Vector3DIndex.XYZ]
+    return EulerStateSE3.from_array(state_se3_array, copy=False)
+
+
+def translate_euler_se3_along_x(state_se3: EulerStateSE3, distance: float) -> EulerStateSE3:
+    """Translates an EulerStateSE3 state along its body-frame X-axis.
+
+    :param state_se3: The SE3 state to translate.
+    :param distance: The distance to translate along the X-axis.
+    :return: The translated SE3 state.
+    """
+    R = state_se3.rotation_matrix
+    x_axis = R[:, 0]
+
+    state_se3_array = state_se3.array.copy()
+    state_se3_array[EulerStateSE3Index.XYZ] += distance * x_axis[Vector3DIndex.XYZ]
+    return EulerStateSE3.from_array(state_se3_array, copy=False)
+
+
+def translate_euler_se3_along_body_frame(state_se3: EulerStateSE3, vector_3d: Vector3D) -> EulerStateSE3:
+    """Translates an EulerStateSE3 state along a vector in the body frame.
+
+    :param state_se3: The SE3 state to translate.
+    :param vector_3d: The vector to translate along in the body frame.
+    :return: The translated SE3 state.
+    """
+    R = state_se3.rotation_matrix
+    world_translation = R @ vector_3d.array
+
+    state_se3_array = state_se3.array.copy()
+    state_se3_array[EulerStateSE3Index.XYZ] += world_translation[Vector3DIndex.XYZ]
+    return EulerStateSE3.from_array(state_se3_array, copy=False)
+
+
+def convert_absolute_to_relative_euler_se3_array(
+    origin: Union[EulerStateSE3, npt.NDArray[np.float64]], se3_array: npt.NDArray[np.float64]
+) -> npt.NDArray[np.float64]:
+    """Converts an Euler-angle SE3 array from the absolute frame to the relative frame.
+
+    :param origin: The origin state in the absolute frame, as an EulerStateSE3 or np.ndarray.
+    :param se3_array: The SE3 array in the absolute frame.
+    :raises TypeError: If the origin is not an EulerStateSE3 or np.ndarray.
+    :return: The SE3 array in the relative frame, indexed by :class:`~d123.geometry.EulerStateSE3Index`.
+    """
+    if isinstance(origin, EulerStateSE3):
+        origin_array = origin.array
+        t_origin = origin.point_3d.array
+        R_origin = origin.rotation_matrix
+    elif isinstance(origin, np.ndarray):
+        assert origin.ndim == 1 and origin.shape[-1] == len(EulerStateSE3Index)
+        origin_array = origin
+        t_origin = origin_array[EulerStateSE3Index.XYZ]
+        R_origin = get_rotation_matrix_from_euler_array(origin_array[EulerStateSE3Index.EULER_ANGLES])
+    else:
+        raise TypeError(f"Expected EulerStateSE3 or np.ndarray, got {type(origin)}")
+
+    assert se3_array.ndim >= 1
+    assert se3_array.shape[-1] == len(EulerStateSE3Index)
+
+    # Prepare output array
+    rel_se3_array = se3_array.copy()
+
+    # Vectorized relative position calculation
+    abs_positions = se3_array[..., EulerStateSE3Index.XYZ]
+    rel_positions = (abs_positions - t_origin) @ R_origin
+    rel_se3_array[..., EulerStateSE3Index.XYZ] = rel_positions
+
+    # Convert absolute rotation matrices to relative rotation matrices
+    abs_rotation_matrices = get_rotation_matrices_from_euler_array(se3_array[..., EulerStateSE3Index.EULER_ANGLES])
+    rel_rotation_matrices = np.einsum("ij,...jk->...ik", R_origin.T, abs_rotation_matrices)
+    if se3_array.shape[0] != 0:
+        rel_euler_angles = np.array([EulerAngles.from_rotation_matrix(R).array for R in rel_rotation_matrices])
+        rel_se3_array[..., EulerStateSE3Index.EULER_ANGLES] = normalize_angle(rel_euler_angles)
+
+    return rel_se3_array
+
+
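+# Hedged usage sketch for the two pose converters (illustrative values only; the
+# round-trip identity below is what the unit tests in this patch assert):
+#
+#   origin = EulerStateSE3.from_array(np.array([1.0, 2.0, 3.0, 0.0, 0.0, np.pi / 2]))
+#   poses = np.array([[4.0, 5.0, 6.0, 0.1, 0.2, 0.3]])
+#   rel = convert_absolute_to_relative_euler_se3_array(origin, poses)
+#   assert np.allclose(convert_relative_to_absolute_euler_se3_array(origin, rel), poses)
+
+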
+def convert_relative_to_absolute_euler_se3_array(
+    origin: Union[EulerStateSE3, npt.NDArray[np.float64]], se3_array: npt.NDArray[np.float64]
+) -> npt.NDArray[np.float64]:
+    """Converts an Euler-angle SE3 array from the relative frame to the absolute frame.
+
+    :param origin: The origin state in the absolute frame, as an EulerStateSE3 or np.ndarray.
+    :param se3_array: The SE3 array in the relative frame.
+    :raises TypeError: If the origin is not an EulerStateSE3 or np.ndarray.
+    :return: The SE3 array in the absolute frame, indexed by :class:`~d123.geometry.EulerStateSE3Index`.
+    """
+    if isinstance(origin, EulerStateSE3):
+        origin_array = origin.array
+        t_origin = origin.point_3d.array
+        R_origin = origin.rotation_matrix
+    elif isinstance(origin, np.ndarray):
+        assert origin.ndim == 1 and origin.shape[-1] == len(EulerStateSE3Index)
+        origin_array = origin
+        t_origin = origin_array[EulerStateSE3Index.XYZ]
+        R_origin = get_rotation_matrix_from_euler_array(origin_array[EulerStateSE3Index.EULER_ANGLES])
+    else:
+        raise TypeError(f"Expected EulerStateSE3 or np.ndarray, got {type(origin)}")
+
+    assert se3_array.ndim >= 1
+    assert se3_array.shape[-1] == len(EulerStateSE3Index)
+
+    # Prepare output array
+    abs_se3_array = se3_array.copy()
+
+    # Vectorized absolute position calculation: rotate and translate
+    rel_positions = se3_array[..., EulerStateSE3Index.XYZ]
+    abs_positions = (rel_positions @ R_origin.T) + t_origin
+    abs_se3_array[..., EulerStateSE3Index.XYZ] = abs_positions
+
+    # Convert relative rotation matrices to absolute rotation matrices
+    rel_rotation_matrices = get_rotation_matrices_from_euler_array(se3_array[..., EulerStateSE3Index.EULER_ANGLES])
+    abs_rotation_matrices = np.einsum("ij,...jk->...ik", R_origin, rel_rotation_matrices)
+
+    if se3_array.shape[0] != 0:
+        abs_euler_angles = np.array([EulerAngles.from_rotation_matrix(R).array for R in abs_rotation_matrices])
+        abs_se3_array[..., EulerStateSE3Index.EULER_ANGLES] = normalize_angle(abs_euler_angles)
+
+    return abs_se3_array
+
+
+def convert_absolute_to_relative_points_3d_array(
+    origin: Union[EulerStateSE3, npt.NDArray[np.float64]], points_3d_array: npt.NDArray[np.float64]
+) -> npt.NDArray[np.float64]:
+    """Converts 3D points from the absolute frame to the relative frame of the Euler origin.
+
+    :param origin: The origin state in the absolute frame, as an EulerStateSE3 or np.ndarray.
+    :param points_3d_array: The 3D points in the absolute frame.
+    :raises TypeError: If the origin is not an EulerStateSE3 or np.ndarray.
+    :return: The 3D points in the relative frame, indexed by :class:`~d123.geometry.Point3DIndex`.
+    """
+    if isinstance(origin, EulerStateSE3):
+        t_origin = origin.point_3d.array
+        R_origin = origin.rotation_matrix
+    elif isinstance(origin, np.ndarray):
+        assert origin.ndim == 1 and origin.shape[-1] == len(EulerStateSE3Index)
+        t_origin = origin[EulerStateSE3Index.XYZ]
+        R_origin = get_rotation_matrix_from_euler_array(origin[EulerStateSE3Index.EULER_ANGLES])
+    else:
+        raise TypeError(f"Expected EulerStateSE3 or np.ndarray, got {type(origin)}")
+
+    assert points_3d_array.ndim >= 1
+    assert points_3d_array.shape[-1] == len(Point3DIndex)
+
+    # Translate points to origin frame, then rotate to body frame
+    relative_points = (points_3d_array - t_origin) @ R_origin
+    return relative_points
+
+
+def convert_relative_to_absolute_points_3d_array(
+    origin: Union[EulerStateSE3, npt.NDArray[np.float64]], points_3d_array: npt.NDArray[np.float64]
+) -> npt.NDArray[np.float64]:
+    """Converts 3D points from the relative frame of the Euler origin to the absolute frame.
+
+    :param origin: The origin state in the absolute frame, as an EulerStateSE3 or np.ndarray.
+    :param points_3d_array: The 3D points in the relative frame.
+    :raises TypeError: If the origin is not an EulerStateSE3 or np.ndarray.
+    :return: The 3D points in the absolute frame, indexed by :class:`~d123.geometry.Point3DIndex`.
+    """
+    if isinstance(origin, EulerStateSE3):
+        origin_array = origin.array
+    elif isinstance(origin, np.ndarray):
+        assert origin.ndim == 1 and origin.shape[-1] == len(EulerStateSE3Index)
+        origin_array = origin
+    else:
+        raise TypeError(f"Expected EulerStateSE3 or np.ndarray, got {type(origin)}")
+
+    assert points_3d_array.shape[-1] == len(Point3DIndex)
+
+    # Use origin_array for the translation so np.ndarray origins work as well
+    # (origin.point_3d only exists on EulerStateSE3 instances).
+    R = EulerAngles.from_array(origin_array[EulerStateSE3Index.EULER_ANGLES]).rotation_matrix
+    absolute_points = points_3d_array @ R.T + origin_array[EulerStateSE3Index.XYZ]
+    return absolute_points
diff --git a/d123/geometry/transform/transform_quaternion_se3.py b/d123/geometry/transform/transform_quaternion_se3.py
deleted file mode 100644
index 32a6858b..00000000
--- a/d123/geometry/transform/transform_quaternion_se3.py
+++ /dev/null
@@ -1,213 +0,0 @@
-from typing import Union
-
-import numpy as np
-import numpy.typing as npt
-
-from d123.geometry import Vector3D
-from d123.geometry.geometry_index import Point3DIndex, QuaternionSE3Index, Vector3DIndex
-from d123.geometry.se import QuaternionSE3
-from
d123.geometry.utils.rotation_utils import ( - conjugate_quaternion_array, - get_rotation_matrix_from_quaternion_array, - multiply_quaternion_arrays, -) - - -def convert_absolute_to_relative_points_3d_array( - origin: Union[QuaternionSE3, npt.NDArray[np.float64]], points_3d_array: npt.NDArray[np.float64] -) -> npt.NDArray[np.float64]: - """Converts 3D points from the absolute frame to the relative frame. - - :param origin: The origin state in the absolute frame, as a StateSE3 or np.ndarray. - :param points_3d_array: The 3D points in the absolute frame. - :raises TypeError: If the origin is not a StateSE3 or np.ndarray. - :return: The 3D points in the relative frame, indexed by :class:`~d123.geometry.Point3DIndex`. - """ - - if isinstance(origin, QuaternionSE3): - t_origin = origin.point_3d.array - R_origin = origin.rotation_matrix - elif isinstance(origin, np.ndarray): - assert origin.ndim == 1 and origin.shape[-1] == len(QuaternionSE3Index) - t_origin = origin[QuaternionSE3Index.XYZ] - R_origin = get_rotation_matrix_from_quaternion_array(origin[QuaternionSE3Index.QUATERNION]) - else: - raise TypeError(f"Expected StateSE3 or np.ndarray, got {type(origin)}") - - assert points_3d_array.ndim >= 1 - assert points_3d_array.shape[-1] == len(Point3DIndex) - - # Translate points to origin frame, then rotate to body frame - relative_points = (points_3d_array - t_origin) @ R_origin - return relative_points - - -def convert_absolute_to_relative_se3_array( - origin: Union[QuaternionSE3, npt.NDArray[np.float64]], se3_array: npt.NDArray[np.float64] -) -> npt.NDArray[np.float64]: - """Converts an SE3 array from the absolute frame to the relative frame. - - :param origin: The origin state in the absolute frame, as a StateSE3 or np.ndarray. - :param se3_array: The SE3 array in the absolute frame. - :raises TypeError: If the origin is not a StateSE3 or np.ndarray. - :return: The SE3 array in the relative frame, indexed by :class:`~d123.geometry.StateSE3Index`. - """ - if isinstance(origin, QuaternionSE3): - origin_array = origin.array - t_origin = origin.point_3d.array - R_origin = origin.rotation_matrix - elif isinstance(origin, np.ndarray): - assert origin.ndim == 1 and origin.shape[-1] == len(QuaternionSE3Index) - origin_array = origin - t_origin = origin_array[QuaternionSE3Index.XYZ] - R_origin = get_rotation_matrix_from_quaternion_array(origin_array[QuaternionSE3Index.QUATERNION]) - else: - raise TypeError(f"Expected StateSE3 or np.ndarray, got {type(origin)}") - - assert se3_array.ndim >= 1 - assert se3_array.shape[-1] == len(QuaternionSE3Index) - - abs_positions = se3_array[..., QuaternionSE3Index.XYZ] - abs_quaternions = se3_array[..., QuaternionSE3Index.QUATERNION] - - rel_se3_array = np.zeros_like(se3_array) - - # 1. Vectorized relative position calculation: translate and rotate - rel_positions = (abs_positions - t_origin) @ R_origin - rel_se3_array[..., QuaternionSE3Index.XYZ] = rel_positions - - # 2. Vectorized relative orientation calculation: quaternion multiplication with conjugate - q_origin_conj = conjugate_quaternion_array(origin_array[QuaternionSE3Index.QUATERNION]) - rel_quaternions = multiply_quaternion_arrays(q_origin_conj, abs_quaternions) - - rel_se3_array[..., QuaternionSE3Index.QUATERNION] = rel_quaternions - - return rel_se3_array - - -def convert_relative_to_absolute_points_3d_array( - origin: Union[QuaternionSE3, npt.NDArray[np.float64]], points_3d_array: npt.NDArray[np.float64] -) -> npt.NDArray[np.float64]: - """Converts 3D points from the relative frame to the absolute frame. 
- - :param origin: The origin state in the absolute frame, as a StateSE3 or np.ndarray. - :param points_3d_array: The 3D points in the relative frame, indexed by :class:`~d123.geometry.Point3DIndex`. - :raises TypeError: If the origin is not a StateSE3 or np.ndarray. - :return: The 3D points in the absolute frame, indexed by :class:`~d123.geometry.Point3DIndex`. - """ - if isinstance(origin, QuaternionSE3): - t_origin = origin.point_3d.array - R_origin = origin.rotation_matrix - elif isinstance(origin, np.ndarray): - assert origin.ndim == 1 and origin.shape[-1] == len(QuaternionSE3Index) - t_origin = origin[QuaternionSE3Index.XYZ] - R_origin = get_rotation_matrix_from_quaternion_array(origin[QuaternionSE3Index.QUATERNION]) - else: - raise TypeError(f"Expected QuaternionSE3 or np.ndarray, got {type(origin)}") - - assert points_3d_array.shape[-1] == len(Point3DIndex) - - absolute_points = points_3d_array @ R_origin.T + t_origin - return absolute_points - - -def convert_relative_to_absolute_se3_array( - origin: QuaternionSE3, se3_array: npt.NDArray[np.float64] -) -> npt.NDArray[np.float64]: - """Converts an SE3 array from the relative frame to the absolute frame. - - :param origin: The origin state in the relative frame, as a StateSE3 or np.ndarray. - :param se3_array: The SE3 array in the relative frame. - :raises TypeError: If the origin is not a StateSE3 or np.ndarray. - :return: The SE3 array in the absolute frame, indexed by :class:`~d123.geometry.StateSE3Index`. - """ - - if isinstance(origin, QuaternionSE3): - origin_array = origin.array - t_origin = origin.point_3d.array - R_origin = origin.rotation_matrix - elif isinstance(origin, np.ndarray): - assert origin.ndim == 1 and origin.shape[-1] == len(QuaternionSE3Index) - origin_array = origin - t_origin = origin_array[QuaternionSE3Index.XYZ] - R_origin = get_rotation_matrix_from_quaternion_array(origin_array[QuaternionSE3Index.QUATERNION]) - else: - raise TypeError(f"Expected QuaternionSE3 or np.ndarray, got {type(origin)}") - - assert se3_array.ndim >= 1 - assert se3_array.shape[-1] == len(QuaternionSE3Index) - - # Extract relative positions and orientations - rel_positions = se3_array[..., QuaternionSE3Index.XYZ] - rel_quaternions = se3_array[..., QuaternionSE3Index.QUATERNION] - - # Vectorized absolute position calculation: rotate and translate - abs_positions = (R_origin @ rel_positions.T).T + t_origin - abs_quaternions = multiply_quaternion_arrays(origin_array[QuaternionSE3Index.QUATERNION], rel_quaternions) - - # Prepare output array - abs_se3_array = se3_array.copy() - abs_se3_array[..., QuaternionSE3Index.XYZ] = abs_positions - abs_se3_array[..., QuaternionSE3Index.QUATERNION] = abs_quaternions - - return abs_se3_array - - -def translate_se3_along_z(state_se3: QuaternionSE3, distance: float) -> QuaternionSE3: - """Translates a QuaternionSE3 state along the Z-axis. - - :param state_se3: The QuaternionSE3 state to translate. - :param distance: The distance to translate along the Z-axis. - :return: The translated QuaternionSE3 state. - """ - R = state_se3.rotation_matrix - z_axis = R[:, 2] - - state_se3_array = state_se3.array.copy() - state_se3_array[QuaternionSE3Index.XYZ] += distance * z_axis[Vector3DIndex.XYZ] - return QuaternionSE3.from_array(state_se3_array, copy=False) - - -def translate_se3_along_y(state_se3: QuaternionSE3, distance: float) -> QuaternionSE3: - """Translates a QuaternionSE3 state along the Y-axis. - - :param state_se3: The QuaternionSE3 state to translate. 
- :param distance: The distance to translate along the Y-axis. - :return: The translated QuaternionSE3 state. - """ - R = state_se3.rotation_matrix - y_axis = R[:, 1] - - state_se3_array = state_se3.array.copy() - state_se3_array[QuaternionSE3Index.XYZ] += distance * y_axis[Vector3DIndex.XYZ] - return QuaternionSE3.from_array(state_se3_array, copy=False) - - -def translate_se3_along_x(state_se3: QuaternionSE3, distance: float) -> QuaternionSE3: - """Translates a QuaternionSE3 state along the X-axis. - - :param state_se3: The QuaternionSE3 state to translate. - :param distance: The distance to translate along the X-axis. - :return: The translated QuaternionSE3 state. - """ - R = state_se3.rotation_matrix - x_axis = R[:, 0] - - state_se3_array = state_se3.array.copy() - state_se3_array[QuaternionSE3Index.XYZ] += distance * x_axis[Vector3DIndex.XYZ] - return QuaternionSE3.from_array(state_se3_array, copy=False) - - -def translate_se3_along_body_frame(state_se3: QuaternionSE3, vector_3d: Vector3D) -> QuaternionSE3: - """Translates a QuaternionSE3 state along a vector in the body frame. - - :param state_se3: The QuaternionSE3 state to translate. - :param vector_3d: The vector to translate along in the body frame. - :return: The translated QuaternionSE3 state. - """ - R = state_se3.rotation_matrix - world_translation = R @ vector_3d.array - - state_se3_array = state_se3.array.copy() - state_se3_array[QuaternionSE3Index.XYZ] += world_translation - return QuaternionSE3.from_array(state_se3_array, copy=False) diff --git a/d123/geometry/transform/transform_se3.py b/d123/geometry/transform/transform_se3.py index affb70a3..4725e4c2 100644 --- a/d123/geometry/transform/transform_se3.py +++ b/d123/geometry/transform/transform_se3.py @@ -3,78 +3,43 @@ import numpy as np import numpy.typing as npt -from d123.geometry import StateSE3, StateSE3Index, Vector3D -from d123.geometry.geometry_index import Point3DIndex, Vector3DIndex -from d123.geometry.rotation import EulerAngles +from d123.geometry import Vector3D +from d123.geometry.geometry_index import Point3DIndex, StateSE3Index, Vector3DIndex +from d123.geometry.se import StateSE3 from d123.geometry.utils.rotation_utils import ( - get_rotation_matrices_from_euler_array, - get_rotation_matrix_from_euler_array, - normalize_angle, + conjugate_quaternion_array, + get_rotation_matrix_from_quaternion_array, + multiply_quaternion_arrays, ) -def translate_se3_along_z(state_se3: StateSE3, distance: float) -> StateSE3: - """Translates a SE3 state along the Z-axis. - - :param state_se3: The SE3 state to translate. - :param distance: The distance to translate along the Z-axis. - :return: The translated SE3 state. - """ - - R = state_se3.rotation_matrix - z_axis = R[:, 2] - - state_se3_array = state_se3.array.copy() - state_se3_array[StateSE3Index.XYZ] += distance * z_axis[Vector3DIndex.XYZ] - return StateSE3.from_array(state_se3_array, copy=False) - - -def translate_se3_along_y(state_se3: StateSE3, distance: float) -> StateSE3: - """Translates a SE3 state along the Y-axis. - - :param state_se3: The SE3 state to translate. - :param distance: The distance to translate along the Y-axis. - :return: The translated SE3 state. 
- """ - - R = state_se3.rotation_matrix - y_axis = R[:, 1] - - state_se3_array = state_se3.array.copy() - state_se3_array[StateSE3Index.XYZ] += distance * y_axis[Vector3DIndex.XYZ] - return StateSE3.from_array(state_se3_array, copy=False) - - -def translate_se3_along_x(state_se3: StateSE3, distance: float) -> StateSE3: - """Translates a SE3 state along the X-axis. +def convert_absolute_to_relative_points_3d_array( + origin: Union[StateSE3, npt.NDArray[np.float64]], points_3d_array: npt.NDArray[np.float64] +) -> npt.NDArray[np.float64]: + """Converts 3D points from the absolute frame to the relative frame. - :param state_se3: The SE3 state to translate. - :param distance: The distance to translate along the X-axis. - :return: The translated SE3 state. + :param origin: The origin state in the absolute frame, as a StateSE3 or np.ndarray. + :param points_3d_array: The 3D points in the absolute frame. + :raises TypeError: If the origin is not a StateSE3 or np.ndarray. + :return: The 3D points in the relative frame, indexed by :class:`~d123.geometry.Point3DIndex`. """ - R = state_se3.rotation_matrix - x_axis = R[:, 0] - - state_se3_array = state_se3.array.copy() - state_se3_array[StateSE3Index.XYZ] += distance * x_axis[Vector3DIndex.XYZ] - return StateSE3.from_array(state_se3_array, copy=False) - - -def translate_se3_along_body_frame(state_se3: StateSE3, vector_3d: Vector3D) -> StateSE3: - """Translates a SE3 state along a vector in the body frame. - - :param state_se3: The SE3 state to translate. - :param vector_3d: The vector to translate along in the body frame. - :return: The translated SE3 state. - """ + if isinstance(origin, StateSE3): + t_origin = origin.point_3d.array + R_origin = origin.rotation_matrix + elif isinstance(origin, np.ndarray): + assert origin.ndim == 1 and origin.shape[-1] == len(StateSE3Index) + t_origin = origin[StateSE3Index.XYZ] + R_origin = get_rotation_matrix_from_quaternion_array(origin[StateSE3Index.QUATERNION]) + else: + raise TypeError(f"Expected StateSE3 or np.ndarray, got {type(origin)}") - R = state_se3.rotation_matrix - world_translation = R @ vector_3d.array + assert points_3d_array.ndim >= 1 + assert points_3d_array.shape[-1] == len(Point3DIndex) - state_se3_array = state_se3.array.copy() - state_se3_array[StateSE3Index.XYZ] += world_translation[Vector3DIndex.XYZ] - return StateSE3.from_array(state_se3_array, copy=False) + # Translate points to origin frame, then rotate to body frame + relative_points = (points_3d_array - t_origin) @ R_origin + return relative_points def convert_absolute_to_relative_se3_array( @@ -95,7 +60,7 @@ def convert_absolute_to_relative_se3_array( assert origin.ndim == 1 and origin.shape[-1] == len(StateSE3Index) origin_array = origin t_origin = origin_array[StateSE3Index.XYZ] - R_origin = get_rotation_matrix_from_euler_array(origin_array[StateSE3Index.EULER_ANGLES]) + R_origin = get_rotation_matrix_from_quaternion_array(origin_array[StateSE3Index.QUATERNION]) else: raise TypeError(f"Expected StateSE3 or np.ndarray, got {type(origin)}") @@ -103,23 +68,49 @@ def convert_absolute_to_relative_se3_array( assert se3_array.shape[-1] == len(StateSE3Index) abs_positions = se3_array[..., StateSE3Index.XYZ] - abs_rotation_matrices = get_rotation_matrices_from_euler_array(se3_array[..., StateSE3Index.EULER_ANGLES]) + abs_quaternions = se3_array[..., StateSE3Index.QUATERNION] - # Convert absolute rotation matrices to relative rotation matrices - rel_rotation_matrices = np.einsum("ij,...jk->...ik", R_origin.T, abs_rotation_matrices) - 
rel_euler_angles = np.array([EulerAngles.from_rotation_matrix(R).array for R in rel_rotation_matrices])
+    rel_se3_array = np.zeros_like(se3_array)
 
-    # Vectorized relative position calculation
+    # 1. Vectorized relative position calculation: translate and rotate
     rel_positions = (abs_positions - t_origin) @ R_origin
-
-    # Prepare output array
-    rel_se3_array = se3_array.copy()
     rel_se3_array[..., StateSE3Index.XYZ] = rel_positions
-    rel_se3_array[..., StateSE3Index.EULER_ANGLES] = normalize_angle(rel_euler_angles)
+
+    # 2. Vectorized relative orientation calculation: quaternion multiplication with conjugate
+    q_origin_conj = conjugate_quaternion_array(origin_array[StateSE3Index.QUATERNION])
+    rel_quaternions = multiply_quaternion_arrays(q_origin_conj, abs_quaternions)
+
+    rel_se3_array[..., StateSE3Index.QUATERNION] = rel_quaternions
 
     return rel_se3_array
 
 
+def convert_relative_to_absolute_points_3d_array(
+    origin: Union[StateSE3, npt.NDArray[np.float64]], points_3d_array: npt.NDArray[np.float64]
+) -> npt.NDArray[np.float64]:
+    """Converts 3D points from the relative frame to the absolute frame.
+
+    :param origin: The origin state in the absolute frame, as a StateSE3 or np.ndarray.
+    :param points_3d_array: The 3D points in the relative frame, indexed by :class:`~d123.geometry.Point3DIndex`.
+    :raises TypeError: If the origin is not a StateSE3 or np.ndarray.
+    :return: The 3D points in the absolute frame, indexed by :class:`~d123.geometry.Point3DIndex`.
+    """
+    if isinstance(origin, StateSE3):
+        t_origin = origin.point_3d.array
+        R_origin = origin.rotation_matrix
+    elif isinstance(origin, np.ndarray):
+        assert origin.ndim == 1 and origin.shape[-1] == len(StateSE3Index)
+        t_origin = origin[StateSE3Index.XYZ]
+        R_origin = get_rotation_matrix_from_quaternion_array(origin[StateSE3Index.QUATERNION])
+    else:
+        raise TypeError(f"Expected StateSE3 or np.ndarray, got {type(origin)}")
+
+    assert points_3d_array.shape[-1] == len(Point3DIndex)
+
+    absolute_points = points_3d_array @ R_origin.T + t_origin
+    return absolute_points
+
+
 def convert_relative_to_absolute_se3_array(
     origin: StateSE3, se3_array: npt.NDArray[np.float64]
 ) -> npt.NDArray[np.float64]:
@@ -139,81 +130,84 @@
         assert origin.ndim == 1 and origin.shape[-1] == len(StateSE3Index)
         origin_array = origin
         t_origin = origin_array[StateSE3Index.XYZ]
-        R_origin = get_rotation_matrix_from_euler_array(origin_array[StateSE3Index.EULER_ANGLES])
+        R_origin = get_rotation_matrix_from_quaternion_array(origin_array[StateSE3Index.QUATERNION])
     else:
         raise TypeError(f"Expected StateSE3 or np.ndarray, got {type(origin)}")
 
     assert se3_array.ndim >= 1
     assert se3_array.shape[-1] == len(StateSE3Index)
 
     # Extract relative positions and orientations
     rel_positions = se3_array[..., StateSE3Index.XYZ]
-    rel_rotation_matrices = get_rotation_matrices_from_euler_array(se3_array[..., StateSE3Index.EULER_ANGLES])
+    rel_quaternions = se3_array[..., StateSE3Index.QUATERNION]
 
     # Vectorized absolute position calculation: rotate and translate
-    abs_positions = (rel_positions @ R_origin.T) + t_origin
-
-    # Convert relative rotation matrices to absolute rotation matrices
-    abs_rotation_matrices = np.einsum("ij,...jk->...ik", R_origin, rel_rotation_matrices)
-    abs_euler_angles = np.array([EulerAngles.from_rotation_matrix(R).array for R in abs_rotation_matrices])
+    abs_positions = (R_origin @ rel_positions.T).T + t_origin
+    abs_quaternions = multiply_quaternion_arrays(origin_array[StateSE3Index.QUATERNION], rel_quaternions)
 
     # Prepare output array
     abs_se3_array = se3_array.copy()
     abs_se3_array[..., StateSE3Index.XYZ] = abs_positions
-    abs_se3_array[..., StateSE3Index.EULER_ANGLES] = normalize_angle(abs_euler_angles)
+    abs_se3_array[..., StateSE3Index.QUATERNION] = abs_quaternions
 
     return abs_se3_array
 
 
-def convert_absolute_to_relative_points_3d_array(
-    origin: Union[StateSE3, npt.NDArray[np.float64]], points_3d_array: npt.NDArray[np.float64]
-) -> npt.NDArray[np.float64]:
-    """Converts 3D points from the absolute frame to the relative frame.
+def translate_se3_along_z(state_se3: StateSE3, distance: float) -> StateSE3:
+    """Translates an SE3 state along the Z-axis.
 
-    :param origin: The origin state in the absolute frame, as a StateSE3 or np.ndarray.
-    :param points_3d_array: The 3D points in the absolute frame.
-    :raises TypeError: If the origin is not a StateSE3 or np.ndarray.
-    :return: The 3D points in the relative frame , indexed by :class:`~d123.geometry.Point3DIndex`.
+    :param state_se3: The SE3 state to translate.
+    :param distance: The distance to translate along the Z-axis.
+    :return: The translated SE3 state.
     """
+    R = state_se3.rotation_matrix
+    z_axis = R[:, 2]
 
-    if isinstance(origin, StateSE3):
-        t_origin = origin.point_3d.array
-        R_origin = origin.rotation_matrix
-    elif isinstance(origin, np.ndarray):
-        assert origin.ndim == 1 and origin.shape[-1] == len(StateSE3Index)
-        t_origin = origin[StateSE3Index.XYZ]
-        R_origin = get_rotation_matrix_from_euler_array(origin[StateSE3Index.EULER_ANGLES])
-    else:
-        raise TypeError(f"Expected StateSE3 or np.ndarray, got {type(origin)}")
+    state_se3_array = state_se3.array.copy()
+    state_se3_array[StateSE3Index.XYZ] += distance * z_axis[Vector3DIndex.XYZ]
+    return StateSE3.from_array(state_se3_array, copy=False)
 
-    assert points_3d_array.ndim >= 1
-    assert points_3d_array.shape[-1] == len(Point3DIndex)
 
-    # Translate points to origin frame, then rotate to body frame
-    relative_points = (points_3d_array - t_origin) @ R_origin
-    return relative_points
+def translate_se3_along_y(state_se3: StateSE3, distance: float) -> StateSE3:
+    """Translates an SE3 state along the Y-axis.
+
+    :param state_se3: The SE3 state to translate.
+    :param distance: The distance to translate along the Y-axis.
+    :return: The translated SE3 state.
+    """
+    R = state_se3.rotation_matrix
+    y_axis = R[:, 1]
 
-def convert_relative_to_absolute_points_3d_array(
-    origin: Union[StateSE3, npt.NDArray[np.float64]], points_3d_array: npt.NDArray[np.float64]
-) -> npt.NDArray[np.float64]:
-    """Converts 3D points from the relative frame to the absolute frame.
+    state_se3_array = state_se3.array.copy()
+    state_se3_array[StateSE3Index.XYZ] += distance * y_axis[Vector3DIndex.XYZ]
+    return StateSE3.from_array(state_se3_array, copy=False)
 
-    :param origin: The origin state in the absolute frame, as a StateSE3 or np.ndarray.
-    :param points_3d_array: The 3D points in the relative frame, indexed by :class:`~d123.geometry.Point3DIndex`.
-    :raises TypeError: If the origin is not a StateSE3 or np.ndarray.
-    :return: The 3D points in the absolute frame, indexed by :class:`~d123.geometry.Point3DIndex`.
+
+def translate_se3_along_x(state_se3: StateSE3, distance: float) -> StateSE3:
+    """Translates an SE3 state along the X-axis.
+
+    :param state_se3: The SE3 state to translate.
+    :param distance: The distance to translate along the X-axis.
+    :return: The translated SE3 state.
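+
+    Example: with the identity quaternion (no rotation), only the x-coordinate
+        changes, i.e. x' = x + distance.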
""" - if isinstance(origin, StateSE3): - origin_array = origin.array - elif isinstance(origin, np.ndarray): - assert origin.ndim == 1 and origin.shape[-1] == len(StateSE3Index) - origin_array = origin - else: - raise TypeError(f"Expected StateSE3 or np.ndarray, got {type(origin)}") + R = state_se3.rotation_matrix + x_axis = R[:, 0] - assert points_3d_array.shape[-1] == len(Point3DIndex) + state_se3_array = state_se3.array.copy() + state_se3_array[StateSE3Index.XYZ] += distance * x_axis[Vector3DIndex.XYZ] + return StateSE3.from_array(state_se3_array, copy=False) - R = EulerAngles.from_array(origin_array[StateSE3Index.EULER_ANGLES]).rotation_matrix - absolute_points = points_3d_array @ R.T + origin.point_3d.array - return absolute_points + +def translate_se3_along_body_frame(state_se3: StateSE3, vector_3d: Vector3D) -> StateSE3: + """Translates a SE3 state along a vector in the body frame. + + :param state_se3: The SE3 state to translate. + :param vector_3d: The vector to translate along in the body frame. + :return: The translated SE3 state. + """ + R = state_se3.rotation_matrix + world_translation = R @ vector_3d.array + + state_se3_array = state_se3.array.copy() + state_se3_array[StateSE3Index.XYZ] += world_translation + return StateSE3.from_array(state_se3_array, copy=False) From 8c4117e52b6d75976702aba1ddf105ae39c18cc2 Mon Sep 17 00:00:00 2001 From: Daniel Dauner Date: Mon, 22 Sep 2025 15:24:50 +0200 Subject: [PATCH 039/145] Use quaternions for se3 bounding boxes (#43), add tests and refactor `geometry.utils` (#44) --- d123/geometry/__init__.py | 8 +- d123/geometry/bounding_box.py | 21 +- d123/geometry/geometry_index.py | 29 +- d123/geometry/se.py | 8 + d123/geometry/test/test_bounding_box.py | 26 +- d123/geometry/transform/transform_se2.py | 28 +- d123/geometry/transform/transform_se3.py | 24 +- d123/geometry/utils/bounding_box_utils.py | 185 ++++++------ d123/geometry/utils/rotation_utils.py | 45 ++- d123/geometry/utils/test/__init__.py | 0 .../utils/test/test_bounding_box_utils.py | 269 ++++++++++++++++++ .../utils/test/test_polyline_utils.py | 0 .../utils/test/test_rotation_utils.py | 0 d123/geometry/utils/utils.py | 63 ---- 14 files changed, 503 insertions(+), 203 deletions(-) create mode 100644 d123/geometry/utils/test/__init__.py create mode 100644 d123/geometry/utils/test/test_bounding_box_utils.py create mode 100644 d123/geometry/utils/test/test_polyline_utils.py create mode 100644 d123/geometry/utils/test/test_rotation_utils.py delete mode 100644 d123/geometry/utils/utils.py diff --git a/d123/geometry/__init__.py b/d123/geometry/__init__.py index ec44efd5..68721954 100644 --- a/d123/geometry/__init__.py +++ b/d123/geometry/__init__.py @@ -1,4 +1,3 @@ -from d123.geometry.bounding_box import BoundingBoxSE2, BoundingBoxSE3 from d123.geometry.geometry_index import ( BoundingBoxSE2Index, BoundingBoxSE3Index, @@ -11,8 +10,9 @@ Vector2DIndex, Vector3DIndex, ) -from d123.geometry.occupancy_map import OccupancyMap2D from d123.geometry.point import Point2D, Point3D -from d123.geometry.polyline import Polyline2D, Polyline3D, PolylineSE2 -from d123.geometry.se import StateSE2, EulerStateSE3 from d123.geometry.vector import Vector2D, Vector3D +from d123.geometry.se import StateSE2, StateSE3, EulerStateSE3 +from d123.geometry.polyline import Polyline2D, Polyline3D, PolylineSE2 +from d123.geometry.bounding_box import BoundingBoxSE2, BoundingBoxSE3 +from d123.geometry.occupancy_map import OccupancyMap2D diff --git a/d123/geometry/bounding_box.py b/d123/geometry/bounding_box.py index 
6ace64e7..b164fc40 100644 --- a/d123/geometry/bounding_box.py +++ b/d123/geometry/bounding_box.py @@ -11,7 +11,7 @@ from d123.common.utils.mixin import ArrayMixin from d123.geometry.geometry_index import BoundingBoxSE2Index, BoundingBoxSE3Index, Corners2DIndex, Corners3DIndex from d123.geometry.point import Point2D, Point3D -from d123.geometry.se import StateSE2, EulerStateSE3 +from d123.geometry.se import StateSE2, StateSE3 from d123.geometry.utils.bounding_box_utils import bbse2_array_to_corners_array, bbse3_array_to_corners_array @@ -137,22 +137,22 @@ def corners_dict(self) -> Dict[Corners2DIndex, Point2D]: class BoundingBoxSE3(ArrayMixin): """ - Rotated bounding box in 3D defined by center (StateSE3), length, width and height. + Rotated bounding box in 3D defined by center with quaternion rotation (StateSE3), length, width and height. Example: >>> from d123.geometry import StateSE3 - >>> bbox = BoundingBoxSE3(center=StateSE3(1.0, 2.0, 3.0, 0.1, 0.2, 0.3), length=4.0, width=2.0, height=1.5) + >>> bbox = BoundingBoxSE3(center=StateSE3(1.0, 2.0, 3.0, 1.0, 0.0, 0.0, 0.0), length=4.0, width=2.0, height=1.5) >>> bbox.array - array([1. , 2. , 3. , 0.1, 0.2, 0.3, 4. , 2. , 1.5]) + array([1. , 2. , 3. , 1. , 0. , 0. , 0. , 4. , 2. , 1.5]) >>> bbox.bounding_box_se2.array - array([1. , 2. , 0.3, 4. , 2. ]) + array([1., 2., 0., 4., 2.]) >>> bbox.shapely_polygon.area 8.0 """ _array: npt.NDArray[np.float64] - def __init__(self, center: EulerStateSE3, length: float, width: float, height: float): + def __init__(self, center: StateSE3, length: float, width: float, height: float): """Initialize BoundingBoxSE3 with center (StateSE3), length, width and height. :param center: Center of the bounding box as a StateSE3 instance. @@ -183,15 +183,15 @@ def from_array(cls, array: npt.NDArray[np.float64], copy: bool = True) -> Boundi return instance @property - def center(self) -> EulerStateSE3: + def center(self) -> StateSE3: """The center of the bounding box as a StateSE3 instance. :return: The center of the bounding box as a StateSE3 instance. """ - return EulerStateSE3.from_array(self._array[BoundingBoxSE3Index.STATE_SE3]) + return StateSE3.from_array(self._array[BoundingBoxSE3Index.STATE_SE3]) @property - def center_se3(self) -> EulerStateSE3: + def center_se3(self) -> StateSE3: """The center of the bounding box as a StateSE3 instance. :return: The center of the bounding box as a StateSE3 instance. @@ -245,9 +245,8 @@ def bounding_box_se2(self) -> BoundingBoxSE2: :return: A BoundingBoxSE2 instance. """ - center_se3 = self.center_se3 return BoundingBoxSE2( - center=StateSE2(center_se3.x, center_se3.y, center_se3.yaw), + center=self.center_se2, length=self.length, width=self.width, ) diff --git a/d123/geometry/geometry_index.py b/d123/geometry/geometry_index.py index aa0924db..1da7c945 100644 --- a/d123/geometry/geometry_index.py +++ b/d123/geometry/geometry_index.py @@ -183,6 +183,10 @@ def XY(cls) -> slice: def SE2(cls) -> slice: return slice(cls.X, cls.YAW + 1) + @classproperty + def EXTENT(cls) -> slice: + return slice(cls.LENGTH, cls.WIDTH + 1) + class Corners2DIndex(IntEnum): """ @@ -197,19 +201,22 @@ class Corners2DIndex(IntEnum): class BoundingBoxSE3Index(IntEnum): """ - Indexes array-like representations of rotated 3D bounding boxes (x,y,z,roll,pitch,yaw,length,width,height). - TODO: Use quaternions for rotation. + Indexes array-like representations of rotated 3D bounding boxes + - center (x,y,z). + - rotation (qw,qx,qy,qz). + - extent (length,width,height). 
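+    i.e. the full array layout is [x, y, z, qw, qx, qy, qz, length, width, height].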
""" X = 0 Y = 1 Z = 2 - ROLL = 3 - PITCH = 4 - YAW = 5 - LENGTH = 6 - WIDTH = 7 - HEIGHT = 8 + QW = 3 + QX = 4 + QY = 5 + QZ = 6 + LENGTH = 7 + WIDTH = 8 + HEIGHT = 9 @classproperty def XYZ(cls) -> slice: @@ -217,11 +224,11 @@ def XYZ(cls) -> slice: @classproperty def STATE_SE3(cls) -> slice: - return slice(cls.X, cls.YAW + 1) + return slice(cls.X, cls.QZ + 1) @classproperty - def EULER_ANGLES(cls) -> slice: - return slice(cls.ROLL, cls.YAW + 1) + def QUATERNION(cls) -> slice: + return slice(cls.QW, cls.QZ + 1) @classproperty def EXTENT(cls) -> slice: diff --git a/d123/geometry/se.py b/d123/geometry/se.py index 148868ea..b63bea7d 100644 --- a/d123/geometry/se.py +++ b/d123/geometry/se.py @@ -245,6 +245,14 @@ def quaternion(self) -> Quaternion: """ return Quaternion.from_array(self.array[StateSE3Index.QUATERNION]) + @property + def euler_angles(self) -> EulerAngles: + """Returns the Euler angles (roll, pitch, yaw) representation of the state's orientation. + + :return: An EulerAngles instance representing the Euler angles. + """ + return self.quaternion.euler_angles + @property def rotation_matrix(self) -> npt.NDArray[np.float64]: """Returns the 3x3 rotation matrix representation of the state's orientation. diff --git a/d123/geometry/test/test_bounding_box.py b/d123/geometry/test/test_bounding_box.py index 06522d7a..f34639ad 100644 --- a/d123/geometry/test/test_bounding_box.py +++ b/d123/geometry/test/test_bounding_box.py @@ -12,6 +12,7 @@ Corners3DIndex, Point2DIndex, ) +from d123.geometry.se import StateSE3 class TestBoundingBoxSE2(unittest.TestCase): @@ -109,7 +110,8 @@ class TestBoundingBoxSE3(unittest.TestCase): def setUp(self): """Set up test fixtures.""" - self.center = EulerStateSE3(1.0, 2.0, 3.0, 0.1, 0.2, 0.3) + self.array = np.array([1.0, 2.0, 3.0, 0.98185617, 0.06407135, 0.09115755, 0.1534393, 4.0, 2.0, 1.5]) + self.center = StateSE3(1.0, 2.0, 3.0, 0.98185617, 0.06407135, 0.09115755, 0.1534393) self.length = 4.0 self.width = 2.0 self.height = 1.5 @@ -125,13 +127,13 @@ def test_init(self): def test_from_array(self): """Test BoundingBoxSE3.from_array method.""" - array = np.array([1.0, 2.0, 3.0, 0.1, 0.2, 0.3, 4.0, 2.0, 1.5]) + array = self.array.copy() bbox = BoundingBoxSE3.from_array(array) np.testing.assert_array_equal(bbox.array, array) def test_from_array_copy(self): """Test BoundingBoxSE3.from_array with copy parameter.""" - array = np.array([1.0, 2.0, 3.0, 0.1, 0.2, 0.3, 4.0, 2.0, 1.5]) + array = self.array.copy() bbox_copy = BoundingBoxSE3.from_array(array, copy=True) bbox_no_copy = BoundingBoxSE3.from_array(array, copy=False) @@ -149,14 +151,14 @@ def test_properties(self): def test_array_property(self): """Test array property.""" - expected = np.array([1.0, 2.0, 3.0, 0.1, 0.2, 0.3, 4.0, 2.0, 1.5]) + expected = self.array.copy() np.testing.assert_array_equal(self.bbox.array, expected) def test_array_mixin(self): """Test that BoundingBoxSE3 is an instance of ArrayMixin.""" self.assertIsInstance(self.bbox, ArrayMixin) - expected = np.array([1.0, 2.0, 3.0, 0.1, 0.2, 0.3, 4.0, 2.0, 1.5], dtype=np.float16) + expected = np.array(self.array, dtype=np.float16) output_array = np.array(self.bbox, dtype=np.float16) np.testing.assert_array_equal(output_array, expected) self.assertEqual(output_array.dtype, np.float16) @@ -170,7 +172,7 @@ def test_bounding_box_se2_property(self): self.assertEqual(bbox_2d.width, self.width) self.assertEqual(bbox_2d.center.x, self.center.x) self.assertEqual(bbox_2d.center.y, self.center.y) - self.assertEqual(bbox_2d.center.yaw, self.center.yaw) + 
self.assertEqual(bbox_2d.center.yaw, self.center.euler_angles.yaw) def test_corners_array(self): """Test corners_array property.""" @@ -196,15 +198,19 @@ def test_array_assertions(self): """Test array assertions in from_array.""" # Test 2D array with self.assertRaises(AssertionError): - BoundingBoxSE3.from_array(np.array([[1, 2, 3, 4, 5, 6, 7, 8, 9]])) + BoundingBoxSE3.from_array(np.array([[1, 2, 3, 4, 5, 6, 7, 8, 9, 10]])) - # Test wrong size + # Test wrong size, less than required + with self.assertRaises(AssertionError): + BoundingBoxSE3.from_array(np.array([1, 2, 3, 4, 5, 6, 7, 8, 9])) + + # Test wrong size, greater than required with self.assertRaises(AssertionError): - BoundingBoxSE3.from_array(np.array([1, 2, 3, 4, 5, 6, 7, 8])) + BoundingBoxSE3.from_array(np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11])) def test_zero_dimensions(self): """Test bounding box with zero dimensions.""" - center = EulerStateSE3(0.0, 0.0, 0.0, 0.0, 0.0, 0.0) + center = StateSE3(0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0) bbox = BoundingBoxSE3(center, 0.0, 0.0, 0.0) self.assertEqual(bbox.length, 0.0) self.assertEqual(bbox.width, 0.0) diff --git a/d123/geometry/transform/transform_se2.py b/d123/geometry/transform/transform_se2.py index faaa3587..c3a5ac6e 100644 --- a/d123/geometry/transform/transform_se2.py +++ b/d123/geometry/transform/transform_se2.py @@ -3,7 +3,7 @@ import numpy as np import numpy.typing as npt -from d123.geometry.geometry_index import Vector2DIndex +from d123.geometry.geometry_index import Point2DIndex, Vector2DIndex from d123.geometry.se import StateSE2, StateSE2Index from d123.geometry.utils.rotation_utils import normalize_angle from d123.geometry.vector import Vector2D @@ -180,3 +180,29 @@ def translate_se2_along_y(state_se2: StateSE2, distance: float) -> StateSE2: """ translation = Vector2D.from_array(np.array([0.0, distance], dtype=np.float64)) return StateSE2.from_array(translate_se2_array_along_body_frame(state_se2.array, translation), copy=False) + + +def translate_2d_along_body_frame( + points_2d: npt.NDArray[np.float64], + yaws: npt.NDArray[np.float64], + x_translate: npt.NDArray[np.float64], + y_translate: npt.NDArray[np.float64], +) -> npt.NDArray[np.float64]: + """Translate 2D points along their body frame. + + :param points_2d: Array of 2D points, indexed by :class:`~d123.geometry.Point2DIndex`. + :param yaws: Array of yaw angles. + :param x_translate: Array of x translation, i.e. forward translation. + :param y_translate: Array of y translation, i.e. left translation. + :return: Array of translated 2D points, indexed by :class:`~d123.geometry.Point2DIndex`. 
+ """ + assert points_2d.shape[-1] == len(Point2DIndex) + half_pi = np.pi / 2.0 + translation: npt.NDArray[np.float64] = np.stack( + [ + (y_translate * np.cos(yaws + half_pi)) + (x_translate * np.cos(yaws)), + (y_translate * np.sin(yaws + half_pi)) + (x_translate * np.sin(yaws)), + ], + axis=-1, + ) + return points_2d + translation diff --git a/d123/geometry/transform/transform_se3.py b/d123/geometry/transform/transform_se3.py index 4725e4c2..e5132fe2 100644 --- a/d123/geometry/transform/transform_se3.py +++ b/d123/geometry/transform/transform_se3.py @@ -4,10 +4,11 @@ import numpy.typing as npt from d123.geometry import Vector3D -from d123.geometry.geometry_index import Point3DIndex, StateSE3Index, Vector3DIndex +from d123.geometry.geometry_index import Point3DIndex, QuaternionIndex, StateSE3Index, Vector3DIndex from d123.geometry.se import StateSE3 from d123.geometry.utils.rotation_utils import ( conjugate_quaternion_array, + get_rotation_matrices_from_quaternion_array, get_rotation_matrix_from_quaternion_array, multiply_quaternion_arrays, ) @@ -211,3 +212,24 @@ def translate_se3_along_body_frame(state_se3: StateSE3, vector_3d: Vector3D) -> state_se3_array = state_se3.array.copy() state_se3_array[StateSE3Index.XYZ] += world_translation return StateSE3.from_array(state_se3_array, copy=False) + + +def translate_3d_along_body_frame( + points_3d: npt.NDArray[np.float64], + quaternions: npt.NDArray[np.float64], + translation: npt.NDArray[np.float64], +) -> npt.NDArray[np.float64]: + """Translates 3D points along a vector in the body frame defined by quaternions. + + :param points_3d: Array of 3D points, index by :class:`~d123.geometry.Point3DIndex`. + :param quaternions: Array of quaternions, index by :class:`~d123.geometry.QuaternionIndex`. + :param translation: Array of translation vectors, index by :class:`~d123.geometry.Vector3DIndex`. + :return: The translated 3D points in the world frame, index by :class:`~d123.geometry.Point3DIndex`. + """ + assert points_3d.shape[-1] == len(Point3DIndex) + assert quaternions.shape[-1] == len(QuaternionIndex) + assert translation.shape[-1] == len(Vector3DIndex) + + R = get_rotation_matrices_from_quaternion_array(quaternions) + world_translation = np.einsum("...ij,...j->...i", R, translation) + return points_3d + world_translation diff --git a/d123/geometry/utils/bounding_box_utils.py b/d123/geometry/utils/bounding_box_utils.py index ff8bb237..4d3a4cd0 100644 --- a/d123/geometry/utils/bounding_box_utils.py +++ b/d123/geometry/utils/bounding_box_utils.py @@ -6,136 +6,133 @@ BoundingBoxSE2Index, BoundingBoxSE3Index, Corners2DIndex, + Corners3DIndex, Point2DIndex, + Vector2DIndex, + Vector3DIndex, ) +from d123.geometry.transform.transform_se2 import translate_2d_along_body_frame +from d123.geometry.transform.transform_se3 import translate_3d_along_body_frame -def bbse2_array_to_corners_array(bbse2: npt.NDArray[np.float64]) -> npt.NDArray[np.float64]: +def get_corners_2d_factors() -> npt.NDArray[np.float64]: + """Returns the factors to compute the corners of a SE2 bounding box in the body frame. + + The factors are defined such that multiplying them with the length and width + of the bounding box yields the corner coordinates in the body frame. + + :return: A (4, 2), indexed by :class:`~d123.geometry.Corners2DIndex` and + :class:`~d123.geometry.Point2DIndex`, respectively. 
+ """ + # NOTE: ISO 8855 convention for rotation + factors = np.zeros((len(Corners2DIndex), len(Point2DIndex)), dtype=np.float64) + factors.fill(0.5) + factors[Corners2DIndex.FRONT_LEFT] *= [+1, +1] + factors[Corners2DIndex.FRONT_RIGHT] *= [+1, -1] + factors[Corners2DIndex.BACK_RIGHT] *= [-1, -1] + factors[Corners2DIndex.BACK_LEFT] *= [-1, +1] + return factors + + +def get_corners_3d_factors() -> npt.NDArray[np.float64]: + """Returns the factors to compute the corners of a SE3 bounding box in the body frame. + + The factors are defined such that multiplying them with the length, width, and height + of the bounding box yields the corner coordinates in the body frame. + + :return: A (8, 3), indexed by :class:`~d123.geometry.Corners3DIndex` and + :class:`~d123.geometry.Vector3DIndex`, respectively. """ - Converts an array of BoundingBoxSE2 objects to a coordinates array. - :param bbse2: Array of BoundingBoxSE2 objects. - :return: Coordinates array of shape (n, 5, 2) where n is the number of bounding boxes. + # NOTE: ISO 8855 convention for rotation + factors = np.zeros((len(Corners3DIndex), len(Vector3DIndex)), dtype=np.float64) + factors.fill(0.5) + factors[Corners3DIndex.FRONT_LEFT_BOTTOM] *= [+1, +1, -1] + factors[Corners3DIndex.FRONT_RIGHT_BOTTOM] *= [+1, -1, -1] + factors[Corners3DIndex.BACK_RIGHT_BOTTOM] *= [-1, -1, -1] + factors[Corners3DIndex.BACK_LEFT_BOTTOM] *= [-1, +1, -1] + factors[Corners3DIndex.FRONT_LEFT_TOP] *= [+1, +1, +1] + factors[Corners3DIndex.FRONT_RIGHT_TOP] *= [+1, -1, +1] + factors[Corners3DIndex.BACK_RIGHT_TOP] *= [-1, -1, +1] + factors[Corners3DIndex.BACK_LEFT_TOP] *= [-1, +1, +1] + return factors + + +def bbse2_array_to_corners_array(bbse2: npt.NDArray[np.float64]) -> npt.NDArray[np.float64]: + """Converts an array of BoundingBoxSE2 objects to the 2D coordinates array of their corners. + + :param bbse2: Array of SE2 bounding boxes, indexed by :class:`~d123.geometry.BoundingBoxSE2Index`. + :return: Coordinates array of shape (..., 4, 2), indexed by + :class:`~d123.geometry.Corners2DIndex` and :class:`~d123.geometry.Point2DIndex`, respectively. """ assert bbse2.shape[-1] == len(BoundingBoxSE2Index) ndim_one: bool = bbse2.ndim == 1 if ndim_one: - bbse2 = bbse2[None, :] - - corners_array = np.zeros((*bbse2.shape[:-1], len(Corners2DIndex), len(Point2DIndex)), dtype=np.float64) + bbse2 = bbse2[None, ...] 
centers = bbse2[..., BoundingBoxSE2Index.XY] yaws = bbse2[..., BoundingBoxSE2Index.YAW] - half_length = bbse2[..., BoundingBoxSE2Index.LENGTH] / 2.0 - half_width = bbse2[..., BoundingBoxSE2Index.WIDTH] / 2.0 - - corners_array[..., Corners2DIndex.FRONT_LEFT, :] = translate_along_yaw_array( - centers, - yaws, - half_length, - half_width, - ) - corners_array[..., Corners2DIndex.FRONT_RIGHT, :] = translate_along_yaw_array( - centers, - yaws, - half_length, - -half_width, - ) - corners_array[..., Corners2DIndex.BACK_RIGHT, :] = translate_along_yaw_array( - centers, - yaws, - -half_length, - -half_width, - ) - corners_array[..., Corners2DIndex.BACK_LEFT, :] = translate_along_yaw_array( - centers, - yaws, - -half_length, - half_width, - ) + extents = bbse2[..., BoundingBoxSE2Index.EXTENT] # (..., 2) + + factors = get_corners_2d_factors() # (4, 2) + corner_translation_body = extents[..., None, :] * factors[None, :, :] # (..., 4, 2) + + corners_array = translate_2d_along_body_frame( # (..., 4, 2) + points_2d=centers[..., None, :], # (..., 1, 2) + yaws=yaws[..., None], # (..., 1) + x_translate=corner_translation_body[..., Vector2DIndex.X], + y_translate=corner_translation_body[..., Vector2DIndex.Y], + ) # (..., 4, 2) return corners_array.squeeze(axis=0) if ndim_one else corners_array def corners_2d_array_to_polygon_array(corners_array: npt.NDArray[np.float64]) -> npt.NDArray[np.object_]: + """Converts an array of 2D corners to an array of shapely Polygons. + TODO: Consider removing this function? + + :param corners_array: Array of shape (..., 4, 2) where 4 is the number of corners. + :return: Array of shapely Polygons. + """ polygons = shapely.creation.polygons(corners_array) return polygons -def bbse2_array_to_polygon_array(bbse2: npt.NDArray[np.float64]) -> npt.NDArray[np.float64]: - return corners_2d_array_to_polygon_array(bbse2_array_to_corners_array(bbse2)) - +def bbse2_array_to_polygon_array(bbse2: npt.NDArray[np.float64]) -> npt.NDArray[np.object_]: + """Converts an array of BoundingBoxSE2 objects to an array of shapely Polygons. -def translate_along_yaw_array( - points_2d: npt.NDArray[np.float64], - headings: npt.NDArray[np.float64], - lon: npt.NDArray[np.float64], - lat: npt.NDArray[np.float64], -) -> npt.NDArray[np.float64]: - # TODO: move somewhere else - assert points_2d.shape[-1] == len(Point2DIndex) - half_pi = np.pi / 2.0 - translation: npt.NDArray[np.float64] = np.stack( - [ - (lat * np.cos(headings + half_pi)) + (lon * np.cos(headings)), - (lat * np.sin(headings + half_pi)) + (lon * np.sin(headings)), - ], - axis=-1, - ) - return points_2d + translation + :param bbse2: Array of SE2 bounding boxes, indexed by :class:`~d123.geometry.BoundingBoxSE2Index`. + :return: Array of shapely Polygons. + """ + return corners_2d_array_to_polygon_array(bbse2_array_to_corners_array(bbse2)) def bbse3_array_to_corners_array(bbse3_array: npt.NDArray[np.float64]) -> npt.NDArray[np.float64]: - """Converts an array of BoundingBoxSE3 objects to a coordinates array. - TODO: Fix this function + """Converts an array of BoundingBoxSE3 objects to the 3D coordinates array of their corners. - :param bbse3_array: Array of BoundingBoxSE3 objects, shape (..., 7) [x, y, z, yaw, pitch, roll, length, width, height]. - :return: Coordinates array of shape (..., 8, 3) where 8 is the number of corners. + :param bbse3_array: Array of SE3 bounding boxes, indexed by :class:`~d123.geometry.BoundingBoxSE3Index`. 
+ :return: Coordinates array of shape (..., 8, 3), indexed by + :class:`~d123.geometry.Corners3DIndex` and :class:`~d123.geometry.Point3DIndex`, respectively. """ assert bbse3_array.shape[-1] == len(BoundingBoxSE3Index) + # Flag whether to unsqueeze and squeeze the input dim ndim_one: bool = bbse3_array.ndim == 1 if ndim_one: - bbse3_array = bbse3_array[None, :] + bbse3_array = bbse3_array[None, ...] # Extract parameters centers = bbse3_array[..., BoundingBoxSE3Index.XYZ] # (..., 3) - yaws = bbse3_array[..., BoundingBoxSE3Index.YAW] # (...,) - pitches = bbse3_array[..., BoundingBoxSE3Index.PITCH] # (...,) - rolls = bbse3_array[..., BoundingBoxSE3Index.ROLL] # (...,) - - # Corner factors: (x, y, z) in box frame - factors = np.array( - [ - [+0.5, -0.5, -0.5], # FRONT_LEFT_BOTTOM - [+0.5, +0.5, -0.5], # FRONT_RIGHT_BOTTOM - [-0.5, +0.5, -0.5], # BACK_RIGHT_BOTTOM - [-0.5, -0.5, -0.5], # BACK_LEFT_BOTTOM - [+0.5, -0.5, +0.5], # FRONT_LEFT_TOP - [+0.5, +0.5, +0.5], # FRONT_RIGHT_TOP - [-0.5, +0.5, +0.5], # BACK_RIGHT_TOP - [-0.5, -0.5, +0.5], # BACK_LEFT_TOP - ], - dtype=np.float64, - ) # (8, 3) + quaternions = bbse3_array[..., BoundingBoxSE3Index.QUATERNION] # (..., 4) # Box extents - extents = bbse3_array[..., BoundingBoxSE3Index.EXTENT] # (...,) - corners_local = factors[None, :, :] * extents # (..., 8, 3) - - # Rotation matrices (yaw, pitch, roll) - def rotation_matrix(yaw, pitch, roll): - cy, sy = np.cos(yaw), np.sin(yaw) - cp, sp = np.cos(pitch), np.sin(pitch) - cr, sr = np.cos(roll), np.sin(roll) - Rz = np.array([[cy, -sy, 0], [sy, cy, 0], [0, 0, 1]]) - Ry = np.array([[cp, 0, sp], [0, 1, 0], [-sp, 0, cp]]) - Rx = np.array([[1, 0, 0], [0, cr, -sr], [0, sr, cr]]) - return Rz @ Ry @ Rx - - corners_world = np.empty((*bbse3_array.shape[:-1], 8, 3), dtype=np.float64) - for idx in np.ndindex(bbse3_array.shape[:-1]): - R = rotation_matrix(yaws[idx], pitches[idx], rolls[idx]) - corners_world[idx] = centers[idx] + (corners_local[idx] @ R.T) + factors = get_corners_3d_factors() # (8, 3) + extents = bbse3_array[..., BoundingBoxSE3Index.EXTENT] # (..., 3) + corner_translation_body = extents[..., None, :] * factors[None, :, :] # (..., 8, 3) + corners_world = translate_3d_along_body_frame( + centers[..., None, :], # (..., 1, 3) + quaternions[..., None, :], # (..., 1, 4) + corner_translation_body, + ) return corners_world.squeeze(axis=0) if ndim_one else corners_world diff --git a/d123/geometry/utils/rotation_utils.py b/d123/geometry/utils/rotation_utils.py index 871ec29c..ead501b1 100644 --- a/d123/geometry/utils/rotation_utils.py +++ b/d123/geometry/utils/rotation_utils.py @@ -39,12 +39,21 @@ def get_rotation_matrices_from_euler_array(euler_angles_array: npt.NDArray[np.fl Convention: Intrinsic rotations in order Z-Y-X (yaw, pitch, roll) Equivalent to: R = R_x(roll) @ R_y(pitch) @ R_z(yaw) """ - assert euler_angles_array.ndim == 2 and euler_angles_array.shape[1] == len(EulerAnglesIndex) + assert euler_angles_array.ndim >= 1 and euler_angles_array.shape[-1] == len(EulerAnglesIndex) + + # Store original shape for reshaping later + original_shape = euler_angles_array.shape[:-1] + + # Flatten to 2D if needed + if euler_angles_array.ndim > 2: + euler_angles_array_ = euler_angles_array.reshape(-1, len(EulerAnglesIndex)) + else: + euler_angles_array_ = euler_angles_array # Extract roll, pitch, yaw for all samples at once - roll = euler_angles_array[:, EulerAnglesIndex.ROLL] - pitch = euler_angles_array[:, EulerAnglesIndex.PITCH] - yaw = euler_angles_array[:, EulerAnglesIndex.YAW] + roll = 
euler_angles_array_[:, EulerAnglesIndex.ROLL] + pitch = euler_angles_array_[:, EulerAnglesIndex.PITCH] + yaw = euler_angles_array_[:, EulerAnglesIndex.YAW] # Compute sin/cos for all angles at once cos_roll, sin_roll = np.cos(roll), np.sin(roll) @@ -52,7 +61,7 @@ def get_rotation_matrices_from_euler_array(euler_angles_array: npt.NDArray[np.fl cos_yaw, sin_yaw = np.cos(yaw), np.sin(yaw) # Build rotation matrices for entire batch - batch_size = euler_angles_array.shape[0] + batch_size = euler_angles_array_.shape[0] rotation_matrices = np.zeros((batch_size, 3, 3), dtype=np.float64) # ZYX Tait-Bryan rotation matrix elements @@ -68,6 +77,10 @@ def get_rotation_matrices_from_euler_array(euler_angles_array: npt.NDArray[np.fl rotation_matrices[:, 2, 1] = cos_roll * sin_pitch * sin_yaw + sin_roll * cos_yaw rotation_matrices[:, 2, 2] = cos_roll * cos_pitch + # Reshape back to original batch dimensions + (3, 3) + if len(original_shape) > 1: + rotation_matrices = rotation_matrices.reshape(original_shape + (3, 3)) + return rotation_matrices @@ -140,12 +153,28 @@ def get_rotation_matrix_from_euler_array(euler_angles: npt.NDArray[np.float64]) def get_rotation_matrices_from_quaternion_array(quaternion_array: npt.NDArray[np.float64]) -> npt.NDArray[np.float64]: - assert quaternion_array.ndim == 2 and quaternion_array.shape[-1] == len(QuaternionIndex) - norm_quaternion = normalize_quaternion_array(quaternion_array) + assert quaternion_array.ndim >= 1 and quaternion_array.shape[-1] == len(QuaternionIndex) + + # Store original shape for reshaping later + original_shape = quaternion_array.shape[:-1] + + # Flatten to 2D if needed + if quaternion_array.ndim > 2: + quaternion_array_ = quaternion_array.reshape(-1, len(QuaternionIndex)) + else: + quaternion_array_ = quaternion_array + + norm_quaternion = normalize_quaternion_array(quaternion_array_) Q_matrices = get_q_matrices(norm_quaternion) Q_bar_matrices = get_q_bar_matrices(norm_quaternion) rotation_matrix = batch_matmul(Q_matrices, Q_bar_matrices.conj().swapaxes(-1, -2)) - return rotation_matrix[:, 1:][:, :, 1:] + rotation_matrix = rotation_matrix[:, 1:][:, :, 1:] + + # Reshape back to original batch dimensions + (3, 3) + if len(original_shape) > 1: + rotation_matrix = rotation_matrix.reshape(original_shape + (3, 3)) + + return rotation_matrix def get_rotation_matrix_from_quaternion_array(quaternion_array: npt.NDArray[np.float64]) -> npt.NDArray[np.float64]: diff --git a/d123/geometry/utils/test/__init__.py b/d123/geometry/utils/test/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/d123/geometry/utils/test/test_bounding_box_utils.py b/d123/geometry/utils/test/test_bounding_box_utils.py new file mode 100644 index 00000000..1099bc6f --- /dev/null +++ b/d123/geometry/utils/test/test_bounding_box_utils.py @@ -0,0 +1,269 @@ +import unittest + +import numpy as np +import numpy.typing as npt +import shapely + +from d123.geometry.geometry_index import ( + BoundingBoxSE3Index, + Corners2DIndex, + Corners3DIndex, + Point2DIndex, + Point3DIndex, + StateSE3Index, + EulerStateSE3Index, +) +from d123.geometry.point import Point3D +from d123.geometry.rotation import Quaternion +from d123.geometry.se import EulerStateSE3, StateSE3 +from d123.geometry.transform.transform_se3 import translate_se3_along_body_frame +from d123.geometry.utils.bounding_box_utils import ( + bbse2_array_to_corners_array, + corners_2d_array_to_polygon_array, + bbse2_array_to_polygon_array, + bbse3_array_to_corners_array, + get_corners_3d_factors, +) +from d123.geometry.vector 
import Vector3D + + +class TestBoundingBoxUtils(unittest.TestCase): + + def setUp(self): + self._num_consistency_checks = 10 + self._max_pose_xyz = 100.0 + self._max_extent = 200.0 + + def _get_random_euler_se3_array(self, size: int) -> npt.NDArray[np.float64]: + """Generate random SE3 poses""" + random_se3_array = np.zeros((size, len(EulerStateSE3Index)), dtype=np.float64) + random_se3_array[:, EulerStateSE3Index.XYZ] = np.random.uniform( + -self._max_pose_xyz, + self._max_pose_xyz, + (size, len(Point3DIndex)), + ) + random_se3_array[:, EulerStateSE3Index.YAW] = np.random.uniform(-np.pi, np.pi, size) + random_se3_array[:, EulerStateSE3Index.PITCH] = np.random.uniform(-np.pi / 2, np.pi / 2, size) + random_se3_array[:, EulerStateSE3Index.ROLL] = np.random.uniform(-np.pi, np.pi, size) + + return random_se3_array + + def test_bbse2_array_to_corners_array_one_dim(self): + bounding_box_se2_array = np.array([1.0, 2.0, 0.0, 4.0, 2.0]) + corners_array = bbse2_array_to_corners_array(bounding_box_se2_array) + + # fill expected + expected_corners = np.zeros((4, 2), dtype=np.float64) + expected_corners[Corners2DIndex.FRONT_LEFT] = [1.0 + 2.0, 2.0 + 1.0] + expected_corners[Corners2DIndex.FRONT_RIGHT] = [1.0 + 2.0, 2.0 - 1.0] + expected_corners[Corners2DIndex.BACK_RIGHT] = [1.0 - 2.0, 2.0 - 1.0] + expected_corners[Corners2DIndex.BACK_LEFT] = [1.0 - 2.0, 2.0 + 1.0] + + np.testing.assert_allclose(corners_array, expected_corners, atol=1e-6) + + def test_bbse2_array_to_corners_array_n_dim(self): + bounding_box_se2_array = np.array([1.0, 2.0, 0.0, 4.0, 2.0]) + bounding_box_se2_array = np.tile(bounding_box_se2_array, (3, 1)) + + corners_array = bbse2_array_to_corners_array(bounding_box_se2_array) + + # fill expected + expected_corners = np.zeros((4, 2), dtype=np.float64) + expected_corners[Corners2DIndex.FRONT_LEFT] = [1.0 + 2.0, 2.0 + 1.0] + expected_corners[Corners2DIndex.FRONT_RIGHT] = [1.0 + 2.0, 2.0 - 1.0] + expected_corners[Corners2DIndex.BACK_RIGHT] = [1.0 - 2.0, 2.0 - 1.0] + expected_corners[Corners2DIndex.BACK_LEFT] = [1.0 - 2.0, 2.0 + 1.0] + expected_corners = np.tile(expected_corners, (3, 1, 1)) + + np.testing.assert_allclose(corners_array, expected_corners, atol=1e-6) + + def test_bbse2_array_to_corners_array_zero_dim(self): + bounding_box_se2_array = np.zeros((0, 5), dtype=np.float64) + corners_array = bbse2_array_to_corners_array(bounding_box_se2_array) + expected_corners = np.zeros((0, 4, 2), dtype=np.float64) + np.testing.assert_allclose(corners_array, expected_corners, atol=1e-6) + + def test_bbse2_array_to_corners_array_rotation(self): + bounding_box_se2_array = np.array([1.0, 2.0, np.pi / 2, 4.0, 2.0]) + corners_array = bbse2_array_to_corners_array(bounding_box_se2_array) + + # fill expected + expected_corners = np.zeros((len(Corners2DIndex), len(Point2DIndex)), dtype=np.float64) + expected_corners[Corners2DIndex.FRONT_LEFT] = [1.0 - 1.0, 2.0 + 2.0] + expected_corners[Corners2DIndex.FRONT_RIGHT] = [1.0 + 1.0, 2.0 + 2.0] + expected_corners[Corners2DIndex.BACK_RIGHT] = [1.0 + 1.0, 2.0 - 2.0] + expected_corners[Corners2DIndex.BACK_LEFT] = [1.0 - 1.0, 2.0 - 2.0] + + np.testing.assert_allclose(corners_array, expected_corners, atol=1e-6) + + def test_corners_2d_array_to_polygon_array_one_dim(self): + corners_array = np.array( + [ + [3.0, 3.0], + [3.0, 1.0], + [-1.0, 1.0], + [-1.0, 3.0], + ] + ) + polygon = corners_2d_array_to_polygon_array(corners_array) + + expected_polygon = shapely.geometry.Polygon(corners_array) + np.testing.assert_allclose(polygon.area, expected_polygon.area, atol=1e-6) + 
self.assertTrue(polygon.equals(expected_polygon)) + + def test_corners_2d_array_to_polygon_array_n_dim(self): + corners_array = np.array( + [ + [ + [3.0, 3.0], + [3.0, 1.0], + [-1.0, 1.0], + [-1.0, 3.0], + ], + [ + [4.0, 4.0], + [4.0, 2.0], + [0.0, 2.0], + [0.0, 4.0], + ], + ] + ) + polygons = corners_2d_array_to_polygon_array(corners_array) + + expected_polygon_1 = shapely.geometry.Polygon(corners_array[0]) + expected_polygon_2 = shapely.geometry.Polygon(corners_array[1]) + + np.testing.assert_allclose(polygons[0].area, expected_polygon_1.area, atol=1e-6) + self.assertTrue(polygons[0].equals(expected_polygon_1)) + + np.testing.assert_allclose(polygons[1].area, expected_polygon_2.area, atol=1e-6) + self.assertTrue(polygons[1].equals(expected_polygon_2)) + + def test_corners_2d_array_to_polygon_array_zero_dim(self): + corners_array = np.zeros((0, 4, 2), dtype=np.float64) + polygons = corners_2d_array_to_polygon_array(corners_array) + expected_polygons = np.zeros((0,), dtype=np.object_) + np.testing.assert_array_equal(polygons, expected_polygons) + + def test_bbse2_array_to_polygon_array_one_dim(self): + bounding_box_se2_array = np.array([1.0, 2.0, 0.0, 4.0, 2.0]) + polygon = bbse2_array_to_polygon_array(bounding_box_se2_array) + + expected_corners = np.zeros((4, 2), dtype=np.float64) + expected_corners[Corners2DIndex.FRONT_LEFT] = [1.0 + 2.0, 2.0 + 1.0] + expected_corners[Corners2DIndex.FRONT_RIGHT] = [1.0 + 2.0, 2.0 - 1.0] + expected_corners[Corners2DIndex.BACK_RIGHT] = [1.0 - 2.0, 2.0 - 1.0] + expected_corners[Corners2DIndex.BACK_LEFT] = [1.0 - 2.0, 2.0 + 1.0] + expected_polygon = shapely.geometry.Polygon(expected_corners) + + np.testing.assert_allclose(polygon.area, expected_polygon.area, atol=1e-6) + self.assertTrue(polygon.equals(expected_polygon)) + + def test_bbse2_array_to_polygon_array_n_dim(self): + bounding_box_se2_array = np.array([1.0, 2.0, 0.0, 4.0, 2.0]) + bounding_box_se2_array = np.tile(bounding_box_se2_array, (3, 1)) + + polygons = bbse2_array_to_polygon_array(bounding_box_se2_array) + + expected_corners = np.zeros((4, 2), dtype=np.float64) + expected_corners[Corners2DIndex.FRONT_LEFT] = [1.0 + 2.0, 2.0 + 1.0] + expected_corners[Corners2DIndex.FRONT_RIGHT] = [1.0 + 2.0, 2.0 - 1.0] + expected_corners[Corners2DIndex.BACK_RIGHT] = [1.0 - 2.0, 2.0 - 1.0] + expected_corners[Corners2DIndex.BACK_LEFT] = [1.0 - 2.0, 2.0 + 1.0] + expected_polygon = shapely.geometry.Polygon(expected_corners) + + for polygon in polygons: + np.testing.assert_allclose(polygon.area, expected_polygon.area, atol=1e-6) + self.assertTrue(polygon.equals(expected_polygon)) + + def test_bbse2_array_to_polygon_array_zero_dim(self): + bounding_box_se2_array = np.zeros((0, 5), dtype=np.float64) + polygons = bbse2_array_to_polygon_array(bounding_box_se2_array) + expected_polygons = np.zeros((0,), dtype=np.object_) + np.testing.assert_array_equal(polygons, expected_polygons) + + def test_bbse3_array_to_corners_array_one_dim(self): + bounding_box_se3_array = np.array([1.0, 2.0, 3.0, 1.0, 0.0, 0.0, 0.0, 4.0, 2.0, 6.0]) + corners_array = bbse3_array_to_corners_array(bounding_box_se3_array) + + # fill expected + expected_corners = np.zeros((8, 3), dtype=np.float64) + expected_corners[Corners3DIndex.FRONT_LEFT_BOTTOM] = [1.0 + 2.0, 2.0 + 1.0, 3.0 - 3.0] + expected_corners[Corners3DIndex.FRONT_RIGHT_BOTTOM] = [1.0 + 2.0, 2.0 - 1.0, 3.0 - 3.0] + expected_corners[Corners3DIndex.BACK_RIGHT_BOTTOM] = [1.0 - 2.0, 2.0 - 1.0, 3.0 - 3.0] + expected_corners[Corners3DIndex.BACK_LEFT_BOTTOM] = [1.0 - 2.0, 2.0 + 1.0, 3.0 - 3.0] + 
expected_corners[Corners3DIndex.FRONT_LEFT_TOP] = [1.0 + 2.0, 2.0 + 1.0, 3.0 + 3.0] + expected_corners[Corners3DIndex.FRONT_RIGHT_TOP] = [1.0 + 2.0, 2.0 - 1.0, 3.0 + 3.0] + expected_corners[Corners3DIndex.BACK_RIGHT_TOP] = [1.0 - 2.0, 2.0 - 1.0, 3.0 + 3.0] + expected_corners[Corners3DIndex.BACK_LEFT_TOP] = [1.0 - 2.0, 2.0 + 1.0, 3.0 + 3.0] + + np.testing.assert_allclose(corners_array, expected_corners, atol=1e-6) + + def test_bbse3_array_to_corners_array_one_dim_rotation(self): + for _ in range(self._num_consistency_checks): + se3_state = EulerStateSE3.from_array(self._get_random_euler_se3_array(1)[0]).quaternion_se3 + se3_array = se3_state.array + + # construct a bounding box + bounding_box_se3_array = np.zeros((len(BoundingBoxSE3Index),), dtype=np.float64) + length, width, height = np.random.uniform(0.0, self._max_extent, size=3) + + bounding_box_se3_array[BoundingBoxSE3Index.STATE_SE3] = se3_array + bounding_box_se3_array[BoundingBoxSE3Index.LENGTH] = length + bounding_box_se3_array[BoundingBoxSE3Index.WIDTH] = width + bounding_box_se3_array[BoundingBoxSE3Index.HEIGHT] = height + + corners_array = bbse3_array_to_corners_array(bounding_box_se3_array) + + corners_3d_factors = get_corners_3d_factors() + for corner_idx in Corners3DIndex: + body_translate_vector = Vector3D.from_array( + corners_3d_factors[corner_idx] * bounding_box_se3_array[BoundingBoxSE3Index.EXTENT] + ) + np.testing.assert_allclose( + corners_array[corner_idx], + translate_se3_along_body_frame(se3_state, body_translate_vector).point_3d.array, + atol=1e-6, + ) + + def test_bbse3_array_to_corners_array_n_dim(self): + for _ in range(self._num_consistency_checks): + N = np.random.randint(1, 20) + se3_array = self._get_random_euler_se3_array(N) + se3_state_array = np.array([EulerStateSE3.from_array(arr).quaternion_se3.array for arr in se3_array]) + + # construct a bounding box + bounding_box_se3_array = np.zeros((N, len(BoundingBoxSE3Index)), dtype=np.float64) + lengths, widths, heights = np.random.uniform(0.0, self._max_extent, size=(3, N)) + + bounding_box_se3_array[:, BoundingBoxSE3Index.STATE_SE3] = se3_state_array + bounding_box_se3_array[:, BoundingBoxSE3Index.LENGTH] = lengths + bounding_box_se3_array[:, BoundingBoxSE3Index.WIDTH] = widths + bounding_box_se3_array[:, BoundingBoxSE3Index.HEIGHT] = heights + + corners_array = bbse3_array_to_corners_array(bounding_box_se3_array) + + corners_3d_factors = get_corners_3d_factors() + for obj_idx in range(N): + for corner_idx in Corners3DIndex: + body_translate_vector = Vector3D.from_array( + corners_3d_factors[corner_idx] * bounding_box_se3_array[obj_idx, BoundingBoxSE3Index.EXTENT] + ) + np.testing.assert_allclose( + corners_array[obj_idx, corner_idx], + translate_se3_along_body_frame( + StateSE3.from_array(bounding_box_se3_array[obj_idx, BoundingBoxSE3Index.STATE_SE3]), + body_translate_vector, + ).point_3d.array, + atol=1e-6, + ) + + def test_bbse3_array_to_corners_array_zero_dim(self): + bounding_box_se3_array = np.zeros((0, len(BoundingBoxSE3Index)), dtype=np.float64) + corners_array = bbse3_array_to_corners_array(bounding_box_se3_array) + expected_corners = np.zeros((0, 8, 3), dtype=np.float64) + np.testing.assert_allclose(corners_array, expected_corners, atol=1e-6) + + +if __name__ == "__main__": + unittest.main() diff --git a/d123/geometry/utils/test/test_polyline_utils.py b/d123/geometry/utils/test/test_polyline_utils.py new file mode 100644 index 00000000..e69de29b diff --git a/d123/geometry/utils/test/test_rotation_utils.py 
b/d123/geometry/utils/test/test_rotation_utils.py new file mode 100644 index 00000000..e69de29b diff --git a/d123/geometry/utils/utils.py b/d123/geometry/utils/utils.py deleted file mode 100644 index b1ce46b2..00000000 --- a/d123/geometry/utils/utils.py +++ /dev/null @@ -1,63 +0,0 @@ -import numpy as np -import numpy.typing as npt -import shapely - -from d123.geometry.geometry_index import BoundingBoxSE2Index, Corners2DIndex, Point2DIndex - - -def bbse2_array_to_corners_array(bbse2: npt.NDArray[np.float64]) -> npt.NDArray[np.float64]: - """ - Converts an array of BoundingBoxSE2 objects to a coordinates array. - :param bbse2: Array of BoundingBoxSE2 objects. - :return: Coordinates array of shape (n, 5, 2) where n is the number of bounding boxes. - """ - assert bbse2.shape[-1] == len(BoundingBoxSE2Index) - - ndim_one: bool = bbse2.ndim == 1 - if ndim_one: - bbse2 = bbse2[None, :] - - corners_array = np.zeros((*bbse2.shape[:-1], len(Corners2DIndex), len(Point2DIndex)), dtype=np.float64) - - centers = bbse2[..., BoundingBoxSE2Index.XY] - yaws = bbse2[..., BoundingBoxSE2Index.YAW] - half_length = bbse2[..., BoundingBoxSE2Index.LENGTH] / 2.0 - half_width = bbse2[..., BoundingBoxSE2Index.WIDTH] / 2.0 - - corners_array[..., Corners2DIndex.FRONT_LEFT, :] = translate_along_yaw_array(centers, yaws, half_length, half_width) - corners_array[..., Corners2DIndex.FRONT_RIGHT, :] = translate_along_yaw_array( - centers, yaws, half_length, -half_width - ) - corners_array[..., Corners2DIndex.BACK_RIGHT, :] = translate_along_yaw_array( - centers, yaws, -half_length, -half_width - ) - corners_array[..., Corners2DIndex.BACK_LEFT, :] = translate_along_yaw_array(centers, yaws, -half_length, half_width) - - return corners_array.squeeze(axis=0) if ndim_one else corners_array - - -def corners_array_to_polygon_array(corners_array: npt.NDArray[np.float64]) -> npt.NDArray[np.object_]: - polygons = shapely.creation.polygons(corners_array) - return polygons - - -def bbse2_array_to_polygon_array(bbse2: npt.NDArray[np.float64]) -> npt.NDArray[np.float64]: - return corners_array_to_polygon_array(bbse2_array_to_corners_array(bbse2)) - - -def translate_along_yaw_array( - points_2d: npt.NDArray[np.float64], - headings: npt.NDArray[np.float64], - lon: npt.NDArray[np.float64], - lat: npt.NDArray[np.float64], -) -> npt.NDArray[np.float64]: - assert points_2d.shape[-1] == len(Point2DIndex) - half_pi = np.pi / 2.0 - translation: npt.NDArray[np.float64] = np.stack( - [ - (lat * np.cos(headings + half_pi)) + (lon * np.cos(headings)), - (lat * np.sin(headings + half_pi)) + (lon * np.sin(headings)), - ], - axis=-1, - ) - return points_2d + translation From f6c842e5315a763ce080fef5ac709be631526825 Mon Sep 17 00:00:00 2001 From: Daniel Dauner Date: Mon, 22 Sep 2025 15:27:46 +0200 Subject: [PATCH 040/145] Run pre-commit file & code formatting --- d123/common/datatypes/detection/detection.py | 2 +- .../datatypes/vehicle_state/ego_state.py | 2 +- .../vehicle_state/vehicle_parameters.py | 4 ++-- d123/common/visualization/matplotlib/utils.py | 2 +- d123/common/visualization/viser/utils.py | 2 +- .../av2/av2_data_converter.py | 5 ++++- .../dataset_specific/nuplan/load_sensor.py | 4 +--- .../nuplan/nuplan_data_converter.py | 1 - .../wopd/wopd_data_converter.py | 3 +-- d123/geometry/__init__.py | 10 ++++----- d123/geometry/se.py | 2 +- d123/geometry/test/test_bounding_box.py | 2 +- .../test/test_transform_consistency.py | 22 +++++++++---------- .../test/test_transform_euler_se3.py | 4 ++-- .../transform/test/test_transform_se3.py | 6 ++--- 
d123/geometry/utils/rotation_utils.py | 4 ++-- .../utils/test/test_bounding_box_utils.py | 7 ++---- 17 files changed, 39 insertions(+), 43 deletions(-) diff --git a/d123/common/datatypes/detection/detection.py b/d123/common/datatypes/detection/detection.py index ff18a561..8ada50fe 100644 --- a/d123/common/datatypes/detection/detection.py +++ b/d123/common/datatypes/detection/detection.py @@ -7,7 +7,7 @@ from d123.common.datatypes.detection.detection_types import DetectionType from d123.common.datatypes.time.time_point import TimePoint from d123.common.utils.enums import SerialIntEnum -from d123.geometry import BoundingBoxSE2, BoundingBoxSE3, OccupancyMap2D, StateSE2, EulerStateSE3, Vector2D, Vector3D +from d123.geometry import BoundingBoxSE2, BoundingBoxSE3, EulerStateSE3, OccupancyMap2D, StateSE2, Vector2D, Vector3D @dataclass diff --git a/d123/common/datatypes/vehicle_state/ego_state.py b/d123/common/datatypes/vehicle_state/ego_state.py index 1d378fcd..79522517 100644 --- a/d123/common/datatypes/vehicle_state/ego_state.py +++ b/d123/common/datatypes/vehicle_state/ego_state.py @@ -23,7 +23,7 @@ rear_axle_se3_to_center_se3, ) from d123.common.utils.enums import classproperty -from d123.geometry import BoundingBoxSE2, BoundingBoxSE3, StateSE2, EulerStateSE3, Vector2D, Vector3D +from d123.geometry import BoundingBoxSE2, BoundingBoxSE3, EulerStateSE3, StateSE2, Vector2D, Vector3D # TODO: Find an appropriate way to handle SE2 and SE3 states. diff --git a/d123/common/datatypes/vehicle_state/vehicle_parameters.py b/d123/common/datatypes/vehicle_state/vehicle_parameters.py index 4698206b..5bf38706 100644 --- a/d123/common/datatypes/vehicle_state/vehicle_parameters.py +++ b/d123/common/datatypes/vehicle_state/vehicle_parameters.py @@ -1,8 +1,8 @@ from dataclasses import dataclass -from d123.geometry import StateSE2, EulerStateSE3, Vector2D, Vector3D -from d123.geometry.transform.transform_se2 import translate_se2_along_body_frame +from d123.geometry import EulerStateSE3, StateSE2, Vector2D, Vector3D from d123.geometry.transform.transform_euler_se3 import translate_euler_se3_along_body_frame +from d123.geometry.transform.transform_se2 import translate_se2_along_body_frame # TODO: Add more vehicle parameters, potentially extend the parameters. diff --git a/d123/common/visualization/matplotlib/utils.py b/d123/common/visualization/matplotlib/utils.py index 34cc2819..1742a864 100644 --- a/d123/common/visualization/matplotlib/utils.py +++ b/d123/common/visualization/matplotlib/utils.py @@ -9,7 +9,7 @@ from matplotlib.path import Path from d123.common.visualization.color.config import PlotConfig -from d123.geometry import StateSE2, EulerStateSE3 +from d123.geometry import EulerStateSE3, StateSE2 def add_shapely_polygon_to_ax( diff --git a/d123/common/visualization/viser/utils.py b/d123/common/visualization/viser/utils.py index 18af79d3..1ef2ca18 100644 --- a/d123/common/visualization/viser/utils.py +++ b/d123/common/visualization/viser/utils.py @@ -15,7 +15,7 @@ from d123.dataset.maps.abstract_map import MapLayer from d123.dataset.maps.abstract_map_objects import AbstractLane, AbstractSurfaceMapObject from d123.dataset.scene.abstract_scene import AbstractScene -from d123.geometry import BoundingBoxSE3, Point3D, Polyline3D, EulerStateSE3 +from d123.geometry import BoundingBoxSE3, EulerStateSE3, Point3D, Polyline3D from d123.geometry.transform.transform_euler_se3 import convert_relative_to_absolute_points_3d_array # TODO: Refactor this file. 
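For reference, a minimal usage sketch of the quaternion-based corner computation introduced in PATCH 039 (a sketch only, assuming the d123 package as laid out in these diffs; the identity quaternion (1, 0, 0, 0) encodes no rotation):

    import numpy as np

    from d123.geometry.utils.bounding_box_utils import bbse3_array_to_corners_array

    # BoundingBoxSE3Index layout: [x, y, z, qw, qx, qy, qz, length, width, height]
    box = np.array([1.0, 2.0, 3.0, 1.0, 0.0, 0.0, 0.0, 4.0, 2.0, 1.5])
    corners = bbse3_array_to_corners_array(box)  # shape (8, 3), one row per Corners3DIndex
    # With the identity quaternion, the z-coordinates span center z +/- height / 2.
    print(corners[:, 2].min(), corners[:, 2].max())  # approximately 2.25 and 3.75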
diff --git a/d123/dataset/dataset_specific/av2/av2_data_converter.py b/d123/dataset/dataset_specific/av2/av2_data_converter.py index d32e18e5..ffa45db3 100644 --- a/d123/dataset/dataset_specific/av2/av2_data_converter.py +++ b/d123/dataset/dataset_specific/av2/av2_data_converter.py @@ -35,7 +35,10 @@ from d123.dataset.dataset_specific.raw_data_converter import DataConverterConfig, RawDataConverter from d123.dataset.logs.log_metadata import LogMetadata from d123.geometry import BoundingBoxSE3Index, EulerStateSE3, Vector3D, Vector3DIndex -from d123.geometry.transform.transform_euler_se3 import convert_relative_to_absolute_euler_se3_array, get_rotation_matrix +from d123.geometry.transform.transform_euler_se3 import ( + convert_relative_to_absolute_euler_se3_array, + get_rotation_matrix, +) from d123.geometry.utils.constants import DEFAULT_PITCH, DEFAULT_ROLL diff --git a/d123/dataset/dataset_specific/nuplan/load_sensor.py b/d123/dataset/dataset_specific/nuplan/load_sensor.py index 3e0033b3..f7dd16e8 100644 --- a/d123/dataset/dataset_specific/nuplan/load_sensor.py +++ b/d123/dataset/dataset_specific/nuplan/load_sensor.py @@ -1,9 +1,8 @@ import io from pathlib import Path - -from d123.common.utils.dependencies import check_dependencies from d123.common.datatypes.sensor.lidar import LiDAR, LiDARMetadata +from d123.common.utils.dependencies import check_dependencies check_dependencies(["nuplan"], "nuplan") from nuplan.database.utils.pointclouds.lidar import LidarPointCloud @@ -14,4 +13,3 @@ def load_nuplan_lidar_from_path(filepath: Path, lidar_metadata: LiDARMetadata) - with open(filepath, "rb") as fp: buffer = io.BytesIO(fp.read()) return LiDAR(metadata=lidar_metadata, point_cloud=LidarPointCloud.from_buffer(buffer, "pcd").points) - diff --git a/d123/dataset/dataset_specific/nuplan/nuplan_data_converter.py b/d123/dataset/dataset_specific/nuplan/nuplan_data_converter.py index a87d0c31..64ba1c72 100644 --- a/d123/dataset/dataset_specific/nuplan/nuplan_data_converter.py +++ b/d123/dataset/dataset_specific/nuplan/nuplan_data_converter.py @@ -12,7 +12,6 @@ import yaml from pyquaternion import Quaternion - import d123.dataset.dataset_specific.nuplan.utils as nuplan_utils from d123.common.datatypes.detection.detection import TrafficLightStatus from d123.common.datatypes.detection.detection_types import DetectionType diff --git a/d123/dataset/dataset_specific/wopd/wopd_data_converter.py b/d123/dataset/dataset_specific/wopd/wopd_data_converter.py index 1fd26b6b..d74a7ea8 100644 --- a/d123/dataset/dataset_specific/wopd/wopd_data_converter.py +++ b/d123/dataset/dataset_specific/wopd/wopd_data_converter.py @@ -12,7 +12,6 @@ import pyarrow as pa from pyquaternion import Quaternion - from d123.common.datatypes.detection.detection_types import DetectionType from d123.common.datatypes.sensor.camera import CameraMetadata, CameraType, camera_metadata_dict_to_json from d123.common.datatypes.sensor.lidar import LiDARMetadata, LiDARType, lidar_metadata_dict_to_json @@ -26,7 +25,7 @@ from d123.dataset.dataset_specific.wopd.waymo_map_utils.wopd_map_utils import convert_wopd_map from d123.dataset.dataset_specific.wopd.wopd_utils import parse_range_image_and_camera_projection from d123.dataset.logs.log_metadata import LogMetadata -from d123.geometry import BoundingBoxSE3Index, Point3D, EulerStateSE3, Vector3D, Vector3DIndex +from d123.geometry import BoundingBoxSE3Index, EulerStateSE3, Point3D, Vector3D, Vector3DIndex from d123.geometry.transform.transform_euler_se3 import convert_relative_to_absolute_euler_se3_array 
from d123.geometry.utils.constants import DEFAULT_PITCH, DEFAULT_ROLL diff --git a/d123/geometry/__init__.py b/d123/geometry/__init__.py index 68721954..d845b66c 100644 --- a/d123/geometry/__init__.py +++ b/d123/geometry/__init__.py @@ -1,18 +1,18 @@ +from d123.geometry.bounding_box import BoundingBoxSE2, BoundingBoxSE3 from d123.geometry.geometry_index import ( BoundingBoxSE2Index, BoundingBoxSE3Index, Corners2DIndex, Corners3DIndex, + EulerStateSE3Index, Point2DIndex, Point3DIndex, StateSE2Index, - EulerStateSE3Index, Vector2DIndex, Vector3DIndex, ) +from d123.geometry.occupancy_map import OccupancyMap2D from d123.geometry.point import Point2D, Point3D -from d123.geometry.vector import Vector2D, Vector3D -from d123.geometry.se import StateSE2, StateSE3, EulerStateSE3 from d123.geometry.polyline import Polyline2D, Polyline3D, PolylineSE2 -from d123.geometry.bounding_box import BoundingBoxSE2, BoundingBoxSE3 -from d123.geometry.occupancy_map import OccupancyMap2D +from d123.geometry.se import EulerStateSE3, StateSE2, StateSE3 +from d123.geometry.vector import Vector2D, Vector3D diff --git a/d123/geometry/se.py b/d123/geometry/se.py index b63bea7d..6538bfe9 100644 --- a/d123/geometry/se.py +++ b/d123/geometry/se.py @@ -8,7 +8,7 @@ from pyparsing import cached_property from d123.common.utils.mixin import ArrayMixin -from d123.geometry.geometry_index import Point3DIndex, StateSE3Index, StateSE2Index, EulerStateSE3Index +from d123.geometry.geometry_index import EulerStateSE3Index, Point3DIndex, StateSE2Index, StateSE3Index from d123.geometry.point import Point2D, Point3D from d123.geometry.rotation import EulerAngles, Quaternion diff --git a/d123/geometry/test/test_bounding_box.py b/d123/geometry/test/test_bounding_box.py index f34639ad..e8f44986 100644 --- a/d123/geometry/test/test_bounding_box.py +++ b/d123/geometry/test/test_bounding_box.py @@ -4,7 +4,7 @@ import shapely.geometry as geom from d123.common.utils.mixin import ArrayMixin -from d123.geometry import BoundingBoxSE2, BoundingBoxSE3, Point2D, Point3D, StateSE2, EulerStateSE3 +from d123.geometry import BoundingBoxSE2, BoundingBoxSE3, Point2D, Point3D, StateSE2 from d123.geometry.geometry_index import ( BoundingBoxSE2Index, BoundingBoxSE3Index, diff --git a/d123/geometry/transform/test/test_transform_consistency.py b/d123/geometry/transform/test/test_transform_consistency.py index b4f73ab1..1865a3ff 100644 --- a/d123/geometry/transform/test/test_transform_consistency.py +++ b/d123/geometry/transform/test/test_transform_consistency.py @@ -3,8 +3,17 @@ import numpy as np import numpy.typing as npt -from d123.geometry.geometry_index import Point2DIndex, Point3DIndex, StateSE2Index, EulerStateSE3Index -from d123.geometry.se import StateSE2, EulerStateSE3 +from d123.geometry.geometry_index import EulerStateSE3Index, Point2DIndex, Point3DIndex, StateSE2Index +from d123.geometry.se import EulerStateSE3, StateSE2 +from d123.geometry.transform.transform_euler_se3 import ( + convert_absolute_to_relative_euler_se3_array, + convert_absolute_to_relative_points_3d_array, + convert_relative_to_absolute_euler_se3_array, + convert_relative_to_absolute_points_3d_array, + translate_euler_se3_along_body_frame, + translate_euler_se3_along_x, + translate_euler_se3_along_y, +) from d123.geometry.transform.transform_se2 import ( convert_absolute_to_relative_point_2d_array, convert_absolute_to_relative_se2_array, @@ -15,15 +24,6 @@ translate_se2_along_y, translate_se2_array_along_body_frame, ) -from d123.geometry.transform.transform_euler_se3 import ( - 
convert_absolute_to_relative_points_3d_array, - convert_absolute_to_relative_euler_se3_array, - convert_relative_to_absolute_points_3d_array, - convert_relative_to_absolute_euler_se3_array, - translate_euler_se3_along_body_frame, - translate_euler_se3_along_x, - translate_euler_se3_along_y, -) from d123.geometry.vector import Vector2D, Vector3D diff --git a/d123/geometry/transform/test/test_transform_euler_se3.py b/d123/geometry/transform/test/test_transform_euler_se3.py index b40ab4c7..f63bc3bd 100644 --- a/d123/geometry/transform/test/test_transform_euler_se3.py +++ b/d123/geometry/transform/test/test_transform_euler_se3.py @@ -5,10 +5,10 @@ from d123.geometry.se import EulerStateSE3 from d123.geometry.transform.transform_euler_se3 import ( - convert_absolute_to_relative_points_3d_array, convert_absolute_to_relative_euler_se3_array, - convert_relative_to_absolute_points_3d_array, + convert_absolute_to_relative_points_3d_array, convert_relative_to_absolute_euler_se3_array, + convert_relative_to_absolute_points_3d_array, translate_euler_se3_along_body_frame, translate_euler_se3_along_x, translate_euler_se3_along_y, diff --git a/d123/geometry/transform/test/test_transform_se3.py b/d123/geometry/transform/test/test_transform_se3.py index d7c077ea..0a752f68 100644 --- a/d123/geometry/transform/test/test_transform_se3.py +++ b/d123/geometry/transform/test/test_transform_se3.py @@ -3,7 +3,8 @@ import numpy as np import numpy.typing as npt -from d123.geometry.geometry_index import StateSE3Index, EulerStateSE3Index +import d123.geometry.transform.transform_euler_se3 as euler_transform_se3 +from d123.geometry.geometry_index import EulerStateSE3Index, StateSE3Index from d123.geometry.point import Point3D from d123.geometry.rotation import Quaternion from d123.geometry.se import EulerStateSE3, StateSE3 @@ -12,12 +13,11 @@ convert_absolute_to_relative_se3_array, convert_relative_to_absolute_points_3d_array, convert_relative_to_absolute_se3_array, + translate_se3_along_body_frame, translate_se3_along_x, translate_se3_along_y, translate_se3_along_z, - translate_se3_along_body_frame, ) -import d123.geometry.transform.transform_euler_se3 as euler_transform_se3 from d123.geometry.utils.rotation_utils import ( get_rotation_matrices_from_euler_array, get_rotation_matrices_from_quaternion_array, diff --git a/d123/geometry/utils/rotation_utils.py b/d123/geometry/utils/rotation_utils.py index ead501b1..3d3248ce 100644 --- a/d123/geometry/utils/rotation_utils.py +++ b/d123/geometry/utils/rotation_utils.py @@ -3,10 +3,10 @@ import numpy as np import numpy.typing as npt -# import pyquaternion - from d123.geometry.geometry_index import EulerAnglesIndex, QuaternionIndex +# import pyquaternion + def batch_matmul(A: npt.NDArray[np.float64], B: npt.NDArray[np.float64]) -> npt.NDArray[np.float64]: """Batch matrix multiplication for arrays of matrices. 
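The hunk above ends at the signature of batch_matmul, whose body is not shown in this patch. A hedged sketch of the standard NumPy idiom such a helper typically wraps (the repository's real implementation may differ):

import numpy as np


def batch_matmul_sketch(A: np.ndarray, B: np.ndarray) -> np.ndarray:
    # The @ operator broadcasts over leading batch dimensions: a stack of
    # (..., m, k) matrices times a stack of (..., k, n) matrices yields
    # (..., m, n), e.g. applying many rotation matrices in one call.
    return A @ B


rng = np.random.default_rng(0)
A = rng.standard_normal((8, 3, 3))
B = rng.standard_normal((8, 3, 3))
assert batch_matmul_sketch(A, B).shape == (8, 3, 3)
# Equivalent formulation: np.einsum("...ij,...jk->...ik", A, B)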
diff --git a/d123/geometry/utils/test/test_bounding_box_utils.py b/d123/geometry/utils/test/test_bounding_box_utils.py index 1099bc6f..3bba0330 100644 --- a/d123/geometry/utils/test/test_bounding_box_utils.py +++ b/d123/geometry/utils/test/test_bounding_box_utils.py @@ -8,20 +8,17 @@ BoundingBoxSE3Index, Corners2DIndex, Corners3DIndex, + EulerStateSE3Index, Point2DIndex, Point3DIndex, - StateSE3Index, - EulerStateSE3Index, ) -from d123.geometry.point import Point3D -from d123.geometry.rotation import Quaternion from d123.geometry.se import EulerStateSE3, StateSE3 from d123.geometry.transform.transform_se3 import translate_se3_along_body_frame from d123.geometry.utils.bounding_box_utils import ( bbse2_array_to_corners_array, - corners_2d_array_to_polygon_array, bbse2_array_to_polygon_array, bbse3_array_to_corners_array, + corners_2d_array_to_polygon_array, get_corners_3d_factors, ) from d123.geometry.vector import Vector3D From 6abde9e867ff06e3c09b04a391c56bc7bca7c7a9 Mon Sep 17 00:00:00 2001 From: Daniel Dauner Date: Mon, 22 Sep 2025 20:09:17 +0200 Subject: [PATCH 041/145] Very large refactoring of folder structure and imports (#39) --- .../datatypes/recording/abstract_recording.py | 23 ----------- .../recording/detection_recording.py | 15 ------- .../utils/arrow_helper.py} | 0 d123/common/utils/timer.py | 7 +--- d123/common/visualization/color/default.py | 6 +-- .../visualization/matplotlib/observation.py | 8 ++-- d123/common/visualization/matplotlib/plots.py | 2 +- d123/common/visualization/viser/server.py | 2 +- d123/common/visualization/viser/utils.py | 6 +-- d123/common/visualization/viser/utils_v2.py | 2 +- d123/dataset/arrow/__init__.py | 2 - d123/dataset/logs/__init__.py | 0 d123/dataset/logs/log_metadata.py | 21 ---------- d123/dataset/maps/gpkg/__init__.py | 0 d123/dataset/scene/__init__.py | 0 .../detection => datasets}/__init__.py | 0 .../av2/av2_constants.py | 6 +-- .../av2/av2_data_converter.py | 26 ++++++------ .../av2/av2_helper.py | 2 +- .../av2/av2_map_conversion.py | 10 ++--- .../recording => datasets/carla}/__init__.py | 0 .../carla/carla_data_converter.py | 24 +++++------ .../carla/load_sensor.py | 2 +- .../kitti_360/.gitkeep | 0 .../time => datasets/nuplan}/__init__.py | 0 .../nuplan/load_sensor.py | 2 +- .../nuplan/nuplan_data_converter.py | 30 +++++++------- .../nuplan/nuplan_map_conversion.py | 6 +-- .../nuplan/utils/log_splits.yaml | 0 .../nuscenes/.gitkeep | 0 .../raw_data_converter.py | 0 .../utils}/__init__.py | 0 .../utils/maps}/__init__.py | 0 .../utils/maps/opendrive}/__init__.py | 0 .../opendrive/opendrive_map_conversion.py | 16 ++++---- .../utils/maps/opendrive/parser}/__init__.py | 0 .../utils/maps}/opendrive/parser/elevation.py | 2 +- .../utils/maps}/opendrive/parser/geometry.py | 0 .../utils/maps}/opendrive/parser/lane.py | 2 +- .../utils/maps}/opendrive/parser/objects.py | 0 .../utils/maps}/opendrive/parser/opendrive.py | 2 +- .../maps}/opendrive/parser/polynomial.py | 0 .../utils/maps}/opendrive/parser/reference.py | 8 ++-- .../utils/maps}/opendrive/parser/road.py | 8 ++-- .../utils/maps}/opendrive/utils/collection.py | 12 +++--- .../utils/maps}/opendrive/utils/id_mapping.py | 0 .../utils/maps}/opendrive/utils/id_system.py | 0 .../maps}/opendrive/utils/lane_helper.py | 8 ++-- .../maps}/opendrive/utils/objects_helper.py | 4 +- .../utils/maps/road_edge}/__init__.py | 0 .../maps}/road_edge/road_edge_2d_utils.py | 0 .../maps}/road_edge/road_edge_3d_utils.py | 2 +- .../waymo_map_utils/womp_boundary_utils.py | 0 .../wopd/waymo_map_utils/wopd_map_utils.py | 
9 +++-- .../wopd/wopd_data_converter.py | 22 +++++----- .../wopd/wopd_utils.py | 3 ++ d123/{common => }/datatypes/__init__.py | 0 .../detections}/__init__.py | 0 .../detections}/detection.py | 11 ++++- .../detections}/detection_types.py | 0 .../maps/abstract_map.py | 4 +- .../maps/abstract_map_objects.py | 2 +- .../maps/gpkg}/__init__.py | 0 .../maps/gpkg/gpkg_map.py | 10 ++--- .../maps/gpkg/gpkg_map_objects.py | 6 +-- .../{dataset => datatypes}/maps/gpkg/utils.py | 0 .../maps/map_datatypes.py | 0 .../scene}/__init__.py | 0 .../scene/abstract_scene.py | 38 ++++-------------- .../datatypes/scene/abstract_scene_builder.py | 24 +++++++++++ .../scene/arrow}/arrow_scene.py | 25 ++++++------ .../scene/arrow/arrow_scene_builder.py} | 26 +++--------- .../scene/arrow/utils}/conversion.py | 23 +++++------ .../scene/scene_filter.py | 2 +- d123/datatypes/scene/scene_metadata.py | 40 +++++++++++++++++++ .../sensor => datatypes/sensors}/camera.py | 0 .../sensor => datatypes/sensors}/lidar.py | 2 +- .../sensors}/lidar_index.py | 0 .../carla => datatypes/time}/__init__.py | 0 .../{common => }/datatypes/time/time_point.py | 0 .../vehicle_state}/__init__.py | 0 .../datatypes/vehicle_state/ego_state.py | 14 +++---- .../vehicle_state/vehicle_parameters.py | 2 - d123/geometry/__init__.py | 4 ++ d123/geometry/test/test_bounding_box.py | 3 +- d123/geometry/test/test_point.py | 5 +-- d123/geometry/test/test_polyline.py | 4 +- .../test/test_transform_consistency.py | 3 +- .../test/test_transform_euler_se3.py | 3 +- .../transform/test/test_transform_se2.py | 3 +- .../transform/test/test_transform_se3.py | 5 +-- .../geometry/transform/transform_euler_se3.py | 4 +- d123/geometry/transform/transform_se2.py | 4 +- d123/geometry/transform/transform_se3.py | 4 +- .../script/builders/data_converter_builder.py | 2 +- d123/script/builders/scene_builder_builder.py | 2 +- d123/script/builders/scene_filter_builder.py | 2 +- d123/script/run_preprocessing.py | 2 +- d123/script/run_simulation.py | 2 +- d123/simulation/agents/abstract_agents.py | 4 +- .../agents/constant_velocity_agents.py | 4 +- d123/simulation/agents/idm_agents.py | 6 +-- d123/simulation/agents/path_following.py | 4 +- d123/simulation/agents/smart_agents.py | 6 +-- .../controller/abstract_controller.py | 2 +- .../controller/action_controller.py | 2 +- d123/simulation/gym/demo_gym_env.py | 6 +-- .../environment/helper/environment_cache.py | 6 +-- d123/simulation/gym/gym_env.py | 4 +- d123/simulation/history/simulation_history.py | 2 +- .../history/simulation_history_buffer.py | 2 +- .../metrics/sim_agents/interaction_based.py | 2 +- .../metrics/sim_agents/map_based.py | 6 +-- .../metrics/sim_agents/sim_agents.py | 4 +- d123/simulation/metrics/sim_agents/utils.py | 2 +- .../observation/abstract_observation.py | 2 +- .../observation/agents_observation.py | 4 +- .../observation/log_replay_observation.py | 2 +- d123/simulation/planning/abstract_planner.py | 2 +- d123/simulation/simulation_2d.py | 2 +- .../abstract_time_controller.py | 2 +- .../time_controller/log_time_controller.py | 2 +- .../feature_builder/smart_feature_builder.py | 6 +-- 123 files changed, 308 insertions(+), 357 deletions(-) delete mode 100644 d123/common/datatypes/recording/abstract_recording.py delete mode 100644 d123/common/datatypes/recording/detection_recording.py rename d123/{dataset/arrow/helper.py => common/utils/arrow_helper.py} (100%) delete mode 100644 d123/dataset/arrow/__init__.py delete mode 100644 d123/dataset/logs/__init__.py delete mode 100644 d123/dataset/logs/log_metadata.py 
delete mode 100644 d123/dataset/maps/gpkg/__init__.py delete mode 100644 d123/dataset/scene/__init__.py rename d123/{common/datatypes/detection => datasets}/__init__.py (100%) rename d123/{dataset/dataset_specific => datasets}/av2/av2_constants.py (95%) rename d123/{dataset/dataset_specific => datasets}/av2/av2_data_converter.py (96%) rename d123/{dataset/dataset_specific => datasets}/av2/av2_helper.py (98%) rename d123/{dataset/dataset_specific => datasets}/av2/av2_map_conversion.py (98%) rename d123/{common/datatypes/recording => datasets/carla}/__init__.py (100%) rename d123/{dataset/dataset_specific => datasets}/carla/carla_data_converter.py (95%) rename d123/{dataset/dataset_specific => datasets}/carla/load_sensor.py (80%) rename d123/{dataset/dataset_specific => datasets}/kitti_360/.gitkeep (100%) rename d123/{common/datatypes/time => datasets/nuplan}/__init__.py (100%) rename d123/{dataset/dataset_specific => datasets}/nuplan/load_sensor.py (88%) rename d123/{dataset/dataset_specific => datasets}/nuplan/nuplan_data_converter.py (95%) rename d123/{dataset/dataset_specific => datasets}/nuplan/nuplan_map_conversion.py (98%) rename d123/{dataset/dataset_specific => datasets}/nuplan/utils/log_splits.yaml (100%) rename d123/{dataset/dataset_specific => datasets}/nuscenes/.gitkeep (100%) rename d123/{dataset/dataset_specific => datasets}/raw_data_converter.py (100%) rename d123/{common/datatypes/vehicle_state => datasets/utils}/__init__.py (100%) rename d123/{dataset => datasets/utils/maps}/__init__.py (100%) rename d123/{dataset/conversion => datasets/utils/maps/opendrive}/__init__.py (100%) rename d123/{dataset/conversion/map => datasets/utils/maps}/opendrive/opendrive_map_conversion.py (96%) rename d123/{dataset/conversion/map => datasets/utils/maps/opendrive/parser}/__init__.py (100%) rename d123/{dataset/conversion/map => datasets/utils/maps}/opendrive/parser/elevation.py (97%) rename d123/{dataset/conversion/map => datasets/utils/maps}/opendrive/parser/geometry.py (100%) rename d123/{dataset/conversion/map => datasets/utils/maps}/opendrive/parser/lane.py (98%) rename d123/{dataset/conversion/map => datasets/utils/maps}/opendrive/parser/objects.py (100%) rename d123/{dataset/conversion/map => datasets/utils/maps}/opendrive/parser/opendrive.py (99%) rename d123/{dataset/conversion/map => datasets/utils/maps}/opendrive/parser/polynomial.py (100%) rename d123/{dataset/conversion/map => datasets/utils/maps}/opendrive/parser/reference.py (94%) rename d123/{dataset/conversion/map => datasets/utils/maps}/opendrive/parser/road.py (93%) rename d123/{dataset/conversion/map => datasets/utils/maps}/opendrive/utils/collection.py (96%) rename d123/{dataset/conversion/map => datasets/utils/maps}/opendrive/utils/id_mapping.py (100%) rename d123/{dataset/conversion/map => datasets/utils/maps}/opendrive/utils/id_system.py (100%) rename d123/{dataset/conversion/map => datasets/utils/maps}/opendrive/utils/lane_helper.py (97%) rename d123/{dataset/conversion/map => datasets/utils/maps}/opendrive/utils/objects_helper.py (94%) rename d123/{dataset/conversion/map/opendrive => datasets/utils/maps/road_edge}/__init__.py (100%) rename d123/{dataset/conversion/map => datasets/utils/maps}/road_edge/road_edge_2d_utils.py (100%) rename d123/{dataset/conversion/map => datasets/utils/maps}/road_edge/road_edge_3d_utils.py (99%) rename d123/{dataset/dataset_specific => datasets}/wopd/waymo_map_utils/womp_boundary_utils.py (100%) rename d123/{dataset/dataset_specific => datasets}/wopd/waymo_map_utils/wopd_map_utils.py 
(97%) rename d123/{dataset/dataset_specific => datasets}/wopd/wopd_data_converter.py (95%) rename d123/{dataset/dataset_specific => datasets}/wopd/wopd_utils.py (96%) rename d123/{common => }/datatypes/__init__.py (100%) rename d123/{dataset/conversion/map/opendrive/parser => datatypes/detections}/__init__.py (100%) rename d123/{common/datatypes/detection => datatypes/detections}/detection.py (94%) rename d123/{common/datatypes/detection => datatypes/detections}/detection_types.py (100%) rename d123/{dataset => datatypes}/maps/abstract_map.py (94%) rename d123/{dataset => datatypes}/maps/abstract_map_objects.py (98%) rename d123/{dataset/conversion/map/road_edge => datatypes/maps/gpkg}/__init__.py (100%) rename d123/{dataset => datatypes}/maps/gpkg/gpkg_map.py (98%) rename d123/{dataset => datatypes}/maps/gpkg/gpkg_map_objects.py (98%) rename d123/{dataset => datatypes}/maps/gpkg/utils.py (100%) rename d123/{dataset => datatypes}/maps/map_datatypes.py (100%) rename d123/{dataset/dataset_specific => datatypes/scene}/__init__.py (100%) rename d123/{dataset => datatypes}/scene/abstract_scene.py (66%) create mode 100644 d123/datatypes/scene/abstract_scene_builder.py rename d123/{dataset/scene => datatypes/scene/arrow}/arrow_scene.py (88%) rename d123/{dataset/scene/scene_builder.py => datatypes/scene/arrow/arrow_scene_builder.py} (87%) rename d123/{dataset/arrow => datatypes/scene/arrow/utils}/conversion.py (88%) rename d123/{dataset => datatypes}/scene/scene_filter.py (96%) create mode 100644 d123/datatypes/scene/scene_metadata.py rename d123/{common/datatypes/sensor => datatypes/sensors}/camera.py (100%) rename d123/{common/datatypes/sensor => datatypes/sensors}/lidar.py (97%) rename d123/{common/datatypes/sensor => datatypes/sensors}/lidar_index.py (100%) rename d123/{dataset/dataset_specific/carla => datatypes/time}/__init__.py (100%) rename d123/{common => }/datatypes/time/time_point.py (100%) rename d123/{dataset/dataset_specific/nuplan => datatypes/vehicle_state}/__init__.py (100%) rename d123/{common => }/datatypes/vehicle_state/ego_state.py (96%) rename d123/{common => }/datatypes/vehicle_state/vehicle_parameters.py (98%) diff --git a/d123/common/datatypes/recording/abstract_recording.py b/d123/common/datatypes/recording/abstract_recording.py deleted file mode 100644 index 4554d10f..00000000 --- a/d123/common/datatypes/recording/abstract_recording.py +++ /dev/null @@ -1,23 +0,0 @@ -import abc -from dataclasses import dataclass -from enum import IntEnum - - -class RecordingType(IntEnum): - DETECTION = 0 - # SENSOR = 1 NOTE: not used yet, but reserved for future use - - -@dataclass -class Recording(abc.ABC): - """ - Abstract observation container. - """ - - # @classmethod - # @abc.abstractmethod - # def observation_type(cls) -> ObservationType: - # """ - # Returns detection type of the observation. - # """ - # raise NotImplementedError diff --git a/d123/common/datatypes/recording/detection_recording.py b/d123/common/datatypes/recording/detection_recording.py deleted file mode 100644 index 19b68827..00000000 --- a/d123/common/datatypes/recording/detection_recording.py +++ /dev/null @@ -1,15 +0,0 @@ -from dataclasses import dataclass - -from d123.common.datatypes.detection.detection import BoxDetectionWrapper, TrafficLightDetectionWrapper -from d123.common.datatypes.recording.abstract_recording import Recording - -# TODO: Reconsider if these "wrapper" datatypes are necessary. -# Might be needed to package multiple datatypes into a single object (e.g. 
as planner input) -# On the other hand, an enum based dictionary might be more flexible. - - -@dataclass -class DetectionRecording(Recording): - - box_detections: BoxDetectionWrapper - traffic_light_detections: TrafficLightDetectionWrapper diff --git a/d123/dataset/arrow/helper.py b/d123/common/utils/arrow_helper.py similarity index 100% rename from d123/dataset/arrow/helper.py rename to d123/common/utils/arrow_helper.py diff --git a/d123/common/utils/timer.py b/d123/common/utils/timer.py index adcc9752..b2c57015 100644 --- a/d123/common/utils/timer.py +++ b/d123/common/utils/timer.py @@ -59,21 +59,16 @@ def stats(self, verbose: bool = True) -> Optional[pd.DataFrame]: """ Returns a DataFrame with statistics of the logged times. :param verbose: whether to print the timings, defaults to True - :return: pandas dataframe.F + :return: pandas dataframe. """ statistics = {} - for key, timings in self._time_logs.items(): - timings_array = np.array(timings) timings_statistics = {} - for name, function in self._statistic_functions.items(): timings_statistics[name] = function(timings_array) - statistics[key] = timings_statistics - dataframe = pd.DataFrame.from_dict(statistics).transpose() if verbose: diff --git a/d123/common/visualization/color/default.py b/d123/common/visualization/color/default.py index 076a1c81..9eda3f7c 100644 --- a/d123/common/visualization/color/default.py +++ b/d123/common/visualization/color/default.py @@ -1,7 +1,5 @@ from typing import Dict -from d123.common.datatypes.detection.detection import TrafficLightStatus -from d123.common.datatypes.detection.detection_types import DetectionType from d123.common.visualization.color.color import ( BLACK, DARKER_GREY, @@ -13,7 +11,9 @@ Color, ) from d123.common.visualization.color.config import PlotConfig -from d123.dataset.maps.map_datatypes import MapLayer +from d123.datatypes.detections.detection import TrafficLightStatus +from d123.datatypes.detections.detection_types import DetectionType +from d123.datatypes.maps.map_datatypes import MapLayer HEADING_MARKER_STYLE: str = "^" # "^": triangle, "-": line diff --git a/d123/common/visualization/matplotlib/observation.py b/d123/common/visualization/matplotlib/observation.py index 27a2fff1..eb37ad74 100644 --- a/d123/common/visualization/matplotlib/observation.py +++ b/d123/common/visualization/matplotlib/observation.py @@ -22,10 +22,10 @@ get_pose_triangle, shapely_geometry_local_coords, ) -from d123.dataset.maps.abstract_map import AbstractMap -from d123.dataset.maps.abstract_map_objects import AbstractLane -from d123.dataset.maps.map_datatypes import MapLayer -from d123.dataset.scene.abstract_scene import AbstractScene +from d123.datasets.maps.abstract_map import AbstractMap +from d123.datasets.maps.abstract_map_objects import AbstractLane +from d123.datasets.maps.map_datatypes import MapLayer +from d123.datasets.scene.abstract_scene import AbstractScene from d123.geometry import BoundingBoxSE2, BoundingBoxSE3, Point2D from d123.geometry.geometry_index import StateSE2Index from d123.geometry.transform.transform_se2 import translate_se2_along_body_frame diff --git a/d123/common/visualization/matplotlib/plots.py b/d123/common/visualization/matplotlib/plots.py index 80e51714..7a1010cb 100644 --- a/d123/common/visualization/matplotlib/plots.py +++ b/d123/common/visualization/matplotlib/plots.py @@ -11,7 +11,7 @@ add_ego_vehicle_to_ax, add_traffic_lights_to_ax, ) -from d123.dataset.scene.abstract_scene import AbstractScene +from d123.datasets.scene.abstract_scene import AbstractScene def 
_plot_scene_on_ax(ax: plt.Axes, scene: AbstractScene, iteration: int = 0, radius: float = 80) -> plt.Axes: diff --git a/d123/common/visualization/viser/server.py b/d123/common/visualization/viser/server.py index c9ce3601..ed5f7a17 100644 --- a/d123/common/visualization/viser/server.py +++ b/d123/common/visualization/viser/server.py @@ -15,7 +15,7 @@ get_map_meshes, ) from d123.common.visualization.viser.utils_v2 import get_bounding_box_outlines -from d123.dataset.scene.abstract_scene import AbstractScene +from d123.datasets.scene.abstract_scene import AbstractScene # TODO: Try to fix performance issues. # TODO: Refactor this file. diff --git a/d123/common/visualization/viser/utils.py b/d123/common/visualization/viser/utils.py index 1ef2ca18..af209718 100644 --- a/d123/common/visualization/viser/utils.py +++ b/d123/common/visualization/viser/utils.py @@ -12,9 +12,9 @@ from d123.common.visualization.color.color import TAB_10, Color from d123.common.visualization.color.config import PlotConfig from d123.common.visualization.color.default import BOX_DETECTION_CONFIG, EGO_VEHICLE_CONFIG, MAP_SURFACE_CONFIG -from d123.dataset.maps.abstract_map import MapLayer -from d123.dataset.maps.abstract_map_objects import AbstractLane, AbstractSurfaceMapObject -from d123.dataset.scene.abstract_scene import AbstractScene +from d123.datasets.maps.abstract_map import MapLayer +from d123.datasets.maps.abstract_map_objects import AbstractLane, AbstractSurfaceMapObject +from d123.datasets.scene.abstract_scene import AbstractScene from d123.geometry import BoundingBoxSE3, EulerStateSE3, Point3D, Polyline3D from d123.geometry.transform.transform_euler_se3 import convert_relative_to_absolute_points_3d_array diff --git a/d123/common/visualization/viser/utils_v2.py b/d123/common/visualization/viser/utils_v2.py index 54c4eaab..9b4fd8d8 100644 --- a/d123/common/visualization/viser/utils_v2.py +++ b/d123/common/visualization/viser/utils_v2.py @@ -3,7 +3,7 @@ from d123.common.visualization.color.default import BOX_DETECTION_CONFIG, EGO_VEHICLE_CONFIG from d123.common.visualization.viser.utils import BRIGHTNESS_FACTOR -from d123.dataset.scene.abstract_scene import AbstractScene +from d123.datasets.scene.abstract_scene import AbstractScene # from d123.common.datatypes.sensor.camera_parameters import get_nuplan_camera_parameters from d123.geometry import BoundingBoxSE3, Corners3DIndex, Point3D, Point3DIndex, Vector3D diff --git a/d123/dataset/arrow/__init__.py b/d123/dataset/arrow/__init__.py deleted file mode 100644 index 406e0d32..00000000 --- a/d123/dataset/arrow/__init__.py +++ /dev/null @@ -1,2 +0,0 @@ -class SceneBuilder: - pass diff --git a/d123/dataset/logs/__init__.py b/d123/dataset/logs/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/d123/dataset/logs/log_metadata.py b/d123/dataset/logs/log_metadata.py deleted file mode 100644 index 818bc495..00000000 --- a/d123/dataset/logs/log_metadata.py +++ /dev/null @@ -1,21 +0,0 @@ -from dataclasses import dataclass - -import d123 - -# TODO: move this files and dataclass to a more appropriate place. 
- - -@dataclass -class LogMetadata: - - # TODO: add - # - split - # - global/local map - - dataset: str - log_name: str - location: str - timestep_seconds: float - - map_has_z: bool - version: str = str(d123.__version__) diff --git a/d123/dataset/maps/gpkg/__init__.py b/d123/dataset/maps/gpkg/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/d123/dataset/scene/__init__.py b/d123/dataset/scene/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/d123/common/datatypes/detection/__init__.py b/d123/datasets/__init__.py similarity index 100% rename from d123/common/datatypes/detection/__init__.py rename to d123/datasets/__init__.py diff --git a/d123/dataset/dataset_specific/av2/av2_constants.py b/d123/datasets/av2/av2_constants.py similarity index 95% rename from d123/dataset/dataset_specific/av2/av2_constants.py rename to d123/datasets/av2/av2_constants.py index 836bb12c..5d163245 100644 --- a/d123/dataset/dataset_specific/av2/av2_constants.py +++ b/d123/datasets/av2/av2_constants.py @@ -1,7 +1,7 @@ -from d123.common.datatypes.detection.detection_types import DetectionType -from d123.common.datatypes.sensor.camera import CameraType from d123.common.utils.enums import SerialIntEnum -from d123.dataset.maps.map_datatypes import RoadLineType +from d123.datatypes.detections.detection_types import DetectionType +from d123.datatypes.maps.map_datatypes import RoadLineType +from d123.datatypes.sensors.camera import CameraType class AV2SensorBoxDetectionType(SerialIntEnum): diff --git a/d123/dataset/dataset_specific/av2/av2_data_converter.py b/d123/datasets/av2/av2_data_converter.py similarity index 96% rename from d123/dataset/dataset_specific/av2/av2_data_converter.py rename to d123/datasets/av2/av2_data_converter.py index ffa45db3..11e2549b 100644 --- a/d123/dataset/dataset_specific/av2/av2_data_converter.py +++ b/d123/datasets/av2/av2_data_converter.py @@ -11,29 +11,29 @@ import pyarrow as pa from pyquaternion import Quaternion -from d123.common.datatypes.sensor.camera import CameraMetadata, CameraType, camera_metadata_dict_to_json -from d123.common.datatypes.sensor.lidar import LiDARMetadata, LiDARType, lidar_metadata_dict_to_json -from d123.common.datatypes.time.time_point import TimePoint -from d123.common.datatypes.vehicle_state.ego_state import DynamicStateSE3, EgoStateSE3, EgoStateSE3Index -from d123.common.datatypes.vehicle_state.vehicle_parameters import ( - get_av2_ford_fusion_hybrid_parameters, - rear_axle_se3_to_center_se3, -) from d123.common.multithreading.worker_utils import WorkerPool, worker_map -from d123.dataset.dataset_specific.av2.av2_constants import ( +from d123.datasets.av2.av2_constants import ( AV2_CAMERA_TYPE_MAPPING, AV2_TO_DETECTION_TYPE, AV2SensorBoxDetectionType, ) -from d123.dataset.dataset_specific.av2.av2_helper import ( +from d123.datasets.av2.av2_helper import ( build_sensor_dataframe, build_synchronization_dataframe, find_closest_target_fpath, get_slice_with_timestamp_ns, ) -from d123.dataset.dataset_specific.av2.av2_map_conversion import convert_av2_map -from d123.dataset.dataset_specific.raw_data_converter import DataConverterConfig, RawDataConverter -from d123.dataset.logs.log_metadata import LogMetadata +from d123.datasets.av2.av2_map_conversion import convert_av2_map +from d123.datasets.raw_data_converter import DataConverterConfig, RawDataConverter +from d123.datatypes.scene.scene_metadata import LogMetadata +from d123.datatypes.sensors.camera import CameraMetadata, CameraType, camera_metadata_dict_to_json +from 
d123.datatypes.sensors.lidar import LiDARMetadata, LiDARType, lidar_metadata_dict_to_json +from d123.datatypes.time.time_point import TimePoint +from d123.datatypes.vehicle_state.ego_state import DynamicStateSE3, EgoStateSE3, EgoStateSE3Index +from d123.datatypes.vehicle_state.vehicle_parameters import ( + get_av2_ford_fusion_hybrid_parameters, + rear_axle_se3_to_center_se3, +) from d123.geometry import BoundingBoxSE3Index, EulerStateSE3, Vector3D, Vector3DIndex from d123.geometry.transform.transform_euler_se3 import ( convert_relative_to_absolute_euler_se3_array, diff --git a/d123/dataset/dataset_specific/av2/av2_helper.py b/d123/datasets/av2/av2_helper.py similarity index 98% rename from d123/dataset/dataset_specific/av2/av2_helper.py rename to d123/datasets/av2/av2_helper.py index 8ad881cc..5e130eeb 100644 --- a/d123/dataset/dataset_specific/av2/av2_helper.py +++ b/d123/datasets/av2/av2_helper.py @@ -3,7 +3,7 @@ import pandas as pd -from d123.dataset.dataset_specific.av2.av2_constants import AV2_CAMERA_TYPE_MAPPING +from d123.datasets.av2.av2_constants import AV2_CAMERA_TYPE_MAPPING AV2_SENSOR_CAM_SHUTTER_INTERVAL_MS: Final[float] = 50.0 AV2_SENSOR_LIDAR_SWEEP_INTERVAL_W_BUFFER_NS: Final[float] = 102000000.0 diff --git a/d123/dataset/dataset_specific/av2/av2_map_conversion.py b/d123/datasets/av2/av2_map_conversion.py similarity index 98% rename from d123/dataset/dataset_specific/av2/av2_map_conversion.py rename to d123/datasets/av2/av2_map_conversion.py index daadcd3b..d158007d 100644 --- a/d123/dataset/dataset_specific/av2/av2_map_conversion.py +++ b/d123/datasets/av2/av2_map_conversion.py @@ -10,12 +10,10 @@ import shapely import shapely.geometry as geom -from d123.dataset.conversion.map.road_edge.road_edge_2d_utils import split_line_geometry_by_max_length -from d123.dataset.conversion.map.road_edge.road_edge_3d_utils import ( - get_road_edges_3d_from_generic_drivable_area_df, -) -from d123.dataset.dataset_specific.av2.av2_constants import AV2_ROAD_LINE_TYPE_MAPPING -from d123.dataset.maps.map_datatypes import MapLayer, RoadEdgeType +from d123.datasets.av2.av2_constants import AV2_ROAD_LINE_TYPE_MAPPING +from d123.datasets.utils.maps.road_edge.road_edge_2d_utils import split_line_geometry_by_max_length +from d123.datasets.utils.maps.road_edge.road_edge_3d_utils import get_road_edges_3d_from_generic_drivable_area_df +from d123.datatypes.maps.map_datatypes import MapLayer, RoadEdgeType from d123.geometry import OccupancyMap2D, Point3DIndex, Polyline2D, Polyline3D LANE_GROUP_MARK_TYPES: List[str] = [ diff --git a/d123/common/datatypes/recording/__init__.py b/d123/datasets/carla/__init__.py similarity index 100% rename from d123/common/datatypes/recording/__init__.py rename to d123/datasets/carla/__init__.py diff --git a/d123/dataset/dataset_specific/carla/carla_data_converter.py b/d123/datasets/carla/carla_data_converter.py similarity index 95% rename from d123/dataset/dataset_specific/carla/carla_data_converter.py rename to d123/datasets/carla/carla_data_converter.py index c6ce3622..ec3a4824 100644 --- a/d123/dataset/dataset_specific/carla/carla_data_converter.py +++ b/d123/datasets/carla/carla_data_converter.py @@ -11,19 +11,19 @@ import numpy as np import pyarrow as pa -from d123.common.datatypes.sensor.camera import CameraMetadata, CameraType, camera_metadata_dict_to_json -from d123.common.datatypes.sensor.lidar import LiDARMetadata, LiDARType, lidar_metadata_dict_to_json -from d123.common.datatypes.sensor.lidar_index import CarlaLidarIndex -from 
d123.common.datatypes.vehicle_state.ego_state import EgoStateSE3Index -from d123.common.datatypes.vehicle_state.vehicle_parameters import get_carla_lincoln_mkz_2020_parameters from d123.common.multithreading.worker_utils import WorkerPool, worker_map -from d123.dataset.arrow.helper import open_arrow_table, write_arrow_table -from d123.dataset.conversion.map.opendrive.opendrive_map_conversion import convert_from_xodr -from d123.dataset.dataset_specific.raw_data_converter import DataConverterConfig, RawDataConverter -from d123.dataset.logs.log_metadata import LogMetadata -from d123.dataset.maps.abstract_map import AbstractMap, MapLayer -from d123.dataset.maps.abstract_map_objects import AbstractLane -from d123.dataset.scene.arrow_scene import get_map_api_from_names +from d123.common.utils.arrow_helper import open_arrow_table, write_arrow_table +from d123.datasets.raw_data_converter import DataConverterConfig, RawDataConverter +from d123.datasets.utils.maps.opendrive.opendrive_map_conversion import convert_from_xodr +from d123.datatypes.maps.abstract_map import AbstractMap, MapLayer +from d123.datatypes.maps.abstract_map_objects import AbstractLane +from d123.datatypes.maps.gpkg.gpkg_map import get_map_api_from_names +from d123.datatypes.scene.scene_metadata import LogMetadata +from d123.datatypes.sensors.camera import CameraMetadata, CameraType, camera_metadata_dict_to_json +from d123.datatypes.sensors.lidar import LiDARMetadata, LiDARType, lidar_metadata_dict_to_json +from d123.datatypes.sensors.lidar_index import CarlaLidarIndex +from d123.datatypes.vehicle_state.ego_state import EgoStateSE3Index +from d123.datatypes.vehicle_state.vehicle_parameters import get_carla_lincoln_mkz_2020_parameters from d123.geometry import BoundingBoxSE3Index, Point2D, Point3D, Vector3DIndex AVAILABLE_CARLA_MAP_LOCATIONS: Final[List[str]] = [ diff --git a/d123/dataset/dataset_specific/carla/load_sensor.py b/d123/datasets/carla/load_sensor.py similarity index 80% rename from d123/dataset/dataset_specific/carla/load_sensor.py rename to d123/datasets/carla/load_sensor.py index 56ff68fa..5fcbc890 100644 --- a/d123/dataset/dataset_specific/carla/load_sensor.py +++ b/d123/datasets/carla/load_sensor.py @@ -2,7 +2,7 @@ import numpy as np -from d123.common.datatypes.sensor.lidar import LiDAR, LiDARMetadata +from d123.datatypes.sensors.lidar import LiDAR, LiDARMetadata def load_carla_lidar_from_path(filepath: Path, lidar_metadata: LiDARMetadata) -> LiDAR: diff --git a/d123/dataset/dataset_specific/kitti_360/.gitkeep b/d123/datasets/kitti_360/.gitkeep similarity index 100% rename from d123/dataset/dataset_specific/kitti_360/.gitkeep rename to d123/datasets/kitti_360/.gitkeep diff --git a/d123/common/datatypes/time/__init__.py b/d123/datasets/nuplan/__init__.py similarity index 100% rename from d123/common/datatypes/time/__init__.py rename to d123/datasets/nuplan/__init__.py diff --git a/d123/dataset/dataset_specific/nuplan/load_sensor.py b/d123/datasets/nuplan/load_sensor.py similarity index 88% rename from d123/dataset/dataset_specific/nuplan/load_sensor.py rename to d123/datasets/nuplan/load_sensor.py index f7dd16e8..c00e4f31 100644 --- a/d123/dataset/dataset_specific/nuplan/load_sensor.py +++ b/d123/datasets/nuplan/load_sensor.py @@ -1,8 +1,8 @@ import io from pathlib import Path -from d123.common.datatypes.sensor.lidar import LiDAR, LiDARMetadata from d123.common.utils.dependencies import check_dependencies +from d123.datatypes.sensors.lidar import LiDAR, LiDARMetadata check_dependencies(["nuplan"], "nuplan") from 
nuplan.database.utils.pointclouds.lidar import LidarPointCloud diff --git a/d123/dataset/dataset_specific/nuplan/nuplan_data_converter.py b/d123/datasets/nuplan/nuplan_data_converter.py similarity index 95% rename from d123/dataset/dataset_specific/nuplan/nuplan_data_converter.py rename to d123/datasets/nuplan/nuplan_data_converter.py index 64ba1c72..a7386917 100644 --- a/d123/dataset/dataset_specific/nuplan/nuplan_data_converter.py +++ b/d123/datasets/nuplan/nuplan_data_converter.py @@ -12,24 +12,24 @@ import yaml from pyquaternion import Quaternion -import d123.dataset.dataset_specific.nuplan.utils as nuplan_utils -from d123.common.datatypes.detection.detection import TrafficLightStatus -from d123.common.datatypes.detection.detection_types import DetectionType -from d123.common.datatypes.sensor.camera import CameraMetadata, CameraType, camera_metadata_dict_to_json -from d123.common.datatypes.sensor.lidar import LiDARMetadata, LiDARType, lidar_metadata_dict_to_json -from d123.common.datatypes.sensor.lidar_index import NuplanLidarIndex -from d123.common.datatypes.time.time_point import TimePoint -from d123.common.datatypes.vehicle_state.ego_state import DynamicStateSE3, EgoStateSE3, EgoStateSE3Index -from d123.common.datatypes.vehicle_state.vehicle_parameters import ( +import d123.datasets.nuplan.utils as nuplan_utils +from d123.common.multithreading.worker_utils import WorkerPool, worker_map +from d123.common.utils.arrow_helper import open_arrow_table, write_arrow_table +from d123.common.utils.dependencies import check_dependencies +from d123.datasets.nuplan.nuplan_map_conversion import MAP_LOCATIONS, NuPlanMapConverter +from d123.datasets.raw_data_converter import DataConverterConfig, RawDataConverter +from d123.datatypes.detections.detection import TrafficLightStatus +from d123.datatypes.detections.detection_types import DetectionType +from d123.datatypes.scene.scene_metadata import LogMetadata +from d123.datatypes.sensors.camera import CameraMetadata, CameraType, camera_metadata_dict_to_json +from d123.datatypes.sensors.lidar import LiDARMetadata, LiDARType, lidar_metadata_dict_to_json +from d123.datatypes.sensors.lidar_index import NuplanLidarIndex +from d123.datatypes.time.time_point import TimePoint +from d123.datatypes.vehicle_state.ego_state import DynamicStateSE3, EgoStateSE3, EgoStateSE3Index +from d123.datatypes.vehicle_state.vehicle_parameters import ( get_nuplan_chrysler_pacifica_parameters, rear_axle_se3_to_center_se3, ) -from d123.common.multithreading.worker_utils import WorkerPool, worker_map -from d123.common.utils.dependencies import check_dependencies -from d123.dataset.arrow.helper import open_arrow_table, write_arrow_table -from d123.dataset.dataset_specific.nuplan.nuplan_map_conversion import MAP_LOCATIONS, NuPlanMapConverter -from d123.dataset.dataset_specific.raw_data_converter import DataConverterConfig, RawDataConverter -from d123.dataset.logs.log_metadata import LogMetadata from d123.geometry import BoundingBoxSE3, BoundingBoxSE3Index, EulerStateSE3, Vector3D, Vector3DIndex from d123.geometry.utils.constants import DEFAULT_PITCH, DEFAULT_ROLL diff --git a/d123/dataset/dataset_specific/nuplan/nuplan_map_conversion.py b/d123/datasets/nuplan/nuplan_map_conversion.py similarity index 98% rename from d123/dataset/dataset_specific/nuplan/nuplan_map_conversion.py rename to d123/datasets/nuplan/nuplan_map_conversion.py index 78f5d3f6..7b444e5c 100644 --- a/d123/dataset/dataset_specific/nuplan/nuplan_map_conversion.py +++ b/d123/datasets/nuplan/nuplan_map_conversion.py 
@@ -11,12 +11,12 @@ import pyogrio from shapely.geometry import LineString -from d123.dataset.conversion.map.road_edge.road_edge_2d_utils import ( +from d123.datasets.utils.maps.road_edge.road_edge_2d_utils import ( get_road_edge_linear_rings, split_line_geometry_by_max_length, ) -from d123.dataset.maps.gpkg.utils import get_all_rows_with_value, get_row_with_value -from d123.dataset.maps.map_datatypes import MapLayer, RoadEdgeType, RoadLineType +from d123.datatypes.maps.gpkg.utils import get_all_rows_with_value, get_row_with_value +from d123.datatypes.maps.map_datatypes import MapLayer, RoadEdgeType, RoadLineType MAP_FILES = { "sg-one-north": "sg-one-north/9.17.1964/map.gpkg", diff --git a/d123/dataset/dataset_specific/nuplan/utils/log_splits.yaml b/d123/datasets/nuplan/utils/log_splits.yaml similarity index 100% rename from d123/dataset/dataset_specific/nuplan/utils/log_splits.yaml rename to d123/datasets/nuplan/utils/log_splits.yaml diff --git a/d123/dataset/dataset_specific/nuscenes/.gitkeep b/d123/datasets/nuscenes/.gitkeep similarity index 100% rename from d123/dataset/dataset_specific/nuscenes/.gitkeep rename to d123/datasets/nuscenes/.gitkeep diff --git a/d123/dataset/dataset_specific/raw_data_converter.py b/d123/datasets/raw_data_converter.py similarity index 100% rename from d123/dataset/dataset_specific/raw_data_converter.py rename to d123/datasets/raw_data_converter.py diff --git a/d123/common/datatypes/vehicle_state/__init__.py b/d123/datasets/utils/__init__.py similarity index 100% rename from d123/common/datatypes/vehicle_state/__init__.py rename to d123/datasets/utils/__init__.py diff --git a/d123/dataset/__init__.py b/d123/datasets/utils/maps/__init__.py similarity index 100% rename from d123/dataset/__init__.py rename to d123/datasets/utils/maps/__init__.py diff --git a/d123/dataset/conversion/__init__.py b/d123/datasets/utils/maps/opendrive/__init__.py similarity index 100% rename from d123/dataset/conversion/__init__.py rename to d123/datasets/utils/maps/opendrive/__init__.py diff --git a/d123/dataset/conversion/map/opendrive/opendrive_map_conversion.py b/d123/datasets/utils/maps/opendrive/opendrive_map_conversion.py similarity index 96% rename from d123/dataset/conversion/map/opendrive/opendrive_map_conversion.py rename to d123/datasets/utils/maps/opendrive/opendrive_map_conversion.py index 56dfa65c..c011a07f 100644 --- a/d123/dataset/conversion/map/opendrive/opendrive_map_conversion.py +++ b/d123/datasets/utils/maps/opendrive/opendrive_map_conversion.py @@ -10,19 +10,19 @@ import shapely from shapely.ops import polygonize, unary_union -from d123.dataset.conversion.map.opendrive.parser.opendrive import Junction, OpenDrive -from d123.dataset.conversion.map.opendrive.utils.collection import collect_element_helpers -from d123.dataset.conversion.map.opendrive.utils.id_mapping import IntIDMapping -from d123.dataset.conversion.map.opendrive.utils.lane_helper import ( +from d123.datasets.maps.map_datatypes import MapLayer, RoadEdgeType, RoadLineType +from d123.datasets.utils.maps.opendrive.parser.opendrive import Junction, OpenDrive +from d123.datasets.utils.maps.opendrive.utils.collection import collect_element_helpers +from d123.datasets.utils.maps.opendrive.utils.id_mapping import IntIDMapping +from d123.datasets.utils.maps.opendrive.utils.lane_helper import ( OpenDriveLaneGroupHelper, OpenDriveLaneHelper, ) -from d123.dataset.conversion.map.opendrive.utils.objects_helper import ( +from d123.datasets.utils.maps.opendrive.utils.objects_helper import ( OpenDriveObjectHelper, 
) -from d123.dataset.conversion.map.road_edge.road_edge_2d_utils import split_line_geometry_by_max_length -from d123.dataset.conversion.map.road_edge.road_edge_3d_utils import get_road_edges_3d_from_gdf -from d123.dataset.maps.map_datatypes import MapLayer, RoadEdgeType, RoadLineType +from d123.datasets.utils.maps.road_edge.road_edge_2d_utils import split_line_geometry_by_max_length +from d123.datasets.utils.maps.road_edge.road_edge_3d_utils import get_road_edges_3d_from_gdf logger = logging.getLogger(__name__) D123_MAPS_ROOT = Path(os.environ.get("D123_MAPS_ROOT")) diff --git a/d123/dataset/conversion/map/__init__.py b/d123/datasets/utils/maps/opendrive/parser/__init__.py similarity index 100% rename from d123/dataset/conversion/map/__init__.py rename to d123/datasets/utils/maps/opendrive/parser/__init__.py diff --git a/d123/dataset/conversion/map/opendrive/parser/elevation.py b/d123/datasets/utils/maps/opendrive/parser/elevation.py similarity index 97% rename from d123/dataset/conversion/map/opendrive/parser/elevation.py rename to d123/datasets/utils/maps/opendrive/parser/elevation.py index 7ee7aa67..a529bc0b 100644 --- a/d123/dataset/conversion/map/opendrive/parser/elevation.py +++ b/d123/datasets/utils/maps/opendrive/parser/elevation.py @@ -4,7 +4,7 @@ from typing import List, Optional from xml.etree.ElementTree import Element -from d123.dataset.conversion.map.opendrive.parser.polynomial import Polynomial +from d123.datasets.utils.maps.opendrive.parser.polynomial import Polynomial @dataclass diff --git a/d123/dataset/conversion/map/opendrive/parser/geometry.py b/d123/datasets/utils/maps/opendrive/parser/geometry.py similarity index 100% rename from d123/dataset/conversion/map/opendrive/parser/geometry.py rename to d123/datasets/utils/maps/opendrive/parser/geometry.py diff --git a/d123/dataset/conversion/map/opendrive/parser/lane.py b/d123/datasets/utils/maps/opendrive/parser/lane.py similarity index 98% rename from d123/dataset/conversion/map/opendrive/parser/lane.py rename to d123/datasets/utils/maps/opendrive/parser/lane.py index ccbfc9ff..e27de490 100644 --- a/d123/dataset/conversion/map/opendrive/parser/lane.py +++ b/d123/datasets/utils/maps/opendrive/parser/lane.py @@ -4,7 +4,7 @@ from typing import List, Optional from xml.etree.ElementTree import Element -from d123.dataset.conversion.map.opendrive.parser.polynomial import Polynomial +from d123.datasets.utils.maps.opendrive.parser.polynomial import Polynomial @dataclass diff --git a/d123/dataset/conversion/map/opendrive/parser/objects.py b/d123/datasets/utils/maps/opendrive/parser/objects.py similarity index 100% rename from d123/dataset/conversion/map/opendrive/parser/objects.py rename to d123/datasets/utils/maps/opendrive/parser/objects.py diff --git a/d123/dataset/conversion/map/opendrive/parser/opendrive.py b/d123/datasets/utils/maps/opendrive/parser/opendrive.py similarity index 99% rename from d123/dataset/conversion/map/opendrive/parser/opendrive.py rename to d123/datasets/utils/maps/opendrive/parser/opendrive.py index 2a07bb5f..4ed7bb33 100644 --- a/d123/dataset/conversion/map/opendrive/parser/opendrive.py +++ b/d123/datasets/utils/maps/opendrive/parser/opendrive.py @@ -6,7 +6,7 @@ from typing import List, Literal, Optional from xml.etree.ElementTree import Element, parse -from d123.dataset.conversion.map.opendrive.parser.road import Road +from d123.datasets.utils.maps.opendrive.parser.road import Road @dataclass diff --git a/d123/dataset/conversion/map/opendrive/parser/polynomial.py 
b/d123/datasets/utils/maps/opendrive/parser/polynomial.py similarity index 100% rename from d123/dataset/conversion/map/opendrive/parser/polynomial.py rename to d123/datasets/utils/maps/opendrive/parser/polynomial.py diff --git a/d123/dataset/conversion/map/opendrive/parser/reference.py b/d123/datasets/utils/maps/opendrive/parser/reference.py similarity index 94% rename from d123/dataset/conversion/map/opendrive/parser/reference.py rename to d123/datasets/utils/maps/opendrive/parser/reference.py index bc13537d..ed19a98d 100644 --- a/d123/dataset/conversion/map/opendrive/parser/reference.py +++ b/d123/datasets/utils/maps/opendrive/parser/reference.py @@ -9,10 +9,10 @@ import numpy as np import numpy.typing as npt -from d123.dataset.conversion.map.opendrive.parser.elevation import Elevation -from d123.dataset.conversion.map.opendrive.parser.geometry import Arc, Geometry, Line, Spiral -from d123.dataset.conversion.map.opendrive.parser.lane import LaneOffset, Width -from d123.dataset.conversion.map.opendrive.parser.polynomial import Polynomial +from d123.datasets.utils.maps.opendrive.parser.elevation import Elevation +from d123.datasets.utils.maps.opendrive.parser.geometry import Arc, Geometry, Line, Spiral +from d123.datasets.utils.maps.opendrive.parser.lane import LaneOffset, Width +from d123.datasets.utils.maps.opendrive.parser.polynomial import Polynomial from d123.geometry import Point3DIndex, StateSE2Index TOLERANCE: Final[float] = 1e-3 diff --git a/d123/dataset/conversion/map/opendrive/parser/road.py b/d123/datasets/utils/maps/opendrive/parser/road.py similarity index 93% rename from d123/dataset/conversion/map/opendrive/parser/road.py rename to d123/datasets/utils/maps/opendrive/parser/road.py index e0171429..28b5b679 100644 --- a/d123/dataset/conversion/map/opendrive/parser/road.py +++ b/d123/datasets/utils/maps/opendrive/parser/road.py @@ -4,10 +4,10 @@ from typing import List, Optional from xml.etree.ElementTree import Element -from d123.dataset.conversion.map.opendrive.parser.elevation import ElevationProfile, LateralProfile -from d123.dataset.conversion.map.opendrive.parser.lane import Lanes -from d123.dataset.conversion.map.opendrive.parser.objects import Object -from d123.dataset.conversion.map.opendrive.parser.reference import PlanView +from d123.datasets.utils.maps.opendrive.parser.elevation import ElevationProfile, LateralProfile +from d123.datasets.utils.maps.opendrive.parser.lane import Lanes +from d123.datasets.utils.maps.opendrive.parser.objects import Object +from d123.datasets.utils.maps.opendrive.parser.reference import PlanView @dataclass diff --git a/d123/dataset/conversion/map/opendrive/utils/collection.py b/d123/datasets/utils/maps/opendrive/utils/collection.py similarity index 96% rename from d123/dataset/conversion/map/opendrive/utils/collection.py rename to d123/datasets/utils/maps/opendrive/utils/collection.py index be631445..bf28c997 100644 --- a/d123/dataset/conversion/map/opendrive/utils/collection.py +++ b/d123/datasets/utils/maps/opendrive/utils/collection.py @@ -3,21 +3,21 @@ import numpy as np -from d123.dataset.conversion.map.opendrive.parser.opendrive import Junction, OpenDrive -from d123.dataset.conversion.map.opendrive.parser.reference import ReferenceLine -from d123.dataset.conversion.map.opendrive.parser.road import Road -from d123.dataset.conversion.map.opendrive.utils.id_system import ( +from d123.datasets.utils.maps.opendrive.parser.opendrive import Junction, OpenDrive +from d123.datasets.utils.maps.opendrive.parser.reference import 
ReferenceLine +from d123.datasets.utils.maps.opendrive.parser.road import Road +from d123.datasets.utils.maps.opendrive.utils.id_system import ( build_lane_id, derive_lane_section_id, lane_group_id_from_lane_id, road_id_from_lane_group_id, ) -from d123.dataset.conversion.map.opendrive.utils.lane_helper import ( +from d123.datasets.utils.maps.opendrive.utils.lane_helper import ( OpenDriveLaneGroupHelper, OpenDriveLaneHelper, lane_section_to_lane_helpers, ) -from d123.dataset.conversion.map.opendrive.utils.objects_helper import OpenDriveObjectHelper, get_object_helper +from d123.datasets.utils.maps.opendrive.utils.objects_helper import OpenDriveObjectHelper, get_object_helper logger = logging.getLogger(__name__) diff --git a/d123/dataset/conversion/map/opendrive/utils/id_mapping.py b/d123/datasets/utils/maps/opendrive/utils/id_mapping.py similarity index 100% rename from d123/dataset/conversion/map/opendrive/utils/id_mapping.py rename to d123/datasets/utils/maps/opendrive/utils/id_mapping.py diff --git a/d123/dataset/conversion/map/opendrive/utils/id_system.py b/d123/datasets/utils/maps/opendrive/utils/id_system.py similarity index 100% rename from d123/dataset/conversion/map/opendrive/utils/id_system.py rename to d123/datasets/utils/maps/opendrive/utils/id_system.py diff --git a/d123/dataset/conversion/map/opendrive/utils/lane_helper.py b/d123/datasets/utils/maps/opendrive/utils/lane_helper.py similarity index 97% rename from d123/dataset/conversion/map/opendrive/utils/lane_helper.py rename to d123/datasets/utils/maps/opendrive/utils/lane_helper.py index a21bd625..34d57054 100644 --- a/d123/dataset/conversion/map/opendrive/utils/lane_helper.py +++ b/d123/datasets/utils/maps/opendrive/utils/lane_helper.py @@ -6,10 +6,10 @@ import numpy.typing as npt import shapely -from d123.dataset.conversion.map.opendrive.parser.lane import Lane, LaneSection -from d123.dataset.conversion.map.opendrive.parser.reference import ReferenceLine -from d123.dataset.conversion.map.opendrive.parser.road import RoadType -from d123.dataset.conversion.map.opendrive.utils.id_system import ( +from d123.datasets.utils.maps.opendrive.parser.lane import Lane, LaneSection +from d123.datasets.utils.maps.opendrive.parser.reference import ReferenceLine +from d123.datasets.utils.maps.opendrive.parser.road import RoadType +from d123.datasets.utils.maps.opendrive.utils.id_system import ( derive_lane_group_id, derive_lane_id, lane_group_id_from_lane_id, diff --git a/d123/dataset/conversion/map/opendrive/utils/objects_helper.py b/d123/datasets/utils/maps/opendrive/utils/objects_helper.py similarity index 94% rename from d123/dataset/conversion/map/opendrive/utils/objects_helper.py rename to d123/datasets/utils/maps/opendrive/utils/objects_helper.py index 11c7f609..ff478149 100644 --- a/d123/dataset/conversion/map/opendrive/utils/objects_helper.py +++ b/d123/datasets/utils/maps/opendrive/utils/objects_helper.py @@ -5,8 +5,8 @@ import numpy.typing as npt import shapely -from d123.dataset.conversion.map.opendrive.parser.objects import Object -from d123.dataset.conversion.map.opendrive.parser.reference import ReferenceLine +from d123.datasets.utils.maps.opendrive.parser.objects import Object +from d123.datasets.utils.maps.opendrive.parser.reference import ReferenceLine from d123.geometry import Point2D, Point3D, Point3DIndex, StateSE2 from d123.geometry.transform.tranform_2d import translate_along_yaw from d123.geometry.utils.rotation_utils import normalize_angle diff --git a/d123/dataset/conversion/map/opendrive/__init__.py 
b/d123/datasets/utils/maps/road_edge/__init__.py similarity index 100% rename from d123/dataset/conversion/map/opendrive/__init__.py rename to d123/datasets/utils/maps/road_edge/__init__.py diff --git a/d123/dataset/conversion/map/road_edge/road_edge_2d_utils.py b/d123/datasets/utils/maps/road_edge/road_edge_2d_utils.py similarity index 100% rename from d123/dataset/conversion/map/road_edge/road_edge_2d_utils.py rename to d123/datasets/utils/maps/road_edge/road_edge_2d_utils.py diff --git a/d123/dataset/conversion/map/road_edge/road_edge_3d_utils.py b/d123/datasets/utils/maps/road_edge/road_edge_3d_utils.py similarity index 99% rename from d123/dataset/conversion/map/road_edge/road_edge_3d_utils.py rename to d123/datasets/utils/maps/road_edge/road_edge_3d_utils.py index b88a44e0..ceeee62a 100644 --- a/d123/dataset/conversion/map/road_edge/road_edge_3d_utils.py +++ b/d123/datasets/utils/maps/road_edge/road_edge_3d_utils.py @@ -9,7 +9,7 @@ import shapely from shapely.geometry import LineString -from d123.dataset.conversion.map.road_edge.road_edge_2d_utils import get_road_edge_linear_rings +from d123.datasets.utils.maps.road_edge.road_edge_2d_utils import get_road_edge_linear_rings from d123.geometry import Point3DIndex from d123.geometry.occupancy_map import OccupancyMap2D diff --git a/d123/dataset/dataset_specific/wopd/waymo_map_utils/womp_boundary_utils.py b/d123/datasets/wopd/waymo_map_utils/womp_boundary_utils.py similarity index 100% rename from d123/dataset/dataset_specific/wopd/waymo_map_utils/womp_boundary_utils.py rename to d123/datasets/wopd/waymo_map_utils/womp_boundary_utils.py diff --git a/d123/dataset/dataset_specific/wopd/waymo_map_utils/wopd_map_utils.py b/d123/datasets/wopd/waymo_map_utils/wopd_map_utils.py similarity index 97% rename from d123/dataset/dataset_specific/wopd/waymo_map_utils/wopd_map_utils.py rename to d123/datasets/wopd/waymo_map_utils/wopd_map_utils.py index 6cf4f0f9..561d83f0 100644 --- a/d123/dataset/dataset_specific/wopd/waymo_map_utils/wopd_map_utils.py +++ b/d123/datasets/wopd/waymo_map_utils/wopd_map_utils.py @@ -7,13 +7,16 @@ import numpy.typing as npt import pandas as pd import shapely.geometry as geom -from waymo_open_dataset import dataset_pb2 -from d123.dataset.dataset_specific.wopd.waymo_map_utils.womp_boundary_utils import extract_lane_boundaries -from d123.dataset.maps.map_datatypes import MapLayer, RoadEdgeType, RoadLineType +from d123.common.utils.dependencies import check_dependencies +from d123.datasets.wopd.waymo_map_utils.womp_boundary_utils import extract_lane_boundaries +from d123.datatypes.maps.map_datatypes import MapLayer, RoadEdgeType, RoadLineType from d123.geometry import Point3DIndex, Polyline3D from d123.geometry.utils.units import mph_to_mps +check_dependencies(modules=["waymo_open_dataset"], optional_name="waymo") +from waymo_open_dataset import dataset_pb2 + # TODO: # - Implement stop signs # - Implement speed bumps diff --git a/d123/dataset/dataset_specific/wopd/wopd_data_converter.py b/d123/datasets/wopd/wopd_data_converter.py similarity index 95% rename from d123/dataset/dataset_specific/wopd/wopd_data_converter.py rename to d123/datasets/wopd/wopd_data_converter.py index d74a7ea8..1e935329 100644 --- a/d123/dataset/dataset_specific/wopd/wopd_data_converter.py +++ b/d123/datasets/wopd/wopd_data_converter.py @@ -12,19 +12,19 @@ import pyarrow as pa from pyquaternion import Quaternion -from d123.common.datatypes.detection.detection_types import DetectionType -from d123.common.datatypes.sensor.camera import CameraMetadata, 
CameraType, camera_metadata_dict_to_json -from d123.common.datatypes.sensor.lidar import LiDARMetadata, LiDARType, lidar_metadata_dict_to_json -from d123.common.datatypes.sensor.lidar_index import WopdLidarIndex -from d123.common.datatypes.vehicle_state.ego_state import DynamicStateSE3, EgoStateSE3, EgoStateSE3Index -from d123.common.datatypes.vehicle_state.vehicle_parameters import get_wopd_chrysler_pacifica_parameters from d123.common.multithreading.worker_utils import WorkerPool, worker_map +from d123.common.utils.arrow_helper import open_arrow_table, write_arrow_table from d123.common.utils.dependencies import check_dependencies -from d123.dataset.arrow.helper import open_arrow_table, write_arrow_table -from d123.dataset.dataset_specific.raw_data_converter import DataConverterConfig, RawDataConverter -from d123.dataset.dataset_specific.wopd.waymo_map_utils.wopd_map_utils import convert_wopd_map -from d123.dataset.dataset_specific.wopd.wopd_utils import parse_range_image_and_camera_projection -from d123.dataset.logs.log_metadata import LogMetadata +from d123.datasets.raw_data_converter import DataConverterConfig, RawDataConverter +from d123.datasets.wopd.waymo_map_utils.wopd_map_utils import convert_wopd_map +from d123.datasets.wopd.wopd_utils import parse_range_image_and_camera_projection +from d123.datatypes.detections.detection_types import DetectionType +from d123.datatypes.scene.scene_metadata import LogMetadata +from d123.datatypes.sensors.camera import CameraMetadata, CameraType, camera_metadata_dict_to_json +from d123.datatypes.sensors.lidar import LiDARMetadata, LiDARType, lidar_metadata_dict_to_json +from d123.datatypes.sensors.lidar_index import WopdLidarIndex +from d123.datatypes.vehicle_state.ego_state import DynamicStateSE3, EgoStateSE3, EgoStateSE3Index +from d123.datatypes.vehicle_state.vehicle_parameters import get_wopd_chrysler_pacifica_parameters from d123.geometry import BoundingBoxSE3Index, EulerStateSE3, Point3D, Vector3D, Vector3DIndex from d123.geometry.transform.transform_euler_se3 import convert_relative_to_absolute_euler_se3_array from d123.geometry.utils.constants import DEFAULT_PITCH, DEFAULT_ROLL diff --git a/d123/dataset/dataset_specific/wopd/wopd_utils.py b/d123/datasets/wopd/wopd_utils.py similarity index 96% rename from d123/dataset/dataset_specific/wopd/wopd_utils.py rename to d123/datasets/wopd/wopd_utils.py index c9c302ef..e58813f9 100644 --- a/d123/dataset/dataset_specific/wopd/wopd_utils.py +++ b/d123/datasets/wopd/wopd_utils.py @@ -1,5 +1,8 @@ from typing import Dict, List, Tuple +from d123.common.utils.dependencies import check_dependencies + +check_dependencies(modules=["tensorflow", "waymo_open_dataset"], optional_name="waymo") import tensorflow as tf from waymo_open_dataset import dataset_pb2 diff --git a/d123/common/datatypes/__init__.py b/d123/datatypes/__init__.py similarity index 100% rename from d123/common/datatypes/__init__.py rename to d123/datatypes/__init__.py diff --git a/d123/dataset/conversion/map/opendrive/parser/__init__.py b/d123/datatypes/detections/__init__.py similarity index 100% rename from d123/dataset/conversion/map/opendrive/parser/__init__.py rename to d123/datatypes/detections/__init__.py diff --git a/d123/common/datatypes/detection/detection.py b/d123/datatypes/detections/detection.py similarity index 94% rename from d123/common/datatypes/detection/detection.py rename to d123/datatypes/detections/detection.py index 8ada50fe..bf855679 100644 --- a/d123/common/datatypes/detection/detection.py +++ 
b/d123/datatypes/detections/detection.py @@ -4,9 +4,9 @@ import shapely -from d123.common.datatypes.detection.detection_types import DetectionType -from d123.common.datatypes.time.time_point import TimePoint from d123.common.utils.enums import SerialIntEnum +from d123.datatypes.detections.detection_types import DetectionType +from d123.datatypes.time.time_point import TimePoint from d123.geometry import BoundingBoxSE2, BoundingBoxSE3, EulerStateSE3, OccupancyMap2D, StateSE2, Vector2D, Vector3D @@ -150,3 +150,10 @@ def get_detection_by_lane_id(self, lane_id: int) -> TrafficLightDetection | None traffic_light_detection = detection break return traffic_light_detection + + +@dataclass +class DetectionRecording: + + box_detections: BoxDetectionWrapper + traffic_light_detections: TrafficLightDetectionWrapper diff --git a/d123/common/datatypes/detection/detection_types.py b/d123/datatypes/detections/detection_types.py similarity index 100% rename from d123/common/datatypes/detection/detection_types.py rename to d123/datatypes/detections/detection_types.py diff --git a/d123/dataset/maps/abstract_map.py b/d123/datatypes/maps/abstract_map.py similarity index 94% rename from d123/dataset/maps/abstract_map.py rename to d123/datatypes/maps/abstract_map.py index be9eefeb..edfc16e5 100644 --- a/d123/dataset/maps/abstract_map.py +++ b/d123/datatypes/maps/abstract_map.py @@ -5,8 +5,8 @@ import shapely -from d123.dataset.maps.abstract_map_objects import AbstractMapObject -from d123.dataset.maps.map_datatypes import MapLayer +from d123.datatypes.maps.abstract_map_objects import AbstractMapObject +from d123.datatypes.maps.map_datatypes import MapLayer from d123.geometry import Point2D # TODO: diff --git a/d123/dataset/maps/abstract_map_objects.py b/d123/datatypes/maps/abstract_map_objects.py similarity index 98% rename from d123/dataset/maps/abstract_map_objects.py rename to d123/datatypes/maps/abstract_map_objects.py index 14e85539..67f63e5c 100644 --- a/d123/dataset/maps/abstract_map_objects.py +++ b/d123/datatypes/maps/abstract_map_objects.py @@ -6,7 +6,7 @@ import shapely.geometry as geom import trimesh -from d123.dataset.maps.map_datatypes import MapLayer, RoadEdgeType, RoadLineType +from d123.datatypes.maps.map_datatypes import MapLayer, RoadEdgeType, RoadLineType from d123.geometry import Polyline2D, Polyline3D, PolylineSE2 diff --git a/d123/dataset/conversion/map/road_edge/__init__.py b/d123/datatypes/maps/gpkg/__init__.py similarity index 100% rename from d123/dataset/conversion/map/road_edge/__init__.py rename to d123/datatypes/maps/gpkg/__init__.py diff --git a/d123/dataset/maps/gpkg/gpkg_map.py b/d123/datatypes/maps/gpkg/gpkg_map.py similarity index 98% rename from d123/dataset/maps/gpkg/gpkg_map.py rename to d123/datatypes/maps/gpkg/gpkg_map.py index d466857e..429a611e 100644 --- a/d123/dataset/maps/gpkg/gpkg_map.py +++ b/d123/datatypes/maps/gpkg/gpkg_map.py @@ -11,9 +11,9 @@ import shapely import shapely.geometry as geom -from d123.dataset.maps.abstract_map import AbstractMap -from d123.dataset.maps.abstract_map_objects import AbstractMapObject -from d123.dataset.maps.gpkg.gpkg_map_objects import ( +from d123.datatypes.maps.abstract_map import AbstractMap +from d123.datatypes.maps.abstract_map_objects import AbstractMapObject +from d123.datatypes.maps.gpkg.gpkg_map_objects import ( GPKGCarpark, GPKGCrosswalk, GPKGGenericDrivable, @@ -24,8 +24,8 @@ GPKGRoadLine, GPKGWalkway, ) -from d123.dataset.maps.gpkg.utils import load_gdf_with_geometry_columns -from d123.dataset.maps.map_datatypes import 
MapLayer +from d123.datatypes.maps.gpkg.utils import load_gdf_with_geometry_columns +from d123.datatypes.maps.map_datatypes import MapLayer from d123.geometry import Point2D USE_ARROW: bool = True diff --git a/d123/dataset/maps/gpkg/gpkg_map_objects.py b/d123/datatypes/maps/gpkg/gpkg_map_objects.py similarity index 98% rename from d123/dataset/maps/gpkg/gpkg_map_objects.py rename to d123/datatypes/maps/gpkg/gpkg_map_objects.py index 1a274a53..2326490b 100644 --- a/d123/dataset/maps/gpkg/gpkg_map_objects.py +++ b/d123/datatypes/maps/gpkg/gpkg_map_objects.py @@ -11,7 +11,7 @@ import trimesh from d123.common.visualization.viser.utils import get_trimesh_from_boundaries -from d123.dataset.maps.abstract_map_objects import ( +from d123.datatypes.maps.abstract_map_objects import ( AbstractCarpark, AbstractCrosswalk, AbstractGenericDrivable, @@ -24,8 +24,8 @@ AbstractSurfaceMapObject, AbstractWalkway, ) -from d123.dataset.maps.gpkg.utils import get_row_with_value -from d123.dataset.maps.map_datatypes import RoadEdgeType, RoadLineType +from d123.datatypes.maps.gpkg.utils import get_row_with_value +from d123.datatypes.maps.map_datatypes import RoadEdgeType, RoadLineType from d123.geometry import Point3DIndex, Polyline3D diff --git a/d123/dataset/maps/gpkg/utils.py b/d123/datatypes/maps/gpkg/utils.py similarity index 100% rename from d123/dataset/maps/gpkg/utils.py rename to d123/datatypes/maps/gpkg/utils.py diff --git a/d123/dataset/maps/map_datatypes.py b/d123/datatypes/maps/map_datatypes.py similarity index 100% rename from d123/dataset/maps/map_datatypes.py rename to d123/datatypes/maps/map_datatypes.py diff --git a/d123/dataset/dataset_specific/__init__.py b/d123/datatypes/scene/__init__.py similarity index 100% rename from d123/dataset/dataset_specific/__init__.py rename to d123/datatypes/scene/__init__.py diff --git a/d123/dataset/scene/abstract_scene.py b/d123/datatypes/scene/abstract_scene.py similarity index 66% rename from d123/dataset/scene/abstract_scene.py rename to d123/datatypes/scene/abstract_scene.py index 4877fc83..a3d2ccfe 100644 --- a/d123/dataset/scene/abstract_scene.py +++ b/d123/datatypes/scene/abstract_scene.py @@ -1,17 +1,15 @@ from __future__ import annotations import abc -from dataclasses import dataclass from typing import List, Optional -from d123.common.datatypes.detection.detection import BoxDetectionWrapper, TrafficLightDetectionWrapper -from d123.common.datatypes.recording.detection_recording import DetectionRecording -from d123.common.datatypes.sensor.camera import Camera, CameraType -from d123.common.datatypes.sensor.lidar import LiDAR, LiDARType -from d123.common.datatypes.time.time_point import TimePoint -from d123.common.datatypes.vehicle_state.ego_state import EgoStateSE3 -from d123.dataset.logs.log_metadata import LogMetadata -from d123.dataset.maps.abstract_map import AbstractMap +from d123.datatypes.detections.detection import BoxDetectionWrapper, DetectionRecording, TrafficLightDetectionWrapper +from d123.datatypes.maps.abstract_map import AbstractMap +from d123.datatypes.scene.scene_metadata import LogMetadata +from d123.datatypes.sensors.camera import Camera, CameraType +from d123.datatypes.sensors.lidar import LiDAR, LiDARType +from d123.datatypes.time.time_point import TimePoint +from d123.datatypes.vehicle_state.ego_state import EgoStateSE3 # TODO: Remove or improve open/close dynamic of Scene object. @@ -93,25 +91,3 @@ def open(self) -> None: def close(self) -> None: pass - - -# TODO: Move to a more appropriate place. 
-@dataclass(frozen=True) -class SceneExtractionInfo: - - initial_idx: int - duration_s: float - history_s: float - iteration_duration_s: float - - @property - def number_of_iterations(self) -> int: - return round(self.duration_s / self.iteration_duration_s) - - @property - def number_of_history_iterations(self) -> int: - return round(self.history_s / self.iteration_duration_s) - - @property - def end_idx(self) -> int: - return self.initial_idx + self.number_of_iterations diff --git a/d123/datatypes/scene/abstract_scene_builder.py b/d123/datatypes/scene/abstract_scene_builder.py new file mode 100644 index 00000000..56eb9a6b --- /dev/null +++ b/d123/datatypes/scene/abstract_scene_builder.py @@ -0,0 +1,24 @@ +import abc +from typing import Iterator + +from d123.common.multithreading.worker_utils import WorkerPool +from d123.datatypes.scene.abstract_scene import AbstractScene +from d123.datatypes.scene.scene_filter import SceneFilter + +# TODO: Expand lazy implementation for scene builder. + + +class SceneBuilder(abc.ABC): + """ + Abstract base class for building scenes from a dataset. + """ + + @abc.abstractmethod + def get_scenes(self, filter: SceneFilter, worker: WorkerPool) -> Iterator[AbstractScene]: + """ + Returns an iterator over scenes that match the given filter. + :param filter: SceneFilter object to filter the scenes. + :param worker: WorkerPool to parallelize the scene extraction. + :return: Iterator over AbstractScene objects. + """ + raise NotImplementedError diff --git a/d123/dataset/scene/arrow_scene.py b/d123/datatypes/scene/arrow/arrow_scene.py similarity index 88% rename from d123/dataset/scene/arrow_scene.py rename to d123/datatypes/scene/arrow/arrow_scene.py index ecd68111..0fc61ba8 100644 --- a/d123/dataset/scene/arrow_scene.py +++ b/d123/datatypes/scene/arrow/arrow_scene.py @@ -4,14 +4,12 @@ import pyarrow as pa -from d123.common.datatypes.detection.detection import BoxDetectionWrapper, TrafficLightDetectionWrapper -from d123.common.datatypes.recording.detection_recording import DetectionRecording -from d123.common.datatypes.sensor.camera import Camera, CameraMetadata, CameraType, camera_metadata_dict_from_json -from d123.common.datatypes.sensor.lidar import LiDAR, LiDARMetadata, LiDARType, lidar_metadata_dict_from_json -from d123.common.datatypes.time.time_point import TimePoint -from d123.common.datatypes.vehicle_state.ego_state import EgoStateSE3 -from d123.common.datatypes.vehicle_state.vehicle_parameters import VehicleParameters -from d123.dataset.arrow.conversion import ( +from d123.common.utils.arrow_helper import open_arrow_table +from d123.datatypes.detections.detection import BoxDetectionWrapper, DetectionRecording, TrafficLightDetectionWrapper +from d123.datatypes.maps.abstract_map import AbstractMap +from d123.datatypes.maps.gpkg.gpkg_map import get_local_map_api, get_map_api_from_names +from d123.datatypes.scene.abstract_scene import AbstractScene +from d123.datatypes.scene.arrow.utils.conversion import ( get_box_detections_from_arrow_table, get_camera_from_arrow_table, get_ego_vehicle_state_from_arrow_table, @@ -19,11 +17,12 @@ get_timepoint_from_arrow_table, get_traffic_light_detections_from_arrow_table, ) -from d123.dataset.arrow.helper import open_arrow_table -from d123.dataset.logs.log_metadata import LogMetadata -from d123.dataset.maps.abstract_map import AbstractMap -from d123.dataset.maps.gpkg.gpkg_map import get_local_map_api, get_map_api_from_names -from d123.dataset.scene.abstract_scene import AbstractScene, SceneExtractionInfo +from 
d123.datatypes.scene.scene_metadata import LogMetadata, SceneExtractionInfo +from d123.datatypes.sensors.camera import Camera, CameraMetadata, CameraType, camera_metadata_dict_from_json +from d123.datatypes.sensors.lidar import LiDAR, LiDARMetadata, LiDARType, lidar_metadata_dict_from_json +from d123.datatypes.time.time_point import TimePoint +from d123.datatypes.vehicle_state.ego_state import EgoStateSE3 +from d123.datatypes.vehicle_state.vehicle_parameters import VehicleParameters # TODO: Remove or improve open/close dynamic of Scene object. diff --git a/d123/dataset/scene/scene_builder.py b/d123/datatypes/scene/arrow/arrow_scene_builder.py similarity index 87% rename from d123/dataset/scene/scene_builder.py rename to d123/datatypes/scene/arrow/arrow_scene_builder.py index 228fcb13..94571715 100644 --- a/d123/dataset/scene/scene_builder.py +++ b/d123/datatypes/scene/arrow/arrow_scene_builder.py @@ -1,4 +1,3 @@ -import abc import json import random from functools import partial @@ -6,25 +5,12 @@ from typing import Iterator, List, Optional, Set, Union from d123.common.multithreading.worker_utils import WorkerPool, worker_map -from d123.dataset.arrow.helper import open_arrow_table -from d123.dataset.logs.log_metadata import LogMetadata -from d123.dataset.scene.abstract_scene import AbstractScene -from d123.dataset.scene.arrow_scene import ArrowScene, SceneExtractionInfo -from d123.dataset.scene.scene_filter import SceneFilter - -# TODO: Fix lazy abstraction implementation for scene builder. - - -class SceneBuilder(abc.ABC): - @abc.abstractmethod - def get_scenes(self, filter: SceneFilter, worker: WorkerPool) -> Iterator[AbstractScene]: - """ - Returns an iterator over scenes that match the given filter. - :param filter: SceneFilter object to filter the scenes. - :param worker: WorkerPool to parallelize the scene extraction. - :return: Iterator over AbstractScene objects. 
- """ - raise NotImplementedError +from d123.common.utils.arrow_helper import open_arrow_table +from d123.datatypes.scene.abstract_scene import AbstractScene +from d123.datatypes.scene.abstract_scene_builder import SceneBuilder +from d123.datatypes.scene.arrow.arrow_scene import ArrowScene +from d123.datatypes.scene.scene_filter import SceneFilter +from d123.datatypes.scene.scene_metadata import LogMetadata, SceneExtractionInfo class ArrowSceneBuilder(SceneBuilder): diff --git a/d123/dataset/arrow/conversion.py b/d123/datatypes/scene/arrow/utils/conversion.py similarity index 88% rename from d123/dataset/arrow/conversion.py rename to d123/datatypes/scene/arrow/utils/conversion.py index c961ab7d..514d9f20 100644 --- a/d123/dataset/arrow/conversion.py +++ b/d123/datatypes/scene/arrow/utils/conversion.py @@ -3,14 +3,14 @@ import io import os from pathlib import Path -from typing import Dict, Optional +from typing import Dict, List, Optional import numpy as np import numpy.typing as npt import pyarrow as pa from PIL import Image -from d123.common.datatypes.detection.detection import ( +from d123.datatypes.detections.detection import ( BoxDetection, BoxDetectionMetadata, BoxDetectionSE3, @@ -19,14 +19,13 @@ TrafficLightDetectionWrapper, TrafficLightStatus, ) -from d123.common.datatypes.detection.detection_types import DetectionType -from d123.common.datatypes.sensor.camera import Camera, CameraMetadata -from d123.common.datatypes.sensor.lidar import LiDAR, LiDARMetadata -from d123.common.datatypes.time.time_point import TimePoint -from d123.common.datatypes.vehicle_state.ego_state import EgoStateSE3 -from d123.common.datatypes.vehicle_state.vehicle_parameters import VehicleParameters -from d123.dataset.logs.log_metadata import LogMetadata -from d123.dataset.maps.abstract_map import List +from d123.datatypes.detections.detection_types import DetectionType +from d123.datatypes.scene.scene_metadata import LogMetadata +from d123.datatypes.sensors.camera import Camera, CameraMetadata +from d123.datatypes.sensors.lidar import LiDAR, LiDARMetadata +from d123.datatypes.time.time_point import TimePoint +from d123.datatypes.vehicle_state.ego_state import EgoStateSE3 +from d123.datatypes.vehicle_state.vehicle_parameters import VehicleParameters from d123.geometry import BoundingBoxSE3, Vector3D DATASET_SENSOR_ROOT: Dict[str, Path] = { @@ -144,11 +143,11 @@ def get_lidar_from_arrow_table( # NOTE: We move data specific import into if-else block, to avoid data specific import errors if log_metadata.dataset == "nuplan": - from d123.dataset.dataset_specific.nuplan.load_sensor import load_nuplan_lidar_from_path + from d123.datasets.nuplan.load_sensor import load_nuplan_lidar_from_path lidar = load_nuplan_lidar_from_path(full_lidar_path, lidar_metadata) elif log_metadata.dataset == "carla": - from d123.dataset.dataset_specific.carla.load_sensor import load_carla_lidar_from_path + from d123.datasets.carla.load_sensor import load_carla_lidar_from_path lidar = load_carla_lidar_from_path(full_lidar_path, lidar_metadata) elif log_metadata.dataset == "wopd": diff --git a/d123/dataset/scene/scene_filter.py b/d123/datatypes/scene/scene_filter.py similarity index 96% rename from d123/dataset/scene/scene_filter.py rename to d123/datatypes/scene/scene_filter.py index 0a58eba6..5dbb5ba6 100644 --- a/d123/dataset/scene/scene_filter.py +++ b/d123/datatypes/scene/scene_filter.py @@ -1,7 +1,7 @@ from dataclasses import dataclass from typing import List, Optional -from d123.common.datatypes.sensor.camera import CameraType +from 
d123.datatypes.sensors.camera import CameraType # TODO: Add more filter options (e.g. scene tags, ego movement, or whatever appropriate) diff --git a/d123/datatypes/scene/scene_metadata.py b/d123/datatypes/scene/scene_metadata.py new file mode 100644 index 00000000..0fc12b03 --- /dev/null +++ b/d123/datatypes/scene/scene_metadata.py @@ -0,0 +1,40 @@ +from dataclasses import dataclass + +import d123 + + +@dataclass +class LogMetadata: + + # TODO: add + # - split + # - global/local map + + dataset: str + log_name: str + location: str + timestep_seconds: float + + map_has_z: bool + version: str = str(d123.__version__) + + +@dataclass(frozen=True) +class SceneExtractionInfo: + + initial_idx: int + duration_s: float + history_s: float + iteration_duration_s: float + + @property + def number_of_iterations(self) -> int: + return round(self.duration_s / self.iteration_duration_s) + + @property + def number_of_history_iterations(self) -> int: + return round(self.history_s / self.iteration_duration_s) + + @property + def end_idx(self) -> int: + return self.initial_idx + self.number_of_iterations diff --git a/d123/common/datatypes/sensor/camera.py b/d123/datatypes/sensors/camera.py similarity index 100% rename from d123/common/datatypes/sensor/camera.py rename to d123/datatypes/sensors/camera.py diff --git a/d123/common/datatypes/sensor/lidar.py b/d123/datatypes/sensors/lidar.py similarity index 97% rename from d123/common/datatypes/sensor/lidar.py rename to d123/datatypes/sensors/lidar.py index fe178760..f15ccc89 100644 --- a/d123/common/datatypes/sensor/lidar.py +++ b/d123/datatypes/sensors/lidar.py @@ -7,8 +7,8 @@ import numpy as np import numpy.typing as npt -from d123.common.datatypes.sensor.lidar_index import LIDAR_INDEX_REGISTRY, LiDARIndex from d123.common.utils.enums import SerialIntEnum +from d123.datatypes.sensors.lidar_index import LIDAR_INDEX_REGISTRY, LiDARIndex class LiDARType(SerialIntEnum): diff --git a/d123/common/datatypes/sensor/lidar_index.py b/d123/datatypes/sensors/lidar_index.py similarity index 100% rename from d123/common/datatypes/sensor/lidar_index.py rename to d123/datatypes/sensors/lidar_index.py diff --git a/d123/dataset/dataset_specific/carla/__init__.py b/d123/datatypes/time/__init__.py similarity index 100% rename from d123/dataset/dataset_specific/carla/__init__.py rename to d123/datatypes/time/__init__.py diff --git a/d123/common/datatypes/time/time_point.py b/d123/datatypes/time/time_point.py similarity index 100% rename from d123/common/datatypes/time/time_point.py rename to d123/datatypes/time/time_point.py diff --git a/d123/dataset/dataset_specific/nuplan/__init__.py b/d123/datatypes/vehicle_state/__init__.py similarity index 100% rename from d123/dataset/dataset_specific/nuplan/__init__.py rename to d123/datatypes/vehicle_state/__init__.py diff --git a/d123/common/datatypes/vehicle_state/ego_state.py b/d123/datatypes/vehicle_state/ego_state.py similarity index 96% rename from d123/common/datatypes/vehicle_state/ego_state.py rename to d123/datatypes/vehicle_state/ego_state.py index 79522517..2198cc6b 100644 --- a/d123/common/datatypes/vehicle_state/ego_state.py +++ b/d123/datatypes/vehicle_state/ego_state.py @@ -8,21 +8,17 @@ import numpy as np import numpy.typing as npt -from d123.common.datatypes.detection.detection import ( - BoxDetectionMetadata, - BoxDetectionSE2, - BoxDetectionSE3, -) -from d123.common.datatypes.detection.detection_types import DetectionType -from d123.common.datatypes.time.time_point import TimePoint -from 
d123.common.datatypes.vehicle_state.vehicle_parameters import ( +from d123.common.utils.enums import classproperty +from d123.datatypes.detections.detection import BoxDetectionMetadata, BoxDetectionSE2, BoxDetectionSE3 +from d123.datatypes.detections.detection_types import DetectionType +from d123.datatypes.time.time_point import TimePoint +from d123.datatypes.vehicle_state.vehicle_parameters import ( VehicleParameters, center_se2_to_rear_axle_se2, center_se3_to_rear_axle_se3, rear_axle_se2_to_center_se2, rear_axle_se3_to_center_se3, ) -from d123.common.utils.enums import classproperty from d123.geometry import BoundingBoxSE2, BoundingBoxSE3, EulerStateSE3, StateSE2, Vector2D, Vector3D # TODO: Find an appropriate way to handle SE2 and SE3 states. diff --git a/d123/common/datatypes/vehicle_state/vehicle_parameters.py b/d123/datatypes/vehicle_state/vehicle_parameters.py similarity index 98% rename from d123/common/datatypes/vehicle_state/vehicle_parameters.py rename to d123/datatypes/vehicle_state/vehicle_parameters.py index 5bf38706..cd9aeb1c 100644 --- a/d123/common/datatypes/vehicle_state/vehicle_parameters.py +++ b/d123/datatypes/vehicle_state/vehicle_parameters.py @@ -4,8 +4,6 @@ from d123.geometry.transform.transform_euler_se3 import translate_euler_se3_along_body_frame from d123.geometry.transform.transform_se2 import translate_se2_along_body_frame -# TODO: Add more vehicle parameters, potentially extend the parameters. - @dataclass class VehicleParameters: diff --git a/d123/geometry/__init__.py b/d123/geometry/__init__.py index d845b66c..678f2cdf 100644 --- a/d123/geometry/__init__.py +++ b/d123/geometry/__init__.py @@ -4,15 +4,19 @@ BoundingBoxSE3Index, Corners2DIndex, Corners3DIndex, + EulerAnglesIndex, EulerStateSE3Index, Point2DIndex, Point3DIndex, + QuaternionIndex, StateSE2Index, + StateSE3Index, Vector2DIndex, Vector3DIndex, ) from d123.geometry.occupancy_map import OccupancyMap2D from d123.geometry.point import Point2D, Point3D from d123.geometry.polyline import Polyline2D, Polyline3D, PolylineSE2 +from d123.geometry.rotation import EulerAngles, Quaternion from d123.geometry.se import EulerStateSE3, StateSE2, StateSE3 from d123.geometry.vector import Vector2D, Vector3D diff --git a/d123/geometry/test/test_bounding_box.py b/d123/geometry/test/test_bounding_box.py index e8f44986..a102b4da 100644 --- a/d123/geometry/test/test_bounding_box.py +++ b/d123/geometry/test/test_bounding_box.py @@ -4,7 +4,7 @@ import shapely.geometry as geom from d123.common.utils.mixin import ArrayMixin -from d123.geometry import BoundingBoxSE2, BoundingBoxSE3, Point2D, Point3D, StateSE2 +from d123.geometry import BoundingBoxSE2, BoundingBoxSE3, Point2D, Point3D, StateSE2, StateSE3 from d123.geometry.geometry_index import ( BoundingBoxSE2Index, BoundingBoxSE3Index, @@ -12,7 +12,6 @@ Corners3DIndex, Point2DIndex, ) -from d123.geometry.se import StateSE3 class TestBoundingBoxSE2(unittest.TestCase): diff --git a/d123/geometry/test/test_point.py b/d123/geometry/test/test_point.py index 94162e93..5c1b30d7 100644 --- a/d123/geometry/test/test_point.py +++ b/d123/geometry/test/test_point.py @@ -3,9 +3,8 @@ import numpy as np -from d123.geometry import Point2D, Point2DIndex -from d123.geometry.geometry_index import Point3DIndex -from d123.geometry.point import Point3D +from d123.geometry import Point2D, Point3D +from d123.geometry.geometry_index import Point2DIndex, Point3DIndex class TestPoint2D(unittest.TestCase): diff --git a/d123/geometry/test/test_polyline.py b/d123/geometry/test/test_polyline.py index 
e4103364..d1c1d652 100644 --- a/d123/geometry/test/test_polyline.py +++ b/d123/geometry/test/test_polyline.py @@ -3,9 +3,7 @@ import numpy as np import shapely.geometry as geom -from d123.geometry.point import Point2D, Point3D -from d123.geometry.polyline import Polyline2D, Polyline3D, PolylineSE2 -from d123.geometry.se import StateSE2 +from d123.geometry import Point2D, Point3D, Polyline2D, Polyline3D, PolylineSE2, StateSE2 class TestPolyline2D(unittest.TestCase): diff --git a/d123/geometry/transform/test/test_transform_consistency.py b/d123/geometry/transform/test/test_transform_consistency.py index 1865a3ff..72facda0 100644 --- a/d123/geometry/transform/test/test_transform_consistency.py +++ b/d123/geometry/transform/test/test_transform_consistency.py @@ -3,8 +3,8 @@ import numpy as np import numpy.typing as npt +from d123.geometry import EulerStateSE3, StateSE2, Vector2D, Vector3D from d123.geometry.geometry_index import EulerStateSE3Index, Point2DIndex, Point3DIndex, StateSE2Index -from d123.geometry.se import EulerStateSE3, StateSE2 from d123.geometry.transform.transform_euler_se3 import ( convert_absolute_to_relative_euler_se3_array, convert_absolute_to_relative_points_3d_array, @@ -24,7 +24,6 @@ translate_se2_along_y, translate_se2_array_along_body_frame, ) -from d123.geometry.vector import Vector2D, Vector3D class TestTransformConsistency(unittest.TestCase): diff --git a/d123/geometry/transform/test/test_transform_euler_se3.py b/d123/geometry/transform/test/test_transform_euler_se3.py index f63bc3bd..f20203cc 100644 --- a/d123/geometry/transform/test/test_transform_euler_se3.py +++ b/d123/geometry/transform/test/test_transform_euler_se3.py @@ -3,7 +3,7 @@ import numpy as np import numpy.typing as npt -from d123.geometry.se import EulerStateSE3 +from d123.geometry import EulerStateSE3, Vector3D from d123.geometry.transform.transform_euler_se3 import ( convert_absolute_to_relative_euler_se3_array, convert_absolute_to_relative_points_3d_array, @@ -14,7 +14,6 @@ translate_euler_se3_along_y, translate_euler_se3_along_z, ) -from d123.geometry.vector import Vector3D class TestTransformEulerSE3(unittest.TestCase): diff --git a/d123/geometry/transform/test/test_transform_se2.py b/d123/geometry/transform/test/test_transform_se2.py index 503d87de..cbed45c6 100644 --- a/d123/geometry/transform/test/test_transform_se2.py +++ b/d123/geometry/transform/test/test_transform_se2.py @@ -3,7 +3,7 @@ import numpy as np import numpy.typing as npt -from d123.geometry.se import StateSE2 +from d123.geometry import StateSE2, Vector2D from d123.geometry.transform.transform_se2 import ( convert_absolute_to_relative_point_2d_array, convert_absolute_to_relative_se2_array, @@ -14,7 +14,6 @@ translate_se2_along_y, translate_se2_array_along_body_frame, ) -from d123.geometry.vector import Vector2D class TestTransformSE2(unittest.TestCase): diff --git a/d123/geometry/transform/test/test_transform_se3.py b/d123/geometry/transform/test/test_transform_se3.py index 0a752f68..be936c71 100644 --- a/d123/geometry/transform/test/test_transform_se3.py +++ b/d123/geometry/transform/test/test_transform_se3.py @@ -4,10 +4,7 @@ import numpy.typing as npt import d123.geometry.transform.transform_euler_se3 as euler_transform_se3 -from d123.geometry.geometry_index import EulerStateSE3Index, StateSE3Index -from d123.geometry.point import Point3D -from d123.geometry.rotation import Quaternion -from d123.geometry.se import EulerStateSE3, StateSE3 +from d123.geometry import EulerStateSE3, EulerStateSE3Index, Point3D, Quaternion, 
StateSE3, StateSE3Index from d123.geometry.transform.transform_se3 import ( convert_absolute_to_relative_points_3d_array, convert_absolute_to_relative_se3_array, diff --git a/d123/geometry/transform/transform_euler_se3.py b/d123/geometry/transform/transform_euler_se3.py index c2f897d5..516f1ba9 100644 --- a/d123/geometry/transform/transform_euler_se3.py +++ b/d123/geometry/transform/transform_euler_se3.py @@ -3,9 +3,7 @@ import numpy as np import numpy.typing as npt -from d123.geometry import EulerStateSE3, EulerStateSE3Index, Vector3D -from d123.geometry.geometry_index import Point3DIndex, Vector3DIndex -from d123.geometry.rotation import EulerAngles +from d123.geometry import EulerAngles, EulerStateSE3, EulerStateSE3Index, Point3DIndex, Vector3D, Vector3DIndex from d123.geometry.utils.rotation_utils import ( get_rotation_matrices_from_euler_array, get_rotation_matrix_from_euler_array, diff --git a/d123/geometry/transform/transform_se2.py b/d123/geometry/transform/transform_se2.py index c3a5ac6e..48f718fa 100644 --- a/d123/geometry/transform/transform_se2.py +++ b/d123/geometry/transform/transform_se2.py @@ -3,10 +3,8 @@ import numpy as np import numpy.typing as npt -from d123.geometry.geometry_index import Point2DIndex, Vector2DIndex -from d123.geometry.se import StateSE2, StateSE2Index +from d123.geometry import Point2DIndex, StateSE2, StateSE2Index, Vector2D, Vector2DIndex from d123.geometry.utils.rotation_utils import normalize_angle -from d123.geometry.vector import Vector2D def convert_absolute_to_relative_se2_array( diff --git a/d123/geometry/transform/transform_se3.py b/d123/geometry/transform/transform_se3.py index e5132fe2..a11fa2e2 100644 --- a/d123/geometry/transform/transform_se3.py +++ b/d123/geometry/transform/transform_se3.py @@ -3,9 +3,7 @@ import numpy as np import numpy.typing as npt -from d123.geometry import Vector3D -from d123.geometry.geometry_index import Point3DIndex, QuaternionIndex, StateSE3Index, Vector3DIndex -from d123.geometry.se import StateSE3 +from d123.geometry import Point3DIndex, QuaternionIndex, StateSE3, StateSE3Index, Vector3D, Vector3DIndex from d123.geometry.utils.rotation_utils import ( conjugate_quaternion_array, get_rotation_matrices_from_quaternion_array, diff --git a/d123/script/builders/data_converter_builder.py b/d123/script/builders/data_converter_builder.py index d9c54004..cb7cc83b 100644 --- a/d123/script/builders/data_converter_builder.py +++ b/d123/script/builders/data_converter_builder.py @@ -4,7 +4,7 @@ from hydra.utils import instantiate from omegaconf import DictConfig -from d123.dataset.dataset_specific.raw_data_converter import RawDataConverter +from d123.datasets.raw_data_converter import RawDataConverter from d123.script.builders.utils.utils_type import validate_type logger = logging.getLogger(__name__) diff --git a/d123/script/builders/scene_builder_builder.py b/d123/script/builders/scene_builder_builder.py index 4424b550..148a4792 100644 --- a/d123/script/builders/scene_builder_builder.py +++ b/d123/script/builders/scene_builder_builder.py @@ -5,7 +5,7 @@ from hydra.utils import instantiate from omegaconf import DictConfig -from d123.dataset.scene.scene_builder import SceneBuilder +from d123.datatypes.scene.abstract_scene_builder import SceneBuilder logger = logging.getLogger(__name__) diff --git a/d123/script/builders/scene_filter_builder.py b/d123/script/builders/scene_filter_builder.py index af3429a2..512f59ca 100644 --- a/d123/script/builders/scene_filter_builder.py +++ b/d123/script/builders/scene_filter_builder.py @@ 
-4,7 +4,7 @@ from hydra.utils import instantiate from omegaconf import DictConfig -from d123.dataset.scene.scene_filter import SceneFilter +from d123.datatypes.scene.scene_filter import SceneFilter logger = logging.getLogger(__name__) diff --git a/d123/script/run_preprocessing.py b/d123/script/run_preprocessing.py index 4ed6cf97..9e77c514 100644 --- a/d123/script/run_preprocessing.py +++ b/d123/script/run_preprocessing.py @@ -9,7 +9,7 @@ from omegaconf import DictConfig from d123.common.multithreading.worker_utils import worker_map -from d123.dataset.scene.abstract_scene import AbstractScene +from d123.datatypes.scene.abstract_scene import AbstractScene from d123.script.builders.scene_builder_builder import build_scene_builder from d123.script.builders.scene_filter_builder import build_scene_filter from d123.script.run_dataset_conversion import build_worker diff --git a/d123/script/run_simulation.py b/d123/script/run_simulation.py index 393e4367..76ed5e50 100644 --- a/d123/script/run_simulation.py +++ b/d123/script/run_simulation.py @@ -10,7 +10,7 @@ from tqdm import tqdm from d123.common.multithreading.worker_utils import worker_map -from d123.dataset.scene.abstract_scene import AbstractScene +from d123.datatypes.scene.abstract_scene import AbstractScene from d123.script.builders.scene_builder_builder import build_scene_builder from d123.script.builders.scene_filter_builder import build_scene_filter from d123.script.run_dataset_conversion import build_worker diff --git a/d123/simulation/agents/abstract_agents.py b/d123/simulation/agents/abstract_agents.py index 2d7f6cde..14c0f3b3 100644 --- a/d123/simulation/agents/abstract_agents.py +++ b/d123/simulation/agents/abstract_agents.py @@ -2,8 +2,8 @@ from typing import List, Optional from d123.common.datatypes.detection.detection import BoxDetection -from d123.dataset.maps.abstract_map import AbstractMap -from d123.dataset.scene.abstract_scene import AbstractScene +from d123.datasets.maps.abstract_map import AbstractMap +from d123.datasets.scene.abstract_scene import AbstractScene class AbstractAgents: diff --git a/d123/simulation/agents/constant_velocity_agents.py b/d123/simulation/agents/constant_velocity_agents.py index 5201e768..57c48d01 100644 --- a/d123/simulation/agents/constant_velocity_agents.py +++ b/d123/simulation/agents/constant_velocity_agents.py @@ -3,8 +3,8 @@ from typing import List, Optional from d123.common.datatypes.detection.detection import BoxDetection, BoxDetectionSE2 -from d123.dataset.maps.abstract_map import AbstractMap -from d123.dataset.scene.abstract_scene import AbstractScene +from d123.datasets.maps.abstract_map import AbstractMap +from d123.datasets.scene.abstract_scene import AbstractScene from d123.geometry.bounding_box import BoundingBoxSE2 from d123.geometry.point import Point2D from d123.geometry.transform.tranform_2d import translate_along_yaw diff --git a/d123/simulation/agents/idm_agents.py b/d123/simulation/agents/idm_agents.py index ef8437cf..648a8145 100644 --- a/d123/simulation/agents/idm_agents.py +++ b/d123/simulation/agents/idm_agents.py @@ -7,9 +7,9 @@ from shapely.geometry import CAP_STYLE, Polygon from d123.common.datatypes.detection.detection import BoxDetection, BoxDetectionSE2 -from d123.dataset.arrow.conversion import BoxDetectionWrapper -from d123.dataset.maps.abstract_map import AbstractMap -from d123.dataset.scene.abstract_scene import AbstractScene +from d123.datasets.maps.abstract_map import AbstractMap +from d123.datasets.scene.abstract_scene import AbstractScene +from 
d123.datatypes.scene.arrow.utils.conversion import BoxDetectionWrapper from d123.geometry.bounding_box import BoundingBoxSE2 from d123.geometry.point import Point2D from d123.geometry.polyline import PolylineSE2 diff --git a/d123/simulation/agents/path_following.py b/d123/simulation/agents/path_following.py index 960486d0..347f7f7e 100644 --- a/d123/simulation/agents/path_following.py +++ b/d123/simulation/agents/path_following.py @@ -3,8 +3,8 @@ from typing import Dict, List, Optional from d123.common.datatypes.detection.detection import BoxDetection, BoxDetectionSE2 -from d123.dataset.maps.abstract_map import AbstractMap -from d123.dataset.scene.abstract_scene import AbstractScene +from d123.datasets.maps.abstract_map import AbstractMap +from d123.datasets.scene.abstract_scene import AbstractScene from d123.geometry.bounding_box import BoundingBoxSE2 from d123.geometry.point import Point2D from d123.geometry.polyline import PolylineSE2 diff --git a/d123/simulation/agents/smart_agents.py b/d123/simulation/agents/smart_agents.py index 9d2e2140..4ec342b3 100644 --- a/d123/simulation/agents/smart_agents.py +++ b/d123/simulation/agents/smart_agents.py @@ -6,9 +6,9 @@ from torch_geometric.data import HeteroData from d123.common.datatypes.detection.detection import BoxDetection, BoxDetectionSE2 -from d123.dataset.arrow.conversion import BoxDetectionWrapper, DetectionType -from d123.dataset.maps.abstract_map import AbstractMap -from d123.dataset.scene.abstract_scene import AbstractScene +from d123.datasets.maps.abstract_map import AbstractMap +from d123.datasets.scene.abstract_scene import AbstractScene +from d123.datatypes.scene.arrow.utils.conversion import BoxDetectionWrapper, DetectionType from d123.geometry.bounding_box import BoundingBoxSE2 from d123.geometry.se import StateSE2 from d123.geometry.transform.transform_se2 import convert_relative_to_absolute_point_2d_array diff --git a/d123/simulation/controller/abstract_controller.py b/d123/simulation/controller/abstract_controller.py index fc7f4a4f..eaa5aed5 100644 --- a/d123/simulation/controller/abstract_controller.py +++ b/d123/simulation/controller/abstract_controller.py @@ -1,7 +1,7 @@ import abc from d123.common.datatypes.vehicle_state.ego_state import EgoStateSE2 -from d123.dataset.scene.abstract_scene import AbstractScene +from d123.datasets.scene.abstract_scene import AbstractScene from d123.simulation.planning.planner_output.abstract_planner_output import AbstractPlannerOutput from d123.simulation.time_controller.simulation_iteration import SimulationIteration diff --git a/d123/simulation/controller/action_controller.py b/d123/simulation/controller/action_controller.py index 65199590..5e924c78 100644 --- a/d123/simulation/controller/action_controller.py +++ b/d123/simulation/controller/action_controller.py @@ -1,7 +1,7 @@ from typing import Optional from d123.common.datatypes.vehicle_state.ego_state import EgoStateSE2 -from d123.dataset.scene.abstract_scene import AbstractScene +from d123.datasets.scene.abstract_scene import AbstractScene from d123.simulation.controller.abstract_controller import AbstractEgoController from d123.simulation.controller.motion_model.abstract_motion_model import AbstractMotionModel from d123.simulation.planning.planner_output.abstract_planner_output import AbstractPlannerOutput diff --git a/d123/simulation/gym/demo_gym_env.py b/d123/simulation/gym/demo_gym_env.py index 54051c31..88eba93f 100644 --- a/d123/simulation/gym/demo_gym_env.py +++ b/d123/simulation/gym/demo_gym_env.py @@ -9,9 +9,9 @@ from 
nuplan.planning.simulation.controller.motion_model.kinematic_bicycle import KinematicBicycleModel from d123.common.datatypes.recording.detection_recording import DetectionRecording -from d123.dataset.arrow.conversion import EgoStateSE3 -from d123.dataset.maps.abstract_map import AbstractMap -from d123.dataset.scene.abstract_scene import AbstractScene +from d123.datasets.maps.abstract_map import AbstractMap +from d123.datasets.scene.abstract_scene import AbstractScene +from d123.datatypes.scene.arrow.utils.conversion import EgoStateSE3 from d123.simulation.observation.abstract_observation import AbstractObservation from d123.simulation.observation.agents_observation import AgentsObservation diff --git a/d123/simulation/gym/environment/helper/environment_cache.py b/d123/simulation/gym/environment/helper/environment_cache.py index 4ab65f6a..00672828 100644 --- a/d123/simulation/gym/environment/helper/environment_cache.py +++ b/d123/simulation/gym/environment/helper/environment_cache.py @@ -14,8 +14,8 @@ from d123.common.datatypes.detection.detection_types import DetectionType from d123.common.datatypes.recording.detection_recording import DetectionRecording from d123.common.datatypes.vehicle_state.ego_state import EgoStateSE2 -from d123.dataset.maps.abstract_map import AbstractMap -from d123.dataset.maps.abstract_map_objects import ( +from d123.datasets.maps.abstract_map import AbstractMap +from d123.datasets.maps.abstract_map_objects import ( AbstractCarpark, AbstractCrosswalk, AbstractIntersection, @@ -23,7 +23,7 @@ AbstractLaneGroup, AbstractStopLine, ) -from d123.dataset.maps.map_datatypes import MapLayer +from d123.datasets.maps.map_datatypes import MapLayer from d123.geometry.occupancy_map import OccupancyMap2D from d123.geometry.se import StateSE2 from d123.simulation.gym.environment.helper.environment_area import AbstractEnvironmentArea diff --git a/d123/simulation/gym/gym_env.py b/d123/simulation/gym/gym_env.py index 96672d16..b4953dc3 100644 --- a/d123/simulation/gym/gym_env.py +++ b/d123/simulation/gym/gym_env.py @@ -5,8 +5,8 @@ from d123.common.datatypes.recording.detection_recording import DetectionRecording from d123.common.datatypes.vehicle_state.ego_state import DynamicStateSE2, EgoStateSE2 -from d123.dataset.maps.abstract_map import AbstractMap -from d123.dataset.scene.abstract_scene import AbstractScene +from d123.datasets.maps.abstract_map import AbstractMap +from d123.datasets.scene.abstract_scene import AbstractScene from d123.geometry.vector import Vector2D from d123.simulation.controller.motion_model.kinematic_bicycle_model import KinematicBicycleModel from d123.simulation.observation.abstract_observation import AbstractObservation diff --git a/d123/simulation/history/simulation_history.py b/d123/simulation/history/simulation_history.py index d40b7dc6..b5488780 100644 --- a/d123/simulation/history/simulation_history.py +++ b/d123/simulation/history/simulation_history.py @@ -5,7 +5,7 @@ from d123.common.datatypes.recording.detection_recording import DetectionRecording from d123.common.datatypes.vehicle_state.ego_state import EgoStateSE2 -from d123.dataset.scene.abstract_scene import AbstractScene +from d123.datasets.scene.abstract_scene import AbstractScene from d123.simulation.planning.planner_output.abstract_planner_output import AbstractPlannerOutput from d123.simulation.time_controller.simulation_iteration import SimulationIteration diff --git a/d123/simulation/history/simulation_history_buffer.py b/d123/simulation/history/simulation_history_buffer.py index 
47a17ac5..21bfdf7d 100644 --- a/d123/simulation/history/simulation_history_buffer.py +++ b/d123/simulation/history/simulation_history_buffer.py @@ -6,7 +6,7 @@ from d123.common.datatypes.recording.abstract_recording import Recording from d123.common.datatypes.recording.detection_recording import DetectionRecording from d123.common.datatypes.vehicle_state.ego_state import EgoStateSE2 -from d123.dataset.scene.abstract_scene import AbstractScene +from d123.datasets.scene.abstract_scene import AbstractScene class Simulation2DHistoryBuffer: diff --git a/d123/simulation/metrics/sim_agents/interaction_based.py b/d123/simulation/metrics/sim_agents/interaction_based.py index 7cbd9b92..992c837b 100644 --- a/d123/simulation/metrics/sim_agents/interaction_based.py +++ b/d123/simulation/metrics/sim_agents/interaction_based.py @@ -3,7 +3,7 @@ import numpy as np import numpy.typing as npt -from d123.dataset.arrow.conversion import BoxDetectionWrapper +from d123.datatypes.scene.arrow.utils.conversion import BoxDetectionWrapper from d123.geometry.geometry_index import BoundingBoxSE2Index from d123.geometry.utils.bounding_box_utils import bbse2_array_to_polygon_array diff --git a/d123/simulation/metrics/sim_agents/map_based.py b/d123/simulation/metrics/sim_agents/map_based.py index 134a3c5e..d204eeb3 100644 --- a/d123/simulation/metrics/sim_agents/map_based.py +++ b/d123/simulation/metrics/sim_agents/map_based.py @@ -4,9 +4,9 @@ import numpy.typing as npt import shapely -from d123.dataset.maps.abstract_map import AbstractMap -from d123.dataset.maps.abstract_map_objects import AbstractLane -from d123.dataset.maps.map_datatypes import MapLayer +from d123.datasets.maps.abstract_map import AbstractMap +from d123.datasets.maps.abstract_map_objects import AbstractLane +from d123.datasets.maps.map_datatypes import MapLayer from d123.geometry.geometry_index import BoundingBoxSE2Index, Corners2DIndex, StateSE2Index from d123.geometry.se import StateSE2 from d123.geometry.utils.bounding_box_utils import bbse2_array_to_corners_array diff --git a/d123/simulation/metrics/sim_agents/sim_agents.py b/d123/simulation/metrics/sim_agents/sim_agents.py index 36033e70..225e6a80 100644 --- a/d123/simulation/metrics/sim_agents/sim_agents.py +++ b/d123/simulation/metrics/sim_agents/sim_agents.py @@ -5,8 +5,8 @@ import numpy.typing as npt from d123.common.datatypes.detection.detection import BoxDetection, BoxDetectionWrapper, DetectionType -from d123.dataset.maps.abstract_map import AbstractMap -from d123.dataset.scene.abstract_scene import AbstractScene +from d123.datasets.maps.abstract_map import AbstractMap +from d123.datasets.scene.abstract_scene import AbstractScene from d123.geometry.geometry_index import BoundingBoxSE2Index from d123.simulation.metrics.sim_agents.histogram_metric import ( BinaryHistogramIntersectionMetric, diff --git a/d123/simulation/metrics/sim_agents/utils.py b/d123/simulation/metrics/sim_agents/utils.py index c9d2bb3b..9151ffd6 100644 --- a/d123/simulation/metrics/sim_agents/utils.py +++ b/d123/simulation/metrics/sim_agents/utils.py @@ -4,7 +4,7 @@ import numpy.typing as npt from d123.common.datatypes.detection.detection import BoxDetectionWrapper -from d123.dataset.scene.abstract_scene import AbstractScene +from d123.datasets.scene.abstract_scene import AbstractScene from d123.geometry.geometry_index import BoundingBoxSE2Index diff --git a/d123/simulation/observation/abstract_observation.py b/d123/simulation/observation/abstract_observation.py index dfba70af..ae8f0293 100644 --- 
a/d123/simulation/observation/abstract_observation.py +++ b/d123/simulation/observation/abstract_observation.py @@ -5,7 +5,7 @@ from d123.common.datatypes.recording.abstract_recording import Recording from d123.common.datatypes.recording.detection_recording import DetectionRecording from d123.common.datatypes.vehicle_state.ego_state import EgoStateSE2 -from d123.dataset.scene.abstract_scene import AbstractScene +from d123.datasets.scene.abstract_scene import AbstractScene from d123.simulation.time_controller.simulation_iteration import SimulationIteration diff --git a/d123/simulation/observation/agents_observation.py b/d123/simulation/observation/agents_observation.py index a31d6d67..be31ce5f 100644 --- a/d123/simulation/observation/agents_observation.py +++ b/d123/simulation/observation/agents_observation.py @@ -5,8 +5,8 @@ from d123.common.datatypes.recording.abstract_recording import Recording from d123.common.datatypes.recording.detection_recording import DetectionRecording from d123.common.datatypes.vehicle_state.ego_state import EgoStateSE2 -from d123.dataset.arrow.conversion import BoxDetectionWrapper -from d123.dataset.scene.abstract_scene import AbstractScene +from d123.datasets.scene.abstract_scene import AbstractScene +from d123.datatypes.scene.arrow.utils.conversion import BoxDetectionWrapper from d123.simulation.agents.abstract_agents import AbstractAgents # from d123.simulation.agents.path_following import PathFollowingAgents diff --git a/d123/simulation/observation/log_replay_observation.py b/d123/simulation/observation/log_replay_observation.py index 6e0b6b85..0a986ca3 100644 --- a/d123/simulation/observation/log_replay_observation.py +++ b/d123/simulation/observation/log_replay_observation.py @@ -3,7 +3,7 @@ from d123.common.datatypes.recording.abstract_recording import Recording from d123.common.datatypes.recording.detection_recording import DetectionRecording from d123.common.datatypes.vehicle_state.ego_state import EgoStateSE2 -from d123.dataset.scene.abstract_scene import AbstractScene +from d123.datasets.scene.abstract_scene import AbstractScene from d123.simulation.observation.abstract_observation import AbstractObservation from d123.simulation.time_controller.simulation_iteration import SimulationIteration diff --git a/d123/simulation/planning/abstract_planner.py b/d123/simulation/planning/abstract_planner.py index 1746d857..3a0f486d 100644 --- a/d123/simulation/planning/abstract_planner.py +++ b/d123/simulation/planning/abstract_planner.py @@ -4,7 +4,7 @@ from dataclasses import dataclass from typing import List -from d123.dataset.maps.abstract_map import AbstractMap +from d123.datasets.maps.abstract_map import AbstractMap from d123.simulation.history.simulation_history_buffer import Simulation2DHistoryBuffer from d123.simulation.time_controller.simulation_iteration import SimulationIteration diff --git a/d123/simulation/simulation_2d.py b/d123/simulation/simulation_2d.py index 0be93c05..8a4a517a 100644 --- a/d123/simulation/simulation_2d.py +++ b/d123/simulation/simulation_2d.py @@ -3,7 +3,7 @@ import logging from typing import Any, Optional, Tuple, Type -from d123.dataset.scene.abstract_scene import AbstractScene +from d123.datasets.scene.abstract_scene import AbstractScene from d123.simulation.callback.abstract_callback import AbstractCallback from d123.simulation.callback.multi_callback import MultiCallback from d123.simulation.history.simulation_history import Simulation2DHistory, Simulation2DHistorySample diff --git 
a/d123/simulation/time_controller/abstract_time_controller.py b/d123/simulation/time_controller/abstract_time_controller.py index 788acfcb..1da25641 100644 --- a/d123/simulation/time_controller/abstract_time_controller.py +++ b/d123/simulation/time_controller/abstract_time_controller.py @@ -3,7 +3,7 @@ import abc from typing import Tuple -from d123.dataset.scene.abstract_scene import AbstractScene +from d123.datasets.scene.abstract_scene import AbstractScene from d123.simulation.time_controller.simulation_iteration import SimulationIteration diff --git a/d123/simulation/time_controller/log_time_controller.py b/d123/simulation/time_controller/log_time_controller.py index b358a24c..363e43e7 100644 --- a/d123/simulation/time_controller/log_time_controller.py +++ b/d123/simulation/time_controller/log_time_controller.py @@ -1,6 +1,6 @@ from typing import Optional, Tuple -from d123.dataset.scene.abstract_scene import AbstractScene +from d123.datasets.scene.abstract_scene import AbstractScene from d123.simulation.time_controller.abstract_time_controller import ( AbstractTimeController, ) diff --git a/d123/training/feature_builder/smart_feature_builder.py b/d123/training/feature_builder/smart_feature_builder.py index 9ea931bf..439a4830 100644 --- a/d123/training/feature_builder/smart_feature_builder.py +++ b/d123/training/feature_builder/smart_feature_builder.py @@ -8,14 +8,14 @@ from d123.common.datatypes.detection.detection import BoxDetection, BoxDetectionWrapper from d123.common.datatypes.detection.detection_types import DetectionType from d123.common.visualization.color.default import TrafficLightStatus -from d123.dataset.maps.abstract_map import MapLayer -from d123.dataset.maps.abstract_map_objects import ( +from d123.datasets.maps.abstract_map import MapLayer +from d123.datasets.maps.abstract_map_objects import ( AbstractCarpark, AbstractCrosswalk, AbstractGenericDrivable, AbstractLaneGroup, ) -from d123.dataset.scene.abstract_scene import AbstractScene +from d123.datasets.scene.abstract_scene import AbstractScene from d123.geometry import BoundingBoxSE2, PolylineSE2, StateSE2 from d123.geometry.geometry_index import StateSE2Index from d123.geometry.transform.transform_se2 import convert_absolute_to_relative_se2_array From 9ece564ecc780e24baf365fc567879caf3623f2d Mon Sep 17 00:00:00 2001 From: Daniel Dauner Date: Mon, 22 Sep 2025 20:34:57 +0200 Subject: [PATCH 042/145] Few minor fixes and comments --- .../common/visualization/matplotlib/camera.py | 8 ++--- d123/common/visualization/viser/server.py | 6 ++-- d123/common/visualization/viser/utils.py | 17 ++++------ d123/common/visualization/viser/utils_v2.py | 34 +++---------------- d123/datatypes/detections/detection.py | 18 +++++----- 5 files changed, 28 insertions(+), 55 deletions(-) diff --git a/d123/common/visualization/matplotlib/camera.py b/d123/common/visualization/matplotlib/camera.py index 6bdee5f6..bc33d0dd 100644 --- a/d123/common/visualization/matplotlib/camera.py +++ b/d123/common/visualization/matplotlib/camera.py @@ -10,11 +10,11 @@ # from PIL import ImageColor from pyquaternion import Quaternion -from d123.common.datatypes.detection.detection import BoxDetectionSE3, BoxDetectionWrapper -from d123.common.datatypes.detection.detection_types import DetectionType -from d123.common.datatypes.sensor.camera import Camera -from d123.common.datatypes.vehicle_state.ego_state import EgoStateSE3 from d123.common.visualization.color.default import BOX_DETECTION_CONFIG +from d123.datatypes.detections.detection import BoxDetectionSE3, 
BoxDetectionWrapper +from d123.datatypes.detections.detection_types import DetectionType +from d123.datatypes.sensors.camera import Camera +from d123.datatypes.vehicle_state.ego_state import EgoStateSE3 from d123.geometry import BoundingBoxSE3Index, Corners3DIndex from d123.geometry.transform.transform_euler_se3 import convert_absolute_to_relative_euler_se3_array diff --git a/d123/common/visualization/viser/server.py b/d123/common/visualization/viser/server.py index ed5f7a17..6b44ac15 100644 --- a/d123/common/visualization/viser/server.py +++ b/d123/common/visualization/viser/server.py @@ -5,8 +5,6 @@ import trimesh import viser -from d123.common.datatypes.sensor.camera import CameraType -from d123.common.datatypes.sensor.lidar import LiDARType from d123.common.visualization.viser.utils import ( get_bounding_box_meshes, get_camera_if_available, @@ -15,7 +13,9 @@ get_map_meshes, ) from d123.common.visualization.viser.utils_v2 import get_bounding_box_outlines -from d123.datasets.scene.abstract_scene import AbstractScene +from d123.datatypes.scene.abstract_scene import AbstractScene +from d123.datatypes.sensors.camera import CameraType +from d123.datatypes.sensors.lidar import LiDARType # TODO: Try to fix performance issues. # TODO: Refactor this file. diff --git a/d123/common/visualization/viser/utils.py b/d123/common/visualization/viser/utils.py index af209718..8c6236ce 100644 --- a/d123/common/visualization/viser/utils.py +++ b/d123/common/visualization/viser/utils.py @@ -1,20 +1,18 @@ -from typing import List, Optional, Tuple +from typing import Final, List, Optional, Tuple import numpy as np import numpy.typing as npt import trimesh -from pyquaternion import Quaternion -from typing_extensions import Final +from pyquaternion import Quaternion # TODO: remove -# from d123.common.datatypes.sensor.camera_parameters import get_nuplan_camera_parameters -from d123.common.datatypes.sensor.camera import Camera, CameraType -from d123.common.datatypes.sensor.lidar import LiDARType from d123.common.visualization.color.color import TAB_10, Color from d123.common.visualization.color.config import PlotConfig from d123.common.visualization.color.default import BOX_DETECTION_CONFIG, EGO_VEHICLE_CONFIG, MAP_SURFACE_CONFIG -from d123.datasets.maps.abstract_map import MapLayer -from d123.datasets.maps.abstract_map_objects import AbstractLane, AbstractSurfaceMapObject -from d123.datasets.scene.abstract_scene import AbstractScene +from d123.datatypes.maps.abstract_map import MapLayer +from d123.datatypes.maps.abstract_map_objects import AbstractLane, AbstractSurfaceMapObject +from d123.datatypes.scene.abstract_scene import AbstractScene +from d123.datatypes.sensors.camera import Camera, CameraType +from d123.datatypes.sensors.lidar import LiDARType from d123.geometry import BoundingBoxSE3, EulerStateSE3, Point3D, Polyline3D from d123.geometry.transform.transform_euler_se3 import convert_relative_to_absolute_points_3d_array @@ -103,7 +101,6 @@ def get_map_meshes(scene: AbstractScene): ] map_objects_dict = scene.map_api.get_proximal_map_objects(center.point_2d, radius=MAP_RADIUS, layers=map_layers) - print(map_objects_dict.keys()) output = {} for map_layer in map_objects_dict.keys(): diff --git a/d123/common/visualization/viser/utils_v2.py b/d123/common/visualization/viser/utils_v2.py index 9b4fd8d8..16a1a15b 100644 --- a/d123/common/visualization/viser/utils_v2.py +++ b/d123/common/visualization/viser/utils_v2.py @@ -3,43 +3,18 @@ from d123.common.visualization.color.default import BOX_DETECTION_CONFIG, 
EGO_VEHICLE_CONFIG from d123.common.visualization.viser.utils import BRIGHTNESS_FACTOR -from d123.datasets.scene.abstract_scene import AbstractScene - -# from d123.common.datatypes.sensor.camera_parameters import get_nuplan_camera_parameters -from d123.geometry import BoundingBoxSE3, Corners3DIndex, Point3D, Point3DIndex, Vector3D -from d123.geometry.transform.transform_euler_se3 import translate_euler_se3_along_body_frame +from d123.datatypes.scene.abstract_scene import AbstractScene +from d123.geometry import BoundingBoxSE3, Corners3DIndex, Point3D, Point3DIndex # TODO: Refactor this file. # TODO: Add general utilities for 3D primitives and mesh support. -def _get_bounding_box_corners(bounding_box: BoundingBoxSE3) -> npt.NDArray[np.float64]: - """ - Get the vertices of a bounding box in 3D space. - """ - corner_extent_factors = { - Corners3DIndex.FRONT_LEFT_BOTTOM: Vector3D(+0.5, -0.5, -0.5), - Corners3DIndex.FRONT_RIGHT_BOTTOM: Vector3D(+0.5, +0.5, -0.5), - Corners3DIndex.BACK_RIGHT_BOTTOM: Vector3D(-0.5, +0.5, -0.5), - Corners3DIndex.BACK_LEFT_BOTTOM: Vector3D(-0.5, -0.5, -0.5), - Corners3DIndex.FRONT_LEFT_TOP: Vector3D(+0.5, -0.5, +0.5), - Corners3DIndex.FRONT_RIGHT_TOP: Vector3D(+0.5, +0.5, +0.5), - Corners3DIndex.BACK_RIGHT_TOP: Vector3D(-0.5, +0.5, +0.5), - Corners3DIndex.BACK_LEFT_TOP: Vector3D(-0.5, -0.5, +0.5), - } - corners = np.zeros((len(Corners3DIndex), len(Point3DIndex)), dtype=np.float64) - bounding_box_extent = np.array([bounding_box.length, bounding_box.width, bounding_box.height], dtype=np.float64) - for idx, vec in corner_extent_factors.items(): - vector_3d = Vector3D.from_array(bounding_box_extent * vec.array) - corners[idx] = translate_euler_se3_along_body_frame(bounding_box.center, vector_3d).point_3d.array - return corners - - def _get_bounding_box_lines(bounding_box: BoundingBoxSE3) -> npt.NDArray[np.float64]: """ - Get the edges of a bounding box in 3D space as a Polyline3D. + TODO: Vectorize this function and move to geometry module. 
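+    Returns the box edges as an array of (start, end) corner-point pairs.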
""" - corners = _get_bounding_box_corners(bounding_box) + corners = bounding_box.corners_array index_pairs = [ (Corners3DIndex.FRONT_LEFT_BOTTOM, Corners3DIndex.FRONT_RIGHT_BOTTOM), (Corners3DIndex.FRONT_RIGHT_BOTTOM, Corners3DIndex.BACK_RIGHT_BOTTOM), @@ -62,6 +37,7 @@ def _get_bounding_box_lines(bounding_box: BoundingBoxSE3) -> npt.NDArray[np.floa def translate_points_3d(points_3d: npt.NDArray[np.float64], point_3d: Point3D) -> npt.NDArray[np.float64]: + # TODO: remove return points_3d - point_3d.array diff --git a/d123/datatypes/detections/detection.py b/d123/datatypes/detections/detection.py index bf855679..36747b59 100644 --- a/d123/datatypes/detections/detection.py +++ b/d123/datatypes/detections/detection.py @@ -1,13 +1,13 @@ from dataclasses import dataclass from functools import cached_property -from typing import Iterable +from typing import Iterable, Optional, Union import shapely from d123.common.utils.enums import SerialIntEnum from d123.datatypes.detections.detection_types import DetectionType from d123.datatypes.time.time_point import TimePoint -from d123.geometry import BoundingBoxSE2, BoundingBoxSE3, EulerStateSE3, OccupancyMap2D, StateSE2, Vector2D, Vector3D +from d123.geometry import BoundingBoxSE2, BoundingBoxSE3, OccupancyMap2D, StateSE2, StateSE3, Vector2D, Vector3D @dataclass @@ -16,7 +16,7 @@ class BoxDetectionMetadata: detection_type: DetectionType timepoint: TimePoint track_token: str - confidence: float | None = None + confidence: Optional[float] = None @dataclass @@ -44,18 +44,18 @@ class BoxDetectionSE3: metadata: BoxDetectionMetadata bounding_box_se3: BoundingBoxSE3 - velocity: Vector3D | None = None + velocity: Optional[Vector3D] = None @property def shapely_polygon(self) -> shapely.geometry.Polygon: return self.bounding_box_se3.shapely_polygon @property - def center(self) -> EulerStateSE3: + def center(self) -> StateSE3: return self.bounding_box_se3.center @property - def center_se3(self) -> EulerStateSE3: + def center_se3(self) -> StateSE3: return self.bounding_box_se3.center_se3 @property @@ -75,7 +75,7 @@ def box_detection_se2(self) -> BoxDetectionSE2: ) -BoxDetection = BoxDetectionSE2 | BoxDetectionSE3 +BoxDetection = Union[BoxDetectionSE2, BoxDetectionSE3] @dataclass @@ -143,8 +143,8 @@ def __len__(self) -> int: def __iter__(self): return iter(self.traffic_light_detections) - def get_detection_by_lane_id(self, lane_id: int) -> TrafficLightDetection | None: - traffic_light_detection: TrafficLightDetection | None = None + def get_detection_by_lane_id(self, lane_id: int) -> Optional[TrafficLightDetection]: + traffic_light_detection: Optional[TrafficLightDetection] = None for detection in self.traffic_light_detections: if int(detection.lane_id) == int(lane_id): traffic_light_detection = detection From 3d4f9113418daf24c7562a7fbfbe39696557a9ed Mon Sep 17 00:00:00 2001 From: Daniel Dauner Date: Tue, 23 Sep 2025 21:42:44 +0200 Subject: [PATCH 043/145] Minor refactors and adding quaternion based SE3 in more datatypes (#43) --- .pre-commit-config.yaml | 1 + d123/datatypes/detections/detection.py | 4 +- d123/datatypes/vehicle_state/ego_state.py | 59 +++++++++++-------- .../vehicle_state/vehicle_parameters.py | 12 ++-- d123/geometry/__init__.py | 12 ++-- d123/geometry/geometry_index.py | 10 +++- d123/geometry/se.py | 45 +++++++++++--- d123/geometry/utils/rotation_utils.py | 1 + 8 files changed, 96 insertions(+), 48 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 251ae7fb..cfcbd787 100644 --- a/.pre-commit-config.yaml +++ 
b/.pre-commit-config.yaml @@ -24,6 +24,7 @@ repos: - id: isort name: isort (python) args: ["--profile", "black", "--filter-files", '--line-length', '120'] + exclude: __init__.py$ - repo: https://github.com/ambv/black rev: 25.1.0 hooks: diff --git a/d123/datatypes/detections/detection.py b/d123/datatypes/detections/detection.py index 36747b59..29da95d7 100644 --- a/d123/datatypes/detections/detection.py +++ b/d123/datatypes/detections/detection.py @@ -1,6 +1,6 @@ from dataclasses import dataclass from functools import cached_property -from typing import Iterable, Optional, Union +from typing import Iterable, List, Optional, Union import shapely @@ -92,7 +92,7 @@ def __len__(self) -> int: def __iter__(self): return iter(self.box_detections) - def get_box_detections_by_types(self, detection_types: Iterable[DetectionType]) -> list[BoxDetection]: + def get_box_detections_by_types(self, detection_types: Iterable[DetectionType]) -> List[BoxDetection]: return [detection for detection in self.box_detections if detection.metadata.detection_type in detection_types] def get_detection_by_track_token(self, track_token: str) -> BoxDetection | None: diff --git a/d123/datatypes/vehicle_state/ego_state.py b/d123/datatypes/vehicle_state/ego_state.py index 2198cc6b..cc1aab7d 100644 --- a/d123/datatypes/vehicle_state/ego_state.py +++ b/d123/datatypes/vehicle_state/ego_state.py @@ -19,43 +19,51 @@ rear_axle_se2_to_center_se2, rear_axle_se3_to_center_se3, ) -from d123.geometry import BoundingBoxSE2, BoundingBoxSE3, EulerStateSE3, StateSE2, Vector2D, Vector3D - -# TODO: Find an appropriate way to handle SE2 and SE3 states. +from d123.geometry import BoundingBoxSE2, BoundingBoxSE3, StateSE2, StateSE3, Vector2D, Vector3D EGO_TRACK_TOKEN: Final[str] = "ego_vehicle" class EgoStateSE3Index(IntEnum): + X = 0 Y = 1 Z = 2 - ROLL = 3 - PITCH = 4 - YAW = 5 - VELOCITY_X = 6 - VELOCITY_Y = 7 - VELOCITY_Z = 8 - ACCELERATION_X = 9 - ACCELERATION_Y = 10 - ACCELERATION_Z = 11 - ANGULAR_VELOCITY_X = 12 - ANGULAR_VELOCITY_Y = 13 - ANGULAR_VELOCITY_Z = 14 + QW = 3 + QX = 4 + QY = 5 + QZ = 6 + VELOCITY_X = 7 + VELOCITY_Y = 8 + VELOCITY_Z = 9 + ACCELERATION_X = 10 + ACCELERATION_Y = 11 + ACCELERATION_Z = 12 + ANGULAR_VELOCITY_X = 13 + ANGULAR_VELOCITY_Y = 14 + ANGULAR_VELOCITY_Z = 15 @classproperty - def SE3(cls) -> slice: - return slice(cls.X, cls.YAW + 1) + def STATE_SE3(cls) -> slice: + return slice(cls.X, cls.QZ + 1) @classproperty def DYNAMIC_VEHICLE_STATE(cls) -> slice: return slice(cls.VELOCITY_X, cls.ANGULAR_VELOCITY_Z + 1) + @classproperty + def SCALAR(cls) -> slice: + return slice(cls.QW, cls.QW + 1) + + @classproperty + def VECTOR(cls) -> slice: + return slice(cls.QX, cls.QZ + 1) + @dataclass class EgoStateSE3: - center_se3: EulerStateSE3 + center_se3: StateSE3 dynamic_state_se3: DynamicStateSE3 vehicle_parameters: VehicleParameters timepoint: Optional[TimePoint] = None @@ -68,14 +76,14 @@ def from_array( vehicle_parameters: VehicleParameters, timepoint: Optional[TimePoint] = None, ) -> EgoStateSE3: - state_se3 = EulerStateSE3.from_array(array[EgoStateSE3Index.SE3]) + state_se3 = StateSE3.from_array(array[EgoStateSE3Index.STATE_SE3]) dynamic_state = DynamicStateSE3.from_array(array[EgoStateSE3Index.DYNAMIC_VEHICLE_STATE]) return EgoStateSE3(state_se3, dynamic_state, vehicle_parameters, timepoint) @classmethod def from_rear_axle( cls, - rear_axle_se3: EulerStateSE3, + rear_axle_se3: StateSE3, dynamic_state_se3: DynamicStateSE3, vehicle_parameters: VehicleParameters, time_point: TimePoint, @@ -96,7 +104,7 @@ def array(self) 
-> npt.NDArray[np.float64]: Convert the EgoVehicleState to an array. :return: An array containing the bounding box and dynamic state information. """ - assert isinstance(self.center_se3, EulerStateSE3) + assert isinstance(self.center_se3, StateSE3) assert isinstance(self.dynamic_state_se3, DynamicStateSE3) center_array = self.center_se3.array @@ -105,11 +113,11 @@ def array(self) -> npt.NDArray[np.float64]: return np.concatenate((center_array, dynamic_array), axis=0) @property - def center(self) -> EulerStateSE3: + def center(self) -> StateSE3: return self.center_se3 @property - def rear_axle_se3(self) -> EulerStateSE3: + def rear_axle_se3(self) -> StateSE3: return center_se3_to_rear_axle_se3(center_se3=self.center_se3, vehicle_parameters=self.vehicle_parameters) @property @@ -117,7 +125,7 @@ def rear_axle_se2(self) -> StateSE2: return self.rear_axle_se3.state_se2 @property - def rear_axle(self) -> EulerStateSE3: + def rear_axle(self) -> StateSE3: return self.rear_axle_se3 @cached_property @@ -265,6 +273,8 @@ def ANGULAR_VELOCITY(cls) -> slice: @dataclass class DynamicStateSE3: + # TODO: Make class array like + velocity: Vector3D acceleration: Vector3D angular_velocity: Vector3D @@ -322,6 +332,7 @@ def dynamic_state_se2(self) -> DynamicStateSE2: @dataclass class DynamicStateSE2: + velocity: Vector2D acceleration: Vector2D angular_velocity: float diff --git a/d123/datatypes/vehicle_state/vehicle_parameters.py b/d123/datatypes/vehicle_state/vehicle_parameters.py index cd9aeb1c..0d7f3d01 100644 --- a/d123/datatypes/vehicle_state/vehicle_parameters.py +++ b/d123/datatypes/vehicle_state/vehicle_parameters.py @@ -1,8 +1,8 @@ from dataclasses import dataclass -from d123.geometry import EulerStateSE3, StateSE2, Vector2D, Vector3D -from d123.geometry.transform.transform_euler_se3 import translate_euler_se3_along_body_frame +from d123.geometry import StateSE2, StateSE3, Vector2D, Vector3D from d123.geometry.transform.transform_se2 import translate_se2_along_body_frame +from d123.geometry.transform.transform_se3 import translate_se3_along_body_frame @dataclass @@ -74,14 +74,14 @@ def get_av2_ford_fusion_hybrid_parameters() -> VehicleParameters: ) -def center_se3_to_rear_axle_se3(center_se3: EulerStateSE3, vehicle_parameters: VehicleParameters) -> EulerStateSE3: +def center_se3_to_rear_axle_se3(center_se3: StateSE3, vehicle_parameters: VehicleParameters) -> StateSE3: """ Converts a center state to a rear axle state. :param center_se3: The center state. :param vehicle_parameters: The vehicle parameters. :return: The rear axle state. """ - return translate_euler_se3_along_body_frame( + return translate_se3_along_body_frame( center_se3, Vector3D( -vehicle_parameters.rear_axle_to_center_longitudinal, @@ -91,14 +91,14 @@ def center_se3_to_rear_axle_se3(center_se3: EulerStateSE3, vehicle_parameters: V ) -def rear_axle_se3_to_center_se3(rear_axle_se3: EulerStateSE3, vehicle_parameters: VehicleParameters) -> EulerStateSE3: +def rear_axle_se3_to_center_se3(rear_axle_se3: StateSE3, vehicle_parameters: VehicleParameters) -> StateSE3: """ Converts a rear axle state to a center state. :param rear_axle_se3: The rear axle state. :param vehicle_parameters: The vehicle parameters. :return: The center state. 
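+    Note: the rear axle sits ``rear_axle_to_center_longitudinal`` behind the center along the body x-axis.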
""" - return translate_euler_se3_along_body_frame( + return translate_se3_along_body_frame( rear_axle_se3, Vector3D( vehicle_parameters.rear_axle_to_center_longitudinal, diff --git a/d123/geometry/__init__.py b/d123/geometry/__init__.py index 678f2cdf..e022c86a 100644 --- a/d123/geometry/__init__.py +++ b/d123/geometry/__init__.py @@ -1,22 +1,22 @@ -from d123.geometry.bounding_box import BoundingBoxSE2, BoundingBoxSE3 from d123.geometry.geometry_index import ( + Point2DIndex, + Point3DIndex, BoundingBoxSE2Index, BoundingBoxSE3Index, Corners2DIndex, Corners3DIndex, EulerAnglesIndex, EulerStateSE3Index, - Point2DIndex, - Point3DIndex, QuaternionIndex, StateSE2Index, StateSE3Index, Vector2DIndex, Vector3DIndex, ) -from d123.geometry.occupancy_map import OccupancyMap2D from d123.geometry.point import Point2D, Point3D -from d123.geometry.polyline import Polyline2D, Polyline3D, PolylineSE2 +from d123.geometry.vector import Vector2D, Vector3D from d123.geometry.rotation import EulerAngles, Quaternion from d123.geometry.se import EulerStateSE3, StateSE2, StateSE3 -from d123.geometry.vector import Vector2D, Vector3D +from d123.geometry.bounding_box import BoundingBoxSE2, BoundingBoxSE3 +from d123.geometry.polyline import Polyline2D, Polyline3D, PolylineSE2 +from d123.geometry.occupancy_map import OccupancyMap2D diff --git a/d123/geometry/geometry_index.py b/d123/geometry/geometry_index.py index 1da7c945..aa42cc4b 100644 --- a/d123/geometry/geometry_index.py +++ b/d123/geometry/geometry_index.py @@ -157,7 +157,7 @@ def QUATERNION(cls) -> slice: @classproperty def SCALAR(cls) -> slice: - return cls.QW + return slice(cls.QW, cls.QW + 1) @classproperty def VECTOR(cls) -> slice: @@ -234,6 +234,14 @@ def QUATERNION(cls) -> slice: def EXTENT(cls) -> slice: return slice(cls.LENGTH, cls.HEIGHT + 1) + @classproperty + def SCALAR(cls) -> slice: + return slice(cls.QW, cls.QW + 1) + + @classproperty + def VECTOR(cls) -> slice: + return slice(cls.QX, cls.QZ + 1) + class Corners3DIndex(IntEnum): """ diff --git a/d123/geometry/se.py b/d123/geometry/se.py index 6538bfe9..a35b9c4c 100644 --- a/d123/geometry/se.py +++ b/d123/geometry/se.py @@ -253,6 +253,30 @@ def euler_angles(self) -> EulerAngles: """ return self.quaternion.euler_angles + @property + def roll(self) -> float: + """The roll (x-axis rotation) angle in radians. + + :return: The roll angle in radians. + """ + return self.euler_angles.roll + + @property + def pitch(self) -> float: + """The pitch (y-axis rotation) angle in radians. + + :return: The pitch angle in radians. + """ + return self.euler_angles.pitch + + @property + def yaw(self) -> float: + """The yaw (z-axis rotation) angle in radians. + + :return: The yaw angle in radians. + """ + return self.euler_angles.yaw + @property def rotation_matrix(self) -> npt.NDArray[np.float64]: """Returns the 3x3 rotation matrix representation of the state's orientation. @@ -261,11 +285,22 @@ def rotation_matrix(self) -> npt.NDArray[np.float64]: """ return self.quaternion.rotation_matrix + @property + def transformation_matrix(self) -> npt.NDArray[np.float64]: + """Returns the 4x4 transformation matrix representation of the state. + + :return: A 4x4 numpy array representing the transformation matrix. + """ + transformation_matrix = np.eye(4, dtype=np.float64) + transformation_matrix[:3, :3] = self.rotation_matrix + transformation_matrix[:3, 3] = self.array[StateSE3Index.XYZ] + return transformation_matrix + class EulerStateSE3(ArrayMixin): """ Class to represents a 3D pose as SE3 (x, y, z, roll, pitch, yaw). 
- TODO: Use quaternions for rotation representation. + NOTE: This class is deprecated, use :class:`~d123.geometry.StateSE3` instead (quaternion based). """ _array: npt.NDArray[np.float64] @@ -450,11 +485,3 @@ def __iter__(self) -> Iterable[float]: def __hash__(self) -> int: """Hash method""" return hash((self.x, self.y, self.z, self.roll, self.pitch, self.yaw)) - - def __matmul__(self, other: EulerStateSE3) -> EulerStateSE3: - """Combines two SE3 states by applying the transformation of the other state to this state. - - :param other: Another StateSE3 instance representing the transformation to apply. - :return: A new StateSE3 instance representing the combined transformation. - """ - return EulerStateSE3.from_transformation_matrix(self.transformation_matrix @ other.transformation_matrix) diff --git a/d123/geometry/utils/rotation_utils.py b/d123/geometry/utils/rotation_utils.py index 3d3248ce..499b98bc 100644 --- a/d123/geometry/utils/rotation_utils.py +++ b/d123/geometry/utils/rotation_utils.py @@ -178,6 +178,7 @@ def get_rotation_matrices_from_quaternion_array(quaternion_array: npt.NDArray[np def get_rotation_matrix_from_quaternion_array(quaternion_array: npt.NDArray[np.float64]) -> npt.NDArray[np.float64]: + # TODO: Check if this function is necessary or batch-wise function is universally applicable assert quaternion_array.ndim == 1 and quaternion_array.shape[0] == len(QuaternionIndex) return get_rotation_matrices_from_quaternion_array(quaternion_array[None, :])[0] From e67fd6b737d881d524f114d35c0f1f0e0d67c546 Mon Sep 17 00:00:00 2001 From: Daniel Dauner Date: Tue, 23 Sep 2025 22:37:41 +0200 Subject: [PATCH 044/145] Unfinished updating of conversion scripts to quats (#43) --- d123/common/utils/dependencies.py | 4 +- d123/common/visualization/matplotlib/utils.py | 4 +- d123/common/visualization/viser/utils.py | 4 +- d123/datasets/av2/av2_data_converter.py | 82 ++++++------------- d123/datasets/nuplan/nuplan_data_converter.py | 33 ++++---- d123/datasets/wopd/wopd_data_converter.py | 10 +-- 6 files changed, 48 insertions(+), 89 deletions(-) diff --git a/d123/common/utils/dependencies.py b/d123/common/utils/dependencies.py index d750061e..547947e3 100644 --- a/d123/common/utils/dependencies.py +++ b/d123/common/utils/dependencies.py @@ -2,8 +2,8 @@ def check_dependencies(modules: Union[str, List[str,]], optional_name: str) -> None: - """ - Checks if the given modules can be imported, otherwise raises an ImportError with a message + """Checks if the given modules can be imported, otherwise raises an ImportError with a message + :param modules: Module name or list of module names to check :param optional_name: Name of the optional feature :raises ImportError: If any of the modules cannot be imported diff --git a/d123/common/visualization/matplotlib/utils.py b/d123/common/visualization/matplotlib/utils.py index 1742a864..9e030b80 100644 --- a/d123/common/visualization/matplotlib/utils.py +++ b/d123/common/visualization/matplotlib/utils.py @@ -9,7 +9,7 @@ from matplotlib.path import Path from d123.common.visualization.color.config import PlotConfig -from d123.geometry import EulerStateSE3, StateSE2 +from d123.geometry import StateSE2, StateSE3 def add_shapely_polygon_to_ax( @@ -114,7 +114,7 @@ def get_pose_triangle(size: float) -> geom.Polygon: def shapely_geometry_local_coords( - geometry: geom.base.BaseGeometry, origin: Union[StateSE2, EulerStateSE3] + geometry: geom.base.BaseGeometry, origin: Union[StateSE2, StateSE3] ) -> geom.base.BaseGeometry: """Helper for transforming shapely geometry 
in coord-frame""" # TODO: move somewhere else for general use diff --git a/d123/common/visualization/viser/utils.py b/d123/common/visualization/viser/utils.py index 8c6236ce..783efa01 100644 --- a/d123/common/visualization/viser/utils.py +++ b/d123/common/visualization/viser/utils.py @@ -13,7 +13,7 @@ from d123.datatypes.scene.abstract_scene import AbstractScene from d123.datatypes.sensors.camera import Camera, CameraType from d123.datatypes.sensors.lidar import LiDARType -from d123.geometry import BoundingBoxSE3, EulerStateSE3, Point3D, Polyline3D +from d123.geometry import BoundingBoxSE3, Point3D, Polyline3D, StateSE3 from d123.geometry.transform.transform_euler_se3 import convert_relative_to_absolute_points_3d_array # TODO: Refactor this file. @@ -226,7 +226,7 @@ def get_camera_values(scene: AbstractScene, camera: Camera, iteration: int) -> T rear_axle_array = rear_axle.array rear_axle_array[:3] -= initial_point_3d.array - rear_axle = EulerStateSE3.from_array(rear_axle_array) + rear_axle = StateSE3.from_array(rear_axle_array) camera_to_ego = camera.extrinsic # 4x4 transformation from camera to ego frame diff --git a/d123/datasets/av2/av2_data_converter.py b/d123/datasets/av2/av2_data_converter.py index 11e2549b..e13bbf04 100644 --- a/d123/datasets/av2/av2_data_converter.py +++ b/d123/datasets/av2/av2_data_converter.py @@ -9,7 +9,6 @@ import numpy as np import pandas as pd import pyarrow as pa -from pyquaternion import Quaternion from d123.common.multithreading.worker_utils import WorkerPool, worker_map from d123.datasets.av2.av2_constants import ( @@ -34,12 +33,8 @@ get_av2_ford_fusion_hybrid_parameters, rear_axle_se3_to_center_se3, ) -from d123.geometry import BoundingBoxSE3Index, EulerStateSE3, Vector3D, Vector3DIndex -from d123.geometry.transform.transform_euler_se3 import ( - convert_relative_to_absolute_euler_se3_array, - get_rotation_matrix, -) -from d123.geometry.utils.constants import DEFAULT_PITCH, DEFAULT_ROLL +from d123.geometry import BoundingBoxSE3Index, StateSE3, Vector3D, Vector3DIndex +from d123.geometry.transform.transform_se3 import convert_relative_to_absolute_se3_array def create_token(input_data: str) -> str: @@ -382,35 +377,19 @@ def _extract_box_detections( for detection_idx, (_, row) in enumerate(annotations_slice.iterrows()): row = row.to_dict() - yaw, pitch, roll = Quaternion( - w=row["qw"], - x=row["qx"], - y=row["qy"], - z=row["qz"], - ).yaw_pitch_roll - - detections_state[detection_idx, BoundingBoxSE3Index.X] = row["tx_m"] - detections_state[detection_idx, BoundingBoxSE3Index.Y] = row["ty_m"] - detections_state[detection_idx, BoundingBoxSE3Index.Z] = row["tz_m"] - detections_state[detection_idx, BoundingBoxSE3Index.ROLL] = roll - detections_state[detection_idx, BoundingBoxSE3Index.PITCH] = pitch - detections_state[detection_idx, BoundingBoxSE3Index.YAW] = yaw - detections_state[detection_idx, BoundingBoxSE3Index.LENGTH] = row["length_m"] - detections_state[detection_idx, BoundingBoxSE3Index.WIDTH] = row["width_m"] - detections_state[detection_idx, BoundingBoxSE3Index.HEIGHT] = row["height_m"] + + detections_state[detection_idx, BoundingBoxSE3Index.XYZ] = [row["tx_m"], row["ty_m"], row["tz_m"]] + detections_state[detection_idx, BoundingBoxSE3Index.QUATERNION] = [row["qw"], row["qx"], row["qy"], row["qz"]] + detections_state[detection_idx, BoundingBoxSE3Index.EXTENT] = [row["length_m"], row["width_m"], row["height_m"]] av2_detection_type = AV2SensorBoxDetectionType.deserialize(row["category"]) 
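+        # Map the AV2 category onto the shared DetectionType enum used across the converters.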
detections_types.append(int(AV2_TO_DETECTION_TYPE[av2_detection_type])) - detections_state[:, BoundingBoxSE3Index.STATE_SE3] = convert_relative_to_absolute_euler_se3_array( - origin=ego_state_se3.rear_axle_se3, se3_array=detections_state[:, BoundingBoxSE3Index.STATE_SE3] + detections_state[:, BoundingBoxSE3Index.STATE_SE3] = convert_relative_to_absolute_se3_array( + origin=ego_state_se3.rear_axle_se3, + se3_array=detections_state[:, BoundingBoxSE3Index.STATE_SE3], ) - ZERO_BOX_ROLL_PITCH = False # TODO: Add config option or remove - if ZERO_BOX_ROLL_PITCH: - detections_state[:, BoundingBoxSE3Index.ROLL] = DEFAULT_ROLL - detections_state[:, BoundingBoxSE3Index.PITCH] = DEFAULT_PITCH - return detections_state.tolist(), detections_velocity.tolist(), detections_token, detections_types @@ -421,26 +400,19 @@ def _extract_ego_state(city_se3_egovehicle_df: pd.DataFrame, lidar_timestamp_ns: ), f"Expected exactly one ego state for timestamp {lidar_timestamp_ns}, got {len(ego_state_slice)}." ego_pose_dict = ego_state_slice.iloc[0].to_dict() - - ego_pose_quat = Quaternion( - w=ego_pose_dict["qw"], - x=ego_pose_dict["qx"], - y=ego_pose_dict["qy"], - z=ego_pose_dict["qz"], - ) - - yaw, pitch, roll = ego_pose_quat.yaw_pitch_roll - - rear_axle_pose = EulerStateSE3( + rear_axle_pose = StateSE3( x=ego_pose_dict["tx_m"], y=ego_pose_dict["ty_m"], z=ego_pose_dict["tz_m"], - roll=roll, - pitch=pitch, - yaw=yaw, + qw=ego_pose_dict["qw"], + qx=ego_pose_dict["qx"], + qy=ego_pose_dict["qy"], + qz=ego_pose_dict["qz"], ) - vehicle_parameters = get_av2_ford_fusion_hybrid_parameters() # TODO: Add av2 vehicle parameters + + vehicle_parameters = get_av2_ford_fusion_hybrid_parameters() center = rear_axle_se3_to_center_se3(rear_axle_se3=rear_axle_pose, vehicle_parameters=vehicle_parameters) + # TODO: Add script to calculate the dynamic state from log sequence. dynamic_state = DynamicStateSE3( velocity=Vector3D( @@ -495,9 +467,8 @@ def _extract_camera( source_dataset_dir = source_log_path.parent.parent rear_axle_se3 = ego_state_se3.rear_axle_se3 - ego_transform = np.zeros((4, 4), dtype=np.float64) - ego_transform[:3, :3] = get_rotation_matrix(ego_state_se3.rear_axle_se3) - ego_transform[:3, 3] = rear_axle_se3.point_3d.array + ego_transform = rear_axle_se3.transformation_matrix + ego_transform # TODO: Refactor this file, ie. why is the ego transform calculated but not used? for _, row in egovehicle_se3_sensor_df.iterrows(): row = row.to_dict() @@ -521,16 +492,13 @@ def _extract_camera( absolute_image_path = source_dataset_dir / relative_image_path assert absolute_image_path.exists() # TODO: Adjust for finer IMU timestamps to correct the camera extrinsic. 
- camera_extrinsic = np.eye(4, dtype=np.float64) - camera_extrinsic[:3, :3] = Quaternion( - w=row["qw"], - x=row["qx"], - y=row["qy"], - z=row["qz"], - ).rotation_matrix - camera_extrinsic[:3, 3] = np.array([row["tx_m"], row["ty_m"], row["tz_m"]], dtype=np.float64) + + camera_extrinsic = StateSE3( + x=row["tx_m"], y=row["ty_m"], z=row["tz_m"], qw=row["qw"], qx=row["qx"], qy=row["qy"], qz=row["qz"] + ) + # camera_extrinsic = camera_extrinsic @ ego_transform - camera_extrinsic = camera_extrinsic.flatten().tolist() + camera_extrinsic = camera_extrinsic.transformation_matrix.flatten().tolist() if data_converter_config.camera_store_option == "path": camera_dict[camera_type] = (str(relative_image_path), camera_extrinsic) diff --git a/d123/datasets/nuplan/nuplan_data_converter.py b/d123/datasets/nuplan/nuplan_data_converter.py index a7386917..d03dbaf9 100644 --- a/d123/datasets/nuplan/nuplan_data_converter.py +++ b/d123/datasets/nuplan/nuplan_data_converter.py @@ -30,7 +30,8 @@ get_nuplan_chrysler_pacifica_parameters, rear_axle_se3_to_center_se3, ) -from d123.geometry import BoundingBoxSE3, BoundingBoxSE3Index, EulerStateSE3, Vector3D, Vector3DIndex +from d123.geometry import BoundingBoxSE3, BoundingBoxSE3Index, StateSE3, Vector3D, Vector3DIndex +from d123.geometry.rotation import EulerAngles from d123.geometry.utils.constants import DEFAULT_PITCH, DEFAULT_ROLL check_dependencies(["nuplan", "sqlalchemy"], "nuplan") @@ -365,17 +366,19 @@ def _extract_detections(lidar_pc: LidarPc) -> Tuple[List[List[float]], List[List for lidar_box in lidar_pc.lidar_boxes: lidar_box: LidarBox - center = EulerStateSE3( + lidar_quaternion = EulerAngles(roll=DEFAULT_ROLL, pitch=DEFAULT_PITCH, yaw=lidar_box.yaw).quaternion + center = StateSE3( x=lidar_box.x, y=lidar_box.y, z=lidar_box.z, - roll=DEFAULT_ROLL, - pitch=DEFAULT_PITCH, - yaw=lidar_box.yaw, + qw=lidar_quaternion.qw, + qx=lidar_quaternion.qx, + qy=lidar_quaternion.qy, + qz=lidar_quaternion.qz, ) bounding_box_se3 = BoundingBoxSE3(center, lidar_box.length, lidar_box.width, lidar_box.height) - detections_state.append(bounding_box_se3.array) + detections_state.append(bounding_box_se3.tolist()) detections_velocity.append(lidar_box.velocity) detections_token.append(lidar_box.track_token) detections_types.append(int(NUPLAN_DETECTION_NAME_DICT[lidar_box.category.name])) @@ -385,19 +388,16 @@ def _extract_detections(lidar_pc: LidarPc) -> Tuple[List[List[float]], List[List def _extract_ego_state(lidar_pc: LidarPc) -> List[float]: - yaw, pitch, roll = lidar_pc.ego_pose.quaternion.yaw_pitch_roll vehicle_parameters = get_nuplan_chrysler_pacifica_parameters() - # vehicle_parameters = get_pacifica_parameters() - - rear_axle_pose = EulerStateSE3( + rear_axle_pose = StateSE3( x=lidar_pc.ego_pose.x, y=lidar_pc.ego_pose.y, z=lidar_pc.ego_pose.z, - roll=roll, - pitch=pitch, - yaw=yaw, + qw=lidar_pc.ego_pose.qw, + qx=lidar_pc.ego_pose.qx, + qy=lidar_pc.ego_pose.qy, + qz=lidar_pc.ego_pose.qz, ) - # NOTE: The height to rear axle is not provided the dataset and is merely approximated. 
center = rear_axle_se3_to_center_se3(rear_axle_se3=rear_axle_pose, vehicle_parameters=vehicle_parameters) dynamic_state = DynamicStateSE3( velocity=Vector3D( @@ -416,7 +416,6 @@ def _extract_ego_state(lidar_pc: LidarPc) -> List[float]: z=lidar_pc.ego_pose.angular_rate_z, ), ) - return EgoStateSE3( center_se3=center, dynamic_state_se3=dynamic_state, @@ -436,7 +435,6 @@ def _extract_traffic_lights(log_db: NuPlanDB, lidar_pc_token: str) -> Tuple[List def _extract_scenario_tag(log_db: NuPlanDB, lidar_pc_token: str) -> List[str]: - scenario_tags = [ scenario_tag.type for scenario_tag in log_db.scenario_tag.select_many(lidar_pc_token=lidar_pc_token) ] @@ -454,7 +452,6 @@ def _extract_camera( camera_dict: Dict[str, Union[str, bytes]] = {} sensor_root = NUPLAN_DATA_ROOT / "nuplan-v1.1" / "sensor_blobs" - log_cam_infos = {camera.token: camera for camera in log_db.log.cameras} for camera_type, camera_channel in NUPLAN_CAMERA_TYPES.items(): @@ -468,7 +465,7 @@ def _extract_camera( # Code taken from MTGS # https://github.com/OpenDriveLab/MTGS/blob/main/nuplan_scripts/utils/nuplan_utils_custom.py#L117 - + # TODO: Refactor timestamp = image.timestamp + NUPLAN_ROLLING_SHUTTER_S.time_us img_ego_pose: EgoPose = ( log_db.log._session.query(EgoPose).order_by(func.abs(EgoPose.timestamp - timestamp)).first() diff --git a/d123/datasets/wopd/wopd_data_converter.py b/d123/datasets/wopd/wopd_data_converter.py index 1e935329..30c5cdc1 100644 --- a/d123/datasets/wopd/wopd_data_converter.py +++ b/d123/datasets/wopd/wopd_data_converter.py @@ -13,7 +13,6 @@ from pyquaternion import Quaternion from d123.common.multithreading.worker_utils import WorkerPool, worker_map -from d123.common.utils.arrow_helper import open_arrow_table, write_arrow_table from d123.common.utils.dependencies import check_dependencies from d123.datasets.raw_data_converter import DataConverterConfig, RawDataConverter from d123.datasets.wopd.waymo_map_utils.wopd_map_utils import convert_wopd_map @@ -32,6 +31,7 @@ check_dependencies(modules=["tensorflow", "waymo_open_dataset"], optional_name="waymo") import tensorflow as tf from waymo_open_dataset import dataset_pb2 +from waymo_open_dataset.utils import frame_utils # TODO: Make keep_polar_features an optional argument. # With polar features, the lidar loading time is SIGNIFICANTLY higher. @@ -71,7 +71,7 @@ 5: LiDARType.LIDAR_BACK, # REAR } -WOPD_DATA_ROOT = Path("/media/nvme1/waymo_perception") # TODO: set as environment variable +WOPD_DATA_ROOT = Path("/media/nvme1/waymo_perception") # TODO: set as environment variable !!!! 
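+# A possible follow-up (assumption, mirroring the other dataset converters): resolve the
+# data root from the environment instead, e.g. WOPD_DATA_ROOT = Path(os.environ["WOPD_DATA_ROOT"]).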
# Whether to use ego or zero roll and pitch values for bounding box detections (after global conversion) DETECTION_ROLL_PITCH: Final[Literal["ego", "zero"]] = "zero" @@ -382,11 +382,6 @@ def _write_recording_table( writer.write_batch(batch) del batch, row_data, detections_state, detections_velocity, detections_token, detections_types - if SORT_BY_TIMESTAMP: - recording_table = open_arrow_table(log_file_path) - recording_table = recording_table.sort_by([("timestamp", "ascending")]) - write_arrow_table(recording_table, log_file_path) - def _get_ego_pose_se3(frame: dataset_pb2.Frame) -> EulerStateSE3: ego_pose_matrix = np.array(frame.pose.transform).reshape(4, 4) @@ -519,7 +514,6 @@ def _extract_camera( def _extract_lidar( frame: dataset_pb2.Frame, data_converter_config: DataConverterConfig ) -> Dict[LiDARType, npt.NDArray[np.float32]]: - from waymo_open_dataset.utils import frame_utils assert data_converter_config.lidar_store_option == "binary", "Lidar store option must be 'binary' for WOPD." (range_images, camera_projections, _, range_image_top_pose) = parse_range_image_and_camera_projection(frame) From 21f1dcbba6df94e7293b69776bdaed89ca364ff1 Mon Sep 17 00:00:00 2001 From: jbwang <1159270049@qq.com> Date: Wed, 24 Sep 2025 10:07:21 +0800 Subject: [PATCH 045/145] add map convert, fix 0004 detection, interpolate dynamic --- d123/common/visualization/viser/server.py | 2 +- .../kitti_360/kitti_360_data_converter.py | 76 ++++++-- .../kitti_360/kitti_360_helper.py | 174 +++++++++++++++--- .../kitti_360/kitti_360_map_conversion.py | 125 +++++++++++++ .../dataset_specific/kitti_360/labels.py | 15 ++ .../dataset_specific/kitti_360/load_sensor.py | 6 +- .../kitti_360/preprocess_detection.py | 127 ++++++++++--- .../default_dataset_conversion.yaml | 2 +- 8 files changed, 450 insertions(+), 77 deletions(-) create mode 100644 d123/dataset/dataset_specific/kitti_360/kitti_360_map_conversion.py diff --git a/d123/common/visualization/viser/server.py b/d123/common/visualization/viser/server.py index 16e38a66..f70aba28 100644 --- a/d123/common/visualization/viser/server.py +++ b/d123/common/visualization/viser/server.py @@ -38,7 +38,7 @@ BOUNDING_BOX_TYPE: Literal["mesh", "lines"] = "lines" # Map config: -MAP_AVAILABLE: bool = False +MAP_AVAILABLE: bool = True # Cameras config: diff --git a/d123/dataset/dataset_specific/kitti_360/kitti_360_data_converter.py b/d123/dataset/dataset_specific/kitti_360/kitti_360_data_converter.py index aee14883..4e221617 100644 --- a/d123/dataset/dataset_specific/kitti_360/kitti_360_data_converter.py +++ b/d123/dataset/dataset_specific/kitti_360/kitti_360_data_converter.py @@ -10,6 +10,7 @@ import numpy as np import pickle +import copy from collections import defaultdict import datetime import hashlib @@ -31,8 +32,9 @@ from d123.dataset.arrow.helper import open_arrow_table, write_arrow_table from d123.dataset.dataset_specific.raw_data_converter import DataConverterConfig, RawDataConverter from d123.dataset.logs.log_metadata import LogMetadata -from d123.dataset.dataset_specific.kitti_360.kitti_360_helper import KITTI360Bbox3D, KITTI3602NUPLAN_IMU_CALIBRATION, get_lidar_extrinsic -from d123.dataset.dataset_specific.kitti_360.labels import KIITI360_DETECTION_NAME_DICT,kittiId2label +from d123.dataset.dataset_specific.kitti_360.kitti_360_helper import KITTI360Bbox3D, KITTI3602NUPLAN_IMU_CALIBRATION, get_lidar_extrinsic,interpolate_obj_list +from d123.dataset.dataset_specific.kitti_360.labels import KIITI360_DETECTION_NAME_DICT,kittiId2label,BBOX_LABLES_TO_DETECTION_NAME_DICT +from 
d123.dataset.dataset_specific.kitti_360.kitti_360_map_conversion import convert_kitti360_map from d123.geometry import BoundingBoxSE3, BoundingBoxSE3Index, StateSE3, Vector3D, Vector3DIndex KITTI360_DT: Final[float] = 0.1 @@ -55,11 +57,10 @@ DIR_POSES = "data_poses" DIR_CALIB = "calibration" -# PATH_2D_RAW_ROOT: Path = KITTI360_DATA_ROOT / DIR_2D_RAW -PATH_2D_RAW_ROOT: Path = KITTI360_DATA_ROOT +PATH_2D_RAW_ROOT: Path = KITTI360_DATA_ROOT / DIR_2D_RAW +# PATH_2D_RAW_ROOT: Path = KITTI360_DATA_ROOT PATH_2D_SMT_ROOT: Path = KITTI360_DATA_ROOT / DIR_2D_SMT PATH_3D_RAW_ROOT: Path = KITTI360_DATA_ROOT / DIR_3D_RAW -# PATH_3D_RAW_ROOT: Path = Path("/data/jbwang/d123/data_3d_raw") PATH_3D_SMT_ROOT: Path = KITTI360_DATA_ROOT / DIR_3D_SMT PATH_3D_BBOX_ROOT: Path = KITTI360_DATA_ROOT / DIR_3D_BBOX PATH_POSES_ROOT: Path = KITTI360_DATA_ROOT / DIR_POSES @@ -146,8 +147,22 @@ def get_available_splits(self) -> List[str]: return ["kitti360"] def convert_maps(self, worker: WorkerPool) -> None: - logging.info("KITTI-360 does not provide standard maps. Skipping map conversion.") - return None + log_args = [ + { + "log_path": log_path, + "split": split, + } + for split, log_paths in self._log_paths_per_split.items() + for log_path in log_paths + ] + worker_map( + worker, + partial( + convert_kitti360_map_to_gpkg, + data_converter_config=self.data_converter_config + ), + log_args, + ) def convert_logs(self, worker: WorkerPool) -> None: log_args = [ @@ -168,6 +183,20 @@ def convert_logs(self, worker: WorkerPool) -> None: log_args, ) +def convert_kitti360_map_to_gpkg( + args: List[Dict[str, Union[List[str], List[Path]]]], data_converter_config: DataConverterConfig +) -> List[Any]: + for log_info in args: + log_path: Path = log_info["log_path"] + split: str = log_info["split"] + log_name = log_path.stem + + map_path = data_converter_config.output_path / "maps" / split / f"kitti360_{log_name}.gpkg" + if data_converter_config.force_map_conversion or not map_path.exists(): + map_path.unlink(missing_ok=True) + convert_kitti360_map(log_name, map_path) + return [] + def convert_kitti360_log_to_arrow( args: List[Dict[str, Union[List[str], List[Path]]]], data_converter_config: DataConverterConfig ) -> List[Any]: @@ -189,7 +218,7 @@ def convert_kitti360_log_to_arrow( metadata = LogMetadata( dataset="kitti360", log_name=log_name, - location=None, + location=log_name, timestep_seconds=KITTI360_DT, map_has_z=True, ) @@ -505,26 +534,34 @@ def _extract_detections( detections_tokens: List[List[str]] = [[] for _ in range(ts_len)] detections_types: List[List[int]] = [[] for _ in range(ts_len)] - bbox_3d_path = PATH_3D_BBOX_ROOT / "train" / f"{log_name}.xml" + if log_name == "2013_05_28_drive_0004_sync": + bbox_3d_path = PATH_3D_BBOX_ROOT / "train_full" / f"{log_name}.xml" + else: + bbox_3d_path = PATH_3D_BBOX_ROOT / "train" / f"{log_name}.xml" if not bbox_3d_path.exists(): raise FileNotFoundError(f"BBox 3D file not found: {bbox_3d_path}") tree = ET.parse(bbox_3d_path) root = tree.getroot() - dynamic_groups: Dict[int, List[KITTI360Bbox3D]] = defaultdict(list) + dynamic_objs: Dict[int, List[KITTI360Bbox3D]] = defaultdict(list) detection_preprocess_path = PREPOCESS_DETECTION_DIR / f"{log_name}_detection_preprocessed.pkl" if detection_preprocess_path.exists(): with open(detection_preprocess_path, "rb") as f: detection_preprocess_result = pickle.load(f) - records_dict = {record_item["global_id"]: record_item for record_item in detection_preprocess_result["records"]} + static_records_dict = {record_item["global_id"]: record_item for 
record_item in detection_preprocess_result["static"]}
+        dynamic_records_dict = detection_preprocess_result["dynamic"]
     else:
         detection_preprocess_result = None
 
     for child in root:
-        semanticIdKITTI = int(child.find('semanticId').text)
-        name = kittiId2label[semanticIdKITTI].name
+        if child.find('semanticId') is not None:
+            semanticIdKITTI = int(child.find('semanticId').text)
+            name = kittiId2label[semanticIdKITTI].name
+        else:
+            # Annotations without a semanticId only carry a free-form label string.
+            label = child.find('label').text
+            name = BBOX_LABLES_TO_DETECTION_NAME_DICT.get(label, 'unknown')
         if child.find('transform') is None or name not in KIITI360_DETECTION_NAME_DICT.keys():
             continue
         obj = KITTI360Bbox3D()
@@ -535,7 +572,7 @@ def _extract_detections(
         if detection_preprocess_result is None:
             obj.filter_by_radius(ego_states_xyz,radius=50.0)
         else:
-            obj.load_detection_preprocess(records_dict)
+            obj.load_detection_preprocess(static_records_dict)
             for record in obj.valid_frames["records"]:
                 frame = record["timestamp"]
                 detections_states[frame].append(obj.get_state_array())
@@ -543,12 +580,15 @@
                 detections_tokens[frame].append(str(obj.globalID))
                 detections_types[frame].append(int(KIITI360_DETECTION_NAME_DICT[obj.name]))
         else:
-            ann_id = obj.annotationId
-            dynamic_groups[ann_id].append(obj)
+            global_ID = obj.globalID
+            dynamic_objs[global_ID].append(obj)
 
     # dynamic object
-    for ann_id, obj_list in dynamic_groups.items():
-        obj_list.sort(key=lambda obj: obj.timestamp)
+    if detection_preprocess_result is not None:
+        dynamic_objs = copy.deepcopy(dynamic_records_dict)
+
+    for global_id, obj_list in dynamic_objs.items():
+        obj_list = interpolate_obj_list(obj_list)
 
         num_frames = len(obj_list)
         positions = [obj.get_state_array()[:3] for obj in obj_list]
diff --git a/d123/dataset/dataset_specific/kitti_360/kitti_360_helper.py b/d123/dataset/dataset_specific/kitti_360/kitti_360_helper.py
index 77217b5d..a756a343 100644
--- a/d123/dataset/dataset_specific/kitti_360/kitti_360_helper.py
+++ 
self.start_frame = int(child.find('start_frame').text) - self.end_frame = int(child.find('end_frame').text) - self.timestamp = int(child.find('timestamp').text) self.annotationId = int(child.find('index').text) + 1 self.label = child.find('label').text - self.globalID = local2global(self.semanticId, self.instanceId) + if child.find('semanticId') is None: + self.name = BBOX_LABLES_TO_DETECTION_NAME_DICT.get(self.label, 'unknown') + self.is_dynamic = int(child.find('dynamic').text) + if self.is_dynamic != 0: + dynamicSeq = int(child.find('dynamicSeq').text) + self.globalID = KITTI360Bbox3D.dynamic_global_id + dynamicSeq + else: + self.globalID = KITTI360Bbox3D.static_global_id + KITTI360Bbox3D.static_global_id += 1 + else: + self.start_frame = int(child.find('start_frame').text) + self.end_frame = int(child.find('end_frame').text) + + semanticIdKITTI = int(child.find('semanticId').text) + self.semanticId = kittiId2label[semanticIdKITTI].id + self.instanceId = int(child.find('instanceId').text) + self.name = kittiId2label[semanticIdKITTI].name + + self.globalID = local2global(self.semanticId, self.instanceId) self.valid_frames = {"global_id": self.globalID, "records": []} @@ -103,10 +112,11 @@ def parseBbox(self, child): self.parse_scale_rotation() def parseVertices(self, child): - transform = self.parseOpencvMatrix(child.find('transform')) + transform = parseOpencvMatrix(child.find('transform')) R = transform[:3,:3] T = transform[:3,3] - vertices = self.parseOpencvMatrix(child.find('vertices')) + vertices = parseOpencvMatrix(child.find('vertices')) + self.vertices_template = copy.deepcopy(vertices) vertices = np.matmul(R, vertices.transpose()).transpose() + T self.vertices = vertices @@ -122,6 +132,7 @@ def parse_scale_rotation(self): yaw, pitch, roll = R.from_matrix(Rm).as_euler('zyx', degrees=False) self.Rm = np.array(Rm) + self.Sm = np.array(Sm) self.scale = scale self.yaw = yaw self.pitch = pitch @@ -153,7 +164,10 @@ def filter_by_radius(self,ego_state_xyz,radius=50.0): def box_visible_in_point_cloud(self, points): ''' points: (N,3) , box: (8,3) ''' - box = self.vertices + box = self.vertices.copy() + # avoid calculating ground point cloud + z_offset = 0.1 + box[:,2] += z_offset O, A, B, C = box[0], box[1], box[2], box[5] OA = A - O OB = B - O @@ -164,13 +178,117 @@ def box_visible_in_point_cloud(self, points): (np.dot(O, OC) < POC) & (POC < np.dot(C, OC)) points_in_box = np.sum(mask) - visible = True if points_in_box > 50 else False + visible = True if points_in_box > 40 else False return visible, points_in_box def load_detection_preprocess(self, records_dict: Dict[int, Any]): if self.globalID in records_dict: self.valid_frames["records"] = records_dict[self.globalID]["records"] +def interpolate_obj_list(obj_list: List[KITTI360Bbox3D]) -> List[KITTI360Bbox3D]: + """ + Fill missing timestamps in obj_list by linear interpolation. + For each missing timestamp between two objects, create a new KITTI360Bbox3D object + with only interpolated position (T), yaw, pitch, roll, and copy other attributes. + Returns a new list with all timestamps filled and sorted. 
+ """ + if not obj_list: + return obj_list + + # Sort by timestamp ascending + obj_list.sort(key=lambda obj: obj.timestamp) + timestamps = [obj.timestamp for obj in obj_list] + min_ts, max_ts = min(timestamps), max(timestamps) + full_ts = list(range(min_ts, max_ts + 1)) + missing_ts = sorted(set(full_ts) - set(timestamps)) + + # Prepare arrays for interpolation + T_arr = np.array([obj.T for obj in obj_list]) + yaw_arr = np.array([obj.yaw for obj in obj_list]) + pitch_arr = np.array([obj.pitch for obj in obj_list]) + roll_arr = np.array([obj.roll for obj in obj_list]) + ts_arr = np.array(timestamps) + + for ts in missing_ts: + idx_next = np.searchsorted(ts_arr, ts) + idx_prev = idx_next - 1 + if idx_prev < 0 or idx_next >= len(obj_list): + continue + + frac = (ts - ts_arr[idx_prev]) / (ts_arr[idx_next] - ts_arr[idx_prev]) + T_interp = T_arr[idx_prev] * (1 - frac) + T_arr[idx_next] * frac + + yaw_delat = normalize_angle(yaw_arr[idx_next] - yaw_arr[idx_prev]) + yaw_interp = yaw_arr[idx_prev] + yaw_delat * frac + yaw_interp = normalize_angle(yaw_interp) + + pitch_interp = pitch_arr[idx_prev] * (1 - frac) + pitch_arr[idx_next] * frac + roll_interp = roll_arr[idx_prev] * (1 - frac) + roll_arr[idx_next] * frac + + obj_new = copy.deepcopy(obj_list[idx_prev]) + obj_new.timestamp = ts + obj_new.T = T_interp + obj_new.yaw = yaw_interp + obj_new.pitch = pitch_interp + obj_new.roll = roll_interp + obj_new.Rm = R.from_euler('zyx', [obj_new.yaw, obj_new.pitch, obj_new.roll], degrees=False).as_matrix() + obj_new.R = obj_new.Rm @ obj_new.Sm + obj_new.vertices = (obj_new.R @ obj_new.vertices_template.T).T + obj_new.T + obj_new.is_interpolated = True + obj_new.idx_prev = ts_arr[idx_prev] + obj_new.idx_next = ts_arr[idx_next] + + obj_list.append(obj_new) + + obj_list.sort(key=lambda obj: obj.timestamp) + return obj_list + +def normalize_angle(a): + return np.arctan2(np.sin(a), np.cos(a)) + +class KITTI360_MAP_Bbox3D(): + def __init__(self): + self.id = -1 + self.label = ' ' + + self.vertices: Polyline3D = None + self.R = None + self.T = None + + def parseVertices_plane(self, child): + transform = parseOpencvMatrix(child.find('transform')) + R = transform[:3,:3] + T = transform[:3,3] + if child.find("transform_plane").find('rows').text == '0': + vertices = parseOpencvMatrix(child.find('vertices')) + else: + vertices = parseOpencvMatrix(child.find('vertices_plane')) + + vertices = np.matmul(R, vertices.transpose()).transpose() + T + self.vertices = Polyline3D.from_array(vertices) + + self.R = R + self.T = T + + def parseBbox(self, child): + self.id = int(child.find('index').text) + self.label = child.find('label').text + self.parseVertices_plane(child) + + +def parseOpencvMatrix(node): + rows = int(node.find('rows').text) + cols = int(node.find('cols').text) + data = node.find('data').text.split(' ') + + mat = [] + for d in data: + d = d.replace('\n', '') + if len(d)<1: + continue + mat.append(float(d)) + mat = np.reshape(mat, [rows, cols]) + return mat def get_lidar_extrinsic() -> np.ndarray: cam2pose_txt = PATH_CALIB_ROOT / "calib_cam_to_pose.txt" diff --git a/d123/dataset/dataset_specific/kitti_360/kitti_360_map_conversion.py b/d123/dataset/dataset_specific/kitti_360/kitti_360_map_conversion.py new file mode 100644 index 00000000..bf13eda6 --- /dev/null +++ b/d123/dataset/dataset_specific/kitti_360/kitti_360_map_conversion.py @@ -0,0 +1,125 @@ +import os +import warnings +from pathlib import Path +from typing import Dict, List, Optional + +import geopandas as gpd +import numpy as np +import pandas as pd 
+import xml.etree.ElementTree as ET +import pyogrio +from shapely.geometry import LineString +import shapely.geometry as geom + +from d123.dataset.conversion.map.road_edge.road_edge_2d_utils import ( + get_road_edge_linear_rings, + split_line_geometry_by_max_length, +) +from d123.dataset.maps.gpkg.utils import get_all_rows_with_value, get_row_with_value +from d123.dataset.maps.map_datatypes import MapLayer, RoadEdgeType, RoadLineType +from d123.geometry.polyline import Polyline3D +from d123.dataset.dataset_specific.kitti_360.kitti_360_helper import KITTI360_MAP_Bbox3D + +MAX_ROAD_EDGE_LENGTH = 100.0 # meters, used to filter out very long road edges + +KITTI360_DATA_ROOT = Path(os.environ["KITTI360_DATA_ROOT"]) + +DIR_3D_BBOX = "data_3d_bboxes" + +PATH_3D_BBOX_ROOT: Path = KITTI360_DATA_ROOT / DIR_3D_BBOX + +KIITI360_MAP_BBOX = [ + "road", + "sidewalk", + # "railtrack", + # "ground", + # "driveway", +] + +def convert_kitti360_map(log_name, map_path): + + xml_path = PATH_3D_BBOX_ROOT / "train_full" / f"{log_name}.xml" + + if not xml_path.exists(): + raise FileNotFoundError(f"BBox 3D file not found: {xml_path}") + + tree = ET.parse(xml_path) + root = tree.getroot() + objs: List[KITTI360_MAP_Bbox3D] = [] + for child in root: + label = child.find('label').text + if child.find("transform") is None or label not in KIITI360_MAP_BBOX: + continue + obj = KITTI360_MAP_Bbox3D() + obj.parseBbox(child) + objs.append(obj) + + dataframes: Dict[MapLayer, gpd.GeoDataFrame] = {} + dataframes[MapLayer.LANE] = _get_none_data() + dataframes[MapLayer.LANE_GROUP] = _get_none_data() + dataframes[MapLayer.INTERSECTION] = _get_none_data() + dataframes[MapLayer.CROSSWALK] = _get_none_data() + dataframes[MapLayer.WALKWAY] = _extract_walkway_df(objs) + dataframes[MapLayer.CARPARK] = _get_none_data() + dataframes[MapLayer.GENERIC_DRIVABLE] = _extract_generic_drivable_df(objs) + dataframes[MapLayer.ROAD_EDGE] = _extract_road_edge_df(objs) + dataframes[MapLayer.ROAD_LINE] = _get_none_data() + + map_file_name = map_path + for layer, gdf in dataframes.items(): + gdf.to_file(map_file_name, layer=layer.serialize(), driver="GPKG", mode="a") + +def _get_none_data() -> gpd.GeoDataFrame: + ids = [] + geometries = [] + data = pd.DataFrame({"id": ids}) + gdf = gpd.GeoDataFrame(data, geometry=geometries) + return gdf + +def _extract_generic_drivable_df(objs: list[KITTI360_MAP_Bbox3D]) -> gpd.GeoDataFrame: + ids: List[int] = [] + outlines: List[geom.LineString] = [] + geometries: List[geom.Polygon] = [] + for obj in objs: + if obj.label != "road": + continue + ids.append(obj.id) + outlines.append(obj.vertices.linestring) + geometries.append(geom.Polygon(obj.vertices.array[:, :2])) + data = pd.DataFrame({"id": ids, "outline": outlines}) + gdf = gpd.GeoDataFrame(data, geometry=geometries) + return gdf + +def _extract_walkway_df(objs: list[KITTI360_MAP_Bbox3D]) -> gpd.GeoDataFrame: + ids: List[int] = [] + outlines: List[geom.LineString] = [] + geometries: List[geom.Polygon] = [] + for obj in objs: + if obj.label != "sidewalk": + continue + ids.append(obj.id) + outlines.append(obj.vertices.linestring) + geometries.append(geom.Polygon(obj.vertices.array[:, :2])) + + data = pd.DataFrame({"id": ids, "outline": outlines}) + gdf = gpd.GeoDataFrame(data, geometry=geometries) + return gdf + +def _extract_road_edge_df(objs: list[KITTI360_MAP_Bbox3D]) -> gpd.GeoDataFrame: + geometries: List[geom.Polygon] = [] + for obj in objs: + if obj.label != "road": + continue + geometries.append(geom.Polygon(obj.vertices.array[:, :2])) + 
road_edge_linear_rings = get_road_edge_linear_rings(geometries) + road_edges = split_line_geometry_by_max_length(road_edge_linear_rings, MAX_ROAD_EDGE_LENGTH) + + ids = [] + road_edge_types = [] + for idx in range(len(road_edges)): + ids.append(idx) + # TODO @DanielDauner: Figure out if other types should/could be assigned here. + road_edge_types.append(int(RoadEdgeType.ROAD_EDGE_BOUNDARY)) + + data = pd.DataFrame({"id": ids, "road_edge_type": road_edge_types}) + return gpd.GeoDataFrame(data, geometry=road_edges) \ No newline at end of file diff --git a/d123/dataset/dataset_specific/kitti_360/labels.py b/d123/dataset/dataset_specific/kitti_360/labels.py index de24f152..6903be9f 100644 --- a/d123/dataset/dataset_specific/kitti_360/labels.py +++ b/d123/dataset/dataset_specific/kitti_360/labels.py @@ -169,6 +169,21 @@ def assureSingleInstanceName( name ): from d123.common.datatypes.detection.detection_types import DetectionType +BBOX_LABLES_TO_DETECTION_NAME_DICT = { + 'car': 'car', + 'truck': 'truck', + "bicycle": "bicycle", + "trafficLight": "traffic light", + "trailer": "trailer", + "bus": "bus", + "pedestrian": "person", + "motorcycle": "motorcycle", + "stop": "stop", + "trafficSign": "traffic sign", + "rider": "rider", + "caravan": "caravan", +} + KIITI360_DETECTION_NAME_DICT = { "traffic light": DetectionType.SIGN, "traffic sign": DetectionType.SIGN, diff --git a/d123/dataset/dataset_specific/kitti_360/load_sensor.py b/d123/dataset/dataset_specific/kitti_360/load_sensor.py index 2a23401f..c4df6d36 100644 --- a/d123/dataset/dataset_specific/kitti_360/load_sensor.py +++ b/d123/dataset/dataset_specific/kitti_360/load_sensor.py @@ -1,12 +1,16 @@ from pathlib import Path import numpy as np +import logging from d123.common.datatypes.sensor.lidar import LiDAR, LiDARMetadata def load_kitti360_lidar_from_path(filepath: Path, lidar_metadata: LiDARMetadata) -> LiDAR: - assert filepath.exists(), f"LiDAR file not found: {filepath}" + if not filepath.exists(): + logging.warning(f"LiDAR file does not exist: {filepath}. Returning empty point cloud.") + return LiDAR(metadata=lidar_metadata, point_cloud=np.zeros((4, 0), dtype=np.float32)) + pcd = np.fromfile(filepath, dtype=np.float32) pcd = np.reshape(pcd,[-1,4]) # [N,4] diff --git a/d123/dataset/dataset_specific/kitti_360/preprocess_detection.py b/d123/dataset/dataset_specific/kitti_360/preprocess_detection.py index 5827e779..f2d14ce1 100644 --- a/d123/dataset/dataset_specific/kitti_360/preprocess_detection.py +++ b/d123/dataset/dataset_specific/kitti_360/preprocess_detection.py @@ -1,8 +1,8 @@ """ -This script precomputes static detection records for KITTI-360: +This script precomputes detection records for KITTI-360: - Stage 1: radius filtering using ego positions (from poses.txt). - Stage 2: LiDAR visibility check to fill per-frame point counts. -It writes a pickle containing, for each static object, all feasible frames and +It writes a pickle containing, for each object, all feasible frames and their point counts to avoid recomputation in later pipelines. 
 We have precomputed and saved the pickle for all training logs, you can either
 download them or run this script to generate
@@ -12,9 +12,11 @@
 import os
 import pickle
 import logging
+import copy
 from pathlib import Path
 from typing import Dict, List, Tuple, Optional, Any
 from collections import defaultdict
+import concurrent.futures
 
 import numpy as np
 import numpy.typing as npt
@@ -25,15 +27,16 @@
 DIR_3D_BBOX = "data_3d_bboxes"
 DIR_POSES = "data_poses"
 
-# PATH_3D_RAW_ROOT = KITTI360_DATA_ROOT / DIR_3D_RAW
-PATH_3D_RAW_ROOT = Path("/data/jbwang/d123/data_3d_raw/")
+PATH_3D_RAW_ROOT = KITTI360_DATA_ROOT / DIR_3D_RAW
 PATH_3D_BBOX_ROOT = KITTI360_DATA_ROOT / DIR_3D_BBOX
 PATH_POSES_ROOT = KITTI360_DATA_ROOT / DIR_POSES
 
-from d123.dataset.dataset_specific.kitti_360.kitti_360_helper import KITTI360Bbox3D, KITTI3602NUPLAN_IMU_CALIBRATION, get_lidar_extrinsic
-from d123.dataset.dataset_specific.kitti_360.labels import KIITI360_DETECTION_NAME_DICT, kittiId2label
+from d123.dataset.dataset_specific.kitti_360.kitti_360_helper import KITTI360Bbox3D, KITTI3602NUPLAN_IMU_CALIBRATION, get_lidar_extrinsic,interpolate_obj_list
+from d123.dataset.dataset_specific.kitti_360.labels import KIITI360_DETECTION_NAME_DICT, kittiId2label,BBOX_LABLES_TO_DETECTION_NAME_DICT
 
 def _bbox_xml_path(log_name: str) -> Path:
+    if log_name == "2013_05_28_drive_0004_sync":
+        return PATH_3D_BBOX_ROOT / "train_full" / f"{log_name}.xml"
     return PATH_3D_BBOX_ROOT / "train" / f"{log_name}.xml"
 
 def _lidar_frame_path(log_name: str, frame_idx: int) -> Path:
@@ -44,24 +47,36 @@ def _load_lidar_xyz(filepath: Path) -> np.ndarray:
     arr = np.fromfile(filepath, dtype=np.float32)
     return arr.reshape(-1, 4)[:, :3]
 
-def _collect_static_objects(log_name: str) -> List[KITTI360Bbox3D]:
-    """Parse XML and collect static objects with valid class names."""
+def _collect_objects(log_name: str) -> Tuple[List[KITTI360Bbox3D], Dict[int, List[KITTI360Bbox3D]]]:
+    """Parse XML and collect objects with valid class names."""
     xml_path = _bbox_xml_path(log_name)
     if not xml_path.exists():
        raise FileNotFoundError(f"BBox 3D file not found: {xml_path}")
     tree = ET.parse(xml_path)
     root = tree.getroot()
-    objs: List[KITTI360Bbox3D] = []
+
+    static_objs: List[KITTI360Bbox3D] = []
+    dynamic_objs: Dict[int, List[KITTI360Bbox3D]] = defaultdict(list)
+
     for child in root:
-        sem_id = int(child.find("semanticId").text)
-        name = kittiId2label[sem_id].name
-        timestamp = int(child.find('timestamp').text)  # -1 for static objects
-        if child.find("transform") is None or name not in KIITI360_DETECTION_NAME_DICT or timestamp != -1:
+        if child.find('semanticId') is not None:
+            semanticIdKITTI = int(child.find('semanticId').text)
+            name = kittiId2label[semanticIdKITTI].name
+        else:
+            label = child.find('label').text
+            name = BBOX_LABLES_TO_DETECTION_NAME_DICT.get(label, 'unknown')
+        if child.find("transform") is None or name not in KIITI360_DETECTION_NAME_DICT:
             continue
         obj = KITTI360Bbox3D()
         obj.parseBbox(child)
-        objs.append(obj)
-    return objs
+        timestamp = int(child.find('timestamp').text)
+        if timestamp == -1:
+            static_objs.append(obj)
+        else:
+            global_ID = obj.globalID
+            dynamic_objs[global_ID].append(obj)
+
+    return static_objs, dynamic_objs
 
 def _collect_ego_states(log_name: str,length: int) -> npt.NDArray[np.float64]:
     """Load ego states from poses.txt."""
@@ -105,14 +120,18 @@ def process_detection(
     log_name: str,
-    radius_m: float = 50.0,
+    radius_m: float = 60.0,
     output_dir: Optional[Path] = None,
 ) -> None:
     """
-    Precompute static detections filtering:
+    Precompute detection filtering.
+    For static objects:
     1) filter by ego-centered radius over all frames
     2) filter by LiDAR point cloud visibility
-    Save per-frame static detections to a pickle to avoid recomputation.
+    For dynamic objects:
+    1) interpolate boxes for missing frames
+    2) select the box with the highest LiDAR point count
+    Save per-frame detections to a pickle to avoid recomputation.
     """
 
     lidar_dir = PATH_3D_RAW_ROOT / log_name / "velodyne_points" / "data"
@@ -121,21 +140,36 @@ def process_detection(
     ts_len = len(list(lidar_dir.glob("*.bin")))
     logging.info(f"[preprocess] {log_name}: found {ts_len} lidar frames")
 
-    # 1) Parse static objects from XML
-    static_objs = _collect_static_objects(log_name)
+    # 1) Parse objects from XML
+    static_objs: List[KITTI360Bbox3D]
+    dynamic_objs: Dict[int, List[KITTI360Bbox3D]]
+    static_objs, dynamic_objs = _collect_objects(log_name)
+
+    # only interpolate dynamic objects
+    for global_ID, obj_list in dynamic_objs.items():
+        obj_list_interpolated = interpolate_obj_list(obj_list)
+        dynamic_objs[global_ID] = obj_list_interpolated
+    dynamic_objs_updated = copy.deepcopy(dynamic_objs)
+
     logging.info(f"[preprocess] {log_name}: static objects = {len(static_objs)}")
+    logging.info(f"[preprocess] {log_name}: dynamic objects = {len(dynamic_objs.keys())}")
 
-    # 2) Filter by ego-centered radius
+    # 2) Filter static objs by ego-centered radius
     ego_states = _collect_ego_states(log_name,ts_len)
     logging.info(f"[preprocess] {log_name}: ego states = {len(ego_states)}")
     for obj in static_objs:
         obj.filter_by_radius(ego_states[:, :3, 3], radius_m)
 
-    # 3) Filter by LiDAR point cloud visibility
+    # 3) Filter static objs by LiDAR point cloud visibility
     lidar_extrinsic = get_lidar_extrinsic()
-    for time_idx in range(ts_len):
+
+    def process_one_frame(time_idx: int) -> None:
         logging.info(f"[preprocess] {log_name}: t={time_idx}")
         lidar_path = _lidar_frame_path(log_name, time_idx)
+        if not lidar_path.exists():
+            logging.warning(f"[preprocess] {log_name}: LiDAR frame not found: {lidar_path}")
+            return
+
         lidar_xyz = _load_lidar_xyz(lidar_path)
 
         # lidar to pose
@@ -158,17 +192,53 @@ def process_detection(
                     record["points_in_box"] = points_in_box
                 break
 
+        # for dynamic objects, select the box with the highest LiDAR point count
+        for global_ID, obj_list in dynamic_objs.items():
+            obj_at_time = [obj for obj in obj_list if obj.timestamp == time_idx]
+            if not obj_at_time:
+                continue
+
+            obj = obj_at_time[0]
+            # NOTE: only update interpolated boxes
+            if not obj.is_interpolated:
+                continue
+
+            max_points = -1
+            best_obj = None
+            ts_prev = obj.idx_prev
+            ts_next = obj.idx_next
+            candidates = [candidate for candidate in obj_list if ts_prev <= candidate.timestamp <= ts_next]
+
+            for candidate in candidates:
+                visible, points_in_box = candidate.box_visible_in_point_cloud(lidar_in_world)
+                if points_in_box > max_points:
+                    max_points = points_in_box
+                    best_obj = candidate
+
+            if best_obj is not None:
+                idx = next((i for i, o in enumerate(dynamic_objs[global_ID]) if o.timestamp == time_idx), None)
+                if idx is not None:
+                    dynamic_objs_updated[global_ID][idx] = copy.deepcopy(best_obj)
+                    dynamic_objs_updated[global_ID][idx].timestamp = time_idx
+
+    max_workers = (os.cpu_count() or 1) * 2
+    with concurrent.futures.ThreadPoolExecutor(max_workers=max_workers) as executor:
+        list(executor.map(process_one_frame, range(ts_len)))
 
     # 4) Save pickle
-    records: List[Dict[str, Any]] = []
+    static_records: List[Dict[str, Any]] = []
     for obj in static_objs:
-        records.append(obj.valid_frames)
+        static_records.append(obj.valid_frames)
+
     if output_dir is None:
         output_dir = PATH_3D_BBOX_ROOT / "preprocess"
     output_dir.mkdir(parents=True, exist_ok=True)
     out_path = output_dir / f"{log_name}_detection_preprocessed.pkl"
+
     payload = {
         "log_name": log_name,
-        "records": records
+        "static": static_records,
+        "dynamic": dynamic_objs_updated
     }
     with open(out_path, "wb") as f:
         pickle.dump(payload, f)
@@ -177,11 +247,12 @@
 if __name__ == "__main__":
     import argparse
     logging.basicConfig(level=logging.INFO)
-    parser = argparse.ArgumentParser(description="Precompute KITTI-360 static detections filters")
-    parser.add_argument("--log_name", default="2013_05_28_drive_0007_sync")
+    parser = argparse.ArgumentParser(description="Precompute KITTI-360 detection filters")
+    parser.add_argument("--log_name", default="2013_05_28_drive_0004_sync")
     parser.add_argument("--radius", type=float, default=60.0)
     parser.add_argument("--out", type=Path, default="detection_preprocess", help="output directory for pkl")
     args = parser.parse_args()
+
     process_detection(
         log_name=args.log_name,
         radius_m=args.radius,
diff --git a/d123/script/config/dataset_conversion/default_dataset_conversion.yaml b/d123/script/config/dataset_conversion/default_dataset_conversion.yaml
index 52915f13..2c474fe8 100644
--- a/d123/script/config/dataset_conversion/default_dataset_conversion.yaml
+++ b/d123/script/config/dataset_conversion/default_dataset_conversion.yaml
@@ -22,4 +22,4 @@ defaults:
   - kitti360_dataset
 
 force_log_conversion: True
-force_map_conversion: False
+force_map_conversion: True

From 49fa542fde116d497956cfe597efa466668e4f92 Mon Sep 17 00:00:00 2001
From: DanielDauner
Date: Wed, 24 Sep 2025 09:18:58 +0200
Subject: [PATCH 046/145] Further removal of EulerStateSE3 (#43)

---
 d123/datasets/wopd/wopd_data_converter.py | 57 ++++++++++-------------
 d123/geometry/se.py                       | 29 ++++++++----
 2 files changed, 45 insertions(+), 41 deletions(-)

diff --git a/d123/datasets/wopd/wopd_data_converter.py b/d123/datasets/wopd/wopd_data_converter.py
index 30c5cdc1..f3353a6a 100644
--- a/d123/datasets/wopd/wopd_data_converter.py
+++ b/d123/datasets/wopd/wopd_data_converter.py
@@ -5,12 +5,11 @@
 from dataclasses import asdict
 from functools import partial
 from pathlib import Path
-from typing import Any, Dict, Final, List, Literal, Tuple, Union
+from typing import Any, Dict, Final, List, Tuple, Union
 
 import numpy as np
 import numpy.typing as npt
 import pyarrow as pa
-from pyquaternion import Quaternion
 
 from d123.common.multithreading.worker_utils import WorkerPool, worker_map
 from d123.common.utils.dependencies import check_dependencies
@@ -24,8 +23,8 @@
 from d123.datatypes.sensors.lidar_index import WopdLidarIndex
 from d123.datatypes.vehicle_state.ego_state import DynamicStateSE3, EgoStateSE3, EgoStateSE3Index
 from d123.datatypes.vehicle_state.vehicle_parameters import get_wopd_chrysler_pacifica_parameters
-from d123.geometry import BoundingBoxSE3Index, EulerStateSE3, Point3D, Vector3D, Vector3DIndex
-from d123.geometry.transform.transform_euler_se3 import convert_relative_to_absolute_euler_se3_array
+from d123.geometry import BoundingBoxSE3Index, EulerAngles, StateSE3, Vector3D, Vector3DIndex
+from d123.geometry.transform.transform_se3 import convert_relative_to_absolute_se3_array
 from d123.geometry.utils.constants import DEFAULT_PITCH, DEFAULT_ROLL
 
 check_dependencies(modules=["tensorflow", "waymo_open_dataset"], optional_name="waymo")
@@ -74,7 +73,7 @@
 WOPD_DATA_ROOT = 
Path("/media/nvme1/waymo_perception") # TODO: set as environment variable !!!! # Whether to use ego or zero roll and pitch values for bounding box detections (after global conversion) -DETECTION_ROLL_PITCH: Final[Literal["ego", "zero"]] = "zero" +ZERO_ROLL_PITCH: Final[bool] = True def create_token(input_data: str) -> str: @@ -383,16 +382,12 @@ def _write_recording_table( del batch, row_data, detections_state, detections_velocity, detections_token, detections_types -def _get_ego_pose_se3(frame: dataset_pb2.Frame) -> EulerStateSE3: - ego_pose_matrix = np.array(frame.pose.transform).reshape(4, 4) - yaw, pitch, roll = Quaternion(matrix=ego_pose_matrix[:3, :3]).yaw_pitch_roll - ego_point_3d = Point3D.from_array(ego_pose_matrix[:3, 3]) - - return EulerStateSE3(x=ego_point_3d.x, y=ego_point_3d.y, z=ego_point_3d.z, roll=roll, pitch=pitch, yaw=yaw) +def _get_ego_pose_se3(frame: dataset_pb2.Frame) -> StateSE3: + ego_pose_matrix = np.array(frame.pose.transform, dtype=np.float64).reshape(4, 4) + return StateSE3.from_transformation_matrix(ego_pose_matrix) def _extract_detections(frame: dataset_pb2.Frame) -> Tuple[List[List[float]], List[List[float]], List[str], List[int]]: - # TODO: implement ego_rear_axle = _get_ego_pose_se3(frame) @@ -406,13 +401,22 @@ def _extract_detections(frame: dataset_pb2.Frame) -> Tuple[List[List[float]], Li if detection.type not in WOPD_DETECTION_NAME_DICT: continue - # 1. SS3 Bounding Box + # 1. Quaternion rotations + # NOTE: WOPD bounding boxes are (1) stored in ego frame and (2) only supply yaw rotation + # The global pose can either consider ego roll and pitch or set them to zero. + # (zero roll/pitch corresponds to setting it to the ego roll/pitch, before transformation to global frame) + # + detection_quaternion = EulerAngles( + roll=ego_rear_axle.roll if ZERO_ROLL_PITCH else DEFAULT_ROLL, + pitch=ego_rear_axle.pitch if ZERO_ROLL_PITCH else DEFAULT_PITCH, + yaw=detection.box.heading, + ).quaternion + + # 2. 
Fill SE3 Bounding Box detections_state[detection_idx, BoundingBoxSE3Index.X] = detection.box.center_x detections_state[detection_idx, BoundingBoxSE3Index.Y] = detection.box.center_y detections_state[detection_idx, BoundingBoxSE3Index.Z] = detection.box.center_z - detections_state[detection_idx, BoundingBoxSE3Index.ROLL] = DEFAULT_ROLL # not provided in WOPD - detections_state[detection_idx, BoundingBoxSE3Index.PITCH] = DEFAULT_PITCH # not provided in WOPD - detections_state[detection_idx, BoundingBoxSE3Index.YAW] = detection.box.heading + detections_state[detection_idx, BoundingBoxSE3Index.QUATERNION] = detection_quaternion detections_state[detection_idx, BoundingBoxSE3Index.LENGTH] = detection.box.length detections_state[detection_idx, BoundingBoxSE3Index.WIDTH] = detection.box.width detections_state[detection_idx, BoundingBoxSE3Index.HEIGHT] = detection.box.height @@ -428,17 +432,9 @@ def _extract_detections(frame: dataset_pb2.Frame) -> Tuple[List[List[float]], Li detections_token.append(str(detection.id)) detections_types.append(int(WOPD_DETECTION_NAME_DICT[detection.type])) - detections_state[:, BoundingBoxSE3Index.STATE_SE3] = convert_relative_to_absolute_euler_se3_array( + detections_state[:, BoundingBoxSE3Index.STATE_SE3] = convert_relative_to_absolute_se3_array( origin=ego_rear_axle, se3_array=detections_state[:, BoundingBoxSE3Index.STATE_SE3] ) - if DETECTION_ROLL_PITCH == "ego": - pass - if DETECTION_ROLL_PITCH == "zero": - detections_state[:, BoundingBoxSE3Index.ROLL] = DEFAULT_ROLL - detections_state[:, BoundingBoxSE3Index.PITCH] = DEFAULT_PITCH - else: - raise ValueError(f"Invalid DETECTION_ROLL_PITCH value: {DETECTION_ROLL_PITCH}. Must be 'ego' or 'zero'.") - return detections_state.tolist(), detections_velocity.tolist(), detections_token, detections_types @@ -484,17 +480,12 @@ def _extract_camera( transform = np.array(calibration.extrinsic.transform).reshape(4, 4) # FIXME: This is an ugly hack to convert to uniform camera convention. - flip_camera = EulerStateSE3( - x=0.0, - y=0.0, - z=0.0, - roll=np.deg2rad(0.0), - pitch=np.deg2rad(90.0), - yaw=np.deg2rad(-90.0), - ).rotation_matrix + # TODO: Extract function to convert between different camera conventions. + flip_camera = EulerAngles(roll=np.deg2rad(0.0), pitch=np.deg2rad(90.0), yaw=np.deg2rad(-90.0)).rotation_matrix transform[:3, :3] = transform[:3, :3] @ flip_camera context_extrinsic[camera_type] = transform + # TODO: Refactor to avoid code duplication for image_proto in frame.images: camera_type = WOPD_CAMERA_TYPES[image_proto.name] diff --git a/d123/geometry/se.py b/d123/geometry/se.py index a35b9c4c..213dca2f 100644 --- a/d123/geometry/se.py +++ b/d123/geometry/se.py @@ -139,6 +139,20 @@ def from_array(cls, array: npt.NDArray[np.float64], copy: bool = True) -> StateS object.__setattr__(instance, "_array", array.copy() if copy else array) return instance + @classmethod + def from_transformation_matrix(cls, transformation_matrix: npt.NDArray[np.float64]) -> StateSE3: + """Constructs a StateSE3 from a 4x4 transformation matrix. + + :param transformation_matrix: A 4x4 numpy array representing the transformation matrix. + :return: A StateSE3 instance. 
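+
+        A minimal usage sketch (identity transform, for illustration only):
+            pose = StateSE3.from_transformation_matrix(np.eye(4))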
+ """ + assert transformation_matrix.ndim == 2 + assert transformation_matrix.shape == (4, 4) + array = np.zeros(len(StateSE3Index), dtype=np.float64) + array[StateSE3Index.XYZ] = transformation_matrix[:3, :3] + array[StateSE3Index.QUATERNION] = Quaternion.from_rotation_matrix(transformation_matrix[:3, :3]) + return StateSE3.from_array(array) + @property def x(self) -> float: """Returns the x-coordinate of the quaternion. @@ -332,18 +346,17 @@ def from_array(cls, array: npt.NDArray[np.float64], copy: bool = True) -> EulerS return instance @classmethod - def from_transformation_matrix(cls, array: npt.NDArray[np.float64]) -> EulerStateSE3: - """Constructs a StateSE3 from a 4x4 transformation matrix. + def from_transformation_matrix(cls, transformation_matrix: npt.NDArray[np.float64]) -> EulerStateSE3: + """Constructs a EulerStateSE3 from a 4x4 transformation matrix. :param array: A 4x4 numpy array representing the transformation matrix. - :return: A StateSE3 instance. + :return: A EulerStateSE3 instance. """ - assert array.ndim == 2 - assert array.shape == (4, 4) - translation = array[:3, 3] - rotation = array[:3, :3] + assert transformation_matrix.ndim == 2 + assert transformation_matrix.shape == (4, 4) + translation = transformation_matrix[:3, 3] + rotation = transformation_matrix[:3, :3] roll, pitch, yaw = EulerAngles.from_rotation_matrix(rotation) - return EulerStateSE3( x=translation[Point3DIndex.X], y=translation[Point3DIndex.Y], From cdc5cd8be46a298c8312c8d9d69c6f4385317400 Mon Sep 17 00:00:00 2001 From: DanielDauner Date: Wed, 24 Sep 2025 09:43:50 +0200 Subject: [PATCH 047/145] Config adaptations and visualizations to account for recent refactoring (#39) --- .../visualization/matplotlib/observation.py | 14 ++++++------ d123/datasets/wopd/wopd_data_converter.py | 2 +- .../default_dataset_conversion.yaml | 4 ++-- .../config/datasets/av2_sensor_dataset.yaml | 4 ++-- .../script/config/datasets/carla_dataset.yaml | 4 ++-- .../config/datasets/nuplan_dataset.yaml | 4 ++-- .../config/datasets/nuplan_mini_dataset.yaml | 4 ++-- .../datasets/nuplan_private_dataset.yaml | 4 ++-- d123/script/config/datasets/wopd_dataset.yaml | 4 ++-- notebooks/av2/delete_me.ipynb | 10 ++++----- notebooks/av2/delete_me_map.ipynb | 2 +- notebooks/deprecated/extraction_testing.ipynb | 2 +- .../test_nuplan_conversion.ipynb | 2 +- notebooks/deprecated/test_waypoints.ipynb | 2 +- notebooks/gym/test_gym.ipynb | 2 +- notebooks/nuplan/nuplan_sensor_loading.ipynb | 2 +- notebooks/scene_rendering.ipynb | 4 ++-- notebooks/viz/bev_matplotlib.ipynb | 22 ++++++++++--------- notebooks/waymo_perception/map_testing.ipynb | 2 +- 19 files changed, 48 insertions(+), 46 deletions(-) diff --git a/d123/common/visualization/matplotlib/observation.py b/d123/common/visualization/matplotlib/observation.py index eb37ad74..ed46c6a9 100644 --- a/d123/common/visualization/matplotlib/observation.py +++ b/d123/common/visualization/matplotlib/observation.py @@ -4,9 +4,6 @@ import numpy as np import shapely.geometry as geom -from d123.common.datatypes.detection.detection import BoxDetectionWrapper, TrafficLightDetectionWrapper -from d123.common.datatypes.detection.detection_types import DetectionType -from d123.common.datatypes.vehicle_state.ego_state import EgoStateSE2, EgoStateSE3 from d123.common.visualization.color.config import PlotConfig from d123.common.visualization.color.default import ( BOX_DETECTION_CONFIG, @@ -22,10 +19,13 @@ get_pose_triangle, shapely_geometry_local_coords, ) -from d123.datasets.maps.abstract_map import AbstractMap 
-from d123.datasets.maps.abstract_map_objects import AbstractLane
-from d123.datasets.maps.map_datatypes import MapLayer
-from d123.datasets.scene.abstract_scene import AbstractScene
+from d123.datatypes.detections.detection import BoxDetectionWrapper, TrafficLightDetectionWrapper
+from d123.datatypes.detections.detection_types import DetectionType
+from d123.datatypes.maps.abstract_map import AbstractMap
+from d123.datatypes.maps.abstract_map_objects import AbstractLane
+from d123.datatypes.maps.map_datatypes import MapLayer
+from d123.datatypes.scene.abstract_scene import AbstractScene
+from d123.datatypes.vehicle_state.ego_state import EgoStateSE2, EgoStateSE3
 from d123.geometry import BoundingBoxSE2, BoundingBoxSE3, Point2D
 from d123.geometry.geometry_index import StateSE2Index
 from d123.geometry.transform.transform_se2 import translate_se2_along_body_frame
diff --git a/d123/datasets/wopd/wopd_data_converter.py b/d123/datasets/wopd/wopd_data_converter.py
index f3353a6a..95649218 100644
--- a/d123/datasets/wopd/wopd_data_converter.py
+++ b/d123/datasets/wopd/wopd_data_converter.py
@@ -10,13 +10,13 @@
 import numpy as np
 import numpy.typing as npt
 import pyarrow as pa
+from d123.datatypes.detections.detection_types import DetectionType
 
 from d123.common.multithreading.worker_utils import WorkerPool, worker_map
 from d123.common.utils.dependencies import check_dependencies
 from d123.datasets.raw_data_converter import DataConverterConfig, RawDataConverter
 from d123.datasets.wopd.waymo_map_utils.wopd_map_utils import convert_wopd_map
 from d123.datasets.wopd.wopd_utils import parse_range_image_and_camera_projection
-from d123.datatypes.detections.detection_types import DetectionType
 from d123.datatypes.scene.scene_metadata import LogMetadata
 from d123.datatypes.sensors.camera import CameraMetadata, CameraType, camera_metadata_dict_to_json
 from d123.datatypes.sensors.lidar import LiDARMetadata, LiDARType, lidar_metadata_dict_to_json
diff --git a/d123/script/config/dataset_conversion/default_dataset_conversion.yaml b/d123/script/config/dataset_conversion/default_dataset_conversion.yaml
index b46e7c9d..be97439e 100644
--- a/d123/script/config/dataset_conversion/default_dataset_conversion.yaml
+++ b/d123/script/config/dataset_conversion/default_dataset_conversion.yaml
@@ -15,9 +15,9 @@ defaults:
   - default_dataset_paths
   - _self_
   - datasets:
-    # - nuplan_private_dataset
+    - nuplan_private_dataset
     # - carla_dataset
-    - wopd_dataset
+    # - wopd_dataset
     # - av2_sensor_dataset
 
 force_log_conversion: True
diff --git a/d123/script/config/datasets/av2_sensor_dataset.yaml b/d123/script/config/datasets/av2_sensor_dataset.yaml
index 58a64f7a..d567a175 100644
--- a/d123/script/config/datasets/av2_sensor_dataset.yaml
+++ b/d123/script/config/datasets/av2_sensor_dataset.yaml
@@ -1,12 +1,12 @@
 av2_sensor_dataset:
-  _target_: d123.dataset.dataset_specific.av2.av2_data_converter.AV2SensorDataConverter
+  _target_: d123.datasets.av2.av2_data_converter.AV2SensorDataConverter
   _convert_: 'all'
 
   splits: ["av2-sensor-mini_train"]
   log_path: "/mnt/elements_0/argoverse"
 
   data_converter_config:
-    _target_: d123.dataset.dataset_specific.raw_data_converter.DataConverterConfig
+    _target_: d123.datasets.raw_data_converter.DataConverterConfig
     _convert_: 'all'
 
     output_path: ${d123_data_root}
diff --git a/d123/script/config/datasets/carla_dataset.yaml b/d123/script/config/datasets/carla_dataset.yaml
index 31184f78..14160a0c 100644
--- a/d123/script/config/datasets/carla_dataset.yaml
+++ b/d123/script/config/datasets/carla_dataset.yaml
@@ -1,12 +1,12 @@ carla_dataset: - _target_: d123.dataset.dataset_specific.carla.carla_data_converter.CarlaDataConverter + _target_: d123.datasets.carla.carla_data_converter.CarlaDataConverter _convert_: 'all' splits: ["carla"] log_path: "${oc.env:HOME}/carla_workspace/data" data_converter_config: - _target_: d123.dataset.dataset_specific.raw_data_converter.DataConverterConfig + _target_: d123.datasets.raw_data_converter.DataConverterConfig _convert_: 'all' output_path: ${d123_data_root} diff --git a/d123/script/config/datasets/nuplan_dataset.yaml b/d123/script/config/datasets/nuplan_dataset.yaml index 8c104287..6bcca6fb 100644 --- a/d123/script/config/datasets/nuplan_dataset.yaml +++ b/d123/script/config/datasets/nuplan_dataset.yaml @@ -1,12 +1,12 @@ nuplan_dataset: - _target_: d123.dataset.dataset_specific.nuplan.nuplan_data_converter.NuplanDataConverter + _target_: d123.datasets.nuplan.nuplan_data_converter.NuplanDataConverter _convert_: 'all' splits: ["nuplan_train", "nuplan_val", "nuplan_test"] log_path: ${oc.env:NUPLAN_DATA_ROOT}/nuplan-v1.1/splits # NOTE: folder including [mini, trainval, test], sometimes not inside "splits" folder data_converter_config: - _target_: d123.dataset.dataset_specific.raw_data_converter.DataConverterConfig + _target_: d123.datasets.raw_data_converter.DataConverterConfig _convert_: 'all' output_path: ${d123_data_root} diff --git a/d123/script/config/datasets/nuplan_mini_dataset.yaml b/d123/script/config/datasets/nuplan_mini_dataset.yaml index 1fdb2b54..a371d34d 100644 --- a/d123/script/config/datasets/nuplan_mini_dataset.yaml +++ b/d123/script/config/datasets/nuplan_mini_dataset.yaml @@ -1,5 +1,5 @@ nuplan_mini_dataset: - _target_: d123.dataset.dataset_specific.nuplan.nuplan_data_converter.NuplanDataConverter + _target_: d123.datasets.nuplan.nuplan_data_converter.NuplanDataConverter _convert_: 'all' @@ -7,7 +7,7 @@ nuplan_mini_dataset: log_path: ${oc.env:NUPLAN_DATA_ROOT}/nuplan-v1.1/splits # NOTE: folder including [mini, trainval, test], sometimes not inside "splits" folder data_converter_config: - _target_: d123.dataset.dataset_specific.raw_data_converter.DataConverterConfig + _target_: d123.datasets.raw_data_converter.DataConverterConfig _convert_: 'all' output_path: ${d123_data_root} diff --git a/d123/script/config/datasets/nuplan_private_dataset.yaml b/d123/script/config/datasets/nuplan_private_dataset.yaml index 399dcb7e..7062f38f 100644 --- a/d123/script/config/datasets/nuplan_private_dataset.yaml +++ b/d123/script/config/datasets/nuplan_private_dataset.yaml @@ -1,12 +1,12 @@ nuplan_private_dataset: - _target_: d123.dataset.dataset_specific.nuplan.nuplan_data_converter.NuplanDataConverter + _target_: d123.datasets.nuplan.nuplan_data_converter.NuplanDataConverter _convert_: 'all' splits: ["nuplan_private_test"] log_path: ${oc.env:NUPLAN_DATA_ROOT}/nuplan-v1.1/splits # NOTE: folder including [mini, trainval, test], sometimes not inside "splits" folder data_converter_config: - _target_: d123.dataset.dataset_specific.raw_data_converter.DataConverterConfig + _target_: d123.datasets.raw_data_converter.DataConverterConfig _convert_: 'all' output_path: ${d123_data_root} diff --git a/d123/script/config/datasets/wopd_dataset.yaml b/d123/script/config/datasets/wopd_dataset.yaml index 1abb0381..906aef03 100644 --- a/d123/script/config/datasets/wopd_dataset.yaml +++ b/d123/script/config/datasets/wopd_dataset.yaml @@ -1,12 +1,12 @@ wopd_dataset: - _target_: d123.dataset.dataset_specific.wopd.wopd_data_converter.WOPDDataConverter + _target_: 
d123.datasets.wopd.wopd_data_converter.WOPDDataConverter
   _convert_: 'all'
 
   splits: ["wopd_train"]
   log_path: null # TODO: implement
 
   data_converter_config:
-    _target_: d123.dataset.dataset_specific.raw_data_converter.DataConverterConfig
+    _target_: d123.datasets.raw_data_converter.DataConverterConfig
     _convert_: 'all'
 
     output_path: ${d123_data_root}
diff --git a/notebooks/av2/delete_me.ipynb b/notebooks/av2/delete_me.ipynb
index 434287f0..8d224181 100644
--- a/notebooks/av2/delete_me.ipynb
+++ b/notebooks/av2/delete_me.ipynb
@@ -126,8 +126,8 @@
    "# # # 4. sensors\n",
    "# # print(_ls(log_folder))\n",
    "\n",
-   "# # from d123.dataset.dataset_specific.av2.av2_data_converter import AV2SensorDataConverter\n",
-   "# from d123.dataset.dataset_specific.av2.av2_data_converter import AV2SensorDataConverter\n",
+   "# # from d123.datasets.av2.av2_data_converter import AV2SensorDataConverter\n",
+   "# from d123.datasets.av2.av2_data_converter import AV2SensorDataConverter\n",
    "\n",
    "# # AV2SensorDataConverter([])"
   ]
@@ -197,8 +197,8 @@
    "# Testing sensor syn dataframes\n",
    "\n",
    "from typing import Optional\n",
-   "from d123.dataset.dataset_specific.av2.av2_constants import AV2_CAMERA_TYPE_MAPPING\n",
-   "from d123.dataset.dataset_specific.av2.av2_helper import build_sensor_dataframe, build_synchronization_dataframe\n",
+   "from d123.datasets.av2.av2_constants import AV2_CAMERA_TYPE_MAPPING\n",
+   "from d123.datasets.av2.av2_helper import build_sensor_dataframe, build_synchronization_dataframe\n",
    "\n",
    "\n",
    "sensor_df = build_sensor_dataframe(log_folder)\n",
@@ -384,7 +384,7 @@
    "\n",
    "\n",
    "from pyquaternion import Quaternion\n",
-   "from d123.common.datatypes.detection.detection_types import DetectionType\n",
+   "from d123.datatypes.detections.detection_types import DetectionType\n",
    "from d123.geometry.base import StateSE2\n",
    "from d123.geometry.bounding_box import BoundingBoxSE2\n",
    "from d123.common.visualization.color.config import PlotConfig\n",
diff --git a/notebooks/av2/delete_me_map.ipynb b/notebooks/av2/delete_me_map.ipynb
index 901c232c..2e8dba42 100644
--- a/notebooks/av2/delete_me_map.ipynb
+++ b/notebooks/av2/delete_me_map.ipynb
@@ -59,7 +59,7 @@
    "from typing import Dict, List\n",
    "\n",
    "from d123.geometry.line.polylines import Polyline3D\n",
-   "from d123.dataset.dataset_specific.av2.av2_map_conversion import _extract_lane_group_dict\n",
+   "from d123.datasets.av2.av2_map_conversion import _extract_lane_group_dict\n",
    "\n",
    "\n",
    "def _extract_polyline(data: List[Dict[str, float]], close: bool = False) -> Polyline3D:\n",
diff --git a/notebooks/deprecated/extraction_testing.ipynb b/notebooks/deprecated/extraction_testing.ipynb
index d782d7ba..b4c2d5bb 100644
--- a/notebooks/deprecated/extraction_testing.ipynb
+++ b/notebooks/deprecated/extraction_testing.ipynb
@@ -17,7 +17,7 @@
    "from d123.common.multithreading.worker_pool import WorkerPool\n",
    "\n",
    "from d123.dataset.arrow.helper import open_arrow_arrow_table\n",
-   "from d123.dataset.dataset_specific.nuplan.nuplan_data_processor import worker_map\n",
+   "from d123.datasets.nuplan.nuplan_data_processor import worker_map\n",
    "from d123.dataset.logs.log_metadata import LogMetadata\n",
    "from d123.dataset.scene.abstract_scene import AbstractScene\n",
    "from d123.dataset.scene.arrow_scene import ArrowScene, SceneExtractionInfo\n",
diff --git a/notebooks/deprecated/map_conversion/test_nuplan_conversion.ipynb b/notebooks/deprecated/map_conversion/test_nuplan_conversion.ipynb
index 32805c97..8c9339ad 100644
--- a/notebooks/deprecated/map_conversion/test_nuplan_conversion.ipynb
+++ 
a/notebooks/deprecated/map_conversion/test_nuplan_conversion.ipynb +++ b/notebooks/deprecated/map_conversion/test_nuplan_conversion.ipynb @@ -7,7 +7,7 @@ "outputs": [], "source": [ "from pathlib import Path\n", - "from d123.dataset.dataset_specific.nuplan.nuplan_map_conversion import NuPlanMapConverter, MAP_LOCATIONS\n", + "from d123.datasets.nuplan.nuplan_map_conversion import NuPlanMapConverter, MAP_LOCATIONS\n", "\n", "\n", "\n", diff --git a/notebooks/deprecated/test_waypoints.ipynb b/notebooks/deprecated/test_waypoints.ipynb index 86d2ea9c..cdafea69 100644 --- a/notebooks/deprecated/test_waypoints.ipynb +++ b/notebooks/deprecated/test_waypoints.ipynb @@ -60,7 +60,7 @@ "from shapely.geometry import LineString\n", "import numpy as np\n", "from matplotlib import pyplot as plt\n", - "from d123.dataset.dataset_specific.carla.carla_data_processor import _load_json_gz \n", + "from d123.datasets.carla.carla_data_processor import _load_json_gz \n", "from d123.common.visualization.matplotlib.plots import _plot_scene_on_ax\n", "json_dict = _load_json_gz(\"/home/daniel/carla_workspace/data/_Rep0_longest1_route0_06_13_17_21_21/boxes/0000000002.json.gz\")\n", "json_dict\n", diff --git a/notebooks/gym/test_gym.ipynb b/notebooks/gym/test_gym.ipynb index c49f505c..a9e89681 100644 --- a/notebooks/gym/test_gym.ipynb +++ b/notebooks/gym/test_gym.ipynb @@ -89,7 +89,7 @@ ")\n", "from d123.dataset.arrow.conversion import TrafficLightDetectionWrapper\n", "from d123.dataset.maps.abstract_map import AbstractMap\n", - "from d123.common.datatypes.detection.detection import BoxDetectionWrapper\n", + "d123.datatypes.detections.detection import BoxDetectionWrapper\n", "from d123.dataset.scene.abstract_scene import AbstractScene\n", "import io\n", "from PIL import Image\n", diff --git a/notebooks/nuplan/nuplan_sensor_loading.ipynb b/notebooks/nuplan/nuplan_sensor_loading.ipynb index 3fc654d8..939097bf 100644 --- a/notebooks/nuplan/nuplan_sensor_loading.ipynb +++ b/notebooks/nuplan/nuplan_sensor_loading.ipynb @@ -35,7 +35,7 @@ "metadata": {}, "outputs": [], "source": [ - "from d123.dataset.dataset_specific.nuplan.nuplan_data_converter import NuplanDataConverter" + "from d123.datasets.nuplan.nuplan_data_converter import NuplanDataConverter" ] }, { diff --git a/notebooks/scene_rendering.ipynb b/notebooks/scene_rendering.ipynb index 4436e7ea..25018fda 100644 --- a/notebooks/scene_rendering.ipynb +++ b/notebooks/scene_rendering.ipynb @@ -61,8 +61,8 @@ "outputs": [], "source": [ "from typing import Tuple\n", - "from d123.common.datatypes.detection.detection import BoxDetection\n", - "from d123.common.datatypes.detection.detection_types import DYNAMIC_DETECTION_TYPES, STATIC_DETECTION_TYPES\n", + "d123.datatypes.detections.detection import BoxDetection\n", + "d123.datatypes.detections.detection_types import DYNAMIC_DETECTION_TYPES, STATIC_DETECTION_TYPES\n", "from d123.geometry import StateSE2\n", "from d123.geometry.transform.tranform_2d import translate_along_yaw\n", "from d123.geometry.vector import Vector2D\n", diff --git a/notebooks/viz/bev_matplotlib.ipynb b/notebooks/viz/bev_matplotlib.ipynb index 3627a511..3f2e9f12 100644 --- a/notebooks/viz/bev_matplotlib.ipynb +++ b/notebooks/viz/bev_matplotlib.ipynb @@ -7,11 +7,11 @@ "metadata": {}, "outputs": [], "source": [ - "from d123.dataset.scene.scene_builder import ArrowSceneBuilder\n", - "from d123.dataset.scene.scene_filter import SceneFilter\n", + "from d123.datatypes.scene.arrow.arrow_scene_builder import ArrowSceneBuilder\n", + "from 
d123.datatypes.scene.scene_filter import SceneFilter\n", "\n", "from d123.common.multithreading.worker_sequential import Sequential\n", - "from d123.common.datatypes.sensor.camera import CameraType" + "from d123.datatypes.sensors.camera import CameraType" ] }, { @@ -42,8 +42,8 @@ "\n", "# splits = [\"wopd_train\"]\n", "# splits = [\"carla\"]\n", - "# splits = [\"nuplan_private_test\"]\n", - "splits = [\"av2-sensor-mini_train\"]\n", + "splits = [\"nuplan_private_test\"]\n", + "# splits = [\"av2-sensor-mini_train\"]\n", "# log_names = None\n", "\n", "\n", @@ -76,8 +76,10 @@ "outputs": [], "source": [ "from typing import List, Optional, Tuple\n", + "\n", "import matplotlib.pyplot as plt\n", "import numpy as np\n", + "\n", "from d123.geometry import Point2D\n", "from d123.common.visualization.color.color import BLACK, DARK_GREY, DARKER_GREY, LIGHT_GREY, NEW_TAB_10, TAB_10\n", "from d123.common.visualization.color.config import PlotConfig\n", @@ -89,11 +91,11 @@ " add_traffic_lights_to_ax,\n", ")\n", "from d123.common.visualization.matplotlib.utils import add_shapely_linestring_to_ax, add_shapely_polygon_to_ax\n", - "from d123.dataset.maps.abstract_map import AbstractMap\n", - "from d123.dataset.maps.abstract_map_objects import AbstractLane, AbstractLaneGroup\n", - "from d123.dataset.maps.gpkg.gpkg_map_objects import GPKGIntersection\n", - "from d123.dataset.maps.map_datatypes import MapLayer\n", - "from d123.dataset.scene.abstract_scene import AbstractScene\n", + "from d123.datatypes.maps.abstract_map import AbstractMap\n", + "from d123.datatypes.maps.abstract_map_objects import AbstractLane, AbstractLaneGroup\n", + "from d123.datatypes.maps.gpkg.gpkg_map_objects import GPKGIntersection\n", + "from d123.datatypes.maps.map_datatypes import MapLayer\n", + "from d123.datatypes.scene.abstract_scene import AbstractScene\n", "\n", "\n", "import shapely.geometry as geom\n", diff --git a/notebooks/waymo_perception/map_testing.ipynb b/notebooks/waymo_perception/map_testing.ipynb index d7694c2d..ed242b08 100644 --- a/notebooks/waymo_perception/map_testing.ipynb +++ b/notebooks/waymo_perception/map_testing.ipynb @@ -241,7 +241,7 @@ "metadata": {}, "outputs": [], "source": [ - "from d123.dataset.dataset_specific.wopd.wopd_map_utils import extract_lane_boundaries\n", + "from d123.datasets.wopd.wopd_map_utils import extract_lane_boundaries\n", "\n", "\n", "left_boundaries, right_boundaries = extract_lane_boundaries(\n", From 53c46e8123d129ba016841ce4fbe6493720e8d71 Mon Sep 17 00:00:00 2001 From: DanielDauner Date: Wed, 24 Sep 2025 13:59:17 +0200 Subject: [PATCH 048/145] Fix some wrong import paths in hydra logs (#39) --- .../config/common/scene_builder/default_scene_builder.yaml | 2 +- d123/script/config/common/scene_filter/all_scenes.yaml | 2 +- d123/script/config/common/scene_filter/log_scenes.yaml | 2 +- d123/script/config/common/scene_filter/nuplan_mini_train.yaml | 2 +- d123/script/config/common/scene_filter/nuplan_mini_val.yaml | 2 +- d123/script/config/common/scene_filter/nuplan_sim_agent.yaml | 2 +- 6 files changed, 6 insertions(+), 6 deletions(-) diff --git a/d123/script/config/common/scene_builder/default_scene_builder.yaml b/d123/script/config/common/scene_builder/default_scene_builder.yaml index 2a8bfec7..5e42aaf4 100644 --- a/d123/script/config/common/scene_builder/default_scene_builder.yaml +++ b/d123/script/config/common/scene_builder/default_scene_builder.yaml @@ -1,4 +1,4 @@ -_target_: d123.dataset.scene.scene_builder.ArrowSceneBuilder +_target_: 
d123.datatypes.scene.arrow.arrow_scene_builder.ArrowSceneBuilder _convert_: 'all' dataset_path: ${d123_data_root} diff --git a/d123/script/config/common/scene_filter/all_scenes.yaml b/d123/script/config/common/scene_filter/all_scenes.yaml index 06ac76d5..35e157a0 100644 --- a/d123/script/config/common/scene_filter/all_scenes.yaml +++ b/d123/script/config/common/scene_filter/all_scenes.yaml @@ -1,4 +1,4 @@ -_target_: d123.dataset.scene.scene_filter.SceneFilter +_target_: d123.datatypes.scene.scene_filter.SceneFilter _convert_: 'all' split_types: null diff --git a/d123/script/config/common/scene_filter/log_scenes.yaml b/d123/script/config/common/scene_filter/log_scenes.yaml index 68114df9..f83322ac 100644 --- a/d123/script/config/common/scene_filter/log_scenes.yaml +++ b/d123/script/config/common/scene_filter/log_scenes.yaml @@ -1,4 +1,4 @@ -_target_: d123.dataset.scene.scene_filter.SceneFilter +_target_: d123.datatypes.scene.scene_filter.SceneFilter _convert_: 'all' split_types: null diff --git a/d123/script/config/common/scene_filter/nuplan_mini_train.yaml b/d123/script/config/common/scene_filter/nuplan_mini_train.yaml index f2f1c4bf..231e882b 100644 --- a/d123/script/config/common/scene_filter/nuplan_mini_train.yaml +++ b/d123/script/config/common/scene_filter/nuplan_mini_train.yaml @@ -1,4 +1,4 @@ -_target_: d123.dataset.scene.scene_filter.SceneFilter +_target_: d123.datatypes.scene.scene_filter.SceneFilter _convert_: 'all' split_types: null diff --git a/d123/script/config/common/scene_filter/nuplan_mini_val.yaml b/d123/script/config/common/scene_filter/nuplan_mini_val.yaml index 2e4eefee..43f5c9bc 100644 --- a/d123/script/config/common/scene_filter/nuplan_mini_val.yaml +++ b/d123/script/config/common/scene_filter/nuplan_mini_val.yaml @@ -1,4 +1,4 @@ -_target_: d123.dataset.scene.scene_filter.SceneFilter +_target_: d123.datatypes.scene.scene_filter.SceneFilter _convert_: 'all' split_types: null diff --git a/d123/script/config/common/scene_filter/nuplan_sim_agent.yaml b/d123/script/config/common/scene_filter/nuplan_sim_agent.yaml index eea010cb..055ac331 100644 --- a/d123/script/config/common/scene_filter/nuplan_sim_agent.yaml +++ b/d123/script/config/common/scene_filter/nuplan_sim_agent.yaml @@ -1,4 +1,4 @@ -_target_: d123.dataset.scene.scene_filter.SceneFilter +_target_: d123.datatypes.scene.scene_filter.SceneFilter _convert_: 'all' split_types: null From 5639321563ed30a5311c482993df9d5c33a91775 Mon Sep 17 00:00:00 2001 From: DanielDauner Date: Wed, 24 Sep 2025 14:44:17 +0200 Subject: [PATCH 049/145] Fix some viser specific issues after debugging. 
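
The outline path renders every box as its 12 edges. A consumption sketch,
assuming viser's line-segment API:

    lines, colors = get_bounding_box_outlines(scene, iteration)
    server.scene.add_line_segments("boxes", points=lines, colors=colors, line_width=LINE_WIDTH)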
--- d123/common/utils/mixin.py | 2 +- d123/common/visualization/viser/server.py | 4 +- d123/common/visualization/viser/utils.py | 75 ++++++++++++++++--- d123/common/visualization/viser/utils_v2.py | 73 ------------------ d123/datatypes/detections/detection.py | 7 +- .../common/scene_filter/viser_scenes.yaml | 20 +++++ d123/script/config/viser/default_viser.yaml | 1 + 7 files changed, 93 insertions(+), 89 deletions(-) delete mode 100644 d123/common/visualization/viser/utils_v2.py create mode 100644 d123/script/config/common/scene_filter/viser_scenes.yaml diff --git a/d123/common/utils/mixin.py b/d123/common/utils/mixin.py index 252a4eee..56038bb4 100644 --- a/d123/common/utils/mixin.py +++ b/d123/common/utils/mixin.py @@ -5,7 +5,7 @@ class ArrayMixin: - """Abstract base class for geometric entities.""" + """Mixin class for object entities.""" @classmethod def from_array(cls, array: npt.NDArray[np.float64], copy: bool = True) -> ArrayMixin: diff --git a/d123/common/visualization/viser/server.py b/d123/common/visualization/viser/server.py index 6b44ac15..e5c95081 100644 --- a/d123/common/visualization/viser/server.py +++ b/d123/common/visualization/viser/server.py @@ -7,12 +7,12 @@ from d123.common.visualization.viser.utils import ( get_bounding_box_meshes, + get_bounding_box_outlines, get_camera_if_available, get_camera_values, get_lidar_points, get_map_meshes, ) -from d123.common.visualization.viser.utils_v2 import get_bounding_box_outlines from d123.datatypes.scene.abstract_scene import AbstractScene from d123.datatypes.sensors.camera import CameraType from d123.datatypes.sensors.lidar import LiDARType @@ -35,7 +35,7 @@ LINE_WIDTH: float = 4.0 # Bounding box config: -BOUNDING_BOX_TYPE: Literal["mesh", "lines"] = "lines" +BOUNDING_BOX_TYPE: Literal["mesh", "lines"] = "mesh" # Map config: MAP_AVAILABLE: bool = True diff --git a/d123/common/visualization/viser/utils.py b/d123/common/visualization/viser/utils.py index 783efa01..75d72b91 100644 --- a/d123/common/visualization/viser/utils.py +++ b/d123/common/visualization/viser/utils.py @@ -13,7 +13,8 @@ from d123.datatypes.scene.abstract_scene import AbstractScene from d123.datatypes.sensors.camera import Camera, CameraType from d123.datatypes.sensors.lidar import LiDARType -from d123.geometry import BoundingBoxSE3, Point3D, Polyline3D, StateSE3 +from d123.geometry import BoundingBoxSE3, Corners3DIndex, Point3D, Point3DIndex, Polyline3D, StateSE3, StateSE3Index +from d123.geometry.geometry_index import BoundingBoxSE3Index from d123.geometry.transform.transform_euler_se3 import convert_relative_to_absolute_points_3d_array # TODO: Refactor this file. 
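 # The helpers below convert scene state into viser-friendly primitives:
 # trimesh meshes for filled boxes and (N, 2, 3) float arrays for outlines.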
@@ -57,13 +58,6 @@ def bounding_box_to_trimesh(bbox: BoundingBoxSE3, plot_config: PlotConfig) -> tr return configure_trimesh(box_mesh, plot_config.fill_color) -def translate_bounding_box_se3(bounding_box_se3: BoundingBoxSE3, point_3d: Point3D) -> BoundingBoxSE3: - bounding_box_se3.center.x = bounding_box_se3.center.x - point_3d.x - bounding_box_se3.center.y = bounding_box_se3.center.y - point_3d.y - bounding_box_se3.center.z = bounding_box_se3.center.z - point_3d.z - return bounding_box_se3 - - def get_bounding_box_meshes(scene: AbstractScene, iteration: int): initial_ego_vehicle_state = scene.get_ego_state_at_iteration(0) @@ -75,19 +69,80 @@ def get_bounding_box_meshes(scene: AbstractScene, iteration: int): output = {} for box_detection in box_detections: bbox: BoundingBoxSE3 = box_detection.bounding_box - bbox = translate_bounding_box_se3(bbox, initial_ego_vehicle_state.center_se3) + bbox.array[BoundingBoxSE3Index.XYZ] -= initial_ego_vehicle_state.center_se3.array[StateSE3Index.XYZ] plot_config = BOX_DETECTION_CONFIG[box_detection.metadata.detection_type] trimesh_box = bounding_box_to_trimesh(bbox, plot_config) output[f"{box_detection.metadata.detection_type.serialize()}/{box_detection.metadata.track_token}"] = ( trimesh_box ) - ego_bbox = translate_bounding_box_se3(ego_vehicle_state.bounding_box, initial_ego_vehicle_state.center_se3) + ego_bbox = ego_vehicle_state.bounding_box + ego_bbox.array[BoundingBoxSE3Index.XYZ] -= initial_ego_vehicle_state.center_se3.array[StateSE3Index.XYZ] trimesh_box = bounding_box_to_trimesh(ego_bbox, EGO_VEHICLE_CONFIG) output["ego"] = trimesh_box return output +def _get_bounding_box_lines(bounding_box: BoundingBoxSE3) -> npt.NDArray[np.float64]: + """ + TODO: Vectorize this function and move to geometry module. + """ + corners = bounding_box.corners_array + index_pairs = [ + (Corners3DIndex.FRONT_LEFT_BOTTOM, Corners3DIndex.FRONT_RIGHT_BOTTOM), + (Corners3DIndex.FRONT_RIGHT_BOTTOM, Corners3DIndex.BACK_RIGHT_BOTTOM), + (Corners3DIndex.BACK_RIGHT_BOTTOM, Corners3DIndex.BACK_LEFT_BOTTOM), + (Corners3DIndex.BACK_LEFT_BOTTOM, Corners3DIndex.FRONT_LEFT_BOTTOM), + (Corners3DIndex.FRONT_LEFT_TOP, Corners3DIndex.FRONT_RIGHT_TOP), + (Corners3DIndex.FRONT_RIGHT_TOP, Corners3DIndex.BACK_RIGHT_TOP), + (Corners3DIndex.BACK_RIGHT_TOP, Corners3DIndex.BACK_LEFT_TOP), + (Corners3DIndex.BACK_LEFT_TOP, Corners3DIndex.FRONT_LEFT_TOP), + (Corners3DIndex.FRONT_LEFT_BOTTOM, Corners3DIndex.FRONT_LEFT_TOP), + (Corners3DIndex.FRONT_RIGHT_BOTTOM, Corners3DIndex.FRONT_RIGHT_TOP), + (Corners3DIndex.BACK_RIGHT_BOTTOM, Corners3DIndex.BACK_RIGHT_TOP), + (Corners3DIndex.BACK_LEFT_BOTTOM, Corners3DIndex.BACK_LEFT_TOP), + ] + lines = np.zeros((len(index_pairs), 2, len(Point3DIndex)), dtype=np.float64) + for i, (start_idx, end_idx) in enumerate(index_pairs): + lines[i, 0] = corners[start_idx] + lines[i, 1] = corners[end_idx] + return lines + + +def get_bounding_box_outlines(scene: AbstractScene, iteration: int): + + initial_ego_vehicle_state = scene.get_ego_state_at_iteration(0) + origin: StateSE3 = initial_ego_vehicle_state.center_se3 + + ego_vehicle_state = scene.get_ego_state_at_iteration(iteration) + box_detections = scene.get_box_detections_at_iteration(iteration) + + lines = [] + colors = [] + for box_detection in box_detections: + bbox: BoundingBoxSE3 = box_detection.bounding_box_se3 + bbox_lines = _get_bounding_box_lines(bbox) + bbox_lines[..., Point3DIndex.XYZ] = bbox_lines[..., Point3DIndex.XYZ] - origin.array[StateSE3Index.XYZ] + bbox_color = np.zeros(bbox_lines.shape, 
dtype=np.float32) + bbox_color[..., :] = ( + BOX_DETECTION_CONFIG[box_detection.metadata.detection_type] + .fill_color.set_brightness(BRIGHTNESS_FACTOR) + .rgb_norm + ) + + lines.append(bbox_lines) + colors.append(bbox_color) + + ego_bbox_lines = _get_bounding_box_lines(ego_vehicle_state.bounding_box_se3) + ego_bbox_lines[..., Point3DIndex.XYZ] = ego_bbox_lines[..., Point3DIndex.XYZ] - origin.array[StateSE3Index.XYZ] + ego_bbox_color = np.zeros(ego_bbox_lines.shape, dtype=np.float32) + ego_bbox_color[..., :] = EGO_VEHICLE_CONFIG.fill_color.set_brightness(BRIGHTNESS_FACTOR).rgb_norm + + lines.append(ego_bbox_lines) + colors.append(ego_bbox_color) + return np.concatenate(lines, axis=0), np.concatenate(colors, axis=0) + + def get_map_meshes(scene: AbstractScene): initial_ego_vehicle_state = scene.get_ego_state_at_iteration(0) center = initial_ego_vehicle_state.center_se3 diff --git a/d123/common/visualization/viser/utils_v2.py b/d123/common/visualization/viser/utils_v2.py deleted file mode 100644 index 16a1a15b..00000000 --- a/d123/common/visualization/viser/utils_v2.py +++ /dev/null @@ -1,73 +0,0 @@ -import numpy as np -import numpy.typing as npt - -from d123.common.visualization.color.default import BOX_DETECTION_CONFIG, EGO_VEHICLE_CONFIG -from d123.common.visualization.viser.utils import BRIGHTNESS_FACTOR -from d123.datatypes.scene.abstract_scene import AbstractScene -from d123.geometry import BoundingBoxSE3, Corners3DIndex, Point3D, Point3DIndex - -# TODO: Refactor this file. -# TODO: Add general utilities for 3D primitives and mesh support. - - -def _get_bounding_box_lines(bounding_box: BoundingBoxSE3) -> npt.NDArray[np.float64]: - """ - TODO: Vectorize this function and move to geometry module. - """ - corners = bounding_box.corners_array - index_pairs = [ - (Corners3DIndex.FRONT_LEFT_BOTTOM, Corners3DIndex.FRONT_RIGHT_BOTTOM), - (Corners3DIndex.FRONT_RIGHT_BOTTOM, Corners3DIndex.BACK_RIGHT_BOTTOM), - (Corners3DIndex.BACK_RIGHT_BOTTOM, Corners3DIndex.BACK_LEFT_BOTTOM), - (Corners3DIndex.BACK_LEFT_BOTTOM, Corners3DIndex.FRONT_LEFT_BOTTOM), - (Corners3DIndex.FRONT_LEFT_TOP, Corners3DIndex.FRONT_RIGHT_TOP), - (Corners3DIndex.FRONT_RIGHT_TOP, Corners3DIndex.BACK_RIGHT_TOP), - (Corners3DIndex.BACK_RIGHT_TOP, Corners3DIndex.BACK_LEFT_TOP), - (Corners3DIndex.BACK_LEFT_TOP, Corners3DIndex.FRONT_LEFT_TOP), - (Corners3DIndex.FRONT_LEFT_BOTTOM, Corners3DIndex.FRONT_LEFT_TOP), - (Corners3DIndex.FRONT_RIGHT_BOTTOM, Corners3DIndex.FRONT_RIGHT_TOP), - (Corners3DIndex.BACK_RIGHT_BOTTOM, Corners3DIndex.BACK_RIGHT_TOP), - (Corners3DIndex.BACK_LEFT_BOTTOM, Corners3DIndex.BACK_LEFT_TOP), - ] - lines = np.zeros((len(index_pairs), 2, len(Point3DIndex)), dtype=np.float64) - for i, (start_idx, end_idx) in enumerate(index_pairs): - lines[i, 0] = corners[start_idx] - lines[i, 1] = corners[end_idx] - return lines - - -def translate_points_3d(points_3d: npt.NDArray[np.float64], point_3d: Point3D) -> npt.NDArray[np.float64]: - # TODO: remove - return points_3d - point_3d.array - - -def get_bounding_box_outlines(scene: AbstractScene, iteration: int): - - initial_ego_vehicle_state = scene.get_ego_state_at_iteration(0) - ego_vehicle_state = scene.get_ego_state_at_iteration(iteration) - box_detections = scene.get_box_detections_at_iteration(iteration) - - lines = [] - colors = [] - for box_detection in box_detections: - bbox: BoundingBoxSE3 = box_detection.bounding_box_se3 - bbox_lines = _get_bounding_box_lines(bbox) - bbox_lines = translate_points_3d(bbox_lines, initial_ego_vehicle_state.center_se3.point_3d) - 
bbox_color = np.zeros(bbox_lines.shape, dtype=np.float32) - bbox_color[..., :] = ( - BOX_DETECTION_CONFIG[box_detection.metadata.detection_type] - .fill_color.set_brightness(BRIGHTNESS_FACTOR) - .rgb_norm - ) - - lines.append(bbox_lines) - colors.append(bbox_color) - - ego_bbox_lines = _get_bounding_box_lines(ego_vehicle_state.bounding_box_se3) - ego_bbox_lines = translate_points_3d(ego_bbox_lines, initial_ego_vehicle_state.center_se3.point_3d) - ego_bbox_color = np.zeros(ego_bbox_lines.shape, dtype=np.float32) - ego_bbox_color[..., :] = EGO_VEHICLE_CONFIG.fill_color.set_brightness(BRIGHTNESS_FACTOR).rgb_norm - - lines.append(ego_bbox_lines) - colors.append(ego_bbox_color) - return np.concatenate(lines, axis=0), np.concatenate(colors, axis=0) diff --git a/d123/datatypes/detections/detection.py b/d123/datatypes/detections/detection.py index 29da95d7..aa1cc679 100644 --- a/d123/datatypes/detections/detection.py +++ b/d123/datatypes/detections/detection.py @@ -24,7 +24,7 @@ class BoxDetectionSE2: metadata: BoxDetectionMetadata bounding_box_se2: BoundingBoxSE2 - velocity: Vector2D | None = None + velocity: Optional[Vector2D] = None @property def shapely_polygon(self) -> shapely.geometry.Polygon: @@ -81,7 +81,7 @@ def box_detection_se2(self) -> BoxDetectionSE2: @dataclass class BoxDetectionWrapper: - box_detections: list[BoxDetection] + box_detections: List[BoxDetection] def __getitem__(self, index: int) -> BoxDetection: return self.box_detections[index] @@ -132,7 +132,8 @@ class TrafficLightDetection: @dataclass class TrafficLightDetectionWrapper: - traffic_light_detections: list[TrafficLightDetection] + + traffic_light_detections: List[TrafficLightDetection] def __getitem__(self, index: int) -> TrafficLightDetection: return self.traffic_light_detections[index] diff --git a/d123/script/config/common/scene_filter/viser_scenes.yaml b/d123/script/config/common/scene_filter/viser_scenes.yaml new file mode 100644 index 00000000..fe012d84 --- /dev/null +++ b/d123/script/config/common/scene_filter/viser_scenes.yaml @@ -0,0 +1,20 @@ +_target_: d123.datatypes.scene.scene_filter.SceneFilter +_convert_: 'all' + +split_types: null +split_names: null +log_names: null + + +map_names: null +scene_tokens: null +timestamp_threshold_s: 10.0 +ego_displacement_minimum_m: null + +duration_s: 10.0 +history_s: 0.0 + +camera_types: null + +max_num_scenes: null +shuffle: True diff --git a/d123/script/config/viser/default_viser.yaml b/d123/script/config/viser/default_viser.yaml index c42164dc..fb0e47a2 100644 --- a/d123/script/config/viser/default_viser.yaml +++ b/d123/script/config/viser/default_viser.yaml @@ -12,5 +12,6 @@ hydra: defaults: - default_common - default_dataset_paths + - override scene_filter: viser_scenes port_number: 8080 From ad9d33fbcbfc0eb4fccf42d6f8b15c2134bd4913 Mon Sep 17 00:00:00 2001 From: jbwang <1159270049@qq.com> Date: Tue, 30 Sep 2025 13:14:06 +0800 Subject: [PATCH 050/145] only extract timestamp that exists in ego_pose.txt --- .../kitti_360/kitti_360_data_converter.py | 71 ++++++------ .../kitti_360/kitti_360_helper.py | 81 ++----------- .../kitti_360/kitti_360_map_conversion.py | 2 +- .../kitti_360/preprocess_detection.py | 108 +++++------------- 4 files changed, 68 insertions(+), 194 deletions(-) diff --git a/d123/dataset/dataset_specific/kitti_360/kitti_360_data_converter.py b/d123/dataset/dataset_specific/kitti_360/kitti_360_data_converter.py index 4e221617..76396bbd 100644 --- a/d123/dataset/dataset_specific/kitti_360/kitti_360_data_converter.py +++ 
b/d123/dataset/dataset_specific/kitti_360/kitti_360_data_converter.py @@ -32,7 +32,7 @@ from d123.dataset.arrow.helper import open_arrow_table, write_arrow_table from d123.dataset.dataset_specific.raw_data_converter import DataConverterConfig, RawDataConverter from d123.dataset.logs.log_metadata import LogMetadata -from d123.dataset.dataset_specific.kitti_360.kitti_360_helper import KITTI360Bbox3D, KITTI3602NUPLAN_IMU_CALIBRATION, get_lidar_extrinsic,interpolate_obj_list +from d123.dataset.dataset_specific.kitti_360.kitti_360_helper import KITTI360Bbox3D, KITTI3602NUPLAN_IMU_CALIBRATION, get_lidar_extrinsic from d123.dataset.dataset_specific.kitti_360.labels import KIITI360_DETECTION_NAME_DICT,kittiId2label,BBOX_LABLES_TO_DETECTION_NAME_DICT from d123.dataset.dataset_specific.kitti_360.kitti_360_map_conversion import convert_kitti360_map from d123.geometry import BoundingBoxSE3, BoundingBoxSE3Index, StateSE3, Vector3D, Vector3DIndex @@ -57,8 +57,8 @@ DIR_POSES = "data_poses" DIR_CALIB = "calibration" -PATH_2D_RAW_ROOT: Path = KITTI360_DATA_ROOT / DIR_2D_RAW -# PATH_2D_RAW_ROOT: Path = KITTI360_DATA_ROOT +# PATH_2D_RAW_ROOT: Path = KITTI360_DATA_ROOT / DIR_2D_RAW +PATH_2D_RAW_ROOT: Path = KITTI360_DATA_ROOT PATH_2D_SMT_ROOT: Path = KITTI360_DATA_ROOT / DIR_2D_SMT PATH_3D_RAW_ROOT: Path = KITTI360_DATA_ROOT / DIR_3D_RAW PATH_3D_SMT_ROOT: Path = KITTI360_DATA_ROOT / DIR_3D_SMT @@ -330,7 +330,7 @@ def _read_projection_matrix(p_line: str) -> np.ndarray: K = P[:, :3] return K -def _readYAMLFile(fileName): +def _readYAMLFile(fileName:Path) -> Dict[str, Any]: '''make OpenCV YAML file compatible with python''' ret = {} skip_lines=1 # Skip the first line which says "%YAML:1.0". Or replace it with "%YAML 1.0" @@ -360,22 +360,22 @@ def _write_recording_table( data_converter_config: DataConverterConfig ) -> None: - ts_list = _read_timestamps(log_name) - ego_state_all = _extract_ego_state_all(log_name) + ts_list: List[TimePoint] = _read_timestamps(log_name) + ego_state_all, valid_timestamp = _extract_ego_state_all(log_name) ego_states_xyz = np.array([ego_state[:3] for ego_state in ego_state_all],dtype=np.float64) - detections_states,detections_velocity,detections_tokens,detections_types = _extract_detections(log_name,len(ts_list),ego_states_xyz) + detections_states,detections_velocity,detections_tokens,detections_types = _extract_detections(log_name,len(ts_list),ego_states_xyz,valid_timestamp) with pa.OSFile(str(log_file_path), "wb") as sink: with pa.ipc.new_file(sink, recording_schema) as writer: - for idx, tp in enumerate(ts_list): - + for idx in range(len(valid_timestamp)): + valid_idx = valid_timestamp[idx] row_data = { "token": [create_token(f"{log_name}_{idx}")], - "timestamp": [tp.time_us], - "detections_state": [detections_states[idx]], - "detections_velocity": [detections_velocity[idx]], - "detections_token": [detections_tokens[idx]], - "detections_type": [detections_types[idx]], + "timestamp": [ts_list[valid_idx].time_us], + "detections_state": [detections_states[valid_idx]], + "detections_velocity": [detections_velocity[valid_idx]], + "detections_token": [detections_tokens[valid_idx]], + "detections_type": [detections_types[valid_idx]], "ego_states": [ego_state_all[idx]], "traffic_light_ids": [[]], "traffic_light_types": [[]], @@ -384,7 +384,7 @@ def _write_recording_table( } if data_converter_config.lidar_store_option is not None: - lidar_data_dict = _extract_lidar(log_name, idx, data_converter_config) + lidar_data_dict = _extract_lidar(log_name, valid_idx, data_converter_config) for 
lidar_type, lidar_data in lidar_data_dict.items(): if lidar_data is not None: row_data[lidar_type.serialize()] = [lidar_data] @@ -392,7 +392,7 @@ def _write_recording_table( row_data[lidar_type.serialize()] = [None] if data_converter_config.camera_store_option is not None: - camera_data_dict = _extract_cameras(log_name, idx, data_converter_config) + camera_data_dict = _extract_cameras(log_name, valid_idx, data_converter_config) for camera_type, camera_data in camera_data_dict.items(): if camera_data is not None: row_data[camera_type.serialize()] = [camera_data[0]] @@ -448,7 +448,7 @@ def _read_timestamps(log_name: str) -> Optional[List[TimePoint]]: return tps return None -def _extract_ego_state_all(log_name: str) -> List[List[float]]: +def _extract_ego_state_all(log_name: str) -> Tuple[List[List[float]], List[int]]: ego_state_all: List[List[float]] = [] @@ -456,24 +456,20 @@ def _extract_ego_state_all(log_name: str) -> List[List[float]]: if not pose_file.exists(): raise FileNotFoundError(f"Pose file not found: {pose_file}") poses = np.loadtxt(pose_file) - poses_time = poses[:, 0] - 1 # Adjusting time to start from 0 + poses_time = poses[:, 0].astype(np.int32) + valid_timestamp: List[int] = list(poses_time) - # oxts_path = PATH_POSES_ROOT / log_name / "oxts" / "data" - oxts_path = Path("/data/jbwang/d123/data_poses/") / log_name / "oxts" / "data" + oxts_path = PATH_POSES_ROOT / log_name / "oxts" / "data" - pose_idx = 0 - poses_time_len = len(poses_time) - - for idx in range(len(list(oxts_path.glob("*.txt")))): - oxts_path_file = oxts_path / f"{int(idx):010d}.txt" + for idx in range(len(valid_timestamp)): + oxts_path_file = oxts_path / f"{int(valid_timestamp[idx]):010d}.txt" oxts_data = np.loadtxt(oxts_path_file) vehicle_parameters = get_kitti360_station_wagon_parameters() - while pose_idx + 1 < poses_time_len and poses_time[pose_idx + 1] < idx: - pose_idx += 1 - pos = pose_idx - # pos = np.searchsorted(poses_time, idx, side='right') - 1 + pos = idx + if log_name=="2013_05_28_drive_0004_sync" and pos == 0: + pos = 1 # NOTE you can use oxts_data[3:6] as roll, pitch, yaw for simplicity #roll, pitch, yaw = oxts_data[3:6] @@ -521,12 +517,13 @@ def _extract_ego_state_all(log_name: str) -> List[List[float]]: timepoint=None, ).array.tolist() ) - return ego_state_all + return ego_state_all, valid_timestamp def _extract_detections( log_name: str, ts_len: int, - ego_states_xyz: np.ndarray + ego_states_xyz: np.ndarray, + valid_timestamp: List[int], ) -> Tuple[List[List[float]], List[List[float]], List[str], List[int]]: detections_states: List[List[List[float]]] = [[] for _ in range(ts_len)] @@ -544,17 +541,16 @@ def _extract_detections( tree = ET.parse(bbox_3d_path) root = tree.getroot() - dynamic_objs: Dict[int, List[KITTI360Bbox3D]] = defaultdict(list) - detection_preprocess_path = PREPOCESS_DETECTION_DIR / f"{log_name}_detection_preprocessed.pkl" if detection_preprocess_path.exists(): with open(detection_preprocess_path, "rb") as f: detection_preprocess_result = pickle.load(f) static_records_dict = {record_item["global_id"]: record_item for record_item in detection_preprocess_result["static"]} - dynamic_records_dict = detection_preprocess_result["dynamic"] else: detection_preprocess_result = None + dynamic_objs: Dict[int, List[KITTI360Bbox3D]] = defaultdict(list) + for child in root: if child.find('semanticId') is not None: semanticIdKITTI = int(child.find('semanticId').text) @@ -570,7 +566,7 @@ def _extract_detections( #static object if obj.timestamp == -1: if detection_preprocess_result is None: - 
obj.filter_by_radius(ego_states_xyz,radius=50.0) + obj.filter_by_radius(ego_states_xyz,valid_timestamp,radius=50.0) else: obj.load_detection_preprocess(static_records_dict) for record in obj.valid_frames["records"]: @@ -584,11 +580,8 @@ def _extract_detections( dynamic_objs[global_ID].append(obj) # dynamic object - if detection_preprocess_result is not None: - dynamic_objs = copy.deepcopy(dynamic_records_dict) - for global_id, obj_list in dynamic_objs.items(): - obj_list = interpolate_obj_list(obj_list) + obj_list.sort(key=lambda obj: obj.timestamp) num_frames = len(obj_list) positions = [obj.get_state_array()[:3] for obj in obj_list] diff --git a/d123/dataset/dataset_specific/kitti_360/kitti_360_helper.py b/d123/dataset/dataset_specific/kitti_360/kitti_360_helper.py index a756a343..e8520253 100644 --- a/d123/dataset/dataset_specific/kitti_360/kitti_360_helper.py +++ b/d123/dataset/dataset_specific/kitti_360/kitti_360_helper.py @@ -1,7 +1,7 @@ import numpy as np from collections import defaultdict -from typing import Dict, Optional, Any, List +from typing import Dict, Optional, Any, List, Tuple import copy from scipy.linalg import polar from scipy.spatial.transform import Rotation as R @@ -30,14 +30,14 @@ KITTI3602NUPLAN_IMU_CALIBRATION = kitti3602nuplan_imu_calibration_ideal MAX_N = 1000 -def local2global(semanticId, instanceId): +def local2global(semanticId: int, instanceId: int) -> int: globalId = semanticId*MAX_N + instanceId if isinstance(globalId, np.ndarray): return globalId.astype(np.int32) else: return int(globalId) -def global2local(globalId): +def global2local(globalId: int) -> Tuple[int, int]: semanticId = globalId // MAX_N instanceId = globalId % MAX_N if isinstance(globalId, np.ndarray): @@ -72,12 +72,6 @@ def __init__(self): #label self.label = '' - - # used to mark if the bbox is interpolated - self.is_interpolated = False - # GT annotation idx - self.idx_next = -1 - self.idx_prev = -1 def parseBbox(self, child): self.timestamp = int(child.find('timestamp').text) @@ -138,7 +132,7 @@ def parse_scale_rotation(self): self.pitch = pitch self.roll = roll - def get_state_array(self): + def get_state_array(self) -> np.ndarray: center = StateSE3( x=self.T[0], y=self.T[1], @@ -152,17 +146,17 @@ def get_state_array(self): return bounding_box_se3.array - def filter_by_radius(self,ego_state_xyz,radius=50.0): + def filter_by_radius(self, ego_state_xyz: np.ndarray, valid_timestamp: List[int], radius: float = 50.0) -> None: ''' first stage of detection, used to filter out detections by radius ''' d = np.linalg.norm(ego_state_xyz - self.T[None, :], axis=1) idxs = np.where(d <= radius)[0] for idx in idxs: self.valid_frames["records"].append({ - "timestamp": idx, + "timestamp": valid_timestamp[idx], "points_in_box": None, }) - def box_visible_in_point_cloud(self, points): + def box_visible_in_point_cloud(self, points: np.ndarray) -> Tuple[bool, int]: ''' points: (N,3) , box: (8,3) ''' box = self.vertices.copy() # avoid calculating ground point cloud @@ -185,67 +179,6 @@ def load_detection_preprocess(self, records_dict: Dict[int, Any]): if self.globalID in records_dict: self.valid_frames["records"] = records_dict[self.globalID]["records"] -def interpolate_obj_list(obj_list: List[KITTI360Bbox3D]) -> List[KITTI360Bbox3D]: - """ - Fill missing timestamps in obj_list by linear interpolation. - For each missing timestamp between two objects, create a new KITTI360Bbox3D object - with only interpolated position (T), yaw, pitch, roll, and copy other attributes. 
- Returns a new list with all timestamps filled and sorted. - """ - if not obj_list: - return obj_list - - # Sort by timestamp ascending - obj_list.sort(key=lambda obj: obj.timestamp) - timestamps = [obj.timestamp for obj in obj_list] - min_ts, max_ts = min(timestamps), max(timestamps) - full_ts = list(range(min_ts, max_ts + 1)) - missing_ts = sorted(set(full_ts) - set(timestamps)) - - # Prepare arrays for interpolation - T_arr = np.array([obj.T for obj in obj_list]) - yaw_arr = np.array([obj.yaw for obj in obj_list]) - pitch_arr = np.array([obj.pitch for obj in obj_list]) - roll_arr = np.array([obj.roll for obj in obj_list]) - ts_arr = np.array(timestamps) - - for ts in missing_ts: - idx_next = np.searchsorted(ts_arr, ts) - idx_prev = idx_next - 1 - if idx_prev < 0 or idx_next >= len(obj_list): - continue - - frac = (ts - ts_arr[idx_prev]) / (ts_arr[idx_next] - ts_arr[idx_prev]) - T_interp = T_arr[idx_prev] * (1 - frac) + T_arr[idx_next] * frac - - yaw_delat = normalize_angle(yaw_arr[idx_next] - yaw_arr[idx_prev]) - yaw_interp = yaw_arr[idx_prev] + yaw_delat * frac - yaw_interp = normalize_angle(yaw_interp) - - pitch_interp = pitch_arr[idx_prev] * (1 - frac) + pitch_arr[idx_next] * frac - roll_interp = roll_arr[idx_prev] * (1 - frac) + roll_arr[idx_next] * frac - - obj_new = copy.deepcopy(obj_list[idx_prev]) - obj_new.timestamp = ts - obj_new.T = T_interp - obj_new.yaw = yaw_interp - obj_new.pitch = pitch_interp - obj_new.roll = roll_interp - obj_new.Rm = R.from_euler('zyx', [obj_new.yaw, obj_new.pitch, obj_new.roll], degrees=False).as_matrix() - obj_new.R = obj_new.Rm @ obj_new.Sm - obj_new.vertices = (obj_new.R @ obj_new.vertices_template.T).T + obj_new.T - obj_new.is_interpolated = True - obj_new.idx_prev = ts_arr[idx_prev] - obj_new.idx_next = ts_arr[idx_next] - - obj_list.append(obj_new) - - obj_list.sort(key=lambda obj: obj.timestamp) - return obj_list - -def normalize_angle(a): - return np.arctan2(np.sin(a), np.cos(a)) - class KITTI360_MAP_Bbox3D(): def __init__(self): self.id = -1 diff --git a/d123/dataset/dataset_specific/kitti_360/kitti_360_map_conversion.py b/d123/dataset/dataset_specific/kitti_360/kitti_360_map_conversion.py index bf13eda6..924a7822 100644 --- a/d123/dataset/dataset_specific/kitti_360/kitti_360_map_conversion.py +++ b/d123/dataset/dataset_specific/kitti_360/kitti_360_map_conversion.py @@ -36,7 +36,7 @@ # "driveway", ] -def convert_kitti360_map(log_name, map_path): +def convert_kitti360_map(log_name: str, map_path: Path) -> None: xml_path = PATH_3D_BBOX_ROOT / "train_full" / f"{log_name}.xml" diff --git a/d123/dataset/dataset_specific/kitti_360/preprocess_detection.py b/d123/dataset/dataset_specific/kitti_360/preprocess_detection.py index f2d14ce1..97ea6eb8 100644 --- a/d123/dataset/dataset_specific/kitti_360/preprocess_detection.py +++ b/d123/dataset/dataset_specific/kitti_360/preprocess_detection.py @@ -1,8 +1,8 @@ """ -This script precomputes detection records for KITTI-360: +This script precomputes static detection records for KITTI-360: - Stage 1: radius filtering using ego positions (from poses.txt). - Stage 2: LiDAR visibility check to fill per-frame point counts. -It writes a pickle containing, for each object, all feasible frames and +It writes a pickle containing, for each static object, all feasible frames and their point counts to avoid recomputation in later pipelines. 
We have precomputed and saved the pickle for all training logs, you can either download them or run this script to generate @@ -31,8 +31,8 @@ PATH_3D_BBOX_ROOT = KITTI360_DATA_ROOT / DIR_3D_BBOX PATH_POSES_ROOT = KITTI360_DATA_ROOT / DIR_POSES -from d123.dataset.dataset_specific.kitti_360.kitti_360_helper import KITTI360Bbox3D, KITTI3602NUPLAN_IMU_CALIBRATION, get_lidar_extrinsic,interpolate_obj_list -from d123.dataset.dataset_specific.kitti_360.labels import KIITI360_DETECTION_NAME_DICT, kittiId2label,BBOX_LABLES_TO_DETECTION_NAME_DICT +from d123.dataset.dataset_specific.kitti_360.kitti_360_helper import KITTI360Bbox3D, KITTI3602NUPLAN_IMU_CALIBRATION, get_lidar_extrinsic +from d123.dataset.dataset_specific.kitti_360.labels import KIITI360_DETECTION_NAME_DICT, kittiId2label, BBOX_LABLES_TO_DETECTION_NAME_DICT def _bbox_xml_path(log_name: str) -> Path: if log_name == "2013_05_28_drive_0004_sync": @@ -47,8 +47,8 @@ def _load_lidar_xyz(filepath: Path) -> np.ndarray: arr = np.fromfile(filepath, dtype=np.float32) return arr.reshape(-1, 4)[:, :3] -def _collect_objects(log_name: str) -> Tuple[List[KITTI360Bbox3D], Dict[int, List[KITTI360Bbox3D]]]: - """Parse XML and collect objects with valid class names.""" +def _collect_static_objects(log_name: str) -> List[KITTI360Bbox3D]: + """Parse XML and collect static objects with valid class names.""" xml_path = _bbox_xml_path(log_name) if not xml_path.exists(): raise FileNotFoundError(f"BBox 3D file not found: {xml_path}") @@ -56,7 +56,6 @@ def _collect_objects(log_name: str) -> Tuple[List[KITTI360Bbox3D], Dict[int, Lis root = tree.getroot() static_objs: List[KITTI360Bbox3D] = [] - dynamic_objs: Dict[int, List[KITTI360Bbox3D]] = defaultdict(list) for child in root: if child.find('semanticId') is not None: @@ -65,20 +64,15 @@ def _collect_objects(log_name: str) -> Tuple[List[KITTI360Bbox3D], Dict[int, Lis else: lable = child.find('label').text name = BBOX_LABLES_TO_DETECTION_NAME_DICT.get(lable, 'unknown') - if child.find("transform") is None or name not in KIITI360_DETECTION_NAME_DICT: + timestamp = int(child.find('timestamp').text) # -1 for static objects + if child.find("transform") is None or name not in KIITI360_DETECTION_NAME_DICT or timestamp != -1: continue obj = KITTI360Bbox3D() obj.parseBbox(child) - timestamp = int(child.find('timestamp').text) - if timestamp == -1: - static_objs.append(obj) - else: - global_ID = obj.globalID - dynamic_objs[global_ID].append(obj) - - return static_objs, dynamic_objs + static_objs.append(obj) + return static_objs -def _collect_ego_states(log_name: str,length: int) -> npt.NDArray[np.float64]: +def _collect_ego_states(log_name: str) -> Tuple[npt.NDArray[np.float64], list[int]]: """Load ego states from poses.txt.""" pose_file = PATH_POSES_ROOT / log_name / "poses.txt" @@ -86,17 +80,12 @@ def _collect_ego_states(log_name: str,length: int) -> npt.NDArray[np.float64]: raise FileNotFoundError(f"Pose file not found: {pose_file}") poses = np.loadtxt(pose_file) - poses_time = poses[:, 0] - 1 # Adjusting time to start from 0 + poses_time = poses[:, 0].astype(np.int32) + valid_timestamp: List[int] = list(poses_time) - pose_idx = 0 - poses_time_len = len(poses_time) - ego_states = [] - - for time_idx in range(length): - while pose_idx + 1 < poses_time_len and poses_time[pose_idx + 1] < time_idx: - pose_idx += 1 - pos = pose_idx + for time_idx in range(len(valid_timestamp)): + pos = time_idx state_item = np.eye(4) r00, r01, r02 = poses[pos, 1:4] r10, r11, r12 = poses[pos, 5:8] @@ -115,7 +104,8 @@ def 
_collect_ego_states(log_name: str,length: int) -> npt.NDArray[np.float64]: state_item[:3, 3] = ego_state_xyz ego_states.append(state_item) - return np.array(ego_states) # [N,4,4] + # [N,4,4] + return np.array(ego_states), valid_timestamp def process_detection( @@ -128,9 +118,6 @@ def process_detection( for static objects: 1) filter by ego-centered radius over all frames 2) filter by LiDAR point cloud visibility - for dynamic objects: - 1) interpolate boxes for missing frames - 2) select box with highest LiDAR point count Save per-frame detections to a pickle to avoid recomputation. """ @@ -141,31 +128,22 @@ def process_detection( logging.info(f"[preprocess] {log_name}: found {ts_len} lidar frames") # 1) Parse objects from XML - static_objs: List[KITTI360Bbox3D] - dynamic_objs: Dict[int, List[KITTI360Bbox3D]] - static_objs, dynamic_objs = _collect_objects(log_name) - - # only interpolate dynamic objects - for global_ID, obj_list in dynamic_objs.items(): - obj_list_interpolated = interpolate_obj_list(obj_list) - dynamic_objs[global_ID] = obj_list_interpolated - dymanic_objs_updated = copy.deepcopy(dynamic_objs) - + static_objs: List[KITTI360Bbox3D] = _collect_static_objects(log_name) logging.info(f"[preprocess] {log_name}: static objects = {len(static_objs)}") - logging.info(f"[preprocess] {log_name}: dynamic objects = {len(dynamic_objs.keys())}") # 2) Filter static objs by ego-centered radius - ego_states = _collect_ego_states(log_name,ts_len) + ego_states, valid_timestamp = _collect_ego_states(log_name) logging.info(f"[preprocess] {log_name}: ego states = {len(ego_states)}") for obj in static_objs: - obj.filter_by_radius(ego_states[:, :3, 3], radius_m) + obj.filter_by_radius(ego_states[:, :3, 3], valid_timestamp, radius_m) # 3) Filter static objs by LiDAR point cloud visibility lidar_extrinsic = get_lidar_extrinsic() def process_one_frame(time_idx: int) -> None: - logging.info(f"[preprocess] {log_name}: t={time_idx}") - lidar_path = _lidar_frame_path(log_name, time_idx) + valid_time_idx = valid_timestamp[time_idx] + logging.info(f"[preprocess] {log_name}: t={valid_time_idx}") + lidar_path = _lidar_frame_path(log_name, valid_time_idx) if not lidar_path.exists(): logging.warning(f"[preprocess] {log_name}: LiDAR frame not found: {lidar_path}") return @@ -181,49 +159,20 @@ def process_one_frame(time_idx: int) -> None: lidar_in_world = lidar_in_imu @ ego_states[time_idx][:3,:3].T + ego_states[time_idx][:3,3] for obj in static_objs: - if not any(record["timestamp"] == time_idx for record in obj.valid_frames["records"]): + if not any(record["timestamp"] == valid_time_idx for record in obj.valid_frames["records"]): continue visible, points_in_box = obj.box_visible_in_point_cloud(lidar_in_world) if not visible: - obj.valid_frames["records"] = [record for record in obj.valid_frames["records"] if record["timestamp"] != time_idx] + obj.valid_frames["records"] = [record for record in obj.valid_frames["records"] if record["timestamp"] != valid_time_idx] else: for record in obj.valid_frames["records"]: - if record["timestamp"] == time_idx: + if record["timestamp"] == valid_time_idx: record["points_in_box"] = points_in_box break - # for dynamic objects, select the box with the highest LiDAR point count - for global_ID, obj_list in dynamic_objs.items(): - obj_at_time = [obj for obj in obj_list if obj.timestamp == time_idx] - if not obj_at_time: - continue - - obj = obj_at_time[0] - # NOTE only update interpolated boxes - if not obj.is_interpolated: - continue - - max_points = -1 - best_obj = None - ts_prev 
= obj.idx_prev - ts_next = obj.idx_next - candidates = [candidate for candidate in obj_list if ts_prev <= candidate.timestamp <= ts_next] - - for obj in candidates: - visible, points_in_box = obj.box_visible_in_point_cloud(lidar_in_world) - if points_in_box > max_points: - max_points = points_in_box - best_obj = obj - - if best_obj is not None: - idx = next((i for i, o in enumerate(dynamic_objs[global_ID]) if o.timestamp == time_idx), None) - if idx is not None: - dymanic_objs_updated[global_ID][idx] = copy.deepcopy(best_obj) - dymanic_objs_updated[global_ID][idx].timestamp = time_idx - max_workers = os.cpu_count() * 2 with concurrent.futures.ThreadPoolExecutor(max_workers=max_workers) as executor: - results = list(executor.map(process_one_frame, range(ts_len))) + results = list(executor.map(process_one_frame, range(len(valid_timestamp)))) # 4) Save pickle static_records: List[Dict[str, Any]] = [] @@ -238,7 +187,6 @@ def process_one_frame(time_idx: int) -> None: payload = { "log_name": log_name, "static": static_records, - "dynamic": dymanic_objs_updated } with open(out_path, "wb") as f: pickle.dump(payload, f) @@ -248,7 +196,7 @@ def process_one_frame(time_idx: int) -> None: import argparse logging.basicConfig(level=logging.INFO) parser = argparse.ArgumentParser(description="Precompute KITTI-360 detections filters") - parser.add_argument("--log_name", default="2013_05_28_drive_0004_sync") + parser.add_argument("--log_name", default="2013_05_28_drive_0000_sync") parser.add_argument("--radius", type=float, default=60.0) parser.add_argument("--out", type=Path, default="detection_preprocess", help="output directory for pkl") args = parser.parse_args() From 5c95ecbd4ba65b6f2524ccb46355c41c88df8b70 Mon Sep 17 00:00:00 2001 From: jbwang <1159270049@qq.com> Date: Tue, 30 Sep 2025 13:27:04 +0800 Subject: [PATCH 051/145] merge dev_v0.0.7 into kitti360 --- d123/datasets/av2/av2_data_converter.py | 11 ----------- d123/datasets/carla/carla_data_converter.py | 8 -------- d123/datasets/nuplan/nuplan_data_converter.py | 13 ------------- d123/datatypes/scene/arrow/arrow_scene.py | 11 ----------- 4 files changed, 43 deletions(-) diff --git a/d123/datasets/av2/av2_data_converter.py b/d123/datasets/av2/av2_data_converter.py index 59f306e0..f066aa42 100644 --- a/d123/datasets/av2/av2_data_converter.py +++ b/d123/datasets/av2/av2_data_converter.py @@ -10,17 +10,6 @@ import pandas as pd import pyarrow as pa -<<<<<<< HEAD:d123/dataset/dataset_specific/av2/av2_data_converter.py -from d123.common.datatypes.sensor.camera import PinholeCameraMetadata, CameraType, camera_metadata_dict_to_json -from d123.common.datatypes.sensor.lidar import LiDARMetadata, LiDARType, lidar_metadata_dict_to_json -from d123.common.datatypes.time.time_point import TimePoint -from d123.common.datatypes.vehicle_state.ego_state import DynamicStateSE3, EgoStateSE3, EgoStateSE3Index -from d123.common.datatypes.vehicle_state.vehicle_parameters import ( - get_av2_ford_fusion_hybrid_parameters, - rear_axle_se3_to_center_se3, -) -======= ->>>>>>> dev_v0.0.7:d123/datasets/av2/av2_data_converter.py from d123.common.multithreading.worker_utils import WorkerPool, worker_map from d123.datasets.av2.av2_constants import ( AV2_CAMERA_TYPE_MAPPING, diff --git a/d123/datasets/carla/carla_data_converter.py b/d123/datasets/carla/carla_data_converter.py index 80525baf..bcf8342c 100644 --- a/d123/datasets/carla/carla_data_converter.py +++ b/d123/datasets/carla/carla_data_converter.py @@ -11,14 +11,6 @@ import numpy as np import pyarrow as pa -<<<<<<< 
HEAD:d123/dataset/dataset_specific/carla/carla_data_converter.py -from d123.common.datatypes.sensor.camera import PinholeCameraMetadata, CameraType, camera_metadata_dict_to_json -from d123.common.datatypes.sensor.lidar import LiDARMetadata, LiDARType, lidar_metadata_dict_to_json -from d123.common.datatypes.sensor.lidar_index import CarlaLidarIndex -from d123.common.datatypes.vehicle_state.ego_state import EgoStateSE3Index -from d123.common.datatypes.vehicle_state.vehicle_parameters import get_carla_lincoln_mkz_2020_parameters -======= ->>>>>>> dev_v0.0.7:d123/datasets/carla/carla_data_converter.py from d123.common.multithreading.worker_utils import WorkerPool, worker_map from d123.common.utils.arrow_helper import open_arrow_table, write_arrow_table from d123.datasets.raw_data_converter import DataConverterConfig, RawDataConverter diff --git a/d123/datasets/nuplan/nuplan_data_converter.py b/d123/datasets/nuplan/nuplan_data_converter.py index e2d1cef5..398e536a 100644 --- a/d123/datasets/nuplan/nuplan_data_converter.py +++ b/d123/datasets/nuplan/nuplan_data_converter.py @@ -12,18 +12,6 @@ import yaml from pyquaternion import Quaternion -<<<<<<< HEAD:d123/dataset/dataset_specific/nuplan/nuplan_data_converter.py - -import d123.dataset.dataset_specific.nuplan.utils as nuplan_utils -from d123.common.datatypes.detection.detection import TrafficLightStatus -from d123.common.datatypes.detection.detection_types import DetectionType -from d123.common.datatypes.sensor.camera import PinholeCameraMetadata, CameraType, camera_metadata_dict_to_json -from d123.common.datatypes.sensor.lidar import LiDARMetadata, LiDARType, lidar_metadata_dict_to_json -from d123.common.datatypes.sensor.lidar_index import NuplanLidarIndex -from d123.common.datatypes.time.time_point import TimePoint -from d123.common.datatypes.vehicle_state.ego_state import DynamicStateSE3, EgoStateSE3, EgoStateSE3Index -from d123.common.datatypes.vehicle_state.vehicle_parameters import ( -======= import d123.datasets.nuplan.utils as nuplan_utils from d123.common.multithreading.worker_utils import WorkerPool, worker_map from d123.common.utils.arrow_helper import open_arrow_table, write_arrow_table @@ -39,7 +27,6 @@ from d123.datatypes.time.time_point import TimePoint from d123.datatypes.vehicle_state.ego_state import DynamicStateSE3, EgoStateSE3, EgoStateSE3Index from d123.datatypes.vehicle_state.vehicle_parameters import ( ->>>>>>> dev_v0.0.7:d123/datasets/nuplan/nuplan_data_converter.py get_nuplan_chrysler_pacifica_parameters, rear_axle_se3_to_center_se3, ) diff --git a/d123/datatypes/scene/arrow/arrow_scene.py b/d123/datatypes/scene/arrow/arrow_scene.py index 45738893..e05b717c 100644 --- a/d123/datatypes/scene/arrow/arrow_scene.py +++ b/d123/datatypes/scene/arrow/arrow_scene.py @@ -4,23 +4,12 @@ import pyarrow as pa -<<<<<<< HEAD:d123/dataset/scene/arrow_scene.py -from d123.common.datatypes.detection.detection import BoxDetectionWrapper, TrafficLightDetectionWrapper -from d123.common.datatypes.recording.detection_recording import DetectionRecording -from d123.common.datatypes.sensor.camera import Camera, CameraMetadata, PinholeCameraMetadata, FisheyeMEICameraMetadata, CameraType, camera_metadata_dict_from_json -from d123.common.datatypes.sensor.lidar import LiDAR, LiDARMetadata, LiDARType, lidar_metadata_dict_from_json -from d123.common.datatypes.time.time_point import TimePoint -from d123.common.datatypes.vehicle_state.ego_state import EgoStateSE3 -from d123.common.datatypes.vehicle_state.vehicle_parameters import VehicleParameters -from 
d123.dataset.arrow.conversion import (
-=======
 from d123.common.utils.arrow_helper import open_arrow_table
 from d123.datatypes.detections.detection import BoxDetectionWrapper, DetectionRecording, TrafficLightDetectionWrapper
 from d123.datatypes.maps.abstract_map import AbstractMap
 from d123.datatypes.maps.gpkg.gpkg_map import get_local_map_api, get_map_api_from_names
 from d123.datatypes.scene.abstract_scene import AbstractScene
 from d123.datatypes.scene.arrow.utils.conversion import (
->>>>>>> dev_v0.0.7:d123/datatypes/scene/arrow/arrow_scene.py
     get_box_detections_from_arrow_table,
     get_camera_from_arrow_table,
     get_ego_vehicle_state_from_arrow_table,
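Patch 052 below is largely a mechanical migration of import paths: d123.dataset.dataset_specific.* and d123.common.datatypes.* move to d123.datasets.* and d123.datatypes.*. As a rough illustration of how such a rename can be applied in bulk, a prefix-rewrite script could look like the following sketch. This helper is hypothetical and not part of the patch series; the prefix map only mirrors the moves visible in these hunks.

import re
from pathlib import Path

# Hypothetical prefix map; it only mirrors the module moves visible in this patch.
IMPORT_REWRITES = {
    r"\bd123\.dataset\.dataset_specific\.": "d123.datasets.",
    r"\bd123\.common\.datatypes\.": "d123.datatypes.",
}

def rewrite_imports(root: Path) -> None:
    """Rewrite old import prefixes to the new package layout, in place."""
    for path in root.rglob("*.py"):
        text = path.read_text()
        new_text = text
        for pattern, replacement in IMPORT_REWRITES.items():
            new_text = re.sub(pattern, replacement, new_text)
        if new_text != text:
            path.write_text(new_text)

if __name__ == "__main__":
    rewrite_imports(Path("d123"))

A scripted rewrite like this keeps the diff purely mechanical, which is why the hunks in patch 052 touch one import line per file in most converters.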
From 5bf2e5aa9c33bfda2d0f2b4a042164f991746d54 Mon Sep 17 00:00:00 2001
From: jbwang <1159270049@qq.com>
Date: Tue, 30 Sep 2025 14:47:20 +0800
Subject: [PATCH 052/145] merge dev_0.0.7 into kitti360 and make kitti360 compatible with existing code

---
 d123/datasets/av2/av2_data_converter.py       |  2 +-
 d123/datasets/carla/carla_data_converter.py   |  2 +-
 .../kitti_360/kitti_360_data_converter.py     | 35 +++++++++---------
 d123/datasets/kitti_360/kitti_360_helper.py   | 15 +++++---
 .../kitti_360/kitti_360_map_conversion.py     |  8 ++---
 d123/datasets/kitti_360/labels.py             |  2 +-
 d123/datasets/kitti_360/load_sensor.py        |  2 +-
 .../kitti_360/preprocess_detection.py         |  4 +--
 d123/datasets/nuplan/nuplan_data_converter.py |  2 +-
 d123/datasets/wopd/wopd_data_converter.py     |  2 +-
 d123/datatypes/scene/arrow/arrow_scene.py     |  2 +-
 .../datatypes/scene/arrow/utils/conversion.py |  2 +-
 .../default_dataset_conversion.yaml           |  2 +-
 .../config/datasets/kitti360_dataset.yaml     |  4 +--
 14 files changed, 47 insertions(+), 37 deletions(-)

diff --git a/d123/datasets/av2/av2_data_converter.py b/d123/datasets/av2/av2_data_converter.py
index f066aa42..7d2e3525 100644
--- a/d123/datasets/av2/av2_data_converter.py
+++ b/d123/datasets/av2/av2_data_converter.py
@@ -25,7 +25,7 @@
 from d123.datasets.av2.av2_map_conversion import convert_av2_map
 from d123.datasets.raw_data_converter import DataConverterConfig, RawDataConverter
 from d123.datatypes.scene.scene_metadata import LogMetadata
-from d123.datatypes.sensors.camera import CameraMetadata, CameraType, camera_metadata_dict_to_json
+from d123.datatypes.sensors.camera import PinholeCameraMetadata, CameraType, camera_metadata_dict_to_json
 from d123.datatypes.sensors.lidar import LiDARMetadata, LiDARType, lidar_metadata_dict_to_json
 from d123.datatypes.time.time_point import TimePoint
 from d123.datatypes.vehicle_state.ego_state import DynamicStateSE3, EgoStateSE3, EgoStateSE3Index
diff --git a/d123/datasets/carla/carla_data_converter.py b/d123/datasets/carla/carla_data_converter.py
index bcf8342c..f5b8fd16 100644
--- a/d123/datasets/carla/carla_data_converter.py
+++ b/d123/datasets/carla/carla_data_converter.py
@@ -19,7 +19,7 @@
 from d123.datatypes.maps.abstract_map_objects import AbstractLane
 from d123.datatypes.maps.gpkg.gpkg_map import get_map_api_from_names
 from d123.datatypes.scene.scene_metadata import LogMetadata
-from d123.datatypes.sensors.camera import CameraMetadata, CameraType, camera_metadata_dict_to_json
+from d123.datatypes.sensors.camera import PinholeCameraMetadata, CameraType, camera_metadata_dict_to_json
 from d123.datatypes.sensors.lidar import LiDARMetadata, LiDARType, lidar_metadata_dict_to_json
 from d123.datatypes.sensors.lidar_index import CarlaLidarIndex
 from d123.datatypes.vehicle_state.ego_state import EgoStateSE3Index
diff --git a/d123/datasets/kitti_360/kitti_360_data_converter.py b/d123/datasets/kitti_360/kitti_360_data_converter.py
index 76396bbd..0616dcfa 100644
--- a/d123/datasets/kitti_360/kitti_360_data_converter.py
+++ b/d123/datasets/kitti_360/kitti_360_data_converter.py
@@ -22,20 +22,21 @@
 from nuplan.planning.utils.multithreading.worker_utils import WorkerPool, worker_map

-from d123.common.datatypes.detection.detection_types import DetectionType
-from d123.common.datatypes.sensor.camera import PinholeCameraMetadata, FisheyeMEICameraMetadata, CameraType, camera_metadata_dict_to_json
-from d123.common.datatypes.sensor.lidar import LiDARMetadata, LiDARType, lidar_metadata_dict_to_json
-from d123.common.datatypes.sensor.lidar_index import Kitti360LidarIndex
-from d123.common.datatypes.time.time_point import TimePoint
-from d123.common.datatypes.vehicle_state.ego_state import DynamicStateSE3, EgoStateSE3, EgoStateSE3Index
-from d123.common.datatypes.vehicle_state.vehicle_parameters import get_kitti360_station_wagon_parameters,rear_axle_se3_to_center_se3
-from d123.dataset.arrow.helper import open_arrow_table, write_arrow_table
-from d123.dataset.dataset_specific.raw_data_converter import DataConverterConfig, RawDataConverter
-from d123.dataset.logs.log_metadata import LogMetadata
-from d123.dataset.dataset_specific.kitti_360.kitti_360_helper import KITTI360Bbox3D, KITTI3602NUPLAN_IMU_CALIBRATION, get_lidar_extrinsic
-from d123.dataset.dataset_specific.kitti_360.labels import KIITI360_DETECTION_NAME_DICT,kittiId2label,BBOX_LABLES_TO_DETECTION_NAME_DICT
-from d123.dataset.dataset_specific.kitti_360.kitti_360_map_conversion import convert_kitti360_map
+from d123.datatypes.detections.detection_types import DetectionType
+from d123.datatypes.sensors.camera import PinholeCameraMetadata, FisheyeMEICameraMetadata, CameraType, camera_metadata_dict_to_json
+from d123.datatypes.sensors.lidar import LiDARMetadata, LiDARType, lidar_metadata_dict_to_json
+from d123.datatypes.sensors.lidar_index import Kitti360LidarIndex
+from d123.datatypes.time.time_point import TimePoint
+from d123.datatypes.vehicle_state.ego_state import DynamicStateSE3, EgoStateSE3, EgoStateSE3Index
+from d123.datatypes.vehicle_state.vehicle_parameters import get_kitti360_station_wagon_parameters,rear_axle_se3_to_center_se3
+from d123.common.utils.arrow_helper import open_arrow_table, write_arrow_table
+from d123.datasets.raw_data_converter import DataConverterConfig, RawDataConverter
+from d123.datatypes.scene.scene_metadata import LogMetadata
+from d123.datasets.kitti_360.kitti_360_helper import KITTI360Bbox3D, KITTI3602NUPLAN_IMU_CALIBRATION, get_lidar_extrinsic
+from d123.datasets.kitti_360.labels import KIITI360_DETECTION_NAME_DICT,kittiId2label,BBOX_LABLES_TO_DETECTION_NAME_DICT
+from d123.datasets.kitti_360.kitti_360_map_conversion import convert_kitti360_map
 from d123.geometry import BoundingBoxSE3, BoundingBoxSE3Index, StateSE3, Vector3D, Vector3DIndex
+from d123.geometry.rotation import EulerAngles

 KITTI360_DT: Final[float] = 0.1
 SORT_BY_TIMESTAMP: Final[bool] = True
@@ -482,13 +483,15 @@ def _extract_ego_state_all(log_name: str) -> Tuple[List[List[float]], List[int]]
     R_mat_cali = R_mat @ KITTI3602NUPLAN_IMU_CALIBRATION[:3,:3]
     yaw, pitch, roll = Quaternion(matrix=R_mat_cali[:3, :3]).yaw_pitch_roll
+    ego_quaternion = EulerAngles(roll=roll, pitch=pitch, yaw=yaw).quaternion

     rear_axle_pose = StateSE3(
         x=poses[pos, 4],
         y=poses[pos, 8],
         z=poses[pos, 12],
-        roll=roll,
-        pitch=pitch,
-        yaw=yaw,
+        qw=ego_quaternion.qw,
+        qx=ego_quaternion.qx,
+        qy=ego_quaternion.qy,
+        qz=ego_quaternion.qz,
     )
     center = 
rear_axle_se3_to_center_se3(rear_axle_se3=rear_axle_pose, vehicle_parameters=vehicle_parameters) diff --git a/d123/datasets/kitti_360/kitti_360_helper.py b/d123/datasets/kitti_360/kitti_360_helper.py index e8520253..01c3d1fe 100644 --- a/d123/datasets/kitti_360/kitti_360_helper.py +++ b/d123/datasets/kitti_360/kitti_360_helper.py @@ -8,7 +8,8 @@ from d123.geometry import BoundingBoxSE3, StateSE3 from d123.geometry.polyline import Polyline3D -from d123.dataset.dataset_specific.kitti_360.labels import kittiId2label,BBOX_LABLES_TO_DETECTION_NAME_DICT +from d123.geometry.rotation import EulerAngles +from d123.datasets.kitti_360.labels import kittiId2label,BBOX_LABLES_TO_DETECTION_NAME_DICT import os from pathlib import Path @@ -124,6 +125,7 @@ def parse_scale_rotation(self): Rm[0] = -Rm[0] scale = np.diag(Sm) yaw, pitch, roll = R.from_matrix(Rm).as_euler('zyx', degrees=False) + obj_quaternion = EulerAngles(roll=roll, pitch=pitch, yaw=yaw).quaternion self.Rm = np.array(Rm) self.Sm = np.array(Sm) @@ -131,15 +133,20 @@ def parse_scale_rotation(self): self.yaw = yaw self.pitch = pitch self.roll = roll + self.qw = obj_quaternion.qw + self.qx = obj_quaternion.qx + self.qy = obj_quaternion.qy + self.qz = obj_quaternion.qz def get_state_array(self) -> np.ndarray: center = StateSE3( x=self.T[0], y=self.T[1], z=self.T[2], - roll=self.roll, - pitch=self.pitch, - yaw=self.yaw, + qw=self.qw, + qx=self.qx, + qy=self.qy, + qz=self.qz, ) scale = self.scale bounding_box_se3 = BoundingBoxSE3(center, scale[0], scale[1], scale[2]) diff --git a/d123/datasets/kitti_360/kitti_360_map_conversion.py b/d123/datasets/kitti_360/kitti_360_map_conversion.py index 924a7822..643a13c6 100644 --- a/d123/datasets/kitti_360/kitti_360_map_conversion.py +++ b/d123/datasets/kitti_360/kitti_360_map_conversion.py @@ -11,14 +11,14 @@ from shapely.geometry import LineString import shapely.geometry as geom -from d123.dataset.conversion.map.road_edge.road_edge_2d_utils import ( +from d123.datasets.utils.maps.road_edge.road_edge_2d_utils import ( get_road_edge_linear_rings, split_line_geometry_by_max_length, ) -from d123.dataset.maps.gpkg.utils import get_all_rows_with_value, get_row_with_value -from d123.dataset.maps.map_datatypes import MapLayer, RoadEdgeType, RoadLineType +from d123.datatypes.maps.gpkg.utils import get_all_rows_with_value, get_row_with_value +from d123.datatypes.maps.map_datatypes import MapLayer, RoadEdgeType, RoadLineType from d123.geometry.polyline import Polyline3D -from d123.dataset.dataset_specific.kitti_360.kitti_360_helper import KITTI360_MAP_Bbox3D +from d123.datasets.kitti_360.kitti_360_helper import KITTI360_MAP_Bbox3D MAX_ROAD_EDGE_LENGTH = 100.0 # meters, used to filter out very long road edges diff --git a/d123/datasets/kitti_360/labels.py b/d123/datasets/kitti_360/labels.py index 6903be9f..45e2d315 100644 --- a/d123/datasets/kitti_360/labels.py +++ b/d123/datasets/kitti_360/labels.py @@ -167,7 +167,7 @@ def assureSingleInstanceName( name ): # all good then return name -from d123.common.datatypes.detection.detection_types import DetectionType +from d123.datatypes.detections.detection_types import DetectionType BBOX_LABLES_TO_DETECTION_NAME_DICT = { 'car': 'car', diff --git a/d123/datasets/kitti_360/load_sensor.py b/d123/datasets/kitti_360/load_sensor.py index c4df6d36..7ca4489a 100644 --- a/d123/datasets/kitti_360/load_sensor.py +++ b/d123/datasets/kitti_360/load_sensor.py @@ -3,7 +3,7 @@ import numpy as np import logging -from d123.common.datatypes.sensor.lidar import LiDAR, LiDARMetadata +from 
d123.datatypes.sensors.lidar import LiDAR, LiDARMetadata def load_kitti360_lidar_from_path(filepath: Path, lidar_metadata: LiDARMetadata) -> LiDAR: diff --git a/d123/datasets/kitti_360/preprocess_detection.py b/d123/datasets/kitti_360/preprocess_detection.py index 97ea6eb8..92806736 100644 --- a/d123/datasets/kitti_360/preprocess_detection.py +++ b/d123/datasets/kitti_360/preprocess_detection.py @@ -31,8 +31,8 @@ PATH_3D_BBOX_ROOT = KITTI360_DATA_ROOT / DIR_3D_BBOX PATH_POSES_ROOT = KITTI360_DATA_ROOT / DIR_POSES -from d123.dataset.dataset_specific.kitti_360.kitti_360_helper import KITTI360Bbox3D, KITTI3602NUPLAN_IMU_CALIBRATION, get_lidar_extrinsic -from d123.dataset.dataset_specific.kitti_360.labels import KIITI360_DETECTION_NAME_DICT, kittiId2label, BBOX_LABLES_TO_DETECTION_NAME_DICT +from d123.datasets.kitti_360.kitti_360_helper import KITTI360Bbox3D, KITTI3602NUPLAN_IMU_CALIBRATION, get_lidar_extrinsic +from d123.datasets.kitti_360.labels import KIITI360_DETECTION_NAME_DICT, kittiId2label, BBOX_LABLES_TO_DETECTION_NAME_DICT def _bbox_xml_path(log_name: str) -> Path: if log_name == "2013_05_28_drive_0004_sync": diff --git a/d123/datasets/nuplan/nuplan_data_converter.py b/d123/datasets/nuplan/nuplan_data_converter.py index 398e536a..980c53b1 100644 --- a/d123/datasets/nuplan/nuplan_data_converter.py +++ b/d123/datasets/nuplan/nuplan_data_converter.py @@ -21,7 +21,7 @@ from d123.datatypes.detections.detection import TrafficLightStatus from d123.datatypes.detections.detection_types import DetectionType from d123.datatypes.scene.scene_metadata import LogMetadata -from d123.datatypes.sensors.camera import CameraMetadata, CameraType, camera_metadata_dict_to_json +from d123.datatypes.sensors.camera import PinholeCameraMetadata, CameraType, camera_metadata_dict_to_json from d123.datatypes.sensors.lidar import LiDARMetadata, LiDARType, lidar_metadata_dict_to_json from d123.datatypes.sensors.lidar_index import NuplanLidarIndex from d123.datatypes.time.time_point import TimePoint diff --git a/d123/datasets/wopd/wopd_data_converter.py b/d123/datasets/wopd/wopd_data_converter.py index ebac241a..7a7aa7b1 100644 --- a/d123/datasets/wopd/wopd_data_converter.py +++ b/d123/datasets/wopd/wopd_data_converter.py @@ -18,7 +18,7 @@ from d123.datasets.wopd.waymo_map_utils.wopd_map_utils import convert_wopd_map from d123.datasets.wopd.wopd_utils import parse_range_image_and_camera_projection from d123.datatypes.scene.scene_metadata import LogMetadata -from d123.datatypes.sensors.camera import CameraMetadata, CameraType, camera_metadata_dict_to_json +from d123.datatypes.sensors.camera import PinholeCameraMetadata, CameraType, camera_metadata_dict_to_json from d123.datatypes.sensors.lidar import LiDARMetadata, LiDARType, lidar_metadata_dict_to_json from d123.datatypes.sensors.lidar_index import WopdLidarIndex from d123.datatypes.vehicle_state.ego_state import DynamicStateSE3, EgoStateSE3, EgoStateSE3Index diff --git a/d123/datatypes/scene/arrow/arrow_scene.py b/d123/datatypes/scene/arrow/arrow_scene.py index e05b717c..0fc61ba8 100644 --- a/d123/datatypes/scene/arrow/arrow_scene.py +++ b/d123/datatypes/scene/arrow/arrow_scene.py @@ -69,7 +69,7 @@ def __init__( ) = _get_scene_data(arrow_file_path) self._metadata: LogMetadata = _metadata self._vehicle_parameters: VehicleParameters = _vehicle_parameters - self._camera_metadata: Dict[CameraType, Union[PinholeCameraMetadata, FisheyeMEICameraMetadata]] = _camera_metadata + self._camera_metadata: Dict[CameraType, CameraMetadata] = _camera_metadata self._lidar_metadata: 
Dict[LiDARType, LiDARMetadata] = _lidar_metadata
         self._map_api: Optional[AbstractMap] = None
diff --git a/d123/datatypes/scene/arrow/utils/conversion.py b/d123/datatypes/scene/arrow/utils/conversion.py
index 1f6c879c..dfa63d54 100644
--- a/d123/datatypes/scene/arrow/utils/conversion.py
+++ b/d123/datatypes/scene/arrow/utils/conversion.py
@@ -154,7 +154,7 @@ def get_lidar_from_arrow_table(
     elif log_metadata.dataset == "wopd":
         raise NotImplementedError
     elif log_metadata.dataset == "kitti360":
-        from d123.dataset.dataset_specific.kitti_360.load_sensor import load_kitti360_lidar_from_path
+        from d123.datasets.kitti_360.load_sensor import load_kitti360_lidar_from_path

         lidar = load_kitti360_lidar_from_path(full_lidar_path, lidar_metadata)
     else:
diff --git a/d123/script/config/dataset_conversion/default_dataset_conversion.yaml b/d123/script/config/dataset_conversion/default_dataset_conversion.yaml
index 01084657..2c474fe8 100644
--- a/d123/script/config/dataset_conversion/default_dataset_conversion.yaml
+++ b/d123/script/config/dataset_conversion/default_dataset_conversion.yaml
@@ -15,7 +15,7 @@ defaults:
   - default_dataset_paths
   - _self_
   - datasets:
-    - nuplan_private_dataset
+    # - nuplan_private_dataset
    # - carla_dataset
    # - wopd_dataset
    # - av2_sensor_dataset
diff --git a/d123/script/config/datasets/kitti360_dataset.yaml b/d123/script/config/datasets/kitti360_dataset.yaml
index 17b9e863..c5816a29 100644
--- a/d123/script/config/datasets/kitti360_dataset.yaml
+++ b/d123/script/config/datasets/kitti360_dataset.yaml
@@ -1,12 +1,12 @@
 kitti360_dataset:
-  _target_: d123.dataset.dataset_specific.kitti_360.kitti_360_data_converter.Kitti360DataConverter
+  _target_: d123.datasets.kitti_360.kitti_360_data_converter.Kitti360DataConverter
   _convert_: 'all'

   splits: ["kitti360"]
   log_path: ${oc.env:KITTI360_DATA_ROOT}

   data_converter_config:
-    _target_: d123.dataset.dataset_specific.raw_data_converter.DataConverterConfig
+    _target_: d123.datasets.raw_data_converter.DataConverterConfig
     _convert_: 'all'
     output_path: ${d123_data_root}
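Patch 053 below gets most of its speed-up from batching: rather than adding one trimesh node per bounding box per frame, it builds a single mesh (8 vertices and 12 triangles per box) with per-vertex colors, so each frame update sends one scene-graph object instead of hundreds. A minimal numpy sketch of that batching idea follows; it uses a generic corner ordering instead of the repo's Corners3DIndex convention, and boxes_to_single_mesh is illustrative, not the repo's API.

import numpy as np

def boxes_to_single_mesh(corners: np.ndarray, box_colors: np.ndarray):
    """Merge N boxes, given as (N, 8, 3) corner arrays, into one triangle mesh.

    Returns flat vertices (N*8, 3), faces (N*12, 3), and per-vertex RGBA
    colors (N*8, 4), ready for a single add_mesh call.
    """
    num_boxes = corners.shape[0]
    vertices = corners.reshape(-1, 3)
    # Twelve triangles covering the six faces of one box, indexed into its 8 corners.
    faces_single = np.array(
        [
            [0, 1, 2], [0, 2, 3],  # bottom
            [4, 6, 5], [4, 7, 6],  # top
            [0, 4, 5], [0, 5, 1],  # sides
            [1, 5, 6], [1, 6, 2],
            [2, 6, 7], [2, 7, 3],
            [3, 7, 4], [3, 4, 0],
        ]
    )
    # Shift the shared topology by 8 vertex indices per box.
    offsets = 8 * np.arange(num_boxes)[:, None, None]
    faces = (faces_single[None, :, :] + offsets).reshape(-1, 3)
    vertex_colors = np.repeat(box_colors, 8, axis=0)  # one color per corner
    return vertices, faces, vertex_colors

The same trade-off shows up twice in the patch: get_bounding_box_meshes concatenates all boxes into one trimesh, and get_bounding_box_outlines flattens all box edges into one line-segment batch.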
From 0ac18baa706ff3673e73a810df4d097e663d2749 Mon Sep 17 00:00:00 2001
From: Daniel Dauner
Date: Thu, 2 Oct 2025 23:10:43 +0200
Subject: [PATCH 053/145] Massively improve viser speed and stability for cameras and bounding boxes. Partially clean up the viser code.

---
 d123/common/utils/timer.py                    |  11 +-
 d123/common/visualization/color/default.py    |  10 ++
 d123/common/visualization/viser/server.py     | 150 ++++++++++++----
 d123/common/visualization/viser/utils.py      | 163 ++++++++++--------
 .../datatypes/scene/arrow/utils/conversion.py |  13 +-
 d123/geometry/se.py                           |   2 +-
 d123/geometry/utils/bounding_box_utils.py     |  40 +++++
 .../default_dataset_conversion.yaml           |   4 +-
 .../config/datasets/av2_sensor_dataset.yaml   |   4 +-
 .../datasets/nuplan_private_dataset.yaml      |   2 +-
 notebooks/viz/viser_testing_v2_scene.ipynb    |  43 +----
 test_viser.py                                 |  34 ++++
 12 files changed, 313 insertions(+), 163 deletions(-)
 create mode 100644 test_viser.py

diff --git a/d123/common/utils/timer.py b/d123/common/utils/timer.py
index b2c57015..69137c0e 100644
--- a/d123/common/utils/timer.py
+++ b/d123/common/utils/timer.py
@@ -55,7 +55,7 @@ def end(self) -> None:
         self._time_logs[self._end_key].append(time.perf_counter() - self._start_time)

-    def stats(self, verbose: bool = True) -> Optional[pd.DataFrame]:
+    def to_pandas(self) -> Optional[pd.DataFrame]:
         """
         Returns a DataFrame with statistics of the logged times.
         :param verbose: whether to print the timings, defaults to True
@@ -71,8 +71,8 @@
         statistics[key] = timings_statistics

         dataframe = pd.DataFrame.from_dict(statistics).transpose()
-        if verbose:
-            print(dataframe.to_string())
+        # if verbose:
+        #     print(dataframe.to_string())

         return dataframe

@@ -91,3 +91,8 @@ def flush(self) -> None:
         self._time_logs: Dict[str, List[float]] = {}
         self._start_time: Optional[float] = None
         self._iteration_time: Optional[float] = None
+
+    def __str__(self) -> str:
+        """String representation of the Timer."""
+        dataframe = self.to_pandas()
+        return dataframe.to_string() if dataframe is not None else "No timings logged"
diff --git a/d123/common/visualization/color/default.py b/d123/common/visualization/color/default.py
index 9eda3f7c..5d90977a 100644
--- a/d123/common/visualization/color/default.py
+++ b/d123/common/visualization/color/default.py
@@ -167,6 +167,16 @@
         marker_style=None,
         zorder=2,
     ),
+    DetectionType.EGO: PlotConfig(
+        fill_color=ELLIS_5[0],
+        fill_color_alpha=1.0,
+        line_color=BLACK,
+        line_color_alpha=1.0,
+        line_width=1.0,
+        line_style="-",
+        marker_style=HEADING_MARKER_STYLE,
+        zorder=4,
+    ),
 }

 EGO_VEHICLE_CONFIG: PlotConfig = PlotConfig(
diff --git a/d123/common/visualization/viser/server.py b/d123/common/visualization/viser/server.py
index e5c95081..29e97418 100644
--- a/d123/common/visualization/viser/server.py
+++ b/d123/common/visualization/viser/server.py
@@ -1,10 +1,11 @@
 import time
+from concurrent.futures import ThreadPoolExecutor
 from typing import Dict, List, Literal

 import numpy as np
-import trimesh
 import viser

+from d123.common.utils.timer import Timer
 from d123.common.visualization.viser.utils import (
     get_bounding_box_meshes,
     get_bounding_box_outlines,
@@ -43,15 +44,19 @@
 # Cameras config:
-VISUALIZE_CAMERA_FRUSTUM: List[CameraType] = [CameraType.CAM_F0, CameraType.CAM_L0, CameraType.CAM_R0]
-# VISUALIZE_CAMERA_FRUSTUM: List[CameraType] = all_camera_types
+# VISUALIZE_CAMERA_FRUSTUM: List[CameraType] = [CameraType.CAM_F0, CameraType.CAM_L0, CameraType.CAM_R0]
+VISUALIZE_CAMERA_FRUSTUM: List[CameraType] = all_camera_types
 # VISUALIZE_CAMERA_FRUSTUM: List[CameraType] = [CameraType.CAM_STEREO_L, CameraType.CAM_STEREO_R]
 # VISUALIZE_CAMERA_FRUSTUM: List[CameraType] = []
-VISUALIZE_CAMERA_GUI: List[CameraType] = [CameraType.CAM_F0]
+# VISUALIZE_CAMERA_GUI: List[CameraType] = [CameraType.CAM_F0]
+
+VISUALIZE_CAMERA_GUI: List[CameraType] = []
+
 CAMERA_SCALE: float = 1.0
+RESIZE_FACTOR = 0.25

 # Lidar config:
-LIDAR_AVAILABLE: bool = True
+LIDAR_AVAILABLE: bool = False

 LIDAR_TYPES: List[LiDARType] = [
     LiDARType.LIDAR_MERGED,
@@ -124,8 +129,8 @@ def set_scene(self, scene: AbstractScene) -> None:
         gui_prev_frame = self.server.gui.add_button("Prev Frame", disabled=True)
         gui_next_scene = self.server.gui.add_button("Next Scene", disabled=False)
         gui_playing = self.server.gui.add_checkbox("Playing", True)
-        gui_framerate = self.server.gui.add_slider("FPS", min=1, max=60, step=0.1, initial_value=10)
-        gui_framerate_options = self.server.gui.add_button_group("FPS options", ("10", "20", "30", "60"))
+        gui_framerate = self.server.gui.add_slider("FPS", min=1, max=90, step=0.1, initial_value=10)
+        gui_framerate_options = self.server.gui.add_button_group("FPS options", ("10", "20", "30", "60", "90"))

         # Frame step buttons.
         @gui_next_frame.on_click
@@ -158,25 +163,26 @@ def _(_) -> None:
         # Toggle frame visibility when the timestep slider changes.
@gui_timestep.on_update def _(_) -> None: - nonlocal current_frame_handle, current_frame_handle, prev_timestep + nonlocal prev_timestep, bounding_box_handle current_timestep = gui_timestep.value - start = time.time() + timer = Timer() + timer.start() + + start = time.perf_counter() # with self.server.atomic(): - mew_frame_handle = self.server.scene.add_frame(f"/frame{gui_timestep.value}", show_axes=False) + if BOUNDING_BOX_TYPE == "mesh": - meshes = [] - for _, mesh in get_bounding_box_meshes(scene, gui_timestep.value).items(): - meshes.append(mesh) - self.server.scene.add_mesh_trimesh( - f"/frame{gui_timestep.value}/detections", - trimesh.util.concatenate(meshes), + mesh = get_bounding_box_meshes(scene, gui_timestep.value) + new_bounding_box_handle = self.server.scene.add_mesh_trimesh( + "box_detections", + mesh=mesh, visible=True, ) elif BOUNDING_BOX_TYPE == "lines": lines, colors = get_bounding_box_outlines(scene, gui_timestep.value) - self.server.scene.add_line_segments( - f"/frame{gui_timestep.value}/detections", + new_bounding_box_handle = self.server.scene.add_line_segments( + "box_detections", points=lines, colors=colors, line_width=LINE_WIDTH, @@ -184,44 +190,115 @@ def _(_) -> None: else: raise ValueError(f"Unknown bounding box type: {BOUNDING_BOX_TYPE}") - current_frame_handle.remove() - current_frame_handle = mew_frame_handle + # bounding_box_handle.visible = False + # time.sleep(0.005) + # bounding_box_handle.remove() + bounding_box_handle = new_bounding_box_handle + new_bounding_box_handle.visible = True + + timer.log("Update bounding boxes") for camera_type in VISUALIZE_CAMERA_GUI: camera = get_camera_if_available(scene, camera_type, gui_timestep.value) if camera is not None: camera_gui_handles[camera_type].image = camera.image - for camera_type in VISUALIZE_CAMERA_FRUSTUM: + camera_timer = Timer() + camera_timer.start() + import concurrent.futures + + def load_camera_data(camera_type): camera = get_camera_if_available(scene, camera_type, gui_timestep.value) if camera is not None: - camera_position, camera_quaternion = get_camera_values(scene, camera, gui_timestep.value) - camera_frustum_handles[camera_type].position = camera_position.array - camera_frustum_handles[camera_type].wxyz = camera_quaternion.q - camera_frustum_handles[camera_type].image = camera.image + camera_position, camera_rotation, camera_image = get_camera_values( + scene, camera, gui_timestep.value, resize_factor=RESIZE_FACTOR + ) + camera_frustum_handles[camera_type].position = camera_position + camera_frustum_handles[camera_type].wxyz = camera_rotation + camera_frustum_handles[camera_type].image = camera_image + + return camera_type, None + return camera_type, None + + with ThreadPoolExecutor(max_workers=len(VISUALIZE_CAMERA_FRUSTUM)) as executor: + future_to_camera = { + executor.submit(load_camera_data, camera_type): camera_type + for camera_type in VISUALIZE_CAMERA_FRUSTUM + } + + for future in concurrent.futures.as_completed(future_to_camera): + camera_type, camera_data = future.result() + + camera_timer.log("Load camera data") + + # for camera_type in VISUALIZE_CAMERA_FRUSTUM: + + # # camera = get_camera_if_available(scene, camera_type, gui_timestep.value) + # # camera_timer.log("Get camera") + + # if camera_type in camera_cache.keys(): + # camera_position, camera_rotation, camera_image = camera_cache[camera_type] + # # camera_position, camera_quaternion, camera_image = get_camera_values( + # # scene, camera, gui_timestep.value, resize_factor=RESIZE_FACTOR + # # ) + + # # camera_timer.log("Get camera 
values") + + # camera_frustum_handles[camera_type].position = camera_position + # camera_frustum_handles[camera_type].wxyz = camera_rotation + # camera_frustum_handles[camera_type].image = camera_image + + camera_timer.log("Update camera frustum") + camera_timer.end() + # print(camera_timer) # 0.0082 + + timer.log("Update cameras") if LIDAR_AVAILABLE: try: points, colors = get_lidar_points(scene, gui_timestep.value, LIDAR_TYPES) except Exception as e: - print(f"Error getting lidar points: {e}") + # print(f"Error getting lidar points: {e}") points = np.zeros((0, 3)) colors = np.zeros((0, 3)) gui_lidar.points = points gui_lidar.colors = colors + # timer.log("Update lidar") + timer.end() + # print(timer) + prev_timestep = current_timestep - rendering_time = time.time() - start + rendering_time = time.perf_counter() - start sleep_time = 1.0 / gui_framerate.value - rendering_time - time.sleep(max(sleep_time, 0.0)) - self.server.flush() # Optional! + if sleep_time > 0: + time.sleep(max(sleep_time, 0.0)) + # self.server.flush() # Optional! + # print(f"Render time: {rendering_time:.3f}s, sleep time: {sleep_time:.3f}s") # Load in frames. - current_frame_handle = self.server.scene.add_frame(f"/frame{gui_timestep.value}", show_axes=False) self.server.scene.add_frame("/map", show_axes=False) + if BOUNDING_BOX_TYPE == "mesh": + mesh = get_bounding_box_meshes(scene, gui_timestep.value) + bounding_box_handle = self.server.scene.add_mesh_trimesh( + "box_detections", + mesh=mesh, + visible=True, + ) + elif BOUNDING_BOX_TYPE == "lines": + lines, colors = get_bounding_box_outlines(scene, gui_timestep.value) + bounding_box_handle = self.server.scene.add_line_segments( + "box_detections", + points=lines, + colors=colors, + line_width=LINE_WIDTH, + ) + else: + raise ValueError(f"Unknown bounding box type: {BOUNDING_BOX_TYPE}") + camera_gui_handles: Dict[CameraType, viser.GuiImageHandle] = {} camera_frustum_handles: Dict[CameraType, viser.CameraFrustumHandle] = {} @@ -232,28 +309,29 @@ def _(_) -> None: camera_gui_handles[camera_type] = self.server.gui.add_image( image=camera.image, label=camera_type.serialize(), - format="jpeg", ) for camera_type in VISUALIZE_CAMERA_FRUSTUM: camera = get_camera_if_available(scene, camera_type, gui_timestep.value) if camera is not None: - camera_position, camera_quaternion = get_camera_values(scene, camera, gui_timestep.value) + camera_position, camera_quaternion, camera_image = get_camera_values( + scene, camera, gui_timestep.value, resize_factor=RESIZE_FACTOR + ) camera_frustum_handles[camera_type] = self.server.scene.add_camera_frustum( f"camera_frustum_{camera_type.serialize()}", fov=camera.metadata.fov_y, aspect=camera.metadata.aspect_ratio, scale=CAMERA_SCALE, - image=camera.image, - position=camera_position.array, - wxyz=camera_quaternion.q, + image=camera_image, + position=camera_position, + wxyz=camera_quaternion, ) if LIDAR_AVAILABLE: try: points, colors = get_lidar_points(scene, gui_timestep.value, LIDAR_TYPES) except Exception as e: - print(f"Error getting lidar points: {e}") + # print(f"Error getting lidar points: {e}") points = np.zeros((0, 3)) colors = np.zeros((0, 3)) diff --git a/d123/common/visualization/viser/utils.py b/d123/common/visualization/viser/utils.py index 75d72b91..74615d46 100644 --- a/d123/common/visualization/viser/utils.py +++ b/d123/common/visualization/viser/utils.py @@ -1,26 +1,31 @@ from typing import Final, List, Optional, Tuple +import cv2 import numpy as np import numpy.typing as npt import trimesh -from pyquaternion import Quaternion # 
TODO: remove from d123.common.visualization.color.color import TAB_10, Color -from d123.common.visualization.color.config import PlotConfig -from d123.common.visualization.color.default import BOX_DETECTION_CONFIG, EGO_VEHICLE_CONFIG, MAP_SURFACE_CONFIG +from d123.common.visualization.color.default import BOX_DETECTION_CONFIG, MAP_SURFACE_CONFIG +from d123.datatypes.detections.detection_types import DetectionType from d123.datatypes.maps.abstract_map import MapLayer from d123.datatypes.maps.abstract_map_objects import AbstractLane, AbstractSurfaceMapObject from d123.datatypes.scene.abstract_scene import AbstractScene from d123.datatypes.sensors.camera import Camera, CameraType from d123.datatypes.sensors.lidar import LiDARType -from d123.geometry import BoundingBoxSE3, Corners3DIndex, Point3D, Point3DIndex, Polyline3D, StateSE3, StateSE3Index +from d123.geometry import Corners3DIndex, Point3D, Point3DIndex, Polyline3D, StateSE3, StateSE3Index from d123.geometry.geometry_index import BoundingBoxSE3Index from d123.geometry.transform.transform_euler_se3 import convert_relative_to_absolute_points_3d_array +from d123.geometry.transform.transform_se3 import convert_relative_to_absolute_se3_array +from d123.geometry.utils.bounding_box_utils import ( + bbse3_array_to_corners_array, + corners_array_to_3d_mesh, +) # TODO: Refactor this file. # TODO: Add general utilities for 3D primitives and mesh support. -MAP_RADIUS: Final[float] = 500 +MAP_RADIUS: Final[float] = 200 BRIGHTNESS_FACTOR: Final[float] = 1.0 @@ -42,52 +47,43 @@ def configure_trimesh(mesh: trimesh.Trimesh, color: Color): return mesh -def bounding_box_to_trimesh(bbox: BoundingBoxSE3, plot_config: PlotConfig) -> trimesh.Trimesh: +def get_bounding_box_meshes(scene: AbstractScene, iteration: int): - # Create a unit box centered at origin - box_mesh = trimesh.creation.box(extents=[bbox.length, bbox.width, bbox.height]) + initial_ego_vehicle_state = scene.get_ego_state_at_iteration(0) + ego_vehicle_state = scene.get_ego_state_at_iteration(iteration) + box_detections = scene.get_box_detections_at_iteration(iteration) - # Apply rotations in order: roll, pitch, yaw - box_mesh = box_mesh.apply_transform(trimesh.transformations.rotation_matrix(bbox.center.yaw, [0, 0, 1])) - box_mesh = box_mesh.apply_transform(trimesh.transformations.rotation_matrix(bbox.center.pitch, [0, 1, 0])) - box_mesh = box_mesh.apply_transform(trimesh.transformations.rotation_matrix(bbox.center.roll, [1, 0, 0])) + # Load boxes to visualize, including ego vehicle at the last position + boxes = [bd.bounding_box_se3 for bd in box_detections.box_detections] + [ego_vehicle_state.bounding_box_se3] + boxes_type = [bd.metadata.detection_type for bd in box_detections.box_detections] + [DetectionType.EGO] - # Apply translation - box_mesh = box_mesh.apply_translation([bbox.center.x, bbox.center.y, bbox.center.z]) + # create meshes for all boxes + box_se3_array = np.array([box.array for box in boxes]) + box_se3_array[..., BoundingBoxSE3Index.XYZ] -= initial_ego_vehicle_state.center_se3.array[StateSE3Index.XYZ] + box_corners_array = bbse3_array_to_corners_array(box_se3_array) + box_vertices, box_faces = corners_array_to_3d_mesh(box_corners_array) - return configure_trimesh(box_mesh, plot_config.fill_color) + # Create colors for each box based on detection type + box_colors = [] + for box_type in boxes_type: + box_colors.append(BOX_DETECTION_CONFIG[box_type].fill_color.rgba) + # Convert to numpy array and repeat for each vertex + box_colors = np.array(box_colors) + vertex_colors = 
np.repeat(box_colors, 8, axis=0) # 8 vertices per box -def get_bounding_box_meshes(scene: AbstractScene, iteration: int): - initial_ego_vehicle_state = scene.get_ego_state_at_iteration(0) + # Create trimesh object + mesh = trimesh.Trimesh(vertices=box_vertices, faces=box_faces) + mesh.visual.vertex_colors = vertex_colors - ego_vehicle_state = scene.get_ego_state_at_iteration(iteration) - box_detections = scene.get_box_detections_at_iteration(iteration) - # traffic_light_detections = scene.get_traffic_light_detections_at_iteration(iteration) - # map_api = scene.map_api + return mesh - output = {} - for box_detection in box_detections: - bbox: BoundingBoxSE3 = box_detection.bounding_box - bbox.array[BoundingBoxSE3Index.XYZ] -= initial_ego_vehicle_state.center_se3.array[StateSE3Index.XYZ] - plot_config = BOX_DETECTION_CONFIG[box_detection.metadata.detection_type] - trimesh_box = bounding_box_to_trimesh(bbox, plot_config) - output[f"{box_detection.metadata.detection_type.serialize()}/{box_detection.metadata.track_token}"] = ( - trimesh_box - ) - - ego_bbox = ego_vehicle_state.bounding_box - ego_bbox.array[BoundingBoxSE3Index.XYZ] -= initial_ego_vehicle_state.center_se3.array[StateSE3Index.XYZ] - trimesh_box = bounding_box_to_trimesh(ego_bbox, EGO_VEHICLE_CONFIG) - output["ego"] = trimesh_box - return output +def _get_bounding_box_lines_from_array(corners_array: npt.NDArray[np.float64]) -> npt.NDArray[np.float64]: + assert corners_array.shape[-1] == len(Point3DIndex) + assert corners_array.shape[-2] == len(Corners3DIndex) + assert corners_array.ndim >= 2 -def _get_bounding_box_lines(bounding_box: BoundingBoxSE3) -> npt.NDArray[np.float64]: - """ - TODO: Vectorize this function and move to geometry module. - """ - corners = bounding_box.corners_array index_pairs = [ (Corners3DIndex.FRONT_LEFT_BOTTOM, Corners3DIndex.FRONT_RIGHT_BOTTOM), (Corners3DIndex.FRONT_RIGHT_BOTTOM, Corners3DIndex.BACK_RIGHT_BOTTOM), @@ -102,45 +98,50 @@ def _get_bounding_box_lines(bounding_box: BoundingBoxSE3) -> npt.NDArray[np.floa (Corners3DIndex.BACK_RIGHT_BOTTOM, Corners3DIndex.BACK_RIGHT_TOP), (Corners3DIndex.BACK_LEFT_BOTTOM, Corners3DIndex.BACK_LEFT_TOP), ] - lines = np.zeros((len(index_pairs), 2, len(Point3DIndex)), dtype=np.float64) - for i, (start_idx, end_idx) in enumerate(index_pairs): - lines[i, 0] = corners[start_idx] - lines[i, 1] = corners[end_idx] + + # Handle both single box and batched cases + if corners_array.ndim == 2: + # Single box case: (8, 3) + lines = np.zeros((len(index_pairs), 2, len(Point3DIndex)), dtype=np.float64) + for i, (start_idx, end_idx) in enumerate(index_pairs): + lines[i, 0] = corners_array[start_idx] + lines[i, 1] = corners_array[end_idx] + else: + # Batched case: (..., 8, 3) + batch_shape = corners_array.shape[:-2] + lines = np.zeros(batch_shape + (len(index_pairs), 2, len(Point3DIndex)), dtype=np.float64) + for i, (start_idx, end_idx) in enumerate(index_pairs): + lines[..., i, 0, :] = corners_array[..., start_idx, :] + lines[..., i, 1, :] = corners_array[..., end_idx, :] + return lines def get_bounding_box_outlines(scene: AbstractScene, iteration: int): initial_ego_vehicle_state = scene.get_ego_state_at_iteration(0) - origin: StateSE3 = initial_ego_vehicle_state.center_se3 - ego_vehicle_state = scene.get_ego_state_at_iteration(iteration) box_detections = scene.get_box_detections_at_iteration(iteration) - lines = [] - colors = [] - for box_detection in box_detections: - bbox: BoundingBoxSE3 = box_detection.bounding_box_se3 - bbox_lines = _get_bounding_box_lines(bbox) - 
bbox_lines[..., Point3DIndex.XYZ] = bbox_lines[..., Point3DIndex.XYZ] - origin.array[StateSE3Index.XYZ] - bbox_color = np.zeros(bbox_lines.shape, dtype=np.float32) - bbox_color[..., :] = ( - BOX_DETECTION_CONFIG[box_detection.metadata.detection_type] - .fill_color.set_brightness(BRIGHTNESS_FACTOR) - .rgb_norm - ) + # Load boxes to visualize, including ego vehicle at the last position + boxes = [bd.bounding_box_se3 for bd in box_detections.box_detections] + [ego_vehicle_state.bounding_box_se3] + boxes_type = [bd.metadata.detection_type for bd in box_detections.box_detections] + [DetectionType.EGO] + + # Create lines for all boxes + box_se3_array = np.array([box.array for box in boxes]) + box_se3_array[..., BoundingBoxSE3Index.XYZ] -= initial_ego_vehicle_state.center_se3.array[StateSE3Index.XYZ] + box_corners_array = bbse3_array_to_corners_array(box_se3_array) + box_lines = _get_bounding_box_lines_from_array(box_corners_array) - lines.append(bbox_lines) - colors.append(bbox_color) + # Create colors for all boxes + box_colors = np.zeros(box_lines.shape, dtype=np.float32) + for i, box_type in enumerate(boxes_type): + box_colors[i, ...] = BOX_DETECTION_CONFIG[box_type].fill_color.set_brightness(BRIGHTNESS_FACTOR).rgb_norm - ego_bbox_lines = _get_bounding_box_lines(ego_vehicle_state.bounding_box_se3) - ego_bbox_lines[..., Point3DIndex.XYZ] = ego_bbox_lines[..., Point3DIndex.XYZ] - origin.array[StateSE3Index.XYZ] - ego_bbox_color = np.zeros(ego_bbox_lines.shape, dtype=np.float32) - ego_bbox_color[..., :] = EGO_VEHICLE_CONFIG.fill_color.set_brightness(BRIGHTNESS_FACTOR).rgb_norm + box_lines = box_lines.reshape(-1, *box_lines.shape[2:]) + box_colors = box_colors.reshape(-1, *box_colors.shape[2:]) - lines.append(ego_bbox_lines) - colors.append(ego_bbox_color) - return np.concatenate(lines, axis=0), np.concatenate(colors, axis=0) + return box_lines, box_colors def get_map_meshes(scene: AbstractScene): @@ -275,25 +276,35 @@ def get_camera_if_available(scene: AbstractScene, camera_type: CameraType, itera return camera -def get_camera_values(scene: AbstractScene, camera: Camera, iteration: int) -> Tuple[Point3D, Quaternion]: +def get_camera_values( + scene: AbstractScene, camera: Camera, iteration: int, resize_factor: Optional[float] = None +) -> Tuple[npt.NDArray[np.float64], npt.NDArray[np.float64], npt.NDArray[np.uint8]]: + initial_point_3d = scene.get_ego_state_at_iteration(0).center_se3.point_3d rear_axle = scene.get_ego_state_at_iteration(iteration).rear_axle_se3 rear_axle_array = rear_axle.array rear_axle_array[:3] -= initial_point_3d.array - rear_axle = StateSE3.from_array(rear_axle_array) + rear_axle = StateSE3.from_array(rear_axle_array, copy=False) camera_to_ego = camera.extrinsic # 4x4 transformation from camera to ego frame + camera_se3 = StateSE3.from_transformation_matrix(camera_to_ego) - ego_transform = rear_axle.transformation_matrix - - camera_transform = ego_transform @ camera_to_ego + camera_se3_array = convert_relative_to_absolute_se3_array(origin=rear_axle, se3_array=camera_se3.array) + abs_camera_se3 = StateSE3.from_array(camera_se3_array, copy=False) # Camera transformation in ego frame - camera_position = Point3D(*camera_transform[:3, 3]) - camera_rotation = Quaternion(matrix=camera_transform[:3, :3]) + camera_position = abs_camera_se3.point_3d.array + camera_rotation = abs_camera_se3.quaternion.array + + camera_image = camera.image + + if resize_factor is not None: + new_width = int(camera_image.shape[1] * resize_factor) + new_height = int(camera_image.shape[0] * resize_factor) 
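+        # Downscale the frustum texture before streaming it to the client; upload size, not interpolation quality, dominates latency here.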
+ camera_image = cv2.resize(camera_image, (new_width, new_height), interpolation=cv2.INTER_LINEAR) - return camera_position, camera_rotation + return camera_position, camera_rotation, camera_image def get_lidar_points( diff --git a/d123/datatypes/scene/arrow/utils/conversion.py b/d123/datatypes/scene/arrow/utils/conversion.py index 514d9f20..2b95f2fb 100644 --- a/d123/datatypes/scene/arrow/utils/conversion.py +++ b/d123/datatypes/scene/arrow/utils/conversion.py @@ -1,14 +1,13 @@ # TODO: rename this file and potentially move somewhere more appropriate. -import io import os from pathlib import Path from typing import Dict, List, Optional +import cv2 import numpy as np import numpy.typing as npt import pyarrow as pa -from PIL import Image from d123.datatypes.detections.detection import ( BoxDetection, @@ -31,7 +30,7 @@ DATASET_SENSOR_ROOT: Dict[str, Path] = { "nuplan": Path(os.environ["NUPLAN_DATA_ROOT"]) / "nuplan-v1.1" / "sensor_blobs", "carla": Path(os.environ["CARLA_DATA_ROOT"]) / "sensor_blobs", - # "av2-sensor": Path(os.environ["AV2_SENSOR_DATA_ROOT"]) / "sensor", + "av2-sensor": Path(os.environ["AV2_SENSOR_DATA_ROOT"]) / "sensor_mini", } @@ -111,11 +110,11 @@ def get_camera_from_arrow_table( sensor_root = DATASET_SENSOR_ROOT[log_metadata.dataset] full_image_path = sensor_root / table_data assert full_image_path.exists(), f"Camera file not found: {full_image_path}" - img = Image.open(full_image_path) - img.load() - image = np.asarray(img, dtype=np.uint8) + image = cv2.imread(str(full_image_path), cv2.IMREAD_COLOR) + image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB) elif isinstance(table_data, bytes): - image = np.array(Image.open(io.BytesIO(table_data))) + image = cv2.imdecode(np.frombuffer(table_data, np.uint8), cv2.IMREAD_UNCHANGED) + image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB) else: raise NotImplementedError("Only string file paths for camera data are supported.") diff --git a/d123/geometry/se.py b/d123/geometry/se.py index 213dca2f..0efc8898 100644 --- a/d123/geometry/se.py +++ b/d123/geometry/se.py @@ -149,7 +149,7 @@ def from_transformation_matrix(cls, transformation_matrix: npt.NDArray[np.float6 assert transformation_matrix.ndim == 2 assert transformation_matrix.shape == (4, 4) array = np.zeros(len(StateSE3Index), dtype=np.float64) - array[StateSE3Index.XYZ] = transformation_matrix[:3, :3] + array[StateSE3Index.XYZ] = transformation_matrix[:3, 3] array[StateSE3Index.QUATERNION] = Quaternion.from_rotation_matrix(transformation_matrix[:3, :3]) return StateSE3.from_array(array) diff --git a/d123/geometry/utils/bounding_box_utils.py b/d123/geometry/utils/bounding_box_utils.py index 4d3a4cd0..74c506e1 100644 --- a/d123/geometry/utils/bounding_box_utils.py +++ b/d123/geometry/utils/bounding_box_utils.py @@ -1,3 +1,5 @@ +from typing import Tuple + import numpy as np import numpy.typing as npt import shapely @@ -136,3 +138,41 @@ def bbse3_array_to_corners_array(bbse3_array: npt.NDArray[np.float64]) -> npt.ND ) return corners_world.squeeze(axis=0) if ndim_one else corners_world + + +def corners_array_to_3d_mesh( + corners_array: npt.NDArray[np.float64], +) -> Tuple[npt.NDArray[np.float64], npt.NDArray[np.int32]]: + + num_boxes = corners_array.shape[0] + vertices = corners_array.reshape(-1, 3) + + # Define the faces for a single box using Corners3DIndex + faces_single = np.array( + [ + # Bottom face + [Corners3DIndex.FRONT_LEFT_BOTTOM, Corners3DIndex.FRONT_RIGHT_BOTTOM, Corners3DIndex.BACK_RIGHT_BOTTOM], + [Corners3DIndex.FRONT_LEFT_BOTTOM, Corners3DIndex.BACK_RIGHT_BOTTOM, 
Corners3DIndex.BACK_LEFT_BOTTOM], + # Top face + [Corners3DIndex.BACK_RIGHT_TOP, Corners3DIndex.FRONT_RIGHT_TOP, Corners3DIndex.FRONT_LEFT_TOP], + [Corners3DIndex.BACK_LEFT_TOP, Corners3DIndex.BACK_RIGHT_TOP, Corners3DIndex.FRONT_LEFT_TOP], + # Left face + [Corners3DIndex.FRONT_LEFT_BOTTOM, Corners3DIndex.BACK_LEFT_BOTTOM, Corners3DIndex.BACK_LEFT_TOP], + [Corners3DIndex.FRONT_LEFT_BOTTOM, Corners3DIndex.BACK_LEFT_TOP, Corners3DIndex.FRONT_LEFT_TOP], + # Right face + [Corners3DIndex.BACK_RIGHT_TOP, Corners3DIndex.BACK_RIGHT_BOTTOM, Corners3DIndex.FRONT_RIGHT_BOTTOM], + [Corners3DIndex.FRONT_RIGHT_TOP, Corners3DIndex.BACK_RIGHT_TOP, Corners3DIndex.FRONT_RIGHT_BOTTOM], + # Front face + [Corners3DIndex.FRONT_RIGHT_TOP, Corners3DIndex.FRONT_RIGHT_BOTTOM, Corners3DIndex.FRONT_LEFT_BOTTOM], + [Corners3DIndex.FRONT_LEFT_TOP, Corners3DIndex.FRONT_RIGHT_TOP, Corners3DIndex.FRONT_LEFT_BOTTOM], + # Back face + [Corners3DIndex.BACK_LEFT_TOP, Corners3DIndex.BACK_LEFT_BOTTOM, Corners3DIndex.BACK_RIGHT_BOTTOM], + [Corners3DIndex.BACK_RIGHT_TOP, Corners3DIndex.BACK_LEFT_TOP, Corners3DIndex.BACK_RIGHT_BOTTOM], + ], + dtype=np.int32, + ) + + # Offset the faces for each box + faces = np.vstack([faces_single + i * 8 for i in range(num_boxes)]) + + return vertices, faces diff --git a/d123/script/config/dataset_conversion/default_dataset_conversion.yaml b/d123/script/config/dataset_conversion/default_dataset_conversion.yaml index be97439e..0a4544da 100644 --- a/d123/script/config/dataset_conversion/default_dataset_conversion.yaml +++ b/d123/script/config/dataset_conversion/default_dataset_conversion.yaml @@ -15,10 +15,10 @@ defaults: - default_dataset_paths - _self_ - datasets: - - nuplan_private_dataset + # - nuplan_private_dataset # - carla_dataset # - wopd_dataset - # - av2_sensor_dataset + - av2_sensor_dataset force_log_conversion: True force_map_conversion: True diff --git a/d123/script/config/datasets/av2_sensor_dataset.yaml b/d123/script/config/datasets/av2_sensor_dataset.yaml index d567a175..d65947e4 100644 --- a/d123/script/config/datasets/av2_sensor_dataset.yaml +++ b/d123/script/config/datasets/av2_sensor_dataset.yaml @@ -3,7 +3,7 @@ av2_sensor_dataset: _convert_: 'all' splits: ["av2-sensor-mini_train"] - log_path: "/mnt/elements_0/argoverse" + log_path: "/media/nvme1/argoverse" data_converter_config: _target_: d123.datasets.raw_data_converter.DataConverterConfig @@ -13,4 +13,4 @@ av2_sensor_dataset: force_log_conversion: ${force_log_conversion} force_map_conversion: ${force_map_conversion} camera_store_option: "path" - lidar_store_option: null + lidar_store_option: "path" diff --git a/d123/script/config/datasets/nuplan_private_dataset.yaml b/d123/script/config/datasets/nuplan_private_dataset.yaml index 7062f38f..af6f16ba 100644 --- a/d123/script/config/datasets/nuplan_private_dataset.yaml +++ b/d123/script/config/datasets/nuplan_private_dataset.yaml @@ -12,5 +12,5 @@ nuplan_private_dataset: output_path: ${d123_data_root} force_log_conversion: ${force_log_conversion} force_map_conversion: ${force_map_conversion} - camera_store_option: "path" + camera_store_option: "binary" lidar_store_option: "path" diff --git a/notebooks/viz/viser_testing_v2_scene.ipynb b/notebooks/viz/viser_testing_v2_scene.ipynb index f2371058..e3dc780f 100644 --- a/notebooks/viz/viser_testing_v2_scene.ipynb +++ b/notebooks/viz/viser_testing_v2_scene.ipynb @@ -7,11 +7,11 @@ "metadata": {}, "outputs": [], "source": [ - "from d123.dataset.scene.scene_builder import ArrowSceneBuilder\n", - "from d123.dataset.scene.scene_filter import 
SceneFilter\n", + "from d123.datatypes.scene.arrow.arrow_scene_builder import ArrowSceneBuilder\n", + "from d123.datatypes.scene.scene_filter import SceneFilter\n", "\n", "from d123.common.multithreading.worker_sequential import Sequential\n", - "from d123.common.datatypes.sensor.camera import CameraType" + "from d123.datatypes.sensors.camera import CameraType" ] }, { @@ -21,16 +21,11 @@ "metadata": {}, "outputs": [], "source": [ - "import requests\n", - "from PIL import Image\n", - "from io import BytesIO\n", "\n", - "\n", - "\n", - "# splits = [\"nuplan_private_test\"]\n", + "splits = [\"nuplan_private_test\"]\n", "# splits = [\"carla\"]\n", "# splits = [\"wopd_train\"]\n", - "splits = [\"av2-sensor-mini_train\"]\n", + "# splits = [\"av2-sensor-mini_train\"]\n", "log_names = None\n", "\n", "scene_tokens = None\n", @@ -42,7 +37,7 @@ " duration_s=10,\n", " history_s=0.0,\n", " timestamp_threshold_s=10,\n", - " shuffle=False,\n", + " shuffle=True,\n", " camera_types=[CameraType.CAM_F0],\n", ")\n", "scene_builder = ArrowSceneBuilder(\"/home/daniel/d123_workspace/data\")\n", @@ -59,14 +54,6 @@ "id": "2", "metadata": {}, "outputs": [], - "source": [] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "3", - "metadata": {}, - "outputs": [], "source": [ "from d123.common.visualization.viser.server import ViserVisualizationServer\n", "\n", @@ -74,24 +61,10 @@ "visualization_server = ViserVisualizationServer(scenes, scene_index=0)" ] }, - { - "cell_type": "markdown", - "id": "4", - "metadata": {}, - "source": [] - }, { "cell_type": "code", "execution_count": null, - "id": "5", - "metadata": {}, - "outputs": [], - "source": [] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "6", + "id": "3", "metadata": {}, "outputs": [], "source": [] @@ -99,7 +72,7 @@ ], "metadata": { "kernelspec": { - "display_name": "d123_dev", + "display_name": "d123", "language": "python", "name": "python3" }, diff --git a/test_viser.py b/test_viser.py new file mode 100644 index 00000000..072c835e --- /dev/null +++ b/test_viser.py @@ -0,0 +1,34 @@ +from d123.common.multithreading.worker_sequential import Sequential +from d123.common.visualization.viser.server import ViserVisualizationServer +from d123.datatypes.scene.arrow.arrow_scene_builder import ArrowSceneBuilder +from d123.datatypes.scene.scene_filter import SceneFilter +from d123.datatypes.sensors.camera import CameraType + +if __name__ == "__main__": + + splits = ["nuplan_private_test"] + # splits = ["carla"] + # splits = ["wopd_train"] + # splits = ["av2-sensor-mini_train"] + log_names = None + + scene_tokens = None + + scene_filter = SceneFilter( + split_names=splits, + log_names=log_names, + scene_tokens=scene_tokens, + duration_s=10, + history_s=0.5, + timestamp_threshold_s=10, + shuffle=False, + camera_types=[CameraType.CAM_F0], + ) + scene_builder = ArrowSceneBuilder("/home/daniel/d123_workspace/data") + worker = Sequential() + # worker = RayDistributed() + scenes = scene_builder.get_scenes(scene_filter, worker) + + print(f"Found {len(scenes)} scenes") + + visualization_server = ViserVisualizationServer(scenes, scene_index=0) From fd74132a7cf794dfcec49b882931c37aaee3129b Mon Sep 17 00:00:00 2001 From: Daniel Dauner Date: Fri, 3 Oct 2025 21:05:28 +0200 Subject: [PATCH 054/145] Refactor viser viewer, including speed improvements (#39) --- .../visualization/viser/elements/__init__.py | 7 + .../viser/elements/detection_elements.py | 102 +++++ .../viser/elements/map_elements.py | 82 ++++ .../viser/elements/sensor_elements.py | 170 
++++++++ d123/common/visualization/viser/server.py | 386 ------------------ d123/common/visualization/viser/utils.py | 356 ---------------- .../visualization/viser/viser_config.py | 71 ++++ .../visualization/viser/viser_viewer.py | 245 +++++++++++ d123/datatypes/maps/gpkg/gpkg_map_objects.py | 3 +- d123/datatypes/maps/gpkg/utils.py | 42 ++ d123/datatypes/scene/abstract_scene.py | 14 +- d123/datatypes/scene/arrow/arrow_scene.py | 60 +-- d123/geometry/utils/bounding_box_utils.py | 40 +- d123/script/run_viser.py | 4 +- test_viser.py | 14 +- 15 files changed, 810 insertions(+), 786 deletions(-) create mode 100644 d123/common/visualization/viser/elements/__init__.py create mode 100644 d123/common/visualization/viser/elements/detection_elements.py create mode 100644 d123/common/visualization/viser/elements/map_elements.py create mode 100644 d123/common/visualization/viser/elements/sensor_elements.py delete mode 100644 d123/common/visualization/viser/server.py delete mode 100644 d123/common/visualization/viser/utils.py create mode 100644 d123/common/visualization/viser/viser_config.py create mode 100644 d123/common/visualization/viser/viser_viewer.py diff --git a/d123/common/visualization/viser/elements/__init__.py b/d123/common/visualization/viser/elements/__init__.py new file mode 100644 index 00000000..55c47327 --- /dev/null +++ b/d123/common/visualization/viser/elements/__init__.py @@ -0,0 +1,7 @@ +from d123.common.visualization.viser.elements.detection_elements import add_box_detections_to_viser_server +from d123.common.visualization.viser.elements.map_elements import add_map_to_viser_server +from d123.common.visualization.viser.elements.sensor_elements import ( + add_camera_frustums_to_viser_server, + add_camera_gui_to_viser_server, + add_lidar_pc_to_viser_server, +) diff --git a/d123/common/visualization/viser/elements/detection_elements.py b/d123/common/visualization/viser/elements/detection_elements.py new file mode 100644 index 00000000..cdb2b0c8 --- /dev/null +++ b/d123/common/visualization/viser/elements/detection_elements.py @@ -0,0 +1,102 @@ +import numpy as np +import numpy.typing as npt +import trimesh +import viser + +from d123.common.visualization.color.default import BOX_DETECTION_CONFIG +from d123.common.visualization.viser.viser_config import ViserConfig +from d123.datatypes.detections.detection_types import DetectionType +from d123.datatypes.scene.abstract_scene import AbstractScene +from d123.datatypes.vehicle_state.ego_state import EgoStateSE3 +from d123.geometry.geometry_index import BoundingBoxSE3Index, Corners3DIndex, StateSE3Index +from d123.geometry.utils.bounding_box_utils import ( + bbse3_array_to_corners_array, + corners_array_to_3d_mesh, + corners_array_to_edge_lines, +) + + +def add_box_detections_to_viser_server( + scene: AbstractScene, + scene_interation: int, + initial_ego_state: EgoStateSE3, + viser_server: viser.ViserServer, + viser_config: ViserConfig, +) -> None: + if viser_config.bounding_box_visible: + if viser_config.bounding_box_type == "mesh": + mesh = _get_bounding_box_meshes(scene, scene_interation, initial_ego_state) + viser_server.scene.add_mesh_trimesh( + "box_detections", + mesh=mesh, + visible=True, + ) + elif viser_config.bounding_box_type == "lines": + lines, colors = _get_bounding_box_outlines(scene, scene_interation, initial_ego_state) + viser_server.scene.add_line_segments( + "box_detections", + points=lines, + colors=colors, + line_width=viser_config.bounding_box_line_width, + ) + else: + raise ValueError(f"Unknown bounding box type: 
{viser_config.bounding_box_type}")
+
+
+def _get_bounding_box_meshes(scene: AbstractScene, iteration: int, initial_ego_state: EgoStateSE3) -> trimesh.Trimesh:
+
+ ego_vehicle_state = scene.get_ego_state_at_iteration(iteration)
+ box_detections = scene.get_box_detections_at_iteration(iteration)
+
+ # Load boxes to visualize, including ego vehicle at the last position
+ boxes = [bd.bounding_box_se3 for bd in box_detections.box_detections] + [ego_vehicle_state.bounding_box_se3]
+ boxes_type = [bd.metadata.detection_type for bd in box_detections.box_detections] + [DetectionType.EGO]
+
+ # Create meshes for all boxes
+ box_se3_array = np.array([box.array for box in boxes])
+ box_se3_array[..., BoundingBoxSE3Index.XYZ] -= initial_ego_state.center_se3.array[StateSE3Index.XYZ]
+ box_corners_array = bbse3_array_to_corners_array(box_se3_array)
+ box_vertices, box_faces = corners_array_to_3d_mesh(box_corners_array)
+
+ # Create colors for each box based on detection type
+ box_colors = []
+ for box_type in boxes_type:
+ box_colors.append(BOX_DETECTION_CONFIG[box_type].fill_color.rgba)
+
+ # Convert to numpy array and repeat for each vertex
+ box_colors = np.array(box_colors)
+ vertex_colors = np.repeat(box_colors, len(Corners3DIndex), axis=0)
+
+ # Create trimesh object
+ mesh = trimesh.Trimesh(vertices=box_vertices, faces=box_faces)
+ mesh.visual.vertex_colors = vertex_colors
+
+ return mesh
+
+
+def _get_bounding_box_outlines(
+ scene: AbstractScene, iteration: int, initial_ego_state: EgoStateSE3
+) -> tuple[npt.NDArray[np.float64], npt.NDArray[np.float32]]:
+
+ ego_vehicle_state = scene.get_ego_state_at_iteration(iteration)
+ box_detections = scene.get_box_detections_at_iteration(iteration)
+
+ # Load boxes to visualize, including ego vehicle at the last position
+ boxes = [bd.bounding_box_se3 for bd in box_detections.box_detections] + [ego_vehicle_state.bounding_box_se3]
+ boxes_type = [bd.metadata.detection_type for bd in box_detections.box_detections] + [DetectionType.EGO]
+
+ # Create lines for all boxes
+ box_se3_array = np.array([box.array for box in boxes])
+ box_se3_array[..., BoundingBoxSE3Index.XYZ] -= initial_ego_state.center_se3.array[StateSE3Index.XYZ]
+ box_corners_array = bbse3_array_to_corners_array(box_se3_array)
+ box_outlines = corners_array_to_edge_lines(box_corners_array)
+
+ # Create colors for all boxes
+ box_colors = np.zeros(box_outlines.shape, dtype=np.float32)
+ for i, box_type in enumerate(boxes_type):
+ box_colors[i, ...]
= BOX_DETECTION_CONFIG[box_type].fill_color.rgb_norm + + box_outlines = box_outlines.reshape(-1, *box_outlines.shape[2:]) + box_colors = box_colors.reshape(-1, *box_colors.shape[2:]) + + return box_outlines, box_colors diff --git a/d123/common/visualization/viser/elements/map_elements.py b/d123/common/visualization/viser/elements/map_elements.py new file mode 100644 index 00000000..90c247cb --- /dev/null +++ b/d123/common/visualization/viser/elements/map_elements.py @@ -0,0 +1,82 @@ +from typing import Dict + +import trimesh +import viser + +from d123.common.visualization.color.default import MAP_SURFACE_CONFIG +from d123.common.visualization.viser.viser_config import ViserConfig +from d123.datatypes.maps.abstract_map import MapLayer +from d123.datatypes.maps.abstract_map_objects import AbstractSurfaceMapObject +from d123.datatypes.scene.abstract_scene import AbstractScene +from d123.datatypes.vehicle_state.ego_state import EgoStateSE3 +from d123.geometry import Point3D, Point3DIndex + + +def add_map_to_viser_server( + scene: AbstractScene, + initial_ego_state: EgoStateSE3, + viser_server: viser.ViserServer, + viser_config: ViserConfig, +) -> None: + + if viser_config.map_visible: + for name, mesh in _get_map_trimesh_dict(scene, initial_ego_state, viser_config).items(): + viser_server.scene.add_mesh_trimesh(f"/map/{name}", mesh, visible=True) + + +def _get_map_trimesh_dict( + scene: AbstractScene, + initial_ego_state: EgoStateSE3, + viser_config: ViserConfig, +) -> Dict[str, trimesh.Trimesh]: + + # Unpack scene center for translation of map objects. + scene_center: Point3D = initial_ego_state.center.point_3d + scene_center_array = scene_center.array + + # Load map objects within a certain radius around the scene center. + map_layers = [ + MapLayer.LANE_GROUP, + MapLayer.INTERSECTION, + MapLayer.WALKWAY, + MapLayer.CROSSWALK, + MapLayer.CARPARK, + MapLayer.GENERIC_DRIVABLE, + ] + map_objects_dict = scene.map_api.get_proximal_map_objects( + scene_center.point_2d, + radius=viser_config.map_radius, + layers=map_layers, + ) + + # Create trimesh meshes for each map layer. + trimesh_dict = {} + for map_layer in map_objects_dict.keys(): + surface_meshes = [] + for map_surface in map_objects_dict[map_layer]: + map_surface: AbstractSurfaceMapObject + + trimesh_mesh = map_surface.trimesh_mesh + trimesh_mesh.vertices -= scene_center_array + + # Adjust height of non-road surfaces to avoid z-fighting in the visualization. + if map_layer in [ + MapLayer.WALKWAY, + MapLayer.CROSSWALK, + MapLayer.CARPARK, + ]: + trimesh_mesh.vertices[..., Point3DIndex.Z] += viser_config.map_non_road_z_offset + + # If the map does not have z-values, we place the surfaces on the ground level of the ego vehicle. + if not scene.log_metadata.map_has_z: + trimesh_mesh.vertices[..., Point3DIndex.Z] += ( + scene_center.z - initial_ego_state.vehicle_parameters.height / 2 + ) + + # Color the mesh based on the map layer type. 
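+ # A single RGBA value assigned here is broadcast by trimesh to every face of
+ # the mesh, so no per-face color array has to be built.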
+ trimesh_mesh.visual.face_colors = MAP_SURFACE_CONFIG[map_layer].fill_color.rgba + surface_meshes.append(trimesh_mesh) + + trimesh_dict[f"{map_layer.serialize()}"] = trimesh.util.concatenate(surface_meshes) + + return trimesh_dict diff --git a/d123/common/visualization/viser/elements/sensor_elements.py b/d123/common/visualization/viser/elements/sensor_elements.py new file mode 100644 index 00000000..ee9b57bf --- /dev/null +++ b/d123/common/visualization/viser/elements/sensor_elements.py @@ -0,0 +1,170 @@ +import concurrent.futures +from typing import Dict, List, Optional, Tuple + +import cv2 +import numpy as np +import numpy.typing as npt +import viser + +from d123.common.visualization.viser.viser_config import ViserConfig +from d123.datatypes.scene.abstract_scene import AbstractScene +from d123.datatypes.sensors.camera import Camera, CameraType +from d123.datatypes.sensors.lidar import LiDARType +from d123.datatypes.vehicle_state.ego_state import EgoStateSE3 +from d123.geometry import StateSE3, StateSE3Index +from d123.geometry.transform.transform_se3 import ( + convert_relative_to_absolute_points_3d_array, + convert_relative_to_absolute_se3_array, +) + + +def add_camera_frustums_to_viser_server( + scene: AbstractScene, + scene_interation: int, + initial_ego_state: EgoStateSE3, + viser_server: viser.ViserServer, + viser_config: ViserConfig, + camera_frustum_handles: Dict[CameraType, viser.CameraFrustumHandle], +) -> None: + + if viser_config.camera_frustum_visible: + scene_center_array = initial_ego_state.center.point_3d.array + ego_pose = scene.get_ego_state_at_iteration(scene_interation).rear_axle_se3.array + ego_pose[StateSE3Index.XYZ] -= scene_center_array + + def _add_camera_frustums_to_viser_server(camera_type: CameraType) -> None: + camera = scene.get_camera_at_iteration(scene_interation, camera_type) + if camera is not None: + camera_position, camera_quaternion, camera_image = _get_camera_values( + camera, + ego_pose.copy(), + viser_config.camera_frustum_image_scale, + ) + if camera_type in camera_frustum_handles: + camera_frustum_handles[camera_type].position = camera_position + camera_frustum_handles[camera_type].wxyz = camera_quaternion + camera_frustum_handles[camera_type].image = camera_image + else: + camera_frustum_handles[camera_type] = viser_server.scene.add_camera_frustum( + f"camera_frustums/{camera_type.serialize()}", + fov=camera.metadata.fov_y, + aspect=camera.metadata.aspect_ratio, + scale=viser_config.camera_frustum_frustum_scale, + image=camera_image, + position=camera_position, + wxyz=camera_quaternion, + ) + + return None + + # NOTE; In order to speed up adding camera frustums, we use multithreading and resize the images. + with concurrent.futures.ThreadPoolExecutor(max_workers=len(viser_config.camera_frustum_types)) as executor: + future_to_camera = { + executor.submit(_add_camera_frustums_to_viser_server, camera_type): camera_type + for camera_type in viser_config.camera_frustum_types + } + for future in concurrent.futures.as_completed(future_to_camera): + _ = future.result() + + # TODO: Remove serial implementation, if not needed anymore. 
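+ # NOTE: A thread pool (rather than worker processes) should be sufficient here:
+ # the per-camera work is dominated by image loading and OpenCV resizing, which
+ # largely release the GIL, so the requests overlap within one Python process.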
+ # for camera_type in viser_config.camera_frustum_types: + # _add_camera_frustums_to_viser_server(camera_type) + + return None + + +def add_camera_gui_to_viser_server( + scene: AbstractScene, + scene_interation: int, + viser_server: viser.ViserServer, + viser_config: ViserConfig, + camera_gui_handles: Dict[CameraType, viser.GuiImageHandle], +) -> None: + if viser_config.camera_gui_visible: + for camera_type in viser_config.camera_gui_types: + camera = scene.get_camera_at_iteration(scene_interation, camera_type) + if camera is not None: + if camera_type in camera_gui_handles: + camera_gui_handles[camera_type].image = _rescale_image( + camera.image, viser_config.camera_gui_image_scale + ) + else: + with viser_server.gui.add_folder(f"Camera {camera_type.serialize()}"): + camera_gui_handles[camera_type] = viser_server.gui.add_image( + image=_rescale_image(camera.image, viser_config.camera_gui_image_scale), + label=camera_type.serialize(), + ) + + +def add_lidar_pc_to_viser_server( + scene: AbstractScene, + scene_interation: int, + initial_ego_state: EgoStateSE3, + viser_server: viser.ViserServer, + viser_config: ViserConfig, + lidar_pc_handle: Optional[viser.PointCloudHandle], +) -> None: + if viser_config.lidar_visible: + + scene_center_array = initial_ego_state.center.point_3d.array + ego_pose = scene.get_ego_state_at_iteration(scene_interation).rear_axle_se3.array + ego_pose[StateSE3Index.XYZ] -= scene_center_array + + def _load_lidar_points(lidar_type: LiDARType) -> npt.NDArray[np.float32]: + lidar = scene.get_lidar_at_iteration(scene_interation, lidar_type) + if lidar is not None: + return lidar.xyz + else: + return np.zeros((0, 3), dtype=np.float32) + + with concurrent.futures.ThreadPoolExecutor(max_workers=len(viser_config.lidar_types)) as executor: + future_to_lidar = { + executor.submit(_load_lidar_points, lidar_type): lidar_type for lidar_type in viser_config.lidar_types + } + lidar_points_3d_list: List[npt.NDArray[np.float32]] = [] + for future in concurrent.futures.as_completed(future_to_lidar): + lidar_points_3d_list.append(future.result()) + + points_3d_local = ( + np.concatenate(lidar_points_3d_list, axis=0) if lidar_points_3d_list else np.zeros((0, 3), dtype=np.float32) + ) + points = convert_relative_to_absolute_points_3d_array(ego_pose, points_3d_local) + colors = np.zeros_like(points) + + if lidar_pc_handle is not None: + lidar_pc_handle.points = points + lidar_pc_handle.colors = colors + else: + lidar_pc_handle = viser_server.scene.add_point_cloud( + "lidar_points", + points=points, + colors=colors, + point_size=viser_config.lidar_point_size, + point_shape=viser_config.lidar_point_shape, + ) + + +def _get_camera_values( + camera: Camera, + ego_pose: npt.NDArray[np.float64], + resize_factor: Optional[float] = None, +) -> Tuple[npt.NDArray[np.float64], npt.NDArray[np.float64], npt.NDArray[np.uint8]]: + assert ego_pose.ndim == 1 and len(ego_pose) == len(StateSE3Index) + + rel_camera_pose = StateSE3.from_transformation_matrix(camera.extrinsic).array + abs_camera_pose = convert_relative_to_absolute_se3_array(origin=ego_pose, se3_array=rel_camera_pose) + + camera_position = abs_camera_pose[StateSE3Index.XYZ] + camera_rotation = abs_camera_pose[StateSE3Index.QUATERNION] + + camera_image = _rescale_image(camera.image, resize_factor) + return camera_position, camera_rotation, camera_image + + +def _rescale_image(image: npt.NDArray[np.uint8], scale: float) -> npt.NDArray[np.uint8]: + if scale == 1.0: + return image + new_width = int(image.shape[1] * scale) + new_height = 
int(image.shape[0] * scale) + downscaled_image = cv2.resize(image, (new_width, new_height), interpolation=cv2.INTER_LINEAR) + return downscaled_image diff --git a/d123/common/visualization/viser/server.py b/d123/common/visualization/viser/server.py deleted file mode 100644 index 29e97418..00000000 --- a/d123/common/visualization/viser/server.py +++ /dev/null @@ -1,386 +0,0 @@ -import time -from concurrent.futures import ThreadPoolExecutor -from typing import Dict, List, Literal - -import numpy as np -import viser - -from d123.common.utils.timer import Timer -from d123.common.visualization.viser.utils import ( - get_bounding_box_meshes, - get_bounding_box_outlines, - get_camera_if_available, - get_camera_values, - get_lidar_points, - get_map_meshes, -) -from d123.datatypes.scene.abstract_scene import AbstractScene -from d123.datatypes.sensors.camera import CameraType -from d123.datatypes.sensors.lidar import LiDARType - -# TODO: Try to fix performance issues. -# TODO: Refactor this file. - -all_camera_types: List[CameraType] = [ - CameraType.CAM_F0, - CameraType.CAM_B0, - CameraType.CAM_L0, - CameraType.CAM_L1, - CameraType.CAM_L2, - CameraType.CAM_R0, - CameraType.CAM_R1, - CameraType.CAM_R2, -] - -# MISC config: -LINE_WIDTH: float = 4.0 - -# Bounding box config: -BOUNDING_BOX_TYPE: Literal["mesh", "lines"] = "mesh" - -# Map config: -MAP_AVAILABLE: bool = True - - -# Cameras config: - -# VISUALIZE_CAMERA_FRUSTUM: List[CameraType] = [CameraType.CAM_F0, CameraType.CAM_L0, CameraType.CAM_R0] -VISUALIZE_CAMERA_FRUSTUM: List[CameraType] = all_camera_types -# VISUALIZE_CAMERA_FRUSTUM: List[CameraType] = [CameraType.CAM_STEREO_L, CameraType.CAM_STEREO_R] -# VISUALIZE_CAMERA_FRUSTUM: List[CameraType] = [] -# VISUALIZE_CAMERA_GUI: List[CameraType] = [CameraType.CAM_F0] - -VISUALIZE_CAMERA_GUI: List[CameraType] = [] - -CAMERA_SCALE: float = 1.0 -RESIZE_FACTOR = 0.25 - -# Lidar config: -LIDAR_AVAILABLE: bool = False - -LIDAR_TYPES: List[LiDARType] = [ - LiDARType.LIDAR_MERGED, - LiDARType.LIDAR_TOP, - LiDARType.LIDAR_FRONT, - LiDARType.LIDAR_SIDE_LEFT, - LiDARType.LIDAR_SIDE_RIGHT, - LiDARType.LIDAR_BACK, -] -# LIDAR_TYPES: List[LiDARType] = [ -# LiDARType.LIDAR_TOP, -# ] -LIDAR_POINT_SIZE: float = 0.05 - - -class ViserVisualizationServer: - def __init__( - self, - scenes: List[AbstractScene], - scene_index: int = 0, - host: str = "localhost", - port: int = 8080, - label: str = "D123 Viser Server", - ): - assert len(scenes) > 0, "At least one scene must be provided." - self.scenes = scenes - self.scene_index = scene_index - - self.host = host - self.port = port - self.label = label - - self.server = viser.ViserServer(host=self.host, port=self.port, label=self.label) - self.set_scene(self.scenes[self.scene_index % len(self.scenes)]) - - def next(self) -> None: - self.server.flush() - self.server.gui.reset() - self.server.scene.reset() - self.scene_index = (self.scene_index + 1) % len(self.scenes) - print(f"Viser server started at {self.host}:{self.port}") - self.set_scene(self.scenes[self.scene_index]) - - def set_scene(self, scene: AbstractScene) -> None: - num_frames = scene.get_number_of_iterations() - # print(scene.available_camera_types) - - self.server.gui.configure_theme(dark_mode=False, control_width="large") - - # TODO: Fix lighting. Environment map can help, but cannot be freely configured. 
- # self.server.scene.configure_environment_map( - # hdri="warehouse", - # background=False, - # background_intensity=0.25, - # environment_intensity=0.5, - # ) - - with self.server.gui.add_folder("Playback"): - server_playing = True - - gui_timestep = self.server.gui.add_slider( - "Timestep", - min=0, - max=num_frames - 1, - step=1, - initial_value=0, - disabled=True, - ) - gui_next_frame = self.server.gui.add_button("Next Frame", disabled=True) - gui_prev_frame = self.server.gui.add_button("Prev Frame", disabled=True) - gui_next_scene = self.server.gui.add_button("Next Scene", disabled=False) - gui_playing = self.server.gui.add_checkbox("Playing", True) - gui_framerate = self.server.gui.add_slider("FPS", min=1, max=90, step=0.1, initial_value=10) - gui_framerate_options = self.server.gui.add_button_group("FPS options", ("10", "20", "30", "60", "90")) - - # Frame step buttons. - @gui_next_frame.on_click - def _(_) -> None: - gui_timestep.value = (gui_timestep.value + 1) % num_frames - - @gui_prev_frame.on_click - def _(_) -> None: - gui_timestep.value = (gui_timestep.value - 1) % num_frames - - @gui_next_scene.on_click - def _(_) -> None: - nonlocal server_playing - server_playing = False - - # Disable frame controls when we're playing. - @gui_playing.on_update - def _(_) -> None: - gui_timestep.disabled = gui_playing.value - gui_next_frame.disabled = gui_playing.value - gui_prev_frame.disabled = gui_playing.value - - # Set the framerate when we click one of the options. - @gui_framerate_options.on_click - def _(_) -> None: - gui_framerate.value = int(gui_framerate_options.value) - - prev_timestep = gui_timestep.value - - # Toggle frame visibility when the timestep slider changes. - @gui_timestep.on_update - def _(_) -> None: - nonlocal prev_timestep, bounding_box_handle - current_timestep = gui_timestep.value - - timer = Timer() - timer.start() - - start = time.perf_counter() - # with self.server.atomic(): - - if BOUNDING_BOX_TYPE == "mesh": - mesh = get_bounding_box_meshes(scene, gui_timestep.value) - new_bounding_box_handle = self.server.scene.add_mesh_trimesh( - "box_detections", - mesh=mesh, - visible=True, - ) - elif BOUNDING_BOX_TYPE == "lines": - lines, colors = get_bounding_box_outlines(scene, gui_timestep.value) - new_bounding_box_handle = self.server.scene.add_line_segments( - "box_detections", - points=lines, - colors=colors, - line_width=LINE_WIDTH, - ) - else: - raise ValueError(f"Unknown bounding box type: {BOUNDING_BOX_TYPE}") - - # bounding_box_handle.visible = False - # time.sleep(0.005) - # bounding_box_handle.remove() - bounding_box_handle = new_bounding_box_handle - new_bounding_box_handle.visible = True - - timer.log("Update bounding boxes") - - for camera_type in VISUALIZE_CAMERA_GUI: - camera = get_camera_if_available(scene, camera_type, gui_timestep.value) - if camera is not None: - camera_gui_handles[camera_type].image = camera.image - - camera_timer = Timer() - camera_timer.start() - import concurrent.futures - - def load_camera_data(camera_type): - camera = get_camera_if_available(scene, camera_type, gui_timestep.value) - if camera is not None: - camera_position, camera_rotation, camera_image = get_camera_values( - scene, camera, gui_timestep.value, resize_factor=RESIZE_FACTOR - ) - camera_frustum_handles[camera_type].position = camera_position - camera_frustum_handles[camera_type].wxyz = camera_rotation - camera_frustum_handles[camera_type].image = camera_image - - return camera_type, None - return camera_type, None - - with 
ThreadPoolExecutor(max_workers=len(VISUALIZE_CAMERA_FRUSTUM)) as executor: - future_to_camera = { - executor.submit(load_camera_data, camera_type): camera_type - for camera_type in VISUALIZE_CAMERA_FRUSTUM - } - - for future in concurrent.futures.as_completed(future_to_camera): - camera_type, camera_data = future.result() - - camera_timer.log("Load camera data") - - # for camera_type in VISUALIZE_CAMERA_FRUSTUM: - - # # camera = get_camera_if_available(scene, camera_type, gui_timestep.value) - # # camera_timer.log("Get camera") - - # if camera_type in camera_cache.keys(): - # camera_position, camera_rotation, camera_image = camera_cache[camera_type] - # # camera_position, camera_quaternion, camera_image = get_camera_values( - # # scene, camera, gui_timestep.value, resize_factor=RESIZE_FACTOR - # # ) - - # # camera_timer.log("Get camera values") - - # camera_frustum_handles[camera_type].position = camera_position - # camera_frustum_handles[camera_type].wxyz = camera_rotation - # camera_frustum_handles[camera_type].image = camera_image - - camera_timer.log("Update camera frustum") - camera_timer.end() - # print(camera_timer) # 0.0082 - - timer.log("Update cameras") - - if LIDAR_AVAILABLE: - try: - points, colors = get_lidar_points(scene, gui_timestep.value, LIDAR_TYPES) - except Exception as e: - # print(f"Error getting lidar points: {e}") - points = np.zeros((0, 3)) - colors = np.zeros((0, 3)) - - gui_lidar.points = points - gui_lidar.colors = colors - - # timer.log("Update lidar") - timer.end() - # print(timer) - - prev_timestep = current_timestep - - rendering_time = time.perf_counter() - start - sleep_time = 1.0 / gui_framerate.value - rendering_time - if sleep_time > 0: - time.sleep(max(sleep_time, 0.0)) - # self.server.flush() # Optional! - # print(f"Render time: {rendering_time:.3f}s, sleep time: {sleep_time:.3f}s") - - # Load in frames. 
- self.server.scene.add_frame("/map", show_axes=False) - - if BOUNDING_BOX_TYPE == "mesh": - mesh = get_bounding_box_meshes(scene, gui_timestep.value) - bounding_box_handle = self.server.scene.add_mesh_trimesh( - "box_detections", - mesh=mesh, - visible=True, - ) - elif BOUNDING_BOX_TYPE == "lines": - lines, colors = get_bounding_box_outlines(scene, gui_timestep.value) - bounding_box_handle = self.server.scene.add_line_segments( - "box_detections", - points=lines, - colors=colors, - line_width=LINE_WIDTH, - ) - else: - raise ValueError(f"Unknown bounding box type: {BOUNDING_BOX_TYPE}") - - camera_gui_handles: Dict[CameraType, viser.GuiImageHandle] = {} - camera_frustum_handles: Dict[CameraType, viser.CameraFrustumHandle] = {} - - for camera_type in VISUALIZE_CAMERA_GUI: - camera = get_camera_if_available(scene, camera_type, gui_timestep.value) - if camera is not None: - with self.server.gui.add_folder(f"Camera {camera_type.serialize()}"): - camera_gui_handles[camera_type] = self.server.gui.add_image( - image=camera.image, - label=camera_type.serialize(), - ) - - for camera_type in VISUALIZE_CAMERA_FRUSTUM: - camera = get_camera_if_available(scene, camera_type, gui_timestep.value) - if camera is not None: - camera_position, camera_quaternion, camera_image = get_camera_values( - scene, camera, gui_timestep.value, resize_factor=RESIZE_FACTOR - ) - camera_frustum_handles[camera_type] = self.server.scene.add_camera_frustum( - f"camera_frustum_{camera_type.serialize()}", - fov=camera.metadata.fov_y, - aspect=camera.metadata.aspect_ratio, - scale=CAMERA_SCALE, - image=camera_image, - position=camera_position, - wxyz=camera_quaternion, - ) - - if LIDAR_AVAILABLE: - try: - points, colors = get_lidar_points(scene, gui_timestep.value, LIDAR_TYPES) - except Exception as e: - # print(f"Error getting lidar points: {e}") - points = np.zeros((0, 3)) - colors = np.zeros((0, 3)) - - gui_lidar = self.server.scene.add_point_cloud( - name="LiDAR", - points=points, - colors=colors, - point_size=LIDAR_POINT_SIZE, - point_shape="circle", - ) - - if MAP_AVAILABLE: - for name, mesh in get_map_meshes(scene).items(): - self.server.scene.add_mesh_trimesh(f"/map/{name}", mesh, visible=True) - - # centerlines, __, __, road_edges = get_map_lines(scene) - # for i, centerline in enumerate(centerlines): - # self.server.scene.add_line_segments( - # "/map/centerlines", - # centerlines, - # colors=[[BLACK.rgb]], - # line_width=LINE_WIDTH, - # ) - # self.server.scene.add_line_segments( - # "/map/left_boundary", - # left_boundaries, - # colors=[[TAB_10[2].rgb]], - # line_width=LINE_WIDTH, - # ) - # self.server.scene.add_line_segments( - # "/map/right_boundary",clear - # right_boundaries, - # colors=[[TAB_10[3].rgb]], - # line_width=LINE_WIDTH, - # ) - # print(centerlines.shape, road_edges.shape) - # self.server.scene.add_line_segments( - # "/map/road_edges", - # road_edges, - # colors=[[BLACK.rgb]], - # line_width=LINE_WIDTH, - # ) - - # Playback update loop. - prev_timestep = gui_timestep.value - while server_playing: - # Update the timestep if we're playing. 
- if gui_playing.value: - gui_timestep.value = (gui_timestep.value + 1) % num_frames - - self.server.flush() - self.next() diff --git a/d123/common/visualization/viser/utils.py b/d123/common/visualization/viser/utils.py deleted file mode 100644 index 74615d46..00000000 --- a/d123/common/visualization/viser/utils.py +++ /dev/null @@ -1,356 +0,0 @@ -from typing import Final, List, Optional, Tuple - -import cv2 -import numpy as np -import numpy.typing as npt -import trimesh - -from d123.common.visualization.color.color import TAB_10, Color -from d123.common.visualization.color.default import BOX_DETECTION_CONFIG, MAP_SURFACE_CONFIG -from d123.datatypes.detections.detection_types import DetectionType -from d123.datatypes.maps.abstract_map import MapLayer -from d123.datatypes.maps.abstract_map_objects import AbstractLane, AbstractSurfaceMapObject -from d123.datatypes.scene.abstract_scene import AbstractScene -from d123.datatypes.sensors.camera import Camera, CameraType -from d123.datatypes.sensors.lidar import LiDARType -from d123.geometry import Corners3DIndex, Point3D, Point3DIndex, Polyline3D, StateSE3, StateSE3Index -from d123.geometry.geometry_index import BoundingBoxSE3Index -from d123.geometry.transform.transform_euler_se3 import convert_relative_to_absolute_points_3d_array -from d123.geometry.transform.transform_se3 import convert_relative_to_absolute_se3_array -from d123.geometry.utils.bounding_box_utils import ( - bbse3_array_to_corners_array, - corners_array_to_3d_mesh, -) - -# TODO: Refactor this file. -# TODO: Add general utilities for 3D primitives and mesh support. - -MAP_RADIUS: Final[float] = 200 -BRIGHTNESS_FACTOR: Final[float] = 1.0 - - -def configure_trimesh(mesh: trimesh.Trimesh, color: Color): - # base_color = [r / 255.0 for r in color.rgba] - mesh.visual.face_colors = color.rgba - - # pbr_material = trimesh.visual.material.PBRMaterial( - # baseColorFactor=base_color, # Your desired color (RGBA, 0-1 range) - # metallicFactor=0.0, # 0.0 = non-metallic (more matte) - # roughnessFactor=1.0, # 0.8 = quite rough (less shiny, 0=mirror, 1=completely rough) - # emissiveFactor=[0.0, 0.0, 0.0], # No emission - # alphaCutoff=0.9, # Alpha threshold for transparency - # doubleSided=True, # Single-sided material - # ) - # mesh.visual.material = pbr_material - # mesh.visual = mesh.visual.to_texture() - - return mesh - - -def get_bounding_box_meshes(scene: AbstractScene, iteration: int): - - initial_ego_vehicle_state = scene.get_ego_state_at_iteration(0) - ego_vehicle_state = scene.get_ego_state_at_iteration(iteration) - box_detections = scene.get_box_detections_at_iteration(iteration) - - # Load boxes to visualize, including ego vehicle at the last position - boxes = [bd.bounding_box_se3 for bd in box_detections.box_detections] + [ego_vehicle_state.bounding_box_se3] - boxes_type = [bd.metadata.detection_type for bd in box_detections.box_detections] + [DetectionType.EGO] - - # create meshes for all boxes - box_se3_array = np.array([box.array for box in boxes]) - box_se3_array[..., BoundingBoxSE3Index.XYZ] -= initial_ego_vehicle_state.center_se3.array[StateSE3Index.XYZ] - box_corners_array = bbse3_array_to_corners_array(box_se3_array) - box_vertices, box_faces = corners_array_to_3d_mesh(box_corners_array) - - # Create colors for each box based on detection type - box_colors = [] - for box_type in boxes_type: - box_colors.append(BOX_DETECTION_CONFIG[box_type].fill_color.rgba) - - # Convert to numpy array and repeat for each vertex - box_colors = np.array(box_colors) - vertex_colors = 
np.repeat(box_colors, 8, axis=0) # 8 vertices per box - - # Create trimesh object - mesh = trimesh.Trimesh(vertices=box_vertices, faces=box_faces) - mesh.visual.vertex_colors = vertex_colors - - return mesh - - -def _get_bounding_box_lines_from_array(corners_array: npt.NDArray[np.float64]) -> npt.NDArray[np.float64]: - assert corners_array.shape[-1] == len(Point3DIndex) - assert corners_array.shape[-2] == len(Corners3DIndex) - assert corners_array.ndim >= 2 - - index_pairs = [ - (Corners3DIndex.FRONT_LEFT_BOTTOM, Corners3DIndex.FRONT_RIGHT_BOTTOM), - (Corners3DIndex.FRONT_RIGHT_BOTTOM, Corners3DIndex.BACK_RIGHT_BOTTOM), - (Corners3DIndex.BACK_RIGHT_BOTTOM, Corners3DIndex.BACK_LEFT_BOTTOM), - (Corners3DIndex.BACK_LEFT_BOTTOM, Corners3DIndex.FRONT_LEFT_BOTTOM), - (Corners3DIndex.FRONT_LEFT_TOP, Corners3DIndex.FRONT_RIGHT_TOP), - (Corners3DIndex.FRONT_RIGHT_TOP, Corners3DIndex.BACK_RIGHT_TOP), - (Corners3DIndex.BACK_RIGHT_TOP, Corners3DIndex.BACK_LEFT_TOP), - (Corners3DIndex.BACK_LEFT_TOP, Corners3DIndex.FRONT_LEFT_TOP), - (Corners3DIndex.FRONT_LEFT_BOTTOM, Corners3DIndex.FRONT_LEFT_TOP), - (Corners3DIndex.FRONT_RIGHT_BOTTOM, Corners3DIndex.FRONT_RIGHT_TOP), - (Corners3DIndex.BACK_RIGHT_BOTTOM, Corners3DIndex.BACK_RIGHT_TOP), - (Corners3DIndex.BACK_LEFT_BOTTOM, Corners3DIndex.BACK_LEFT_TOP), - ] - - # Handle both single box and batched cases - if corners_array.ndim == 2: - # Single box case: (8, 3) - lines = np.zeros((len(index_pairs), 2, len(Point3DIndex)), dtype=np.float64) - for i, (start_idx, end_idx) in enumerate(index_pairs): - lines[i, 0] = corners_array[start_idx] - lines[i, 1] = corners_array[end_idx] - else: - # Batched case: (..., 8, 3) - batch_shape = corners_array.shape[:-2] - lines = np.zeros(batch_shape + (len(index_pairs), 2, len(Point3DIndex)), dtype=np.float64) - for i, (start_idx, end_idx) in enumerate(index_pairs): - lines[..., i, 0, :] = corners_array[..., start_idx, :] - lines[..., i, 1, :] = corners_array[..., end_idx, :] - - return lines - - -def get_bounding_box_outlines(scene: AbstractScene, iteration: int): - - initial_ego_vehicle_state = scene.get_ego_state_at_iteration(0) - ego_vehicle_state = scene.get_ego_state_at_iteration(iteration) - box_detections = scene.get_box_detections_at_iteration(iteration) - - # Load boxes to visualize, including ego vehicle at the last position - boxes = [bd.bounding_box_se3 for bd in box_detections.box_detections] + [ego_vehicle_state.bounding_box_se3] - boxes_type = [bd.metadata.detection_type for bd in box_detections.box_detections] + [DetectionType.EGO] - - # Create lines for all boxes - box_se3_array = np.array([box.array for box in boxes]) - box_se3_array[..., BoundingBoxSE3Index.XYZ] -= initial_ego_vehicle_state.center_se3.array[StateSE3Index.XYZ] - box_corners_array = bbse3_array_to_corners_array(box_se3_array) - box_lines = _get_bounding_box_lines_from_array(box_corners_array) - - # Create colors for all boxes - box_colors = np.zeros(box_lines.shape, dtype=np.float32) - for i, box_type in enumerate(boxes_type): - box_colors[i, ...] 
= BOX_DETECTION_CONFIG[box_type].fill_color.set_brightness(BRIGHTNESS_FACTOR).rgb_norm - - box_lines = box_lines.reshape(-1, *box_lines.shape[2:]) - box_colors = box_colors.reshape(-1, *box_colors.shape[2:]) - - return box_lines, box_colors - - -def get_map_meshes(scene: AbstractScene): - initial_ego_vehicle_state = scene.get_ego_state_at_iteration(0) - center = initial_ego_vehicle_state.center_se3 - map_layers = [ - MapLayer.LANE_GROUP, - MapLayer.LANE, - MapLayer.WALKWAY, - MapLayer.CROSSWALK, - MapLayer.CARPARK, - MapLayer.GENERIC_DRIVABLE, - ] - - map_objects_dict = scene.map_api.get_proximal_map_objects(center.point_2d, radius=MAP_RADIUS, layers=map_layers) - output = {} - - for map_layer in map_objects_dict.keys(): - surface_meshes = [] - for map_surface in map_objects_dict[map_layer]: - map_surface: AbstractSurfaceMapObject - trimesh_mesh = map_surface.trimesh_mesh - if map_layer in [ - MapLayer.WALKWAY, - MapLayer.CROSSWALK, - MapLayer.GENERIC_DRIVABLE, - MapLayer.CARPARK, - ]: - # Push meshes up by a few centimeters to avoid overlap with the ground in the visualization. - trimesh_mesh.vertices -= Point3D(x=center.x, y=center.y, z=center.z - 0.1).array - else: - trimesh_mesh.vertices -= Point3D(x=center.x, y=center.y, z=center.z).array - - if not scene.log_metadata.map_has_z: - trimesh_mesh.vertices += Point3D( - x=0, y=0, z=center.z - initial_ego_vehicle_state.vehicle_parameters.height / 2 - ).array - - trimesh_mesh = configure_trimesh(trimesh_mesh, MAP_SURFACE_CONFIG[map_layer].fill_color) - surface_meshes.append(trimesh_mesh) - output[f"{map_layer.serialize()}"] = trimesh.util.concatenate(surface_meshes) - return output - - -def get_map_lines(scene: AbstractScene): - map_layers = [MapLayer.LANE, MapLayer.ROAD_EDGE] - initial_ego_vehicle_state = scene.get_ego_state_at_iteration(0) - center = initial_ego_vehicle_state.center_se3 - map_objects_dict = scene.map_api.get_proximal_map_objects(center.point_2d, radius=MAP_RADIUS, layers=map_layers) - - def polyline_to_segments(polyline: Polyline3D) -> npt.NDArray[np.float64]: - polyline_array = polyline.array - center.point_3d.array - polyline_array = polyline_array.reshape(-1, 1, 3) - polyline_array = np.concatenate([polyline_array[:-1], polyline_array[1:]], axis=1) - return polyline_array - - centerlines, right_boundaries, left_boundaries, road_edges = [], [], [], [] - for lane in map_objects_dict[MapLayer.LANE]: - lane: AbstractLane - - centerlines.append(polyline_to_segments(lane.centerline)) - right_boundaries.append(polyline_to_segments(lane.right_boundary)) - left_boundaries.append(polyline_to_segments(lane.left_boundary)) - - for road_edge in map_objects_dict[MapLayer.ROAD_EDGE]: - road_edges.append(polyline_to_segments(road_edge.polyline_3d)) - - centerlines = np.concatenate(centerlines, axis=0) - left_boundaries = np.concatenate(left_boundaries, axis=0) - right_boundaries = np.concatenate(right_boundaries, axis=0) - road_edges = np.concatenate(road_edges, axis=0) - - if not scene.log_metadata.map_has_z: - # If the map does not have a z-coordinate, we set it to the height of the ego vehicle. 
- centerlines[:, :, 2] += center.z - initial_ego_vehicle_state.vehicle_parameters.height / 2 - left_boundaries[:, :, 2] += center.z - initial_ego_vehicle_state.vehicle_parameters.height / 2 - right_boundaries[:, :, 2] += center.z - initial_ego_vehicle_state.vehicle_parameters.height / 2 - road_edges[:, :, 2] += center.z - initial_ego_vehicle_state.vehicle_parameters.height / 2 - - return centerlines, left_boundaries, right_boundaries, road_edges - - -def get_trimesh_from_boundaries( - left_boundary: Polyline3D, right_boundary: Polyline3D, resolution: float = 1.0 -) -> trimesh.Trimesh: - resolution = 1.0 # [m] - - average_length = (left_boundary.length + right_boundary.length) / 2 - num_samples = int(average_length // resolution) + 1 - left_boundary_array = _interpolate_polyline(left_boundary, num_samples=num_samples) - right_boundary_array = _interpolate_polyline(right_boundary, num_samples=num_samples) - return _create_lane_mesh_from_boundary_arrays(left_boundary_array, right_boundary_array) - - -def _interpolate_polyline(polyline_3d: Polyline3D, num_samples: int = 20) -> npt.NDArray[np.float64]: - if num_samples < 2: - num_samples = 2 - distances = np.linspace(0, polyline_3d.length, num=num_samples, endpoint=True, dtype=np.float64) - return polyline_3d.interpolate(distances) - - -def _create_lane_mesh_from_boundary_arrays( - left_boundary_array: npt.NDArray[np.float64], - right_boundary_array: npt.NDArray[np.float64], -) -> trimesh.Trimesh: - - # Ensure both polylines have the same number of points - if left_boundary_array.shape[0] != right_boundary_array.shape[0]: - raise ValueError("Both polylines must have the same number of points") - - n_points = left_boundary_array.shape[0] - - # Combine vertices from both polylines - vertices = np.vstack([left_boundary_array, right_boundary_array]) - - # Create faces by connecting corresponding points on the two polylines - faces = [] - for i in range(n_points - 1): - faces.append([i, i + n_points, i + 1]) - faces.append([i + 1, i + n_points, i + n_points + 1]) - - faces = np.array(faces) - mesh = trimesh.Trimesh(vertices=vertices, faces=faces) - mesh.visual.face_colors = MAP_SURFACE_CONFIG[MapLayer.LANE].fill_color.rgba - return mesh - - -def get_camera_if_available(scene: AbstractScene, camera_type: CameraType, iteration: int) -> Optional[Camera]: - camera: Optional[Camera] = None - if camera_type in scene.available_camera_types: - camera: Camera = scene.get_camera_at_iteration(iteration, camera_type) - return camera - - -def get_camera_values( - scene: AbstractScene, camera: Camera, iteration: int, resize_factor: Optional[float] = None -) -> Tuple[npt.NDArray[np.float64], npt.NDArray[np.float64], npt.NDArray[np.uint8]]: - - initial_point_3d = scene.get_ego_state_at_iteration(0).center_se3.point_3d - rear_axle = scene.get_ego_state_at_iteration(iteration).rear_axle_se3 - - rear_axle_array = rear_axle.array - rear_axle_array[:3] -= initial_point_3d.array - rear_axle = StateSE3.from_array(rear_axle_array, copy=False) - - camera_to_ego = camera.extrinsic # 4x4 transformation from camera to ego frame - camera_se3 = StateSE3.from_transformation_matrix(camera_to_ego) - - camera_se3_array = convert_relative_to_absolute_se3_array(origin=rear_axle, se3_array=camera_se3.array) - abs_camera_se3 = StateSE3.from_array(camera_se3_array, copy=False) - - # Camera transformation in ego frame - camera_position = abs_camera_se3.point_3d.array - camera_rotation = abs_camera_se3.quaternion.array - - camera_image = camera.image - - if resize_factor is not None: - 
new_width = int(camera_image.shape[1] * resize_factor) - new_height = int(camera_image.shape[0] * resize_factor) - camera_image = cv2.resize(camera_image, (new_width, new_height), interpolation=cv2.INTER_LINEAR) - - return camera_position, camera_rotation, camera_image - - -def get_lidar_points( - scene: AbstractScene, iteration: int, lidar_types: List[LiDARType] -) -> Tuple[npt.NDArray[np.float32], npt.NDArray[np.float32]]: - - initial_ego_vehicle_state = scene.get_ego_state_at_iteration(0) - current_ego_vehicle_state = scene.get_ego_state_at_iteration(iteration) - - def float_to_rgb(values: npt.NDArray[np.float32], cmap_name: str = "viridis") -> npt.NDArray[np.float32]: - """ - Converts an array of float values to RGB colors using a matplotlib colormap. - Normalizes values to [0, 1] using min-max scaling. - Returns an array of shape (N, 3) with RGB values in [0, 1]. - """ - import matplotlib.pyplot as plt - - vmin = np.min(values) - vmax = np.max(values) - if vmax > vmin: - normed = (values - vmin) / (vmax - vmin) - else: - normed = np.zeros_like(values) - cmap = plt.get_cmap(cmap_name) - rgb = cmap(normed)[:, :3] # Ignore alpha channel - return rgb.astype(np.float32) - - points_ = [] - colors_ = [] - for lidar_idx, lidar_type in enumerate(lidar_types): - if lidar_type not in scene.available_lidar_types: - continue - lidar = scene.get_lidar_at_iteration(iteration, lidar_type) - - # 1. convert points to ego frame - points = lidar.xyz - - # 2. convert points to world frame - origin = current_ego_vehicle_state.rear_axle_se3 - points = convert_relative_to_absolute_points_3d_array(origin, points) - points = points - initial_ego_vehicle_state.center_se3.point_3d.array - points_.append(points) - colors_.append([TAB_10[lidar_idx % len(TAB_10)].rgb] * points.shape[0]) - # colors_.append(float_to_rgb(lidar.intensity, cmap_name="viridis")) - - points_ = np.concatenate(points_, axis=0) if points_ else np.empty((0, 3), dtype=np.float32) - colors_ = np.concatenate(colors_, axis=0) if colors_ else np.empty((0, 3), dtype=np.float32) - - return points_, colors_ diff --git a/d123/common/visualization/viser/viser_config.py b/d123/common/visualization/viser/viser_config.py new file mode 100644 index 00000000..59d5256d --- /dev/null +++ b/d123/common/visualization/viser/viser_config.py @@ -0,0 +1,71 @@ +from dataclasses import dataclass, field +from typing import List, Literal, Optional, Tuple + +from d123.datatypes.sensors.camera import CameraType +from d123.datatypes.sensors.lidar import LiDARType + +all_camera_types: List[CameraType] = [ + CameraType.CAM_F0, + CameraType.CAM_B0, + CameraType.CAM_L0, + CameraType.CAM_L1, + CameraType.CAM_L2, + CameraType.CAM_R0, + CameraType.CAM_R1, + CameraType.CAM_R2, +] + +all_lidar_types: List[LiDARType] = [ + LiDARType.LIDAR_MERGED, + LiDARType.LIDAR_TOP, + LiDARType.LIDAR_FRONT, + LiDARType.LIDAR_SIDE_LEFT, + LiDARType.LIDAR_SIDE_RIGHT, + LiDARType.LIDAR_BACK, +] + + +@dataclass +class ViserConfig: + + # Server + server_host: str = "localhost" + server_port: int = 8080 + server_label: str = "D123 Viser Server" + server_verbose: bool = True + + # Theme + theme_control_layout: Literal["floating", "collapsible", "fixed"] = "floating" + theme_control_width: Literal["small", "medium", "large"] = "large" + theme_dark_mode: bool = False + theme_show_logo: bool = True + theme_show_share_button: bool = True + theme_brand_color: Optional[Tuple[int, int, int]] = None + + # Map + map_visible: bool = True + map_radius: float = 1000.0 # [m] + map_non_road_z_offset: float = 0.0 # 
small translation to place crosswalks, parking, etc. on top of the road + + # Bounding boxes + bounding_box_visible: bool = True + bounding_box_type: Literal["mesh", "lines"] = "mesh" + bounding_box_line_width: float = 4.0 + + # Cameras + # -> Frustum + camera_frustum_visible: bool = True + camera_frustum_types: List[CameraType] = field(default_factory=lambda: all_camera_types.copy()) + camera_frustum_frustum_scale: float = 1.0 + camera_frustum_image_scale: float = 0.25 # Resize factor for the camera image shown on the frustum (<1.0 for speed) + + # -> GUI + camera_gui_visible: bool = True + camera_gui_types: List[CameraType] = field(default_factory=lambda: [CameraType.CAM_F0].copy()) + camera_gui_image_scale: float = 0.25 # Resize factor for the camera image shown in the GUI (<1.0 for speed) + + # LiDAR + lidar_visible: bool = True + lidar_types: List[LiDARType] = field(default_factory=lambda: all_lidar_types.copy()) + lidar_point_size: float = 0.05 + lidar_point_shape: Literal["square", "diamond", "circle", "rounded", "sparkle"] = "circle" diff --git a/d123/common/visualization/viser/viser_viewer.py b/d123/common/visualization/viser/viser_viewer.py new file mode 100644 index 00000000..9774918f --- /dev/null +++ b/d123/common/visualization/viser/viser_viewer.py @@ -0,0 +1,245 @@ +import logging +import time +from typing import Dict, List, Optional + +import viser +from viser.theme import TitlebarButton, TitlebarConfig, TitlebarImage + +from d123.common.visualization.viser.elements import ( + add_box_detections_to_viser_server, + add_camera_frustums_to_viser_server, + add_camera_gui_to_viser_server, + add_lidar_pc_to_viser_server, + add_map_to_viser_server, +) +from d123.common.visualization.viser.viser_config import ViserConfig +from d123.datatypes.scene.abstract_scene import AbstractScene +from d123.datatypes.sensors.camera import CameraType +from d123.datatypes.sensors.lidar import LiDARType +from d123.datatypes.vehicle_state.ego_state import EgoStateSE3 + +logger = logging.getLogger(__name__) + + +all_camera_types: List[CameraType] = [ + CameraType.CAM_F0, + CameraType.CAM_B0, + CameraType.CAM_L0, + CameraType.CAM_L1, + CameraType.CAM_L2, + CameraType.CAM_R0, + CameraType.CAM_R1, + CameraType.CAM_R2, +] + +all_lidar_types: List[LiDARType] = [ + LiDARType.LIDAR_MERGED, + LiDARType.LIDAR_TOP, + LiDARType.LIDAR_FRONT, + LiDARType.LIDAR_SIDE_LEFT, + LiDARType.LIDAR_SIDE_RIGHT, + LiDARType.LIDAR_BACK, +] + + +def _build_viser_server(viser_config: ViserConfig) -> viser.ViserServer: + server = viser.ViserServer( + host=viser_config.server_host, + port=viser_config.server_port, + label=viser_config.server_label, + verbose=viser_config.server_verbose, + ) + + # TODO: Fix links and logo. 
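+ # The titlebar content below is adapted from the viser/nerfstudio example;
+ # the URLs, icons, and logo image are placeholders until project-specific
+ # links exist.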
+ buttons = ( + TitlebarButton( + text="Getting Started", + icon=None, + href="https://nerf.studio", + ), + TitlebarButton( + text="Github", + icon="GitHub", + href="https://github.com/nerfstudio-project/nerfstudio", + ), + TitlebarButton( + text="Documentation", + icon="Description", + href="https://docs.nerf.studio", + ), + ) + image = TitlebarImage( + image_url_light="https://docs.nerf.studio/_static/imgs/logo.png", + image_url_dark="https://docs.nerf.studio/_static/imgs/logo-dark.png", + image_alt="NerfStudio Logo", + href="https://docs.nerf.studio/", + ) + titlebar_theme = TitlebarConfig(buttons=buttons, image=image) + + server.gui.configure_theme( + titlebar_content=titlebar_theme, + control_layout=viser_config.theme_control_layout, + control_width=viser_config.theme_control_width, + dark_mode=viser_config.theme_dark_mode, + show_logo=viser_config.theme_show_logo, + show_share_button=viser_config.theme_show_share_button, + brand_color=viser_config.theme_brand_color, + ) + + return server + + +class ViserViewer: + def __init__( + self, + scenes: List[AbstractScene], + viser_config: ViserConfig = ViserConfig(), + scene_index: int = 0, + ) -> None: + assert len(scenes) > 0, "At least one scene must be provided." + + self._scenes = scenes + self._viser_config = viser_config + self._scene_index = scene_index + + self._viser_server = _build_viser_server(self._viser_config) + self.set_scene(self._scenes[self._scene_index % len(self._scenes)]) + + def next(self) -> None: + self._viser_server.flush() + self._viser_server.gui.reset() + self._viser_server.scene.reset() + self._scene_index = (self._scene_index + 1) % len(self._scenes) + self.set_scene(self._scenes[self._scene_index]) + + def set_scene(self, scene: AbstractScene) -> None: + num_frames = scene.get_number_of_iterations() + initial_ego_state: EgoStateSE3 = scene.get_ego_state_at_iteration(0) + + with self._viser_server.gui.add_folder("Playback"): + server_playing = True + gui_timestep = self._viser_server.gui.add_slider( + "Timestep", + min=0, + max=num_frames - 1, + step=1, + initial_value=0, + disabled=True, + ) + gui_next_frame = self._viser_server.gui.add_button("Next Frame", disabled=True) + gui_prev_frame = self._viser_server.gui.add_button("Prev Frame", disabled=True) + gui_next_scene = self._viser_server.gui.add_button("Next Scene", disabled=False) + gui_playing = self._viser_server.gui.add_checkbox("Playing", True) + gui_framerate = self._viser_server.gui.add_slider("FPS", min=1, max=100, step=1, initial_value=10) + gui_framerate_options = self._viser_server.gui.add_button_group( + "FPS options", ("10", "25", "50", "75", "100") + ) + + # Frame step buttons. + @gui_next_frame.on_click + def _(_) -> None: + gui_timestep.value = (gui_timestep.value + 1) % num_frames + + @gui_prev_frame.on_click + def _(_) -> None: + gui_timestep.value = (gui_timestep.value - 1) % num_frames + + @gui_next_scene.on_click + def _(_) -> None: + nonlocal server_playing + server_playing = False + + # Disable frame controls when we're playing. + @gui_playing.on_update + def _(_) -> None: + gui_timestep.disabled = gui_playing.value + gui_next_frame.disabled = gui_playing.value + gui_prev_frame.disabled = gui_playing.value + + # Set the framerate when we click one of the options. + @gui_framerate_options.on_click + def _(_) -> None: + gui_framerate.value = int(gui_framerate_options.value) + + # Re-render the scene elements when the timestep slider changes. 
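+            # (The handler below re-draws the boxes, camera frustums, GUI images, and
+            # lidar points for the new timestep, then sleeps off any remaining frame
+            # budget so playback tracks the requested FPS.)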
+ @gui_timestep.on_update + def _(_) -> None: + start = time.perf_counter() + add_box_detections_to_viser_server( + scene, + gui_timestep.value, + initial_ego_state, + self._viser_server, + self._viser_config, + ) + add_camera_frustums_to_viser_server( + scene, + gui_timestep.value, + initial_ego_state, + self._viser_server, + self._viser_config, + camera_frustum_handles, + ) + add_camera_gui_to_viser_server( + scene, + gui_timestep.value, + self._viser_server, + self._viser_config, + camera_gui_handles, + ) + add_lidar_pc_to_viser_server( + scene, + gui_timestep.value, + initial_ego_state, + self._viser_server, + self._viser_config, + lidar_pc_handle, + ) + rendering_time = time.perf_counter() - start + sleep_time = 1.0 / gui_framerate.value - rendering_time + if sleep_time > 0: + time.sleep(max(sleep_time, 0.0)) + + camera_frustum_handles: Dict[CameraType, viser.CameraFrustumHandle] = {} + camera_gui_handles: Dict[CameraType, viser.GuiImageHandle] = {} + lidar_pc_handle: Optional[viser.PointCloudHandle] = None + + add_box_detections_to_viser_server( + scene, + gui_timestep.value, + initial_ego_state, + self._viser_server, + self._viser_config, + ) + add_camera_frustums_to_viser_server( + scene, + gui_timestep.value, + initial_ego_state, + self._viser_server, + self._viser_config, + camera_frustum_handles, + ) + add_camera_gui_to_viser_server( + scene, + gui_timestep.value, + self._viser_server, + self._viser_config, + camera_gui_handles, + ) + add_lidar_pc_to_viser_server( + scene, + gui_timestep.value, + initial_ego_state, + self._viser_server, + self._viser_config, + lidar_pc_handle, + ) + add_map_to_viser_server(scene, initial_ego_state, self._viser_server, self._viser_config) + + # Playback update loop. + while server_playing: + if gui_playing.value: + gui_timestep.value = (gui_timestep.value + 1) % num_frames + + self._viser_server.flush() + self.next() diff --git a/d123/datatypes/maps/gpkg/gpkg_map_objects.py b/d123/datatypes/maps/gpkg/gpkg_map_objects.py index 2326490b..ff44b0d6 100644 --- a/d123/datatypes/maps/gpkg/gpkg_map_objects.py +++ b/d123/datatypes/maps/gpkg/gpkg_map_objects.py @@ -10,7 +10,6 @@ import shapely.geometry as geom import trimesh -from d123.common.visualization.viser.utils import get_trimesh_from_boundaries from d123.datatypes.maps.abstract_map_objects import ( AbstractCarpark, AbstractCrosswalk, @@ -24,7 +23,7 @@ AbstractSurfaceMapObject, AbstractWalkway, ) -from d123.datatypes.maps.gpkg.utils import get_row_with_value +from d123.datatypes.maps.gpkg.utils import get_row_with_value, get_trimesh_from_boundaries from d123.datatypes.maps.map_datatypes import RoadEdgeType, RoadLineType from d123.geometry import Point3DIndex, Polyline3D diff --git a/d123/datatypes/maps/gpkg/utils.py b/d123/datatypes/maps/gpkg/utils.py index 8b359f75..a1d382f7 100644 --- a/d123/datatypes/maps/gpkg/utils.py +++ b/d123/datatypes/maps/gpkg/utils.py @@ -2,8 +2,12 @@ import geopandas as gpd import numpy as np +import numpy.typing as npt +import trimesh from shapely import wkt +from d123.geometry.polyline import Polyline3D + def load_gdf_with_geometry_columns(gdf: gpd.GeoDataFrame, geometry_column_names: List[str] = []): # TODO: refactor @@ -46,3 +50,41 @@ def get_row_with_value(elements: gpd.geodataframe.GeoDataFrame, column_label: st f"{len(matching_rows)} matching keys found. Expected to only find one." 
"Try using get_all_rows_with_value" ) return matching_rows.iloc[0] + + +def get_trimesh_from_boundaries( + left_boundary: Polyline3D, right_boundary: Polyline3D, resolution: float = 0.25 +) -> trimesh.Trimesh: + + def _interpolate_polyline(polyline_3d: Polyline3D, num_samples: int) -> npt.NDArray[np.float64]: + if num_samples < 2: + num_samples = 2 + distances = np.linspace(0, polyline_3d.length, num=num_samples, endpoint=True, dtype=np.float64) + return polyline_3d.interpolate(distances) + + average_length = (left_boundary.length + right_boundary.length) / 2 + num_samples = int(average_length // resolution) + 1 + left_boundary_array = _interpolate_polyline(left_boundary, num_samples) + right_boundary_array = _interpolate_polyline(right_boundary, num_samples) + return _create_lane_mesh_from_boundary_arrays(left_boundary_array, right_boundary_array) + + +def _create_lane_mesh_from_boundary_arrays( + left_boundary_array: npt.NDArray[np.float64], right_boundary_array: npt.NDArray[np.float64] +) -> trimesh.Trimesh: + + # Ensure both polylines have the same number of points + if left_boundary_array.shape[0] != right_boundary_array.shape[0]: + raise ValueError("Both polylines must have the same number of points") + + n_points = left_boundary_array.shape[0] + vertices = np.vstack([left_boundary_array, right_boundary_array]) + + faces = [] + for i in range(n_points - 1): + faces.append([i, i + n_points, i + 1]) + faces.append([i + 1, i + n_points, i + n_points + 1]) + + faces = np.array(faces) + mesh = trimesh.Trimesh(vertices=vertices, faces=faces) + return mesh diff --git a/d123/datatypes/scene/abstract_scene.py b/d123/datatypes/scene/abstract_scene.py index a3d2ccfe..af66541e 100644 --- a/d123/datatypes/scene/abstract_scene.py +++ b/d123/datatypes/scene/abstract_scene.py @@ -59,31 +59,31 @@ def get_timepoint_at_iteration(self, iteration: int) -> TimePoint: raise NotImplementedError @abc.abstractmethod - def get_ego_state_at_iteration(self, iteration: int) -> EgoStateSE3: + def get_ego_state_at_iteration(self, iteration: int) -> Optional[EgoStateSE3]: raise NotImplementedError @abc.abstractmethod - def get_box_detections_at_iteration(self, iteration: int) -> BoxDetectionWrapper: + def get_box_detections_at_iteration(self, iteration: int) -> Optional[BoxDetectionWrapper]: raise NotImplementedError @abc.abstractmethod - def get_traffic_light_detections_at_iteration(self, iteration: int) -> TrafficLightDetectionWrapper: + def get_traffic_light_detections_at_iteration(self, iteration: int) -> Optional[TrafficLightDetectionWrapper]: raise NotImplementedError @abc.abstractmethod - def get_detection_recording_at_iteration(self, iteration: int) -> DetectionRecording: + def get_detection_recording_at_iteration(self, iteration: int) -> Optional[DetectionRecording]: raise NotImplementedError @abc.abstractmethod - def get_route_lane_group_ids(self, iteration: int) -> List[int]: + def get_route_lane_group_ids(self, iteration: int) -> Optional[List[int]]: raise NotImplementedError @abc.abstractmethod - def get_camera_at_iteration(self, iteration: int, camera_type: CameraType) -> Camera: + def get_camera_at_iteration(self, iteration: int, camera_type: CameraType) -> Optional[Camera]: raise NotImplementedError @abc.abstractmethod - def get_lidar_at_iteration(self, iteration: int, lidar_type: LiDARType) -> LiDAR: + def get_lidar_at_iteration(self, iteration: int, lidar_type: LiDARType) -> Optional[LiDAR]: raise NotImplementedError def open(self) -> None: diff --git a/d123/datatypes/scene/arrow/arrow_scene.py 
b/d123/datatypes/scene/arrow/arrow_scene.py index 0fc61ba8..8f61397a 100644 --- a/d123/datatypes/scene/arrow/arrow_scene.py +++ b/d123/datatypes/scene/arrow/arrow_scene.py @@ -132,52 +132,62 @@ def get_timepoint_at_iteration(self, iteration: int) -> TimePoint: self._lazy_initialize() return get_timepoint_from_arrow_table(self._recording_table, self._get_table_index(iteration)) - def get_ego_state_at_iteration(self, iteration: int) -> EgoStateSE3: + def get_ego_state_at_iteration(self, iteration: int) -> Optional[EgoStateSE3]: self._lazy_initialize() return get_ego_vehicle_state_from_arrow_table( self._recording_table, self._get_table_index(iteration), self._vehicle_parameters ) - def get_box_detections_at_iteration(self, iteration: int) -> BoxDetectionWrapper: + def get_box_detections_at_iteration(self, iteration: int) -> Optional[BoxDetectionWrapper]: + # TODO: Make box detections optional in ArrowScene self._lazy_initialize() return get_box_detections_from_arrow_table(self._recording_table, self._get_table_index(iteration)) - def get_traffic_light_detections_at_iteration(self, iteration: int) -> TrafficLightDetectionWrapper: + def get_traffic_light_detections_at_iteration(self, iteration: int) -> Optional[TrafficLightDetectionWrapper]: + # TODO: Make traffic lights optional in ArrowScene self._lazy_initialize() return get_traffic_light_detections_from_arrow_table(self._recording_table, self._get_table_index(iteration)) - def get_detection_recording_at_iteration(self, iteration: int) -> DetectionRecording: + def get_detection_recording_at_iteration(self, iteration: int) -> Optional[DetectionRecording]: + # TODO: Make detection recording optional in ArrowScene return DetectionRecording( box_detections=self.get_box_detections_at_iteration(iteration), traffic_light_detections=self.get_traffic_light_detections_at_iteration(iteration), ) - def get_route_lane_group_ids(self, iteration: int) -> List[int]: + def get_route_lane_group_ids(self, iteration: int) -> Optional[List[int]]: self._lazy_initialize() - table_index = self._get_table_index(iteration) - return self._recording_table["route_lane_group_ids"][table_index].as_py() + route_lane_group_ids: Optional[List[int]] = None + if "route_lane_group_ids" in self._recording_table.column_names: + table_index = self._get_table_index(iteration) + route_lane_group_ids = self._recording_table["route_lane_group_ids"][table_index].as_py() + return route_lane_group_ids - def get_camera_at_iteration(self, iteration: int, camera_type: CameraType) -> Camera: + def get_camera_at_iteration(self, iteration: int, camera_type: CameraType) -> Optional[Camera]: self._lazy_initialize() - assert camera_type in self._camera_metadata, f"Camera type {camera_type} not found in metadata." - table_index = self._get_table_index(iteration) - return get_camera_from_arrow_table( - self._recording_table, - table_index, - self._camera_metadata[camera_type], - self.log_metadata, - ) + camera: Optional[Camera] = None + if camera_type in self._camera_metadata: + table_index = self._get_table_index(iteration) + camera = get_camera_from_arrow_table( + self._recording_table, + table_index, + self._camera_metadata[camera_type], + self.log_metadata, + ) + return camera - def get_lidar_at_iteration(self, iteration: int, lidar_type: LiDARType) -> LiDAR: + def get_lidar_at_iteration(self, iteration: int, lidar_type: LiDARType) -> Optional[LiDAR]: self._lazy_initialize() - assert lidar_type in self._lidar_metadata, f"LiDAR type {lidar_type} not found in metadata." 
- table_index = self._get_table_index(iteration) - return get_lidar_from_arrow_table( - self._recording_table, - table_index, - self._lidar_metadata[lidar_type], - self.log_metadata, - ) + lidar: Optional[LiDAR] = None + if lidar_type in self._lidar_metadata: + table_index = self._get_table_index(iteration) + lidar = get_lidar_from_arrow_table( + self._recording_table, + table_index, + self._lidar_metadata[lidar_type], + self.log_metadata, + ) + return lidar def _lazy_initialize(self) -> None: self.open() diff --git a/d123/geometry/utils/bounding_box_utils.py b/d123/geometry/utils/bounding_box_utils.py index 74c506e1..72f6e994 100644 --- a/d123/geometry/utils/bounding_box_utils.py +++ b/d123/geometry/utils/bounding_box_utils.py @@ -10,6 +10,7 @@ Corners2DIndex, Corners3DIndex, Point2DIndex, + Point3DIndex, Vector2DIndex, Vector3DIndex, ) @@ -173,6 +174,43 @@ def corners_array_to_3d_mesh( ) # Offset the faces for each box - faces = np.vstack([faces_single + i * 8 for i in range(num_boxes)]) + faces = np.vstack([faces_single + i * len(Corners3DIndex) for i in range(num_boxes)]) return vertices, faces + + +def corners_array_to_edge_lines(corners_array: npt.NDArray[np.float64]) -> npt.NDArray[np.float64]: + assert corners_array.shape[-1] == len(Point3DIndex) + assert corners_array.shape[-2] == len(Corners3DIndex) + assert corners_array.ndim >= 2 + + index_pairs = [ + (Corners3DIndex.FRONT_LEFT_BOTTOM, Corners3DIndex.FRONT_RIGHT_BOTTOM), + (Corners3DIndex.FRONT_RIGHT_BOTTOM, Corners3DIndex.BACK_RIGHT_BOTTOM), + (Corners3DIndex.BACK_RIGHT_BOTTOM, Corners3DIndex.BACK_LEFT_BOTTOM), + (Corners3DIndex.BACK_LEFT_BOTTOM, Corners3DIndex.FRONT_LEFT_BOTTOM), + (Corners3DIndex.FRONT_LEFT_TOP, Corners3DIndex.FRONT_RIGHT_TOP), + (Corners3DIndex.FRONT_RIGHT_TOP, Corners3DIndex.BACK_RIGHT_TOP), + (Corners3DIndex.BACK_RIGHT_TOP, Corners3DIndex.BACK_LEFT_TOP), + (Corners3DIndex.BACK_LEFT_TOP, Corners3DIndex.FRONT_LEFT_TOP), + (Corners3DIndex.FRONT_LEFT_BOTTOM, Corners3DIndex.FRONT_LEFT_TOP), + (Corners3DIndex.FRONT_RIGHT_BOTTOM, Corners3DIndex.FRONT_RIGHT_TOP), + (Corners3DIndex.BACK_RIGHT_BOTTOM, Corners3DIndex.BACK_RIGHT_TOP), + (Corners3DIndex.BACK_LEFT_BOTTOM, Corners3DIndex.BACK_LEFT_TOP), + ] + + if corners_array.ndim == 2: + # Single box case: (8, 3) + edge_lines = np.zeros((len(index_pairs), 2, len(Point3DIndex)), dtype=np.float64) + for edge_idx, (start_idx, end_idx) in enumerate(index_pairs): + edge_lines[edge_idx, 0] = corners_array[start_idx] + edge_lines[edge_idx, 1] = corners_array[end_idx] + else: + # Batched case: (..., 8, 3) + batch_shape = corners_array.shape[:-2] + edge_lines = np.zeros(batch_shape + (len(index_pairs), 2, len(Point3DIndex)), dtype=np.float64) + for edge_idx, (start_idx, end_idx) in enumerate(index_pairs): + edge_lines[..., edge_idx, 0, :] = corners_array[..., start_idx, :] + edge_lines[..., edge_idx, 1, :] = corners_array[..., end_idx, :] + + return edge_lines diff --git a/d123/script/run_viser.py b/d123/script/run_viser.py index e682a96e..87c3218a 100644 --- a/d123/script/run_viser.py +++ b/d123/script/run_viser.py @@ -3,7 +3,7 @@ import hydra from omegaconf import DictConfig -from d123.common.visualization.viser.server import ViserVisualizationServer +from d123.common.visualization.viser.viser_viewer import ViserViewer from d123.script.builders.scene_builder_builder import build_scene_builder from d123.script.builders.scene_filter_builder import build_scene_filter from d123.script.run_dataset_conversion import build_worker @@ -22,7 +22,7 @@ def main(cfg: DictConfig) -> 
None: scene_builder = build_scene_builder(cfg.scene_builder) scenes = scene_builder.get_scenes(scene_filter, worker=worker) - ViserVisualizationServer(scenes=scenes) + ViserViewer(scenes=scenes) if __name__ == "__main__": diff --git a/test_viser.py b/test_viser.py index 072c835e..9daa2d9c 100644 --- a/test_viser.py +++ b/test_viser.py @@ -1,15 +1,17 @@ +import os + from d123.common.multithreading.worker_sequential import Sequential -from d123.common.visualization.viser.server import ViserVisualizationServer +from d123.common.visualization.viser.viser_viewer import ViserViewer from d123.datatypes.scene.arrow.arrow_scene_builder import ArrowSceneBuilder from d123.datatypes.scene.scene_filter import SceneFilter from d123.datatypes.sensors.camera import CameraType if __name__ == "__main__": - splits = ["nuplan_private_test"] + # splits = ["nuplan_private_test"] # splits = ["carla"] # splits = ["wopd_train"] - # splits = ["av2-sensor-mini_train"] + splits = ["av2-sensor-mini_train"] log_names = None scene_tokens = None @@ -24,11 +26,9 @@ shuffle=False, camera_types=[CameraType.CAM_F0], ) - scene_builder = ArrowSceneBuilder("/home/daniel/d123_workspace/data") + scene_builder = ArrowSceneBuilder(os.environ["D123_DATA_ROOT"]) worker = Sequential() # worker = RayDistributed() scenes = scene_builder.get_scenes(scene_filter, worker) - print(f"Found {len(scenes)} scenes") - - visualization_server = ViserVisualizationServer(scenes, scene_index=0) + visualization_server = ViserViewer(scenes, scene_index=0) From 8cf4d8eff5ed9b24c285184fd57a7b6052fbbe72 Mon Sep 17 00:00:00 2001 From: Daniel Dauner Date: Sat, 4 Oct 2025 22:26:56 +0200 Subject: [PATCH 055/145] Several updates refactoring how sensors are stored. --- d123/common/utils/mixin.py | 5 + d123/common/utils/timer.py | 4 - .../common/visualization/matplotlib/camera.py | 8 +- .../viser/elements/sensor_elements.py | 16 +- .../visualization/viser/viser_config.py | 28 +- .../visualization/viser/viser_viewer.py | 26 +- d123/datasets/av2/av2_constants.py | 20 +- d123/datasets/av2/av2_data_converter.py | 61 ++-- d123/datasets/carla/carla_data_converter.py | 24 +- d123/datasets/nuplan/load_sensor.py | 2 +- d123/datasets/nuplan/nuplan_data_converter.py | 72 ++--- .../utils/sensor/camera_conventions.py | 85 ++++++ .../utils/sensor/lidar_index_registry.py} | 0 d123/datasets/wopd/wopd_data_converter.py | 94 +++---- d123/datatypes/scene/abstract_scene.py | 8 +- d123/datatypes/scene/arrow/arrow_scene.py | 20 +- .../datatypes/scene/arrow/utils/conversion.py | 17 +- d123/datatypes/scene/scene_filter.py | 8 +- d123/datatypes/sensors/__init__.py | 0 d123/datatypes/sensors/camera.py | 117 -------- d123/datatypes/sensors/camera/__init__.py | 0 .../sensors/camera/pinhole_camera.py | 260 ++++++++++++++++++ d123/datatypes/sensors/lidar/__init__.py | 0 d123/datatypes/sensors/{ => lidar}/lidar.py | 8 +- d123/script/config/common/default_common.yaml | 3 +- .../config/common/default_experiment.yaml | 2 + .../default_dataset_conversion.yaml | 18 +- d123/script/config/datasets/__init__.py | 0 .../datasets/nuplan_private_dataset.yaml | 2 +- d123/script/run_dataset_conversion.py | 3 - pyproject.toml | 2 +- scripts/dataset/run_log_caching.sh | 2 - test_viser.py | 13 +- 33 files changed, 590 insertions(+), 338 deletions(-) create mode 100644 d123/datasets/utils/sensor/camera_conventions.py rename d123/{datatypes/sensors/lidar_index.py => datasets/utils/sensor/lidar_index_registry.py} (100%) create mode 100644 d123/datatypes/sensors/__init__.py delete mode 100644 
d123/datatypes/sensors/camera.py create mode 100644 d123/datatypes/sensors/camera/__init__.py create mode 100644 d123/datatypes/sensors/camera/pinhole_camera.py create mode 100644 d123/datatypes/sensors/lidar/__init__.py rename d123/datatypes/sensors/{ => lidar}/lidar.py (93%) create mode 100644 d123/script/config/datasets/__init__.py diff --git a/d123/common/utils/mixin.py b/d123/common/utils/mixin.py index 56038bb4..1e17db64 100644 --- a/d123/common/utils/mixin.py +++ b/d123/common/utils/mixin.py @@ -12,6 +12,11 @@ def from_array(cls, array: npt.NDArray[np.float64], copy: bool = True) -> ArrayM """Create an instance from a NumPy array.""" raise NotImplementedError + @classmethod + def from_list(cls, values: list) -> ArrayMixin: + """Create an instance from a list of values.""" + return cls.from_array(np.array(values, dtype=np.float64), copy=False) + @property def array(self) -> npt.NDArray[np.float64]: """The array representation of the geometric entity.""" diff --git a/d123/common/utils/timer.py b/d123/common/utils/timer.py index 69137c0e..17558977 100644 --- a/d123/common/utils/timer.py +++ b/d123/common/utils/timer.py @@ -58,7 +58,6 @@ def end(self) -> None: def to_pandas(self) -> Optional[pd.DataFrame]: """ Returns a DataFrame with statistics of the logged times. - :param verbose: whether to print the timings, defaults to True :return: pandas dataframe. """ @@ -71,9 +70,6 @@ def to_pandas(self) -> Optional[pd.DataFrame]: statistics[key] = timings_statistics dataframe = pd.DataFrame.from_dict(statistics).transpose() - # if verbose: - # print(dataframe.to_string()) - return dataframe def info(self) -> Dict[str, float]: diff --git a/d123/common/visualization/matplotlib/camera.py b/d123/common/visualization/matplotlib/camera.py index bc33d0dd..49c567fb 100644 --- a/d123/common/visualization/matplotlib/camera.py +++ b/d123/common/visualization/matplotlib/camera.py @@ -13,7 +13,7 @@ from d123.common.visualization.color.default import BOX_DETECTION_CONFIG from d123.datatypes.detections.detection import BoxDetectionSE3, BoxDetectionWrapper from d123.datatypes.detections.detection_types import DetectionType -from d123.datatypes.sensors.camera import Camera +from d123.datatypes.sensors.camera.pinhole_camera import PinholeCamera from d123.datatypes.vehicle_state.ego_state import EgoStateSE3 from d123.geometry import BoundingBoxSE3Index, Corners3DIndex from d123.geometry.transform.transform_euler_se3 import convert_absolute_to_relative_euler_se3_array @@ -25,7 +25,7 @@ # from navsim.visualization.lidar import filter_lidar_pc, get_lidar_pc_color -def add_camera_ax(ax: plt.Axes, camera: Camera) -> plt.Axes: +def add_camera_ax(ax: plt.Axes, camera: PinholeCamera) -> plt.Axes: """ Adds camera image to matplotlib ax object :param ax: matplotlib ax object @@ -70,7 +70,7 @@ def add_camera_ax(ax: plt.Axes, camera: Camera) -> plt.Axes: def add_box_detections_to_camera_ax( ax: plt.Axes, - camera: Camera, + camera: PinholeCamera, box_detections: BoxDetectionWrapper, ego_state_se3: EgoStateSE3, ) -> plt.Axes: @@ -115,7 +115,7 @@ def add_box_detections_to_camera_ax( corners += detection_positions.reshape(-1, 1, 3) # Then draw project corners to image. 
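# A sketch of the pinhole projection `_transform_points_to_image` is assumed to
# perform with the 3x3 camera matrix (illustrative; the real helper may differ):
#   uvz = points_cam @ intrinsics.T                  # (N, 3) camera-frame points
#   in_fov = uvz[:, 2] > 0.0                         # in front of the image plane
#   uv = uvz[:, :2] / np.clip(uvz[:, 2:3], 1e-9, None)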
- box_corners, corners_pc_in_fov = _transform_points_to_image(corners.reshape(-1, 3), camera.metadata.intrinsic) + box_corners, corners_pc_in_fov = _transform_points_to_image(corners.reshape(-1, 3), camera.metadata.intrinsics) box_corners = box_corners.reshape(-1, 8, 2) corners_pc_in_fov = corners_pc_in_fov.reshape(-1, 8) valid_corners = corners_pc_in_fov.any(-1) diff --git a/d123/common/visualization/viser/elements/sensor_elements.py b/d123/common/visualization/viser/elements/sensor_elements.py index ee9b57bf..0cc63621 100644 --- a/d123/common/visualization/viser/elements/sensor_elements.py +++ b/d123/common/visualization/viser/elements/sensor_elements.py @@ -8,10 +8,10 @@ from d123.common.visualization.viser.viser_config import ViserConfig from d123.datatypes.scene.abstract_scene import AbstractScene -from d123.datatypes.sensors.camera import Camera, CameraType -from d123.datatypes.sensors.lidar import LiDARType +from d123.datatypes.sensors.camera.pinhole_camera import PinholeCamera, PinholeCameraType +from d123.datatypes.sensors.lidar.lidar import LiDARType from d123.datatypes.vehicle_state.ego_state import EgoStateSE3 -from d123.geometry import StateSE3, StateSE3Index +from d123.geometry import StateSE3Index from d123.geometry.transform.transform_se3 import ( convert_relative_to_absolute_points_3d_array, convert_relative_to_absolute_se3_array, @@ -24,7 +24,7 @@ def add_camera_frustums_to_viser_server( initial_ego_state: EgoStateSE3, viser_server: viser.ViserServer, viser_config: ViserConfig, - camera_frustum_handles: Dict[CameraType, viser.CameraFrustumHandle], + camera_frustum_handles: Dict[PinholeCameraType, viser.CameraFrustumHandle], ) -> None: if viser_config.camera_frustum_visible: @@ -32,7 +32,7 @@ def add_camera_frustums_to_viser_server( ego_pose = scene.get_ego_state_at_iteration(scene_interation).rear_axle_se3.array ego_pose[StateSE3Index.XYZ] -= scene_center_array - def _add_camera_frustums_to_viser_server(camera_type: CameraType) -> None: + def _add_camera_frustums_to_viser_server(camera_type: PinholeCameraType) -> None: camera = scene.get_camera_at_iteration(scene_interation, camera_type) if camera is not None: camera_position, camera_quaternion, camera_image = _get_camera_values( @@ -78,7 +78,7 @@ def add_camera_gui_to_viser_server( scene_interation: int, viser_server: viser.ViserServer, viser_config: ViserConfig, - camera_gui_handles: Dict[CameraType, viser.GuiImageHandle], + camera_gui_handles: Dict[PinholeCameraType, viser.GuiImageHandle], ) -> None: if viser_config.camera_gui_visible: for camera_type in viser_config.camera_gui_types: @@ -145,13 +145,13 @@ def _load_lidar_points(lidar_type: LiDARType) -> npt.NDArray[np.float32]: def _get_camera_values( - camera: Camera, + camera: PinholeCamera, ego_pose: npt.NDArray[np.float64], resize_factor: Optional[float] = None, ) -> Tuple[npt.NDArray[np.float64], npt.NDArray[np.float64], npt.NDArray[np.uint8]]: assert ego_pose.ndim == 1 and len(ego_pose) == len(StateSE3Index) - rel_camera_pose = StateSE3.from_transformation_matrix(camera.extrinsic).array + rel_camera_pose = camera.extrinsic.array abs_camera_pose = convert_relative_to_absolute_se3_array(origin=ego_pose, se3_array=rel_camera_pose) camera_position = abs_camera_pose[StateSE3Index.XYZ] diff --git a/d123/common/visualization/viser/viser_config.py b/d123/common/visualization/viser/viser_config.py index 59d5256d..276d6762 100644 --- a/d123/common/visualization/viser/viser_config.py +++ b/d123/common/visualization/viser/viser_config.py @@ -1,18 +1,18 @@ from dataclasses 
import dataclass, field from typing import List, Literal, Optional, Tuple -from d123.datatypes.sensors.camera import CameraType -from d123.datatypes.sensors.lidar import LiDARType +from d123.datatypes.sensors.camera.pinhole_camera import PinholeCameraType +from d123.datatypes.sensors.lidar.lidar import LiDARType -all_camera_types: List[CameraType] = [ - CameraType.CAM_F0, - CameraType.CAM_B0, - CameraType.CAM_L0, - CameraType.CAM_L1, - CameraType.CAM_L2, - CameraType.CAM_R0, - CameraType.CAM_R1, - CameraType.CAM_R2, +all_camera_types: List[PinholeCameraType] = [ + PinholeCameraType.CAM_F0, + PinholeCameraType.CAM_B0, + PinholeCameraType.CAM_L0, + PinholeCameraType.CAM_L1, + PinholeCameraType.CAM_L2, + PinholeCameraType.CAM_R0, + PinholeCameraType.CAM_R1, + PinholeCameraType.CAM_R2, ] all_lidar_types: List[LiDARType] = [ @@ -44,7 +44,7 @@ class ViserConfig: # Map map_visible: bool = True - map_radius: float = 1000.0 # [m] + map_radius: float = 200.0 # [m] map_non_road_z_offset: float = 0.0 # small translation to place crosswalks, parking, etc. on top of the road # Bounding boxes @@ -55,13 +55,13 @@ class ViserConfig: # Cameras # -> Frustum camera_frustum_visible: bool = True - camera_frustum_types: List[CameraType] = field(default_factory=lambda: all_camera_types.copy()) + camera_frustum_types: List[PinholeCameraType] = field(default_factory=lambda: all_camera_types.copy()) camera_frustum_frustum_scale: float = 1.0 camera_frustum_image_scale: float = 0.25 # Resize factor for the camera image shown on the frustum (<1.0 for speed) # -> GUI camera_gui_visible: bool = True - camera_gui_types: List[CameraType] = field(default_factory=lambda: [CameraType.CAM_F0].copy()) + camera_gui_types: List[PinholeCameraType] = field(default_factory=lambda: [PinholeCameraType.CAM_F0].copy()) camera_gui_image_scale: float = 0.25 # Resize factor for the camera image shown in the GUI (<1.0 for speed) # LiDAR diff --git a/d123/common/visualization/viser/viser_viewer.py b/d123/common/visualization/viser/viser_viewer.py index 9774918f..2bc44c3d 100644 --- a/d123/common/visualization/viser/viser_viewer.py +++ b/d123/common/visualization/viser/viser_viewer.py @@ -14,22 +14,22 @@ ) from d123.common.visualization.viser.viser_config import ViserConfig from d123.datatypes.scene.abstract_scene import AbstractScene -from d123.datatypes.sensors.camera import CameraType -from d123.datatypes.sensors.lidar import LiDARType +from d123.datatypes.sensors.camera.pinhole_camera import PinholeCameraType +from d123.datatypes.sensors.lidar.lidar import LiDARType from d123.datatypes.vehicle_state.ego_state import EgoStateSE3 logger = logging.getLogger(__name__) -all_camera_types: List[CameraType] = [ - CameraType.CAM_F0, - CameraType.CAM_B0, - CameraType.CAM_L0, - CameraType.CAM_L1, - CameraType.CAM_L2, - CameraType.CAM_R0, - CameraType.CAM_R1, - CameraType.CAM_R2, +all_camera_types: List[PinholeCameraType] = [ + PinholeCameraType.CAM_F0, + PinholeCameraType.CAM_B0, + PinholeCameraType.CAM_L0, + PinholeCameraType.CAM_L1, + PinholeCameraType.CAM_L2, + PinholeCameraType.CAM_R0, + PinholeCameraType.CAM_R1, + PinholeCameraType.CAM_R2, ] all_lidar_types: List[LiDARType] = [ @@ -200,8 +200,8 @@ def _(_) -> None: if sleep_time > 0: time.sleep(max(sleep_time, 0.0)) - camera_frustum_handles: Dict[CameraType, viser.CameraFrustumHandle] = {} - camera_gui_handles: Dict[CameraType, viser.GuiImageHandle] = {} + camera_frustum_handles: Dict[PinholeCameraType, viser.CameraFrustumHandle] = {} + camera_gui_handles: Dict[PinholeCameraType, 
viser.GuiImageHandle] = {} lidar_pc_handle: Optional[viser.PointCloudHandle] = None add_box_detections_to_viser_server( diff --git a/d123/datasets/av2/av2_constants.py b/d123/datasets/av2/av2_constants.py index 5d163245..16dab0b3 100644 --- a/d123/datasets/av2/av2_constants.py +++ b/d123/datasets/av2/av2_constants.py @@ -1,7 +1,7 @@ from d123.common.utils.enums import SerialIntEnum from d123.datatypes.detections.detection_types import DetectionType from d123.datatypes.maps.map_datatypes import RoadLineType -from d123.datatypes.sensors.camera import CameraType +from d123.datatypes.sensors.camera.pinhole_camera import PinholeCameraType class AV2SensorBoxDetectionType(SerialIntEnum): @@ -76,15 +76,15 @@ class AV2SensorBoxDetectionType(SerialIntEnum): AV2_CAMERA_TYPE_MAPPING = { - "ring_front_center": CameraType.CAM_F0, - "ring_front_left": CameraType.CAM_L0, - "ring_front_right": CameraType.CAM_R0, - "ring_side_left": CameraType.CAM_L1, - "ring_side_right": CameraType.CAM_R1, - "ring_rear_left": CameraType.CAM_L2, - "ring_rear_right": CameraType.CAM_R2, - "stereo_front_left": CameraType.CAM_STEREO_L, - "stereo_front_right": CameraType.CAM_STEREO_R, + "ring_front_center": PinholeCameraType.CAM_F0, + "ring_front_left": PinholeCameraType.CAM_L0, + "ring_front_right": PinholeCameraType.CAM_R0, + "ring_side_left": PinholeCameraType.CAM_L1, + "ring_side_right": PinholeCameraType.CAM_R1, + "ring_rear_left": PinholeCameraType.CAM_L2, + "ring_rear_right": PinholeCameraType.CAM_R2, + "stereo_front_left": PinholeCameraType.CAM_STEREO_L, + "stereo_front_right": PinholeCameraType.CAM_STEREO_R, } diff --git a/d123/datasets/av2/av2_data_converter.py b/d123/datasets/av2/av2_data_converter.py index e13bbf04..638eb205 100644 --- a/d123/datasets/av2/av2_data_converter.py +++ b/d123/datasets/av2/av2_data_converter.py @@ -25,8 +25,14 @@ from d123.datasets.av2.av2_map_conversion import convert_av2_map from d123.datasets.raw_data_converter import DataConverterConfig, RawDataConverter from d123.datatypes.scene.scene_metadata import LogMetadata -from d123.datatypes.sensors.camera import CameraMetadata, CameraType, camera_metadata_dict_to_json -from d123.datatypes.sensors.lidar import LiDARMetadata, LiDARType, lidar_metadata_dict_to_json +from d123.datatypes.sensors.camera.pinhole_camera import ( + PinholeCameraMetadata, + PinholeCameraType, + PinholeDistortion, + PinholeIntrinsics, + camera_metadata_dict_to_json, +) +from d123.datatypes.sensors.lidar.lidar import LiDARMetadata, LiDARType, lidar_metadata_dict_to_json from d123.datatypes.time.time_point import TimePoint from d123.datatypes.vehicle_state.ego_state import DynamicStateSE3, EgoStateSE3, EgoStateSE3Index from d123.datatypes.vehicle_state.vehicle_parameters import ( @@ -232,24 +238,32 @@ def convert_av2_log_to_arrow( return [] -def get_av2_camera_metadata(log_path: Path) -> Dict[CameraType, CameraMetadata]: +def get_av2_camera_metadata(log_path: Path) -> Dict[PinholeCameraType, PinholeCameraMetadata]: intrinsics_file = log_path / "calibration" / "intrinsics.feather" intrinsics_df = pd.read_feather(intrinsics_file) - camera_metadata: Dict[CameraType, CameraMetadata] = {} + camera_metadata: Dict[PinholeCameraType, PinholeCameraMetadata] = {} for _, row in intrinsics_df.iterrows(): row = row.to_dict() - camera_type = AV2_CAMERA_TYPE_MAPPING[row["sensor_name"]] - camera_metadata[camera_type] = CameraMetadata( + camera_metadata[camera_type] = PinholeCameraMetadata( camera_type=camera_type, width=row["width_px"], height=row["height_px"], - intrinsic=np.array( - 
[[row["fx_px"], 0, row["cx_px"]], [0, row["fy_px"], row["cy_px"]], [0, 0, 1]], dtype=np.float64 + intrinsics=PinholeIntrinsics( + fx=row["fx_px"], + fy=row["fy_px"], + cx=row["cx_px"], + cy=row["cy_px"], + ), + distortion=PinholeDistortion( + k1=row["k1"], + k2=row["k2"], + p1=0.0, + p2=0.0, + k3=row["k3"], ), - distortion=np.array([row["k1"], row["k2"], row["k3"], 0, 0], dtype=np.float64), ) return camera_metadata @@ -456,9 +470,9 @@ def _extract_camera( synchronization_df: pd.DataFrame, source_log_path: Path, data_converter_config: DataConverterConfig, -) -> Dict[CameraType, Union[str, bytes]]: +) -> Dict[PinholeCameraType, Union[str, bytes]]: - camera_dict: Dict[CameraType, Union[str, bytes]] = { + camera_dict: Dict[PinholeCameraType, Union[str, bytes]] = { camera_type: None for camera_type in AV2_CAMERA_TYPE_MAPPING.values() } split = source_log_path.parent.name @@ -491,30 +505,27 @@ def _extract_camera( else: absolute_image_path = source_dataset_dir / relative_image_path assert absolute_image_path.exists() - # TODO: Adjust for finer IMU timestamps to correct the camera extrinsic. + # TODO: Adjust for finer IMU timestamps to correct the camera extrinsic. camera_extrinsic = StateSE3( - x=row["tx_m"], y=row["ty_m"], z=row["tz_m"], qw=row["qw"], qx=row["qx"], qy=row["qy"], qz=row["qz"] + x=row["tx_m"], + y=row["ty_m"], + z=row["tz_m"], + qw=row["qw"], + qx=row["qx"], + qy=row["qy"], + qz=row["qz"], ) - # camera_extrinsic = camera_extrinsic @ ego_transform - camera_extrinsic = camera_extrinsic.transformation_matrix.flatten().tolist() - if data_converter_config.camera_store_option == "path": - camera_dict[camera_type] = (str(relative_image_path), camera_extrinsic) + camera_dict[camera_type] = (str(relative_image_path), camera_extrinsic.tolist()) elif data_converter_config.camera_store_option == "binary": with open(absolute_image_path, "rb") as f: - camera_dict[camera_type] = (f.read(), camera_extrinsic) + camera_dict[camera_type] = (f.read(), camera_extrinsic.tolist()) return camera_dict def _extract_lidar(lidar_pc, data_converter_config: DataConverterConfig) -> Dict[LiDARType, Optional[str]]: - - # lidar: Optional[str] = None - # lidar_full_path = NUPLAN_DATA_ROOT / "nuplan-v1.1" / "sensor_blobs" / lidar_pc.filename - # if lidar_full_path.exists(): - # lidar = lidar_pc.filename - - # return {LiDARType.LIDAR_MERGED: lidar} + # TODO: Implement this function to extract lidar data. 
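+    # A possible path-based implementation, mirroring _extract_camera above (a
+    # sketch only; the feather sweep layout is an assumption, not verified):
+    #   relative_sweep_path = Path("sensors") / "lidar" / f"{lidar_pc}.feather"
+    #   if data_converter_config.lidar_store_option == "path":
+    #       return {LiDARType.LIDAR_TOP: str(relative_sweep_path)}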
return {} diff --git a/d123/datasets/carla/carla_data_converter.py b/d123/datasets/carla/carla_data_converter.py index ec3a4824..1930bb69 100644 --- a/d123/datasets/carla/carla_data_converter.py +++ b/d123/datasets/carla/carla_data_converter.py @@ -15,13 +15,17 @@ from d123.common.utils.arrow_helper import open_arrow_table, write_arrow_table from d123.datasets.raw_data_converter import DataConverterConfig, RawDataConverter from d123.datasets.utils.maps.opendrive.opendrive_map_conversion import convert_from_xodr +from d123.datasets.utils.sensor.lidar_index_registry import CarlaLidarIndex from d123.datatypes.maps.abstract_map import AbstractMap, MapLayer from d123.datatypes.maps.abstract_map_objects import AbstractLane from d123.datatypes.maps.gpkg.gpkg_map import get_map_api_from_names from d123.datatypes.scene.scene_metadata import LogMetadata -from d123.datatypes.sensors.camera import CameraMetadata, CameraType, camera_metadata_dict_to_json -from d123.datatypes.sensors.lidar import LiDARMetadata, LiDARType, lidar_metadata_dict_to_json -from d123.datatypes.sensors.lidar_index import CarlaLidarIndex +from d123.datatypes.sensors.camera.pinhole_camera import ( + PinholeCameraMetadata, + PinholeCameraType, + camera_metadata_dict_to_json, +) +from d123.datatypes.sensors.lidar.lidar import LiDARMetadata, LiDARType, lidar_metadata_dict_to_json from d123.datatypes.vehicle_state.ego_state import EgoStateSE3Index from d123.datatypes.vehicle_state.vehicle_parameters import get_carla_lincoln_mkz_2020_parameters from d123.geometry import BoundingBoxSE3Index, Point2D, Point3D, Vector3DIndex @@ -45,7 +49,7 @@ TRAFFIC_LIGHT_ASSIGNMENT_DISTANCE: Final[float] = 1.0 # [m] SORT_BY_TIMESTAMP: Final[bool] = True -CARLA_CAMERA_TYPES = {CameraType.CAM_F0} +CARLA_CAMERA_TYPES = {PinholeCameraType.CAM_F0} CARLA_DATA_ROOT: Final[Path] = Path(os.environ["CARLA_DATA_ROOT"]) @@ -247,20 +251,20 @@ def _get_metadata(location: str, log_name: str) -> LogMetadata: ) -def get_carla_camera_metadata(first_log_dict: Dict[str, Any]) -> Dict[CameraType, CameraMetadata]: +def get_carla_camera_metadata(first_log_dict: Dict[str, Any]) -> Dict[PinholeCameraType, PinholeCameraMetadata]: # FIXME: This is a placeholder function to return camera metadata. 
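    # One way to resolve the FIXME, assuming the intrinsics helpers used by the
    # other converters in this patch apply here as well (a sketch, not a fix):
    #   intrinsics = PinholeIntrinsics.from_camera_matrix(intrinsic)
    #   distortion = PinholeDistortion(k1=0.0, k2=0.0, p1=0.0, p2=0.0, k3=0.0)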
intrinsic = np.array( - first_log_dict[f"{CameraType.CAM_F0.serialize()}_intrinsics"], + first_log_dict[f"{PinholeCameraType.CAM_F0.serialize()}_intrinsics"], dtype=np.float64, ) camera_metadata = { - CameraType.CAM_F0: CameraMetadata( - camera_type=CameraType.CAM_F0, + PinholeCameraType.CAM_F0: PinholeCameraMetadata( + camera_type=PinholeCameraType.CAM_F0, width=1024, height=512, - intrinsic=intrinsic, + intrinsics=intrinsic, distortion=np.zeros((5,), dtype=np.float64), ) } @@ -417,7 +421,7 @@ def _extract_route_lane_group_ids(route: List[List[float]], map_api: AbstractMap def _extract_cameras( data: Dict[str, Any], log_name: str, sample_name: str, data_converter_config: DataConverterConfig -) -> Dict[CameraType, Optional[str]]: +) -> Dict[PinholeCameraType, Optional[str]]: camera_dict: Dict[str, Union[str, bytes]] = {} for camera_type in CARLA_CAMERA_TYPES: camera_full_path = CARLA_DATA_ROOT / "sensor_blobs" / log_name / camera_type.name / f"{sample_name}.jpg" diff --git a/d123/datasets/nuplan/load_sensor.py b/d123/datasets/nuplan/load_sensor.py index c00e4f31..6e80df47 100644 --- a/d123/datasets/nuplan/load_sensor.py +++ b/d123/datasets/nuplan/load_sensor.py @@ -2,7 +2,7 @@ from pathlib import Path from d123.common.utils.dependencies import check_dependencies -from d123.datatypes.sensors.lidar import LiDAR, LiDARMetadata +from d123.datatypes.sensors.lidar.lidar import LiDAR, LiDARMetadata check_dependencies(["nuplan"], "nuplan") from nuplan.database.utils.pointclouds.lidar import LidarPointCloud diff --git a/d123/datasets/nuplan/nuplan_data_converter.py b/d123/datasets/nuplan/nuplan_data_converter.py index d03dbaf9..79f824e3 100644 --- a/d123/datasets/nuplan/nuplan_data_converter.py +++ b/d123/datasets/nuplan/nuplan_data_converter.py @@ -10,7 +10,6 @@ import numpy as np import pyarrow as pa import yaml -from pyquaternion import Quaternion import d123.datasets.nuplan.utils as nuplan_utils from d123.common.multithreading.worker_utils import WorkerPool, worker_map @@ -18,12 +17,18 @@ from d123.common.utils.dependencies import check_dependencies from d123.datasets.nuplan.nuplan_map_conversion import MAP_LOCATIONS, NuPlanMapConverter from d123.datasets.raw_data_converter import DataConverterConfig, RawDataConverter +from d123.datasets.utils.sensor.lidar_index_registry import NuplanLidarIndex from d123.datatypes.detections.detection import TrafficLightStatus from d123.datatypes.detections.detection_types import DetectionType from d123.datatypes.scene.scene_metadata import LogMetadata -from d123.datatypes.sensors.camera import CameraMetadata, CameraType, camera_metadata_dict_to_json -from d123.datatypes.sensors.lidar import LiDARMetadata, LiDARType, lidar_metadata_dict_to_json -from d123.datatypes.sensors.lidar_index import NuplanLidarIndex +from d123.datatypes.sensors.camera.pinhole_camera import ( + PinholeCameraMetadata, + PinholeCameraType, + PinholeDistortion, + PinholeIntrinsics, + camera_metadata_dict_to_json, +) +from d123.datatypes.sensors.lidar.lidar import LiDARMetadata, LiDARType, lidar_metadata_dict_to_json from d123.datatypes.time.time_point import TimePoint from d123.datatypes.vehicle_state.ego_state import DynamicStateSE3, EgoStateSE3, EgoStateSE3Index from d123.datatypes.vehicle_state.vehicle_parameters import ( @@ -31,6 +36,7 @@ rear_axle_se3_to_center_se3, ) from d123.geometry import BoundingBoxSE3, BoundingBoxSE3Index, StateSE3, Vector3D, Vector3DIndex +from d123.geometry.geometry_index import StateSE3Index from d123.geometry.rotation import EulerAngles from 
d123.geometry.utils.constants import DEFAULT_PITCH, DEFAULT_ROLL @@ -63,14 +69,14 @@ } NUPLAN_CAMERA_TYPES = { - CameraType.CAM_F0: CameraChannel.CAM_F0, - CameraType.CAM_B0: CameraChannel.CAM_B0, - CameraType.CAM_L0: CameraChannel.CAM_L0, - CameraType.CAM_L1: CameraChannel.CAM_L1, - CameraType.CAM_L2: CameraChannel.CAM_L2, - CameraType.CAM_R0: CameraChannel.CAM_R0, - CameraType.CAM_R1: CameraChannel.CAM_R1, - CameraType.CAM_R2: CameraChannel.CAM_R2, + PinholeCameraType.CAM_F0: CameraChannel.CAM_F0, + PinholeCameraType.CAM_B0: CameraChannel.CAM_B0, + PinholeCameraType.CAM_L0: CameraChannel.CAM_L0, + PinholeCameraType.CAM_L1: CameraChannel.CAM_L1, + PinholeCameraType.CAM_L2: CameraChannel.CAM_L2, + PinholeCameraType.CAM_R0: CameraChannel.CAM_R0, + PinholeCameraType.CAM_R1: CameraChannel.CAM_R1, + PinholeCameraType.CAM_R2: CameraChannel.CAM_R2, } NUPLAN_DATA_ROOT = Path(os.environ["NUPLAN_DATA_ROOT"]) @@ -106,7 +112,7 @@ def __init__( def _collect_log_paths(self) -> Dict[str, List[Path]]: # NOTE: the nuplan mini folder has an internal train, val, test structure, all stored in "mini". # The complete dataset is saved in the "trainval" folder (train and val), or in the "test" folder (for test). - subsplit_log_names: Dict[str, List[str]] = create_splits_logs() + # subsplit_log_names: Dict[str, List[str]] = create_splits_logs() log_paths_per_split: Dict[str, List[Path]] = {} for split in self._splits: @@ -123,7 +129,7 @@ def _collect_log_paths(self) -> Dict[str, List[Path]]: all_log_files_in_path = [log_file for log_file in log_path.glob("*.db")] all_log_names = set([str(log_file.stem) for log_file in all_log_files_in_path]) - set(subsplit_log_names[subsplit]) + # set(subsplit_log_names[subsplit]) # log_paths = [log_path / f"{log_name}.db" for log_name in list(all_log_names & split_log_names)] log_paths = [log_path / f"{log_name}.db" for log_name in list(all_log_names)] log_paths_per_split[split] = log_paths @@ -138,7 +144,7 @@ def get_available_splits(self) -> List[str]: "nuplan_mini_train", "nuplan_mini_val", "nuplan_mini_test", - "nuplan_private_test", + "nuplan_private_test", # TODO: remove, not publicly available ] def convert_maps(self, worker: WorkerPool) -> None: @@ -204,7 +210,7 @@ def convert_nuplan_log_to_arrow( ) vehicle_parameters = get_nuplan_chrysler_pacifica_parameters() camera_metadata = get_nuplan_camera_metadata(log_path) - lidar_metadata = get_nuplan_lidar_metadata(log_db) + lidar_metadata = get_nuplan_lidar_metadata() schema_column_list = [ ("token", pa.string()), @@ -231,7 +237,7 @@ def convert_nuplan_log_to_arrow( if data_converter_config.camera_store_option == "path": schema_column_list.append((camera_type.serialize(), pa.string())) schema_column_list.append( - (f"{camera_type.serialize()}_extrinsic", pa.list_(pa.float64(), 4 * 4)) + (f"{camera_type.serialize()}_extrinsic", pa.list_(pa.float64(), len(StateSE3Index))) ) elif data_converter_config.camera_store_option == "binary": @@ -256,30 +262,33 @@ def convert_nuplan_log_to_arrow( return [] -def get_nuplan_camera_metadata(log_path: Path) -> Dict[CameraType, CameraMetadata]: +def get_nuplan_camera_metadata(log_path: Path) -> Dict[PinholeCameraType, PinholeCameraMetadata]: - def _get_camera_metadata(camera_type: CameraType) -> CameraMetadata: + def _get_camera_metadata(camera_type: PinholeCameraType) -> PinholeCameraMetadata: cam = list(get_cameras(log_path, [str(NUPLAN_CAMERA_TYPES[camera_type].value)]))[0] - intrinsic = np.array(pickle.loads(cam.intrinsic)) - rotation = np.array(pickle.loads(cam.rotation)) - rotation = 
Quaternion(rotation).rotation_matrix - distortion = np.array(pickle.loads(cam.distortion)) - return CameraMetadata( + + intrinsics_camera_matrix = np.array(pickle.loads(cam.intrinsic)) # array of shape (3, 3) + intrinsic = PinholeIntrinsics.from_camera_matrix(intrinsics_camera_matrix) + + distortion_array = np.array(pickle.loads(cam.distortion)) # array of shape (5,) + distortion = PinholeDistortion.from_array(distortion_array, copy=False) + + return PinholeCameraMetadata( camera_type=camera_type, width=cam.width, height=cam.height, - intrinsic=intrinsic, + intrinsics=intrinsic, distortion=distortion, ) - log_cam_infos: Dict[str, CameraMetadata] = {} + log_cam_infos: Dict[str, PinholeCameraMetadata] = {} for camera_type in NUPLAN_CAMERA_TYPES.keys(): log_cam_infos[camera_type] = _get_camera_metadata(camera_type) return log_cam_infos -def get_nuplan_lidar_metadata(log_db: NuPlanDB) -> Dict[LiDARType, LiDARMetadata]: +def get_nuplan_lidar_metadata() -> Dict[LiDARType, LiDARMetadata]: metadata: Dict[LiDARType, LiDARMetadata] = {} metadata[LiDARType.LIDAR_MERGED] = LiDARMetadata( lidar_type=LiDARType.LIDAR_MERGED, @@ -297,7 +306,6 @@ def _write_recording_table( data_converter_config: DataConverterConfig, ) -> None: - # with pa.ipc.new_stream(str(log_file_path), recording_schema) as writer: with pa.OSFile(str(log_file_path), "wb") as sink: with pa.ipc.new_file(sink, recording_schema) as writer: step_interval: float = int(TARGET_DT / NUPLAN_DT) @@ -448,7 +456,7 @@ def _extract_camera( lidar_pc: LidarPc, source_log_path: Path, data_converter_config: DataConverterConfig, -) -> Dict[CameraType, Union[str, bytes]]: +) -> Dict[PinholeCameraType, Union[str, bytes]]: camera_dict: Dict[str, Union[str, bytes]] = {} sensor_root = NUPLAN_DATA_ROOT / "nuplan-v1.1" / "sensor_blobs" @@ -477,11 +485,13 @@ def _extract_camera( c2img_e = cam_info.trans_matrix c2e = img_e2e @ c2img_e + extrinsic = StateSE3.from_transformation_matrix(c2e) + if data_converter_config.camera_store_option == "path": - camera_data = str(filename_jpg), c2e.flatten().tolist() + camera_data = str(filename_jpg), extrinsic.tolist() elif data_converter_config.camera_store_option == "binary": with open(filename_jpg, "rb") as f: - camera_data = f.read(), c2e + camera_data = f.read(), extrinsic.tolist() camera_dict[camera_type] = camera_data diff --git a/d123/datasets/utils/sensor/camera_conventions.py b/d123/datasets/utils/sensor/camera_conventions.py new file mode 100644 index 00000000..184eb9ef --- /dev/null +++ b/d123/datasets/utils/sensor/camera_conventions.py @@ -0,0 +1,85 @@ +""" +Default Camera Coordinate System in 123D: + + -Y (up) /| H + | / | e + | / | i + | / | g + | / | h + | | | t + O────────────|──●──|──────────── +Z (forward), aka. optical/principal axis + / | / h + / | / t + / | / d + / | / i ++X (right) |/ W + +We use COLMAP/OpenCV convention (+Z forward, -Y up, +X right), + abbreviated as "pZmYpX" for the forward-up-right axes. + +Other common conventions include, for forward-up-right axes. + - (+X forward, +Z up, -Y right), "pXpZmY", e.g. Waymo Open Dataset + +NOTE: This file should be extended if other conventions are needed in the future. 
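+
+Example (illustrative): converting a camera pose from the Waymo convention to
+the default used here, where `wopd_pose` is any StateSE3 camera pose:
+
+    d123_pose = convert_camera_convention(wopd_pose, "pXpZmY", "pZmYpX")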
+""" + +from enum import Enum +from typing import Union + +import numpy as np + +from d123.geometry import StateSE3 + + +class CameraConvention(Enum): + """Camera coordinate system conventions + p/m: positive/negative + X/Y/Z: axes directions + order: forward, up, right + + Example: pZmYpX means +Z forward, -Y up, +X right + """ + + pZmYpX = "pZmYpX" # Default in 123D (OpenCV/COLMAP) + pXpZmY = "pXpZmY" # e.g. Waymo Open Dataset + + +def convert_camera_convention( + from_pose: StateSE3, + from_convention: Union[CameraConvention, str], + to_convention: Union[CameraConvention, str], +) -> StateSE3: + """Convert camera pose between different conventions. + 123D default is pZmYpX (+Z forward, -Y up, +X right). + + :param from_pose: StateSE3 representing the camera pose to convert + :param from_convention: CameraConvention representing the current convention of the pose + :param to_convention: CameraConvention representing the target convention to convert to + :return: StateSE3 representing the converted camera pose + """ + # TODO: Write tests for this function + # TODO: Create function over batch/array of poses + + if isinstance(from_convention, str): + from_convention = CameraConvention(from_convention) + if isinstance(to_convention, str): + to_convention = CameraConvention(to_convention) + + if from_convention == to_convention: + return from_pose + + flip_matrices = { + (CameraConvention.pXpZmY, CameraConvention.pZmYpX): np.array( + [ + [0.0, -1.0, 0.0], # new X = old -Y (right) + [0.0, 0.0, -1.0], # new Y = old -Z (down) + [1.0, 0.0, 0.0], # new Z = old X (forward) + ], + dtype=np.float64, + ).T, + } + + pose_transformation = from_pose.transformation_matrix.copy() + F = flip_matrices[(from_convention, to_convention)] + pose_transformation[:3, :3] = pose_transformation[:3, :3] @ F + return StateSE3.from_transformation_matrix(pose_transformation) diff --git a/d123/datatypes/sensors/lidar_index.py b/d123/datasets/utils/sensor/lidar_index_registry.py similarity index 100% rename from d123/datatypes/sensors/lidar_index.py rename to d123/datasets/utils/sensor/lidar_index_registry.py diff --git a/d123/datasets/wopd/wopd_data_converter.py b/d123/datasets/wopd/wopd_data_converter.py index 95649218..27bc4c05 100644 --- a/d123/datasets/wopd/wopd_data_converter.py +++ b/d123/datasets/wopd/wopd_data_converter.py @@ -10,20 +10,29 @@ import numpy as np import numpy.typing as npt import pyarrow as pa -from fromd123.datatypes.detections.detection_types import DetectionType +from pyparsing import Optional from d123.common.multithreading.worker_utils import WorkerPool, worker_map from d123.common.utils.dependencies import check_dependencies from d123.datasets.raw_data_converter import DataConverterConfig, RawDataConverter +from d123.datasets.utils.sensor.camera_conventions import CameraConvention, convert_camera_convention +from d123.datasets.utils.sensor.lidar_index_registry import WopdLidarIndex from d123.datasets.wopd.waymo_map_utils.wopd_map_utils import convert_wopd_map from d123.datasets.wopd.wopd_utils import parse_range_image_and_camera_projection +from d123.datatypes.detections.detection_types import DetectionType from d123.datatypes.scene.scene_metadata import LogMetadata -from d123.datatypes.sensors.camera import CameraMetadata, CameraType, camera_metadata_dict_to_json -from d123.datatypes.sensors.lidar import LiDARMetadata, LiDARType, lidar_metadata_dict_to_json -from d123.datatypes.sensors.lidar_index import WopdLidarIndex +from d123.datatypes.sensors.camera.pinhole_camera import ( + 
PinholeCameraMetadata, + PinholeCameraType, + PinholeDistortion, + PinholeIntrinsics, + camera_metadata_dict_to_json, +) +from d123.datatypes.sensors.lidar.lidar import LiDARMetadata, LiDARType, lidar_metadata_dict_to_json from d123.datatypes.vehicle_state.ego_state import DynamicStateSE3, EgoStateSE3, EgoStateSE3Index from d123.datatypes.vehicle_state.vehicle_parameters import get_wopd_chrysler_pacifica_parameters from d123.geometry import BoundingBoxSE3Index, EulerAngles, StateSE3, Vector3D, Vector3DIndex +from d123.geometry.geometry_index import StateSE3Index from d123.geometry.transform.transform_se3 import convert_relative_to_absolute_se3_array from d123.geometry.utils.constants import DEFAULT_PITCH, DEFAULT_ROLL @@ -52,12 +61,12 @@ } # https://github.com/waymo-research/waymo-open-dataset/blob/master/src/waymo_open_dataset/dataset.proto#L50 -WOPD_CAMERA_TYPES: Dict[int, CameraType] = { - 1: CameraType.CAM_F0, # front_camera - 2: CameraType.CAM_L0, # front_left_camera - 3: CameraType.CAM_R0, # front_right_camera - 4: CameraType.CAM_L1, # left_camera - 5: CameraType.CAM_R1, # right_camera +WOPD_CAMERA_TYPES: Dict[int, PinholeCameraType] = { + 1: PinholeCameraType.CAM_F0, # front_camera + 2: PinholeCameraType.CAM_L0, # front_left_camera + 3: PinholeCameraType.CAM_R0, # front_right_camera + 4: PinholeCameraType.CAM_L1, # left_camera + 5: PinholeCameraType.CAM_R1, # right_camera } # https://github.com/waymo-research/waymo-open-dataset/blob/master/src/waymo_open_dataset/dataset.proto#L66 @@ -239,7 +248,7 @@ def convert_wopd_tfrecord_log_to_arrow( elif data_converter_config.camera_store_option == "binary": schema_column_list.append((camera_type.serialize(), pa.binary())) schema_column_list.append( - (f"{camera_type.serialize()}_extrinsic", pa.list_(pa.float64(), 4 * 4)) + (f"{camera_type.serialize()}_extrinsic", pa.list_(pa.float64(), len(StateSE3Index))) ) if data_converter_config.lidar_store_option is not None: @@ -273,26 +282,24 @@ def convert_wopd_tfrecord_log_to_arrow( def get_wopd_camera_metadata( initial_frame: dataset_pb2.Frame, data_converter_config: DataConverterConfig -) -> Dict[CameraType, CameraMetadata]: +) -> Dict[PinholeCameraType, PinholeCameraMetadata]: - cam_metadatas: Dict[CameraType, CameraMetadata] = {} + cam_metadatas: Dict[PinholeCameraType, PinholeCameraMetadata] = {} if data_converter_config.camera_store_option is not None: for calibration in initial_frame.context.camera_calibrations: camera_type = WOPD_CAMERA_TYPES[calibration.name] - # https://github.com/waymo-research/waymo-open-dataset/blob/master/src/waymo_open_dataset/dataset.proto#L96 # https://github.com/waymo-research/waymo-open-dataset/issues/834#issuecomment-2134995440 fx, fy, cx, cy, k1, k2, p1, p2, k3 = calibration.intrinsic - _intrinsics = np.array([[fx, 0, cx], [0, fy, cy], [0, 0, 1]]) - _distortions = np.array([k1, k2, p1, p2, k3]) - + intrinsics = PinholeIntrinsics(fx=fx, fy=fy, cx=cx, cy=cy) + distortion = PinholeDistortion(k1=k1, k2=k2, p1=p1, p2=p2, k3=k3) if camera_type in WOPD_CAMERA_TYPES.values(): - cam_metadatas[camera_type] = CameraMetadata( + cam_metadatas[camera_type] = PinholeCameraMetadata( camera_type=camera_type, width=calibration.width, height=calibration.height, - intrinsic=_intrinsics, - distortion=_distortions, + intrinsics=intrinsics, + distortion=distortion, ) return cam_metadatas @@ -305,12 +312,14 @@ def get_wopd_lidar_metadata( laser_metadatas: Dict[LiDARType, LiDARMetadata] = {} if data_converter_config.lidar_store_option is not None: for laser_calibration in 
initial_frame.context.laser_calibrations: + lidar_type = WOPD_LIDAR_TYPES[laser_calibration.name] - extrinsic = ( - np.array(laser_calibration.extrinsic.transform, dtype=np.float64).reshape(4, 4) - if laser_calibration.extrinsic - else None - ) + + extrinsic: Optional[StateSE3] = None + if laser_calibration.extrinsic: + extrinsic_transform = np.array(laser_calibration.extrinsic.transform, dtype=np.float64).reshape(4, 4) + extrinsic = StateSE3.from_transformation_matrix(extrinsic_transform) + laser_metadatas[lidar_type] = LiDARMetadata( lidar_type=lidar_type, lidar_index=WopdLidarIndex, @@ -328,7 +337,6 @@ def _write_recording_table( data_converter_config: DataConverterConfig, ) -> None: - # with pa.ipc.new_stream(str(log_file_path), recording_schema) as writer: with pa.OSFile(str(log_file_path), "wb") as sink: with pa.ipc.new_file(sink, recording_schema) as writer: @@ -464,40 +472,32 @@ def _extract_traffic_lights() -> Tuple[List[int], List[int]]: def _extract_camera( frame: dataset_pb2.Frame, data_converter_config: DataConverterConfig -) -> Dict[CameraType, Union[str, bytes]]: +) -> Dict[PinholeCameraType, Union[str, bytes]]: camera_dict: Dict[str, Union[str, bytes]] = {} # TODO: Fix wrong type hint np.array(frame.pose.transform).reshape(4, 4) # NOTE: The extrinsic matrix in frame.context.camera_calibration is fixed to model the ego to camera transformation. # The poses in frame.images[idx] are the motion compensated ego poses when the camera triggers. - # - context_extrinsic: Dict[str, npt.NDArray] = {} + context_extrinsic: Dict[str, StateSE3] = {} for calibration in frame.context.camera_calibrations: camera_type = WOPD_CAMERA_TYPES[calibration.name] + camera_transform = np.array(calibration.extrinsic.transform, dtype=np.float64).reshape(4, 4) + camera_pose = StateSE3.from_transformation_matrix(camera_transform) + # NOTE: WOPD uses a different camera convention than d123 + # https://arxiv.org/pdf/1912.04838 (Figure 1.) + camera_pose = convert_camera_convention( + camera_pose, + from_convention=CameraConvention.pXpZmY, + to_convention=CameraConvention.pZmYpX, + ) + context_extrinsic[camera_type] = camera_pose - transform = np.array(calibration.extrinsic.transform).reshape(4, 4) - - # FIXME: This is an ugly hack to convert to uniform camera convention. - # TODO: Extract function to convert between different camera conventions. 
- flip_camera = EulerAngles(roll=np.deg2rad(0.0), pitch=np.deg2rad(90.0), yaw=np.deg2rad(-90.0)).rotation_matrix - transform[:3, :3] = transform[:3, :3] @ flip_camera - context_extrinsic[camera_type] = transform - - # TODO: Refactor to avoid code duplication for image_proto in frame.images: camera_type = WOPD_CAMERA_TYPES[image_proto.name] - - np.array(image_proto.pose.transform).reshape(4, 4) camera_bytes = image_proto.image - - # # Compute the transform from ego_global_transform to ego_at_camera_transform - # # ego_global_transform * T = ego_at_camera_transform => T = ego_global_transform^-1 * ego_at_camera_transform - # np.linalg.inv(ego_global_transform) @ ego_at_trigger_transform - - # TODO: figure out the correct transform - camera_dict[camera_type] = camera_bytes, context_extrinsic[camera_type].flatten().tolist() + camera_dict[camera_type] = camera_bytes, context_extrinsic[camera_type].tolist() return camera_dict diff --git a/d123/datatypes/scene/abstract_scene.py b/d123/datatypes/scene/abstract_scene.py index af66541e..7f6ca5e3 100644 --- a/d123/datatypes/scene/abstract_scene.py +++ b/d123/datatypes/scene/abstract_scene.py @@ -6,8 +6,8 @@ from d123.datatypes.detections.detection import BoxDetectionWrapper, DetectionRecording, TrafficLightDetectionWrapper from d123.datatypes.maps.abstract_map import AbstractMap from d123.datatypes.scene.scene_metadata import LogMetadata -from d123.datatypes.sensors.camera import Camera, CameraType -from d123.datatypes.sensors.lidar import LiDAR, LiDARType +from d123.datatypes.sensors.camera.pinhole_camera import PinholeCamera, PinholeCameraType +from d123.datatypes.sensors.lidar.lidar import LiDAR, LiDARType from d123.datatypes.time.time_point import TimePoint from d123.datatypes.vehicle_state.ego_state import EgoStateSE3 @@ -33,7 +33,7 @@ def log_metadata(self) -> LogMetadata: @property @abc.abstractmethod - def available_camera_types(self) -> List[CameraType]: + def available_camera_types(self) -> List[PinholeCameraType]: raise NotImplementedError @property @@ -79,7 +79,7 @@ def get_route_lane_group_ids(self, iteration: int) -> Optional[List[int]]: raise NotImplementedError @abc.abstractmethod - def get_camera_at_iteration(self, iteration: int, camera_type: CameraType) -> Optional[Camera]: + def get_camera_at_iteration(self, iteration: int, camera_type: PinholeCameraType) -> Optional[PinholeCamera]: raise NotImplementedError @abc.abstractmethod diff --git a/d123/datatypes/scene/arrow/arrow_scene.py b/d123/datatypes/scene/arrow/arrow_scene.py index 8f61397a..3fcc2150 100644 --- a/d123/datatypes/scene/arrow/arrow_scene.py +++ b/d123/datatypes/scene/arrow/arrow_scene.py @@ -18,8 +18,13 @@ get_traffic_light_detections_from_arrow_table, ) from d123.datatypes.scene.scene_metadata import LogMetadata, SceneExtractionInfo -from d123.datatypes.sensors.camera import Camera, CameraMetadata, CameraType, camera_metadata_dict_from_json -from d123.datatypes.sensors.lidar import LiDAR, LiDARMetadata, LiDARType, lidar_metadata_dict_from_json +from d123.datatypes.sensors.camera.pinhole_camera import ( + PinholeCamera, + PinholeCameraMetadata, + PinholeCameraType, + camera_metadata_dict_from_json, +) +from d123.datatypes.sensors.lidar.lidar import LiDAR, LiDARMetadata, LiDARType, lidar_metadata_dict_from_json from d123.datatypes.time.time_point import TimePoint from d123.datatypes.vehicle_state.ego_state import EgoStateSE3 from d123.datatypes.vehicle_state.vehicle_parameters import VehicleParameters @@ -29,7 +34,7 @@ def _get_scene_data( arrow_file_path: Union[Path, 
str], -) -> Tuple[LogMetadata, VehicleParameters, Dict[CameraType, CameraMetadata]]: +) -> Tuple[LogMetadata, VehicleParameters, Dict[PinholeCameraType, PinholeCameraMetadata]]: """ Extracts the metadata and vehicle parameters from the arrow file. """ @@ -60,7 +65,6 @@ def __init__( ) -> None: self._recording_table: pa.Table = None - ( _metadata, _vehicle_parameters, @@ -69,7 +73,7 @@ def __init__( ) = _get_scene_data(arrow_file_path) self._metadata: LogMetadata = _metadata self._vehicle_parameters: VehicleParameters = _vehicle_parameters - self._camera_metadata: Dict[CameraType, CameraMetadata] = _camera_metadata + self._camera_metadata: Dict[PinholeCameraType, PinholeCameraMetadata] = _camera_metadata self._lidar_metadata: Dict[LiDARType, LiDARMetadata] = _lidar_metadata self._map_api: Optional[AbstractMap] = None @@ -105,7 +109,7 @@ def log_metadata(self) -> LogMetadata: return self._metadata @property - def available_camera_types(self) -> List[CameraType]: + def available_camera_types(self) -> List[PinholeCameraType]: return list(self._camera_metadata.keys()) @property @@ -163,9 +167,9 @@ def get_route_lane_group_ids(self, iteration: int) -> Optional[List[int]]: route_lane_group_ids = self._recording_table["route_lane_group_ids"][table_index].as_py() return route_lane_group_ids - def get_camera_at_iteration(self, iteration: int, camera_type: CameraType) -> Optional[Camera]: + def get_camera_at_iteration(self, iteration: int, camera_type: PinholeCameraType) -> Optional[PinholeCamera]: self._lazy_initialize() - camera: Optional[Camera] = None + camera: Optional[PinholeCamera] = None if camera_type in self._camera_metadata: table_index = self._get_table_index(iteration) camera = get_camera_from_arrow_table( diff --git a/d123/datatypes/scene/arrow/utils/conversion.py b/d123/datatypes/scene/arrow/utils/conversion.py index 2b95f2fb..8d36e80b 100644 --- a/d123/datatypes/scene/arrow/utils/conversion.py +++ b/d123/datatypes/scene/arrow/utils/conversion.py @@ -20,17 +20,18 @@ ) from d123.datatypes.detections.detection_types import DetectionType from d123.datatypes.scene.scene_metadata import LogMetadata -from d123.datatypes.sensors.camera import Camera, CameraMetadata -from d123.datatypes.sensors.lidar import LiDAR, LiDARMetadata +from d123.datatypes.sensors.camera.pinhole_camera import PinholeCamera, PinholeCameraMetadata +from d123.datatypes.sensors.lidar.lidar import LiDAR, LiDARMetadata from d123.datatypes.time.time_point import TimePoint from d123.datatypes.vehicle_state.ego_state import EgoStateSE3 from d123.datatypes.vehicle_state.vehicle_parameters import VehicleParameters from d123.geometry import BoundingBoxSE3, Vector3D +from d123.geometry.se import StateSE3 DATASET_SENSOR_ROOT: Dict[str, Path] = { "nuplan": Path(os.environ["NUPLAN_DATA_ROOT"]) / "nuplan-v1.1" / "sensor_blobs", "carla": Path(os.environ["CARLA_DATA_ROOT"]) / "sensor_blobs", - "av2-sensor": Path(os.environ["AV2_SENSOR_DATA_ROOT"]) / "sensor_mini", + # "av2-sensor": Path(os.environ["AV2_SENSOR_DATA_ROOT"]) / "sensor_mini", } @@ -93,13 +94,13 @@ def get_traffic_light_detections_from_arrow_table(arrow_table: pa.Table, index: def get_camera_from_arrow_table( arrow_table: pa.Table, index: int, - camera_metadata: CameraMetadata, + camera_metadata: PinholeCameraMetadata, log_metadata: LogMetadata, -) -> Camera: +) -> PinholeCamera: table_data = arrow_table[camera_metadata.camera_type.serialize()][index].as_py() - extrinsic = arrow_table[f"{camera_metadata.camera_type.serialize()}_extrinsic"][index].as_py() - extrinsic = 
np.array(extrinsic).reshape((4, 4)) if extrinsic else None + extrinsic_list = arrow_table[f"{camera_metadata.camera_type.serialize()}_extrinsic"][index].as_py() + extrinsic = StateSE3.from_list(extrinsic_list) if extrinsic_list is not None else None if table_data is None or extrinsic is None: return None @@ -118,7 +119,7 @@ def get_camera_from_arrow_table( else: raise NotImplementedError("Only string file paths for camera data are supported.") - return Camera( + return PinholeCamera( metadata=camera_metadata, image=image, extrinsic=extrinsic, diff --git a/d123/datatypes/scene/scene_filter.py b/d123/datatypes/scene/scene_filter.py index 5dbb5ba6..c05073db 100644 --- a/d123/datatypes/scene/scene_filter.py +++ b/d123/datatypes/scene/scene_filter.py @@ -1,7 +1,7 @@ from dataclasses import dataclass from typing import List, Optional -from d123.datatypes.sensors.camera import CameraType +from d123.datatypes.sensors.camera.pinhole_camera import PinholeCameraType # TODO: Add more filter options (e.g. scene tags, ego movement, or whatever appropriate) @@ -23,7 +23,7 @@ class SceneFilter: duration_s: Optional[float] = 10.0 history_s: Optional[float] = 3.0 - camera_types: Optional[List[CameraType]] = None + camera_types: Optional[List[PinholeCameraType]] = None max_num_scenes: Optional[int] = None shuffle: bool = False @@ -34,10 +34,10 @@ def __post_init__(self): camera_types = [] for camera_type in self.camera_types: if isinstance(camera_type, str): - camera_type = CameraType.deserialize[camera_type] + camera_type = PinholeCameraType.deserialize[camera_type] camera_types.append(camera_type) elif isinstance(camera_type, int): - camera_type = CameraType(camera_type) + camera_type = PinholeCameraType(camera_type) camera_types.append(camera_type) else: raise ValueError(f"Invalid camera type: {camera_type}") diff --git a/d123/datatypes/sensors/__init__.py b/d123/datatypes/sensors/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/d123/datatypes/sensors/camera.py b/d123/datatypes/sensors/camera.py deleted file mode 100644 index 56fe6f07..00000000 --- a/d123/datatypes/sensors/camera.py +++ /dev/null @@ -1,117 +0,0 @@ -from __future__ import annotations - -import json -from dataclasses import dataclass -from typing import Any, Dict - -import numpy as np -import numpy.typing as npt - -from d123.common.utils.enums import SerialIntEnum - - -class CameraType(SerialIntEnum): - """ - Enum for cameras in d123. - """ - - CAM_F0 = 0 - CAM_B0 = 1 - CAM_L0 = 2 - CAM_L1 = 3 - CAM_L2 = 4 - CAM_R0 = 5 - CAM_R1 = 6 - CAM_R2 = 7 - CAM_STEREO_L = 8 - CAM_STEREO_R = 9 - - -@dataclass -class CameraMetadata: - - camera_type: CameraType - width: int - height: int - intrinsic: npt.NDArray[np.float64] # 3x3 matrix # TODO: don't store matrix but values. - distortion: npt.NDArray[np.float64] # 5x1 vector # TODO: don't store matrix but values. - - def to_dict(self) -> Dict[str, Any]: - # TODO: remove None types. Only a placeholder for now. - return { - "camera_type": int(self.camera_type), - "width": self.width, - "height": self.height, - "intrinsic": self.intrinsic.tolist() if self.intrinsic is not None else None, - "distortion": self.distortion.tolist() if self.distortion is not None else None, - } - - @classmethod - def from_dict(cls, json_dict: Dict[str, Any]) -> CameraMetadata: - # TODO: remove None types. Only a placeholder for now. 
-        return cls(
-            camera_type=CameraType(json_dict["camera_type"]),
-            width=json_dict["width"],
-            height=json_dict["height"],
-            intrinsic=np.array(json_dict["intrinsic"]) if json_dict["intrinsic"] is not None else None,
-            distortion=np.array(json_dict["distortion"]) if json_dict["distortion"] is not None else None,
-        )
-
-    @property
-    def aspect_ratio(self) -> float:
-        return self.width / self.height
-
-    @property
-    def fov_x(self) -> float:
-        """
-        Calculates the horizontal field of view (FOV) in radian.
-        """
-        fx = self.intrinsic[0, 0]
-        fov_x_rad = 2 * np.arctan(self.width / (2 * fx))
-        return fov_x_rad
-
-    @property
-    def fov_y(self) -> float:
-        """
-        Calculates the vertical field of view (FOV) in radian.
-        """
-        fy = self.intrinsic[1, 1]
-        fov_y_rad = 2 * np.arctan(self.height / (2 * fy))
-        return fov_y_rad
-
-
-def camera_metadata_dict_to_json(camera_metadata: Dict[CameraType, CameraMetadata]) -> Dict[str, Dict[str, Any]]:
-    """
-    Converts a dictionary of CameraMetadata to a JSON-serializable format.
-    :param camera_metadata: Dictionary of CameraMetadata.
-    :return: JSON-serializable dictionary.
-    """
-    camera_metadata_dict = {
-        camera_type.serialize(): metadata.to_dict() for camera_type, metadata in camera_metadata.items()
-    }
-    return json.dumps(camera_metadata_dict)
-
-
-def camera_metadata_dict_from_json(json_dict: Dict[str, Dict[str, Any]]) -> Dict[CameraType, CameraMetadata]:
-    """
-    Converts a JSON-serializable dictionary back to a dictionary of CameraMetadata.
-    :param json_dict: JSON-serializable dictionary.
-    :return: Dictionary of CameraMetadata.
-    """
-    camera_metadata_dict = json.loads(json_dict)
-    return {
-        CameraType.deserialize(camera_type): CameraMetadata.from_dict(metadata)
-        for camera_type, metadata in camera_metadata_dict.items()
-    }
-
-
-@dataclass
-class Camera:
-
-    metadata: CameraMetadata
-    image: npt.NDArray[np.uint8]
-    extrinsic: npt.NDArray[np.float64]  # 4x4 matrix
-
-    def get_view_matrix(self) -> np.ndarray:
-        # Compute the view matrix based on the camera's position and orientation
-        pass
diff --git a/d123/datatypes/sensors/camera/__init__.py b/d123/datatypes/sensors/camera/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/d123/datatypes/sensors/camera/pinhole_camera.py b/d123/datatypes/sensors/camera/pinhole_camera.py
new file mode 100644
index 00000000..8f7f8d44
--- /dev/null
+++ b/d123/datatypes/sensors/camera/pinhole_camera.py
@@ -0,0 +1,260 @@
+from __future__ import annotations
+
+import json
+from dataclasses import dataclass
+from typing import Any, Dict, Optional
+
+import numpy as np
+import numpy.typing as npt
+from enum import IntEnum
+
+from d123.common.utils.enums import SerialIntEnum
+from d123.common.utils.mixin import ArrayMixin
+from d123.geometry.se import StateSE3
+
+
+class PinholeCameraType(SerialIntEnum):
+    """
+    Enum for cameras in d123.
+ """ + + CAM_F0 = 0 + CAM_B0 = 1 + CAM_L0 = 2 + CAM_L1 = 3 + CAM_L2 = 4 + CAM_R0 = 5 + CAM_R1 = 6 + CAM_R2 = 7 + CAM_STEREO_L = 8 + CAM_STEREO_R = 9 + + +class PinholeIntrinsicsIndex(IntEnum): + + FX = 0 + FY = 1 + CX = 2 + CY = 3 + SKEW = 4 # NOTE: not used, but added for completeness + + +class PinholeIntrinsics(ArrayMixin): + + _array: npt.NDArray[np.float64] + + def __init__(self, fx: float, fy: float, cx: float, cy: float, skew: float = 0.0) -> None: + array = np.zeros(len(PinholeIntrinsicsIndex), dtype=np.float64) + array[PinholeIntrinsicsIndex.FX] = fx + array[PinholeIntrinsicsIndex.FY] = fy + array[PinholeIntrinsicsIndex.CX] = cx + array[PinholeIntrinsicsIndex.CY] = cy + array[PinholeIntrinsicsIndex.SKEW] = skew + object.__setattr__(self, "_array", array) + + @classmethod + def from_array(cls, array: npt.NDArray[np.float64], copy: bool = True) -> PinholeIntrinsics: + assert array.ndim == 1 + assert array.shape[-1] == len(PinholeIntrinsicsIndex) + instance = object.__new__(cls) + object.__setattr__(instance, "_array", array.copy() if copy else array) + return instance + + @classmethod + def from_camera_matrix(cls, intrinsic: npt.NDArray[np.float64]) -> PinholeIntrinsics: + """ + Create a PinholeIntrinsics from a 3x3 intrinsic matrix. + :param intrinsic: A 3x3 numpy array representing the intrinsic matrix. + :return: A PinholeIntrinsics instance. + """ + assert intrinsic.shape == (3, 3) + fx = intrinsic[0, 0] + fy = intrinsic[1, 1] + cx = intrinsic[0, 2] + cy = intrinsic[1, 2] + skew = intrinsic[0, 1] # Not used in most cases. + array = np.array([fx, fy, cx, cy, skew], dtype=np.float64) + return cls.from_array(array, copy=False) + + @property + def array(self) -> npt.NDArray[np.float64]: + return self._array + + @property + def fx(self) -> float: + return self._array[PinholeIntrinsicsIndex.FX] + + @property + def fy(self) -> float: + return self._array[PinholeIntrinsicsIndex.FY] + + @property + def cx(self) -> float: + return self._array[PinholeIntrinsicsIndex.CX] + + @property + def cy(self) -> float: + return self._array[PinholeIntrinsicsIndex.CY] + + @property + def skew(self) -> float: + return self._array[PinholeIntrinsicsIndex.SKEW] + + @property + def camera_matrix(self) -> npt.NDArray[np.float64]: + """ + Returns the intrinsic matrix. + :return: A 3x3 numpy array representing the intrinsic matrix. 
+ """ + K = np.array( + [ + [self.fx, self.skew, self.cx], + [0.0, self.fy, self.cy], + [0.0, 0.0, 1.0], + ], + dtype=np.float64, + ) + return K + + +class PinholeDistortionIndex(IntEnum): + K1 = 0 + K2 = 1 + P1 = 2 + P2 = 3 + K3 = 4 + + +class PinholeDistortion(ArrayMixin): + _array: npt.NDArray[np.float64] + + def __init__(self, k1: float, k2: float, p1: float, p2: float, k3: float) -> None: + array = np.zeros(len(PinholeDistortionIndex), dtype=np.float64) + array[PinholeDistortionIndex.K1] = k1 + array[PinholeDistortionIndex.K2] = k2 + array[PinholeDistortionIndex.P1] = p1 + array[PinholeDistortionIndex.P2] = p2 + array[PinholeDistortionIndex.K3] = k3 + object.__setattr__(self, "_array", array) + + @classmethod + def from_array(cls, array: npt.NDArray[np.float64], copy: bool = True) -> PinholeDistortion: + assert array.ndim == 1 + assert array.shape[-1] == len(PinholeDistortionIndex) + instance = object.__new__(cls) + object.__setattr__(instance, "_array", array.copy() if copy else array) + return instance + + @property + def array(self) -> npt.NDArray[np.float64]: + return self._array + + @property + def k1(self) -> float: + return self._array[PinholeDistortionIndex.K1] + + @property + def k2(self) -> float: + return self._array[PinholeDistortionIndex.K2] + + @property + def p1(self) -> float: + return self._array[PinholeDistortionIndex.P1] + + @property + def p2(self) -> float: + return self._array[PinholeDistortionIndex.P2] + + @property + def k3(self) -> float: + return self._array[PinholeDistortionIndex.K3] + + +@dataclass +class PinholeCameraMetadata: + + camera_type: PinholeCameraType + intrinsics: Optional[PinholeIntrinsics] + distortion: Optional[PinholeDistortion] + width: int + height: int + + @classmethod + def from_dict(cls, json_dict: Dict[str, Any]) -> PinholeCameraMetadata: + return cls( + camera_type=PinholeCameraType(json_dict["camera_type"]), + intrinsics=( + PinholeIntrinsics.from_list(json_dict["intrinsics"]) if json_dict["intrinsics"] is not None else None + ), + distortion=( + PinholeDistortion.from_list(json_dict["distortion"]) if json_dict["distortion"] is not None else None + ), + width=int(json_dict["width"]), + height=int(json_dict["height"]), + ) + + def to_dict(self) -> Dict[str, Any]: + return { + "camera_type": int(self.camera_type), + "intrinsics": self.intrinsics.tolist() if self.intrinsics is not None else None, + "distortion": self.distortion.tolist() if self.distortion is not None else None, + "width": self.width, + "height": self.height, + } + + @property + def aspect_ratio(self) -> float: + return self.width / self.height + + @property + def fov_x(self) -> float: + """ + Calculates the horizontal field of view (FOV) in radian. + """ + fov_x_rad = 2 * np.arctan(self.width / (2 * self.intrinsics.fx)) + return fov_x_rad + + @property + def fov_y(self) -> float: + """ + Calculates the vertical field of view (FOV) in radian. + """ + fov_y_rad = 2 * np.arctan(self.height / (2 * self.intrinsics.fy)) + return fov_y_rad + + +def camera_metadata_dict_to_json( + camera_metadata: Dict[PinholeCameraType, PinholeCameraMetadata], +) -> Dict[str, Dict[str, Any]]: + """ + Converts a dictionary of CameraMetadata to a JSON-serializable format. + :param camera_metadata: Dictionary of CameraMetadata. + :return: JSON-serializable dictionary. 
+ """ + camera_metadata_dict = { + camera_type.serialize(): metadata.to_dict() for camera_type, metadata in camera_metadata.items() + } + return json.dumps(camera_metadata_dict) + + +def camera_metadata_dict_from_json( + json_dict: Dict[str, Dict[str, Any]], +) -> Dict[PinholeCameraType, PinholeCameraMetadata]: + """ + Converts a JSON-serializable dictionary back to a dictionary of CameraMetadata. + :param json_dict: JSON-serializable dictionary. + :return: Dictionary of CameraMetadata. + """ + camera_metadata_dict = json.loads(json_dict) + return { + PinholeCameraType.deserialize(camera_type): PinholeCameraMetadata.from_dict(metadata) + for camera_type, metadata in camera_metadata_dict.items() + } + + +@dataclass +class PinholeCamera: + + metadata: PinholeCameraMetadata + image: npt.NDArray[np.uint8] + extrinsic: StateSE3 diff --git a/d123/datatypes/sensors/lidar/__init__.py b/d123/datatypes/sensors/lidar/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/d123/datatypes/sensors/lidar.py b/d123/datatypes/sensors/lidar/lidar.py similarity index 93% rename from d123/datatypes/sensors/lidar.py rename to d123/datatypes/sensors/lidar/lidar.py index f15ccc89..00356ee4 100644 --- a/d123/datatypes/sensors/lidar.py +++ b/d123/datatypes/sensors/lidar/lidar.py @@ -8,7 +8,8 @@ import numpy.typing as npt from d123.common.utils.enums import SerialIntEnum -from d123.datatypes.sensors.lidar_index import LIDAR_INDEX_REGISTRY, LiDARIndex +from d123.datasets.utils.sensor.lidar_index_registry import LIDAR_INDEX_REGISTRY, LiDARIndex +from d123.geometry import StateSE3 class LiDARType(SerialIntEnum): @@ -27,8 +28,7 @@ class LiDARMetadata: lidar_type: LiDARType lidar_index: Type[LiDARIndex] - extrinsic: Optional[npt.NDArray[np.float64]] = None # 4x4 matrix - + extrinsic: Optional[StateSE3] = None # TODO: add identifier if point cloud is returned in lidar or ego frame. def to_dict(self) -> dict: @@ -44,7 +44,7 @@ def from_dict(cls, json_dict: dict) -> LiDARMetadata: if json_dict["lidar_index"] not in LIDAR_INDEX_REGISTRY: raise ValueError(f"Unknown lidar index: {json_dict['lidar_index']}") lidar_index_class = LIDAR_INDEX_REGISTRY[json_dict["lidar_index"]] - extrinsic = np.array(json_dict["extrinsic"]) if json_dict["extrinsic"] is not None else None + extrinsic = StateSE3.from_list(json_dict["extrinsic"]) if json_dict["extrinsic"] is not None else None return cls(lidar_type=lidar_type, lidar_index=lidar_index_class, extrinsic=extrinsic) diff --git a/d123/script/config/common/default_common.yaml b/d123/script/config/common/default_common.yaml index f3f88913..5012ff14 100644 --- a/d123/script/config/common/default_common.yaml +++ b/d123/script/config/common/default_common.yaml @@ -3,12 +3,11 @@ defaults: - worker: ray_distributed - scene_filter: all_scenes - scene_builder: default_scene_builder - - override hydra/hydra_logging: colorlog - override hydra/job_logging: colorlog + - override hydra/hydra_logging: colorlog - _self_ distributed_timeout_seconds: 7200 # Sets how long to wait while synchronizing across worker nodes in a distributed context. - selected_simulation_metrics: null # Sets verbosity level, in particular determines if progress bars are shown or not. 
diff --git a/d123/script/config/common/default_experiment.yaml b/d123/script/config/common/default_experiment.yaml index 6ad3c3d5..9617f335 100644 --- a/d123/script/config/common/default_experiment.yaml +++ b/d123/script/config/common/default_experiment.yaml @@ -1,5 +1,7 @@ defaults: - default_dataset_paths + - override hydra/job_logging: colorlog + - override hydra/hydra_logging: colorlog - _self_ # Cache parameters diff --git a/d123/script/config/dataset_conversion/default_dataset_conversion.yaml b/d123/script/config/dataset_conversion/default_dataset_conversion.yaml index 0a4544da..b126ba58 100644 --- a/d123/script/config/dataset_conversion/default_dataset_conversion.yaml +++ b/d123/script/config/dataset_conversion/default_dataset_conversion.yaml @@ -1,24 +1,22 @@ hydra: run: - dir: ${output_dir} - output_subdir: ${output_dir}/code/hydra # Store hydra's config breakdown here for debugging - searchpath: # Only in these paths are discoverable + dir: . + output_subdir: null + searchpath: - pkg://d123.script.config - pkg://d123.script.config.common - job: chdir: False - +# defaults: - default_common - - default_experiment - default_dataset_paths - - _self_ - datasets: - # - nuplan_private_dataset + - nuplan_private_dataset # - carla_dataset # - wopd_dataset - - av2_sensor_dataset + # - av2_sensor_dataset + - _self_ +force_map_conversion: False force_log_conversion: True -force_map_conversion: True diff --git a/d123/script/config/datasets/__init__.py b/d123/script/config/datasets/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/d123/script/config/datasets/nuplan_private_dataset.yaml b/d123/script/config/datasets/nuplan_private_dataset.yaml index af6f16ba..7062f38f 100644 --- a/d123/script/config/datasets/nuplan_private_dataset.yaml +++ b/d123/script/config/datasets/nuplan_private_dataset.yaml @@ -12,5 +12,5 @@ nuplan_private_dataset: output_path: ${d123_data_root} force_log_conversion: ${force_log_conversion} force_map_conversion: ${force_map_conversion} - camera_store_option: "binary" + camera_store_option: "path" lidar_store_option: "path" diff --git a/d123/script/run_dataset_conversion.py b/d123/script/run_dataset_conversion.py index 62ad1e30..cdbee27d 100644 --- a/d123/script/run_dataset_conversion.py +++ b/d123/script/run_dataset_conversion.py @@ -2,7 +2,6 @@ from typing import List import hydra -from nuplan.planning.script.builders.logging_builder import build_logger from omegaconf import DictConfig from d123.script.builders.data_converter_builder import RawDataConverter, build_data_converter @@ -20,8 +19,6 @@ def main(cfg: DictConfig) -> None: Main entrypoint for metric caching. 
     :param cfg: omegaconf dictionary
     """
-    # Configure logger
-    build_logger(cfg)
 
     # Build worker
     worker = build_worker(cfg)
diff --git a/pyproject.toml b/pyproject.toml
index 44061249..4e2da9e5 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -86,7 +86,7 @@ nuplan = [
     "retry",
 ]
 waymo = [
-    "tensorflow==2.11.0",
+    "tensorflow==2.16.1",
     "waymo-open-dataset-tf-2-11-0",
     "intervaltree",
 ]
diff --git a/scripts/dataset/run_log_caching.sh b/scripts/dataset/run_log_caching.sh
index 34ff923c..923aaeca 100644
--- a/scripts/dataset/run_log_caching.sh
+++ b/scripts/dataset/run_log_caching.sh
@@ -2,6 +2,4 @@
 python $D123_DEVKIT_ROOT/d123/script/run_dataset_conversion.py \
-experiment_name="caching" \
 worker.threads_per_node=32
-# worker=single_machine_thread_pool
diff --git a/test_viser.py b/test_viser.py
index 9daa2d9c..b9fdeec9 100644
--- a/test_viser.py
+++ b/test_viser.py
@@ -4,14 +4,14 @@
 from d123.common.visualization.viser.viser_viewer import ViserViewer
 from d123.datatypes.scene.arrow.arrow_scene_builder import ArrowSceneBuilder
 from d123.datatypes.scene.scene_filter import SceneFilter
-from d123.datatypes.sensors.camera import CameraType
+from d123.datatypes.sensors.camera.pinhole_camera import PinholeCameraType
 
 if __name__ == "__main__":
 
-    # splits = ["nuplan_private_test"]
+    splits = ["nuplan_private_test"]
     # splits = ["carla"]
     # splits = ["wopd_train"]
-    splits = ["av2-sensor-mini_train"]
+    # splits = ["av2-sensor-mini_train"]
 
     log_names = None
     scene_tokens = None
@@ -20,15 +20,14 @@
         split_names=splits,
         log_names=log_names,
         scene_tokens=scene_tokens,
-        duration_s=10,
+        duration_s=18,
         history_s=0.5,
         timestamp_threshold_s=10,
-        shuffle=False,
-        camera_types=[CameraType.CAM_F0],
+        shuffle=True,
+        camera_types=[PinholeCameraType.CAM_F0],
     )
     scene_builder = ArrowSceneBuilder(os.environ["D123_DATA_ROOT"])
     worker = Sequential()
-    # worker = RayDistributed()
     scenes = scene_builder.get_scenes(scene_filter, worker)
     print(f"Found {len(scenes)} scenes")
    visualization_server = ViserViewer(scenes, scene_index=0)

From e176d8f475c09b303ed833286fe91b5faa501954 Mon Sep 17 00:00:00 2001
From: Daniel Dauner
Date: Mon, 6 Oct 2025 22:00:13 +0200
Subject: [PATCH 056/145] Larger update on the Scene API and object. Smarter
 management of memory maps, refactoring, and cleaner metadata structure.
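[Reviewer note, not part of the patch] In user-facing terms, the Scene accessors move from getters to properties in this commit; a rough sketch, continuing from the test_viser.py setup above (where `scenes` is the list returned by the builder), with names taken from the diffs below:

scene = scenes[0]

map_api = scene.get_map_api()        # was: scene.map_api (property)
n_iter = scene.number_of_iterations  # was: scene.get_number_of_iterations()
ego_0 = scene.get_ego_state_at_iteration(0)
boxes_0 = scene.get_box_detections_at_iteration(0)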
--- d123/common/utils/arrow_helper.py | 27 ++- d123/common/utils/mixin.py | 2 + .../visualization/matplotlib/observation.py | 6 +- d123/common/visualization/matplotlib/plots.py | 6 +- d123/common/visualization/matplotlib/utils.py | 2 - d123/common/visualization/utils/.gitkeep | 0 .../viser/elements/map_elements.py | 2 +- .../visualization/viser/viser_config.py | 4 +- .../visualization/viser/viser_viewer.py | 2 +- d123/datasets/av2/av2_data_converter.py | 40 ++-- d123/datasets/carla/carla_data_converter.py | 4 +- d123/datasets/nuplan/nuplan_data_converter.py | 37 ++- .../{load_sensor.py => nuplan_load_sensor.py} | 0 d123/datasets/utils/arrow_schema.py | 0 d123/datasets/wopd/wopd_data_converter.py | 57 +++-- d123/datatypes/maps/gpkg/gpkg_map.py | 8 +- d123/datatypes/scene/abstract_scene.py | 83 ++++--- d123/datatypes/scene/arrow/__init__.py | 0 d123/datatypes/scene/arrow/arrow_scene.py | 224 ++++++------------ .../scene/arrow/arrow_scene_builder.py | 43 ++-- d123/datatypes/scene/arrow/utils/__init__.py | 0 .../utils/{conversion.py => arrow_getters.py} | 31 +-- .../scene/arrow/utils/arrow_metadata_utils.py | 19 ++ d123/datatypes/scene/scene_metadata.py | 44 +++- .../sensors/camera/pinhole_camera.py | 79 ++---- d123/datatypes/sensors/lidar/lidar.py | 40 +--- .../vehicle_state/vehicle_parameters.py | 11 +- d123/geometry/utils/rotation_utils.py | 46 ++++ .../default_dataset_conversion.yaml | 2 +- d123/simulation/agents/idm_agents.py | 4 +- d123/simulation/agents/path_following.py | 2 +- d123/simulation/agents/smart_agents.py | 2 +- d123/simulation/gym/demo_gym_env.py | 11 +- .../reward_builder/default_reward_builder.py | 2 +- d123/simulation/gym/gym_env.py | 9 +- .../metrics/sim_agents/interaction_based.py | 2 +- .../metrics/sim_agents/sim_agents.py | 6 +- d123/simulation/metrics/sim_agents/utils.py | 6 +- .../observation/agents_observation.py | 4 +- d123/simulation/simulation_2d.py | 2 +- .../time_controller/log_time_controller.py | 2 +- .../feature_builder/smart_feature_builder.py | 4 +- notebooks/gym/test_simulation_2d.ipynb | 2 +- notebooks/scene_rendering.ipynb | 2 +- notebooks/smarty/smart_rollout.ipynb | 2 +- notebooks/viz/bev_matplotlib.ipynb | 12 +- notebooks/viz/bev_matplotlib_prediction.ipynb | 6 +- notebooks/viz/camera_matplotlib.ipynb | 4 +- notebooks/viz/video_example.ipynb | 10 +- test_viser.py | 11 +- 50 files changed, 464 insertions(+), 460 deletions(-) create mode 100644 d123/common/visualization/utils/.gitkeep rename d123/datasets/nuplan/{load_sensor.py => nuplan_load_sensor.py} (100%) create mode 100644 d123/datasets/utils/arrow_schema.py create mode 100644 d123/datatypes/scene/arrow/__init__.py create mode 100644 d123/datatypes/scene/arrow/utils/__init__.py rename d123/datatypes/scene/arrow/utils/{conversion.py => arrow_getters.py} (85%) create mode 100644 d123/datatypes/scene/arrow/utils/arrow_metadata_utils.py diff --git a/d123/common/utils/arrow_helper.py b/d123/common/utils/arrow_helper.py index 6f68c152..54d231c7 100644 --- a/d123/common/utils/arrow_helper.py +++ b/d123/common/utils/arrow_helper.py @@ -1,8 +1,12 @@ +from functools import lru_cache from pathlib import Path -from typing import Union +from typing import Final, Union import pyarrow as pa +# TODO: Tune Parameters and add to config? 
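+# NOTE (reviewer sketch, untuned): each cached table keeps one memory map open, so this
+# limit, multiplied across worker processes, should stay well below the kernel's
+# /proc/sys/vm/max_map_count (see the note in get_lru_cached_arrow_table below).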
+MAX_LRU_CACHED_TABLES: Final[int] = 4096 + def open_arrow_table(arrow_file_path: Union[str, Path]) -> pa.Table: with pa.memory_map(str(arrow_file_path), "rb") as source: @@ -14,3 +18,24 @@ def write_arrow_table(table: pa.Table, arrow_file_path: Union[str, Path]) -> Non with pa.OSFile(str(arrow_file_path), "wb") as sink: with pa.ipc.new_file(sink, table.schema) as writer: writer.write_table(table) + + +@lru_cache(maxsize=MAX_LRU_CACHED_TABLES) +def get_lru_cached_arrow_table(arrow_file_path: Union[str, Path]) -> pa.Table: + """Get a memory-mapped arrow table from the LRU cache or load it from disk. + + :param arrow_file_path: The path to the arrow file. + :return: The cached memory-mapped arrow table. + """ + + # NOTE @DanielDauner: The number of memory maps that a process can have is limited by the + # linux kernel parameter /proc/sys/vm/max_map_count (default: 65530 in most distributions). + # Thus, we cache memory-mapped arrow tables with an LRU cache to avoid + # hitting this limit, specifically since many scenes/routines access the same table. + # During cache eviction, the functools implementation calls __del__ on the + # evicted cache entry, which closes the memory map, if no other references to the table exist. + # Thus it is beneficial to keep track of all references to the table, otherwise the memory map + # will not be closed and the limit can still be hit. + # Not fully satisfied with this solution. Please reach out if you have a better idea. + + return open_arrow_table(str(arrow_file_path)) diff --git a/d123/common/utils/mixin.py b/d123/common/utils/mixin.py index 1e17db64..138a0e57 100644 --- a/d123/common/utils/mixin.py +++ b/d123/common/utils/mixin.py @@ -3,6 +3,8 @@ import numpy as np import numpy.typing as npt +# import pyarrow as pa + class ArrayMixin: """Mixin class for object entities.""" diff --git a/d123/common/visualization/matplotlib/observation.py b/d123/common/visualization/matplotlib/observation.py index ed46c6a9..7b809f41 100644 --- a/d123/common/visualization/matplotlib/observation.py +++ b/d123/common/visualization/matplotlib/observation.py @@ -26,10 +26,8 @@ from d123.datatypes.maps.map_datatypes import MapLayer from d123.datatypes.scene.abstract_scene import AbstractScene from d123.datatypes.vehicle_state.ego_state import EgoStateSE2, EgoStateSE3 -from d123.geometry import BoundingBoxSE2, BoundingBoxSE3, Point2D -from d123.geometry.geometry_index import StateSE2Index +from d123.geometry import BoundingBoxSE2, BoundingBoxSE3, Point2D, StateSE2Index, Vector2D from d123.geometry.transform.transform_se2 import translate_se2_along_body_frame -from d123.geometry.vector import Vector2D def add_default_map_on_ax( @@ -100,7 +98,7 @@ def add_box_future_detections_to_ax(ax: plt.Axes, scene: AbstractScene, iteratio if agent.metadata.detection_type == DetectionType.VEHICLE } frequency = 1 - for iteration in range(iteration + frequency, scene.get_number_of_iterations(), frequency): + for iteration in range(iteration + frequency, scene.number_of_iterations, frequency): agents = scene.get_box_detections_at_iteration(iteration) for agent in agents: if agent.metadata.track_token in agents_poses: diff --git a/d123/common/visualization/matplotlib/plots.py b/d123/common/visualization/matplotlib/plots.py index 7a1010cb..b61b8ab2 100644 --- a/d123/common/visualization/matplotlib/plots.py +++ b/d123/common/visualization/matplotlib/plots.py @@ -20,7 +20,7 @@ def _plot_scene_on_ax(ax: plt.Axes, scene: AbstractScene, iteration: int = 0, ra box_detections = 
scene.get_box_detections_at_iteration(iteration) traffic_light_detections = scene.get_traffic_light_detections_at_iteration(iteration) route_lane_group_ids = scene.get_route_lane_group_ids(iteration) - map_api = scene.map_api + map_api = scene.get_map_api() point_2d = ego_vehicle_state.bounding_box.center.state_se2.point_2d add_default_map_on_ax(ax, map_api, point_2d, radius=radius, route_lane_group_ids=route_lane_group_ids) @@ -63,8 +63,8 @@ def render_scene_animation( scene.open() if end_idx is None: - end_idx = scene.get_number_of_iterations() - end_idx = min(end_idx, scene.get_number_of_iterations()) + end_idx = scene.number_of_iterations + end_idx = min(end_idx, scene.number_of_iterations) fig, ax = plt.subplots(figsize=(10, 10)) diff --git a/d123/common/visualization/matplotlib/utils.py b/d123/common/visualization/matplotlib/utils.py index 9e030b80..ef335bb0 100644 --- a/d123/common/visualization/matplotlib/utils.py +++ b/d123/common/visualization/matplotlib/utils.py @@ -1,4 +1,3 @@ -from functools import lru_cache from typing import Union import matplotlib.patches as patches @@ -99,7 +98,6 @@ def add_shapely_linestring_to_ax( return ax -@lru_cache(maxsize=32) def get_pose_triangle(size: float) -> geom.Polygon: """Create a triangle shape for the pose.""" half_size = size / 2 diff --git a/d123/common/visualization/utils/.gitkeep b/d123/common/visualization/utils/.gitkeep new file mode 100644 index 00000000..e69de29b diff --git a/d123/common/visualization/viser/elements/map_elements.py b/d123/common/visualization/viser/elements/map_elements.py index 90c247cb..d25fe73f 100644 --- a/d123/common/visualization/viser/elements/map_elements.py +++ b/d123/common/visualization/viser/elements/map_elements.py @@ -43,7 +43,7 @@ def _get_map_trimesh_dict( MapLayer.CARPARK, MapLayer.GENERIC_DRIVABLE, ] - map_objects_dict = scene.map_api.get_proximal_map_objects( + map_objects_dict = scene.get_map_api().get_proximal_map_objects( scene_center.point_2d, radius=viser_config.map_radius, layers=map_layers, diff --git a/d123/common/visualization/viser/viser_config.py b/d123/common/visualization/viser/viser_config.py index 276d6762..f99f7823 100644 --- a/d123/common/visualization/viser/viser_config.py +++ b/d123/common/visualization/viser/viser_config.py @@ -45,7 +45,7 @@ class ViserConfig: # Map map_visible: bool = True map_radius: float = 200.0 # [m] - map_non_road_z_offset: float = 0.0 # small translation to place crosswalks, parking, etc. on top of the road + map_non_road_z_offset: float = 0.1 # small z-translation to place crosswalks, parking, etc. 
on top of the road # Bounding boxes bounding_box_visible: bool = True @@ -65,7 +65,7 @@ class ViserConfig: camera_gui_image_scale: float = 0.25 # Resize factor for the camera image shown in the GUI (<1.0 for speed) # LiDAR - lidar_visible: bool = True + lidar_visible: bool = False lidar_types: List[LiDARType] = field(default_factory=lambda: all_lidar_types.copy()) lidar_point_size: float = 0.05 lidar_point_shape: Literal["square", "diamond", "circle", "rounded", "sparkle"] = "circle" diff --git a/d123/common/visualization/viser/viser_viewer.py b/d123/common/visualization/viser/viser_viewer.py index 2bc44c3d..8cf1ec80 100644 --- a/d123/common/visualization/viser/viser_viewer.py +++ b/d123/common/visualization/viser/viser_viewer.py @@ -113,7 +113,7 @@ def next(self) -> None: self.set_scene(self._scenes[self._scene_index]) def set_scene(self, scene: AbstractScene) -> None: - num_frames = scene.get_number_of_iterations() + num_frames = scene.number_of_iterations initial_ego_state: EgoStateSE3 = scene.get_ego_state_at_iteration(0) with self._viser_server.gui.add_folder("Playback"): diff --git a/d123/datasets/av2/av2_data_converter.py b/d123/datasets/av2/av2_data_converter.py index 638eb205..63a5279d 100644 --- a/d123/datasets/av2/av2_data_converter.py +++ b/d123/datasets/av2/av2_data_converter.py @@ -1,7 +1,5 @@ import gc import hashlib -import json -from dataclasses import asdict from functools import partial from pathlib import Path from typing import Any, Dict, List, Optional, Tuple, Union @@ -24,15 +22,15 @@ ) from d123.datasets.av2.av2_map_conversion import convert_av2_map from d123.datasets.raw_data_converter import DataConverterConfig, RawDataConverter +from d123.datatypes.scene.arrow.utils.arrow_metadata_utils import add_log_metadata_to_arrow_schema from d123.datatypes.scene.scene_metadata import LogMetadata from d123.datatypes.sensors.camera.pinhole_camera import ( PinholeCameraMetadata, PinholeCameraType, PinholeDistortion, PinholeIntrinsics, - camera_metadata_dict_to_json, ) -from d123.datatypes.sensors.lidar.lidar import LiDARMetadata, LiDARType, lidar_metadata_dict_to_json +from d123.datatypes.sensors.lidar.lidar import LiDARMetadata, LiDARType from d123.datatypes.time.time_point import TimePoint from d123.datatypes.vehicle_state.ego_state import DynamicStateSE3, EgoStateSE3, EgoStateSE3Index from d123.datatypes.vehicle_state.vehicle_parameters import ( @@ -40,6 +38,7 @@ rear_axle_se3_to_center_se3, ) from d123.geometry import BoundingBoxSE3Index, StateSE3, Vector3D, Vector3DIndex +from d123.geometry.geometry_index import StateSE3Index from d123.geometry.transform.transform_se3 import convert_relative_to_absolute_se3_array @@ -174,16 +173,18 @@ def convert_av2_log_to_arrow( sensor_df = build_sensor_dataframe(log_path) synchronization_df = build_synchronization_dataframe(sensor_df) - metadata = LogMetadata( + log_metadata = LogMetadata( dataset="av2-sensor", + split=split, log_name=log_path.name, - location=None, - timestep_seconds=0.1, # TODO: verify this + location=None, # TODO: Add location information. 
+ timestep_seconds=0.1, + vehicle_parameters=get_av2_ford_fusion_hybrid_parameters(), + camera_metadata=get_av2_camera_metadata(log_path), + lidar_metadata=get_av2_lidar_metadata(log_path), map_has_z=True, + map_is_local=True, ) - vehicle_parameters = get_av2_ford_fusion_hybrid_parameters() # TODO: Add av2 vehicle parameters - camera_metadata = get_av2_camera_metadata(log_path) - lidar_metadata = get_av2_lidar_metadata(log_path) schema_column_list = [ ("token", pa.string()), @@ -199,31 +200,26 @@ def convert_av2_log_to_arrow( ("route_lane_group_ids", pa.list_(pa.int64())), ] if data_converter_config.lidar_store_option is not None: - for lidar_type in lidar_metadata.keys(): + for lidar_type in log_metadata.lidar_metadata.keys(): if data_converter_config.lidar_store_option == "path": schema_column_list.append((lidar_type.serialize(), pa.string())) elif data_converter_config.lidar_store_option == "binary": raise NotImplementedError("Binary lidar storage is not implemented.") if data_converter_config.camera_store_option is not None: - for camera_type in camera_metadata.keys(): + for camera_type in log_metadata.camera_metadata.keys(): if data_converter_config.camera_store_option == "path": schema_column_list.append((camera_type.serialize(), pa.string())) elif data_converter_config.camera_store_option == "binary": schema_column_list.append((camera_type.serialize(), pa.binary())) - schema_column_list.append((f"{camera_type.serialize()}_extrinsic", pa.list_(pa.float64(), 4 * 4))) + schema_column_list.append( + (f"{camera_type.serialize()}_extrinsic", pa.list_(pa.float64(), len(StateSE3Index))) + ) recording_schema = pa.schema(schema_column_list) - recording_schema = recording_schema.with_metadata( - { - "log_metadata": json.dumps(asdict(metadata)), - "vehicle_parameters": json.dumps(asdict(vehicle_parameters)), - "camera_metadata": camera_metadata_dict_to_json(camera_metadata), - "lidar_metadata": lidar_metadata_dict_to_json(lidar_metadata), - } - ) + recording_schema = add_log_metadata_to_arrow_schema(recording_schema, log_metadata) _write_recording_table( sensor_df, @@ -233,7 +229,7 @@ def convert_av2_log_to_arrow( log_path, data_converter_config, ) - del recording_schema, vehicle_parameters + del recording_schema gc.collect() return [] diff --git a/d123/datasets/carla/carla_data_converter.py b/d123/datasets/carla/carla_data_converter.py index 1930bb69..c38fc83a 100644 --- a/d123/datasets/carla/carla_data_converter.py +++ b/d123/datasets/carla/carla_data_converter.py @@ -18,7 +18,7 @@ from d123.datasets.utils.sensor.lidar_index_registry import CarlaLidarIndex from d123.datatypes.maps.abstract_map import AbstractMap, MapLayer from d123.datatypes.maps.abstract_map_objects import AbstractLane -from d123.datatypes.maps.gpkg.gpkg_map import get_map_api_from_names +from d123.datatypes.maps.gpkg.gpkg_map import get_global_map_api from d123.datatypes.scene.scene_metadata import LogMetadata from d123.datatypes.sensors.camera.pinhole_camera import ( PinholeCameraMetadata, @@ -178,7 +178,7 @@ def convert_log_internal(args: List[Dict[str, Union[List[str], List[Path]]]]) -> bounding_box_paths = sorted([bb_path for bb_path in (log_path / "boxes").iterdir()]) first_log_dict = _load_json_gz(bounding_box_paths[0]) map_name = first_log_dict["location"] - map_api = get_map_api_from_names("carla", map_name) + map_api = get_global_map_api("carla", map_name) metadata = _get_metadata(map_name, str(log_path.stem)) vehicle_parameters = get_carla_lincoln_mkz_2020_parameters() diff --git 
a/d123/datasets/nuplan/nuplan_data_converter.py b/d123/datasets/nuplan/nuplan_data_converter.py index 79f824e3..95067b32 100644 --- a/d123/datasets/nuplan/nuplan_data_converter.py +++ b/d123/datasets/nuplan/nuplan_data_converter.py @@ -1,8 +1,6 @@ import gc -import json import os import pickle -from dataclasses import asdict from functools import partial from pathlib import Path from typing import Any, Dict, Final, List, Optional, Tuple, Union @@ -20,15 +18,15 @@ from d123.datasets.utils.sensor.lidar_index_registry import NuplanLidarIndex from d123.datatypes.detections.detection import TrafficLightStatus from d123.datatypes.detections.detection_types import DetectionType +from d123.datatypes.scene.arrow.utils.arrow_metadata_utils import add_log_metadata_to_arrow_schema from d123.datatypes.scene.scene_metadata import LogMetadata from d123.datatypes.sensors.camera.pinhole_camera import ( PinholeCameraMetadata, PinholeCameraType, PinholeDistortion, PinholeIntrinsics, - camera_metadata_dict_to_json, ) -from d123.datatypes.sensors.lidar.lidar import LiDARMetadata, LiDARType, lidar_metadata_dict_to_json +from d123.datatypes.sensors.lidar.lidar import LiDARMetadata, LiDARType from d123.datatypes.time.time_point import TimePoint from d123.datatypes.vehicle_state.ego_state import DynamicStateSE3, EgoStateSE3, EgoStateSE3Index from d123.datatypes.vehicle_state.vehicle_parameters import ( @@ -197,20 +195,23 @@ def convert_nuplan_log_to_arrow( log_file_path = data_converter_config.output_path / split / f"{log_path.stem}.arrow" if data_converter_config.force_log_conversion or not log_file_path.exists(): + log_file_path.unlink(missing_ok=True) if not log_file_path.parent.exists(): log_file_path.parent.mkdir(parents=True, exist_ok=True) - metadata = LogMetadata( + log_metadata = LogMetadata( dataset="nuplan", + split=split, log_name=log_db.log_name, location=log_db.log.map_version, timestep_seconds=TARGET_DT, + vehicle_parameters=get_nuplan_chrysler_pacifica_parameters(), + camera_metadata=get_nuplan_camera_metadata(log_path), + lidar_metadata=get_nuplan_lidar_metadata(), map_has_z=False, + map_is_local=False, ) - vehicle_parameters = get_nuplan_chrysler_pacifica_parameters() - camera_metadata = get_nuplan_camera_metadata(log_path) - lidar_metadata = get_nuplan_lidar_metadata() schema_column_list = [ ("token", pa.string()), @@ -226,14 +227,14 @@ def convert_nuplan_log_to_arrow( ("route_lane_group_ids", pa.list_(pa.int64())), ] if data_converter_config.lidar_store_option is not None: - for lidar_type in lidar_metadata.keys(): + for lidar_type in log_metadata.lidar_metadata.keys(): if data_converter_config.lidar_store_option == "path": schema_column_list.append((lidar_type.serialize(), pa.string())) elif data_converter_config.lidar_store_option == "binary": raise NotImplementedError("Binary lidar storage is not implemented.") if data_converter_config.camera_store_option is not None: - for camera_type in camera_metadata.keys(): + for camera_type in log_metadata.camera_metadata.keys(): if data_converter_config.camera_store_option == "path": schema_column_list.append((camera_type.serialize(), pa.string())) schema_column_list.append( @@ -244,21 +245,15 @@ def convert_nuplan_log_to_arrow( raise NotImplementedError("Binary camera storage is not implemented.") recording_schema = pa.schema(schema_column_list) - recording_schema = recording_schema.with_metadata( - { - "log_metadata": json.dumps(asdict(metadata)), - "vehicle_parameters": json.dumps(asdict(vehicle_parameters)), - "camera_metadata": 
camera_metadata_dict_to_json(camera_metadata), - "lidar_metadata": lidar_metadata_dict_to_json(lidar_metadata), - } - ) - + recording_schema = add_log_metadata_to_arrow_schema(recording_schema, log_metadata) _write_recording_table(log_db, recording_schema, log_file_path, log_path, data_converter_config) + # Detach and remove log_db, for memory management log_db.detach_tables() log_db.remove_ref() - del recording_schema, vehicle_parameters, log_db - gc.collect() + del recording_schema, log_db + gc.collect() + return [] diff --git a/d123/datasets/nuplan/load_sensor.py b/d123/datasets/nuplan/nuplan_load_sensor.py similarity index 100% rename from d123/datasets/nuplan/load_sensor.py rename to d123/datasets/nuplan/nuplan_load_sensor.py diff --git a/d123/datasets/utils/arrow_schema.py b/d123/datasets/utils/arrow_schema.py new file mode 100644 index 00000000..e69de29b diff --git a/d123/datasets/wopd/wopd_data_converter.py b/d123/datasets/wopd/wopd_data_converter.py index 27bc4c05..4481a532 100644 --- a/d123/datasets/wopd/wopd_data_converter.py +++ b/d123/datasets/wopd/wopd_data_converter.py @@ -1,8 +1,6 @@ import gc import hashlib -import json import os -from dataclasses import asdict from functools import partial from pathlib import Path from typing import Any, Dict, Final, List, Tuple, Union @@ -20,21 +18,25 @@ from d123.datasets.wopd.waymo_map_utils.wopd_map_utils import convert_wopd_map from d123.datasets.wopd.wopd_utils import parse_range_image_and_camera_projection from d123.datatypes.detections.detection_types import DetectionType +from d123.datatypes.scene.arrow.utils.arrow_metadata_utils import add_log_metadata_to_arrow_schema from d123.datatypes.scene.scene_metadata import LogMetadata from d123.datatypes.sensors.camera.pinhole_camera import ( PinholeCameraMetadata, PinholeCameraType, PinholeDistortion, PinholeIntrinsics, - camera_metadata_dict_to_json, ) -from d123.datatypes.sensors.lidar.lidar import LiDARMetadata, LiDARType, lidar_metadata_dict_to_json +from d123.datatypes.sensors.lidar.lidar import LiDARMetadata, LiDARType from d123.datatypes.vehicle_state.ego_state import DynamicStateSE3, EgoStateSE3, EgoStateSE3Index from d123.datatypes.vehicle_state.vehicle_parameters import get_wopd_chrysler_pacifica_parameters from d123.geometry import BoundingBoxSE3Index, EulerAngles, StateSE3, Vector3D, Vector3DIndex -from d123.geometry.geometry_index import StateSE3Index +from d123.geometry.geometry_index import EulerAnglesIndex, StateSE3Index from d123.geometry.transform.transform_se3 import convert_relative_to_absolute_se3_array from d123.geometry.utils.constants import DEFAULT_PITCH, DEFAULT_ROLL +from d123.geometry.utils.rotation_utils import ( + get_euler_array_from_quaternion_array, + get_quaternion_array_from_euler_array, +) check_dependencies(modules=["tensorflow", "waymo_open_dataset"], optional_name="waymo") import tensorflow as tf @@ -216,16 +218,18 @@ def convert_wopd_tfrecord_log_to_arrow( if not log_file_path.parent.exists(): log_file_path.parent.mkdir(parents=True, exist_ok=True) - metadata = LogMetadata( + log_metadata = LogMetadata( dataset="wopd", + split=split, log_name=log_name, - location=None, # TODO: implement map name - timestep_seconds=TARGET_DT, # TODO: Check if correct. Maybe not hardcode + location=None, # TODO: Add location information. 
+ timestep_seconds=TARGET_DT, + vehicle_parameters=get_wopd_chrysler_pacifica_parameters(), + camera_metadata=get_wopd_camera_metadata(initial_frame, data_converter_config), + lidar_metadata=get_wopd_lidar_metadata(initial_frame, data_converter_config), map_has_z=True, + map_is_local=True, ) - vehicle_parameters = get_wopd_chrysler_pacifica_parameters() - camera_metadata = get_wopd_camera_metadata(initial_frame, data_converter_config) - lidar_metadata = get_wopd_lidar_metadata(initial_frame, data_converter_config) schema_column_list = [ ("token", pa.string()), @@ -242,7 +246,7 @@ def convert_wopd_tfrecord_log_to_arrow( ] # TODO: Adjust how cameras are added if data_converter_config.camera_store_option is not None: - for camera_type in camera_metadata.keys(): + for camera_type in log_metadata.camera_metadata.keys(): if data_converter_config.camera_store_option == "path": raise NotImplementedError("Path camera storage is not implemented.") elif data_converter_config.camera_store_option == "binary": @@ -252,25 +256,18 @@ def convert_wopd_tfrecord_log_to_arrow( ) if data_converter_config.lidar_store_option is not None: - for lidar_type in lidar_metadata.keys(): + for lidar_type in log_metadata.lidar_metadata.keys(): if data_converter_config.lidar_store_option == "path": raise NotImplementedError("Filepath lidar storage is not implemented.") elif data_converter_config.lidar_store_option == "binary": schema_column_list.append((lidar_type.serialize(), (pa.list_(pa.float32())))) recording_schema = pa.schema(schema_column_list) - recording_schema = recording_schema.with_metadata( - { - "log_metadata": json.dumps(asdict(metadata)), - "vehicle_parameters": json.dumps(asdict(vehicle_parameters)), - "camera_metadata": camera_metadata_dict_to_json(camera_metadata), - "lidar_metadata": lidar_metadata_dict_to_json(lidar_metadata), - } - ) + recording_schema = add_log_metadata_to_arrow_schema(recording_schema, log_metadata) _write_recording_table(dataset, recording_schema, log_file_path, tf_record_path, data_converter_config) - del recording_schema, vehicle_parameters, dataset + del recording_schema, dataset except Exception as e: import traceback @@ -408,15 +405,9 @@ def _extract_detections(frame: dataset_pb2.Frame) -> Tuple[List[List[float]], Li for detection_idx, detection in enumerate(frame.laser_labels): if detection.type not in WOPD_DETECTION_NAME_DICT: continue - - # 1. Quaternion rotations - # NOTE: WOPD bounding boxes are (1) stored in ego frame and (2) only supply yaw rotation - # The global pose can either consider ego roll and pitch or set them to zero. 
- # (zero roll/pitch corresponds to setting it to the ego roll/pitch, before transformation to global frame) - # detection_quaternion = EulerAngles( - roll=ego_rear_axle.roll if ZERO_ROLL_PITCH else DEFAULT_ROLL, - pitch=ego_rear_axle.pitch if ZERO_ROLL_PITCH else DEFAULT_PITCH, + roll=DEFAULT_ROLL, + pitch=DEFAULT_PITCH, yaw=detection.box.heading, ).quaternion @@ -443,6 +434,12 @@ def _extract_detections(frame: dataset_pb2.Frame) -> Tuple[List[List[float]], Li detections_state[:, BoundingBoxSE3Index.STATE_SE3] = convert_relative_to_absolute_se3_array( origin=ego_rear_axle, se3_array=detections_state[:, BoundingBoxSE3Index.STATE_SE3] ) + if ZERO_ROLL_PITCH: + euler_array = get_euler_array_from_quaternion_array(detections_state[:, BoundingBoxSE3Index.QUATERNION]) + euler_array[..., EulerAnglesIndex.ROLL] = DEFAULT_ROLL + euler_array[..., EulerAnglesIndex.PITCH] = DEFAULT_PITCH + detections_state[..., BoundingBoxSE3Index.QUATERNION] = get_quaternion_array_from_euler_array(euler_array) + return detections_state.tolist(), detections_velocity.tolist(), detections_token, detections_types diff --git a/d123/datatypes/maps/gpkg/gpkg_map.py b/d123/datatypes/maps/gpkg/gpkg_map.py index 429a611e..be15d482 100644 --- a/d123/datatypes/maps/gpkg/gpkg_map.py +++ b/d123/datatypes/maps/gpkg/gpkg_map.py @@ -5,7 +5,7 @@ from collections import defaultdict from functools import lru_cache from pathlib import Path -from typing import Callable, Dict, Iterable, List, Optional, Tuple, Union +from typing import Callable, Dict, Final, Iterable, List, Optional, Tuple, Union import geopandas as gpd import shapely @@ -29,6 +29,7 @@ from d123.geometry import Point2D USE_ARROW: bool = True +MAX_LRU_CACHED_TABLES: Final[int] = 128 # TODO: add to some configs class GPKGMap(AbstractMap): @@ -370,8 +371,8 @@ def _get_road_line(self, id: str) -> Optional[GPKGRoadLine]: # return list(map_object_ids) -@lru_cache(maxsize=32) -def get_map_api_from_names(dataset: str, location: str) -> GPKGMap: +@lru_cache(maxsize=MAX_LRU_CACHED_TABLES) +def get_global_map_api(dataset: str, location: str) -> GPKGMap: D123_MAPS_ROOT = Path(os.environ.get("D123_MAPS_ROOT")) gpkg_path = D123_MAPS_ROOT / f"{dataset}_{location}.gpkg" assert gpkg_path.is_file(), f"{dataset}_{location}.gpkg not found in {str(D123_MAPS_ROOT)}." @@ -381,7 +382,6 @@ def get_map_api_from_names(dataset: str, location: str) -> GPKGMap: def get_local_map_api(split_name: str, log_name: str) -> GPKGMap: - print(split_name, log_name) D123_MAPS_ROOT = Path(os.environ.get("D123_MAPS_ROOT")) gpkg_path = D123_MAPS_ROOT / split_name / f"{log_name}.gpkg" assert gpkg_path.is_file(), f"{log_name}.gpkg not found in {str(D123_MAPS_ROOT)}." 
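[Reviewer note, not part of the patch] A hedged sketch of the two map-lookup paths after the rename above; both helpers read D123_MAPS_ROOT, and the location/log names here are invented:

from d123.datatypes.maps.gpkg.gpkg_map import get_global_map_api, get_local_map_api

# City-scale maps shared across logs resolve <dataset>_<location>.gpkg and are
# memoized via lru_cache (up to MAX_LRU_CACHED_TABLES entries).
carla_map = get_global_map_api("carla", "Town03")

# Per-log maps (map_is_local=True, e.g. wopd or av2-sensor) resolve
# <split>/<log_name>.gpkg instead and are loaded fresh on each call.
wopd_map = get_local_map_api("wopd_train", "example_log")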
diff --git a/d123/datatypes/scene/abstract_scene.py b/d123/datatypes/scene/abstract_scene.py index 7f6ca5e3..c9fbc1af 100644 --- a/d123/datatypes/scene/abstract_scene.py +++ b/d123/datatypes/scene/abstract_scene.py @@ -5,53 +5,30 @@ from d123.datatypes.detections.detection import BoxDetectionWrapper, DetectionRecording, TrafficLightDetectionWrapper from d123.datatypes.maps.abstract_map import AbstractMap -from d123.datatypes.scene.scene_metadata import LogMetadata +from d123.datatypes.scene.scene_metadata import LogMetadata, SceneExtractionMetadata from d123.datatypes.sensors.camera.pinhole_camera import PinholeCamera, PinholeCameraType from d123.datatypes.sensors.lidar.lidar import LiDAR, LiDARType from d123.datatypes.time.time_point import TimePoint from d123.datatypes.vehicle_state.ego_state import EgoStateSE3 - -# TODO: Remove or improve open/close dynamic of Scene object. +from d123.datatypes.vehicle_state.vehicle_parameters import VehicleParameters class AbstractScene(abc.ABC): - @property - @abc.abstractmethod - def log_name(self) -> str: - raise NotImplementedError - - @property - @abc.abstractmethod - def token(self) -> str: - raise NotImplementedError - - @property - @abc.abstractmethod - def log_metadata(self) -> LogMetadata: - raise NotImplementedError - - @property - @abc.abstractmethod - def available_camera_types(self) -> List[PinholeCameraType]: - raise NotImplementedError - - @property - @abc.abstractmethod - def available_lidar_types(self) -> List[LiDARType]: - raise NotImplementedError + #################################################################################################################### + # Abstract Methods, to be implemented by subclasses + #################################################################################################################### - @property @abc.abstractmethod - def map_api(self) -> Optional[AbstractMap]: + def get_log_metadata(self) -> LogMetadata: raise NotImplementedError @abc.abstractmethod - def get_number_of_iterations(self) -> int: + def get_scene_extraction_metadata(self) -> SceneExtractionMetadata: raise NotImplementedError @abc.abstractmethod - def get_number_of_history_iterations() -> int: + def get_map_api(self) -> Optional[AbstractMap]: raise NotImplementedError @abc.abstractmethod @@ -86,8 +63,44 @@ def get_camera_at_iteration(self, iteration: int, camera_type: PinholeCameraType def get_lidar_at_iteration(self, iteration: int, lidar_type: LiDARType) -> Optional[LiDAR]: raise NotImplementedError - def open(self) -> None: - pass + #################################################################################################################### + # Syntactic Sugar / Properties, for easier access to common attributes + #################################################################################################################### + + # 1. Log Metadata properties + @property + def log_metadata(self) -> LogMetadata: + return self.get_log_metadata() + + @property + def log_name(self) -> str: + return self.log_metadata.log_name + + @property + def vehicle_parameters(self) -> VehicleParameters: + return self.log_metadata.vehicle_parameters + + @property + def available_camera_types(self) -> List[PinholeCameraType]: + return list(self.log_metadata.camera_metadata.keys()) + + @property + def available_lidar_types(self) -> List[LiDARType]: + return list(self.log_metadata.lidar_metadata.keys()) + + # 2. 
Scene Extraction Metadata properties + @property + def scene_extraction_metadata(self) -> SceneExtractionMetadata: + return self.get_scene_extraction_metadata() + + @property + def token(self) -> str: + return self.scene_extraction_metadata.initial_token + + @property + def number_of_iterations(self) -> int: + return self.scene_extraction_metadata.number_of_iterations - def close(self) -> None: - pass + @property + def number_of_history_iterations(self) -> int: + return self.scene_extraction_metadata.number_of_history_iterations diff --git a/d123/datatypes/scene/arrow/__init__.py b/d123/datatypes/scene/arrow/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/d123/datatypes/scene/arrow/arrow_scene.py b/d123/datatypes/scene/arrow/arrow_scene.py index 3fcc2150..8aa595db 100644 --- a/d123/datatypes/scene/arrow/arrow_scene.py +++ b/d123/datatypes/scene/arrow/arrow_scene.py @@ -1,15 +1,14 @@ -import json from pathlib import Path -from typing import Dict, List, Optional, Tuple, Union +from typing import List, Optional, Union import pyarrow as pa -from d123.common.utils.arrow_helper import open_arrow_table +from d123.common.utils.arrow_helper import get_lru_cached_arrow_table from d123.datatypes.detections.detection import BoxDetectionWrapper, DetectionRecording, TrafficLightDetectionWrapper from d123.datatypes.maps.abstract_map import AbstractMap -from d123.datatypes.maps.gpkg.gpkg_map import get_local_map_api, get_map_api_from_names +from d123.datatypes.maps.gpkg.gpkg_map import get_global_map_api, get_local_map_api from d123.datatypes.scene.abstract_scene import AbstractScene -from d123.datatypes.scene.arrow.utils.conversion import ( +from d123.datatypes.scene.arrow.utils.arrow_getters import ( get_box_detections_from_arrow_table, get_camera_from_arrow_table, get_ego_vehicle_state_from_arrow_table, @@ -17,207 +16,132 @@ get_timepoint_from_arrow_table, get_traffic_light_detections_from_arrow_table, ) -from d123.datatypes.scene.scene_metadata import LogMetadata, SceneExtractionInfo -from d123.datatypes.sensors.camera.pinhole_camera import ( - PinholeCamera, - PinholeCameraMetadata, - PinholeCameraType, - camera_metadata_dict_from_json, -) -from d123.datatypes.sensors.lidar.lidar import LiDAR, LiDARMetadata, LiDARType, lidar_metadata_dict_from_json +from d123.datatypes.scene.arrow.utils.arrow_metadata_utils import get_log_metadata_from_arrow +from d123.datatypes.scene.scene_metadata import LogMetadata, SceneExtractionMetadata +from d123.datatypes.sensors.camera.pinhole_camera import PinholeCamera, PinholeCameraType +from d123.datatypes.sensors.lidar.lidar import LiDAR, LiDARType from d123.datatypes.time.time_point import TimePoint from d123.datatypes.vehicle_state.ego_state import EgoStateSE3 -from d123.datatypes.vehicle_state.vehicle_parameters import VehicleParameters - -# TODO: Remove or improve open/close dynamic of Scene object. - - -def _get_scene_data( - arrow_file_path: Union[Path, str], -) -> Tuple[LogMetadata, VehicleParameters, Dict[PinholeCameraType, PinholeCameraMetadata]]: - """ - Extracts the metadata and vehicle parameters from the arrow file. - """ - # TODO: consider a better way to read metadata, instead of loading the entire table. 
-    table = open_arrow_table(arrow_file_path)
-    metadata = LogMetadata(**json.loads(table.schema.metadata[b"log_metadata"].decode()))
-    vehicle_parameters = VehicleParameters(**json.loads(table.schema.metadata[b"vehicle_parameters"].decode()))
-
-    if b"camera_metadata" in table.schema.metadata:
-        camera_metadata = camera_metadata_dict_from_json(table.schema.metadata[b"camera_metadata"].decode())
-    else:
-        camera_metadata = {}
-
-    if b"lidar_metadata" in table.schema.metadata:
-        lidar_metadata = lidar_metadata_dict_from_json(table.schema.metadata[b"lidar_metadata"].decode())
-    else:
-        lidar_metadata = {}
-
-    del table
-    return metadata, vehicle_parameters, camera_metadata, lidar_metadata


 class ArrowScene(AbstractScene):
+
     def __init__(
         self,
         arrow_file_path: Union[Path, str],
-        scene_extraction_info: Optional[SceneExtractionInfo] = None,
+        scene_extraction_metadata: Optional[SceneExtractionMetadata] = None,
     ) -> None:
-        self._recording_table: pa.Table = None
-        (
-            _metadata,
-            _vehicle_parameters,
-            _camera_metadata,
-            _lidar_metadata,
-        ) = _get_scene_data(arrow_file_path)
-        self._metadata: LogMetadata = _metadata
-        self._vehicle_parameters: VehicleParameters = _vehicle_parameters
-        self._camera_metadata: Dict[PinholeCameraType, PinholeCameraMetadata] = _camera_metadata
-        self._lidar_metadata: Dict[LiDARType, LiDARMetadata] = _lidar_metadata
+        self._arrow_file_path: Path = Path(arrow_file_path)
+        self._log_metadata: LogMetadata = get_log_metadata_from_arrow(arrow_file_path)
+
+        if scene_extraction_metadata is None:
+            # Default to the full log: the row count of the recording table
+            # (not the file path) determines the duration, and the initial
+            # token is read from the first row.
+            recording_table = self._get_recording_table()
+            scene_extraction_metadata = SceneExtractionMetadata(
+                initial_token=str(recording_table["token"][0].as_py()),
+                initial_idx=0,
+                duration_s=self._log_metadata.timestep_seconds * len(recording_table),
+                history_s=0.0,
+                iteration_duration_s=self._log_metadata.timestep_seconds,
+            )
+        self._scene_extraction_metadata: SceneExtractionMetadata = scene_extraction_metadata

-        self._map_api: Optional[AbstractMap] = None
+        # NOTE: Lazy load a log-specific map API, and keep reference.
+        # Global maps are LRU cached internally.
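+        # The recording table is likewise not held on the instance: it is
+        # re-fetched on demand through get_lru_cached_arrow_table, which keeps
+        # pickled scenes (see __reduce__ below) lightweight.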
+ self._local_map_api: Optional[AbstractMap] = None - self._arrow_log_path = arrow_file_path - self._scene_extraction_info: SceneExtractionInfo = scene_extraction_info + #################################################################################################################### + # Helpers for ArrowScene + #################################################################################################################### def __reduce__(self): + """Helper for pickling the object.""" return ( self.__class__, ( - self._arrow_log_path, - self._scene_extraction_info, + self._arrow_file_path, + self._scene_extraction_metadata, ), ) - @property - def map_api(self) -> AbstractMap: - self._lazy_initialize() - return self._map_api - - @property - def log_name(self) -> str: - return str(self._arrow_log_path.stem) - - @property - def token(self) -> str: - self._lazy_initialize() - return self._recording_table["token"][self._get_table_index(0)].as_py() - - @property - def log_metadata(self) -> LogMetadata: - return self._metadata - - @property - def available_camera_types(self) -> List[PinholeCameraType]: - return list(self._camera_metadata.keys()) - - @property - def available_lidar_types(self) -> List[LiDARType]: - return list(self._lidar_metadata.keys()) + def _get_recording_table(self) -> pa.Table: + """Helper function to return an LRU cached reference to the arrow table.""" + return get_lru_cached_arrow_table(self._arrow_file_path) def _get_table_index(self, iteration: int) -> int: - self._lazy_initialize() - assert ( - -self.get_number_of_history_iterations() <= iteration < self.get_number_of_iterations() - ), "Iteration out of bounds" - table_index = self._scene_extraction_info.initial_idx + iteration + assert -self.number_of_history_iterations <= iteration < self.number_of_iterations, "Iteration out of bounds" + table_index = self._scene_extraction_metadata.initial_idx + iteration return table_index - def get_number_of_iterations(self) -> int: - self._lazy_initialize() - return self._scene_extraction_info.number_of_iterations + #################################################################################################################### + # Implementation of AbstractScene + #################################################################################################################### + + def get_log_metadata(self) -> LogMetadata: + return self._log_metadata + + def get_scene_extraction_metadata(self) -> SceneExtractionMetadata: + return self._scene_extraction_metadata - def get_number_of_history_iterations(self) -> int: - self._lazy_initialize() - return self._scene_extraction_info.number_of_history_iterations + def get_map_api(self) -> Optional[AbstractMap]: + map_api: Optional[AbstractMap] = None + if self.log_metadata.map_is_local: + if self._local_map_api is None: + map_api = get_local_map_api(self.log_metadata.split, self.log_name) + self._local_map_api = map_api + else: + map_api = self._local_map_api + else: + map_api = get_global_map_api(self.log_metadata.dataset, self.log_metadata.location) + return map_api def get_timepoint_at_iteration(self, iteration: int) -> TimePoint: - self._lazy_initialize() - return get_timepoint_from_arrow_table(self._recording_table, self._get_table_index(iteration)) + return get_timepoint_from_arrow_table(self._get_recording_table(), self._get_table_index(iteration)) def get_ego_state_at_iteration(self, iteration: int) -> Optional[EgoStateSE3]: - self._lazy_initialize() return get_ego_vehicle_state_from_arrow_table( - 
self._recording_table, self._get_table_index(iteration), self._vehicle_parameters + self._get_recording_table(), + self._get_table_index(iteration), + self.log_metadata.vehicle_parameters, ) def get_box_detections_at_iteration(self, iteration: int) -> Optional[BoxDetectionWrapper]: - # TODO: Make box detections optional in ArrowScene - self._lazy_initialize() - return get_box_detections_from_arrow_table(self._recording_table, self._get_table_index(iteration)) + return get_box_detections_from_arrow_table(self._get_recording_table(), self._get_table_index(iteration)) def get_traffic_light_detections_at_iteration(self, iteration: int) -> Optional[TrafficLightDetectionWrapper]: - # TODO: Make traffic lights optional in ArrowScene - self._lazy_initialize() - return get_traffic_light_detections_from_arrow_table(self._recording_table, self._get_table_index(iteration)) + return get_traffic_light_detections_from_arrow_table( + self._get_recording_table(), self._get_table_index(iteration) + ) def get_detection_recording_at_iteration(self, iteration: int) -> Optional[DetectionRecording]: - # TODO: Make detection recording optional in ArrowScene return DetectionRecording( box_detections=self.get_box_detections_at_iteration(iteration), traffic_light_detections=self.get_traffic_light_detections_at_iteration(iteration), ) def get_route_lane_group_ids(self, iteration: int) -> Optional[List[int]]: - self._lazy_initialize() route_lane_group_ids: Optional[List[int]] = None - if "route_lane_group_ids" in self._recording_table.column_names: - table_index = self._get_table_index(iteration) - route_lane_group_ids = self._recording_table["route_lane_group_ids"][table_index].as_py() + table = self._get_recording_table() + if "route_lane_group_ids" in table.column_names: + route_lane_group_ids = table["route_lane_group_ids"][self._get_table_index(iteration)].as_py() return route_lane_group_ids def get_camera_at_iteration(self, iteration: int, camera_type: PinholeCameraType) -> Optional[PinholeCamera]: - self._lazy_initialize() camera: Optional[PinholeCamera] = None - if camera_type in self._camera_metadata: - table_index = self._get_table_index(iteration) + if camera_type in self.available_camera_types: camera = get_camera_from_arrow_table( - self._recording_table, - table_index, - self._camera_metadata[camera_type], + self._get_recording_table(), + self._get_table_index(iteration), + camera_type, self.log_metadata, ) return camera def get_lidar_at_iteration(self, iteration: int, lidar_type: LiDARType) -> Optional[LiDAR]: - self._lazy_initialize() lidar: Optional[LiDAR] = None - if lidar_type in self._lidar_metadata: - table_index = self._get_table_index(iteration) + if lidar_type in self.available_lidar_types: lidar = get_lidar_from_arrow_table( - self._recording_table, - table_index, - self._lidar_metadata[lidar_type], + self._get_recording_table(), + self._get_table_index(iteration), + lidar_type, self.log_metadata, ) return lidar - - def _lazy_initialize(self) -> None: - self.open() - - def open(self) -> None: - if self._map_api is None: - try: - if self._metadata.dataset in ["wopd", "av2-sensor"]: - # FIXME: - split = str(self._arrow_log_path.parent.name) - self._map_api = get_local_map_api(split, self._metadata.log_name) - else: - self._map_api = get_map_api_from_names(self._metadata.dataset, self._metadata.location) - self._map_api.initialize() - except Exception as e: - print(f"Error initializing map API: {e}") - if self._recording_table is None: - self._recording_table = 
open_arrow_table(self._arrow_log_path) - if self._scene_extraction_info is None: - self._scene_extraction_info = SceneExtractionInfo( - initial_idx=0, - duration_s=self._metadata.timestep_seconds * len(self._recording_table), - history_s=0.0, - iteration_duration_s=self._metadata.timestep_seconds, - ) - - def close(self) -> None: - del self._recording_table - self._recording_table = None diff --git a/d123/datatypes/scene/arrow/arrow_scene_builder.py b/d123/datatypes/scene/arrow/arrow_scene_builder.py index 94571715..bf48c96b 100644 --- a/d123/datatypes/scene/arrow/arrow_scene_builder.py +++ b/d123/datatypes/scene/arrow/arrow_scene_builder.py @@ -1,16 +1,16 @@ -import json import random from functools import partial from pathlib import Path from typing import Iterator, List, Optional, Set, Union from d123.common.multithreading.worker_utils import WorkerPool, worker_map -from d123.common.utils.arrow_helper import open_arrow_table +from d123.common.utils.arrow_helper import get_lru_cached_arrow_table from d123.datatypes.scene.abstract_scene import AbstractScene from d123.datatypes.scene.abstract_scene_builder import SceneBuilder from d123.datatypes.scene.arrow.arrow_scene import ArrowScene +from d123.datatypes.scene.arrow.utils.arrow_metadata_utils import get_log_metadata_from_arrow from d123.datatypes.scene.scene_filter import SceneFilter -from d123.datatypes.scene.scene_metadata import LogMetadata, SceneExtractionInfo +from d123.datatypes.scene.scene_metadata import SceneExtractionMetadata class ArrowSceneBuilder(SceneBuilder): @@ -71,29 +71,29 @@ def _extract_scenes_from_logs(log_paths: List[Path], filter: SceneFilter) -> Lis scenes: List[AbstractScene] = [] for log_path in log_paths: try: - scene_extraction_infos = _get_scene_extraction_info(log_path, filter) + scene_extraction_metadatas = _get_scene_extraction_metadatas(log_path, filter) except Exception as e: print(f"Error extracting scenes from {log_path}: {e}") continue - for scene_extraction_info in scene_extraction_infos: + for scene_extraction_metadata in scene_extraction_metadatas: scenes.append( ArrowScene( arrow_file_path=log_path, - scene_extraction_info=scene_extraction_info, + scene_extraction_metadata=scene_extraction_metadata, ) ) return scenes -def _get_scene_extraction_info(log_path: Union[str, Path], filter: SceneFilter) -> List[SceneExtractionInfo]: - scene_extraction_infos: List[SceneExtractionInfo] = [] +def _get_scene_extraction_metadatas(log_path: Union[str, Path], filter: SceneFilter) -> List[SceneExtractionMetadata]: + scene_extraction_metadatas: List[SceneExtractionMetadata] = [] - recording_table = open_arrow_table(log_path) - log_metadata = LogMetadata(**json.loads(recording_table.schema.metadata[b"log_metadata"].decode())) + recording_table = get_lru_cached_arrow_table(log_path) + log_metadata = get_log_metadata_from_arrow(log_path) # 1. 
Filter map name if filter.map_names is not None and log_metadata.map_name not in filter.map_names: - return scene_extraction_infos + return scene_extraction_metadatas start_idx = int(filter.history_s / log_metadata.timestep_seconds) if filter.history_s is not None else 0 end_idx = ( @@ -103,7 +103,8 @@ def _get_scene_extraction_info(log_path: Union[str, Path], filter: SceneFilter) ) if filter.duration_s is None: return [ - SceneExtractionInfo( + SceneExtractionMetadata( + initial_token=str(recording_table["token"][start_idx].as_py()), initial_idx=start_idx, duration_s=(end_idx - start_idx) * log_metadata.timestep_seconds, history_s=filter.history_s if filter.history_s is not None else 0.0, @@ -114,27 +115,29 @@ def _get_scene_extraction_info(log_path: Union[str, Path], filter: SceneFilter) scene_token_set = set(filter.scene_tokens) if filter.scene_tokens is not None else None for idx in range(start_idx, end_idx): - scene_extraction_info: Optional[SceneExtractionInfo] = None + scene_extraction_metadata: Optional[SceneExtractionMetadata] = None if scene_token_set is None: - scene_extraction_info = SceneExtractionInfo( + scene_extraction_metadata = SceneExtractionMetadata( + initial_token=str(recording_table["token"][idx].as_py()), initial_idx=idx, duration_s=filter.duration_s, history_s=filter.history_s, iteration_duration_s=log_metadata.timestep_seconds, ) elif str(recording_table["token"][idx]) in scene_token_set: - scene_extraction_info = SceneExtractionInfo( + scene_extraction_metadata = SceneExtractionMetadata( + initial_token=str(recording_table["token"][idx].as_py()), initial_idx=idx, duration_s=filter.duration_s, history_s=filter.history_s, iteration_duration_s=log_metadata.timestep_seconds, ) - if scene_extraction_info is not None: + if scene_extraction_metadata is not None: # Check of timestamp threshold exceeded between previous scene, if specified in filter - if filter.timestamp_threshold_s is not None and len(scene_extraction_infos) > 0: - iteration_delta = idx - scene_extraction_infos[-1].initial_idx + if filter.timestamp_threshold_s is not None and len(scene_extraction_metadatas) > 0: + iteration_delta = idx - scene_extraction_metadatas[-1].initial_idx if (iteration_delta * log_metadata.timestep_seconds) < filter.timestamp_threshold_s: continue @@ -148,7 +151,7 @@ def _get_scene_extraction_info(log_path: Union[str, Path], filter: SceneFilter) if not all(cameras_available): continue - scene_extraction_infos.append(scene_extraction_info) + scene_extraction_metadatas.append(scene_extraction_metadata) del recording_table, log_metadata - return scene_extraction_infos + return scene_extraction_metadatas diff --git a/d123/datatypes/scene/arrow/utils/__init__.py b/d123/datatypes/scene/arrow/utils/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/d123/datatypes/scene/arrow/utils/conversion.py b/d123/datatypes/scene/arrow/utils/arrow_getters.py similarity index 85% rename from d123/datatypes/scene/arrow/utils/conversion.py rename to d123/datatypes/scene/arrow/utils/arrow_getters.py index 8d36e80b..ac86ca18 100644 --- a/d123/datatypes/scene/arrow/utils/conversion.py +++ b/d123/datatypes/scene/arrow/utils/arrow_getters.py @@ -20,18 +20,17 @@ ) from d123.datatypes.detections.detection_types import DetectionType from d123.datatypes.scene.scene_metadata import LogMetadata -from d123.datatypes.sensors.camera.pinhole_camera import PinholeCamera, PinholeCameraMetadata -from d123.datatypes.sensors.lidar.lidar import LiDAR, LiDARMetadata +from 
d123.datatypes.sensors.camera.pinhole_camera import PinholeCamera, PinholeCameraType +from d123.datatypes.sensors.lidar.lidar import LiDAR, LiDARType from d123.datatypes.time.time_point import TimePoint from d123.datatypes.vehicle_state.ego_state import EgoStateSE3 from d123.datatypes.vehicle_state.vehicle_parameters import VehicleParameters -from d123.geometry import BoundingBoxSE3, Vector3D -from d123.geometry.se import StateSE3 +from d123.geometry import BoundingBoxSE3, StateSE3, Vector3D DATASET_SENSOR_ROOT: Dict[str, Path] = { "nuplan": Path(os.environ["NUPLAN_DATA_ROOT"]) / "nuplan-v1.1" / "sensor_blobs", "carla": Path(os.environ["CARLA_DATA_ROOT"]) / "sensor_blobs", - # "av2-sensor": Path(os.environ["AV2_SENSOR_DATA_ROOT"]) / "sensor_mini", + "av2-sensor": Path(os.environ["AV2_SENSOR_DATA_ROOT"]) / "sensor_mini", } @@ -94,13 +93,13 @@ def get_traffic_light_detections_from_arrow_table(arrow_table: pa.Table, index: def get_camera_from_arrow_table( arrow_table: pa.Table, index: int, - camera_metadata: PinholeCameraMetadata, + camera_type: PinholeCameraType, log_metadata: LogMetadata, ) -> PinholeCamera: - table_data = arrow_table[camera_metadata.camera_type.serialize()][index].as_py() - extrinsic_list = arrow_table[f"{camera_metadata.camera_type.serialize()}_extrinsic"][index].as_py() - extrinsic = StateSE3.from_list(extrinsic_list) if extrinsic_list is not None else None + table_data = arrow_table[camera_type.serialize()][index].as_py() + extrinsic_values = arrow_table[f"{camera_type.serialize()}_extrinsic"][index].as_py() + extrinsic = StateSE3.from_list(extrinsic_values) if extrinsic_values is not None else None if table_data is None or extrinsic is None: return None @@ -120,7 +119,7 @@ def get_camera_from_arrow_table( raise NotImplementedError("Only string file paths for camera data are supported.") return PinholeCamera( - metadata=camera_metadata, + metadata=log_metadata.camera_metadata[camera_type], image=image, extrinsic=extrinsic, ) @@ -129,13 +128,15 @@ def get_camera_from_arrow_table( def get_lidar_from_arrow_table( arrow_table: pa.Table, index: int, - lidar_metadata: LiDARMetadata, + lidar_type: LiDARType, log_metadata: LogMetadata, ) -> LiDAR: assert ( - lidar_metadata.lidar_type.serialize() in arrow_table.schema.names - ), f'"{lidar_metadata.lidar_type.serialize()}" field not found in Arrow table schema.' - lidar_data = arrow_table[lidar_metadata.lidar_type.serialize()][index].as_py() + lidar_type.serialize() in arrow_table.schema.names + ), f'"{lidar_type.serialize()}" field not found in Arrow table schema.' 
+ lidar_data = arrow_table[lidar_type.serialize()][index].as_py() + lidar_metadata = log_metadata.lidar_metadata[lidar_type] + if isinstance(lidar_data, str): sensor_root = DATASET_SENSOR_ROOT[log_metadata.dataset] full_lidar_path = sensor_root / lidar_data @@ -143,7 +144,7 @@ def get_lidar_from_arrow_table( # NOTE: We move data specific import into if-else block, to avoid data specific import errors if log_metadata.dataset == "nuplan": - from d123.datasets.nuplan.load_sensor import load_nuplan_lidar_from_path + from d123.datasets.nuplan.nuplan_load_sensor import load_nuplan_lidar_from_path lidar = load_nuplan_lidar_from_path(full_lidar_path, lidar_metadata) elif log_metadata.dataset == "carla": diff --git a/d123/datatypes/scene/arrow/utils/arrow_metadata_utils.py b/d123/datatypes/scene/arrow/utils/arrow_metadata_utils.py new file mode 100644 index 00000000..e392d337 --- /dev/null +++ b/d123/datatypes/scene/arrow/utils/arrow_metadata_utils.py @@ -0,0 +1,19 @@ +import json +from pathlib import Path +from typing import Union + +import pyarrow as pa + +from d123.common.utils.arrow_helper import get_lru_cached_arrow_table +from d123.datatypes.scene.scene_metadata import LogMetadata + + +def get_log_metadata_from_arrow(arrow_file_path: Union[Path, str]) -> LogMetadata: + table = get_lru_cached_arrow_table(arrow_file_path) + log_metadata = LogMetadata.from_dict(json.loads(table.schema.metadata[b"log_metadata"].decode())) + return log_metadata + + +def add_log_metadata_to_arrow_schema(schema: pa.Schema, log_metadata: LogMetadata) -> pa.Schema: + schema = schema.with_metadata({"log_metadata": json.dumps(log_metadata.to_dict())}) + return schema diff --git a/d123/datatypes/scene/scene_metadata.py b/d123/datatypes/scene/scene_metadata.py index 0fc12b03..ae40aa4d 100644 --- a/d123/datatypes/scene/scene_metadata.py +++ b/d123/datatypes/scene/scene_metadata.py @@ -1,27 +1,57 @@ -from dataclasses import dataclass +from __future__ import annotations + +from dataclasses import asdict, dataclass +from typing import Dict import d123 +from d123.datatypes.sensors.camera.pinhole_camera import PinholeCameraMetadata, PinholeCameraType +from d123.datatypes.sensors.lidar.lidar import LiDARMetadata, LiDARType +from d123.datatypes.vehicle_state.vehicle_parameters import VehicleParameters -@dataclass +@dataclass(frozen=True) class LogMetadata: - # TODO: add - # - split - # - global/local map - dataset: str + split: str log_name: str location: str timestep_seconds: float + vehicle_parameters: VehicleParameters + camera_metadata: Dict[PinholeCameraType, PinholeCameraMetadata] + lidar_metadata: Dict[LiDARType, LiDARMetadata] + map_has_z: bool + map_is_local: bool version: str = str(d123.__version__) + @classmethod + def from_dict(cls, data_dict: Dict) -> LogMetadata: + + data_dict["vehicle_parameters"] = VehicleParameters(**data_dict["vehicle_parameters"]) + data_dict["camera_metadata"] = { + PinholeCameraType.deserialize(key): PinholeCameraMetadata.from_dict(value) + for key, value in data_dict.get("camera_metadata", {}).items() + } + data_dict["lidar_metadata"] = { + LiDARType.deserialize(key): LiDARMetadata.from_dict(value) + for key, value in data_dict.get("lidar_metadata", {}).items() + } + return LogMetadata(**data_dict) + + def to_dict(self) -> Dict: + data_dict = asdict(self) + data_dict["vehicle_parameters"] = self.vehicle_parameters.to_dict() + data_dict["camera_metadata"] = {key.serialize(): value.to_dict() for key, value in self.camera_metadata.items()} + data_dict["lidar_metadata"] = {key.serialize(): 
value.to_dict() for key, value in self.lidar_metadata.items()} + return data_dict + @dataclass(frozen=True) -class SceneExtractionInfo: +class SceneExtractionMetadata: + initial_token: str initial_idx: int duration_s: float history_s: float diff --git a/d123/datatypes/sensors/camera/pinhole_camera.py b/d123/datatypes/sensors/camera/pinhole_camera.py index 8f7f8d44..33249d17 100644 --- a/d123/datatypes/sensors/camera/pinhole_camera.py +++ b/d123/datatypes/sensors/camera/pinhole_camera.py @@ -1,7 +1,6 @@ from __future__ import annotations -import json -from dataclasses import dataclass +from dataclasses import asdict, dataclass from typing import Any, Dict, Optional import numpy as np @@ -30,6 +29,14 @@ class PinholeCameraType(SerialIntEnum): CAM_STEREO_R = 9 +@dataclass +class PinholeCamera: + + metadata: PinholeCameraMetadata + image: npt.NDArray[np.uint8] + extrinsic: StateSE3 + + class PinholeIntrinsicsIndex(IntEnum): FX = 0 @@ -180,27 +187,22 @@ class PinholeCameraMetadata: height: int @classmethod - def from_dict(cls, json_dict: Dict[str, Any]) -> PinholeCameraMetadata: - return cls( - camera_type=PinholeCameraType(json_dict["camera_type"]), - intrinsics=( - PinholeIntrinsics.from_list(json_dict["intrinsics"]) if json_dict["intrinsics"] is not None else None - ), - distortion=( - PinholeDistortion.from_list(json_dict["distortion"]) if json_dict["distortion"] is not None else None - ), - width=int(json_dict["width"]), - height=int(json_dict["height"]), + def from_dict(cls, data_dict: Dict[str, Any]) -> PinholeCameraMetadata: + data_dict["camera_type"] = PinholeCameraType(data_dict["camera_type"]) + data_dict["intrinsics"] = ( + PinholeIntrinsics.from_list(data_dict["intrinsics"]) if data_dict["intrinsics"] is not None else None + ) + data_dict["distortion"] = ( + PinholeDistortion.from_list(data_dict["distortion"]) if data_dict["distortion"] is not None else None ) + return PinholeCameraMetadata(**data_dict) def to_dict(self) -> Dict[str, Any]: - return { - "camera_type": int(self.camera_type), - "intrinsics": self.intrinsics.tolist() if self.intrinsics is not None else None, - "distortion": self.distortion.tolist() if self.distortion is not None else None, - "width": self.width, - "height": self.height, - } + data_dict = asdict(self) + data_dict["camera_type"] = int(self.camera_type) + data_dict["intrinsics"] = self.intrinsics.tolist() if self.intrinsics is not None else None + data_dict["distortion"] = self.distortion.tolist() if self.distortion is not None else None + return data_dict @property def aspect_ratio(self) -> float: @@ -221,40 +223,3 @@ def fov_y(self) -> float: """ fov_y_rad = 2 * np.arctan(self.height / (2 * self.intrinsics.fy)) return fov_y_rad - - -def camera_metadata_dict_to_json( - camera_metadata: Dict[PinholeCameraType, PinholeCameraMetadata], -) -> Dict[str, Dict[str, Any]]: - """ - Converts a dictionary of CameraMetadata to a JSON-serializable format. - :param camera_metadata: Dictionary of CameraMetadata. - :return: JSON-serializable dictionary. - """ - camera_metadata_dict = { - camera_type.serialize(): metadata.to_dict() for camera_type, metadata in camera_metadata.items() - } - return json.dumps(camera_metadata_dict) - - -def camera_metadata_dict_from_json( - json_dict: Dict[str, Dict[str, Any]], -) -> Dict[PinholeCameraType, PinholeCameraMetadata]: - """ - Converts a JSON-serializable dictionary back to a dictionary of CameraMetadata. - :param json_dict: JSON-serializable dictionary. - :return: Dictionary of CameraMetadata. 
- """ - camera_metadata_dict = json.loads(json_dict) - return { - PinholeCameraType.deserialize(camera_type): PinholeCameraMetadata.from_dict(metadata) - for camera_type, metadata in camera_metadata_dict.items() - } - - -@dataclass -class PinholeCamera: - - metadata: PinholeCameraMetadata - image: npt.NDArray[np.uint8] - extrinsic: StateSE3 diff --git a/d123/datatypes/sensors/lidar/lidar.py b/d123/datatypes/sensors/lidar/lidar.py index 00356ee4..d32d73fd 100644 --- a/d123/datatypes/sensors/lidar/lidar.py +++ b/d123/datatypes/sensors/lidar/lidar.py @@ -1,8 +1,7 @@ from __future__ import annotations -import json from dataclasses import dataclass -from typing import Dict, Optional, Type +from typing import Optional, Type import numpy as np import numpy.typing as npt @@ -39,40 +38,15 @@ def to_dict(self) -> dict: } @classmethod - def from_dict(cls, json_dict: dict) -> LiDARMetadata: - lidar_type = LiDARType[json_dict["lidar_type"]] - if json_dict["lidar_index"] not in LIDAR_INDEX_REGISTRY: - raise ValueError(f"Unknown lidar index: {json_dict['lidar_index']}") - lidar_index_class = LIDAR_INDEX_REGISTRY[json_dict["lidar_index"]] - extrinsic = StateSE3.from_list(json_dict["extrinsic"]) if json_dict["extrinsic"] is not None else None + def from_dict(cls, data_dict: dict) -> LiDARMetadata: + lidar_type = LiDARType[data_dict["lidar_type"]] + if data_dict["lidar_index"] not in LIDAR_INDEX_REGISTRY: + raise ValueError(f"Unknown lidar index: {data_dict['lidar_index']}") + lidar_index_class = LIDAR_INDEX_REGISTRY[data_dict["lidar_index"]] + extrinsic = StateSE3.from_list(data_dict["extrinsic"]) if data_dict["extrinsic"] is not None else None return cls(lidar_type=lidar_type, lidar_index=lidar_index_class, extrinsic=extrinsic) -def lidar_metadata_dict_to_json(lidar_metadata: Dict[LiDARType, LiDARMetadata]) -> str: - """ - Converts a dictionary of LiDARMetadata to a JSON-serializable format. - :param lidar_metadata: Dictionary of LiDARMetadata. - :return: JSON string. - """ - lidar_metadata_dict = { - lidar_type.serialize(): metadata.to_dict() for lidar_type, metadata in lidar_metadata.items() - } - return json.dumps(lidar_metadata_dict) - - -def lidar_metadata_dict_from_json(json_str: str) -> Dict[LiDARType, LiDARMetadata]: - """ - Converts a JSON string back to a dictionary of LiDARMetadata. - :param json_str: JSON string. - :return: Dictionary of LiDARMetadata. 
- """ - lidar_metadata_dict = json.loads(json_str) - return { - LiDARType.deserialize(lidar_type): LiDARMetadata.from_dict(metadata) - for lidar_type, metadata in lidar_metadata_dict.items() - } - - @dataclass class LiDAR: diff --git a/d123/datatypes/vehicle_state/vehicle_parameters.py b/d123/datatypes/vehicle_state/vehicle_parameters.py index 0d7f3d01..5c8a57de 100644 --- a/d123/datatypes/vehicle_state/vehicle_parameters.py +++ b/d123/datatypes/vehicle_state/vehicle_parameters.py @@ -1,4 +1,6 @@ -from dataclasses import dataclass +from __future__ import annotations + +from dataclasses import asdict, dataclass from d123.geometry import StateSE2, StateSE3, Vector2D, Vector3D from d123.geometry.transform.transform_se2 import translate_se2_along_body_frame @@ -18,6 +20,13 @@ class VehicleParameters: rear_axle_to_center_vertical: float rear_axle_to_center_longitudinal: float + @classmethod + def from_dict(cls, data_dict: dict) -> VehicleParameters: + return VehicleParameters(**data_dict) + + def to_dict(self) -> dict: + return asdict(self) + def get_nuplan_chrysler_pacifica_parameters() -> VehicleParameters: # NOTE: use parameters from nuPlan dataset diff --git a/d123/geometry/utils/rotation_utils.py b/d123/geometry/utils/rotation_utils.py index 499b98bc..b8a8ddca 100644 --- a/d123/geometry/utils/rotation_utils.py +++ b/d123/geometry/utils/rotation_utils.py @@ -147,6 +147,52 @@ def get_quaternion_array_from_rotation_matrix(rotation_matrix: npt.NDArray[np.fl return get_quaternion_array_from_rotation_matrices(rotation_matrix[None, ...])[0] +def get_quaternion_array_from_euler_array(euler_angles: npt.NDArray[np.float64]) -> npt.NDArray[np.float64]: + assert euler_angles.ndim >= 1 and euler_angles.shape[-1] == len(EulerAnglesIndex) + + # Store original shape for reshaping later + original_shape = euler_angles.shape[:-1] + + # Flatten to 2D if needed + if euler_angles.ndim > 2: + euler_angles_ = euler_angles.reshape(-1, len(EulerAnglesIndex)) + else: + euler_angles_ = euler_angles + + # Extract roll, pitch, yaw + roll = euler_angles_[..., EulerAnglesIndex.ROLL] + pitch = euler_angles_[..., EulerAnglesIndex.PITCH] + yaw = euler_angles_[..., EulerAnglesIndex.YAW] + + # Half angles + roll_half = roll / 2.0 + pitch_half = pitch / 2.0 + yaw_half = yaw / 2.0 + + # Compute sin/cos for half angles + cos_roll_half = np.cos(roll_half) + sin_roll_half = np.sin(roll_half) + cos_pitch_half = np.cos(pitch_half) + sin_pitch_half = np.sin(pitch_half) + cos_yaw_half = np.cos(yaw_half) + sin_yaw_half = np.sin(yaw_half) + + # Compute quaternion components (ZYX intrinsic rotation order) + qw = cos_roll_half * cos_pitch_half * cos_yaw_half + sin_roll_half * sin_pitch_half * sin_yaw_half + qx = sin_roll_half * cos_pitch_half * cos_yaw_half - cos_roll_half * sin_pitch_half * sin_yaw_half + qy = cos_roll_half * sin_pitch_half * cos_yaw_half + sin_roll_half * cos_pitch_half * sin_yaw_half + qz = cos_roll_half * cos_pitch_half * sin_yaw_half - sin_roll_half * sin_pitch_half * cos_yaw_half + + # Stack into quaternion array + quaternions = np.stack([qw, qx, qy, qz], axis=-1) + + # Reshape back to original batch dimensions + (4,) + if len(original_shape) > 1: + quaternions = quaternions.reshape(original_shape + (len(QuaternionIndex),)) + + return normalize_quaternion_array(quaternions) + + def get_rotation_matrix_from_euler_array(euler_angles: npt.NDArray[np.float64]) -> npt.NDArray[np.float64]: assert euler_angles.ndim == 1 and euler_angles.shape[0] == len(EulerAnglesIndex) return 
get_rotation_matrices_from_euler_array(euler_angles[None, ...])[0] diff --git a/d123/script/config/dataset_conversion/default_dataset_conversion.yaml b/d123/script/config/dataset_conversion/default_dataset_conversion.yaml index b126ba58..22743010 100644 --- a/d123/script/config/dataset_conversion/default_dataset_conversion.yaml +++ b/d123/script/config/dataset_conversion/default_dataset_conversion.yaml @@ -18,5 +18,5 @@ defaults: # - av2_sensor_dataset - _self_ -force_map_conversion: False +force_map_conversion: True force_log_conversion: True diff --git a/d123/simulation/agents/idm_agents.py b/d123/simulation/agents/idm_agents.py index 648a8145..5227c10d 100644 --- a/d123/simulation/agents/idm_agents.py +++ b/d123/simulation/agents/idm_agents.py @@ -9,7 +9,7 @@ from d123.common.datatypes.detection.detection import BoxDetection, BoxDetectionSE2 from d123.datasets.maps.abstract_map import AbstractMap from d123.datasets.scene.abstract_scene import AbstractScene -from d123.datatypes.scene.arrow.utils.conversion import BoxDetectionWrapper +from d123.datatypes.scene.arrow.utils.arrow_getters import BoxDetectionWrapper from d123.geometry.bounding_box import BoundingBoxSE2 from d123.geometry.point import Point2D from d123.geometry.polyline import PolylineSE2 @@ -68,7 +68,7 @@ def reset( self._initial_target_agents = [copy.deepcopy(agent) for agent in target_agents] future_box_detections = [ - scene.get_box_detections_at_iteration(iteration) for iteration in range(0, scene.get_number_of_iterations()) + scene.get_box_detections_at_iteration(iteration) for iteration in range(0, scene.number_of_iterations) ] # TODO: refactor or move for general use diff --git a/d123/simulation/agents/path_following.py b/d123/simulation/agents/path_following.py index 347f7f7e..661d4178 100644 --- a/d123/simulation/agents/path_following.py +++ b/d123/simulation/agents/path_following.py @@ -47,7 +47,7 @@ def reset( self._initial_target_agents = [copy.deepcopy(agent) for agent in target_agents] future_box_detections = [ - scene.get_box_detections_at_iteration(iteration) for iteration in range(0, scene.get_number_of_iterations()) + scene.get_box_detections_at_iteration(iteration) for iteration in range(0, scene.number_of_iterations) ] # TODO: refactor or move for general use diff --git a/d123/simulation/agents/smart_agents.py b/d123/simulation/agents/smart_agents.py index 4ec342b3..778b144a 100644 --- a/d123/simulation/agents/smart_agents.py +++ b/d123/simulation/agents/smart_agents.py @@ -8,7 +8,7 @@ from d123.common.datatypes.detection.detection import BoxDetection, BoxDetectionSE2 from d123.datasets.maps.abstract_map import AbstractMap from d123.datasets.scene.abstract_scene import AbstractScene -from d123.datatypes.scene.arrow.utils.conversion import BoxDetectionWrapper, DetectionType +from d123.datatypes.scene.arrow.utils.arrow_getters import BoxDetectionWrapper, DetectionType from d123.geometry.bounding_box import BoundingBoxSE2 from d123.geometry.se import StateSE2 from d123.geometry.transform.transform_se2 import convert_relative_to_absolute_point_2d_array diff --git a/d123/simulation/gym/demo_gym_env.py b/d123/simulation/gym/demo_gym_env.py index 88eba93f..030c0f69 100644 --- a/d123/simulation/gym/demo_gym_env.py +++ b/d123/simulation/gym/demo_gym_env.py @@ -11,7 +11,7 @@ from d123.common.datatypes.recording.detection_recording import DetectionRecording from d123.datasets.maps.abstract_map import AbstractMap from d123.datasets.scene.abstract_scene import AbstractScene -from 
d123.datatypes.scene.arrow.utils.conversion import EgoStateSE3 +from d123.datatypes.scene.arrow.utils.arrow_getters import EgoStateSE3 from d123.simulation.observation.abstract_observation import AbstractObservation from d123.simulation.observation.agents_observation import AgentsObservation @@ -59,7 +59,12 @@ def reset(self, scene: Optional[AbstractScene]) -> Tuple[AbstractMap, EgoState, # ) detection_observation = self._observation.reset(self._current_scene) - return self._current_scene.map_api, self._current_ego_vehicle_state, detection_observation, self._current_scene + return ( + self._current_scene.get_map_api(), + self._current_ego_vehicle_state, + detection_observation, + self._current_scene, + ) def step(self, action: npt.NDArray[np.float64]) -> Tuple[EgoState, DetectionRecording, bool]: self._current_scene_index += 1 @@ -73,7 +78,7 @@ def step(self, action: npt.NDArray[np.float64]) -> Tuple[EgoState, DetectionReco ) detection_observation = self._observation.step() - is_done = self._current_scene_index == self._current_scene.get_number_of_iterations() - 1 + is_done = self._current_scene_index == self._current_scene.number_of_iterations - 1 return self._current_ego_vehicle_state, detection_observation, is_done diff --git a/d123/simulation/gym/environment/reward_builder/default_reward_builder.py b/d123/simulation/gym/environment/reward_builder/default_reward_builder.py index 940b1f47..ac18ffaa 100644 --- a/d123/simulation/gym/environment/reward_builder/default_reward_builder.py +++ b/d123/simulation/gym/environment/reward_builder/default_reward_builder.py @@ -349,7 +349,7 @@ def _add_value_measurements(self, simulation_wrapper: SimulationWrapper, info: D assert len(self._reward_history) > 0 current_iteration = simulation_wrapper.current_planner_input.iteration.index - num_simulation_iterations = simulation_wrapper.scenario.get_number_of_iterations() + num_simulation_iterations = simulation_wrapper.scenario.number_of_iterations remaining_time = 1 - (current_iteration / num_simulation_iterations) remaining_progress = 1 - simulation_wrapper.route_completion diff --git a/d123/simulation/gym/gym_env.py b/d123/simulation/gym/gym_env.py index b4953dc3..cf9621c4 100644 --- a/d123/simulation/gym/gym_env.py +++ b/d123/simulation/gym/gym_env.py @@ -48,7 +48,12 @@ def reset(self, scene: Optional[AbstractScene]) -> Tuple[AbstractMap, EgoStateSE ).ego_state_se2 detection_observation = self._observation.reset(self._current_scene) - return self._current_scene.map_api, self._current_ego_state_se2, detection_observation, self._current_scene + return ( + self._current_scene.get_map_api(), + self._current_ego_state_se2, + detection_observation, + self._current_scene, + ) def step(self, action: npt.NDArray[np.float64]) -> Tuple[EgoStateSE2, DetectionRecording, bool]: self._current_scene_index += 1 @@ -64,7 +69,7 @@ def step(self, action: npt.NDArray[np.float64]) -> Tuple[EgoStateSE2, DetectionR ) detection_observation = self._observation.step() - is_done = self._current_scene_index == self._current_scene.get_number_of_iterations() - 1 + is_done = self._current_scene_index == self._current_scene.number_of_iterations - 1 return self._current_ego_state_se2, detection_observation, is_done diff --git a/d123/simulation/metrics/sim_agents/interaction_based.py b/d123/simulation/metrics/sim_agents/interaction_based.py index 992c837b..3f9fca21 100644 --- a/d123/simulation/metrics/sim_agents/interaction_based.py +++ b/d123/simulation/metrics/sim_agents/interaction_based.py @@ -3,7 +3,7 @@ import numpy as np import 
numpy.typing as npt -from d123.datatypes.scene.arrow.utils.conversion import BoxDetectionWrapper +from d123.datatypes.scene.arrow.utils.arrow_getters import BoxDetectionWrapper from d123.geometry.geometry_index import BoundingBoxSE2Index from d123.geometry.utils.bounding_box_utils import bbse2_array_to_polygon_array diff --git a/d123/simulation/metrics/sim_agents/sim_agents.py b/d123/simulation/metrics/sim_agents/sim_agents.py index 225e6a80..5c9f3f63 100644 --- a/d123/simulation/metrics/sim_agents/sim_agents.py +++ b/d123/simulation/metrics/sim_agents/sim_agents.py @@ -54,7 +54,7 @@ def get_agent_tokens(agent_rollout: List[BoxDetection]) -> List[str]: # TODO: Add ego vehicle state to the metrics log_rollouts: List[BoxDetectionWrapper] = [] - for iteration in range(scene.get_number_of_iterations()): + for iteration in range(scene.number_of_iterations): background_detections = scene.get_box_detections_at_iteration(iteration).box_detections ego_detection = scene.get_ego_state_at_iteration(iteration).box_detection log_rollouts.append(BoxDetectionWrapper(background_detections + [ego_detection])) @@ -63,8 +63,8 @@ def get_agent_tokens(agent_rollout: List[BoxDetection]) -> List[str]: log_agents_array, log_agents_mask = _get_log_agents_array(scene, initial_agent_tokens) agents_array, agents_mask = _get_rollout_agents_array(agent_rollouts, initial_agent_tokens) - log_agents_data = _extract_sim_agent_data(log_agents_array, log_agents_mask, log_rollouts, scene.map_api) - agents_data = _extract_sim_agent_data(agents_array, agents_mask, agent_rollouts, scene.map_api) + log_agents_data = _extract_sim_agent_data(log_agents_array, log_agents_mask, log_rollouts, scene.get_map_api()) + agents_data = _extract_sim_agent_data(agents_array, agents_mask, agent_rollouts, scene.get_map_api()) results: Dict[str, float] = {} diff --git a/d123/simulation/metrics/sim_agents/utils.py b/d123/simulation/metrics/sim_agents/utils.py index 9151ffd6..a08af428 100644 --- a/d123/simulation/metrics/sim_agents/utils.py +++ b/d123/simulation/metrics/sim_agents/utils.py @@ -13,15 +13,15 @@ def _get_log_agents_array( ) -> Tuple[npt.NDArray[np.float64], npt.NDArray[np.float64]]: log_agents_array = np.zeros( - (len(agent_tokens), scene.get_number_of_iterations(), len(BoundingBoxSE2Index)), + (len(agent_tokens), scene.number_of_iterations, len(BoundingBoxSE2Index)), dtype=np.float64, ) log_agents_mask = np.zeros( - (len(agent_tokens), scene.get_number_of_iterations()), + (len(agent_tokens), scene.number_of_iterations), dtype=bool, ) - for iteration in range(scene.get_number_of_iterations()): + for iteration in range(scene.number_of_iterations): box_detections = scene.get_box_detections_at_iteration(iteration) for agent_idx, agent_token in enumerate(agent_tokens): box_detection = box_detections.get_detection_by_track_token(agent_token) diff --git a/d123/simulation/observation/agents_observation.py b/d123/simulation/observation/agents_observation.py index be31ce5f..d42d2aeb 100644 --- a/d123/simulation/observation/agents_observation.py +++ b/d123/simulation/observation/agents_observation.py @@ -6,7 +6,7 @@ from d123.common.datatypes.recording.detection_recording import DetectionRecording from d123.common.datatypes.vehicle_state.ego_state import EgoStateSE2 from d123.datasets.scene.abstract_scene import AbstractScene -from d123.datatypes.scene.arrow.utils.conversion import BoxDetectionWrapper +from d123.datatypes.scene.arrow.utils.arrow_getters import BoxDetectionWrapper from d123.simulation.agents.abstract_agents import AbstractAgents 
# from d123.simulation.agents.path_following import PathFollowingAgents @@ -43,7 +43,7 @@ def reset(self, scene: Optional[AbstractScene]) -> DetectionRecording: detection_types=[DetectionType.VEHICLE], ) cars = self._agents.reset( - map_api=self._scene.map_api, + map_api=self._scene.get_map_api(), target_agents=cars, non_target_agents=non_cars, scene=self._scene if self._agents.requires_scene else None, diff --git a/d123/simulation/simulation_2d.py b/d123/simulation/simulation_2d.py index 8a4a517a..ab0a81c8 100644 --- a/d123/simulation/simulation_2d.py +++ b/d123/simulation/simulation_2d.py @@ -82,7 +82,7 @@ def reset(self, scene: AbstractScene) -> Tuple[PlannerInitialization, PlannerInp # 5. Fill planner input and initialization planner_initialization = PlannerInitialization( route_lane_group_ids=self._scene.get_route_lane_group_ids(simulation_iteration.index), - map_api=self._scene.map_api, + map_api=self._scene.get_map_api(), ) planner_input = PlannerInput( iteration=simulation_iteration, diff --git a/d123/simulation/time_controller/log_time_controller.py b/d123/simulation/time_controller/log_time_controller.py index 363e43e7..cb853418 100644 --- a/d123/simulation/time_controller/log_time_controller.py +++ b/d123/simulation/time_controller/log_time_controller.py @@ -41,4 +41,4 @@ def reached_end(self) -> bool: def number_of_iterations(self) -> int: """Inherited, see superclass.""" - return self._scene.get_number_of_iterations() + return self._scene.number_of_iterations diff --git a/d123/training/feature_builder/smart_feature_builder.py b/d123/training/feature_builder/smart_feature_builder.py index 439a4830..f4e7e10b 100644 --- a/d123/training/feature_builder/smart_feature_builder.py +++ b/d123/training/feature_builder/smart_feature_builder.py @@ -69,7 +69,7 @@ def _build_map_features(scene: AbstractScene, origin: StateSE2) -> Dict[str, np. # create map extent polygon map_bounding_box = BoundingBoxSE2(origin, height, width) - map_api = scene.map_api + map_api = scene.get_map_api() map_objects = map_api.query( map_bounding_box.shapely_polygon, layers=[ @@ -194,7 +194,7 @@ def _build_map_features(scene: AbstractScene, origin: StateSE2) -> Dict[str, np. 
def _build_agent_features(scene: AbstractScene, origin: StateSE2) -> None: iteration_indices = np.arange( -scene.get_number_of_history_iterations(), - scene.get_number_of_iterations(), + scene.number_of_iterations, ) # print(iteration_indices[scene.get_number_of_history_iterations()]) num_steps = len(iteration_indices) diff --git a/notebooks/gym/test_simulation_2d.ipynb b/notebooks/gym/test_simulation_2d.ipynb index 60c22ceb..d6eadc86 100644 --- a/notebooks/gym/test_simulation_2d.ipynb +++ b/notebooks/gym/test_simulation_2d.ipynb @@ -294,7 +294,7 @@ ") -> plt.Axes:\n", "\n", " sample = simulation_history.data[iteration]\n", - " map_api = simulation_history.scene.map_api\n", + " map_api = simulation_history.scene.get_map_api()\n", "\n", " ego_state = sample.ego_state\n", " # planner_output = sample.planner_output\n", diff --git a/notebooks/scene_rendering.ipynb b/notebooks/scene_rendering.ipynb index 25018fda..77735e19 100644 --- a/notebooks/scene_rendering.ipynb +++ b/notebooks/scene_rendering.ipynb @@ -82,7 +82,7 @@ " ego_vehicle_state = scene.get_ego_state_at_iteration(iteration)\n", " box_detections = scene.get_box_detections_at_iteration(iteration)\n", " traffic_light_detections = scene.get_traffic_light_detections_at_iteration(iteration)\n", - " map_api = scene.map_api\n", + " map_api = scene.get_map_api()\n", "\n", " point_2d = ego_vehicle_state.bounding_box.center.state_se2.point_2d\n", " add_default_map_on_ax(ax, map_api, point_2d, radius=radius, route_lane_group_ids=None)\n", diff --git a/notebooks/smarty/smart_rollout.ipynb b/notebooks/smarty/smart_rollout.ipynb index f8f71593..d437fcfb 100644 --- a/notebooks/smarty/smart_rollout.ipynb +++ b/notebooks/smarty/smart_rollout.ipynb @@ -42,7 +42,7 @@ "scenes = scene_builder.get_scenes(scene_filter, worker)\n", "scene: ArrowScene = scenes[100]\n", "plot_scene_at_iteration(scene, iteration=0)\n", - "print(scene.get_number_of_iterations(), scene.get_number_of_history_iterations())" + "print(scene.number_of_iterations, scene.get_number_of_history_iterations())" ] }, { diff --git a/notebooks/viz/bev_matplotlib.ipynb b/notebooks/viz/bev_matplotlib.ipynb index 3f2e9f12..e4a8d52b 100644 --- a/notebooks/viz/bev_matplotlib.ipynb +++ b/notebooks/viz/bev_matplotlib.ipynb @@ -11,7 +11,7 @@ "from d123.datatypes.scene.scene_filter import SceneFilter\n", "\n", "from d123.common.multithreading.worker_sequential import Sequential\n", - "from d123.datatypes.sensors.camera import CameraType" + "from d123.datatypes.sensors.camera.pinhole_camera import PinholeCameraType " ] }, { @@ -24,7 +24,7 @@ "from d123.geometry import Point2D\n", "import numpy as np\n", "\n", - "import torch\n", + "# import torch\n", "\n", "from d123.geometry.polyline import Polyline2D" ] @@ -226,9 +226,9 @@ " box_detections = scene.get_box_detections_at_iteration(iteration)\n", "\n", " point_2d = ego_vehicle_state.bounding_box.center.state_se2.point_2d\n", - " # add_debug_map_on_ax(ax, scene.map_api, point_2d, radius=radius, route_lane_group_ids=None)\n", - " add_default_map_on_ax(ax, scene.map_api, point_2d, radius=radius, route_lane_group_ids=None)\n", - " # add_traffic_lights_to_ax(ax, traffic_light_detections, scene.map_api)\n", + " # add_debug_map_on_ax(ax, scene.get_map_api(), point_2d, radius=radius, route_lane_group_ids=None)\n", + " add_default_map_on_ax(ax, scene.get_map_api(), point_2d, radius=radius, route_lane_group_ids=None)\n", + " # add_traffic_lights_to_ax(ax, traffic_light_detections, scene.get_map_api())\n", "\n", " add_box_detections_to_ax(ax, box_detections)\n", 
" add_ego_vehicle_to_ax(ax, ego_vehicle_state)\n", @@ -252,7 +252,7 @@ " return fig, ax\n", "\n", "\n", - "scene_index = 0\n", + "scene_index = 19\n", "iteration = 99\n", "fig, ax = plot_scene_at_iteration(scenes[scene_index], iteration=iteration, radius=60)\n", "plt.show()\n", diff --git a/notebooks/viz/bev_matplotlib_prediction.ipynb b/notebooks/viz/bev_matplotlib_prediction.ipynb index bed72f71..68c11e2a 100644 --- a/notebooks/viz/bev_matplotlib_prediction.ipynb +++ b/notebooks/viz/bev_matplotlib_prediction.ipynb @@ -89,9 +89,9 @@ " box_detections = scene.get_box_detections_at_iteration(iteration)\n", "\n", " point_2d = ego_vehicle_state.bounding_box.center.state_se2.point_2d\n", - " # add_debug_map_on_ax(ax, scene.map_api, point_2d, radius=radiuss, route_lane_group_ids=None)\n", - " add_default_map_on_ax(ax, scene.map_api, point_2d, radius=radius, route_lane_group_ids=None)\n", - " # add_traffic_lights_to_ax(ax, traffic_light_detections, scene.map_api)\n", + " # add_debug_map_on_ax(ax, scene.get_map_api(), point_2d, radius=radiuss, route_lane_group_ids=None)\n", + " add_default_map_on_ax(ax, scene.get_map_api(), point_2d, radius=radius, route_lane_group_ids=None)\n", + " # add_traffic_lights_to_ax(ax, traffic_light_detections, scene.get_map_api())\n", "\n", " add_box_future_detections_to_ax(ax, scene, iteration=iteration)\n", " add_box_detections_to_ax(ax, box_detections)\n", diff --git a/notebooks/viz/camera_matplotlib.ipynb b/notebooks/viz/camera_matplotlib.ipynb index bbd6221d..2dbc9c77 100644 --- a/notebooks/viz/camera_matplotlib.ipynb +++ b/notebooks/viz/camera_matplotlib.ipynb @@ -329,8 +329,8 @@ " scene.open()\n", "\n", " if end_idx is None:\n", - " end_idx = scene.get_number_of_iterations()\n", - " end_idx = min(end_idx, scene.get_number_of_iterations())\n", + " end_idx = scene.number_of_iterations\n", + " end_idx = min(end_idx, scene.number_of_iterations)\n", "\n", " scale = 1\n", " fig, ax = plt.subplots(1, 1, figsize=(scale * 16, scale * 9))\n", diff --git a/notebooks/viz/video_example.ipynb b/notebooks/viz/video_example.ipynb index f9202dd7..a6beb679 100644 --- a/notebooks/viz/video_example.ipynb +++ b/notebooks/viz/video_example.ipynb @@ -106,9 +106,9 @@ " box_detections = scene.get_box_detections_at_iteration(iteration)\n", "\n", " point_2d = ego_vehicle_state.bounding_box.center.state_se2.point_2d\n", - " # add_debug_map_on_ax(ax, scene.map_api, point_2d, radius=radius, route_lane_group_ids=None)\n", - " add_default_map_on_ax(ax, scene.map_api, point_2d, radius=radius, route_lane_group_ids=None)\n", - " # add_traffic_lights_to_ax(ax, traffic_light_detections, scene.map_api)\n", + " # add_debug_map_on_ax(ax, scene.get_map_api(), point_2d, radius=radius, route_lane_group_ids=None)\n", + " add_default_map_on_ax(ax, scene.get_map_api(), point_2d, radius=radius, route_lane_group_ids=None)\n", + " # add_traffic_lights_to_ax(ax, traffic_light_detections, scene.get_map_api())\n", "\n", " add_box_detections_to_ax(ax, box_detections)\n", " add_ego_vehicle_to_ax(ax, ego_vehicle_state)\n", @@ -187,8 +187,8 @@ " scene.open()\n", "\n", " if end_idx is None:\n", - " end_idx = scene.get_number_of_iterations()\n", - " end_idx = min(end_idx, scene.get_number_of_iterations())\n", + " end_idx = scene.number_of_iterations\n", + " end_idx = min(end_idx, scene.number_of_iterations)\n", " fig, ax = plt.subplots(1, 2, figsize=(18, 5))\n", " gs = fig.add_gridspec(1, 2, width_ratios=[6, 1])\n", " ax[0].set_position(gs[0].get_position(fig))\n", diff --git a/test_viser.py b/test_viser.py index 
b9fdeec9..d1449ec1 100644
--- a/test_viser.py
+++ b/test_viser.py
@@ -8,21 +8,20 @@

 if __name__ == "__main__":

-    splits = ["nuplan_private_test"]
+    # splits = ["nuplan_private_test"]
     # splits = ["carla"]
-    # splits = ["wopd_train"]
+    splits = ["wopd_train"]
     # splits = ["av2-sensor-mini_train"]

     log_names = None
-    scene_tokens = None

     scene_filter = SceneFilter(
         split_names=splits,
         log_names=log_names,
         scene_tokens=scene_tokens,
-        duration_s=18,
-        history_s=0.5,
-        timestamp_threshold_s=10,
+        duration_s=None,
+        history_s=None,
+        timestamp_threshold_s=None,
         shuffle=True,
         camera_types=[PinholeCameraType.CAM_F0],
     )

From 3dd8919f69f8f167f08a65ff3dda30ba25b0d427 Mon Sep 17 00:00:00 2001
From: Daniel Dauner
Date: Mon, 6 Oct 2025 22:00:48 +0200
Subject: [PATCH 057/145] Small type hint fix.

---
 d123/datasets/wopd/wopd_data_converter.py | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

diff --git a/d123/datasets/wopd/wopd_data_converter.py b/d123/datasets/wopd/wopd_data_converter.py
index 4481a532..ea418d57 100644
--- a/d123/datasets/wopd/wopd_data_converter.py
+++ b/d123/datasets/wopd/wopd_data_converter.py
@@ -3,12 +3,11 @@
 import os
 from functools import partial
 from pathlib import Path
-from typing import Any, Dict, Final, List, Tuple, Union
+from typing import Any, Dict, Final, List, Optional, Tuple, Union

 import numpy as np
 import numpy.typing as npt
 import pyarrow as pa
-from pyparsing import Optional

 from d123.common.multithreading.worker_utils import WorkerPool, worker_map
 from d123.common.utils.dependencies import check_dependencies

From 44e83b459b78cb8388215a633278d7eec188dbb9 Mon Sep 17 00:00:00 2001
From: Daniel Dauner
Date: Tue, 7 Oct 2025 16:22:11 +0200
Subject: [PATCH 058/145] Add logo test.

---
 README.md               | 6 +++++-
 assets/123D_logo_v1.png | Bin 0 -> 897016 bytes
 d123/__init__.py        | 12 ++++++++++++
 3 files changed, 17 insertions(+), 1 deletion(-)
 create mode 100644 assets/123D_logo_v1.png

diff --git a/README.md b/README.md
index ee04aedf..95948bd8 100644
--- a/README.md
+++ b/README.md
@@ -1 +1,5 @@
-# d123
+<div align="center">
+  <img src="assets/123D_logo_v1.png" alt="Logo">
+  <h2>123D: One Interface for 2D and 3D Driving Data</h2>
+</div>
+
diff --git a/assets/123D_logo_v1.png b/assets/123D_logo_v1.png
new file mode 100644
index 0000000000000000000000000000000000000000..5173ea3a89c97a2ace0c07a23dfb01826a5317a1
GIT binary patch
literal 897016
[897016-byte base85-encoded PNG payload, truncated in this excerpt]
zt~H=Q19VCNtzw!%wWZ=#K_QHrvlRM&KN}VtJZ6@oDiPNp5Yoc=GqZO0URb_uhbj>k z`NTJC0-fU8$a$UcS=Fa}$XZc=qh8&R4cj1!gY-M?`$hRFU~gt|=Vk#c|86HN{&G89 zzj)hBSCL5Q(HOjHv&Ri);->9_yM-#)GagPDYP;w2?9)s6q^r@aphmO?N~r;S2D@U> zc(Hg|%M_^V8z7SGPH#E91E#N?2S?AJu$!02pmr@^=!nP5ig4ugG5BfQI+3I-or5Qi z2Kv*QU6lsv;7D6~Sc6Vju?Fx4T#Ds0%yn=TKJ}HU;1$%=Oj4ED^5s~#uy+|__wOIi zf)=rM4L@UK7qQfBii>i&;1QR7+zwrT*M zO|pI+$3OGeAlSzXmW}L(=WExrm93#(=07LHv8&h3^soA6K|6F?QVBcHU4*=EW`Ov1 zifz&e7X8m}ES=z4+c=#rDMrAfhmYWsar5EWt_wvX0~&b4_!V|KSG{ta`QL9Z0r< z461B`dM!FRWOi}m%wD*#k2}h>jBBTSc5(I6dDy$1Pj9Dvp>~GRC4V?fMV>eChynrL zM16j&zXXCtbkbf8JckU-vkPK2BCeIv*~RV)m)PurWc+T`2ADB8kCMuO{I)tZ3WG(5 zXu|X}Y&mcU7H!%JUE*U+B;~)cjT;r2Ef#HJqr{H*tfu3$%3nyzrB@$@s81*C?Hgi` zYdY1qmocc7jk61pQ*nCd%H!r41yL`Zz5(lI?}rZkm_K=O1shVSy_&&_iM!zLt>Ulp zRE&<-bx1srnL#DEdAJnKR=)o|E%cdzhJb_d?+Y_UC$t8hq5*sc6R}8!SS+b&oYEOZ z!TkdGe&r0a2-;0OapiRQ)d2SBoAYf+1lFz#?V=K3#jfI2;$4UL!sb0&Auh^bg0@8j zt5XwK>+;gG8LhG-yHQ`?%$p>3qX8nL@bAwe@o8ym*D(!8qC})S#{61z?2}|p>G#|_ zPwS7>`7^)yv%EYB=g%C6!+W<`&8wseZXTY{rPpvdo18ob%0!K84cMZA#{)F-e=>uw zyBiE>rPh@AkBvLc{7s2n=Ptp4OIJ*&vfnq_#Y0UriHh)>ZzdX^S215jngUp$K4n(_ zCKEJnmj-A6+*st0klo})Ey@ZYYHF-BYxc|^Vx0T>t%N3Gxo3QE^ zPF;nSGmWxYAsdmLQ7Pc==_=QL2k%*HzvnU!jaM%XyeD@zKUwW&ilsYN!?oMR=53O< z_3%zOevvF|>Y#6GPs2O=%Q+L(38>4ou3my4r+s0O<@B%qXpwF!F?+xmaCLK+ zt50yuN1(O$ceruCBwqlz5gM;@8t8*PVut0xG0mZttBbOZp1pAkX74*};77aT{<>b> zWZWd6Ssg#<-Xz?F7X5BJbOh$C-N0z7kv5GZp>tcqDpw{xw;8HsRY!iPN1D04E%qI4 z_=TN@#ncgL_NMo)Jqf1|Fbi=J%WD^I!`8(|>}AjD)6Q*qm-&=GEeGB6Vs=b;3uqS1=KEr{)oHZu!mJGo4fP(Om+S2HM!KP( z6@<__4IsX8YgS|Zu1&D#2pyZM;b|JE?d1!7I~hC<>SMhzPpd2DmwIx6m1u_mTf&RT zcwXeCEPZG(SrGVY(tzjRRmR`MXy!M6fxCBZnNaq-cIhlQbDYj0Rs+6%bs#fmh|G*0 z>TH>)ajgLxH1N7u$Hl283~SRuB-x$*Z1YaIj}E1lVD{d_aP$gam9kXMS{fY(CA? zz5LuEt}C0*!^r~|V8bHE3~2Jkw6_${<9$>A^sm;yQ#3#WDmMP>lalk4D%NGM+rJr3 zT{>g2(z2ba;p#25-;2JfY<^TE_bY9&u&94a#AncA;pJ&KNa$Uhd>V+B>JyFKP$d;Y zhBPu^(-;591n>)QTT&I9>13%mziTn8-xNa`($P;U;pX)#u;7Q!N@V#62TtS~r4o7$ z84FGrDNq9SK3$RkjaF?9;PaV|JxmgNnb*8A1bV3(pxwk{pC;|xFN$F42e=ybw~gBw zO$8F+=L@;b`9N*&@e?p()fyG-OXNXTiY{#ptHAA0*O?_&(ix@I06UW$m#H3h8QzAS zD_dGdC6zn4=?om(dA_7|LwY1tWb>OTaxLOy=Z2E@QYVBrwbmQghgxj94e( z{i)WZud2aSh>!RBzfP_yz5sZ?gyN-PZrF|MM*vwXRKkq)^Gak__~1S)*s`R=d|NVT z7kH~MGTIfx^3AK^$f={n1f42T12nPd-z8tpr+4&AGO;AQRFey={0sbJWg{}IvWk=> z-#|aSPnkt~{Xvjb zqH!xV@TORvhBu?*4FeCch}G$Dw(Wr%_X=3OK)>1#M8Xfd4#27F{FxH-ZB36)i8X9E z|IN4G!~F+Lyshj@toN?vJr@5_Vg;S?R1M&}t{WCg5DP}7gj@;;RvUAHj%5-7<>J7f zeZpWFo;j-${Ofx|bVfL1d0@*)*!h!MS1Xf%KCh%1{K+v-FIJfpq<>4&0No?ZFIKgs zV(-ZVu=nJ_lH?cB=WSX7h4%}eSjO7SKB+mZ3b|l?GY{v28cL;sffx_LrS^gX{&_WH?PC|DgR(B z-Ms5SU=d&61He2j*dPFER3Gfq1 zcBRSX>C3IV8IQWYzVL6)4sjBRRPOH^`9Mz0w>90cwh#1f!To#>ojLPM9?N~@x&&UX(Q4mq7{&}MTCWGm%y2e=R}fDm!JVUpLRgk-g0gL z68PX)F7X(D>VyKzk&hFqUS%|Uknkq0ASN+GW|6Bu0@`G@2n_9OklYri1JD8HDU%FYD_0p7~f3*fs1Fzzrhp36($vKwZabgjx z(x)$8K#HYkxfAcIh(4H4>(qIh1wSrlUikwU&%e|K03CJOtS~4)1D4&;Dh(L zj!mISn_#fCy1#eLNjP^@-Qp3oz2nemLl>t0WrcyJf)L#<5}LK8&oqODk8w!AJSUVJYOBmSU2Br{P&Qo{za3C>~>w5pyL-#!uBKF63z+Vm@5DYCH+f2KH6HiievA3_>?331~857`RGh_$sHIiTDKkV~1i#(mo2~ik*e3 z){!VS#Ul)iA_DEBz=uF2*`1y{=~ILCA-ux~3{vD?#5*{?H@JCoYy8~r{{{v3?%FNe z#GqAt2OzgD3G_!mQiw*Ms)2D>;^!OpaZ%NliWNss0tLJiVQ{Am2=MVPBK6NqgV4Hl z*b>v%xcB199d*x(N%kwrZNS}`&0pYuCj9{S3f21Ln&g>&-x=J`(tAi-DNG6~#V@|c zTVt{AV$sm3x|c$lE1gfk;xD)1U`CXJzPsbn&4gYr@fCe4yW;y@=6n^2uoiV8B8KMr z#js}XZaA_3d@(^`D*uJ{GB@&-f9l6yuB7oQuYs4ahisL(7sjS=HXa~jvm(f)Y1z)7 zSpCMKkq{sC^T8Jby7mDI9w!1>DO3W51(#!P6ciS~q(#$IiYW_TXIlOAFOx4oS;(^} zqi^SY@bh7}4V8Nh-|7AqD?BFht1Ohnib05u5J*s~ZUoe8)kP#JN#DM79L{d%zIYK$ zV<1kN+1e}Y-ksY9ONEPPZ3s;oU}jy@*$u){2Agq8T)T1+md^f$(d3>{8rLU}tpMCEybu4{ 
zqILy}I#92cCs6zi5l&vX2wyH-ERvL`V9;$atz*|2I7-B>D;QS0lXL7uy3>&Wd?5#FX$A*C)?&s8xwOkE<%^o0#=(W z6)3-m)}U?>v~R=reW|KSG{N}N?-(YNrAIp0x$3P(inJiO!+qq$fK`b!|IMB^oB;5nqTQNxJ1R)(XJ zN~l{Z^4<%VVfC?7tZa{zM2M_U3l7EbOqY%jxoY0>=FR8mI z25#Z1@|49h8BHT4Yk(r+)%D{uGc5fozR$T&kHEs>gyQj__IaTkc=(4v{no|HlVYAd z>D!l2z?p5c8H@0y(a^SiH%7A;Arl3iBP)W7hcARC4-!d5X=gVt2ubM2=;u!#gEfn$ zGnzd}4YUL13}XI`#Ew1!WT(`iHBh1k@cAT1AhzV>>+S{vq)i7UGAL!nr+AzbAD#ic zoS$o`?S=#VvH3Y{37XEIrE_RigTIG63~dv|*q*y`1wNnmBcrKCq9em0JIP?GNWp8- zRkf@t$nRb36Z_U=y%=vWsz0IM2VQxmZ>fC&E}gmoJC+?|?3%`fKur5aj7CUGwsDDn z-K_m^`?}f*YWg64D$|3)2Qc%~pA79$LCEYM3x0uoK@9nh!+Vu25UP-+c~mY9JpOjP zF1Fg;#SOa0breZuvD2YSSzlT10Zw^RgZ4qvLgHoeq9p{FU?l z!XI!UO1-Zx>vR=CTwPo(nJv7qd+c5kyUSkqLIzc!ie`vNA6>Mg*-TKBII{F_21`Z1 zk#Cu8W{SkXf~o&3@?{eFHEcf&oZVJp9S)V5@GsRe=_Vu67#K1r)LiUzdC&7 z7kP}w7l7qPSt~%7uc{iL7M!T*zRjS3pTcUdT+ zYz4=^f%mm$iNOX%3e~JmF8O*pJVZYu5&FNHDWc}IrPcj{yZ2%3%>CvvDiW>I8$z8B z{!A?VdIMa)d|L&3#>2(c8FEIYFd8DX%VW4q)OfYiz(DMAW3kuW+ja!tzA3a2i z!$QW6j(f^Y%VIPW(!WbDQ?lrnBH}IF!Yv)FfDMR?vkUZy=WeRkFt)(#74vN1Rz0ia zXY>HFI7#pdzSjdKS`liPP+&oFoMJ2H?!JN0ATG~ReYAQ~18jwW&RQMTrbP)wZ6nh$PK6I1vdS;_>d9~s@kDFMm|XtxxN`R%Ox(UtET$yw zhqB1kON2|;Z@}N@%oj<@(@_mWp+|C}%$F<{YE7%MgvyOiz~;M(UFiN?tdDVCCYG=+ zO&cO;V}uA1E$c(uPL?OXRL;_h{!c&bfx>$aL^gJ%Yoc3H+JKgf$Gtm+F!%GdjHU|7 zdNv6>eGKj_bQYwuDjAwj<tq*D6=sBEm}RiQM1_eeT62=9er$ct(O zfnNLtTAXWr>SjS9Ox(dI1&tcihb*Hj5Mw?jHyeCw@sG*hHf{$xb5=Dx*R~bZ#z}?< zfBIo2T)TBkBq>i%7~Ef5EJ~?WyjN*OYlK+br;g|K==6q4Efq_@*#WdaLWKUWW<1g2 zZE1D?=GD8fY5qZxjos zoWF7b7Oq_^l63kB8t9px4YboAs+6e4@Z*B>#4w09yb^%+L9gn0UgLcNyUe71YbPq26UI!ncEqIqC! z^e*|s<+}6{=qD33t~G!fcvI8@jm&6ptrEW7z87e3ga|L?i$%cO(*vHvD0w1W zxOE4n>^>lpl%&1gTw#>dZ<4fH z7r7s)f*Y6a;3VJ>V;9n-F3_<^PXkM<`zyZRjSF7fjp}K9o|NzG;shP~@wVK&dIuJM zv++sQY|iWXd^>P;GZ-h{#rsp8(rs>?u2>ElV3vxSHEKeB61OVb+ooN|_rm^D{28QB z+7{eqm?(0sq2LH2%v`TNB3{qLZb17-MEKXNZ_!t-QM)!WuF=>75y-jHf>^735#cZ4VN#RVJyQ^hX8HV zE)7KDiC4JP#fqx7SkR_z7gv76qreIJBf!*N?Gd~mnxPW1MMb1XghG=5Kapft`aZs7 zzm(2{t>fzhgF2C631z#DL&B8Yq6?P7ygL7P)q@I&2hWcrc0#K7yR;xJK=r-w}AB9~zw3~A zv59c`%njJN@))DpizM}I3blOs`+fSyD=`SteS29O>igC6g0$YPWjYb~%9vb2*!*`;!q-9ZbjQcOsx%7|^x1NV1sr^7Mip z?YpwPqUd->cQcy3NYOdEtpIPd>>gac~ z_8x}Q(%_szX}i0+mP*(D-rg|0dly!g_|4qMSF>0F8xcBUR__0e+jrp4(`MPov2@j% zhlN5;YLZ;IEfTuR#A=L_)!SR_f9K(?-JP7-?JH(moxXML9;~0kEn$HT{h@u&=u-2d zV`k;m{iR>y325pJ7U1gc49R)4E>H|t&fkRPc+{*TfFk}`m|PiF79Am5^Q=%Bm>{>N zR}x#BlC!PGPhC0-n+|Pb^^&6#fFj_P1_ojnXE{3FEVNjt21`-@zL@om(!+ODx49zm zBX_kCJ%`9mq7{>OMWXn2AB`Ce#LO_-e^A2dNMbo_(?^#59Ue+iQu>Z~L%F453EnUl z&Yob+qv&?7wP+!&^bnCgiWPo@Uz?fVe8gz>BKgC{;e+KQvw24*XuNuA04MzQu;=WF zZWUMuQbG-@26idvpSVd|1e!?Dgu)tlrB|+D0`ljzTj7DqQQG4MJ&T*vgQUGAf15oQ zE?m84O_QxII{{-?SU3slKkzzn)27v&D}M!40lCq>EcV3B)8%$Tb`!%3PAp?>dgY{D zP;iH@`1gG!4Xl~86s_*};;Z{-4qbsmo6lI=oF|rV*Cz^GJ-A!a4}V^P7JKzB7{QI| zKw{VCa(g=Cb)iYGhH9Vzyw-G6vln;UMllc>)KF>fr>&i17?cm`UV6`M^3!kEI+?X; zGe*QadFeEx*^2~t`$IZL#S`J+iNmmNr(@g@MLAU^ot4@J!h%BO?u^EJEy6@BnouBt zc4EI_BGPZso`WZxjwF_~CVlV51vs^78mm$tXR|376I>;bb%Kt66@lEi8nzoQl1i2K zZYT;DFWTjNhN+=W<907^A5k`Vcyf*wPkP2wD+Gt=r_LQuMW{_9s%^5Z( zR$`#LUFR=}wf+C{S#7f~z~D|9K(0{MKs~?OFr;%Pt8(n>b(pvR2&1V$+?<_&j+zzW zHl9HB$EkeZmYA<1O%{vXRQ^#!zBG7K>$t3_6q_+58V|lxH@CQZs{q!{ z;77sMY2XW){bEJ2rAqf2Yh2x5_8p&Omx|F73ZWYq;N*&xID7OmteLZm(NrOQUm?#I z189qdK_yj|@E-PzEp+rr%~9IftGBMh3Tbn7^YA7R7uCipIRm@(VdW_>-s}xOGMWmc zS8@--0S;fz`A!AvYQcl7O2cyp%YCJJgxO+YvQ|8cL^h2LO&Nsy53CzV$dWAKM`%yQ z{d+8{FYc*uLi37BSU&fApx|{PgrX~#tCw|_3R8WGz%2qzUS@y$SCM3QI&UbA9}V!P zfuaf3N&`5-^u!L25j!31;|1AG!bOtZX^Jf&0Pala~k|p~d2% z11uI4r=z|%KXvM}xeMUTrOTpt<>?87Em|yojn|8P$dwDOzznhLZrHrn#3nksI^9dj 
zYf2v;Wv~h(Vbi2S1ezGo=4fld((3-fO=sZLem2pzmaU;Y9WyIiQYQatxnZ@}P|*fL zlh_bwnH(;xV!kcu;D8`VX%^3_Zr!&7wjEHrc)$cyF{Ep9M+k2aCiiMM z-g7_uyS@M62NQ7MP!y_{EngHZ?WYctYub(9yKxatZk)>a&{BQ7E>GXl!zw)mdHC=F znk)XuN>MaBifU&o)N9!Zyz0@zS`3S4eg)UBUMwcqnQGi37Fxx3kh$dJHHwmn8n0Fw zcn*6TAoh4925fLrc)G#9i&tP3nk+=1;0#Sdf<;oP)4_EDpkD^Rmk(UJ0!t2`C^e7L z7Nme3W0UyO*gZTK_y49$FD(n3$~6lOfu8LwnkstO#IR~q8j0O#Aa*5vK+78LwMw@s zN32&;nxfUMnYNc@*RJaYT?VyfG;0t`%lgZ|Q)@!-t?LP~U4H)fn%Z`B_gUDo?4Su% z`rCUf-;)LgXP@I{}0O#2s{##4Rl{kwGSGTgXzvs8b30#;u$`2swx zjy!X-x$}xElG$Qp&u3(=w0iUlBGQ`TEBMEjF=}9-iglk1qI{)Lc@7!a*?`RP*(Eb&Mbg6Xln?6aA_-oH0|zjOuM zfAGM}`A=&$L#IYz((B!vZF8c3tETYjhV5|t+6^<0e=S%9FXiVz!$8}gdo!~V67S$f ziuErCnbAY-5DdqSrQm;smQ3sd_4RDoorf#l8b5rr9O5$BC)i#wMfE8f zAVmc{vg?eLf^O5 zFQ%IXlykzv%^jR2_W@g*t7E#bnnY(_9A4dnD*r#7@L~Tt`L#NeVAB*QX&mv^T6Ggp4VcSH|g(i_YUt@ajx~L z)=8NVi8etAhT_FyD}YkIMysv{=mBXYc8`31==dX5aQ5bHn7xlrz~h=WhQ#LFE^V#6 zh@e2olUDP0;s%b@l2yc7zLv^6FnOA#;>u`bvB+BxcWsV~nN>$$O!l%*qW3FUv%YR~Ze^gr050>z#s|2^22jJbd&J z=4|Af9=vOLK~9?OC*aW$L%ln4iwZ@=TeWAM@(ZJSbBXc>DrR`+-rZ7JP@ZNrp?r!S zNLC$bGC?h-xx^Iu$8J8#DJ$^An^5&;il2U*2B(j4b8>KeAGA&c+TE=hc+<;~^KM?h z0*j`9X*X|?LC%eH8x>7Xi4PUA5o}#P4xm z-xixx)2YUTw1F+@-SuP$Z2xaq!0H$Q>t^i-ihw6V?&wtT@OrvPv5uv*X?0)B+c@ta z+_=o;iTSpqI}VM;ouj-a9S!9O=#Uo!bwl{`fpRD`uCF8HX`bb$0lcRED1Efpg238! zA+@>v6K|2{>OJe>+|^>M0_F9=iGdv1POMX!bUsExmU+y;U>LN6TO|bg)>S&$QrWgj z&MS&07pKar6X#AHGeoMhr!9{#lGn5^|Btbt2|;+;NJg^?AcGAzIDPN{VOv*PT2I8%~{NG!=*wj?!L{EEW&&o$$Ytr>bCIB9Eq_!O*LN zMTHLHo5dR4onAswDH(>y?~svrAG* zs+qoS9?K$Em_7=EjgiG8t6dky``Cq(u;b_+Mza@Luw|+8Ns7H~sv`AokpF8w{%sID z)I25$noEZWi}|*u&zv{}>z2(nS0`5`I;yGHv#*+I_9Rv>o^FUt7u>cNc+_rS4_j83 zBIwbi;_%W>%=Akbo^j=ZDUg{%)_?nf0#{JT?SK3?Tl}LJ#S8fNU-nDT94d(h{zGgh zEowg3wv|Y-JN?~`y>R^=pA5f@0Vh;$1b+)x^XIPH$d0=C3x?eOqBnOXw7WCNAg5U* ze1iKb_FlMbW;c1+N_e$TKG4A5Q7~apf0({%4bTKe0J2!jswVp?CJ6n;75EwtGX}R( zY6&N6!PWD(L@|&vDg|o!s1ws6FB-o2-6FVf@~T0m_Ca9K4KvN&M8@D~IIyvJ z0zvyLrhmK&UjIu!ds!RmQwZ_N|F;ybU%oBoy@uE5Bb3V@ezAm3R~ilAwWOIlb8D+z z%Lg)}4Ukz*W310=$aX z?T^^+;zkIWI$3ozfY&({d&8W)(;`BkNr0cSPTt3X@E4o;>aU--7YxiKBZ)eQiHv}b zv8`e8=B;L?D~_Io?Po7QOi*1jO+|uSlEx*+!b`Ijo7vHT|F;t-!)I@)S;R0i77~qu z>laxprYv72P$3o(au}W=602Go@73%y^J#cm|4X6}GNZ*_$F;q1^i zb31HJxOuoh*6<|w{`X5o7BtfyjDKH~ zDd2?I25Zxyfx*zOO){+9vc_C>)4^?U{NhOns}o|T*_v2|u3HpnLxjAf?$9Pe?V_E? zzpABaZ_U`hv9%=X;uZY+gxsG9CjR{M^Z#?Fj=-TkTSStQG~Ik@(t*ZwO4Gg_>+R;h ze)$}@h8sylaB=qm4?p{FLXjt>$^D6}AR^Fyjtwj3i6pzyF0O8n)o--SBM=EgWunHb zqz3rTHSA*Z05vN@CT-CM?bA1In$V)(=X0{a6Hk{?4YdD2hTp8P|GAfW65g#zxM89~ zmV}*0_$r&Fy7tiMBq(Rx`y#>r!}s13mcgXbl@sNwIslJ44vAJanIM&H`SEbkNlc=8 zx<8u;J^{5vl9Dv7?tA%qFkXwk*aqZ&r4q;jA_9flp7F_Qk)$k5RvnsvN$?Kdnx-*; zl%>(iq5-^yWJY^UY_F%A2Xu|i5J`5YkE1Kn*26m)pY}NTk7&@4(d-KMkvsc(cH}(H-xX^vm^fLJt325LchE)rH zC`J^fGJ6a%tmrSDP2%l?Y5)3zfkDCTD{$#x@l4lHzz#xq+9;sC8Y0N2PKkN8q`UPQ z2_Bwaa#i{W7)C8}A&pmh4H({_vv2}%@BRa29s6t(|2Vk0y1Ntb;8xn+0s4%B{FG6zfQ0(<8|+LA#7cAgwgCpqEbUa*3;sNo8ij2 z;v=8R&tVetopb3Ubm~LU? 
z^x*FOREj9#weRQw15a|hu{Sy%xp3`2eTeZ+Z;|ZYq&WXuQ&E0H4WC*op7Oj*w;H z%dOm9NzAt%d<>F?;D3CNFM<>1)Fmwq#IhKOj< zWMZv_n}`*90`kM>|5IWvSu3ucaLk}3GUIvN6C$b*9jB#;-X%7ylo`2$Ujk=*K}hgE zzDQ_MRzua+z$@52wsiSd9DrZF%L9MeqJqC~+`+2R?KCDQo6%Gvf1NSgBD=Z!kHFz8 z*DU7QOMEreusU0`ufkrwPpVHAi{8eQplH;X|D<}AF%R#VHrQ_FtAP6i?u0Z8P}UtZ zGQn~j$csRG9`avoFOrm|XMVaGPxCV_rd>FG4fd`*rBqg%P7&Z2uj<4dB$D)=GabcIz6>DC8M|m|vRo{)<;2 zc-e*?0!5qmD~DdEp|Exxf-Uz#-%(>(xCwqKpm=gCQv-NF>3J8@Irk zU3(Z!HS)IQjTZloBRTQit6FS%@cRV{Wh)(>hqqB`uGsU_2{?7&GV5E%ezBH2F{*5 zVzsDgU;II?^AHIeMB@2fi{a+^Ln298IwI{k1FxB1e`KK9Lnt~f3&RIXz+NO$o<^&V z2FM%_os$RqyfBUU9q|B1(((`1?u{$GZ~UPD8R$5ySJHUXfs7kaMh(o`un^BrH+;u` zFP4-?smu@R4%(u&tXU+MSC*E8JK%%{md#<}pDJtHx}~spyJ5ykqlI6Jl^@{^b!^qY zM3S;}?eI9L-7uE%TeElu(9u?^Ah=-@h&LXK&;?&8ZB@z8d@7~}@VU5&E&ETgb$4;s z_D5^YivnWSQ`6^e-iDd`4zY5vO&UQ`R5M0Xh1|M(7rt7^)+vbf8IeDoZTl$(nQ#Qe z2M0pCum+6v@@+d{)egP`m=#cfP*`ybn=AeupIP~xt3X=0uy_}xvc7bCu8q6Omh9hf z8jcHRt`ctwDA6E7?SRSDX4=p&*Jv3a^9Y-N%aa^aeOrH>U7aAc7tPO#;l`EQu=sn! zYG82zJ5s$~GEB(G%JilkEOpiL(EvSzW~hP2!u;ecGp-^*1IW1>`54frmN!PKqq&S) z_zr`9NkFUF>^L8ah`00DUa8oV(G3T-8bW|~i_K6T8L+q&ck^m3Gx-7(Q>#kqHg0@b zvVEo8oqb4{ClcAm0W+rJ-yN}lqmG4Y8l3>a;Y~$RK<-f1>=OUs^Z#Y_5uuO3f8u~_ z0sd|hGf!`s0e7#mF^!mQS6U8`L4BZ*+bW^=@YgM|d&?40)2f^XKEWQb8NuY8`{BSP zzKgXqz5Zg$E_6~QgI+OEq&P>xH;b0SwOaTrvzu>8 z3`#hPi9Rw5#X5V}oP={ntbg`I9&(x{7am@&j6CfxD=lvHV1;ZzqT5A6v$mmf&B2ea zt4!B;1=Ijut3+&8hS;>Erg0e5LG3fR65Wk1T)!lWfqtFy!2>-`RKl9=>*2t0_K73* zpW`2WE7%8X7DmUj2IMnk$wjPl>vU^aRQs~DTb?Er68s*=+U(k}2?b_e#|m@BN}E-+#0}pnoPjI5_>`W5Pts? zn=^~q7Sp#13gNr$e5I#hU_HoAPOwWY1>-x<`GsX$R#i z2a2+n9>E7t>-Ow}rCYWsmnAZM)ADgp6u^SLcf~rDBs~G!=_0nZ>&Q0lD$kHOyzLwu z+0NaJlCxVtSo1(ptguRTVkdB7Yy!sbUj}!v4YO1Xc{ma-KE-6K^>OQ4|T=UWAZa|lY3-ma~|O7X#069L9x#OZ3=+V9_1o-+hn!U*XXYV`Usydd&N9j!k>{zf@?4qC|_TEckNqLDW zrspN^CGX{X-~YYu%lGoqO)Rl z@XpNHo!Omp&Ysxl#(iO_SFsTjGv98+_Eis5YE6RLUiF|`-@zBiG1V`7MCYGxdTsGgR;IF+cJpR-ypcFHq={2%aA9Fk^7A$Wn@b z9YReBgM6gs6X*W>`LTF4GcO;0j^$^<}7^}RHYY%Wwb4{E?ojz=`ay^SSX*?CA;}er2{8rF_o-HQaePS=9XVDy0 z@sN$XR_#8)J&?Zjn?i>s+$&hT1mg->z`{WvImrITl6BZaNeS@g(RAS&G4b)JFt~G@ zydCO6UbLN`mkSYiB3?RpDbbDA{WeczOsNIKa3WFUTUCknxhtH0bJQ0em;NVy$rMw+IdosZh(u|J zrDw*B1XpKzeAL6K_)8FRlDnNz%L=T6SXg>F5=HF`aL^-rRK)9gaPnBtM^S`#7cK<~ z)3Ozi$KsJeL9+Oc$e1QGD?5$t4^2%zCzjLQ(RI5&1CU5tK#f%n-*pr9gcvPs!B78GeFiK`3k6L?i zmI?AQQz7w?Z9FbMy{3Sp8+Qk8`0X3ECGWdW??xAiFN_*GXsTS^veNyAO@ew2H81#! z*yz+Qt1KgZm9)UySZdZAet*xw2X>HukWRmIa&UmBMh&-_0lr+b-jZzgUfzFKKN1sW z;Q9|$M~I7Z32)==31iywj_~PA39w;bq?}YKO-F7!+S|)A{~O=g;@V<4ZDpZf!Gd^* zg&F!p7bgoxHj7EhgeRR(ftdZgqu-@}E9lU>d70#tCGncC_Q=WU{bJ80KCAa+UT$_i z><>9B=ha1HSQ&I3#Glc)qls{QujT`J^^x%>nQ73K#sY6)Nm%^2 zEj$%;>Y2QSD{UvM8pu;|4?XD;Sc-F}{^=Dl9qmvhpU@{>--27>is0t#3IjU!6w&4{vDep~GVMQbYtjO6|73m97$A9vDXnOFX3=~;t+TTJ{1 z^K1M`5wFw1c=rm(yR8=N9Gsz9z$_+|ygXr>)P-D;y~g5^+Y-=VVvfW!$ZBW-?ARQ_ zYONHjb`}q|TN`s;WHEeS!c{nVBbo6Y9Xt@~d)8q*Rm9EIJFs@|epy@umfcd%ofaV7 z5EK7=_$1K6a$CUbfju>AN#0w6A2l0#ESlD@2h%NkET-F{j<%4AgC7cwc~O+@;^CMp zhwF&ft-*IM!gEyUlO@2kR|g7|TJ%!PbvP2rmiUl@jrgou`T#A!Ygu9m!k6N1YB>cW zOE*4fwx8yJx>a9<>6U9rz{tn@fLkqR`O=$szcnLZIgvq^uLba)&|vQ=QLB%4L!e!s zR6=t4ZJ-!>5xTVL1b(eMi7efCN?Iyx#)FI{AXnv9iAPa1^g;MWIH14`G-O<1N<_c0Ck|S5 zIGre?IDGK9*_un1I^lF;_0a<~)?u(6%e$9x1Ol(6Bl(rUg6uoj;o7lHB1;+GG+-v! 
zJJGkd2tx2MyaFrFnJbqr9M4yk1uPX2Kmf5q61Xqis)V z`%_!N>iM_;S{}hjyFCfIeGq3olANqOIJ`wOAQ7^Z9mTDAp?v_UDuQEs&RYu6DoS7_ z=IY@D!)N!FbGJYSpXHNNR7$$}f}yum#R62y3W|k#4G9*eHo5Ou%|KMlU=6-5YOChJ zGh28nR_s^;faFJ}qzx zQTillhiY;vgRb%xz;_@HEAfL^5n4R4=xB_{D#T;aoe_4CPu-02?*(m}@*a#Pqm_&t ziecKSnFXM3V@L@i=i{{$@j4v=PN)9a0$vU50S9aB^Ebf^H%S zIw1Xs#H(~9j!0=v`&F@ExF2d>A89{S@wzqm?!_lH?fWCI53)v7sq)bNt$WvNJQGp) zerbZD?hE`2{_+P08R5%Lp$ojYj!>zJqDeG(b{7qA?bpQ>OOv!r3|hJE=wvVFc^mJ0 z4Vh`s6=MOs6}3?FTv5BXhZh8OQXA7l3ler53m4TuvxZHefA>Jf(@SLKWW$=>8)Px` zp;{(0O*Fojw-H}xqUo0wg3uxW5pJd3208&xgdVNCKx40lBFn%_us}xET?jk5PfkJZ zB+bWNOr&WXO&9UzMIufe7c+!!VO|U~Uj;H~>?|gRwt+elvBS)Zk39j0BQ&nD-B?ck z^qX@OA()0@EPg8D;8^$OMYtKWTVyH2eR1V>O*%ZgS#WmYt`U^? zS8RBSy{CHg8F?$61^$U8B6pk_=0_cabZP%sa#zS*ofjvLHRn-{{I)v`Zr-^gi~Stm z!F-u%qVZZ9;$N6(`n`EK3s#&wE3%Z~O=`Krl#Xqfyz^I+VA<{mHYF@iS&28NPqyT? z_!qvjc4d`S&gq{yDHJJf}iT^lfg2R36|$5|FG$gW#Uc-D7^pvinf z4`mT!sI@YUe9A5G0!m~-tCR33ZF@V-Kv0+BEk|}~RyofYF;%nEg)SwSIV5Z&+|eF{ z_A!17@*W`jQ@qY*g4d&4MTD)mn1T*j5MgwWAtGtu53s6)kzcs|2-yA%yISRZK za!QrtrM}(3(T?}&Klp7W#@L2HH{$N6w7SyXjc$0 zslC#f?9e6J8UN|%sX+arYGrsvD$dIOc3752quF&b^FI9s{<;)%4J<7TDK2ek`N6aN zSW@~HhJ3S^@l+9$UKmig=d2{}Er~YB3a|ja0~CYzim2Md#T|xp86dKB_c@@;FF9wbumk9pwb1VrOI5Q{&X#cSl5NK8&DWaXjk9mm4N%h1rX zK6G!%-zAp7AeaTnhFhX{F&+|E@sEuS&FZ!*`WVo@ zvJ(7+@1DkeA*G=Y1-Z7r5&212kF_!9M3yqVF&@_Os2`2Ri{SE&o3MC$shgCdjAg*y zw7k&bpV(U{Jcof>vQ2hp~7WAJI~%_M}bJ_a|h zq%xig!lzYTOZFaub?>7>z9B=G1)jp(jYJ9KdJhL@N6kumF-e*6Pz-mW)!HIV9Wnu& z9M!$+Bzs3>C|pavE~+7XKE8VyGP4-^Q;}bIBHsN)6tvPj^)5NTi(pib!8V(iDAH1W zu<(=nAq7{%OYkM$A5DWPr73wpfdVK{?TDCZ&$g9##HXgs6_Z$v%d_va$~k=VGN5gr zEa?2F6^j0id063TU4<>ct!@jb-*GS#O{ezJ%6C=Z-M|O>4V@&5^T&@skjymb>SqCb zKWRq)J+WrA24Q~WQJ~Z0mf>;LA~bJMAI1dui!7!1SL-zKvGlHeQ3fR+W7;e+J%x19 z>3z0<7y5Js7iq8850|Zk?A!;on^qU6Nxix-V@R-^zfyWBupOZp5$2t!X;<@J|7mJF z`r*Hwf_y0qY5HrymS)UlyIDokD(j6uACU7>4`-3gyidQ3zaHYX3D5V}lrMDYNim5M zJa|TDCWpec)_oVjc&dmgFKfJKB$_-k22_v*@Es&K>YqjRw2je7>?ErM>XdIdxQ&T) z#(~77!Q&WD1+jE1+rf}TX5$~NsVf!WcaPjCY{A5?<9iBE#M^wBdsv(t9AU8EfKtgV zZKBgLo}cqrfYRoydg(SC;?D%ufOc5>xshA-7dLo=T9x1Yv;a&ZMp)M{A z5@GA=pJD&*bs~%KNPNJy%ghJSm?s9)uzjGC<02p}@zjH|T6Nub;4_+hA&bk>*V1Iy zFeW_qvZayuTbhIhqq}(!BCwuF~P>60gGU zBgf^$A^6VmX=pixKx10!;4v3?LTVap!jt@L0rlNnVOCe(QNDIN1r}%*n=4iShUGOD z{|`G7mCa)@1uNc3ti1o&HZ^K_yNWDbct%P#L~V>?G8+1NLEu>4sb&du5<7p3hP32A zJbHqtPx1MqP7VXSCsf&&L}s13?!QldaRB%aZ!fap+~Fihj7?@dy+r4rW>B|DEhc8$ z;zO3o&(c^!wCUypodR0OStp|nXz*)5#aQ4`RNGoqJ+kLupztS^5Pkkc;lgK8=GXxv zp^kfPk)<2oe>@7}5;UKLpYh$hRz~40^n*^vTefE{#9rmH2X+bsckLR~GOBubk{`LC zeGahpbbm_Ku$88Fu^0ui>B7A>3W2JDot>bO)7jm-nUrA3Q}I76QQ3%h<>YjLg)N|V zlg{APihGX_hHU~0=TZgDT6Ki(0YhbRqwyoqO=cQ&^|JuJ=rpzdPqAh+yDy(2=ZWDy zGqtniVm4hkop^_3k!3J9wS2J{nnJp$PGA zRK&f4d$9ideRA3~WPB?#@6*rWZ$0tagy;L!aCCNnsTOAU!|(tLl~AK*4HzhSEXdok z0mHX!0aI|GBmvEq8y*YEDCI%oH=N!z_6|DW;_Pv~M<`b>@6B2h`(o3FPgbsH@#f{S z%mQRs^PQm3{3KD>j^p7XS%{D5u4!jAX$(e@1!zBE38?QsPvR9zKCsosPQq2py+@Ql z+sfwSh|m@=Wc+MPGw!0QXe2g^jI!WZA9Am-`dmM`4KkB2l|@Xc)3ykj!_rcxYyFb> zjHj2F@YpMsL>M0dOIE3#JR0<~FhvS~5@F8h5#Z+REV7j0Dd`!oB5bcLFB+e9_MXWJ zEYXEaBJck9UH-AG9V3k%Iz8odO0Dd<>p3Fw$@dx=*e zB$i_|D()#N?>4k$4bO(s%xK4Fr_59k3y_(f0|(Z{it3=APaPPj-Ia)?(L+RRI0;Gd znu)h>@EOzu6Qe!oX1ylr!^0kaA94S%hc?}tK&QUG;$=7)ehHEj(-}`M(PMZ^s8gTs z`nTc7{g9caajxi91w970g%+I}$#R}VTWFYaB7;_Lfw3sjPn0^yuRnOX*AZE|@$=W> zfliMTA-HFMXi~qi$kL6+Ts#ZWc+#;1tMT3A?n)_vB@**cXY_0F8hI>4=-9L^v~1X1 zWEpq~7NFAuk6qMEj;uplpDQ67yHwf_*;33k(3CSHEd@UNf6qZfpB5&T(qAupkSWZn z82A@fP%|xBOxohu|G^99X_?<~8#3}(be#Cl;?i5q_ihsiwR}3U5G&fEclw;SG@)MB zD(Q&(NVoIzhqEW5L=lhTN8lf5s(6uU;MLCpc#rSlz5Xw(*+xsXqmZZ6jJxo!1vCax z4^d}gEzRzKqC?YrnDMa0S&$jQtT)xeaO 
zg23Lvj`36yw4JO3RDdrfo{4-Mk!B`>e#g!Utl=sgM@_pXojMS2PIB+w@33dZX_)w0 zA2UxcLY|v`Q#-)MuMU|-(O`nYpeD}kZ|12&&>I8uUS27Rb3)=J{Ch`a8o13Z@Rq2d zCN6cF*k_c;Qi88NuvsIAwv$y2i#BOI6{JGXcYD62WvmwpYfu>~x>gZ(@9fj1`1RjT>Mom=q2H*c}C9lE#c0`uQ4 zQ$O;7Wx9GZ38ugOB;4a0Z|uhBY48IxmyiJM^etX~R!(A9(+*lUY5}ycy(DmRb}i{L zdX$~;pj3O7CMQ3)aQj-+4bAHMKyBAr?7B?2;E@RwPF5V)*R;o-yoxVpR&F-j&CY~? zcDlJa8z zFP263VB&sZKf+)0lb9`^QTsyOtaa(4f;WaP@A^!mYNiT>z7x zcpW_Ic$ZCGF{yh8je@2v+so@Vp2in|?02)6YzDh}S%7A4Y!h@{HajiDXRfH%Shl>T z)c=Rb4E!^<%JcR`+iY<0bkEMP;K(UR6+9NSUS!S~TCiBunKTtn#V>g*#^TOgtHL8h zaqdVwg@50P%yP#6F6$dK%g=ltSDnhDm)WI#DgG z$K9XgL*r)P8Dgq|)>KFX3sUic0zLCk#xwZG9NAQ-vx(;vh%Z@0+|>&uCs25GMZ`LI zY0T5g&nTfHZaBDI9#{~Y`pad5ciwyV?NWa!u$NHOY?e)rs?_l)XpJgiC}$b|vB&O8 z)H2A+qQ!e^p#~10eFyxB3C+pMgsrQ7Vmv(reci^*d{Gub<8cjBPGr#4!vgq@Mq*8F zRd8*nr}nwtGGJY3MuWZY$FALkRj1-_9-AUE ziBC5n<^7B8$Z*t2e`aXo-?%_NIKh0;3WQc@Q~Z+%9-gjXTC5=QRpIfcufefB=Vfse zFidUfH&jyMtGd^H~AJl8Y1KszBoYlm4nKM|41WsuWjxKysaMk=hKrRN= zFl=TosO6=Z&V3!94@cFy40+rFXk(nGgb^aQXR9vC7YJlz-^J77!bLUU+qfC@>)MC$ z^b#p)sjw+xyDa7eGITet_rDVl&8+G2y& zhJW-0q(Ab7rr7u|3+2;Nv{U#u0v{e#?h4wjrT}Grf`4Yn&$$bi4=gp)Dv`!L@FW>2 zzG~C*?;tztt}-=rM7g{tFA9Gd8z^^WN(@1j-2(lwfD}hVKs}A}cuz?my-)P6zJu>p zJpQTxa#b*(!#HdO6FRikh>p2*S;JGsxR7)WHb)+k#Zj!OZ5kn2lVy6o%pNkki_+NF zNL-nnkxGwB5zw@_vvjIl(N+D}sxQJVZBb@%niSj1QK+$}sCCfP?u9GJMV<+_!$lzD zpY3NN3x6rLOWrG&C9s0P72@DOmU&L#`lZ{jZ{tZTq?Me7;=M-ysjr;K2N|8#L{?N;z!W9ru)l2EL4CrPx2R(W!Ug%ejMqRzQz>Qtb?>Sjg)MDV93md+}Y_C#Dnd zzX^EZsdtmK`>u*fVEC+FuwveBNV}bB=I_CJ;!)&E_sR{L-mx&gO7u{y`aB%^kM~pF z5ghw;0IdCHuY3&oTiD@TfVNZbA~9R;;YHB4t+4h}L<_X_X$2jc@Ij?`-5NX(Pa58M zXa~#cUE2#r1rB9Ay+l?{HmupbK^8+Z;S?>R1n^mW5AEiO!k)rEnysZ78=CC01hg8W zq{$LTyk1<;D>a04f6UISvkNMsS@jE^RlxHSeuoSzCzYQXI#U#-8!tQs z@z8cz4vj8;l$oWW%QT=Ujlimu1|P&6d=S6DGXL9D?%a&qka%b{cK+4gIFjGA?;}9R zdFN*rjRw~({t|{ydJ?xiq32jX44?ETShHXr&|pP?H?f{yV|{Z4o`D(`sO%Q_yIjDx z2K1I$O6aWod|Xt)JG%Ai)P_d}1u2m)3%PRh7OV)}BePE-W4+8QXL$Hl z>WVB4PP6+oC|dT(Zu2#&=g}P(q31}}2Qgo_2B}ro6=;vygJ?$+#K&BN=v{Hpb4Xhg zOUbW$9Tyn#NDtV^565VS!pGrXh!Q1+5ZMB6BZGB#rw5Lg)99u{PewrMU2!L(r$ z!2u^FRYFMEM!1uq2`T>=`yyqY{Vu6oDDSU$o&IbF*oUIkZ|0ZT9#?WN{Gacjfp>l7 zI?u$%!JPkjksZ|3y`>*?@$INo*TJ}>g{#jcq2HlB`NJV4D2}wzc;dT}e1$YsUBX^Qc%2TCt z-paL*ivt=7{s$ZNik=023(IsvESIHyV8!dD#c75;d>M_Oion^`5ym_fP{dG-rKe=V zu4PALsb`R}QD)w!$$QOU@{6S#VAAY9)klQTr6&t#Cq3V%H%1(xg#HIb%Y z>g2985f$}|?O`_R{z%kMC)Gu5h!a^>;YYS#fZItKEHBOQTUZp#(&!=Rq`2%%jg$6$ zO9MA-AF1cY2C+Q`wSiV$8kedFwBUU3Bi&irn+bQ|%z;G4(@S_a_JB@7d|-X{@MVaJ zx};aMCV$<=wW0qwO$eYr+N!@MvBA)5fxn;t_Ti>ZWDVX7>eT@>Lw)(yRVHWrz|m0K zy%yu?C3ZxH!nNe&{$Dem zdm4`;ukBV`Sgk(QpRZbn3nMk1+<#l@%% z$x=E@VcJmMCsCnj=~m7B2u)U4J^QVqOb?dv;eq`a%~V`00N$q-C}V;sr+3F5(6)JN zk);e@x@9Hq^_tJ*O$ivwXca_A$N#s`0my&6KFwmp>TAVJCLxPir)gR;y$cz-mrF><1k zX!g-ADd9tK?L(|18x~&J{jE)^?c&)48u*Q1vg1x2hM1%K8BY&Uzj1TuGl`>1O2DTu!df0s-0b*}ah;I?h92yJ_JUxpT3b8NO ztiz*HY0{3td)Sb(wufD<7=mW1*n+*#B_}N{dVXN(Xw%5um{$af(WmWXZ3XuV?!o#W zH0>#kXudB-=iA6BnQtlqy-R8xr095qeD)61X}?<6Qd`csKl=}EjdURcN{ z#e%l<@r6E}dx3gQDG)uTb zyf=3)ABU9q!y-!=-eMqnIwhC#+J&DhlTU&sJ@J~Q2l1~;l^DVd3#hdKb?h8Pk+hD2 zdM~zsZw?-3V(l;#;Drh5-OHpTB|QTV)7!@MA#oZ1ewCROO=EjVgNZ#oHa}^K1z@u) zsWYuBYCF$k=EmvS){G(5`tK#?wp0Ts#ZWXO78Y zR^z*;e}R@P)%Ns@sKQNDL!JtJ?nRaoJR8sRSh;g8lSPBf;7)4igR}5_lI_RBm>wjk zFRXhPLSt~j&H5iCDI&rQ7sGxq<0=u4hHas48$KHmjq58e#`4aN9-Y2zI)fj&HYFI3 zFGDAZXOM;kD%=A2TsFW${7EcGOV8RcvW>6EQidN*x(SC8De|!h#`o(Bts1Kh1u#kf zcKs&Yqnyt~;Wb(~oNG!`$$n|}iWPX*!s9^eB+3G^r=ljFH!^W<|FVcEbJ`F*V?lx( zq`#1OCX!~|*&^8~uV_6Ero^TBoh+SgK^~j%<)=!MwM=P(XYdFl^3qtSdzoYwm)OER zbHKE(6_?)NKiC4;=8+9w7gM@Axxn!5!6HiuPVw{ir1he+Mom{DTNJWrlg3j)mvzsC 
zi#o~!$9v<1dR`E9dDZfSVSNUPEM@qHJzF3(ozHiS_aCWzyI(p@{cd)qT<%ZsJ47BO zeelkE@4jt}Dg+y%RjEtAMPL;!tVq8aBeIm?zCm*|veqyDO2gB`nE2SMa&{VDzbP{f z+OR++umA9BNE_sf&Ui|Uo(>6uyZ-G?4sU)Iz$H9e@NwP>fwN1mso`V-A zME`LSG-&Pxfn#`Yz36J`Fu#5+aazAAOH7AXkgG=}v=3+k4O@CLk+g^I?d$1`r-JA^ zvI8_}Q%@H44BC)QvC9bzx;CxN4)!^O-(5xIxk5H8!cT4WVI1H-!oGoD_8MwJ`2C*F|{ zd4XQpl;0RoXst-D3I_3eey+FXH#Ajo`vS1k))8JUdqbUO-B{*`?W^H>;zh>OL-_R$ zg0|XVz*+b~U=5lc9Y$Wm0(vcQzmxa8D5sH!8%${5N@OX+&)rUi-4`hosR#yj?+V>o zw=7~P#J=B%(@`0kj^&5=(Pb_Hg(_8v`TzwnmkMofL{spkf!&Kwuw*sxpanVv!IrS* zt39&#d&rn4Gw;){;ID^xZ3?;@?CkY_CTVLZ5$M1UI{ZT=(71cx4DNx>%gu-N-|thY zrcf5$I}3|qC_ZGaa%B&JKy35qA)^TuGdN+#bu11pRKv!@+aVi|eHMWhR$v)KmTsID zR!~P-f)6FS(vxTxG7NJiIXgNjKh(oyh#gU(ka**&i7fmb*?lm$IlD5RUSj{5LqM@` z63|J>84}MbQh%#ymD7;++mHY`w%7I+zTv5wvIy}N(|#>?0OzK%MkIbrD4DHCj=3No{E;G12_!BqSHQFAjSv$TEfF;mLoI92X zCn7G$Qr2KSG(CCjGf-!kHIAP3-642dcR9aRbinA28m~;``dG+J;~hJzgbSIv4sHS7 zKJ3*g+*g-Q|5FXYQ@eqecH6F3@7q$fy6P@(GFGsoSjnMXgP@ixzn9{5Yw*1MJlGPw zn`Nzsd(aQ-Gnn!85-Dk^uqk4@EQV$jSOkcippw#Kuw3lkjI+U^9;Ui4*|Nff7XCWm z_MKyTs~=ddJGhnUK_UNB8!M#CiCU57u zMUa}zKg29S8Hql@W1&&gmU6D=@FUerMz*QEy#5fo@vob|Om|o|JwU zHk{?1&hBkmK~NWcAuq)h@aHYt;MScq`SQoAW`$(IHlY8FGT9bYJAXa?KsCD;N!|&Z z^ph7GJWxm(6Ik$0kc5wm5knp^7P>R>`Tlr%sC_ZX%f_Y|eR&~5ZgxH#SQlG1K@Uhx z$7}bW#`->mr~YkT5cPnZWxwp;Xb)rO1jrfbJMx6gEH&-_=K#DBHjD)*bxA5R;aj3| z+Dn#JYO97_7y^}gmj~`oA2taC_tiY}rt;E}&^fs3Sb`6*%`2sEQ_5MfU*$ferYy$#6 zIP?`-DT<~;7|0t@9MJW1(R?XGuc7Urm0#oHvgqbtzjPb+Z#pf@3B&U5mzg-AbwVBs zZ*Z@JD`oKz4V5rxdUwsC9Q#(q=sH=@Qu%t1Y7HLVF6o$<{ zzl;wr{|d<|4BD`O4K0ArpQdBy;p7N2yR^57)>I5dT7EK?$!_h_7{>JVXFOHJuiJOR zwcE57jscy*mdZGlpZH%aA{&L#c2LEEC2s~D^{mBAtNYYS=nj4H%+jNY$VaGogZ{+#Ru-H>alR+ZyHe(a8Fs)upZY`$a@Q;4&{6P8iR{s z?i_wVDVCVU39Navako8Ny0t$sv1tX?B4Q?K_WpRevTDFKm zmx~3k@urU5pG9S^4)*Xw_l_b<8J?M!55FFbVe%Szd%^TUwwXm=x+4@W-_V3ieu-_% zEmnxCL;RmyR6257hFuF!Xm*F1_Ub@6nNoVgkNY7vn-3IG0qw43sng?dQ1b_K$tBbZ>n<{hA|Pb}z6@OA)`qy}WFZr3ANgaE4}mXE0f`U^{%va>mm`xVU-1(23;rXV3zvsf=R~h6T#U z0<%y}Yf;tAE*-$r#aU!2!+$$*7E-eKyE}LM7(BYxUYQP)paR@d@ryOq?o>InI5^Qby$Du)84kaf@zlbi+{I&) zUg!_BAYFpDB-$Va3*aOQO?8hGSkT3{Bee5j&7gSMYJ9EaoghKB2;*70l`XX*H8cx4 zpODH1QUCqe7pkFuJcrlmIHoQ_}Fgak1s9p)a?MNtOR?6a z9yotBNtXRBwkbDcW)-9VPcCnm;IW{yDX@+#$)j%!Vzg>v>%xOzVH;J7>&}&R;bTW% z>>eV22rj6u=Tk>yDaUEc+Mr1^nO_97MeliOL3`0v>oE5^&d>`NZi|q3{uUhA!o76j zb!+j)E$adujwiujw6Se^V3MLLO8yPY&H7za0>&z@%GKM~AS&(%lk4BPC$w$OpADA4 z3S$59D2Pj-y&M?)jPD+slx0^dFEJO3R$DCIqa&u+BsBJRS_*90zf)8NzV(|z*A|@^ zPcKn`eaFqwTo1~mZ_r}BvMuFj54NQQ1`obK8{84?5Fh>%&i<(Dk?*2JU=7txEsenDdaB z#=Y^+j2(&l!mHOuvreC{)_5vt29JePs;XOg%E698DC*lQ>O2v432t6agWE~zaBSB_ z@lxS#Pu&Kp?Zk)*3UIB!h9CBeEW%Upk37wm;BApv6VF1KXye29O!_9_p?dP{e&|v@ z$Tg+a^BX*)v*z%=74vp$26}pQnDS13X`l%efB!5J2L6WyaG=!`u}4LFv~JX*&|4w$ zRNx_zTQv83=E!L(L^BzSr85@rB6WwrcIdU3)636# zX(i;ONpspqRs<(U2Y6IFoQ$R7d!a*p?*9%nGADhhSNhoDuMzSO=^l zgf2M&yBD9-tU{jt=RwMk@D+83uRRVo6E!<)Sq$#RUIeYA=`ZH*ho|#3Zs9GmY=--^ zstf+Z+cWv+4ktlkY%=5NC2H4qgRX;pnV1VFli>K?3yi0NXw%KdQmor#v~?R5@~fE) z`rfh+y)E1ocQVr;^yEHfy4HAjRqu|>Q^&4bLlkah6rKaBv* zFlERDaMFf%n6F!l+|9~_wbHC{gF5vfpnWfEMHZWHeUzrpnuCeIEjEMB3z{CLPI=We zI}O3v5bt8x_kcZq<@I&0-2fVP9nIv%$DV)_hr$_857DSuEAS8EI#8bByVhe=BX6lK zfSnEM)w9-fe1}%hybi^*s)rrtFF{-~&0-h9W5WhReNXjqFVwmIWW|~yMSozll=wrL zSygGQ<0dToVX^#2cU*+%t>;Bn;W66&!*~apCD2KbtwX+zkfl&N&Ei&t1b-9oLUbZ7 z5-3lY`m%P4KWynK8BaUt@SNPL$C};i zAtRH!&nNU533LLYN;q*T1}z~4oA) zO-(a{pYONVjENmOit`*HGJ6hbYts|?L|rDlP4_0ysjshzEc`tgehHEj(-}`MQNMX@ z=n&Y1i6IZg*~1Bpr-JA)xGl8o+(;JnB-&iVloLHfG<|Sg#QIL|i!m>Xx8TjexSB1| zyG1oPaZ(S4_-hBUEKwPWt2dJ%ESiNvl7!v(?j0zjs0vSx_h2{{qMKNdG5v>wS1nJG zr3BB*&x7UL`Giw#*IF>7i)K)+Tc3p!?`Q^mo^HV<2;jZe5)xMHOZVQKfp0-sVgcSq 
zdlZN^xfuSVNGikK>a~D+9fFy>;|Iea_T(YP(?hiG*d03e)XdaP!WXTB9=S$d!vZB) z0H3b_yv|zE;ca}OZC&*ZIh7;o$~B0-$|q+g4G4r54I7kHOslVbvSPKIm)eXEWM(y> zsiMVBvp4efhQKjuS0BnYqr(^^AX|Sb@l519h|oCJ43<0x=8HB&CP@oA?%`HVdsduQ zu5r+8ZD`?|uQVM(C3+`3ulbfZq2QO52(RKou+6K5RhTPQ!`s6Pf;#mTSxWGo$HO5l ziw{&E88I2?pbC|+n6@v%T`(l5svcS9S;PAU(-a@-VY1Wai0yDIm4`cy=`|F$B;*Ip zn9^JOo0yE>z4OQA?3-}#r+(`OL5td_Ob}e%P`nZ6@j+XK^)fGaPPuppQZF5WI?cN) zuWpOrXW_=_T}+y$UA}vkwdnHebiTXZ6T6@){N>!2Y6}CwrLpDmCX70lxzshn=5GdAtjF zc)6HZx__ItuM4fZHv~GV&;*gV6;#668-<~1CZ3Al0TcXS=|^~!eEe-u+>2=Y_wkFEB5jF$6^{=;KqQW zY*zg@CZ`XRd8X@P*bmVC@%d$wQi{~IoA`lei{6lO@t`?%WatJ+jE@B$-?nC+9;92} z!O*f@7r1bib-EtMm+YVT5xA;HwvpGcfNTMLuE@naQsf8uHUYl|bw!pk{LIbU5O(o0 zlQ*>nAEUcrTDZKYp$*+WRmSCCQ#lNO<+3qCY1^O#z_^23t>t%i(4oI$F zVx3<(Nx9PZFm!C^ug78HYc%#z2eht298h4p4r{;O1Fy`RqD~1)qOG*X%?X6>|7(LN zmuBYgxL(8W;~%jA&mdjgJreU(OB~xj^4h#2R?fr4-6o+g2hJXbYbiHGRk%K29Jsr< zF`iyx#g4U*qdlef1ANXX)TXKdg@2p-c2xKoBd6M=4&gYFODEuoFuZ#((DR@Y(lXOE zi}g`Hb@)P6>airt2!*m_m121Kz8u3#Y?yxYY#3x**G!NVBcW{U76YGV8Gc81ezo&x{Fs)hH64zREV)NMNeYBlc2M2Bwr4U(@XGM*kHaOgy6;G>znor^D9 zmG}tjsk@Q4{49XaQ+w1lS=89QQGM`l(oke6!!O@ShmElpnY9&c@_Vb z>y=%a{Pv|uWTYvw0KPc8v7y+H99n2v2ua5_LaU)Kfr}@#ooa#3t~kEzFJ_?yxO>2+ z<=??$FaOKT(}UEcQSywr@Z(3+FE9hSt)IichoM7cu5@`IGvB-Uh$$AJJoHsk9dBc~ zn0NH$zTItB)E^F?gaTr^wHW{C z+p-h?G`$bx*#f9HPQ52OwtP_u3WnpgYxrmGNVt1CL(h?|OUP0_mu_E8hm4eL=rg`8 zbQ_|XuL_mAPBg55(yX9lYb=kJnq8h@<#rA{Dtn4(}#{O)4&Z2l+^u5BmV8|nY4M;* zIZ-8~=U|yw2Z{z@s?Up{dY`*|0isUvxusrhxJVRv`22<0~5V>5_vifwh>5O-WukxC(O8Z>`SgfL-Sj|Cx{lUwf^Gtt;hAImnnV zGb@f>{QUC~Ws3#CaUVUXFNtOLaC3*T1Jn=vNXM@mf79Hn$$iI&>XqO}&YytGx30>v ze#GY_S!UXde&@Y+-!?`VR+ty>61~nJiCORE=fLFyOGK73+^6RxaPp|feJxj>Mo0W>;4-k^9|atK&dQ%&kv1M|0E`E=~){_wDA>L%5WN4EIt;)44Z=?CXX4uXwt*fU zx-p(!V*l|dh)Xyxi}@MfD~c_wYCyv;=Fgcjcs#f`xhSv4)tlE~_rY)`w{7EA(7s6< z##2G8i`puSDL}^OGSil{F`%&Cx)elwNz9(Ge+lGgtN%dNw0DAL0W(Eaa3|#!>{?Gn zPy-ZCHDrRuL(vMQPg0}7kn)gP;0>%dYpLh<>1q@A0oixS@fe^OC=p)7qqH6E?M0Rn z{7k}S*b;R}mKBB{**!ATpeuz1$ZcT;yic%i^%*l=-H|@y+JIXfXI9OSZzCW#Tm53& z$u9<3Iy#zlvROf@5rG4zfmj_DogdhtH#G91xfQjr0S~C4Bb!B-IZFMiV|n``)JL7w zPei>{2!Bm1tAo7*Oc|_pt1OdzI-sHeW8_7c8c5-QYT?q&D{wqst z3@A#rPqA0=M$udRshEHU6ITwd5LwFbrZ}LmclKcN=tR5%Tsfr*#?E*F>>XIB3Wte3#07@MjDz^xe(xG5bwX#HGCpfI^{EgT0eGH1X%N5I3$~f_*#I zC{yKKzcKV5K3SI63tz&)GSi?93zWhFuV5NBBA$y!X*=M_S+;=Hr_aIF^b98YnK2{5 z&6&^8vIJHV=dUKgx`+dE?nwN|?vR-VU8yYag;<)L%se=_>72-_)_B0AcHrdd!0Nc> zt36PFr}3$VDKBa|wzP`*&#LuQCVBX+q~0Qn&hOc(3$$$5TzN6K9}R=_EZ*KdYJ*kB z3pXyaRQ7W3tXe9B6+H5un*r|^%os7nOshm_yDU23Lxd>-V=Q^`#3W_LZ)c>!p3{+X zzJvHJD)&Wxw)wKlRjZ~n-mpxp8e6eMtG2HmS#OgX`{K%N3WXA3?XRC(isu%SlnI~s z_-jz3rslCQJQiiLqLCOIYB#jPi=rj!y1KyBPHjb&GMwgSems1F$#ZdXf;l*#umya) ze6?oJTGaEuw$NqBRBC~RDDAo^ao?&KxL2T=-4l~k8{E~y3H--$&%yPC6o^>Q-yIbd z?iA1h+V!N}-xxfKZG-yWHB(Wc1g%V`fs+}g1dcW1s*roQJ@3Xt>EE97T(NV;B0RvvXtT5;^HAL zIhDzKeE49fTe}wHsUYI7U5Axnnh!3;2W(Oy-;iND3t*>+T*>5RHbctwyKp)p!Aw_o zg!Y=XcjWKkiqFFEo27n$HF0i#*_`Amc}-?k2O7VTt*{3#h zsVq5A)k@-z6J0DbsJIkfy%ml1+Je`*U1-+-6CiXdK37^ zVC}-s#U#4%$xmAJQ2#}@U_-z|VFCQGl1q(s8tOPZ!K^OrZC3y95AoSndpkRL0T*f8 z0_Lq;i?c(T34{N|wm^N4%oY_eWYQ!Mi&5RXGUlPNiiejA^cdcf)%SAj4LH0rmhn^& zeMfeHCT;l6g(&wKY$MoYg9=@hKnAU&U?1n{fqVi`RTen_{Iw{cI#SMma4HDHW!FL_sqX~gS!F14=#(kkg z;RA#i1BwS2sFasK!F0J|+KZvz0mdbXEG4*$R}*N^X&93gb96tPIj)JPV`<7lwCU6n z+IQ8s>89aJ*}5!pjIv;kR93a@NzxSSvFPx^h|oKeqI+EEh``f8qvQK66WCRckzWW@o5r$D(v# z`DeS!UT3$o)i`YTg;7HXO?^OaI*D(>bN%F0ZpfG`GpjO<-^qqp{@G$3eCs!b9<95G zEG77fOEGZa+9f7yRNzqM3yxRpSPMD1Yz$61KdhpYC@*2zT|}eJ9{q^&bq*wd+ksth zou4cWqk9YiXGdpIy>6W5vex5HU=pMu{jZKThsV2hEIyOYE8iYG z&Lm(N;FZY}7*7@P`Kl1e&DTr>y^C#t`qi$g3T?=-WPwj*=Dt;DWLCAMy&8Ey=b+}S 
zezDP4;M9S5##2EAPwfU?4cMLyB=%K&PibILU4Ys=)?=JZ3v>B03ng`3bAU%#QhWD^ z;swW+)&^kVui)P@C6y>gA!r|0RL2Ie^*!ss;NI%J2h_=5vUvro9eI*Q^&G-@Du~Fq zXh=%AE{pmW%Wy|#mKSXdDAZZ{0~EzN6&J&PP$ylGRk9e za7{(CRvnlQB%Z>TlV)NJFf34<1!m%f7UIQOT|0oM3;WiJmv!MWH*dq9c>2y2!SG%^ zphNSfMGQsQjg(YaxP6x_?Hn@r7er2B(3Q>t_|ZLrg$fr7c04p5?%d22S=AaJG|Nw( zmx5JaGLtB&zk{PajC&?fmgS9%CuL?;r16{Rh2?%mEPI1G^&qf)Z;_=0Prx$?j$Sy) zWcBaX7g{vtgF}|U3SwQ@Ce3;P6!S5@H@Q}btLQ9$!UC{xyGL-BOVw)|sFD%KBH=<} zJfUV7ieXQ6+-jR?Rmi%?t+FVZ>H1P;mM3itC=^*6U$nC*YKY>HJ zrveCO@;rxlKjHiMa@t8lj)zVBbyn#&yc{i&%&_|QGV@ z0G+5lzV`^mQ$d_dx(G2>Vr5aw@mrK2Gs~5J=e>8|Hbxj~P5!Da&TQsW|sRdu6LcSrRLM#x1TF;A`BiF^^anlda zATfBRpIH4ue9inl8lGMT?Q2UT3kk@zIZfhKDH6Yh)VQ%(w>oaMV340?A&KsKC8ww0 z!2}1GoDMD8LD#n0d&m-%k=PL#3fGb~u{HCt$C^<_QI#S2L@Y#Wu^cw4Nw9eOS1^t7Fe;(J_7 zQ6TMi_{MY^)2N59)^32i+1aw-f8#r^@mt7=47ze!0Ph{etg@Ja%=8>Mx&!@=4+*sB zQXhQ%c!y`-##3{J1VcwhC|yl z2NTR5Jzcq;rCV3ZGINkIUuKpg{qFIjAF7B2ctb2R#XwCRVjGwCg7u3ZEVO^bEV0iank);IB%FPBkPFsY=^%}y! z9{oj@ZhZgoD2Pj74m^_iGj>+5=@wqj0`UI$qsl;0?GXQg(7>Ayrp4>l;+K*x!#-SG zAwu_-el|H)eO=TxoK!0iRT2J)%%}+37*Hs-I=*;2P$ctFCLP-VS*ceQYocACYC>~7 za$AI)>@3*4;s=qX3wLsMfe}-ll4VdN?kt&U&=0i*{w~&@rXQc}MSTU^z)#VqAUmHo zs;^I-1T}3i@%eQKo@R9Su3VA-;JdDIU&x6Jx^i0p?_oNYkj1FpyqpGcht#*%mDLjM z`!<2bZD`SP5$s%k1a4hr8;g z4;CNe+nxE;^@d3U*kqIv#hKYT@WVz;?CNzCu5Q#`#*&VECINdar8*;^jFOGyXo1i1 zkBw9xrql8037{jlMJUKCfb~D@7g00QQIK|9{bN4)rGZS(eS&q)EhVUAJX*xn zQ7jYfBU;Oq+fcl2ExzY;B-~6(X4$6>od7OQ&Wxv*ShQ)GB@rK6VpahjJx_LGIcJQt zji=(bEvq0;I>d+$D^U&iaLlkddZ#RwI+Y7$W<}A)fWiirzdu@JF2jPnY`A=2sSUIN zH1&T399-)#+3Ru9c77i3bg=|wBqq*&1!(cM1b>%kgM7#=fHOf9AU#5?_CVhz;Mbt8 z$Wn&q78JlQNB9mKo*wQnd)OdlI!uDHa&mwcX-hy2_%{;IAgjIw=809HNQQ`2+#@Dl zuWoovJo-9lI`>j!XXe7@Um}%PwM{o4=p5KWmOB~Sk~T8464NZShW3x8McTH2^-)`y zSV#1POd2$f@$?c`Zze(L;k~k$-Po65q1?m+^Ry%V?_4#G%4!C(oGh_WJHs%`lg;;+BuX+~1 zdrGU54~iv^-gX}D-l0u!9uDX^suk4o<{_`^f7l1vchyZ&nJh_bEF_?Q$(s_dQj+-1 zqqrm1Qw{1A0QG8nD=+WSizng2^-E0dnEu1T8zTr*!s5-#;hwas9?JhvrH*ox^&;wW z6ZMg&0`Hs1Qi8AA6#|*knwQCa$KV=2_1*0C2e-*(rpD%7jmR{1rq#^yK#52h1 zV}Z}bs^sMqK-7jCRM-^3*K43qYG}9U7GBN*@cD5< zl`n{Dy=r;Fus(xCmJ&P{qd`{fT*qY5IVHiJ0~t>Rar$yB#9lowi&}#H%p{pvk+dPcN}z$6CnA&5^}? 
zi0vNV+DJ}OIqAo-SZsD{`tV6W;bW>{!ya6BcZYv3#`%u`2W`l>Zq@6eEROR&(=AxB z0QIXI6&J87i^KQvE;c~qo-!|H%}KisNk`U$PcOAI4rC|apr_$-)N-Jmr%bTs*H6Je zc#MhE{YxH;Bl~xVa>%FnXZ)iV*?@<{0{C1v!>Y~@tJ|@jHw59j3R}SshqVsqtDKmg^h5 z{;nxaT5=}rTYCmN^;6r4Gs(C1H{*6TG;Hl@>0mi)MHc3Ba&>@!3GHClqM~;#IVlZ7 zR~?0+kM`6hTh82nydNz2a3@@gXPvSa@E*{%&3qO?&RlWy-(}Ud@>HxjunB(0ecJBvm{dz;}iGyPP8Tdl~1E2CsBGbUD zwFTbB+Ob~Yi@2DAnsr-1M8Z`#nsk$io)$a+n%1w!cq)kHyTc*j2G@mhX=R*-sY2hw zBm>)t+E`x->xCcL2AJnbF<&dMIzR_j*b2Uh;=5^G=ILhxa*aP|NFh)VeMy;xYCwSYwIneYb__} zHJW#r$*pi;bBtcmn*2j&^u$%-ZZiK3e7~J#W+kBihGk&=rzrtrZBi^tM<}aySI|Ki z%eJkS%Z~wY6?ZU?T$nP_)V7#`P70hhbfTG7i7eT?f{Am$eg4PxriJ`!AtU=P>^>18 z3q6Sp&59T~QAN<~<*0}PJR~|l#Co#P|Ao+R0c;Sn=vKca)NMC_nf2&_U2x$H?~t(s zWh8nJ91Trcw3Bnq!H)pN85mIgE$}8*(M7E0^Zs3IaVgDr5sllc6DQ`$U*W|f$vah^n1S~52)A5MiQC@zN6N>%NM425gS*JYXW@g8T$%*syV{ppEi3>M2dsMi4Ssoz*+DZwL89)h!% z;+U-9&i%m4y$<84AhtyBf^0leTLL=qPVI{*DX^C0ErzvHjC>_p0Pp=3Ot4f;lzue^ zlFuIyS<3L1gPzmK3R(1phG#7UUtSnD^F=v3`TaCZIgvqEZ42PN_re;m2dpt37%{$` z+7JxUN?%1DlR}GeDd&{&ViF}dwb2&56L~1SQo5RzRJweHJc8=f$E%gS4vq0S?pItk zd=6-7Y^9t5v{dG>Wv48$b@6J~cY|(2S}++GPbI;z@biqPf*AXB064p8R?+?y?@?vy zpS*>2W#OK-@=k0yvP*Mp@}ncBD4(!dvW3sGWBn*3aK4IrS|%A|LqGUcZ7cvycrkc% z_!LH~A{I)g22$T>e4i1j#GB=iH)%`sZZmx!xq-i|UnwW3NSZ=e3@9H9U@Q$hhJWS; zGd}!B@NTU>5}6=tNHJJr66t2ro_wl|>U*U`mYld^omc zBax<2b1eQp zy^c%aYVZv|?iWST3qBtIG_ikhRgwhsZ zO}y26MZ!<*2U=qz!54UsH0}#2O@)!hfTEmCj}O5aeDK$yx``&0$>;XNoy*671^}v| zMbI2L690l(>^*F#)-Rb4&%X7snWqP#qqav+e-;*g@&80IG?PsY@8=@Zz^jJ^@cDJa znlO)4T~`;F+NrJ0D)`O8<4_=ZdS9Jleub3kEyh#vzEUL*sW5zYXt#(SV)X}4?+$cu zfvsT42Rlj<5pe;|97=%py=VtGbwK;eZvHh23i#f&w0@z&2NYO_*YJ;pdrs~<*3#ikx(yO` zQrn4~I%EPkIXdZDVf-3ludM5IPoqM z6@6n9Wr~EF8ze>Y3^65J;63?r{GOj>mJ+&NyCCpr)Sk%--LMo=ZmFAq7&hriaPz3m zbRh92e()S*ra@N=3(yF)u2_>fz5Kx0-cDpG!*69~!J3$JOkOA7<}kEpH^x&zY>7Gy zN6%f5MJ>io%Q@=TBUPvxkcvH;@SG zjLa>Ktjd;#CaSPZo5XURjZB2A=QJ~$ViK$HLFloNVa@zKGD|mIw{dNt{bnWb$LAzS z;*~3j`fH&=_MCWoctKDn_0KNtCAOtPEoOLq6Y-|qf*-`m}6dT%B%#@>5Z#11O(}-CUFh%k9?@D zP=#6mKisrjMyo90SdR&f-EtPLoW03-Du|IYdzpH-X-`9W0xhU`hy@sdp|oxAv>(-QiCj!*CF_gMA7H6Qz9%~>y_MuQI$x=aqDnuC3HC!Z>;cH^~ z#1aKR#y@o%WR;ba3$jiqczcuu#Re&l@eJamJ#%%@Ten>}IMt%@eGzP4@gqEZkY2>F z5F0XX21a3WkfqQ@yJ|AiN>}O@co|F2oJL>h*czPe^f63jxd-j|xZ3!*_Xqa*$D_x=%piZAXC_`UTe;*Acxqmkk_O3Z$APaw6ckzQ^ z(>43E$z5|iOIS$=d^X8Uuum-Ru^pEo?dC&~r34S1$gLEp8&-U!-b&%y${PavHj~R7 zj$Mg*^Q5fB1dV!W5#lB=T&^lR5kWGTZ}AKb`f(Q&9_299JrONr~LHy|2gS4prF z@96=FSK&##KlDvu?zkT4J)wH<2{pvs`)ROgA9ttptJ4rVH0JMyiblJ3Ck5gY`1cFt zuEl$FMumJmWmNPD#i4BrvHE^6xPKqE|3=R?DKpvlfNVx(hed_g<5xrWy&ED+30|#w z4QSM3GLx0{@BwUHsm}e%y=H9)9ir)<$&NjkDYg=owgsMi#Qz{xz{%DIrgUg6vXtWU zj-G}GIk`;ctl<&3N?3#OR1hC7Ss{y|m7#yiOe|@ zVy)RLP7Bb7+fOXcr1q_#rlb0fm2~756u{hg?#fZi)fFc63l)o?1mAn|H0(RYJ8~4f z5%Z(ut zud&^L~!<^7qEr6(UeOulh!_>MbTQ3lHe#4=p-1lJkzkIiIi0)RRVi zv~s~FG7h%(FetFEE^1dD*r<^^K1}U`YTEsKuwk#}#AN!;ZPF;O9E`WbV%pi-!H7OX zMV1o$VOA!rieArTd3$(4_m)A7r-De&ybrtZ)F%lJV}GYt9$z0RES=`knt=byc_sImr%{y+#A2l-7S%8 z&0GF0fcKET5zItkho*w5fixJ?2bP|?0Jk1yGU2ZcLw`Qp#Gx8KSiIbn$bXt*T6t!* z0P|6xh-Y;4lU{Ie(;2vZo%?tWdZ7!{u>H-vH3Vlg)v9Zj7MY)mUS(4)7e^XoPWxDt zo12{v2iBYyStjv_v#Fn$>kAOjvk`dvc`&)#792+Zs|Spyf*3Kgr!0p2UVbMtb*5=% z+*H&ts8e4ce<;;(EB!X?KX-_U)#H5BrQ23PPOfJ5`JX4x6w_bP3GAcjeB};eIfwQP zgIaDiMV1nL<<7N`k(t3{MFtOo8t4+D8a5o>4te=`ve-&tuAo9&Cl~UqD+3x|-~v?h zfvBn=Hw&&DTqLrT;SIY@02_N}CU4FBFQ5QTC8}V=i@!6q+l~!NRq3+IDbp4p4_~%k zIi^i>@OE+1r4V$M?3}~gT0oPEm%^$4Pz#68Uw~c5xGvPN9^X&iyQ@|E+(>(}xWLx+ z$YekP3iN{YKl0^18=O=OoY>i@uD-t~bPUxv@ejkY`Wq8fF60+1?hUcX`_K<6FNeAq zbwFNXve|rknwJ_juOk&Dzd}JSU!@hVn~U2zx`9v7XqNrv zwaY+J@l=6dvo_GSv*!4}NPN-RU{sYptDG`x0oJkBz+HsX@Jv~~V8i)DxSV#MiJv-X 
zAdnA~YWUA0tEED9n@aL#OEe6g(nV$|qv=?p(+887yq=M5!NZHMDw7YE1hmvhyUHsa z;N+KO~^5Wn*aQ?y5$C2`S%W^g9u3 z(e3Kh2r7nJIFWD}wjPd`g>J`pbg#^`(xqddPQfG!c(|2m$!wbrLSRA9eR#w&BSCr$kA+Q zVF}D6LZaFOZH$rNjWW7NY(b?udV}f}@odwP9gv;NCub7}kA~`n1{10FW|CwHS-eGa zDn%NK`@u|+<(=&vEC7u&j4*!CC?l>4d4%7RMVpr~aldV{>BD#`h{9kcv0G$O50LS_ z%&f??b#hU@cjp^aPcOC+C_3Jw0`-$qdpB>W*KwFp4L1;8~)_nzzha;E$pv`hve2)J|k6#bYif!^vBGWoT^Q5b*I*@AW`?D&)Z@ zR^}f{EA)H|qt$;qu?c8jgI+)@<}oYzWVCsp4>arOqfv?9BU91$XeoNKCUGTT;B!n% zoNRD-_r?P_z2}PZ(gzP|2F@P*q=q%$?SF@6dn_A>ihyndn-q`tZeLkbH+ZuaL|+c#rN`EW8W_SSJ)^ z*x0hFCol9@B0oF*CZrtOAhMLbjzVakcDbj4}`{BV;m zTQ9LxVa@!YnWy>bRTj|rt(VcltdB1MjVoAVO$t}L55>?x z3pVjB4YWC1&;2z9y-$7%@b?ReSK&!M(yZ{st&#=&3~QAo)+}+?_qdY!NMtF&ooo9; zodCWPatdeU=TF2bkxxRp;Q4O#8a9^Yy@nkEtu$Go^0dI8rgD`|cUjK!>V#q2{dV1!<0%05OnpN-O)vTmNv z(0yp}E0~2RQjP5>7LjoB?ddNwD+7%a<7-&Xg<@Hg&)$ZFW13ZJF^O4tcbr@}II}Tb z;aRhE=B!Gf^MJ!92g@S;kTF_js-Urtq7NL~k=?0DJ7`?jSA}>c<2QGg&5Y-WQDq}T zhA^I`#Qx*4a6ajxEaoSCUQ%RcMW^xpkeOh*SkC%hbs;3Mr^r%*A31vrPF_6CWc3c{ z3SRE&hcOgV{bbTvrVEJ`c#kg0%!*E1Cluv;hw!1fg+=&LEXKpsL`XfeQ)DT_{d&Km zk+pjM=Ng{5jOsLtpD;_#?u?ArWTus_m@R-GZt`IpD3&h7&j;Fit9L^(==fT)R1p95 z5fO&V%6=15Zo;ZP`(=5w)Vf7xT4}Re;B_%44c_~W>m;(2;&I!~!Of(*OeU>#+u_Rk z)1a8Z8MB|3r-EPmHSGqEw^-0*A~>QMYl@p!(qQ*$?taD+m`Mzq-rdxFd=I}- zdRUcc5rxT(>aN~eN=*@W;V4|aeVw&RMDM}i?&8LHmJ;)&Rd$p|&&NlW$}Ok-GQPaCM!qzlUyhbKc^P0-AcS(?%ip+m<*&Dt9OQ`%hTSQQrA@_QlY*XED+=>%!aZv!)9FM56?Xooag{E)7fn#6nF1T#g6 z8Jjm~01-Mc8@La`W zu<3Dn@(H+`pw2Bxtfs$Yu|WN>c7ZxqOItj4ZSd&nqDsQ+;NOdcSfS#!0DibzV(Es9 zB@C=r2Z9g#W?M$klm5qjABKwSO*D6PQ49Fqj$@)jv-^M%(T*Gw}57~fAEW4 z%og^njMo$Iz_H!jOQ2hL0C?6b*{bi*S2P>QX;MAoS$Zq*_5X-vqbc*ArQD4}ylyV; zQqutf2R32ZlP{zKS&mf0$XUHjxz4?-SkHSXuZAddO!r|TsSLk(BN0wtJSpkpZzr(4sFB5u6$7y z;g!--K?S!?D5w=G2K*lugPE>rUxgmv*{nObH3(p$W6(@`J0+R%EFlJsdKsLwrrbZ^ zfc5D<36?0e=2g)G&dM8wW@Y}d0f__fk+GP>@EeVWdH2=3h|DYWa^#8mlS#=4Yj zMp9BmWt0vdJ5lKyhj!hjn7A=T=~X| zyd_q~*2Wg1`bLT@CHRfoH(`4$y+TFMyuLn`s_eWR*m{iXLamATx2i~cCBshsHQ+C7 z^iY=N|FNDs?kdygT!6ByhaS4{%|C;3*Nd|6T-XcuubmQE%5cBlFELpK7)Ema;%^ww z62j5h75Wc}bVhLJ%c|vd8T%i~Ic=#mb6|jjl{BG0; zQ=!@|D#FdQG+3}bT9!fY+;W*|rOj@ES23rZn0v@e8k2x{*&;kEBL||F9c4271L{Gm zU|+_277;$Ri=4z28B=6tC8F7~H0`9`iy^mFL#=wQ&^FYMiMkpX8LCiR^+YodJttPUR;5YB3!hv&#nXFEDCVT6~WGPk$3pOsrfF=C9iqhX# zr=jd5VZRiL+JZ%mgSz(zPmfw6O9{RZS6byz=X!0j*?uer9%N<6<^CAQf*OZiIZ;KW z%WZN|Q5&S3%k{sq{>w2x$}AU)iffP#85@iwNXM(LA`jd0`BYq&% zf2p|+D`j2_yocFr#jLM)?Vv-q`kRIaIk~X-B=?!{tLqIB-8BO|u}Eg&pDtSsIk|aq z-v7gox2D&X6ItnJw*bDr*e z6PX4)=WuM1zh61WthX&BLM`Kyi2)swl zWoBt;%ul1zX<{C44=?E6GDu`8!&e{Ng#MoVlQUt^Xl3$AP}==_uzv3rS>_33Y?7Ik zgnnBrC*Cs{r`W_p@+tiI`aPRPRnWk*9t1V*z<4SMdIr`V+A52>hm4j$$k zwQ-5dii4Yjn3#Ee-#oPyGVdgnSVl<`R>Py3{CZ7g$#b$YVbiiXjAseq;aLZIX+M^< z^>=_Ja;m>!Q>bMafNSdBDJ?e9|diw-ldy^fb_xGp)4QEkI7s^b8px95++wc8)B%t!@07K9@y60VsM!mfmm0&cPP?kMC3@ z=r^nx{)EZQ%0NGU?0Ro4PVIj41t=W7GK)n{W*!_^%_s6I>Q%3CO=!`DTQlRMFTkba z*HwsTGDgno#b_jGEW-w#a)J`jvoVdUm~KSZaG>A>sv#rmA?!TCeUutE@Pn>x)lOuV zZd-*5DSXn6b@>mDH6E+3liu>MkD}EHw!4Aa%OBfKH7wb>60&l#nb?tC(P|~no>>a> zh=b>kKuX#zIrF!8i!|m7IZ;KXt&@w2cX$3J7U}V$0-%uFdO-caNU(RS!$fab@*QO3 zhfftmPMi(ZwCBIQt4h9Aj;5`MDTKVhw=SFM3xf# z%T?>JqtJA)|HO~CX19eBh0muD8kiZOm;t--x}`{b^hLOK={6HH5RZMLvuw=)a)F|w zqa=7kqAL^mGZyxSSYUF2%FitjSxWG}liO zDH`*UiHaR7$A-fo_*HGzUu=mBeDH_2kjl>Vj6tZX#9QI)OmRk6G{$k~APiWlp4 z{Rp~l+}F%z#Uy6o)otv+w>#UY1{wF$V8@zY&5~y-Q+?m25QGPDOVAfT=K8#;k|Hbl z>=wX#K`sEezF7qIU0q;U%f>|v#n=*bdrEzn#iYI#84eD1%pIRyH-|`jkO4n#)VKm% zMOpLAk!Q`LZ2^3KIw3b$yi$xyk(k6He8W!%SbnmKgiSVorPr&02Ly+;Vlt5k!@8+; zbeA>gL}NcfBUd&+$wyBKH*cLc=(K-vKV`0m^<;C zj(KZ~0waS5gB|vZsv!^eD{efpgNgO3SsMm)>C1SQ5^M0Rcyi;B;3Iq?%>M%N^VEHRMojsgSRDgSE8=AVr^4RbX7B|l 
z3w@Pc2X)TuftV584s5hmCMESOKr8&5Bd3@w7e_~UY4AYCQ$c*QW<6wN@ktEI{a0n| zY*n;}PBPw-<(k+4EhjOXPPu#+4(~Y6^3xs;??yElue1b3$CH3|4Y1X0>3T&czrq4O z5DOWz_>}UX5+px#0=HOD;EEMr?iN)m!FvvC3v@h`1T%3U>Mrq~N@994Yq3ts3C}7W z*fI2}`OLkz1KpnPJ>-Me(fx;mt&O_DD{V4fyk&)4#5835ATuioJsZpEAeMDf_*kXZ z9gFw9?hM2GS}GO_W`m5sRFV}MNI1s-~V}?D3UtG>G<~_ zk@*aKGG1qmFr+gcV4_0+1iTNhHf{?mj+28E3=bW`c$N~|4n{-Dotv_luTVdmRLTh| zB8|^2xu(4)maR@rFX$W8TVyH0w;tH3X}7_h`hlYzo69KCbu0Zg&|V4&cHljVmw1(u zv`#20;BCW)s{mi9kFaR8!c++PIS=9LfrZex5A#wnT(;=fxBFzYhWrAL^0JL`R{rt{ z^u~?WCiX%Y)074oKIKh$LP3@?@|9Fyq0$u0nwBOL%mrXt=eFQrQ$x3w`To!eCfnZD z7G4_`&Uh+_Z`YDZ`=KoAeSE((CJQ-HNoagW$#(%wVY(fVWvUpXQEx4*L%ykt<-nVP zI>`ma2&D-aVa!xQBqu8mb}Zq2Z^Pzw@MIwW5=wY_GK$vs#qU>&iD-iO68?QGGAj{I z{Rq9|c7`%XFF2`uLP4^7k81-9{~lvd0|j{nu=e|X@X}w^zT5rBb%Mno?S%Vx*t`9E zd@nx7_oAp1GnRxSn2!3Mw?(eKtsM--356bT@WK(ecIO5Y9T^%9E>84lse^@^m&wA& z1?~%(sg(Zk|90ZrsCSHjsaR&#M^S(e)vihS?7;kuOIY(zV2p5_T<8JoaFQt>U5k3? zeXSBeFPRl8W()j?#bRG1ngA5!Dj65C8r!+lf(C)Z8SmQV^Ke*uJRM6@8lpqje&FA{ zot(w&jdrpiD_!~)z)qHyW8M^VdN|m_gmx`OmQp-A@hY6Ub%)6u+ow0wtyPopEG06t zbKtvm8)Y%1_@J?2I^IA`2Q>zm0J9>}^Oqe#=O!LfVG< zLTw*6CT`2Ghv5F*Ovba6pbhy$rgoLZ)J4WbnOO`y68WvgYyI2z1~(^qwbaAvgZw*0 zgW@s$M<`Wv;6xmpPr4|}{0Z-Wip;F&G`?#zE@dv2&W=tH5i(eL(f1uc2np9NGr9fS z^#%`Tcg9meWaebS_Tw6tK=R|{4~Pn7qw^mM!oyDyuIle4%3iINQ-UQ6Kmw?l==n zr%TasvZ{eLXx~ofE>@`H1AMPCROrG^uNKN_qe7M$GG0afG#M;!Ah)LmVj+q-MM+Fz z5x!||EX(iYY7aCCPzAPjHl~i5^+x&csM1jma%keqjuMSod{TKas#mWDy~na~I%H?& zg1oU_g<_gSIyf5*Fb0u-crq9@(8c|}D_)?ZV`-DN9&k4Kf@WiGzu*w?#$(r2!mrXQ zJIa29e`-5$l?8sfEShZ@UB4;m@X(>);^_SJTxMMTbv>JF<7tQJUKc#j3`x(WYQ4URQ^z`Jc26MNy*VK^PXkMS%a zx(^ru-VOX^5wy8Zox`mx-AbEm0c>-cHLyXFldTO*>Cnn#dKJuLDX9>f%oofex_5;p z4eF^7ZR&PYE&(((6enP~w{CV6Q@v!MU9X8QU9JUQO?uaE!2OYVc@OwKA7 zYPP6V?=xjj>`j2nXZQm}K_?ttXrV$K6fX0{x_OcLs*#CjRKKU*R=MsaqE989g<}^^ zvb5b>1wp-3I^bQj^vV78UsR;RoS9egMD0BEDxRW_EfCq@36Y z^*TftSjv9=L#9E>@r}5+RkXUbdfsQyx?^`GI;!K+(9m(O!JH4@73H}hF&+Qj7nxRE z_ZA?30Y6d8r1q_#rlWo<;a?p*CaNpMUmr14!?Tnzcgr?Nxtk`7`550Pnq^oF_)e0; z4L!3a;rF9Qz<*C3Z}OTsOftO$PM<9wWkq4?f1q!KIWjHHgSrDzMpsb|4a7}^sTPx1 zgs=bc0LxFeVbGLd##2EYk&c&oB9jfsRp=>W8DzFFuFKOWe)p2oFzUb5eA^9mv9YZI zJx8~K4c{C#unK+Fw#+{Sk+1eJ@>C$hXY_&%Xu_h690L3i@s$ECqwQoy_m*saAzwC| zewdvJu@~Z5{-A)s$IX44()U9d$j28w5&VHN1!X7*-)VZGyNN3McL`N);vxCJtXm+e zDa1!~3D@vcF!r8_gVg(XWKmz^_d82wR%Y62vZ(lX?-MK{d*~AIxF$W!X0COcL+xfg zS&JOmw-pl4A7?yEh&~Y$!Of$#EP{61={J&a}|ixAiPj8Q8ZO)Nic$masIXCFm?v8l*}r}?uxVu{7m9GSxge%?}IY4lF)3mk;*)7V3hKr zC*8OTdyn$_-od{OG_LEbTo(ykcW8?&HwPJC%goA7>olSGpO<2W=<5qm*&E)R^5aM1 zCgz(f=6!f80dAj*5n0M`{}A%I=dflzxn-#Vdk04tJZ73K%o`b#WTutYtp(_79Uw{{ z)22CiyKuJ!@w%n>x45z5kzfTQd2fvxZmC#J{sr51!qr=3Rc7!dzDFAK1xrynVuqMp z4nh7fI~%^zPH@C5rN(EE9EL6$?s7hwl+(LYrhq!VP~m7%X+SrB@M~RHWGTfr{*0^A z`P>g##hJ2lArEgq4>yxE?>ir`Ml zeTc^MW>v$;nY~Qy*4GSB4SMlDQX!%ZH2v;78wa=VYwo$2#4Mc7s_oXIGs}A}>4N8l zE0-A0Qld?h*0Km+d`H!qhGY#o&{*ciC^k|Q9n?Ay(7GeCl;FQ^SfuHXMs~vitPXJM z%2~K@JwcW{51+*pnOWIsohBERy;&+;`QOJ9v8A4bo!{zG$eR8^;L)f9lf4gvSEM8- zGM**GpiwV_gLe1Td-y@AVToL8-V(O}>u@R3-SnV#CC*&hgeeam!s@fU6K&hr4|=y# zYaJ!5%ruwAXS5;aWj>FAROOQ-ih%9?0Z1I?i-J6 z=P49XkzEF9B%#|4Sx=S0_(7vgO{(5erl9Nuk@c000jO@OAvNtbY&o!liEUcXAKLo1 zVmwvE>ex-PD0Dtl*C)xs%agWFD9W?-OQ;)XvNW1sDke|AdK}U&^G;U0ZVB$!n_DR! 
z;Rko!!mlk6$@03m)r5YLlNk?*Cipj8W?E^rEr1>EV3gQNlp5Kx3HZ3t*i;|*;qVE_ zlLivB=K#y1RN<27-H>o2MV9#uzCX8QrUmq?n3r{WuM8OkZ$as;e(?w-;j%;w4KAkb?iCK9Z(rQZ#67>BA?%I}h)1zH?8Vb)<=pKj5AX zxvr7;-s<;)%GxZXv8=UF#0#RB#tr&XP3Sz@g2Q^{4 zQSO&2WLF>=KVeze41D*}(~zG_a}Bl7du)K_xmfzyF5DN$x-pBDQdsnsog2w=#^L+w zD>I2k{bVxvO1zel{SbDY+$XY>;SoV$OxB}Ek7_L4wknS?EhHvjJmI@QkyRtq>RXJ73(XSU!RYI$r z!WO`X@;D+J#pa+r6%VeT5m{<*e2M*fO=Yt3a&utg((f7162h}iedvMb%}PKU?fOVO zE2*djux)86ucs(3teGD)^QpbbmHis%a-|Yh@7)gt-b1D~!~Ehe`ZAKxz&S~O(id6GE^?`?aF zg_lggY%y73fCmhBtQVY#OM;8X_$}-+x+87|c2KI4!glYJ!giyA)A+YbsiJZghVMf* z7J(0@)3PCcTcYxkxqCT5#{rF*+{>p^;HcIj#L}2UjKF;r64)S(R-IyjCRX>vt7~F6 zn;QIyNzB4&uu6kg5teLQ`6w%gZiN~M4(McR-@%LcU8t)@gML!5jI^_Vrl_G7dNd5^ z5-PHk;LCTcflNG=O9a|SL4hh%Lq?X#N$LsIvt6~WiXu;aD^`@6Rn2nzTdr2(?jL2A z61radA>imypUK*^><7rqxX*Z&5F@6%g)R)5NATTJ$?SOll}ncT0+dQ&l}XGj6;!L& zblg*sg(l{CM`?Uk1YREQK;g7i0<8vpv_xa6pc%~vD)peO8KOJ~J3FAkz5yl=><7Ny zbqr+T@0<;r;l>?xt`0OQ`_rVcOa~J5Lj7H4s-|%eKqi9jqFg#o*1a}cWE3x3jIW<_ zKqF(Y4#&yTM&gY0ELqp*_-v_NT9-AH4^0-B3{iE=qLZd9HDVI8@R0FsG%L=_K94a= z*3&WthqZ);&H1~B>0iNTR^vNJa`3i3z@IsU8{qog6o^ltv7cI?fopiD{!D0gPPYA~ zecO~iTP99Sq8a#WOmW2|reLwyBxuaQ5n!kFX0WLAxp}#;d^=z4@o;v7&^GLoW|0mt zGws(g;ZjG@@08wmQD=GMnvq?37s#r=_!hwDkiNuw#S}MBY=+D`S45T)yc!0aY1Csf zlSLDYZL8)oo+U&Bzvj@X`#@Pl7D%2EN1D6>fHlsxw;xv*y8cL@`+Xy zna{)qa$_EIkO*`g3u@V1&KiMb43U|N={IG0gJ0|-vy{9&_n?+I?KA=Mg&}mR2{IY&wwtOO~YRGy^WINhBKxD`ek);G*w`VilN#~0@SVLu3(t6aN;= z%&J6NClrqFL$$4tR_DVaOEi7m}m|TylGNVRpCRd81qZqIbZ$iJT+e5$!*a} z!JBc#7rQw_0o7+z2dG`&U6xerfEw>_5++_HW;m933NGKeDzcQ|j&=^vzdfJt-@J3n z@!*L#CGtrKMV%hqpC1~5>{|Hu0$n!)68hP$D?49+zdrs8m2oKk(dvy6B6<%7cNaG! ztpdSKEwE_QGA4>{Qdq}6jHil-Ikg|sGMKv^iT(^9LcZjtN?`?&W1V4CkQ-5oV&E$e zZ0L6F@CwLzkXi~6B}-bv)&UxI9nTWeuI*hL)y=w_p<`&vcA90+F*u@W!W3Hxvs(b$ zvK3}#T~Dx2T?lHx%+6-ZHEtbur3A0vv=%h&RFBCzwCx-uow><)mJ&5Ew6}?8(i7!QeTwW)pIID7T5=z9n|(m1_kTlwir;uZzdw&8i=a;Yn@P3vO9|%0d;ooi^YDFp9@#^ z{VKAQ;f=aaf*N-GrCasOXFzAps)EQVZ^)wPfj?qS_O8#=(Of|f=(mTC zv%=cj+QRgRL5!z@Fk!BEgmT&Yp+FBg{jw#qX+w*z%h!l`9o(*QA355}AdAmZ_K{QX*2llQ#32C?Zgftxet*X~h zS~a+fepUxIoslcC89%e)d;&5oigGok#RtU;(I@x9y@&TjmJ-~{-4nVr4`i~Ao;zW$G9sQ64v-^5!kG>v z8slH2%q(vj?@1>tLx@<8zMXqR13b+|H7wY;go(68^NxHp7fWCPK^t>Sn7w|&XH9Jf zU_lx6%BuG3tXCXi3^Xh>`ke~fPWGTUI9o@kvXcUum^I9St+_{eNEFn7e z2!lpVG=r0u@q?q^61pV?u0(mU4gImWYem){gDZV4&3Y@>4#$-FznY*Pa0STPA`!RvF0S0zd702m@BkpDUZa+_u(X;_yb(^wNBFg))=yZ=P`0ge8SS&)u zwGmYnhT2~JEg1{cLd3N0Kod|2-oK;xiS_M(8GDMEdyip8OPe&E&#(oIqJRD&rOE7b8N zcK%9Nj8&Igr97mCik+n=urZ(n!>=%_E1;Nj&0p}2cn zf_6xUNxVW5+aM2-uf=QWnfGDunOKpf3?CNEi&0QeP-Df;waVm^poxRWXwLFQw#Fi$ zRB7piy@koyLf81P(WR46g4E<%cKFOumXci18rJ5EJuHC*L}Abq?omk6KC(b$rOIlS zh2r|!zRe820F|n?RkX19L8Mv4mtysDGw#5(BdbJ~GTcAp75rH7M`zW%PnF59VPgwJ z#=R!XBj1@ua@cEaGLeag)*Uwyw<2R(Q-}6CZyF3J8?T z7>VD6Rx-1!X)J>q>U>Gm+o-M&^ytu4WGTUaUb{deXL#4a8lDOUO-N%-ACN^I!m-+J znORk7K0>UjwXC}JA|E~G{CYX&CyZpIezDEj%OAX2@m0|iaWQcIUSxWJXcke-TBJ(1EzJcwbeG`ABdX8O6 zgq`u+#R?T{!VhP*?IR%Ud_@*Zr-t2; zna=^e4C{DNtmlE%=OFuGuEBb6ZIzCOoCHa&xtH0_-_&M`Y~A#vmV-P-nWhMEF~rk(yac|O46H_%2kkm z2FjpGg%QSwjZ(hQbv^Y4>^ic?sDqGzMlCh&XDopQM4`)EJ{i6ke8{LOPFN=tmG}nZ zN025H--%VoN=pWcil+zohiW}4)-GU15{uN@J32ww$f>dnA7o6BnO6FlEkGRyTcMs2 z+zxDTkgFGb9eWJqh)!=t4Og!2{e??S?dADDm5OlSQ3K^M>sW)@o)8w)sZ?qxN?!AZ zd;q?1do)~5xmhYX&q_>@_iEyVQG!2UJv1|PDb3SJnvgn(35LF8ay%v`F&n3qaoR8~ zf?lUgnF`g7dYKE1SdOy8m!Dq0yZn3suuq}Ys87WD=49qU?Ai+=O9@`XraJT*)0)Z3 ze2@bsq7kw*<`5%i(Y7cJJk1^GV@ypOYmK0n#bQ8*P)&5a`5TuM$lp?9LCipc@!0m} zzD?zvFXH#1uFNbe8g<*Eu34f!4_9{>)V;sRQi9LhuozFQWVaJWb`4h|TNaXop}RL9 z*(tM=k+EE6R%Kehkw#T+wTe}@kFYpwsV8yQ_jq97W96lE_h}0?{ezg?gVF2Y+U4_% 
zX9*EL_En%sh6J?H&ibrMjZz$mZRL#D*hkyT*%6`wnirSFV*ZT>4}s!Sh(NI%dbJG@ zSr+4GuUv)Aaff9&(Ksl=A(>K$Kxu3f3PtiG3q)yv(*I%XD3*mR7A7{Dvoxh8=tQd5 zM^KR&xFhu*cp5lq2$m?yTygOu}d`@HJ$)o&X7LtYkDp{vy+Si9gW#ZDqIOesiPVn3qi0+DHezWP?brpEuH9ffONkl7hCqF^s7UZS4xm^EE?qB< z9Dx^DK0OBaqVf$t9S}*->y)Xj^ADLCys9(1vRYIQ0tY^{JDat^{o9!kzcoR51#IkU zK=+aKNiTx*+Ye#eBK7mRk+`oy0vhxDPU49qv(#X_!1}{#-I5H-hISsP35K_5^Kzvt zf&GF*z}u5NAsNs&!I^Q&j(AH{MH7jrzQaV85*&}{g&b*r%E)d*@Z2Zz08|sJX-lMCmR$6urEIrBn zCFpF`Azg!2s(}m1*I@0w1G3BmI11P+RU#OOhw0wJAI_u#jEOObvD?sj_FT}1q^B- z^#bFR0fB8!=U{aZRSoGe5Xc8gC1hr2!b&N)R&BRh(4&>w{>V_~xC=)i`Ob9%Y54sf zpQUsI@x1(6XBbuBPUC}1U!dh;Rnig%})g6yPqU4NJaLCcjR7U+C6< zxGb*=evo>~Oe?Kv0kSF86S*&R3;<_4@@3Tr<{mi(nHb?sgg2uimG8j&2zNT0a610G zs87$*7$E$Pi7e1#r*_=7b8z!Y8qf*DWOVr^x$4yq1-l4cl;1 zp$C`>@w{Z~N~NkIL&L$vNi#90kmLHY9jnN3&w~45_YTMU}@o&5Bw`Yq?6)0{@4pSdW~z>xU9$ENvq1wqfAp*@$J^yy8cA zkWT%P8W=wL4O0hhKkZCFHHs^-6zm{q*6@y41t(h@n9`xdV@WY{8(-2WcSLJkST4b!cFjU=z7zKPya< zkAL5rNcz08K6=t7#`?!BI%ULFBE3ep23vbR$XotJjB<-ekBBx<*N+zE8O*?U(t~k| zh+h!BSwsS?JG2ECGaiX7C3s7prqH}W6DDiZ{%w!%DC|p}9GoCBB%JBEwIB-3fqpQ! zf1go!p)vkxR`6H`g#@;HTNKAEz&$&52j6-P6>7m38zUGl+qR0ybaixrzU|cZO$G&* zQZB=(D`yR);ddEM)DsQF^YmNjbn-k^SY=)DCGkRe2l0e4B#hTfi}*GTfc9Pb!ch!8 zD!@=|^ZiKkr3fKGIihx`+aLd^2l^I2DE};nvP?vsexlyV9a=+8NAiHt2NoSa3u#%| zO!&;mAz){t_S5p$pXXy|QNZ*dK_inb_)N%rOK5V7U?Y)NhJ=}#=$2YXME5Sx#@`oC zCMFs4lEot7MhYGWLkpViTswG8|gC8H|SXX z&0fgP%r%gOzu6D-;4fp@!a2Kc4naGD&mOuWXT5`rpArr&|s^YK8zCRD?)pfLF0+E+%g`T6PyX@cr7}|LNIM}NFz8Xrs zCU%nn6@Nb#iRTHghA+?aR9~fa#g~y*H0mf7s#ut7y_V2zHuH>9qTx|~F6{o#K*)Je z^zzbf>#tWGGj)WMSvpT0*$aQ2In1oQ6(Ca-zFRBGsvP(%R!3bY@UOK&r#qy7I1 z`MH|OqS-R78P&IdUmGuY?eoFP1>e1q0Uu08x5k2^aY(a{KJcIQYR9=WnVc5<0Z@cR z0jU4>!)GvC;1#0y9?&A;W&ERsqC#lhusQtiq(2!*Wxr$vnfcATaPwYKU+V7S{&>}g z^Q);m8tLypybmFx-!b%`33ey-T6O?{Tn-;oFCP@CO zGnd$8f_T#-1}mLRkj%}8j_5m?$g4DYE& zz~J|dtUs@cd24#RLbvb$k);&BeJ$NOu~6Eg`UtyxG8Jwmso#5AtDXzA@8i!3eIY&> zPV7x!JWC0CM_U-8jjHI4awnORjRV9R$X_g8JAL&$T)3{bTqK{O;|&h%C#r(OP`%zIzOOCxc z5~m#s;n!_DO>H<=sNt!iXm$2!K>pQ!VuHa_g2Bd?<{A1x?FQ~>-PG?O0ewpSd`jKD zR3Go&K9kyN-tXm~$0%>)AsETg!P#hhFvx!gzthEygEVd|>kjF&+CQK}h$bT5f=wo) zej3VM>ffdf8$+9>Z2hU!GE`+qynaQMD5ISDo0b@HLTGa~xt8exw1I!?(Ot6WL}aX$ zna>lg*6H~#CnMSzELX273ob4Zpk@X}440 zYGRRJFqVh>e_p~r-B&E|c??HAo$${H!&@~0A2-d3AI9sJB3mye!G$|%Ow8l~eW6ZG z^>e63b?U&6|MeF9{g;K1ji*fc*6{*6TYUzhv}Ysf=B+|2*{f)I=^NOd)zK1yBHw+! 
Date: Tue, 7 Oct 2025 16:29:40 +0200
Subject: [PATCH 059/145] Update logo.

---
 assets/123D_logo_v1.png | Bin 897016 -> 1219377 bytes
 1 file changed, 0 insertions(+), 0 deletions(-)

diff --git a/assets/123D_logo_v1.png b/assets/123D_logo_v1.png
index 5173ea3a89c97a2ace0c07a23dfb01826a5317a1..9a61360da44449644fd27580d3ca12b63993cabb 100644
GIT binary patch
literal 1219377
[base85-encoded binary PNG data omitted]
zawD&-GgyZJpF!K)^44t&l1C_XiMXT%*4b@1*ab%)s&*?CaD^R&)oyKXMu zG8A8@wchFeKiN94R9_kH2ffq5Vs%7E-_m)o$eWWlj>*|^TsmO(Non6%-Pd+KcKZ+0 zhP6+}EXKqicEZJJkNs!Wd0Z$3XT8+KgI}p+k*zc3KvmnZ*>0PuOs#+9u@4Jsn-T59 zCj2EgaG(MDY^Gz^CdY8}g|{9GSckkdjQt9p+efT}*7(n*Yk?j5kB^_110@HHb}_45 zIZ*WDK2f?w)DuAT7ko-E*5}Pulj539gpKy_+XD+j)&a32;?bsT-hOm8^9vq`D_I~KRumRDhXv(kk_KCI~{4g!hr@s=j05tOG9Q#nf_Na%C z=sps4@WbhAKVG=fx##x6!wyMDOxr6@vbrz3a39FMLBUsg5?ur?|20w65E%+*`MxzT zrrVzEo&y|r;C|_VDZ8o~!$Xse$}lu#H=^~W4vi$z=VMy5%R=W;B}a_=#WHQ?ePMe) zYCmj|@8=tJE~lRkTR*6M@j(Z?BwPja0f1|pI?x_~>)l<}EVd8i@5IS^=e%mo$eT~3 z6jy~0TLbGYYxs7g&U$_dkjF_Mo1J!@$|1c6y!V|KanM3};slj%L3H9HW?M<0%i;%J z&OrKx=T^l&*TpWEN3YxT#u9XK9EoFyc~gAj_<57?s;*?AJ?T_fP&3vB~E%LZtNShLTU;tZ_V zP07Q4k&>qKo?{+jy!ZOQ)FEc;$L)R|<#>-pr)|n3%h_>UI%NKNY0v!+rA{q8c=LDC zrVX6i3{`0d9|EWTqx@ptEdh+i-)W{h&2Um35}JBlm@7;OcAJp;W;3 z@2_qe$8nV5-4CDzav|dO`9N>(`0H}*2$ws$E7=;T9a0<;}NV5 z+%Cx{_-;c}J6eVUkw#nO-JV<1MLRrTTBP#ZPiNoS)=&PFoZpBUz7ISXzE%m{HjzFr z{Tks18s?rhJx$$b*AkZmCoP?^GS!2K2`6|Kd5f!1yUR?WUOGEfYPfXHE+9R{GxBo z+YSYyY`$2E&HQ^s|FsI57z<-X%g3>(f498$O1>TK>bq~~WUND)4;K zZI7^c+=0g=t!gyh4~RK@8akXD`zij&fccn5KVSP{GsZOV^|_q$KUK^r2nJqEF>T7; z76(Sl*PU7tJnuqWb9bzLakLyWelKeDjS>Y1vKROebId z&s*JCn4QN@O!F`Na=Psw{;!zLR0-z`uMx6AY(Eua5OiBa-a7l?yHmRTyZ@_}E_^Ql z`GDB^q10Iq#V-Q>PFvfe-wr;s0DTv+nVh=)f=9bwt&8o9&S|_~E!*GJdZ>BZ$P+L< zTKq*>)BK#N*Q_7C-##Vgv!h*miJC5>Op2{1HkrcCPK@C2<^MOf#l; zkU$_LiV!M@COQ&AUHHwy~7ZlM75E9VEkGR-Z7qQ~!fE*!adYW#5{rW>;{iGMw_1!n2Rg;thHJMNS9HsaB` z)iYbB^7Z6Z559oK(-o%tcMJ4UlA!$v=(IgtddczcZ7|nb6QDz}*hx%VtHS|%W{b2f z7tY+j=JT8~VRAU*=#xFv75UDU_lMUu*MEfN>dXk6=etQieelM(Kq%7WI7e4#tK;k! zyGiyz?iA<)1kM4yj6WD$I9z&}o_vLyYm6ry4%Mz`0d)HVWTXl4rRUAt>OaWwhC{lE zjv49ggY1-wm*U3Z9jn%M+3rH3lA$zjnWph?*ru(Un{M%BLHs;%GWiUsN0F}|$*N1$ zH#l-r@HWXxNB$))I~?-5G2;%YE)@KP=tMRwuR z6<4?hsPgih`os_NxZ;+$e3X~`!2cR&QM{$^mhUGu9$#f?=NcrH#r)GT86?S3kB&pa zk*8c9Mi*Z>Tyg6U!mb^yhn%al_EBI_H`0CuD~O4a36PQyB-1yANrw9%d&%W5+@i&_ z#D&8f$?B$~tuex{D9D(FZ1)Kz34@pY8(+n1;`nQviTiXUd%r)iIJI{Bos*Amzj|d( z`TRDXH*&fd|12i0P6AaTxf^&)UE}W_TG_Ukx@7K5i(RLtqngrJFZ1cQ%9h8eCHKh2 zSK^|so^t$x?rVlo7fMntU=Vz7L2V~HE510kjTU%Tid$3& zJfj8&+w64kGGHfgCgKBr!g&FQw;_}EHVf=8qHh#TCFpt{^+YT?~sQ~F8i%FvxC?<36meU4xV($k=4EIsP+`lyimV zg(mKOW!JDQxg}jcOk^D#D#v0%2WI<+mh2dx^e601gWY_aU9R=OZE2oH4j&ONJ?R1u z^;_ly{r}7m>I+|m^go6o}ES)%=f|(-C zvMn$Es%Y_?yqZf4d z;}5jGaKl_~FWgX7KVB9luDVvei({9~*LjoOV@kItPI*ege?W+X$$mxF=uJE8F;yPr zZjyz0uEqjuY7k*?8nle-<2Tu4_vv^{aV+U7ZDdiCFW9vf*2)R~#GU^fwr^q1H_{ne z((K5Sk3AkowouuSa7!Bm6Yb-z!-j_i=lohuDX|^zYzj}>Eri{o5u?Vm_Asi-0 zO>(hxP8VKs{7MX6@k?%|%7*L-Ur0P@gWF!$0OQt(K3fufU*u)=c`(Uv1&tYT`N-ev zV6!b;Wuc-)QzwU04x17iwY6}uMz&T?BO-6nBj!3l1PnAv+mx&pSTAqc8m@lonFQwT z5#z>&V~9=W5XtQz-YaR=H5Z4oL&>J^utcUt7-|)~a@q@TU zLLBpY*#n~=zCJKk(KTjuOaPPYeM(%mGEE&9JdP}XZV#fLRm5{`13`D$BB=bx^C|OA z2*ZaCw=tH+OKaRw8O0x^j#VUEy_9hWZL?$7&amW}N9x89xP_Vpadb1z{(BO>H?Dev zmw~v#BzwNMw6Q}?e#xQ=z==xh7H$5<*uUgRvfojMy;KdUyuK#9P4eEjTD8efEvix2 zkVg8vP zx1Ulcw9+chV@5Tbn#ch(Bbu`r!76smMMj1HvhAvaM|Dr*!mhQN?RPT zHp;VE8$Jkht^pgneq711X&hkF81S;NChpV5u5k+gQwz0O?dzv5%xADPc=2bwhM^FH zMWho&5@?D#7C7UiHFiSyhesX{yY`T6i!PlvGsa;KYmt{*Xe>?6RQaS5rZSOG#4X>I z_U4UMujOA_T1h-=A~T#afJ??Jr(V^u>JFQH#eF@3NiSvTo+A?=&ztb%)!NEmh}X;b z(mnU(^fZLEnca!T_tCbd=FZQdr0?U0& ztFhcEQlB$F?zOWVb^jqa1;`yP@z`h7>{a$jXn>o}C1RRIRIgTXIymw$*rE$&`zY~) zWG}9HGoA5S0-VHDm$z(H8g)xAar7+Pii(|#Iw=az^1PD|4~NV= z0bzUN_%;QDE4-AY!y)1iH2t<^P`kc&)oGW8;r0t^x5!!}CUwn&-0t6^1MFvjZr=yT zS^UqJ-QLP1NnEyOOo>#lsbH~J#6o%7&^fdeegFt zFmv+suyFPQ4|PSRjkxF1)3Bk#!pU<_$Q9FiP%A#1r2(bAGEN!^2CNg~60$>1ShD(& zuze@DQrGBq(-cu$^&uVZ<+2WC7v97@hFt1OqbFV~cve)dCR@KT4mSHGS;KbPagY1T 
zWIuY+y-l(jNqQ4kV|McGlrR1w$)pb{AK@O8P5fWX$o2DzR^0mij`#I(q85n};fV=d zHYChxgAgbC;Uhb&VM;~%TM0 zdm^P%`hbfhd#-pYugVYZG2#A%N_L%ai8rN*ueW>I+xSK5CKVXI{heK5-HMmv<*D>V zVmqh}wTqXwph8!O{lNSq&x2L*~ zu9us9f7jx1xv`?F?4x6Dq*i!;g@>P69yYw{pO3PyNzOjj-XG7~Z}L5Zj3cTg{wG=6 zdj5v8X9dxDS)kotd=1%P9xz#2xJ#v8Af*UbT_Jm2b*%btvdKqjNN-9@m^kHtq{AD@ zoxjxV!~c)PXN9190cxgREBO%KYybDuksXH`GF7E{Q;A7ee6M?1jpwmWvTtB7s~n2w z^-k%RIB|#%DII+H3E}SmMccQ$6`s21$5QR^-}zV`E#7W&>)4;RX?tiTW?@KWEHh-`|>QW6fj9cHQC3~)% zOU&z?zucd1xFObpNt*#LS{0I~gD!6@+oo8Y@CW_~G&=1B)(CJgCJz zV!a_IUAZbxFufL0cJ$S6ycM2&wf-c!<*~xTix#L4NIYq2E6j6bA9M`r#PP+}lijE1 zWlDh#d0c5FOUov`^cUWgmQNR1Wi7dEn#@aH`pXWEEZwJr>}{r2emvR216~z9aqn~a z9zyG9aWO^Tr0*pjF+1W#C@;3u|Eeol>No0UF>cNx#^sP%r<7hwx86TfAQhs(9um+!*Bm2Ue&S8IB~jHFex4K$th9N*{>opl1ZMa|K63Jh2jnA7 zSKoiVRW%LKqHf;@$A_G*d&vKW6!Y3{+xuF%Mo9%x-J<(|6FOrTf=h?{F^f%O8(BgA zii_TxU~ZAPZHFt46-^pDF`T{Nv`X)$P`($RUlmA~;)9xikmz z#o{;Y+_Rv&Oy?Yx`VpzF(0i&w*~AqOy(wUquejp$#3_gD$Rdv+4+wDp+3(3ZMW+f- zC3(BYgqN7t*?+Hld2crCzB4GU$MAu$LGnpjYb(5o|AV>l)OJOW-t_nNTl$zRGjWm@ z$zkof_d({wZ-_l{Ju%EX?V~Av-oCYQZCL;4tu?Ga3h$Mlz4)RsiDM0zZ$Xoq=aRLs zLKo)cv?FP9>m-wJ-g(3t2^XhMD!Jx4sO+@{dNFQEL;YQ2Mmk)Wxc8SI5Bz-zgJ`zB z`tOn>XT(OP@u~)-DSgPcnh%)ybZYjU_J)HW*doi*xRE2mnMX{^I})d}YOs61xOl2c zLmtX2d7-C;p%z>r{N#!HLt#0+`s3*dRDHlj)SK$j+r6y1^}ZhSc$t=LKD{Pc?*n>X&q!1Vt0JC|LjDXlRGV_<)!CdNk$@eLMPCe8qG22B@+& z`B%o8@XK)i=ZX@10l|frT)yH;qvWIm2618!A3(i>z)EM>1YDc;8e0 z&b<&89C1`Q>hL4GnN_&wi3bDkcrWNK)Ad>|T3v*!vF5SdGt@((Pl-tj_FzHZa%7uSX-o_o5MEbTLK%!GIq%5EA=4@Fw(;uneI0IIq7 zPO-s%?~`p8uQ&$Y?Go_y0gMg_FS)N*f851SZ6MrZvMIjwGJSN&bIHk4YuoUCLF#4=?8BJ#;{n-I)4`Odjcb=)f0x${a<)mI_rV-%9pUY}W*-_t+r0o< zWOZ6-DUE-7e06yBo!SM9aG`xnYnbKDp^Bqb>yVXnb|#KN1fx7GUAgjjj9$p8%xmA= z9QZj=h=5Z00 z`f$QhUtkrEjz677i>Znm#g zh}YY4t6khhsoV6k zB|mR_gX`aH6DC)7AggBmS`Po;V56`{!f-^*s z6_~#ep!$(tE0b2c$Va@Xj)lR!Oq^0zI!&B%cpM!nG4uf=4lExivJMfcheG=b>{NNB zPjB@y+cI8hM{n{0YogNE>)){mTuQHK*$tg<=}Xh6x%PNm&m$=Mq~jLzz~~86?W$A# z7qVB@Jr-VEwWP|szrvxj7KX#-w_Qp<)zUr8LZ^ikg_MoHi@q@K zOpdM8e)Xh2j@hz;{1_-@J{(ZHMPu67(c#MZbE;W5_3G5vWy8|f70OJdD?a)jo3XxY zFT(J2;ILugyje%IO;K@j^Nv|g!;yR$nu%kI=g&Da7`J&{{p2$_R*i8*i+T{LPS90c zVV-MpDltuDvP;LWl!f^;yi6I;MY2hUd#se@SK`v)kk?DOEVFdawK(#NCE2l+vi!ki z7Z>Jzz+SRYJ77Nd)N5h$t6O8c$Ay-)apKl+Ji8V91aj(^-pA64tsDD{b(Z9-g_9iu z?Z1VoKc()F@GMtxV{&N|P0BpuWCspqQxqK*ZEi!^Je~VlZd9mK&sVmrDQ)2Rt65&A z*%?=HY-Nr-?7*Snq|1)1XOuoPWzSaB<_AAlAXDHbE)2U5C2r8W$Y(U`}ioifM86$`8l zo8AtOy!49P#koghos`U#ls{I;#bcdhO-^tE6CJ+NC427EQhZ?+b+3~fzMZJFDP7@S zSKgkh+?}{4a@o;4VWmxa$yuxsgiHQOZ6~ede8u+wEuyNcSR8xp;Kl+-i=ce;GPSbN zFQT1(5`B}6xUmyNXRz#NX~cB7iWSq({xOdfDW#WjD{RW2@?ejtinR`i39jv0lQQRo zsrJz;?(A*RvnyW>Z@;lMbr74n%d1)MTB|UlGp$E%Q7ai6w-ZKNK4HvGr(JPW7|sX7 z>U5>40;$oaz~1EAuDCGo57rb6ZdDD6Bm5z8>a~)SsrTC)_hO52vX6D{ac6|d<0pBj zEAmZ`+|l-iIlGGW#Dt0VLsPyNpdY)_SjO`0dFI4;zs&;mrm=5gl;OTUu|sf;Z^iML zw@LQg`!>nK;AJ3xQSF5#Ty4Jmq_#&U`K8S_uE!tv)`is5{ctscik zuS6=CcINL*-VN|P(@>4LyMoJHYNMJlD?TID8Fk(yd(5|sWb93{PfuxhPEour85}~7 zWOZ4|!n(mZpnTwo$`SQ(XH?-m@jfM~a=;bQ%Z62WAy@7hZz|LOYc?kHik4mf?MjY< zJDxK@O3XW_gQfS0Ln{uh@%B8`^kdHO?qS1KnIrLGPaojoKkQocgSmzjU zu!U5Z#5M7OA^R&^-sa;-#xA*un>i9Tk(VQuCp*~(c|U5PX^iY8xu&te&xBw1@pud1 zr;j@-O%6;DMex2&vhN&C_P=|0Wew(b!3*cih$n^^81Zh{*L+dZ(4<_*)#yx}GiM%| z*?wwo9rOo^oY+lmiPnL#p0F|rLD)1&iOU~dc5#n+f61~djgl+RCR{c+gilF|=W$_8 zyey9|rxrqRaOqz5izeaXvS}gZxnz6^(&3L-SYx50RS&)pc5K_77B+d|!vc&26t})c znZAL|0+&0hMNnL*S>K6AwA6oyBj30n%V|}8Oz}yJbP1FO6&oU4d1hN=F-r7^alumL z#9|%>z(rmL<86eV7#~mF|3dxBm9-|g0t2bKnx*Hu@imWrW z(LnBCnOPt5@{L?}6p|@1^k*$xgHhC%Tl(VBD$a~?7*!LeCdN3hq8-zau<+t9E1QZh zw-<4AlTNtgx60(Wr~Yyani!inyQ0a_bNOQL6wFlFN?AG_B8@>Bz#8!5r;piiNwhY5 
zef7j+!-2!}BX;jn^PQxp7m}^^8V-3V#*dbx;?O3 z=vb#fjtef6>vlUJ(`g_re;2;0#y#wQJel}hQ=(#d6rg;w0? zoaPPk>J8y{%$Y}oLub`4JcqT9ZVX$u?6C1+&0_yeSGK^h zf2O_iokeOvyv>$*0m8@uLJKN!KZoF>d8Fjnnu5U*hnIwZjjUeUu$n`IlH+h|{dwMe z+EFKl8HXJ1p{~ext-L>8!SIQF^pp>G^*ddp{qOdC5$#yet)>eU_b$Rb*BAy9E-dz% zjAN7D6IWcNS8~due2}3(O02c*_V>n=^0iBE3cGC~?CGpyPPGE1V#cwNix&L*n_T8C ze{cvr66Yb3yUjU>{D&thaG&pM~h3Wz#3mJU$#cdr>W$XCAsSY%tp7SN0_KABw3FtB#V<0T3K(k|T@G z5s7imDXwho+5K)<^?+U3vQ})}7HF~oKF~9Cb*qxIPh4lwg80B}GjYn8xWG#}S6iN& z;lRLLpam8s)Z749&^#iW_Qdl}HcO^eJ5C0NNTa{>7i9t;JN>c}>X)t)^`9-NUqI&# zhnFlqzh7yWk$L0b{j2J4H#DDTEUdFYebKb4H)P0jWa+ZyUXi**w&MfNn3OC(jdkhb zO1spB!#&L~!wJGOYmkbvv zE;{lcy-zjZS6{gZjD;^*n346!zI?)hz&qE~a82C%_eJLQe-AuwKTJ3}aoXfCb4r1DauU+m!lKTUhM=AOnvlwB+~il@*%}^rj$aOI^t@^QCL5x}oH8$EbU!np`@q=+ zq4Ys-!jzuurH@bBlBmjtERqj8a!JCPxG>@%_Y_JW?WZrh_Fp!y`xO)NE(_j{46Yy@ zE?@Fe9NuJfwS|hdytO^7UA{4`nBppl2db?7<7z0ap6C~>jN&T=xku2BeE4WZm3$2| zpP?4a)LMa|6L;v)oMt^7bKm3Q0gV-1a2mRc}ss+Ci*!T6DqeFk#B* z8o2tQ7sHP2yW+}<)@-`GrOQ@~Dc!$>9LHD|SQ(_=kXC7Df10l+*Rn$4!7ZV1<^#(M55TaV!s0%Wp^QcZz9OFacn0xT)Wwkh_yltTk zG>rOGeVG^uy~KqHS34=LZx{KLT)yJ6ONWa*R^riza|}5T1m_?!NbgY%^w5}PBdcRj zGRdC*Ras;{u+@pu0P1A5u^Er|nnUoDJR}^k;6vfy!{^l^KXvcFhpn4xSvC5IX(KF9m~yg17bfl}OWDO+JJi}aOQPCQFXP*h@`TsXK4=*58lav3#OHF* z@L}N#CvXljF!b%E^-Edg!dHr``pgO{?`%d~pZDVw*WHxWKfBwQRHrRQZ~Dm$D>8{ErU z@W@Yk6IVRhq_-ATU-{quuLnZVy#Ot|bz%qR<0&opk1W)FA6|CT8e34YxU&8c7fg9k zL|x64#<;g#dHUB)6^Ld>g`e)MyBsVwoKu$YO( zAy@K?o2(dvfJ9yv1|K@?fWSh?)2Xz^JZBHk#brY;G5N_>a*L9x3n3FeA}##& zaR$?6_sD8ECU(t(H`Wqm(f`qtc`--#=67DrlD=stwC(!if5k)o42yQs?nXM@xzi&6Ve4Xil6G{`Jonv@R+$Da2aK9#U%*N(8_wjb88{wTvnjIyg|Unr9pZ@@Vg z{G^A>2{)WUjSQFWeZiZ8e6c~^Cn0GoTt415D8I)wV0}^T^;yfh_6xx079JfA9(CY= zlxXdno5Ia&Hq_ES=8%KK>2N9 zgn6$0f-5K;{Dg^X0VNJDSzNxdand0hytKb&SnbU9<@Ilcm)E_Su3XtYgj}tPZ@91{ z8pq^xrO6I#`5r$OJtjs!7D7Wap00A)RWJSwPw5!9{!}fXhMA4~6bbma&{|X_TkMw{ zA(b9|)cjL_HI5~9T3jNIcQizyd2{mRbHkVk^)b5Y!RNzn`yzPk2G$~6*-DEk>qI8U zR}u=t9v5FJ{n&HZfYIgbU-DU>YWwkp5`WZ!D|wXJQ!u!fORQ785|?htRe2#p*lJ{T z+J@B}Ovye+n8NHVxATaC zZZ33MRTNv;6k*aC!y50xJeQp@2`=9zE==)D-J++SR`9Qx&iP73SDkuUYiv|#|ApHg zySwc{y3^*JXvYIih0<7vQ{iyFEL*kOr%lZ=r zE?X(%(^Fe{e|*beP^~QJ9jRds>gc<#hIlyL*T{qMx>^Ht7Efj8@j4ytn>A;3AQ9SQ=y+aW`wt3!hb8e5xcP9j-u3$t5RYVUj&ZE(89;I>i-ku2q?kOTw==B)^`xxA}blS5Uc`uoXIC zvTK1P4$cA!PTWp@?NJ%)N)%2W2 zQ^R3%+wPJ$+su z$FV2nA8Kse^0)5g4dJCXxv*Fxe%RlVk(_euggNJ`o^TN+`KRAFe^dzRUI5=F@>83k z%kH^v7vYLq>fn&J)B*CoS!;1{6U%cob}`;+j_ zTdyb$Mpa9eL=$Slp=&*3?xxcOyBHwA3{vW^dUusx?l#!#yhhxt9 zOqm3$JYKy-KlKxyUs!pW9rk|GOANeIFjI~2gT#|IxP3or;3~`Xc%Sj7kDX`FOtmf4 zy~{q!8@Fr?*VsGPJsmNAY&d4}BwK_fl+j!t(k<^ejItU#n*qY$!dPr=+P)+3s>6a_ zHT_WwCwlh9{bU1KyZQT%_xbL~JXV1H73|VWnu58&Uq7Aazl`V7@S-CgF zvB$!TKo^Ovon-PVh>W?{j?xe`Coq3NU5Z*)#trdz1!@{=zLEQGpETxELcjrt+M9kQ}?7 z^*(xF3T82Z!28_`qAu&Y z63OC}QZk%Q_$@%u9(zU1id(-gwMWjVC@BuQJYF=|qH3FYr(YEg98-U1FE7U6Ho|Vv zqKm&84j5LSge`4OtTl+?<)R8q6Cd0MAJ5D37iQ7Yo@Oo_K1lC#4Om}PJMimnMMDk# zEqlNFK+$)Xt+WrzFw9zX`Mg=C*tEsTUi{00k`Wnkc%4-G)T!)b*MREJK=AAB>&QFz zaGqYHn@o!-I}~?+9PoCJYmx$Ml7)HhWu*nygv$3TsW~H51TU9LODlBXAd+t_<2TH-osc6O&EFQnyEM#q_~nJ*o)17GN0P7NS`?Uqpse2 zkwPzBu3h?zu&21H{;|_8>t*cTxp8IYx`%#II&U}NJ`1Wl?6D~kjD&-E9GftWbQsUDkcYa7dgwspMq()!Ux>xBtz~V)R~s_jLa+vfR$_IbJb; zPMA4?XOX*4n|ACB|FV(`5iOcLdQ>=Z+M#{Cq7_G398c;!aL!%nSuDP`?%Ex0TlZWm zqa?juS!|RsBROa zlc2{%mx(N}s>K#MZy&UH#?vi^>9~y^uWoT`VYIb)#7Pg_GEEDuyl|rohQ$S6EKp9{ zN{c`JHQFmKtlFKL;18E12c7+yV{KkxEz_dM@7a*9W^tlup}exiX;ItlgJ9XevPH+_ z%PesE$9TnijqE{RS zt2vm0#YS`uPGdM>X_%X=0czsCPc4aKs}?DQ+hTGpkh&46o>YI}VCbI9PrS4%zK`QE z#oHSv|B{IFz7PkDLE67+ps523mG`-@zz~xAfVft2b01dMQS0VoZ~8)T5() 
zjq>bYYkQvRksJTkCP0p3-J!*o{f>?IwrBv?U$Hns)vQ9XZhSK-CWc($|Pw9D`{J}d> zPqr#fhlQ7qE7@}|lNP!!n{@Wu zWn6Z+AJfEnF_AjHZ5bd& zakL~c>ea?-*hXrlCeg;cl^c{R=6tdEv2~BPe3JB>v1dxKgWjqBsB`Mx+rgxJyKr&& z?2S{TCLeGJTci&2jm%x;96Sc;eW-z=k=6L(Wr>Vz*O-Z(iXm%(k9Yv43X?|h%;#rB%@qb5!C zP}k%gw%}or<%#cfoiTk{udZUX;-#*_*wJuHC$#2aCaf&3Zdtpb6(vb;8GF!y8tZVP zrG(oj6ecAIKV$_TAFQ-{%-fpcdCZb(oYdrl9YT*(9QOqGWUJzulVqPhWrx5@8NDee zzD~w;;#KO3>laC0Ry_Hk_hiTK-z?C+6)nB~>D)h3)21Jptj}0yq2FkMW#>TF@?hD9 z-2w|JY?9-GiUm}Ro9tR-MV|30oxIqJ`QXCV`h$C`hgwFZw zv2lSXJDjUooFvAEf#*e3b(M4cz$l$!BeDmjjw;@x$xNfCu-^OJ+(vZ3McIs$3!fe6%Zv8wvseP zZ3I_;4C;tmtuJ}{k=P%Kc()e1RMm|#qW&Os$*NQNqU)Fo=8va}S90PgD4kB+T-ylW z_l-DE4ATBr15F)>QmV6f&dTb$eiydA{d(ZrSG^&BG>iTka7l@SPXmxSTElZ2)?J|EdjtlT%*wNIMN%es&s-21X{(&7nR@%F}*2E0s@xV*vZM;0o2I^68ZFU3(h z^xagBhLR0xg-p4e{OnTvxxD2`oN84e8$IZ z7T}(w!+pRIq9i^lKz`}$f3@@m8t(-ibe&!mczc0@bW%*hwOCThd|4A~T65bJjhu%zOi zpNh_&d3abj<&aA6rm)A((!RH>e*eU%VZ*{XGpG0YvKHfq87z(?D`f0^ag$^&YyD!) zvt{x5fPx=P8t3@s*tOY2#|vg3NdD;Mj3lfRC$4NtN0`@rK3*<;WP@`CjD26p$5lS0 zg-n?t;o{(uW!L^`#pHF#iYMKly(vH0#D(L_3Ju(^)X-Iyf5nVn6j?RS>o87OhkGY zLA8uLa?J2>;w48GxyO0rR0AoU?P26z1{3bNWPJM*EP9W`-1W!abItl*N0Cyw=3x@k)}&BW?NxlqXp`cmV#himE0m^?j9w+HCb7%`KTERGq*L{w_-mBQGE zY~;57Y=EaZYKUDjdptg@S4Bv|9mc3bh&q7OUg*@LC;50AK0O^n@%8PbxbWTt@|Pd7 z$YaRw3$^{u^S+eg1DokToqpoAA@uQHfPLNDvXl6ZuFeGCWZ{PvzOIZmzxI4sv-E~A z?=)QzlBJz1(aK^2Gh%V}Vq!(JpY`d}zZ?>dyWsQTnI$*aAa9K|9^9I~;H)Z%{tAmP z{aU#3uWJVyCcDcmeAfap`wb^h5MdP7W7vfGI#Yaj+QAkVW~Lf}L-qOL7DT^0L--_JOMnn^^O-=-kf^vWh*WiN-mn;s>L`D$ixtH6}s%j}8V;zz$Y3ZSyubHBkeyc8JR~q6aj$Yd2vCgW8Ju?H#4!J~>EWpdp0C_qTJvUj{l&M# zA=AfN{x;v_a{SUVi!vvaCZv-~<|8YoFnWR^$uEk*s?VSy;f$;1ho#rA4tsV-Cmf-V zS$Mz#Ui6`I{$LL^kFWqDvap!4>7HXN19mWYNo~06@0`rn&fYJvSKD#_U^mHXD?GIN z(eV81FV>P}vDNLo{{8;zEVs65xWzJ79Z?4FfUJNGlCFmIIvAk|^R_10V~SsL@)!Mq z2_vMyKXFFwH$iXem(0h`_V^#pEwwX`KG}9M6%XD}uMuOOiDXQ29INOG=8(p3;}?s$ zZR*~XNY#(YSVRV@C)GVR)hDv-9%nuRSDc=>(o#AlCta}pmIfDG?{m{|Lg5asbQX17 zN`~x7ovG|-=-;yNTNX4+2Npi@wSJlr;rWb^K^F(7!!&a;Ii-P{ixPd1)8sl~%4~b6 z>YVWGV|N&qh^tyJuYEL3nR9}me(_n0E(ix7HZQ#K;%cAHd8Yi>0@t(|d=yF(q;yS) z!Zmryo^~X~?Mb$z8wW3O)DHM?kQzceMrCl*%^En;68n@-aPhRs;q>YJs5mh6_r(>h zVfO3)g|la9*hJn_Q>USp#tD;G>R_3h0%jKu!Y;d4gL?-HddGB4YBH%`!4;ZpV6wyM z@1+jrgZzX^_wjm?ExB4A@&$*`BkjxhAP!?oX&5Ixba6~o9?k~-R~EjE@IDYv65efr z{wvaczzN^AP%W}>dh)*3f&+bjp@09s?+>HL9~kDH!c7IKZ^yFPUijvzt8mD;gTs+iX2rg( z{>^+814X2iMrp=La@6x+1W$il&B|A_3|aHiy70=T*IE&hbiXkcS~=AS(q5{&2;_}? z;6A9{#br|(!ac`FcKLfwmgtbjOF4P8e5ohJlWo8`2Zlk~ziXhWdrDz)sfzrsa`7M(R? 
zS{Q%eh!obTwGloY>3e_mx8p+S`@H}?Opv}zC^=hB&cqbA)I&~Z_M)?95^54{Nb*W zE1OR~gC277`M+_z)6y8@%2pg}aeoP?CV_K7r7#rrD z%vHG>aSO&A3lx%|IYSF7am^9p-dAyyMiaNBt30qn;-&mwqBU^wW&857g_XVBr_`vaDTyN8k1LOH{)3KPK25yIu5{p#c+%2&#n+|4$BY^sE;#-} z9_os`eb&s-88^EuRWZ?U%uQ$a}{i-lIUAJ&+>;frNQ%B%YX5`aNZZcA*f&c0J~*k;YD8z_x@xAgxY)y+~HdlppLs3ttroKXA;5Flm~7 z-7}t}bG;*xotW=#$y~ThXDo5Y-~A=^6v<%L#gaMv#WcWvX>T3czGc_w9ou$sOHoOm zG=PKQtJ&boOd8*}aEt|x*Sjpx0l~=)U-_4^nQHb@_wi(dL$Z}}sZ)>DB@L?Po_~Kb zeEdJ3j$cJpljKyZ7K$D|s}!Yf5B7}K;o|YpXZLZ(8l)1b&3KW){LcF1G2!W@^=FaH;|dG^j|JQb znl~ssK{{LkKiWw?;@GiC_uSxUgM;G-dHHP9YiI0d9(_vJ-x*E^=61dZpI#OVpv}Tr z3&NbKGo^NgU-!_>fdxZBx0tT_Q$65<>_drZY`|qh_CfLG>yLxjgv+ll{NomXHW`a9 zJMqE+c^%|0Y=QOmwk^uW_^A)SZNEq8UVt;~V_M85^!?P`*$t~abgD3^5sMq^Woc$I z&cNZn3XG=|?t5x!TU3jGTPvZO*;0t)>kt6FsZHc3oBYJpHl9mX+`aKwpkhOu{elJ; zT>C{c#DU^ZD152tEb7?c>O5dgo$tRJ&LmRB;hC+={?J|BVx%!jqf=Ty#nG3sWN7)M zq|86YBUuJ#2!|hiMwm4H=)moYqE+|(B%FN3AK2BXwqFqC(kH&)++PpNZuoZCzJaR@0T&r}E@xuo~ZX`Qs4L-BgSTnr`sYCPT z){NRo^NjIh!=>|P^**6KdVXsuw?@_JVtc>4h6FsHkJp_FEtcM})S>he45qp?X1EpV z`qk@-Sb_UP?#!V2S4txFH!$J2NvA(UaABSc_Z(RP-zM4n`}8DRw6^@f+Q{mRI9(y7 zmHSAiGPUs;QGjF=%II?s&KAs~(!jSP% z_%oBI14579ye?ey#YJ%;gr0^f0vdYaXjXrkzWi`E93eRZ0+xqPx zCfn^RT03^_3^y*jwJb6d`4S7N3+%pbR41wz+2Ihl0`=Y``$d*~;H4~GX~0EZ7WSVc zzq3uPUszVMO_t{@y8U-^s6Pj=c#vgh8fC)wxc{lFoxNSZsOzn7eWJxKds4XAU7 zQ^K+ndELw0|8xftMC%{BJ-qS4(^7ln@yM5FLk6K{PEC+ZtZ{OUCf-2!o3-@bwJ#v9 zxa|iutUt=|k)y(~=Y6hB;$Q>LvOxbOP;8xSNS93FtKHudr3tN30=_BeSdeK;ij)}*HG z?p=Gr13z6=)k!RA&XxLUQB7&V9i-#Wn`v(v99OeG^YBYy$2+@hoV89U8FSU`f=;Hi z`gNZ~;@FJNDQ?PES__GICAPN5A|E7M!21ik7#+~)!`@1InG1qTe!Z63 zDf1SG5w@s{JzAwT4~>slb3TUE9_Md5e$nF?qs&23z}LhFpME&(wsW?EZg;wJgIkOk zFne6G+DCR@*R+l7;>44WxG=9P{U%OcCE>T<5C?`q+D~eLGlvdEDRFf++$DSd&00E^ zELXE?kfF zaXI7;2dW?>)rs(7LV}tmCNO{Ufi6GEo+~|J(&1$=D3Zs8y{FvjQ~RWRKV-s5Qs5`q zH!Pg{EWv%6+8yTaSoQ~1J|tb*@dCQIK0$os%amwKt$CUG9-?S zO+M(5bj1^Aztz5A{$vInQd@ePmt|L4(&a-s4_l~R0(#(QtHO3$2>FuNkA8laC`O{X zkI=;VZr9nLSx^I6+^t&r0)r-9wX)Kr=dmIb#}Wl(*v7FbyIm1We)*X#+pDN&-=j`B zJWQTZU)-7L0t;0hVs`D)UzBO^%fvkyr1_xOj7h8t;4%4mUoiS9T>VyYN*ufVzRUWCzYVnKC{z1rQ=@S(%Q$#YLIODY;X$=<88Oj%RgCeEUZX8`x? 
z*%Ox9YyJy*!*qX(hwl_+-AMPGrw&Io_gxxzisCI{V@B0AM8ICc6^S!o-EDsmQtH}U&_Qooc%6g~#E@cdcj_&>!={3;|IPMJ%R-Q!>$FWZ)M z)W+DF?B1tS|4>}d@#XB;Kb{}%{qd9G-P)Kbi)b-2)#_Ye1Gm*__|T{7_xA1FwmWbi zVXP?I9Pqo}{4XJ-djV?ieK%V=^UQ;R)s?VnMV2cZ9L&nV#n46P&kExYsju+N#98$y z3+$MmGra8Q5Xq{Dy>Ux6rY%2o2%QJxhM|Z)b>8g3ko088fN#Y zb(8q=Zb9_1J1x)*o}|a-F@rchl1ok>1@rwHy&MPLPjZ=7lWs}Xh|5~EcSTp7$|Ak{ zboB!_)Dqb#^7fzwGKjH^i&RhG zl2s6|BU9JDz9oy(PVy^x8MjQ^qQHxP zm#yrK9OY_f1W(@mqp)?8-iW27nt2+Rw&i4m@s?-K$SOIVs03xJHo%Lu=_b3@!CPYlAHz;Q(78qT64t{IYZ-^sNyjW z7$U7MP+r!0=*5@A^P6kut;Bx70{3e`MAlf2K;8(`1O+S^nz22e7Mix%#U+upjun)lG@36$(&uvg_ZNHiSKOM9#-7-Yz>>e za7vguXJUKb?e?E3UG(1UDR7U=Px1R~{8cjU7?*c=G<|!=7Dsa%qbsY9Nh68RaU>GuVNI2EQ zwg}&@67_NY(pze2^0tMGPdKlJ^;cQG{_(J89tui-N$1#)B;tE5;srjc~c&6>4FiZ0u3;LB@nfP|{^+FNQRfi?VrsGT}KjJjS z^Z6h{o|ks%$+P7v{e9&e3oTt?f)n>aW@k(I3qdC zJExOKeDYw$7^4ia`};l>MvP*fY7t*FwpSsyfPP9RUiLfjKv5)frRk|qiTg!WIoWAK z6!-DGta!r65*ZREp3uSVKG%TtE9#OHWPUNMXza)l;d2YSKba|mA1r?|Y~8g>YUG#A zon;Nod!zG7Ts5I3z2phZ@af|>*gJScfZFS zj$_isp%z>$pv;hh$(Bzg%LhC8N5ABQeDWo8?cu%pg3i2{OE?a(V`>*a7f9}86Rx&x z;+AyP_SiY*(SIxY^jTNLK34%<5ZLo2tCxl6Uw^Td+Qo~{3!@KgyQ)|7>8tY72l#!r zMl`{#7GhImODUsw!eoQ{e(d{lY4f`JvrnfdITtZ9e!Lmwfsdd55ohdlNq^OzI61J`ntw*^E}pRYwdcc{rR@iin8-eCCLM9K zy|4GIGA6g@9ASZetzRA~D7rXush2kIgRKk(RT#1-d?9h_q>__qaJ!cp_>$RYc%LsW zIwp*>Mb|*kw%xnK4<3E8mNa>tcEr>?n9|8v99VATftHi4S~_)@WYzHC>eeIn;jVRW zwmp;ekY)awD}ctZAn7I7LkYJ=2WR&<-FhB(@?uDpW6nzN$X#E zE-YX7c$wr1175PAek{8`et`KtEgSt+xaY`Tr|(MFc#tmNy>Uw-rXfET!oLkDnl$!c zYai|k?k=$qet6CDTB38O%?eW{9vb_5ES8n3eri~AuZ|`<^xn!6wFMx#nUO)+6Qu#1<`50q6lF+QVDDrfYPS)3 zimyLpR_qcjzN~i4BUK8+uaQtHSehVvnh?EBaeB*rK8h1FAKdi42CNTJhn&RxsgKR= zf?ry^z(ZY?f3$LS+gF4?Y#-1XX7{eg6Eo|wtqF~jwf5}Out_5(jRl4hlbJeVn%J&u ze@2V3`3no|yHZAm&=sVM%hyj%^3~)eo4Ch?!6hsH-Z*7wB9*DhHsDz9KfmuY8a{GZ zIQi1H6KL;ZGS#h6H^ufdQz!n!y&D3z&v`oC-n1}mxLwE0@s2%vMwmFYewE++zi2^M zFv`r)A-JEHsbAXAliCUlxol5#zeqwSjWXa()tr2Uq@ul%5KBf}|| z&8tP<^5(X%fsbIZVB!yMd!4Rs*;6Z&3SBsAmJ=S$Egw=h|I@EHDhwaNp{GV4GYwGf zpmy>)?Sn2q*)*?+`?y}VsIiT?Vlo~@#7C`I{B)D3v(+!IwcFC3sU`cN1*e6{#WU75 zhka14e&Bk`jV@WETTEmBMcx5fq4dcuTiK=4Cy9eDOmeAvKlun#zMfl>6@9^^$(3fp zm8V|Z_4XnCpWJWtMyGak=6qYs(U18vlj=40ZFaxz9%JhKEl#Ueo3;7K1}T5{KFM92 zZFGA^$~FK7Baoxd6e^{C;7_XbIFRkH*QI{5pniQ@(1(%vVgy;g|dO%Pb}zWC!fI#WPfV(Rjnk zJC#zuIbTYiGUtRc9WHMjYJvVt!9ADE%q7`#WQ8&=x(}vC9bOWCkT`Wx$;mXh?N9@s zF}!wW_3I1ghl5Aye6&NXeT4VeS=o1&t*pgnQFr#t!}GvN(^(uuY%*z*lMYu4r%n<_ zNV*^13|z%}{FT?k)30+Opho;^SUxDJG3y&syXmt zhf|6xB)q@;)GKP;n>buG`G^G_d*J>Ufg5~m~<1Pj}}W!9N#9q6SmR( zYxf?leRQLJ4fxew)-gru9i8)Q3)e65SH3+pYcMQ9*eRD?{RUkXQtgvkd^Kb0L>alz#8WgoXm zwrDX8*=VC$`y6B+`n+h-x%hRJUVroR?y(y8ovhVsutIgHhCygtP`S zb4MJ!@oWaWebLxHgf*o2Rj%#BUah3|Nm_KtSIc-ze3xZVlbSfYfGk~HlO=Wq*`)V{ z=j;brZ)+f*BvZ$K?&AzQ;DGQIdlAM!(Y5wbtMcU(=gpoTU%Zh{*z9N>&*&%tCuyyc zXl$h6LebNB(D2bvz>l$U%~NYzk&{GfHJ-_giAH57K^G12(99=aDR4 zmKwZM_&c+`<0G)};nnXwl0PfSBUS^Mu@}v0W67kGO5P4QYvK8|WM6#RKCZE5SqVzJeACyoD|0^*$0H@aW%~n z|AfzA?n%?)oBurK<$M_H!-A_7o1^i4KdC>{hxLq)*UrVN2wY7&;iB1mQx>x#QxAp-&}V%>uAJY9qMX30Z9<3@%@fd0cilE2%%UP!T8Zmn=TN(z_{Kv*hQt-|TdYxJ9v>v}M~n z;nv6QE|YxO0G`capGEb8Bs8}Otr7#M`ZgTgbM;O9kbOUwtT-v&?~nUUyA-SU0?4NP zN=~}MkpGB{h+8`^KZ(EpcD#|s@q6*5hk@5U` zCd2vLX&+X5_?i0iVCK%RM~_<|Mc-z=PL&%R94fI?=dHHEE`OyZpOSlD$t91O<}_s! zR^#vGoK(xWgTet}kUqFIpw1@Fo=Enbv%Y`yyUEv#*Dm7+r@fOOdC{9en+J3p($b^j zkZ}Bmzi4nh)}uGF=Iag(u}k46v|ricA+d`skg=ci(1nR>B5ayC6<5BB@3|!z)ASg! 
z$jib8xvK%U5dAX;hoRPQzIGDlqys~Dta~mz@oIe$bk>Y%_Tr6^@uVc3lreN5o7^Ot z#zfVkBr^`w)#3a=4#ul>fA-XxmU~Om6)jR$g33i)-sr+4!*!D3Wj~Q2QB<~4puNga`beiAYj0huU(v| z*?GYOc@*>|(>eBV$xFu@$$0DdB-xBMLtF5nBdUB5US9uJcy+_3)}qO~e~wtD-Jy$~fTF<4KmUbjz+*26U*jCv-pCGT;YkKdu2A zTy!K+C)wN8@k;(}b7JDG=;7COf;XlrK(dE{0$8-p=Q+E}`2Ku%Os)-As^yzv4*igt(QpZSS!(D=z^VwW1gVym3gq{IEfih~H4>^iXYmRr;|(t)Wm z*a5B$tBk+e!iBc`XZkAm$o$!1)`W2$>Z<%V5Ah>wi&zZtA`Fd!YS0KUS58(usWn@j zr2UH4+BY_Z751D~L7a^~>o6tk`$tuT?5D{J92xRH!b`4rny{oRu5jsn;i(j<9H~I{ zooVL_riUJ~gj@+dCg%olpKc$DAr|H*PsOaGv*ZlwN zy$84+#hK?@$|xd4l0aEVD58V_ArK-OFwr<*Gq%Sb8{1<~+SzdL>^?j5?CyQ;-RGv~ z?(Eo}v1ja=@vhStlY_}gAS5J|Ba%P~5J*BfqWk`;``_Q&_4PSA()nad;=Je7_r|KO zu6`?ZbytTSTlN%uqll|*Wh;fnFRin)r$*WzeHXNyeMTb9=uD0-4;aKa(nmJN>o@!i zdlKWE^iXtQ{Vn+GK-ue>d8|oTs)tVl@&3d@0i8N_vfcl#EBHndkG}r2tzhggC3)v% zH`7y>WUXNi*b2$xbDv3**kOX%xsbvKvWt+>`jt#J-VVP;4pxw#dpR_6$-tr&Pco0q zEZTuR{JCeK8~rXDX;N;F+4b?jmiuGv`&oFh|HB4<=a2sa^2?|p2G=#awmD^u5Pw9O zwfyxWT2AaXOnpuvEhF@ji56D*f@LGRC0OwlH^n**_~eA)|9QoKpw_p+$yWk+qo66z zOywIa!mktw{K2|qVcn9Ky{JjLu3e@EGn9ys(UFfLThhAri{-Sem3YHGu+h1{b~#3$oGn#mQL8u!n_?cHcbUn6*_1DTY;w~Yr|Fo`KXz) zD7~mffqm=Zi;y1|c*FKwpS^*{TWT=*ymP|Y-MYmdja%Wh$pduFu1w+k@jN0%2Or=2 zh}d4#TK;A^O2+*LQwyXF27H~(B-yXzsm&o1sHgJ1nwMcie6JTTJMlV>$2ums3b6fM z!{;mmrRmJ&)i2+)I-mFfe(~yq<+sUu^gb<&yK2x8(aU4&$@2M3RO(g zs`3_rOG)A+ub@t%$t;v$ls%zM_v~_^LO<8u~fG1!yVy`O`Bsp_p`$2)sELu_}-*m=G=+Ds#0lM%ngj?LAhZ|nGAcAuDh(=ab;D7|#%`mlA= z&Qh9}+U^@{J9s1*eR==Tr(az=#WQ4GEvPLbp{Cj`wL$(+={54#8lMo)Ko=V9CsLkn zZbDV1d&bf#Z4L6uub1108Zm7w@ZweqOZdPROIG~Zm?DI%knB4qE1>evA{YYcp3Gu?+~bML4GdyRX^f|YIw#Df(u@N#@rIxxHs(s>{Ew2!ZNic^Ix z`4X_~SSp~~FXf4u#scN`;Or+#iFWDKIZPUMk)3@0XSsy}qMzqQ=C*tS=^MWe2O3ztk!>cxaVzQ|0URviCC4N>?_X*RhZ_1>fbAw ze{|?zSn%YJO0w24d?|VKRou`n0S^^UwLmV^v8huO4^C+>BW_c{RD~|3Q$Cgi!ZyfC zzzvfBA}y0|Ts$rnHH=@iWm|a0UeNL|`s_Yo!0A1U4y;zqvVoYl^>pU%HA(hjmhpDA zx!@1j%UM*m1RghDV(%BoBv%x8IwC-NupdlFj-5I!(ZWinO0W1S)-l0Vfc@_pF0~9W zNiSg14MRh>o;*6-SWwIR%Kghq3AgXiE?jCKWNHbtGY7fs_BvZe^fy~zKu1vfLtCXF znTeun3F{hwb!|n92g_d=P+hZW<1a$Uo&`XM%!gD)T=f3A!YZBt^{*@xuzSb8F#FM! 
z1>Y!w9Zz^E&chnp26D$X%YvPKHn9Rzf3rGVOuX{v2(#7d+%}d}7Lmbhtp70)>p<)S zX$f>P{w51)zoEL!HG}qgc*%4vg{PZtiiHBM9CKOdYhS5tG{lLJZ42muT{y0vaFrLe znx46AcHoP+2{75>IW+(uRe6aA1L&#(S1Dd8rksDE}5*L+Lmd+;DWaLpsrOzf`gT~DW zz0Vz@7!fad;TK`=&aEX`YnbuZf3@zb$(9~V|5Y6ynNx=+ELoQEYQ1#AWZBbIJhC>Z zE`iY|zQwDi4LmPQ>eu+ehK}+5*$a!uh$Opl_~4?2&Tow89b9ISc5<1^XEj1M8X`7V zV3Q|0w|92#4zo9wJ5Nh(jfGVf@EPi275F-e9-dAJ5cW27UVgx#4og_^g^~OCDUS~D z*xO6)ZDNGq!B1&rfa70w$A!l&8}GgKVR&=yrc(N2rwA?9FDP;0xVa1c3Ih{cj4lnb^>Q;S*})5B2M-;=%Bi3w`#52D{{P<2n=&on52qZz_p;vE zxhp)hVs&vW2zK7M@XRx^PRy!Jt$Jl0obS|pC#x%%UEoX;WhOutSF}o1H6HeH`=#y) zL<>s}Mn*>jh!(EuppZ!~EIZ*;4xa`cPjYx5JJp&W!xxaWONIP+5V`*_6aTbNf7nEO z>}zPB+~JTQp=J5|5G@yPfr#&#W9i- zZv2`ZtIm~K*~&6f?jVt=BlHt6Xz9@VpmdtTW?mc%n>J{=1csW5_f!g)(D&>xbwIy@ zZ#3}(`^+b$_VPI6s(!Oei(ljBVD27fPSGyBzk5%Zwf>zn;yM%1 z!C|I)>S@(Y;zV8;+h?JG7k;|9s85Bkl>~Kd`sd0kTdbZf zdv!y0EK0#3&Y_rGX`*HFIQcR~)MY=pzItVA;&nqq_tQ)6MbfueU_egU>za6+(&A;$ z_571OzM%~6yzG-A8-^LEFR{%JmTy>9N^|;{so|_%eM+*{GWXAVEKPzd@stJ5JHILs zi>|Uqh8`c39T;Era_PYe(t91UmIgB|;PXcHEjS}?nQ}v5i^xX9eXl%HO46FoF!jQV zE!X&D=Bu26ZXlG6OI&2=fz%5X{ZZ_f+J#q^&#rr#!y-Lnfie>K@@h(^GDk0al{(a9%BaxXWR+;OJmRmCrJq;Gj}&N!k5`*wX8mf9nPjRGD|95V5`G&y?Z z5f<=qb$Z}*GDYUSUNrI}1v4qxzzpwyQ)sf%RH@k9#^HU<4fnUbRMGzgi|^y^~3e{Ao;Ok(z3XWc)sS- z=zUK2Fv>QZXf)_DzW(^Cz(mW#(R{Sa$2(`x>4Eo+61d9v^DJZ>Gfmt*;Ww_NQ42^`f@2v}OtX`BRi9mOmyl=+RH60l6*z2J2 zJ;_9at7!RiQ+lP7VoR<#20Gw78OnayoZ^NFGrXuF^r}s3!os(>=T?J}=U))|*lv)~ z594p{lV%cc62z?5?4jbc%9_NiSv1~CJN8Jq3^^aqn)_z?+a?xulZEA^VW4yO*UHPK zmrUj8%bRPUYlB`oVaYsJI>J7_WE>L^4?Zb@#-a^QjS`SEayFuy!~fl%cD7l?kG&|X zxw#}8QW`^Tu7IqvqocOxIQB>F0w2HX(*5*OtogtB`|y!{P@+*V@#b$-CB+>`pBele zUY(?9;i^tk@qJv$z<|f8F0*L+r34raf5{icNZV!bnxO-{sG;;fzOgjyKH&ZA+x@=r zry=NBfSYVZOC41<=CTg1iRQeh1Nr499pB& zE*eY+1gIktMyC!=GU0UaA-OPd0e_s7uF{j=rULdY0xPcqS`?w$h}F>Zy^4=pwU7Jj zdud~6d?MYxylcdqO15Qxa9zNY}|@^(n}pziEMMSioIVkI#zM zh7aBeb5^}k90y|J&~ahNxdThG)-u0-^-(LfywGcjJ!K?91!*V;fg*V<`xJ8peL$aI z)k-W~m0g;)@&jfzTH2ZEy75rRa50p7%?b z(MQ?eoK^GdKCEWyr!6#>nlC4gqVo6hl$MP!dR>bYr*W%zjw?B|z{{W`SXmXcWG50X zRs0hvV{Lh=F}j8yP?M9UnY71^XZwShVz# z9SK|15@25fMw=zU$A|e>cB-BI>3(a<_*l?h)+l<CSVfE*-)Y8xS`d)Mc-n zy&-H~x4o47CAQ+hmh#66d|~;l{=L%Zx0;}v1rko%8+brvAR0N>LuKx-tNcKiNfNZM z+}1*}i3V&sr#8m2DofZt434-zegeTZV`3_Wd#sOJ7+Z4(&fg?ak`6 zRq#SCXKavBl(7U4htqm?3KOp%>JJ0W%HtFZDsNpU^((@X3)BBF zK`UVJIRlQ%PIesN!Pg#-=Ug1jh&wK2#rTHSGjBhgOmV^4 zSV(RV%A;!XFa32|^bIAsc~&JXN)Ps_%~YmT<0 zR5+z$5l7%@$vzg?{DN~o)56#M2@kX_)^8p@$cq|E|9p{sq;b#QQkvJ>x3fC7&ze03 zS=m6WWKzM{$G7A=gur_=b-38(tV;~PMwvMiir>}6iWK)744x_4>3Nvr5r z?$;AGSu(7cwRCIurVh-RUUvJ1sk)~L7}Ze@I41*k4cd`;x~i;n{kYc9*noBI!T63a zwC6S1LN8hvJ7~|RvLBkh1TX$_Nr`U+!{nBC+&t{vxj(F$XHToy*{Uni#L6nSJyK}a z=$K4JoYTd`E2?WYm|GljZyo_rG|5Xc}EEMqR%Wt&^RhO|aIVR%6 z-P^4d#Z_Dv-&xg}EoOIkAu&WOt{oP1pN-{(aw_Tcw$`N&JM5+P?$ z{NuSEU{#!64sCMUQyLmfGP+$@^!Cz#gCy{R@p|SXIY_jWOxTyF${8I&IwT=< z^dYgf$Al2YS|1S&K)Y~SkKSSUr99eKgKh7u4r}K>Qxmk38aQ@(IBQ_}852wLN(*OL zz}FSXp=ggKk7v+A8`+w9Z0Eo!U{eAslR8<+;M8O63BK|^x_@}dMWI7J18Vf?vdr}N zbIZ>$b?w+OTw#04(=KO!YOV7TS*`7k*@<4AUnN~+WLnaa{*e`H!~R3`H6{3sada{s zR7ZuS!%i}=;)xz?GDfz)vhY(2Keq7C7XGIN>M-)BEj(}GB?}C_W?7hHftlYz3lzpO z3rrL zlwfKy0xv0B$$YSJM_4|qJd++Zt#U8$qdwAWTG~=sX|{!)c1m)U zljn7<oWoy|FOuu94xi?xof7u;k^p z?YZ=ivO3L}i~5}*J2*6q{l{f3<4en1IiKsI$D4i6v%1>e?*r4!Utj_{7+>zb|Iqc) zwW8m-%O}Fev*Pn&Uf2NI<WI=c&5q#n^d+cB)aZbI-2u)chCHLI!~jQjmMIkI_kxP|J*{UfNsC!M(i32+jeaa&n=o+O7q;_ zXNO^Z2bq7$cT4I@ChK$T*P6=GpIg>K$A``mU~)y@z&ZQ#RXJjg4p8}IT#c_PYiyN< z$5ph_^>HMtI7jQ3jh}q6M`*YJcEWj*^K71L8BT=0Et6 z20NnVQw&-?T{U?^@8wLm6ucbzhZYL>$R;f>{Y&GQx0qn@9HEDuTla>wi{6W8oz-h2 zjflbW!SYry!Lm|_l?%P;U}78+jI_yfI_~GM60RNab|ZBi(rbmabl`g|bhA*vr|jh{ 
zo=^rbOJTdp#QnF|E>=UMqno^RrmU(pU(_!!a^5X)Q=2I3ZHeXtzV{ztsXdjF% zd{A9@T`8sa)+`7cm%mn$ zwT8K1%FUtI*#pxgZZH9RSmFB$oRp{4c}gyf4RlL_nMFHr3dAo#KA0@YPEm%*7K8i( z?!9Z1QDPf);WWp5(VOA}$A@G|GxH2w^>?eA2ykgO-qd1er-$MV` zinU?yLHd{yq$^rV-1~rJbV2|;L7kOoCR0qDe%rt#=UNLb@q5`QkK%=qdpflfU*M!} zeA5)C6-pzHbt^!hMpN{GeFwwbC*CZn*+OvWguW$NYnW``cZ#G0-!P6!L0hCCURdQQ zOak&H(Wy_#myo9c=OrNbIHhIR6n@6yue4CWoX6e_ySCL0&I|pq5?6k9v^|$j@zh|U zt)el>i}gEJXrt|nvLRU}r>+e*{+JsMAEDc^qSjrP<)xtv;a%mcQg)nl(eohHH3Wobn- zH839k1Hhu?hhB~hy?ji(Y^4KN>AW2JNa2sornJ0FnKc^r*aY>7dC!)T#TP~oywGxJ z{>NXQQ~a?6j#dAIzSaxbOIrNp<1g^cWp(2zQhC0VZpOIO3oSA)r?QNeMIxZE)(9K$R17mXdpVmoYy^AYjLe6|=ahePp1ANr8&4J}6dx_-NQJ zTy*_ctU)Nd-Mq*DE=}T?dA^!F{`%if<99WIW_QXQneeoDVS1-jk4}1F*$A6S5l=F( zz|)eQlGv_2=gQ#tkNgRqaaxz~*)b!$sG;;TtJj4UAC&GJ#772uc45&b_G|gz>d1$U zXJBT!XxVC3CfIY}V0g-ol>i6k4kF+?2534Ubwa9(O<~zyPeQ*V_)m7+ru>{DX-OdK zvP)@rK*gJJ`LDST$L?(5>t4bB?xUv-^rD8+bAPis?6$AtdYJa9QPysGN$Y5K#uzk~ zd29)W8h4Hb&Rc<_LaPitZQ`Qryssfo{Yw5MIXr_|@z9b9Yp^SAh;&X0$eyD)DYDv@ z$s?2iPM6oVN=rFqm6LzpVsIC=c+6qmW8b$u+V7}Mh^W!IUvuXqHV#gv5lA~r{V=&O z+o&c>QNZ06>vj4L3H?V+4eOS?Tx9a?h0llg-(FJnZdGA(G{nd&J{{&f^xwmt9b0^~ zn~izY0;4A?KeF}5rLkI~>S z0-t!6VCEaOY-IMtQIL)a##KtlV~+p(6Gw;64Sc`t@l|hywcED)!!Q!K)55R4FsC^e ze{K~fQLl6Mdg4j0G*Vvn3|9M#0kf+_`wlYqn%2?kw$nY`dDL&hbw9suh%L%w&jJi- z-GM4%SG=!<71y6ONI+Ss?1U)@AFzd2xxy=6Q@F||O#>SMIpB{8t+rD5K9l{4h4U;# zn0eo_F#YZ^RePt)2JtK+d>`pEh%%c&+;hhbR3 zif3Xv(UuH4{DF9s9`rz)6qDxqW$*iZPBZ3b7T&SIxvQ_BFCjKE`kn%MbnPB)m^h>0 z8%fNv?dIOu`d%r?8z){9m~=G?D8C0^dm>GaCCe9bA?FPUXI%9!Ha=iQ86p#wUOc!t zS~|rOwrEA3yzCE5emY74?`Xxzj06q9>>l#S>-Cf22}8zOuJXPp&sE(V0y%yf5&cod zFc#wvW&ATakERk>(W*u-Etuh_9ikDZz!U9K>EOkm%GUPrmGojP?s)Y#K-dPjY&U-2hnRJ6h zO=2bL4ainj*{vCfSu~zQT1Pha+Ao(TT*Q0hi64gRzW)FCB#xP8yF=a!oBhE5=hNU0 z<8%uQi0K4%{#1NU0U5M((CGwBafFFi70@FWcwRJ^vT4V4l;Np@IWIexP8R;mpHq*{ zox<;pAL&I6rRQzh9A4j89&k)P@0@T}x31A2DQSv3Epc5GGLv;&T$}77W)~P~CWBz2 z^_w@}PV>%eO;MuiB67R3y9^PHEHS`^7 zUxT%r5n`_cZD(MeWt>z|ubEI$huTLwtJ~2gn|u*ZrQFW!rrE|TKmXrfDvo0W{lX_L z{Kx{}r3-|-& z-~nDTVai8xPp2|;euV_yhtyttiY;1kJahuCU#VT;<5^dYylIy(?DCeoj`i8Q5}K=dJ#1T%}^Z6!OYF%3f-oSG$ov!>u3IHmvEj7tM4_F^vql{$xP$Z3p| zMcVj62St?Z2g1m9q3^H>;hZ5CGD<6asuSK;v_*^M0|q3Y zXmn}hGvT@+p-1o2qERD%;=tMxocZfzrDVDG6??OnY>tJQ{m9C|qjd@V)OeL|+778q zQjE_C&;ek4NLaomzpBEd(~`g-O9L(JMN9q@la-c6U$TAQ{fd+68v<9~GcM~|TxyG6 zd^^@_&OX-p_}&$C&YNw#*7lPEgihIZCW4kRPJYOSTew|C1fE zl$M?3!roRgFd!cKGZqFw2yUKyoo$xXMQlSbeG4Br^U$e7r*QGG@zLL+%aJYqGbWSA ztkz=D-^9t4i6Wi595j8BRD|A+F1?`PbRGLPj4Nl?`NE}YiC(M~aE9gwcd%WJ1_sanM z-r5CW!?IVss7YG2xiDaG%4j7*Mn^u1+J$k~eyK2pIBZMo3!eN@N!A*si@k6<>Pp(x z67)0fQVZll9b}576HfK;(t(=_rYdyMfNVu)!dgK^{$&5L)8RZBF#a2VE^M>@`!?Y^ zUa)G*HrovlH?F~$bIuI?d+}Jl`Ivb-?Q_~hQwapILnHSFx>!*LZvsy3p!w<(_Q+iV zyNu^9uLqwrbaOl%6aZ%MI>N$G3k7g49yaaR6@Ia3r5ClD{?p4QhRz+hbS3!r#&P$8 zBJuSh<(sP;(vdLnfGR8*`akB!!_+%2^lU@vZSU<0OJ}Ytr8(jf8*JNqRgDDv@3n{P zuG=;{#w-i|{GrpJ@|T^;RdznBslTC6(v{D_Dt)EyZV9TbRfyeq9 z4gH3l5x71cHm=+fwrt#9)bp&aXA>&uIJt_GFc>@OyfMghx!#zpoxsL|!PNQ>qA@!E zgaus>VOtHjhl0gvCSLUqCSD?#%q=$ ze@sjYe?y)XX69?~_zp5*Pou}jB$JJ>mwOrVsz7=GIv#H=pp&gI-8kv$f^QVTBc1Es z-&jg+(y(!%YvNE5U8X(a_-g;54YSOtH7~aXpO< z;B7r0l^>HIX1Gig3b=dRh;U}N#;-`|0N=A!9u1l#yRii;TC%NSZVK2%w4V|-114P0 zu2~!CL} zD*F6hJ?wGB3)48sDVHwY9s?-?CTN2Oe`vfJd6~0nVNBRGGpnP+rhg<%SFZ<4=J1HP3r}>UW-p zPJ{Q=R44ui7H=O16g>Ao7izvDQA6;IFN~{8XVHu6vSV3J<7CMuRg{EQuQ-7?sbVsf zPq?JRFwIJ=@K=ZbvIQ|&=6PuO znv&&Ud!cFgnP)^IlRNjbM;ID)(3$iTGrF-=JKJa2)0)K9N9}B8D zzLI!47MIyoVPsXo#8K|?*NrJn(J#Mq6#9k(zH#utq4;8vPyEdDdxjBH&hw&%&=p(r zF_L-Og33{41n+5hf!ARpz=u2!EuRAe&GG2)L4s%_3z?V8F2%grb03ieo8H_S-kiHh zY{M`!@xkNDOD=CO*&KGfzdM_8*%{R8emiCHI1chm(V=7e 
zaOrKOn@F4k_4H~Bx)xvsBje68e$*RS-Z~y|{e-LPcER}N%!l@J7L9`I#!nBt*p$FE z#?vNnARSCJ*!!ZFo2rO|Ts|Zl&o?#Fm+jHuKjpvzTK8L~Traj^c=6lI!txEPO6iRm zI5PC=W-ep?6#w#Ew`3v_#|u`H^iQ!z%KIbagjbfoUOJXB`z_4Og&Ya+*CLZ-ztVdQ z?XLkeGRcKyBfYR}g;P04mJYfK(4X``gU|-2b_t}8>K$9O$Nz6y9P=LMPNf5CdJfqT z)EFm=Dk^_br*r#g1D!PtLnq%Dm>GFk^vqAf{yp}LZ-ZdsP2aGr)w#J}-a?sWD#P$T zpz}6f=jA>PaxWelNcl{9YW_0So#apLuF)5S^Lp})nd1XLczuyQLqR)UgBynr$_84o zOUqhxe&b*&w(m7tDETXHnsr{8KJvN#`L{NNt$Ry%?J;`J0uwDjb&2Z@i1%3XwBvy% zTw|d#MuK~6ClM03_u_GN%Uj=lZ9zkqx6bH#n$3#G=+MHwjrg_&e7>oPR#S5MepB^D zdSPaG_gW~R*O}eI=qm;ke4~l^Pp=6(laD{ek6y94e*@uzjXT21*&EXoXPe+d3p#(w z$(I#0fL=QBic^(Q$#5NkXpf-@@;}c@?q#wAd%c%`FHg@<2G9R&vB-wvt3F@8gTo|g z(Mz=#w>X$4RNz?uE!P=_kRBbUV_!XvvPi}gM9f#G`GRr+w;JEkg073M2mJdMUbIla zd|QH8``)^OZxq2cgV&6^qLkd`58vcF)c-Gg6Yz!bNIx>t5Yp^vHVgI1t`c0 zGnQ~HM&zl_q&Ct^E^w~YyOrO z3w|eBvEm3Sj+wH)DeIryi^HblZ`GzXbz4tb8Z#~IwxBjZ$qM^&LnfYbCIHA(?xMwe z?CCTe=_CgO;yo>yY@`EIH@!T{PKF;f-X}v|+w#~<2_z0EXB1XUc*#7L9Lz0|R}}%g z`t>KmhwrZSq9*BBqw|I#8qpBM04Lh4?7w~cj$zDVTAA1Rng$p&7axbV;stegs3vXcqB{Qo)kvsz$@e%le1&hO@|8gEjNtwP(@y+DX`ishC)iwY)8eGSh-Bu$q;%6X>H+B#56) z&<=+#T{~6z1j}fwI{Ry)`eCwb9+>b4A>_{j{L1V~U@udZ_+w?rX8}guGUbN2TS6f- z;$LPyTz7ZK>yMUZ1*qZ!Vb@wZ-rtllWU*v(?0t_z!z;*6K9yqhlBKlbdQAGBhyGU< zN_FM7Ga2^WI#@@e3G!-q6sux4Dfun*5I3RkRC591$8KFj9y;RUBIxmE)Pq zxL;EO%ISFvoU;d&hnM*>^?Kyev8w3#R8Bm}yq(fJ688BKO`KB#N)C5QoKf3JRF#07 zTsrYpm`d{3G6b_@#T!rjy%$++ueGVM;-hu4Xk6lR`zV9X8j0bT-5yTs!42~o%zyG9 z;&X*HN$aSIH{Dy604pz9MlwuY5HDJTN92-$tAgTsY^E_yVd>ycRk+$tY??p0n@0=@ zL(b^sMGd9@@r@;6_kjbYG;iQVEtA9nQ^cr^GofTixF7&NjBCne3%L>5SBu+)m)C6! z@9);)RTj^FpSQ5H;7Gm<@YE?mmaWXr)7a3Nu^M=;jKRF<#`Bi~CPV3?eSA=CPyL6! z6y>OKw@#hHH>cE1lGzzU0~P9qznibR0v6p=ouPQEg6vs6D}epor`$Ze;2TNc4{t5r z9M<#oTL+VFs9eEt7DrFA?cm(U-%KNJFu@87Di4(byr;b$IZ$OIIXclfS&8sLK)&c{ zWca1WvhKr8$=67%rl&l|t8MV+1gxiS7Q;n2a4!jf0kTi<13atbo6309=xqdX%t zv5GckVDqpderA{Lj?u9*?p*?I5Fn(?g`o*c@?a=~=NHYiF9B~C+c132>1TyYM)(CL zJNUZgT^hNHc6zs7VaAxt(>xC}L0Um$k)?(?pX%V{GnnYA$?izJ?DFIzLgsT+r=A}y=om*|CTQSRL-WPUldrxUJi?jB?HxM)qSpyN9j7Vn<2m7&) zy5cjX^w=3=`78IAWUXQP4Zk?_yh1a~6)S)dN6V@zpyPDEIC$c`mgv=Wd8AWS(G zbf!8veqFx*SD#}}=3bi=94}yE@lW%Ym6AW*_L9Hk{Opm`Y%rBgu1ZFgOjh(<0=tGmZu7%QjPdmg`sz!C$;+R2X=A&or$snP8{|>I8v_7WD#J*wbF7IGjpVFmYhTxim>1f9+s< z#JKdT7IqDTSAM;;F0ISz-^Ho3IaeRNU@C#9a=xlOyo|Dg7l`*V{Kxa6y?;S_ zkWMo3vh#d%bd@i^Zi$v|v-1Hg9-nM9Ty*u|!25I_ctmsGuKiYb@|CT8B?}u?5K0rS zTt|D6>7x;bF}@~X-A?ZkE}Aho+PI%VM&q}>fAGG3pR)}I4%x@}<~{304Wn7jO5vf| zPaI5Tt>JIK^rqOKCuv-l^YS^V^Ae3LG=2g=A1PG%)RE#ypC02oCMY!`ci4F}8u)mY z?XzD>_WU#Zg+XWAB0aujxr;s)Fa@}@z{t7b2knFG!JbweVd+yFpO$D#st(Wr!Wnmx{?ZV%!O@k#{IB=7hO9;V z0vuh!zeQ{B;G%`ehG^mM`n(;oePb6q^&>B`>RhYOGyoBttZWSDZJ;fd*^j*PvvnWB zn)le>S%*lSZwySl@#|?q)C0DW$}*B+4Pty}sZLZfkDH3?9!i$TV2BqtuRk3Q)PD$xEVc?p^&cL1 zOwEIBN7%lRt7zw+**8qK4@4zE;d7TuK7_i;PrO|1p)mF>q0@AInmWEPIvUW@0Du)h z-koYR+&}9v`vQD<+;YXJOEbT+;_!b>9Xsofa3vhdF#ih3j3MmawA*fT7hmaAVM`{0*GYCtVH$xpIE6~UItiRsAX->*kG+$7 znq{Rm-f;1AKMnhKYfR}i^|ZAKMaz&i6j4$e%;pUR4ajM>&+3S&^ieg~zWMF2dLD~w zje)`AuL*t5OP_8CS6S+(Tc94KlPNNcTv)X1yj(hDM+#Q}V5Q~b0AB}#Il{b7(PAQ!EU%JZyf;Y$VFb5Z>SgDTDh>r2TyTU%_h`9B-HvUTUE z3vB1c@}s?$IVvklmDGi%0_QiTv)~&|%zAi5e9X+J zJN3@`GWYspq1P;UH*8wHHI2$7op=3I4#Jd&=oI@jQe72~e*y9{&u5zQZwHAib8 z=4~Fc-n$qcdrC4eBcqyG8A0CjM=uB81(?;3J^g{#C z1|OdikP~~%*~0^pQ*o%h-$y7oV4u2Q^z8o-n_)K}$cCdPb~10AH5&OiAllT%1Y>60 z6WZG+)IH37*lvPaBh(PEH|ZdBz>a@X2L z$vjyuI%ng1VSREOI}uhOwpk!rnk>9z!s?7t9dZJzKG#|(U|65i!(~>6MBpGPeARXa zs>SAJc$qQxbC-U8<>P&t4pAL&GU zz38gA(g|Y&T?O)0@1xjZ?`5f-bVtJYdAU!q2~fHj2?uN~@J`>fPnZ2=-|hop7)BTGDTuM;nsc>G3msymY0 z9X4KrKk|Yer*#i(S>I@Qs(4LXSS#iib;xgEFwAE;JR$I6C2!_VHu`_QBZ5 
zgHyn+1XeC}vXa4x$NYt5iE=35+xAVS;{`Y^>~zRuXpICOPh4!_EC)*3zyw?Vd&bUd=p~H%j?2N2M^haapT2;R`uowms$Sif-SbW4P$LFQ;krFu^ssu$GxxM~RF~2}3J1Wq-8t z{P0Dgh(F=6T~E$+sD;n@%#QDScKpb$mo<`pdiC33#RpqUNlqJhUfhi@9}eX$wcOja z+!l*W1DTsj{t%-7%m_`cJk~^udZG!Iu%f*G9;kxg*LU873JF^21;)huWQ$`<-p7O3+D9M2CJ zpVqnDF5LIZBZrPOz%Me^0)n0N`kqlY(VT8k{S1bpi^Z2?Xv#oX<&oN$Nwo2DWFEXu zwDjWtL;^663*J2WS}$q{{nY#y!uH)eN@-nc?|(5N!hzx+IkKR6L-fITidnQ`?4(rM zg*$e2w)@=qvjFr}-2BTstyp78@|9(C6hoN$#*8{YqQnt5Cqt*}0nV?}UmgC-8WR)E z712P3UZ#UIhsllf&{cr`lml9VHfSpWIlr9T^US(R!%lXr_^AFt;mT5^yNRuYwHC2e zq1DJmYCRa;5_+FII1Cs&J?izde#y-6?rN6A8v~=S`h4Bq@|H3qJ|1LDdr6HH$+V|s zCyc%&!OSX0H%@F;A%UAm-&~z?-Sxa@s+xhwf1o z`WqGsV77JXts@G)(Zojk$kW?PHkXo}aP5%Ltyg)ZS~55WCVNJ)Zg#Q7l45C#^Hxym z;=w6+T}sPVa`D2HbqZC!ncBd6JI{-jFN+tJ9Q&zHC2$QWDrTt{g@51ZV#YWI|@>|FX-&~@;> zztQB}u_@rSmGi2;K3nLIme7d4yKV0~W9+n2^o?8I=}@tYLdoW67-kf=Tzq{Rg*WD| z$c0u?h3}+x=m0QnD&Xx?+UpfpGH?~J^nR&VTs3xTIMZIDY9xGQQ~8JIlvXVRUwfW3 zbX-5SS{Hr?`0`6-zPvq6 z9C*cr7VojA6-TzxDGe|n-qVuFMmjKc(#xalWc;aWcu&UjXv=d_0*QmlS%tBN_Mu9K zo{sUKPzrW#*%VgI=9%+q$Qp-g9nx&>ko7^uJR&v&?W&fZ!u;QqR@a(NxCNR{UF?~p ztG@FxY>wIi>6EquvmczB=Q;hG@> zyr`k{><#aR*X<=L4;PT!tTk+OvIRk%c zEH^0pV3Iljui*KrdNRgrFSk&@_-lq7xBcDex?jGpeiE84i4>V>1+j={6hC0>Q-bG= z!w&>$8-QHxfOMXhUOLZ11JVg2Q`_Wa$UVr1g{2ekv1o6DtSV5Eej-EgmQ6dt;$+)L zuRm6riSoraT~Lb7bE0oAeLv=m;9fip=vcz^%Y}#G0bw^IJj>7RiURnCzrsr#&fP7 zs*dYrN20x+bW`3R3!3s~>AeKEPWeQMZv?~M?(b|Z_eYlupJWp)yJ2J3x9Tg!(YKmA z17^t^}$ax~gI*;`fbUO}c=&k9^NMC989>_sNqJEsV9Njg7yK4%*wZypVz7#svKcsz|rRzIJ8Yy@>NHDzbEX7L) z-&`1V&t{u=WX}Smb`~{4DlhR&YK{ez_-_IK8#{)7b|mnkR>i=)VOjZpvZ6)8*AE+H zZpQ!$_Y0h?DE5na*L9!vkwiq++aBp1Syxezn>%sdz7G9A)mbxdFYOz3K1 zx`hHJ+O8sF&pET;k3{e_7I=K*$;04r=Y;b|w7krH+~(++_4~UYFnjJm%WwR7i%v0z zkUew?vPUjjIJNb>()8H3ZAwpi@xm=RhCkj9jN$wXc>ZULita+)#*uIlxf$ZByT{Z$ zhsxylaD1AM_Q9$q?S$1pNUCxsGfFF1!q`PyF!ua_Lwup|blXWK0Y>!*RX!>|%66WG z)fNhP+MYvW-|j{O`NGJb(qt#Jk}5${yq9~r z6xlJ&*$gqePdeK$(N>uTpJV-TgCX7Bq@z8rIOT!~E&gJ0cAL+fCGUjbGZ`ycITdj` zo7lY@*1Y?6dIsDCEI(KuJv90szDGGJ5LYz)kLVOj=dq`;7vEIeRPK-Mb;wI#bqxI} z0aW@nI7Le!aa1|2FlX&)bp*aN&=a_2^9!%oy6MgE_QL1As7acH3&?t@toO-Nj|ZlK ziH7aMID2u+1Kak@f8y`Gs8w{=o@a&&FaK1U#`z|oV~E)?Ox+-O9$GSC$x?eCPqyAh zv?W;paRA|zj>?w&G&eD8N&F+(kiOZ_nSR~d^2OVVfw(tm6}|lZt>L*f>q=?x!L9*z zTpI#KXU&{C`z;yPF*|!9~#e~k~{iLvIvv` z*vsIVe8~oy73I4u6fkh~*`fc)vkJb^#Oy~_*v>w7v&g~a$6<@ZjjOhVw-%M}cH1Fe zvrxCpWwOi}Yna?2lN=t9%wy!bD3YhP@W%q)f7AF)=_CjHu)%mngM19D3hzGUn=XN40UTD~UQB|nT#e3Te1x5+&?9?~CHwAZUs zLUuzQKcX{iqIC1{!I4&8y}l{Bn|(;^_mlUum|*>5gIBW#_wE%wH+DptjeaJ$(}MSX zCQzJ$>OoWZ_soXXyaJBLghz=VvXzY29$qOn;<&}(6$4AMMlk8F8+n>9=Gb4})Tc0! z%;P+29r9@@ZffMyIa*q2>NqJ*kJTJcKZ5@k@WRg)8||xys*H&rzw+XdA?OIFhK16*0)x)(A1)d)8hu0IzE>YDrN>RXsTW=x{U!QX=4tWQ z8=?a=W}^QQpUHFs&}>Y{-;O;y!=ktA9)G4`dn+b|gVqBw1+>uAMUSONPGEB7Z9MJc zNv`*qTi*3*Yg!eS35Z zpBgpHiyBIA+OaDFk+mx-=bWl3shO5OUsi$>M0Z2fTV;_zXE zSGNA>((!Sd#9A&Q^#}HgPcYHaN)|M}>+6T-(SOlG0Ze|zT{WoS8%->+J=eFs!?xQs zI3C*_?%A60W>!*AG1)v<_|xx4`I{OK+~5&{~>x9c*$OE^H$~r~EBTlvVZu?zqhI7cW5} zYBjNH)0(=y`bYI25zgq*yYLzFnE0=QRkh*LSqB`wBty`aWA#ghUtRvXO~mO+O2CCo z%MrdxrpVHRkrBL1wAaZ-GVw}7ajLK-U!)H$kS#oP1S_M0mh5E4RQNVHMN2@VW;;DK zKIxgtnbCV2XjK4@|2mf!>CAid@66In6wu^!(WudC2#h|QvkVMl2HM$gv_X+IEJG$- z7kZsFK(QiT{M=8&zTMkPveqz@ZvJL|+)^s+)k{XG8-y%VHC}RcnBuGAc$w_L06IYF z3un@Lx%5?7ajNjq+S9NXP?caV`$NOrF)ZL~lgEb69VnLL1FUErOgfnxhYv0~v|=!X zJIc2zvLbg>!C;cDW?7+|@*(>&as~XRLMBmkV`IPslCA=D>ZG7ckJk`MD@IKvK%zwS zpf0Jat!#Z|V)<_E4x3>8cwS@Q(>kYnxA5iUIF_H@7CvV|^#bezexLHJ=#?rtI}IM! 
z{oRxIr8w3lw+y#zG!|w*y3#%v#aOxo&zHD*rn$@Rt+A7>+`-FaD;x1(*$I1@r=@QR zCLVz8k%0VuH#K_kmrK=c)Zx@N<4GnB^_DZfFs@Fx#C9Oex~O~t6(?1enk5(85K@jw zoBjx^Sy`MArkD?PN_IN5WNx>h^27f<{h;v&EEMq2Yt1z=g;}!%?N>DN}?}AxADE#x&{5ZZovo&R^^E`A_Aogh%z6K%S+^-mQiP(nWU(I~D zF0H8}lT|GI$iUAV@==eSKz2&HX7HKJeoYlm&7bga|4ohk|}G`bY1+|PR9TMM5H@4wx|fKI(pj@uJ(j7-)gqsFM_yjDe$Qk40-> z>Eof3UUFgSgpnT&C|+~erfP%ae%zG`s)z$#s9;0xnM)~#&)!@R{|o0VT9;hPta3%!$%dRh8k zuyB?I{GSyokD>8vg6J8?IM9*oyp3pK#r3>su!mX3yzT{TP_pfv zUFD%!Zjg2xp7&{#t6QcSVDJ15ozS;rwr<(`w%O9oXs)ziiW@ds)ygMZtZV_%)`pka z>X+odnXG5ZdZ&_U9TU6N{LHLZvVO^WBl@REw+nMuF9>_>oxTK~HC}B% zqKd4_3LAoeNfLkzS{T~%k_&sCXmrwxZVsDy1Z)HaZFR7qr99bjV?FO=FO3s8|5o96 z%!s>0b-j0zZB?v9Tr^{_Bf5{# ztMh=oIZ&Le&}){GBk?*fFRab~yv^54(XUV%@zqIVLbpylF?M`_m6_cK%BuvQxL|NR z7(P@Ckf1XUs?11s7Ia6Rv$7tI)KE7>G*eBk9@Evq$4`5`Wt+%Ifc@j*imxYoXw%NFJd zEHs`$HadR3YTjaX52HbZqtp$_6X^;PmRvI zC2KLPglOX81j^E;FH*b0)jDkb6X_%4(1+=#pe;}+{9I;RzVlV)kL-Ow2X`7h#{%b# zLYQmvYYVgk1@NBMpWXJ2f^QKKGt`7-p6TnxPY?IoyF_~r)NZ!iZ(F|47&px_s9k>!SA%J`a+dW&`evKy%tUrk7qf^DWo52z)$?33wCJKw8qH&a3G6nW zSu&aM7Z6^-$ozn{vPb93OSWDn8tg+FS9VInV~!0VYYLjvH5KQilebN>PJ&Bj6^+W& z__gl_o%_mJ4~Gdi-W&RycR?-1QUg@3OP=E~LK>LZ5HZ=Jy|BiGfpUkAox`XrKNIFY z^8F&+Jv%-K%U=Ff7(3%0tG;Aa5yr`ixldGRBSXN4tRTMs=s|i(>*{UpO(a3o8 zEwC|J2bN*p&tes8&DQN<&c=63$H|$ytAve|o!7H_T*+!G&Ar!uy-x^7|17}W<3_|S z-rwK7r%3wei&g}}vBF;+{>!6^`dPDn#=>$7jEb^rk<9|=fas}_e=*k?%HVkJjInde zo-oIr?@QsA|HsRz=y>t0p%dDmi4u7Af#qSw7sgiYFj0y@Wv&~`A#&|Ot&cbyhTE5h z`(JowV9P_n+J*0h30K<(+~TsPbwDw`CBQ#3Dwem9nYVI0mbcnPfn>QY31VY&sorOI z4`Xbnm4&vayaRekT1#H|WX6U6gSq3f#=;^C+)<72^3vDBm#+TIF@0q>cC2*;lcn3^ zwC>@`F_(oW-*`ShlD#>&d40P@Z!HVs1~slRzx9$E!vB5$_ljc}L7o5CUhvzbnQeAI zt+97w6xdUq-(#pFkI;^k*~5hC}(U`CR-nyky<-uw}>A zG?^Dous;`89mKph`WiZ~$NvCi(9(;S%wtbCW#`jT`p7(;+DMlfX?hyDQ|E`u-UcUd z30MW>e)kDGqeorwnb0}; zLL&RVf5eBq1RSRoFyV%;wj%cLH0o`3bb3e{)RqP^>}{)fFN5}A)7>!03z%{B>eAVZ z8cP3c;qtI`&)!m+*AE*QI+%ZCE@mENE)YZXrp&b3E$}!MKu+g^*33jonL7sN^W!dz z=R;On3#WDL7)bAT2n?#|TL>y{29*SO9+JGpLIHftYQU(o3ck_A6Mz4PwHd8zCmNln zw)Dy+!15MbHy&xM6_*ICatp^)xU>q8d3&HET=n^49uq|@KP{fQF4_?OV3P+y5@r+r;eodDk-GnD@vvVQIH2JcvrwQ?ccJn7ol zr~2bP_}UYmZ8goddwj_2M1b}-^F1D4EPmj%$3yzE+ZCf*xvXWLz|bcPxP?LF+l86+ zUo2+u^MVE34#*=EH6Xg9$)(pM$;*)`ke+C4WG}gwN$1l#5^YJ=0dYPSK*eu^6T1Zd z&5518EO+m$nj3a~_@343X8!VxbmPlfL~}hMtpp8Bta*9kf=Q}_U z^usO~ykz8o=M<+6JU{m|^(5u%k2~N`Kz0I5+A|rG_l(YQw8`#P#|XcJpSwm6kNHdw zou5J!*Y0?xR`vYu0|)AbzdZ>DC+keAxRb5P6*7G2E(={P6mZ3-8+SLoNYoJ@CA+CO zRU3H%mB6bH*={0x4;7OhO=vt$vso#w&)yEBK%sD35Zh=UE z^ZvruB+F|>VdLeV)u6NSPmKq=7cw&W}ad}-+jikXDJ^k~cL#HmKGMM++-xjuoLg^HO zmJy3z_<73=PfRN&ToevEUcu-zlNbZDOse$121Sz?G?o3~79x-gw^Mr)sUwjtz&Dfr(uTs@?JIM;UgXcX++ ze<1vF@ybRecP#0A-y$*5l23v)@#0z3&sZp+NAJ^)>;CQ>=o4R?WH)cfLK~dOC9wO$ zz2S|g?4?Q7X&crt5%NRCY}JW+p9OzDlEpEbI+1A5E?n`MQqvU<9{4CMeRV@TR!pD6 za6DV_s#@8KdCDhN%0p!H{KdoB4=;ixK%;W21=WYNk$K45@7%L1ypTMa=Jlep7ED|{ z)&RcHSlf2<H_+|GzDNB}?jbMNuV|6Xvw2$OM6+6=+M>hB1}q(z z6)P|Ebd}!6g;pS5bHUVgfIT3bai{2STd#9sU)X2F+p*&P;_&Rne$jPbiI4iJtx!XQ5l;k0u-+8TJh=wc>*DPuzq=XYw-(V(AcXASwX}EltLk`B(`r}7d9+?rKCH5 z)YLHPrf-&HCknH2)`RhdHJ6`$MzieqD+@GQ{>0UYA*-TITn9c4(TZDIVeM#ND*gl8 z4MRqCDYa5Sx<8n^sFX@4JLU|Vlw{qRPDl{FZdrHygMv> zZmsR8(>`>yt>Ut?u%pIYW*n!M1qgCP86h*q$_i0@1d(!sH+i(TfjKK@kuG!=j{&>2 zahW_zC#FTpW+xC;_z3=eHHjsF^! 
ztzQ{-?A;kU+l$w~4cNMKTlnjT?X`k@Ou;dK{?Y?s%J7NDRE}w$?L%aNT}wRh?$?AN zV`F(|K1v&r{b8r;;Nysn^)Kcua|f<9eT!DHKD6)JEVXyD5?E&ZdJA$4Tu&ewO}FG= z@z6fRCf;P}BZ9JT3i~vgqAl47UN70l3b#$NPSDG36_CO6SDVV-&JQ+*gLX%S4^Ooe zsQQ9qX}n`x*NIL3p-E3pGA6tVT8l}BPfkS5C}UA$bJ z$)hYeF450R=)O-b$YY`8NBh>55mB)(=A>RQz{N-$E=Xo zEF!h!`Jb9;Elq?|Zu{fVZ}`QD<%z+vKmE$r9u9}xc2btJZ(F#}fdPI>=*xR&*e)7$?!^#h~m6Dh?@VwB|cI3z=2` zb;kLWU!<8F7@x^a7I|b@IAoaejpc3WW2-^L_w8e66~J8)W>2bAlC3g(2Cw>VqxymL}<~K%1f9;UT+8dU>y{+<=cx!doBt8V@3-nuGE zuXB}`$#jV+gZYe^i}}prR+&?m97*^u|CQe!3eYERmit>d&xHHnyok!3e0g4;v&5fV zI=APsKk4)bMO&oTHT`_{jP&^{*?8N1z~gj#8fL4^ z>K(zQYgeSFmd&$8(YsB{y>lK+Eb3~wREs%YL$DuJG~_k&QdBV|-r!ybl?7D^)nj}R zAbY%{g}n~U$QmI)8$9UR4;&Mr!?=ut4!eO_3$)**Ek_|>w8CvM{jonD|*_xr0g`^n+$6zC7RPjE!wAxE^If1Y#q-`Ya3 zJU-|@2`h~Hshq`|ZH&-F#)uNJ+5${6zg={$jXN4fj?^yR*y-1`lK1?gN7LG8XP3OT zgb+EMJ^ajgTVDly=0Vmb4IFdI#B|D}iyFpWhEd*=Fa2aURUDXdi5v|C$H3dggNOf4 z+<#KI0xU4Tb@B8I(*@(Pg$R+^YM=l{J19H5l;L0 z@gujl!m@jJ(ZuuA24Zm~ac*}gTAPE>ff)O}hgygAALFEF{aO8Xn-;!LJlpdl76Wek z7c{r+MjR)?h%^>Q;iD1q@Iw|peDLrC??;D?+K)K!fsi@1A71Ouz8_|P@656~+DS^; zH|o?Kaom_R>5@;>tn}itr_-wZ*qbe0)Smf!H!x&78gxb-bVxe)#@}c~$64%)^mv;; zF2f@;v9NNSpTcbcMoN*zY-y93n_`(2*Em}HI6q&qB5l*oO$UCKY1Ha=7wz#^#19judevujO4~jH)ljrc zDK5omIaj&bj+C3I%R0*2^mF-_ou*7vkEUn7S1a}kKiV8|$=&mX`~Enk{EYxtLj(1k z0K6bFZxFA98${>bh>kizUn0L@e9Oe@#p~1ZIWPHcq2`IQn`mPIBtk0i1Qi``xv&vZnGUT8F-$*EN&U3MExckd_=mk z6OY8B<#;3|`Uye6@n4eTpo{|SYVFR0E6lJelLIVn)bZyvtk-VM13F@UUx(B5!|df# zTEFS&amFP&Dt*zr%G%6DkEiF?uWi}5XyWvAu-+l3{;l0rvl8uKF!27YidW6kR-!M* z*zoA$C)1YplPj2;A=e#%i3lEkL|Wrw7e0F7B3y(GzkY`+cy*Yt9kQSY@`)cn9#C+V zQ^7;{LG$V0!?5?20Pn;cS|{tGPyTMY=##&vzSyp^CvN)_wcY{Rsg3@U4aKh`%Q$ui zw2hAJPP_is(}DUu`v$4zd~wNJ4I9XS3#?thl71>d}An$i8n$N^eoEBwxgKkMvhq5okW?G9ky<7mg! zud3q)F)~JYcz%z{ji*g*10)E zCI;}jrUp<;KbXHHz3|4HE%CpeepWexpL|4K49}V+y~xGG0flck`E{J{cE2^!H^Ov(4h zGIf`jAKNq;@~p$45Wn9sLbzNO;b%SUU;ddh)96Dvm!*OE4?L&$>U=KZU7y&wQZRw1 z!RfeV_FZUVC*J2US6V_YDZsTXRCl>N>%d0QjIU&@vTAf>7JkSAA80YueLoBR z(8aIu+E(y`7rufRKo10i$p#9u&-3Zn!>|{XfI8vvO3t_HBprV2sp-rsKAU)~ZQS`c z)}plfnVE?r)(iN^udcR>?Rk)JF_4rvDrEGbN9k0+FIAkBHm+NpmOpuyCdIeR`NB$8 z?3c&4%d_l0Ir`34>~5JK?V`D2AFY^8#lN?haY?f<{F0%UX7nGeyh=4KmPP|&WXo~ z?La+tp_kLu7?^(M<@M>l<*Ql>=bm(2I!*6_HP64h*xP2KTHi>-L}rtiF3lpzCo}lL zGhf+vUj-qdJJ)IW5hm*>>d>=ztOzMc!s8DNgUJR8MO;8PEDN8%d}3ZJ*dcmc@Yl4+ z3jGKZ7MgVD^1!Zy#y1>3apk;-pd9XBu{v$+9JnDho~xnSJ&06=1GGDM++4QqIInt- zk9Jpl?-2U8UNW&OV0ESPvMRHS3L-D8tW;&NEUMs}41I9P%y*in#xYU!!CSP5s3%)! 
zA%vCvD&epU5EVk4;D^w)_Q>_5@kggAmyT)0wfOPp(+kUA@p*%TMRa@D+R+l-!_!#g z^Gr+$qN&d87^^r8<&uw2YQ^>XhRtdDTx}*v51TfwU@P0RC*bp94|`_LrzZi;`p?88dZh9S^okLiWW~*Kand@1t$?Wg};}4yE znc;Eakc_#{ao)nd4SYEG!ind$GBJDUoU~-kvKDWs#GDli(xTPRw$#r(LqA>}a#%GV z=|=z>j=P=(*b%o7+`xVM$piiefyuQEvo@njhN=Vt1j#nTa`Uz zHWM|Srl_B%6EPxv!JKo~U+bqrJpiteXopr`zVZq`^UBLyo_3$B+=6$=c1LG~I$1AM zoiKK4L}UKqM{dzd?|H>Vjt78RCVbv~|Inj9_1a$@An@MDX7zpK@$6)^;A^7T5{(sB z*N>TgFrR6;cK=JJqDRC_bkxbi;mh;DP@5a*zA{+RKm=LC!j0Wt4MoVlPlH5HToER!o4eoH7r7?Nm}_^(B2|zT)!m zMHiXLu|t^bTLeU6|1J}Oj)D0Vo!Q|PymGeMkL|7P(&f+enWxY(Ke%QD4xqXjE^YzUW zd$!SJ?UQn&uqvQN=pW%)doY|>ap#lwN<4H>?e>P+1s6}fgR=nR58r6nwTUYC>zs7Ym2BJjPWr%BXEDUb zyq>=MyG`W6usaZJ-?lj|eE3H#F-Q8HIR3po<)IbfyCg>FLHntCcC!PtA1Z>yRqbK| zul-nn)x0jwz6du21vr=d!c$JP`h!`QGIjWFVdZNZ)2)k^x6F(`=EyYZ*rTjYy`8w& z+D@ez_Y2%M#0`Z{WZYEbiOeLKMo)rRNHvQtzX_>n!=-C~76DkcX||gE zw}2wru5$nO<8xc)MvokszH;8#Egp%obdg?3B8@FS=cSu>&%a)fT=P;|-C4}Z)ID;W za%7mM9a$9F=Qs4$z6Hn4b#HQ|vj<=(ZIq<D*sF zmtNJ`>^_I6$H%jSHcyDQg$~1!aGa)&SZoE4LCS|FbQG)H;l~`D&cAVdD+#Z>@J3p- zc%9u=qUSE`dRXE|U@}Z)ypF?h4>$Ud1Ji{co!E*;ga*)_Q;ec!xS4*r4se@3c=c{h ztf<>;)F+Xha?DBTym9S??JT_)ELX$i zQzxY3^!*uaX0^7qtSwmOOPh99fqI|!N%0J~D_;X~e)!8@cup_o^_2gk3brwut1W}~ zqk|tlH&pqi%JgeG_-uOx-u1&y_#qEEe3BS=?ZXG)8K|rZ9=s2#&ty0Zdrt}ck4g$J zGZeVs=5Ojbg!aSK>}<6_?e)o@f2U#D44^g~?YO4<;e@RDkl7(J2wOom{lz#Je*9aS zx(MQd^}577Fx1pxGlVV0mkeouKk&5OMVyZCPxz-r*t&9D+3@h2(ChAh|j-9nkwss1C@%TKQG0qs?z)h3J8KL{BT%)Sl z%5aTGD8HFt>NJ&57=5O*d|8BOJc*uv0mwGS9*y z%^-V0)i*vNez!hFa{J=tY1xK$+y26|Dd~hm53#;uH`~%{(rnYaaXdewb9tB~r@5O~ z=$|cG)=e@McULw~yJ@0n7G9sI1F@LcQ|G%AZ@;!ur)MzS>MU|pD^f;Qv0iqWuyTy@ zHlB3QV^P&wJh5;JS@`hG`Lji_=xQ&>8ha(YO5JowpKRfZOa!<=_Kh|jXOuih0vdV9t?a=ta#{|$7)s! zR}Hu5Ago?9VH&w1)cQAHcw?xWuN6`3y<_&P>^gzqLDQCk4j*3dYrn?Pt!eOZ zVP_y;sy%@2ehaOesr|MT_JWM7gYl-&@6|~;^r#cGqxFTFW%!u0F1JVMYQeLQ-;rKj zyG&h5?YZOV@B{ssjgEc^FJbhgmR^oHY(gG+!Wn781s};0_g>IB>>QOCJ6St+_@jfi z_~PeB#ak9(++@cBjK!B6;}1!T^cbxaaWY8vC9&;WH>bHXzUL8Jpp04$91lD5eyPU> zD`3&W%Ub&yj6U>8o#N5nO;jHEneuYEfficu>GwNxVHpG5+{BbLPj zGXA6o4{%wnlX;ac0o<;KB^-8!3PaayEY@L&8{kTD{OPPYkOlY%*jLX#$MOXeJ#&P! 
zw3}ygSe9o*(u=aecfRKpeC4Yf)7%$YKe%I_yT6w}GGL~5r;0v5g!t308Jjq6?)?Fh zHZ+KGF4`29fV$48`(i^Iul8*rCEc>1QkkmPm9`^-?{~BoR?{)5-3GGMF?|J(Ep}!>7vA;Kw)5o2Yx*zp=Xu&M)+N~W@GGBP*B`t1e zwRBHC_T)6?xRVFo&62iiz7+-{#uCck>mXyQhxlUgy4TjHg{zlz!_E_f1XH0H4&&jA zWuVqq&wlbnIc+J4%2Zt)uItTyWDj^?1#} zHLKw4BGte`=ECuXZxDvuBv%`R7VvG~e6TaiwmsOawk4Mt_+5;^gro9vNAljRG$Xj`|u=~O3;p|@_; z!o=dQhRK(IIvsS#;WcZY!KZ)Zt16nFZv1s+fam}FEwoS$K6uc9Gnj+Z9^)MXp(*{& zCFfZ~uy$29h+NjedZ%OPU+GkbH@9uKxZ?7$Nn>q-(!EP=gNi*Ol2VNmrn$9jpyI<dV4U+_^L_q_l%j zFR%hN=i$2Um`w#1w&B-oMQ3AnM2#z`iT~~Z|Q!ehAYp$WWdFiT7&)7 z`rzu3#)|r&E{xZyl!q2RW^Jz%{vOe}Wo1K7qZa%K0v(WVqD%)IS^cOWH2jd&e$5X( z{DGWAMFw62VfJgFqYuOTTmtHRM=1&au1>}wM;@iW*_J-x}-WNu+aM6)6cYYjyYs>y7<%+b6kos8?m@snr*@+F(xu0j%QzLv3MW^ zxXFc^Zt&F3bl+FP=9Xve4`%2}g^i2@nz}Xy5Jn+S;`_G z;*b{7?s+h;l}|4Z^4>V?;rL?l13G@N1N;R0Z06NjjpT9c;I(y_Qzspp#!ll^mAwzF zYz9e|6`kZ>|4liHtkN)pVzA~*vL~UgwLPwgy(di)3dXM zeVm1nUE#-#IYP%Qw&Rh6U9AanjK2Vd56&2Ti=3=CFxVH3RkMv^g*A2W#-jn$`d6NH zaXR`iZb2Hj^|4>PtM|hiOYbjs(WLXz#FNLh5^>+$hZ8$ig8@fB&Um_g2E}nl9Fr!T zG&Z+`rjDs4>Kag0n=7`v)nc@dub|WqyIOoa>q#Az*Z~Gy#@MjZY7dpJ-zYf)9c$h4 z1IG^+y7)C7y20y5hmG2onVfmBCLX6cSM>|R?pF|->-X zmVF_(=5_J&Z~8`yWd}p(V^%&HFbLHBrS2DP+!rxEXp~VRCJFja^o#75UK0GA3I-b9 zEo*^v7X0#`w0`m(the=*)w1A%C%MJ1u*O%KvBMw##O?hOKC}%@Hdjo!{4+Yysr{JA zQ+NJldYg}VnQlp`k=wU!N{b(D|BA&S7~NA0J@CGBo&Mq9>?W2;`q$-RLDltRBJbzJ z`yb?5+o|8wNrOzxRxc6;&;-0*ysZ5i(xLl*uLTy54%YDD31=i8gVQ~Y$3g5>1T{{$ z+%huemPj-FSU{MLO>E+eKx(qAgL3Gy)1&)a*9$*er`uY1@qw`AzVmkXHAiJ-LHzeE zU!CT39_9PgSrhbS>7#ObA_X4O(o9;5BlqOP^Xd8a{Dapt?`3W6{bKQ*#gs=C^GWzO zh~0Wd@XGgLM~gZc49I_h3bld?K`AsL87G*bERw)=sduxi{Ibj!P`+piomf1D4?pzG z$7Wu19iRo5>6HbRWay(q9Q+Z#r9nKSbK)&+H7!P=hm)ro5=H$ zx2LY9hPFQGJ(JE?`YReU7d?@-#(hf@U4vYd$y-yzps(wS(WK_3O5U3x2H= zc7j*a1K~1f5ON6c{T5;}+;0nDFUa`eO2fac({b?ON2jUR{aVfHi}Uy#I~g^wle1~# zIv@Piw97taobd*xLtMr*H6xG7{U%a=)iQ<>8BE-wF`^a;)c@`RS6q6 zj$z`4|Dv(!;3u&4@VayoQB;m6!p3`QTi_wPE7Ykc6b`M^cVRuK1{Cwj= zekgCi(rtP1bXwZUgB&;ORt7mo$oz4__M&U6`s;1I2VuM{^(W-aWD{edX;18~7C7vv z(LJZW72#flc-4_DWiFSM;mR5^%W#7@4?X4B^jQF*C*R5!I`hiniXAH){EXWoi!jD> zKX_r!?PFUDupCD#PD@{v76KZ0nljONA#ON(1$M<}&(x#ct*4mnV2ro6^B7@%l#nkD z=Y0^&>MSc9K#-bcAHW-ln zqko~7<4_>`Go2mj6z%ACPgqo8Un<8_1k^;=v9ncx$KGy#f_w1(pc?Kk$XshD1t+p5 z#7EA%HXYPCgo{V#J_vAycJ0pSypMMGQ@>2>Hg4#fJr-JbJTapsJ3_A+oO#+LYY+B? 
[GIT binary patch data elided: base85-encoded literal hunks, not human-readable. The span contains the tail of one literal section followed by the start of a second ("literal 897016", i.e. an 897,016-byte blob).]
zAdnA~YWUA0tEED9n@aL#OEe6g(nV$|qv=?p(+887yq=M5!NZHMDw7YE1hmvhyUHsa z;N+KO~^5Wn*aQ?y5$C2`S%W^g9u3 z(e3Kh2r7nJIFWD}wjPd`g>J`pbg#^`(xqddPQfG!c(|2m$!wbrLSRA9eR#w&BSCr$kA+Q zVF}D6LZaFOZH$rNjWW7NY(b?udV}f}@odwP9gv;NCub7}kA~`n1{10FW|CwHS-eGa zDn%NK`@u|+<(=&vEC7u&j4*!CC?l>4d4%7RMVpr~aldV{>BD#`h{9kcv0G$O50LS_ z%&f??b#hU@cjp^aPcOC+C_3Jw0`-$qdpB>W*KwFp4L1;8~)_nzzha;E$pv`hve2)J|k6#bYif!^vBGWoT^Q5b*I*@AW`?D&)Z@ zR^}f{EA)H|qt$;qu?c8jgI+)@<}oYzWVCsp4>arOqfv?9BU91$XeoNKCUGTT;B!n% zoNRD-_r?P_z2}PZ(gzP|2F@P*q=q%$?SF@6dn_A>ihyndn-q`tZeLkbH+ZuaL|+c#rN`EW8W_SSJ)^ z*x0hFCol9@B0oF*CZrtOAhMLbjzVakcDbj4}`{BV;m zTQ9LxVa@!YnWy>bRTj|rt(VcltdB1MjVoAVO$t}L55>?x z3pVjB4YWC1&;2z9y-$7%@b?ReSK&!M(yZ{st&#=&3~QAo)+}+?_qdY!NMtF&ooo9; zodCWPatdeU=TF2bkxxRp;Q4O#8a9^Yy@nkEtu$Go^0dI8rgD`|cUjK!>V#q2{dV1!<0%05OnpN-O)vTmNv z(0yp}E0~2RQjP5>7LjoB?ddNwD+7%a<7-&Xg<@Hg&)$ZFW13ZJF^O4tcbr@}II}Tb z;aRhE=B!Gf^MJ!92g@S;kTF_js-Urtq7NL~k=?0DJ7`?jSA}>c<2QGg&5Y-WQDq}T zhA^I`#Qx*4a6ajxEaoSCUQ%RcMW^xpkeOh*SkC%hbs;3Mr^r%*A31vrPF_6CWc3c{ z3SRE&hcOgV{bbTvrVEJ`c#kg0%!*E1Cluv;hw!1fg+=&LEXKpsL`XfeQ)DT_{d&Km zk+pjM=Ng{5jOsLtpD;_#?u?ArWTus_m@R-GZt`IpD3&h7&j;Fit9L^(==fT)R1p95 z5fO&V%6=15Zo;ZP`(=5w)Vf7xT4}Re;B_%44c_~W>m;(2;&I!~!Of(*OeU>#+u_Rk z)1a8Z8MB|3r-EPmHSGqEw^-0*A~>QMYl@p!(qQ*$?taD+m`Mzq-rdxFd=I}- zdRUcc5rxT(>aN~eN=*@W;V4|aeVw&RMDM}i?&8LHmJ;)&Rd$p|&&NlW$}Ok-GQPaCM!qzlUyhbKc^P0-AcS(?%ip+m<*&Dt9OQ`%hTSQQrA@_QlY*XED+=>%!aZv!)9FM56?Xooag{E)7fn#6nF1T#g6 z8Jjm~01-Mc8@La`W zu<3Dn@(H+`pw2Bxtfs$Yu|WN>c7ZxqOItj4ZSd&nqDsQ+;NOdcSfS#!0DibzV(Es9 zB@C=r2Z9g#W?M$klm5qjABKwSO*D6PQ49Fqj$@)jv-^M%(T*Gw}57~fAEW4 z%og^njMo$Iz_H!jOQ2hL0C?6b*{bi*S2P>QX;MAoS$Zq*_5X-vqbc*ArQD4}ylyV; zQqutf2R32ZlP{zKS&mf0$XUHjxz4?-SkHSXuZAddO!r|TsSLk(BN0wtJSpkpZzr(4sFB5u6$7y z;g!--K?S!?D5w=G2K*lugPE>rUxgmv*{nObH3(p$W6(@`J0+R%EFlJsdKsLwrrbZ^ zfc5D<36?0e=2g)G&dM8wW@Y}d0f__fk+GP>@EeVWdH2=3h|DYWa^#8mlS#=4Yj zMp9BmWt0vdJ5lKyhj!hjn7A=T=~X| zyd_q~*2Wg1`bLT@CHRfoH(`4$y+TFMyuLn`s_eWR*m{iXLamATx2i~cCBshsHQ+C7 z^iY=N|FNDs?kdygT!6ByhaS4{%|C;3*Nd|6T-XcuubmQE%5cBlFELpK7)Ema;%^ww z62j5h75Wc}bVhLJ%c|vd8T%i~Ic=#mb6|jjl{BG0; zQ=!@|D#FdQG+3}bT9!fY+;W*|rOj@ES23rZn0v@e8k2x{*&;kEBL||F9c4271L{Gm zU|+_277;$Ri=4z28B=6tC8F7~H0`9`iy^mFL#=wQ&^FYMiMkpX8LCiR^+YodJttPUR;5YB3!hv&#nXFEDCVT6~WGPk$3pOsrfF=C9iqhX# zr=jd5VZRiL+JZ%mgSz(zPmfw6O9{RZS6byz=X!0j*?uer9%N<6<^CAQf*OZiIZ;KW z%WZN|Q5&S3%k{sq{>w2x$}AU)iffP#85@iwNXM(LA`jd0`BYq&% zf2p|+D`j2_yocFr#jLM)?Vv-q`kRIaIk~X-B=?!{tLqIB-8BO|u}Eg&pDtSsIk|aq z-v7gox2D&X6ItnJw*bDr*e z6PX4)=WuM1zh61WthX&BLM`Kyi2)swl zWoBt;%ul1zX<{C44=?E6GDu`8!&e{Ng#MoVlQUt^Xl3$AP}==_uzv3rS>_33Y?7Ik zgnnBrC*Cs{r`W_p@+tiI`aPRPRnWk*9t1V*z<4SMdIr`V+A52>hm4j$$k zwQ-5dii4Yjn3#Ee-#oPyGVdgnSVl<`R>Py3{CZ7g$#b$YVbiiXjAseq;aLZIX+M^< z^>=_Ja;m>!Q>bMafNSdBDJ?e9|diw-ldy^fb_xGp)4QEkI7s^b8px95++wc8)B%t!@07K9@y60VsM!mfmm0&cPP?kMC3@ z=r^nx{)EZQ%0NGU?0Ro4PVIj41t=W7GK)n{W*!_^%_s6I>Q%3CO=!`DTQlRMFTkba z*HwsTGDgno#b_jGEW-w#a)J`jvoVdUm~KSZaG>A>sv#rmA?!TCeUutE@Pn>x)lOuV zZd-*5DSXn6b@>mDH6E+3liu>MkD}EHw!4Aa%OBfKH7wb>60&l#nb?tC(P|~no>>a> zh=b>kKuX#zIrF!8i!|m7IZ;KXt&@w2cX$3J7U}V$0-%uFdO-caNU(RS!$fab@*QO3 zhfftmPMi(ZwCBIQt4h9Aj;5`MDTKVhw=SFM3xf# z%T?>JqtJA)|HO~CX19eBh0muD8kiZOm;t--x}`{b^hLOK={6HH5RZMLvuw=)a)F|w zqa=7kqAL^mGZyxSSYUF2%FitjSxWG}liO zDH`*UiHaR7$A-fo_*HGzUu=mBeDH_2kjl>Vj6tZX#9QI)OmRk6G{$k~APiWlp4 z{Rp~l+}F%z#Uy6o)otv+w>#UY1{wF$V8@zY&5~y-Q+?m25QGPDOVAfT=K8#;k|Hbl z>=wX#K`sEezF7qIU0q;U%f>|v#n=*bdrEzn#iYI#84eD1%pIRyH-|`jkO4n#)VKm% zMOpLAk!Q`LZ2^3KIw3b$yi$xyk(k6He8W!%SbnmKgiSVorPr&02Ly+;Vlt5k!@8+; zbeA>gL}NcfBUd&+$wyBKH*cLc=(K-vKV`0m^<;C zj(KZ~0waS5gB|vZsv!^eD{efpgNgO3SsMm)>C1SQ5^M0Rcyi;B;3Iq?%>M%N^VEHRMojsgSRDgSE8=AVr^4RbX7B|l 
z3w@Pc2X)TuftV584s5hmCMESOKr8&5Bd3@w7e_~UY4AYCQ$c*QW<6wN@ktEI{a0n| zY*n;}PBPw-<(k+4EhjOXPPu#+4(~Y6^3xs;??yElue1b3$CH3|4Y1X0>3T&czrq4O z5DOWz_>}UX5+px#0=HOD;EEMr?iN)m!FvvC3v@h`1T%3U>Mrq~N@994Yq3ts3C}7W z*fI2}`OLkz1KpnPJ>-Me(fx;mt&O_DD{V4fyk&)4#5835ATuioJsZpEAeMDf_*kXZ z9gFw9?hM2GS}GO_W`m5sRFV}MNI1s-~V}?D3UtG>G<~_ zk@*aKGG1qmFr+gcV4_0+1iTNhHf{?mj+28E3=bW`c$N~|4n{-Dotv_luTVdmRLTh| zB8|^2xu(4)maR@rFX$W8TVyH0w;tH3X}7_h`hlYzo69KCbu0Zg&|V4&cHljVmw1(u zv`#20;BCW)s{mi9kFaR8!c++PIS=9LfrZex5A#wnT(;=fxBFzYhWrAL^0JL`R{rt{ z^u~?WCiX%Y)074oKIKh$LP3@?@|9Fyq0$u0nwBOL%mrXt=eFQrQ$x3w`To!eCfnZD z7G4_`&Uh+_Z`YDZ`=KoAeSE((CJQ-HNoagW$#(%wVY(fVWvUpXQEx4*L%ykt<-nVP zI>`ma2&D-aVa!xQBqu8mb}Zq2Z^Pzw@MIwW5=wY_GK$vs#qU>&iD-iO68?QGGAj{I z{Rq9|c7`%XFF2`uLP4^7k81-9{~lvd0|j{nu=e|X@X}w^zT5rBb%Mno?S%Vx*t`9E zd@nx7_oAp1GnRxSn2!3Mw?(eKtsM--356bT@WK(ecIO5Y9T^%9E>84lse^@^m&wA& z1?~%(sg(Zk|90ZrsCSHjsaR&#M^S(e)vihS?7;kuOIY(zV2p5_T<8JoaFQt>U5k3? zeXSBeFPRl8W()j?#bRG1ngA5!Dj65C8r!+lf(C)Z8SmQV^Ke*uJRM6@8lpqje&FA{ zot(w&jdrpiD_!~)z)qHyW8M^VdN|m_gmx`OmQp-A@hY6Ub%)6u+ow0wtyPopEG06t zbKtvm8)Y%1_@J?2I^IA`2Q>zm0J9>}^Oqe#=O!LfVG< zLTw*6CT`2Ghv5F*Ovba6pbhy$rgoLZ)J4WbnOO`y68WvgYyI2z1~(^qwbaAvgZw*0 zgW@s$M<`Wv;6xmpPr4|}{0Z-Wip;F&G`?#zE@dv2&W=tH5i(eL(f1uc2np9NGr9fS z^#%`Tcg9meWaebS_Tw6tK=R|{4~Pn7qw^mM!oyDyuIle4%3iINQ-UQ6Kmw?l==n zr%TasvZ{eLXx~ofE>@`H1AMPCROrG^uNKN_qe7M$GG0afG#M;!Ah)LmVj+q-MM+Fz z5x!||EX(iYY7aCCPzAPjHl~i5^+x&csM1jma%keqjuMSod{TKas#mWDy~na~I%H?& zg1oU_g<_gSIyf5*Fb0u-crq9@(8c|}D_)?ZV`-DN9&k4Kf@WiGzu*w?#$(r2!mrXQ zJIa29e`-5$l?8sfEShZ@UB4;m@X(>);^_SJTxMMTbv>JF<7tQJUKc#j3`x(WYQ4URQ^z`Jc26MNy*VK^PXkMS%a zx(^ru-VOX^5wy8Zox`mx-AbEm0c>-cHLyXFldTO*>Cnn#dKJuLDX9>f%oofex_5;p z4eF^7ZR&PYE&(((6enP~w{CV6Q@v!MU9X8QU9JUQO?uaE!2OYVc@OwKA7 zYPP6V?=xjj>`j2nXZQm}K_?ttXrV$K6fX0{x_OcLs*#CjRKKU*R=MsaqE989g<}^^ zvb5b>1wp-3I^bQj^vV78UsR;RoS9egMD0BEDxRW_EfCq@36Y z^*TftSjv9=L#9E>@r}5+RkXUbdfsQyx?^`GI;!K+(9m(O!JH4@73H}hF&+Qj7nxRE z_ZA?30Y6d8r1q_#rlWo<;a?p*CaNpMUmr14!?Tnzcgr?Nxtk`7`550Pnq^oF_)e0; z4L!3a;rF9Qz<*C3Z}OTsOftO$PM<9wWkq4?f1q!KIWjHHgSrDzMpsb|4a7}^sTPx1 zgs=bc0LxFeVbGLd##2EYk&c&oB9jfsRp=>W8DzFFuFKOWe)p2oFzUb5eA^9mv9YZI zJx8~K4c{C#unK+Fw#+{Sk+1eJ@>C$hXY_&%Xu_h690L3i@s$ECqwQoy_m*saAzwC| zewdvJu@~Z5{-A)s$IX44()U9d$j28w5&VHN1!X7*-)VZGyNN3McL`N);vxCJtXm+e zDa1!~3D@vcF!r8_gVg(XWKmz^_d82wR%Y62vZ(lX?-MK{d*~AIxF$W!X0COcL+xfg zS&JOmw-pl4A7?yEh&~Y$!Of$#EP{61={J&a}|ixAiPj8Q8ZO)Nic$masIXCFm?v8l*}r}?uxVu{7m9GSxge%?}IY4lF)3mk;*)7V3hKr zC*8OTdyn$_-od{OG_LEbTo(ykcW8?&HwPJC%goA7>olSGpO<2W=<5qm*&E)R^5aM1 zCgz(f=6!f80dAj*5n0M`{}A%I=dflzxn-#Vdk04tJZ73K%o`b#WTutYtp(_79Uw{{ z)22CiyKuJ!@w%n>x45z5kzfTQd2fvxZmC#J{sr51!qr=3Rc7!dzDFAK1xrynVuqMp z4nh7fI~%^zPH@C5rN(EE9EL6$?s7hwl+(LYrhq!VP~m7%X+SrB@M~RHWGTfr{*0^A z`P>g##hJ2lArEgq4>yxE?>ir`Ml zeTc^MW>v$;nY~Qy*4GSB4SMlDQX!%ZH2v;78wa=VYwo$2#4Mc7s_oXIGs}A}>4N8l zE0-A0Qld?h*0Km+d`H!qhGY#o&{*ciC^k|Q9n?Ay(7GeCl;FQ^SfuHXMs~vitPXJM z%2~K@JwcW{51+*pnOWIsohBERy;&+;`QOJ9v8A4bo!{zG$eR8^;L)f9lf4gvSEM8- zGM**GpiwV_gLe1Td-y@AVToL8-V(O}>u@R3-SnV#CC*&hgeeam!s@fU6K&hr4|=y# zYaJ!5%ruwAXS5;aWj>FAROOQ-ih%9?0Z1I?i-J6 z=P49XkzEF9B%#|4Sx=S0_(7vgO{(5erl9Nuk@c000jO@OAvNtbY&o!liEUcXAKLo1 zVmwvE>ex-PD0Dtl*C)xs%agWFD9W?-OQ;)XvNW1sDke|AdK}U&^G;U0ZVB$!n_DR! 
z;Rko!!mlk6$@03m)r5YLlNk?*Cipj8W?E^rEr1>EV3gQNlp5Kx3HZ3t*i;|*;qVE_ zlLivB=K#y1RN<27-H>o2MV9#uzCX8QrUmq?n3r{WuM8OkZ$as;e(?w-;j%;w4KAkb?iCK9Z(rQZ#67>BA?%I}h)1zH?8Vb)<=pKj5AX zxvr7;-s<;)%GxZXv8=UF#0#RB#tr&XP3Sz@g2Q^{4 zQSO&2WLF>=KVeze41D*}(~zG_a}Bl7du)K_xmfzyF5DN$x-pBDQdsnsog2w=#^L+w zD>I2k{bVxvO1zel{SbDY+$XY>;SoV$OxB}Ek7_L4wknS?EhHvjJmI@QkyRtq>RXJ73(XSU!RYI$r z!WO`X@;D+J#pa+r6%VeT5m{<*e2M*fO=Yt3a&utg((f7162h}iedvMb%}PKU?fOVO zE2*djux)86ucs(3teGD)^QpbbmHis%a-|Yh@7)gt-b1D~!~Ehe`ZAKxz&S~O(id6GE^?`?aF zg_lggY%y73fCmhBtQVY#OM;8X_$}-+x+87|c2KI4!glYJ!giyA)A+YbsiJZghVMf* z7J(0@)3PCcTcYxkxqCT5#{rF*+{>p^;HcIj#L}2UjKF;r64)S(R-IyjCRX>vt7~F6 zn;QIyNzB4&uu6kg5teLQ`6w%gZiN~M4(McR-@%LcU8t)@gML!5jI^_Vrl_G7dNd5^ z5-PHk;LCTcflNG=O9a|SL4hh%Lq?X#N$LsIvt6~WiXu;aD^`@6Rn2nzTdr2(?jL2A z61radA>imypUK*^><7rqxX*Z&5F@6%g)R)5NATTJ$?SOll}ncT0+dQ&l}XGj6;!L& zblg*sg(l{CM`?Uk1YREQK;g7i0<8vpv_xa6pc%~vD)peO8KOJ~J3FAkz5yl=><7Ny zbqr+T@0<;r;l>?xt`0OQ`_rVcOa~J5Lj7H4s-|%eKqi9jqFg#o*1a}cWE3x3jIW<_ zKqF(Y4#&yTM&gY0ELqp*_-v_NT9-AH4^0-B3{iE=qLZd9HDVI8@R0FsG%L=_K94a= z*3&WthqZ);&H1~B>0iNTR^vNJa`3i3z@IsU8{qog6o^ltv7cI?fopiD{!D0gPPYA~ zecO~iTP99Sq8a#WOmW2|reLwyBxuaQ5n!kFX0WLAxp}#;d^=z4@o;v7&^GLoW|0mt zGws(g;ZjG@@08wmQD=GMnvq?37s#r=_!hwDkiNuw#S}MBY=+D`S45T)yc!0aY1Csf zlSLDYZL8)oo+U&Bzvj@X`#@Pl7D%2EN1D6>fHlsxw;xv*y8cL@`+Xy zna{)qa$_EIkO*`g3u@V1&KiMb43U|N={IG0gJ0|-vy{9&_n?+I?KA=Mg&}mR2{IY&wwtOO~YRGy^WINhBKxD`ek);G*w`VilN#~0@SVLu3(t6aN;= z%&J6NClrqFL$$4tR_DVaOEi7m}m|TylGNVRpCRd81qZqIbZ$iJT+e5$!*a} z!JBc#7rQw_0o7+z2dG`&U6xerfEw>_5++_HW;m933NGKeDzcQ|j&=^vzdfJt-@J3n z@!*L#CGtrKMV%hqpC1~5>{|Hu0$n!)68hP$D?49+zdrs8m2oKk(dvy6B6<%7cNaG! ztpdSKEwE_QGA4>{Qdq}6jHil-Ikg|sGMKv^iT(^9LcZjtN?`?&W1V4CkQ-5oV&E$e zZ0L6F@CwLzkXi~6B}-bv)&UxI9nTWeuI*hL)y=w_p<`&vcA90+F*u@W!W3Hxvs(b$ zvK3}#T~Dx2T?lHx%+6-ZHEtbur3A0vv=%h&RFBCzwCx-uow><)mJ&5Ew6}?8(i7!QeTwW)pIID7T5=z9n|(m1_kTlwir;uZzdw&8i=a;Yn@P3vO9|%0d;ooi^YDFp9@#^ z{VKAQ;f=aaf*N-GrCasOXFzAps)EQVZ^)wPfj?qS_O8#=(Of|f=(mTC zv%=cj+QRgRL5!z@Fk!BEgmT&Yp+FBg{jw#qX+w*z%h!l`9o(*QA355}AdAmZ_K{QX*2llQ#32C?Zgftxet*X~h zS~a+fepUxIoslcC89%e)d;&5oigGok#RtU;(I@x9y@&TjmJ-~{-4nVr4`i~Ao;zW$G9sQ64v-^5!kG>v z8slH2%q(vj?@1>tLx@<8zMXqR13b+|H7wY;go(68^NxHp7fWCPK^t>Sn7w|&XH9Jf zU_lx6%BuG3tXCXi3^Xh>`ke~fPWGTUI9o@kvXcUum^I9St+_{eNEFn7e z2!lpVG=r0u@q?q^61pV?u0(mU4gImWYem){gDZV4&3Y@>4#$-FznY*Pa0STPA`!RvF0S0zd702m@BkpDUZa+_u(X;_yb(^wNBFg))=yZ=P`0ge8SS&)u zwGmYnhT2~JEg1{cLd3N0Kod|2-oK;xiS_M(8GDMEdyip8OPe&E&#(oIqJRD&rOE7b8N zcK%9Nj8&Igr97mCik+n=urZ(n!>=%_E1;Nj&0p}2cn zf_6xUNxVW5+aM2-uf=QWnfGDunOKpf3?CNEi&0QeP-Df;waVm^poxRWXwLFQw#Fi$ zRB7piy@koyLf81P(WR46g4E<%cKFOumXci18rJ5EJuHC*L}Abq?omk6KC(b$rOIlS zh2r|!zRe820F|n?RkX19L8Mv4mtysDGw#5(BdbJ~GTcAp75rH7M`zW%PnF59VPgwJ z#=R!XBj1@ua@cEaGLeag)*Uwyw<2R(Q-}6CZyF3J8?T z7>VD6Rx-1!X)J>q>U>Gm+o-M&^ytu4WGTUaUb{deXL#4a8lDOUO-N%-ACN^I!m-+J znORk7K0>UjwXC}JA|E~G{CYX&CyZpIezDEj%OAX2@m0|iaWQcIUSxWJXcke-TBJ(1EzJcwbeG`ABdX8O6 zgq`u+#R?T{!VhP*?IR%Ud_@*Zr-t2; zna=^e4C{DNtmlE%=OFuGuEBb6ZIzCOoCHa&xtH0_-_&M`Y~A#vmV-P-nWhMEF~rk(yac|O46H_%2kkm z2FjpGg%QSwjZ(hQbv^Y4>^ic?sDqGzMlCh&XDopQM4`)EJ{i6ke8{LOPFN=tmG}nZ zN025H--%VoN=pWcil+zohiW}4)-GU15{uN@J32ww$f>dnA7o6BnO6FlEkGRyTcMs2 z+zxDTkgFGb9eWJqh)!=t4Og!2{e??S?dADDm5OlSQ3K^M>sW)@o)8w)sZ?qxN?!AZ zd;q?1do)~5xmhYX&q_>@_iEyVQG!2UJv1|PDb3SJnvgn(35LF8ay%v`F&n3qaoR8~ zf?lUgnF`g7dYKE1SdOy8m!Dq0yZn3suuq}Ys87WD=49qU?Ai+=O9@`XraJT*)0)Z3 ze2@bsq7kw*<`5%i(Y7cJJk1^GV@ypOYmK0n#bQ8*P)&5a`5TuM$lp?9LCipc@!0m} zzD?zvFXH#1uFNbe8g<*Eu34f!4_9{>)V;sRQi9LhuozFQWVaJWb`4h|TNaXop}RL9 z*(tM=k+EE6R%Kehkw#T+wTe}@kFYpwsV8yQ_jq97W96lE_h}0?{ezg?gVF2Y+U4_% 
zX9*EL_En%sh6J?H&ibrMjZz$mZRL#D*hkyT*%6`wnirSFV*ZT>4}s!Sh(NI%dbJG@ zSr+4GuUv)Aaff9&(Ksl=A(>K$Kxu3f3PtiG3q)yv(*I%XD3*mR7A7{Dvoxh8=tQd5 zM^KR&xFhu*cp5lq2$m?yTygOu}d`@HJ$)o&X7LtYkDp{vy+Si9gW#ZDqIOesiPVn3qi0+DHezWP?brpEuH9ffONkl7hCqF^s7UZS4xm^EE?qB< z9Dx^DK0OBaqVf$t9S}*->y)Xj^ADLCys9(1vRYIQ0tY^{JDat^{o9!kzcoR51#IkU zK=+aKNiTx*+Ye#eBK7mRk+`oy0vhxDPU49qv(#X_!1}{#-I5H-hISsP35K_5^Kzvt zf&GF*z}u5NAsNs&!I^Q&j(AH{MH7jrzQaV85*&}{g&b*r%E)d*@Z2Zz08|sJX-lMCmR$6urEIrBn zCFpF`Azg!2s(}m1*I@0w1G3BmI11P+RU#OOhw0wJAI_u#jEOObvD?sj_FT}1q^B- z^#bFR0fB8!=U{aZRSoGe5Xc8gC1hr2!b&N)R&BRh(4&>w{>V_~xC=)i`Ob9%Y54sf zpQUsI@x1(6XBbuBPUC}1U!dh;Rnig%})g6yPqU4NJaLCcjR7U+C6< zxGb*=evo>~Oe?Kv0kSF86S*&R3;<_4@@3Tr<{mi(nHb?sgg2uimG8j&2zNT0a610G zs87$*7$E$Pi7e1#r*_=7b8z!Y8qf*DWOVr^x$4yq1-l4cl;1 zp$C`>@w{Z~N~NkIL&L$vNi#90kmLHY9jnN3&w~45_YTMU}@o&5Bw`Yq?6)0{@4pSdW~z>xU9$ENvq1wqfAp*@$J^yy8cA zkWT%P8W=wL4O0hhKkZCFHHs^-6zm{q*6@y41t(h@n9`xdV@WY{8(-2WcSLJkST4b!cFjU=z7zKPya< zkAL5rNcz08K6=t7#`?!BI%ULFBE3ep23vbR$XotJjB<-ekBBx<*N+zE8O*?U(t~k| zh+h!BSwsS?JG2ECGaiX7C3s7prqH}W6DDiZ{%w!%DC|p}9GoCBB%JBEwIB-3fqpQ! zf1go!p)vkxR`6H`g#@;HTNKAEz&$&52j6-P6>7m38zUGl+qR0ybaixrzU|cZO$G&* zQZB=(D`yR);ddEM)DsQF^YmNjbn-k^SY=)DCGkRe2l0e4B#hTfi}*GTfc9Pb!ch!8 zD!@=|^ZiKkr3fKGIihx`+aLd^2l^I2DE};nvP?vsexlyV9a=+8NAiHt2NoSa3u#%| zO!&;mAz){t_S5p$pXXy|QNZ*dK_inb_)N%rOK5V7U?Y)NhJ=}#=$2YXME5Sx#@`oC zCMFs4lEot7MhYGWLkpViTswG8|gC8H|SXX z&0fgP%r%gOzu6D-;4fp@!a2Kc4naGD&mOuWXT5`rpArr&|s^YK8zCRD?)pfLF0+E+%g`T6PyX@cr7}|LNIM}NFz8Xrs zCU%nn6@Nb#iRTHghA+?aR9~fa#g~y*H0mf7s#ut7y_V2zHuH>9qTx|~F6{o#K*)Je z^zzbf>#tWGGj)WMSvpT0*$aQ2In1oQ6(Ca-zFRBGsvP(%R!3bY@UOK&r#qy7I1 z`MH|OqS-R78P&IdUmGuY?eoFP1>e1q0Uu08x5k2^aY(a{KJcIQYR9=WnVc5<0Z@cR z0jU4>!)GvC;1#0y9?&A;W&ERsqC#lhusQtiq(2!*Wxr$vnfcATaPwYKU+V7S{&>}g z^Q);m8tLypybmFx-!b%`33ey-T6O?{Tn-;oFCP@CO zGnd$8f_T#-1}mLRkj%}8j_5m?$g4DYE& zz~J|dtUs@cd24#RLbvb$k);&BeJ$NOu~6Eg`UtyxG8Jwmso#5AtDXzA@8i!3eIY&> zPV7x!JWC0CM_U-8jjHI4awnORjRV9R$X_g8JAL&$T)3{bTqK{O;|&h%C#r(OP`%zIzOOCxc z5~m#s;n!_DO>H<=sNt!iXm$2!K>pQ!VuHa_g2Bd?<{A1x?FQ~>-PG?O0ewpSd`jKD zR3Go&K9kyN-tXm~$0%>)AsETg!P#hhFvx!gzthEygEVd|>kjF&+CQK}h$bT5f=wo) zej3VM>ffdf8$+9>Z2hU!GE`+qynaQMD5ISDo0b@HLTGa~xt8exw1I!?(Ot6WL}aX$ zna>lg*6H~#CnMSzELX273ob4Zpk@X}440 zYGRRJFqVh>e_p~r-B&E|c??HAo$${H!&@~0A2-d3AI9sJB3mye!G$|%Ow8l~eW6ZG z^>e63b?U&6|MeF9{g;K1ji*fc*6{*6TYUzhv}Ysf=B+|2*{f)I=^NOd)zK1yBHw+! 
zat#wfTR}S4uM2kQ^Q{st+_?w09%jlSxp`5(3NTw_(L`dv#2}HS9LHxD{J>$~GIqxQ=67zEP3jfeNM!O7JD9JPZ>Lo(|xw!nF`6l7)O7~3L#O9Bj-+8*3I z>C>hT>iD`t%We(e%)Vr!(4#RI;o`AtKvC>efs2PD3>e=D=%g_LS|CoV1}g<#A&ORt zczix%kTANl>^+0-i+x6erz_iXC1)))?S$ioez=KvZQK@P-YY4|a1i_Qu5H!5gP;B3 zefZxwAHW&xtLPY4XWYoGH&8%_lNV1zwh%yK^{(|WWym=2^ib1|@#l^g>JaPFt92Kk zBYX7#It}b09$PEHhxp7eUnuE%l_yP1DEj>Orr;9z0#x~?DRBkKQlVM)pxn85rgh(; z6QQRztYwLXTbeL>{X+Qi@2?xhJj&03s|OZ9(*bW7d8!cK9xp)B-d}-q7+}>epFz)o zqckf}hU+Eyy?5sp#O~U_k_L7k0Dt^w5#v=9A}=ooW<<7!biPsS4eXpgErSovAH{Z{ zBm4BURFJOkBufQ_k)9DbR4j&4JhDeOpufrti?&C@)mugW(^Qw&g4)9$dUjS?tvi`n z(DMi8xk2*d@HyO3oFaTq+u$|hAd1patcaR*^qZg>%HrR;E_|}B=zC@0TYt;91?ZU9 z4`(hh7J}kP>|1db22Bez@>C&xC$(df3zAExLcRO_cvUJ2u zc%t|d{58H84i+Ok~fLmLR`w;NLr1+#MD?$P$so=B4|yUTd{V!whqa&GejH zP`gC-95fnsybZCMFN#$~o=C#-pFb)LJI$D7Nc6(Vj8|PMW)WQ3zX0;H9~3bZVRkN_ zP(N@OlXm6OX^7vqmGMeT>{vTr<3=@L#EYdBQ)cOH@pOp++S59Gu>cWz76PBW5R~6j z)G?@;A2joymr26->1N9V7eMiZ$$#Bc71;QlR?13UlL6 zLr%VC(d<7=xt4enS?oh*!v9>G%gU*;Enw%HU57|ys#;L2U<;YM3xe~j(ctB(wtA$Vox3hmBH|o z6x0kM;XMaJT~B_0mu*`O_cAQsqjM=a5e}S)W2Fjg+7A4^8!?_L;=sAXY-9ZsxfJiF z=+qKb$|{*0>x828H;nw&nSH)>U=JoG8BL3a_H1UnvLtHQ*uv29Gnh)6OeDuGWjs}c zfA3d-j+hl;)x6I{vQ+qE9DHqGGmqu*tWzHXdkkQ_ibf0^`4ak`YYbO3P}37M`+8mr zYNw|@_%`mCM*iy~be%V8^i?(n#o4%Y{RS)c`1UQqQ_-nX2Eo}lk(iH1{7R67^h$|m zByliBvro1Z+@e!Mpi_9Pptc1*I`Q7Q^6&-t>*K#zUJ*K%HR6@-Oi)f{9>lJ@z<8>N zJ`>4mRswW`CqBjQCVeK zXBefpbJ!Et!{jfCS?c=u!>jLo1U2x55n%@aHGE1lHus9$mVf+m0nsv|!=+m=qzA8;B4|m zIDd^mU zy>Z&rJVrk7^=$QA37a9D5^1Wt8sIMIk0u*&nz2F*7`+GX1t0@$flwv zOw3c<*Moa_J1?8HfQr{(@gkKN#G<&nN>={=IYSI&RL`#@LO$uRO4YC;Urm1i>(BE+MIZ)r=pH~tu|jbz@QGxp zptFCc>u~mObi!4*cvrKs`yCFB*h;HZPTC#iFY>}B1?hUGtfFo=(ecYI?E%Oh_QS8`tbBNYm{D8v7OE4C{tG*J?K+;MX>qNz)n+%Cm z{B~>E8NA%M)n@sQHIO45r=&tpdB}*uMOY6Pccyy5#-)&zlg)S~A?`nX0PA*dW=R`( z)`g&^WW-kw6s?d#tV^&BzpqCn-g8S@Cl@8Z^?$&m%u+G(#orscu96pH=f4JMj??;a)g@orX>E^h@ zaP(pVtN8F%O`)N?E90pk<{Z(ip3(s5%V)@*EncTHWy2=pd4N{1w!o7XaINVCeaG@a zTI!W_ID7D#@;0iD`y4{Xaf=0o#$Na10p+?z%+y#cY*5MXO)0{@>@xCKi`U{4PC!zs zdaoA>v=H98zo-iCJxGTw2X-)CWh5N!9bi;{Zr!F`+iPP?S^=I~#id(U;zT5ir5Sp*B5)SHPtjK@MHV5FAv0&*LYCqWNxoP#?T_vun3 z-(FLhT$(VfT~M-v5f1O!0(Vnyu>1ptPtmxTu>=*6puHercr=s*^^uN}c;=GtVn$k7 zGs5d#I`B;v#w%)&pYhC6I%-ISH=`nff@4_0bPMb{b^;Eb;S;j(7LB2CO>X`-ox610 zA25K3iDAdb&yey4US~IOsCAR}Gn(xAyzJemCUgtuR)oakH{r^uROJmqlb9Z(xJzY5 zdKPS+7pGJgP9h-kwH~re+L=B}W)h79``!2xD_)B}xexAV(bfyKK+*B~Vstza60co` z*i(l^R;9#U9GxMmZv-p*t+Z5FUy36%DFB9iHf;ok(xy|U^nlGrcVaX@jf>U?_)KWb z7kcQgEHbSVif7y@{OFKl;|wtwtwe_o885Po_{bNu*3;D%oV98cYylEE=?%@GJaN|# z7Dy~Ut9R>u;8LdvOR!_@uWAdQ$faxnQMmrOfJ1J6E^LX97Fo)0+O%E08Xux9+Pqwu z{PGku4ufhr;-pCgIu@293_c6;agkDP2bzEl=n(R(tY%%OGHpD(UCw(88S`c4^G@@p z^?7d%)Agr8psRRk#FRHR)(OM)Hl2Gx%XVE1WZ~}t++20z$|c4tOQODSQ|J;hgsHrn z5DWLN9A!Ku1a>&SJzr#^a|V=@@hS^Jds!%~xCEWC zqv8*Tl%ND8S<4*6bTe=tg`Spz`8b(K%gzy1!mQy#!44z1RRVBb_dO2m%h4Ag?(BKk zbAk`N2I2&;MeUl(o3EJYch9m^&>M#;g@i?YRoK zlF}JZ1!3o41KozVWTMh;WkB@Oc*e7osDXQlhQ8cQ7E=ouQ)T88dNKYci`Qu2NdsQh zK+*Ann|EYl51u?!C8FcGySM=zH7mmP)D+mVe}~92iEld?4JmhSvTNa;`fFUrSON=( zJ*N-C?fZ9S318vjO_t1j-f8RP;wewSlNG(0ViJl+@T?W|$SLGiPy~-~w!3Ox`R=iZ zocv}{fu3NOV}8`|R50py90m>^^_ZwlIJwBm%wRl`*s}5`pmq|WvTf29%TD-^aj$7s z_mHi<5pu?2XnY>2qhot~Xw&bomb{ZMCc9beeB7Mf<8wsc{TTua+-p1Iak2p-%ZgiDK+^&vW^xN94yu4+ zSLgxVqgsNUBVWK+{^f4vaWcXt2Z6I2Td$$&`~lw+Te)6X-#p}fBl7QN+=G2*<3yG+ z+*B~UWmO;;9yQR!@G)R-$1PI7tY3&5q8}IAe)PO8+|0kNw&?N{-bugyXie;9xjeMe z#-0H=!ShO6ClpV=^JJ+YHvuCIpYj&i;Ryy$gHAodpmB?KEXgiBQ#m!|D&v(U(X@3( z&0ddNXLdlw&5O#5Q4L@4M%^bdxf%D>1pn?HX*mCiDWxyLY zZV3UMcn6H-9)%s1kD2AC$p|ycPsdQ&HKZer&6I62+ltc{;l_i9?Ar99gTdM1>F)r` 
z@SG9H6B1!Z{4rK*3QE|nt{3B}AeNgK>~0w9_80XJexVE4+VR1gc-@LyTYy@iV`wvI z+=ed}9oc*d9^6sCSnK3!55Ys5vxdK!bQkt-;0qiqff{d*MXV8A!O9Bxjq86B}+KkyAV`^oy;V;|S8EFM$4&i@!epOQmgR90j^# z;b)5#@bIhy{iX9-#p?zf^TM!6EGsUv%P-B27~u5(@@{?177c^Pf~^yO z>uKk9eqJss0ucvyt%tPR{DlpRddbvszb0=*XABwpnp{L$2GcO*L^6%f6#3+_hsw*@ z5u#c*lanf=Y3XRr;S)@rqrE-693IAaR)V*{f6!b(OKT$N5v7nc%rzbT?k7>V37kDg z>QzC*Kte^9q$ygX({6o zA@9_-wY7)dgNs`&(qymD3<_f6-GHqQQ+Z#-e#)_w1t(v;nqr=57)8y&riRE;if_G` z1Q+hyV=|`<>$#J zU_46+@&W8Kx`QmHEi#H*J^2W)u~h>TzA~~rv*_+EgESHGR^X=1Toa3PW+^DgOy0Oi zJFqMFU%GfT0rng{pefbqgtKt$JYV3Vwy9UkTa#7=s+oxOUE>Y827G_i==fSwULKtH#VGJ5sFO{T0SA1PB%1qJcDSOqI+W|hc2fGx)i zM|UO%JZQNz5E4EXyy}vlW)W;&J_jB?;48pISM|r}SSShwF2c3ArI7pZuEq!?OHfDp7w|V)GnwPe+@6G1nL6un?ojYDaF|Vp2x-?g0(!crl)pC~AR(8!516FJEQrP|pjxH>zKhND)TS z?+$3A-z~#|PGWH?@Nx6NYm>z5P1@Fn7M=JvO1y5xtu0`x1qPzKoLem?c4^Pj)5>>K zlLF8Bt`N|>5z}@0;AJ=yo5XmQ5@dntI=GcACKTVBZZh);{fWq@(90VRZxdO{@UV`3 zG)ElK8LRldE7MV4f*RDR4}H;-LIko*h`~9o;O5l5NB4_KpuK+pG^y*yc&do|nGb-1 zkw`$Mi=7hBN(wDtolppkK`4GaXx6}1hFx)I(SVUtrIgPmYuMPrPz-M^Le|3vuxZ&G zkyWNRt)54W=VdC$%Z97_wJQQD3g*`v_eDriX;%I6+jlrGF9%|9C%XtV*^$rZ6|YxD z+!4cvhK6%jFsw%#{NrCy6P3f~k!B5Viw2d+P2Rhr#}p9#q>T0ne%=s5Si_)QiY;Fu8qGV<7}2>Fp)!P)|)Z2@~nTNotm za!yapgkw7rODn2G>HDCSLSBAZ{zXiQ@|Gsi$XUHjC4a{h6M38Q@3eSr>(O11nUf{5 zl;9MQp-)?W=5xZe%Mf?^h!XkbEoADjiJCLg=dLXplgwMcSYrhq*>#9+#ol;idtopV zNdZN?do1y+q|gG^35C!YPf+p?V#}}*6J`Mo3eAB5!(RY8hgO93i@t&EtcN12OmVWD z)UNBpbY8&;MShN|GDW|CeyP?Mz`?yP)N2>c^jOuI} zS2Ym{v6gHSTTHEKb=gn6MiYyg=nAD5oJqY4dy=j(@nJ!M(8|}ks?8cjAd*vW!?Io6 zrLVP@CxkXu-)6TSX2RNY34-=QbT9UM+*Bf@z{w$f_t~m~nT} zlY!=EV&W_e!Mx-6Zjq%7r|{8obiBn|Rw$ESo`U@A`$Er-e4!)$-0{M_6sc*q(ba1^ zE1ti1BWUZVxg{)(xkMhihi^KvQ_g-J8H;76mF6vAom>d*fgcSgB!4fkfTEfW()z}W zNuG+^;(^SAM^9&I9;DxeZL5A^ys{+7iZTQTxFY0brNgzjWg<%%?jJG@SDN^vMgctJ zP1^Pjj?gPCN}1k@B#6$-?bxlqT%=+6%xQWrL!u{xK(~yxz3~-^9SKnorC-dIoiIs|ZT__!$z1;&t|{4Vn^+hfml% zBb)WIwFOMK0BvyYJC1VW9EZVXh z^78W;e^mDf{I>G6EP(|?^vS(&?_s(u;WL~-=E_Vf&0Bz86e|?9zzjs|6p_D0F*Dav zVUROu)XU)P>R})Ye`(V)ZQd>~7%==rpbh9ET;4Mm9u-i4AGP3Ir!mxS!OhCY4(^0= zCt{f*3P&xs6H8FZ33(ES)uH!q)>EoA3a_;mua9Zd0_r%^yjLH%lKucTUbxJJcW%)P zy0mJ+cvhmQ1#YIL!NTp)MGVB6*75)fp{*LSuwyN4#JeYD&>SHfHYrGT@vQQmvlbYE zN6X5an#oF`2Q=*V4|o8X6oPTOHC5>6OXe(>mN+Eyzihc@cc z1Ve;-cW%M14U0rpnd0`2PC)S>M98^+6K=$>5n0Oc#(lVTgap&aP=uH?%?Wy_*I!FP$9N`w^ne373(KY<0^!vnyxUU83|n9_<{TcG$BpbgI5 z!udBX;b;n6Ih|U3CJU~FOyCxaM`*QM{S9|Jv_QNu>u@QII9{;FzvA3gzr<^~#>|M2 z#X=8g-LN?{tKXQ3-ne&bm57cxkc7Mw?m00t>GS_HlYeIZ$s{wUt03K{PoMt#oBsRn?sK}2%kdTe`nK-& zsOZffLkHUKc`H2i(_iVHsp!ahoamdcx*ZWd8%9oVK|JB;F|x#+q0miqTVHW_6m{zmfNO`(q_Mn<1()tIdHN`w)Q(!<}<~4 zaG|VFC7Bj~`hmxpEOLeyt=YwA3#lp?{u#6FKJkcKhU*Tk+hukk!-l^Rnhbs9 zf+)W%BF41%v*RE^yAOX+djM$_Y?!O4EKbGg1}XA&5xebEYq^TcMezGqABIt5Z@1F= zIubg3Bs(~MqpA?fvt!}*`=2%aQ6iT760(Zku_>2)dwlRN*O=I3^e~aMCXMwvTT0Vv z?Zn%G=sXK~S69Lxu@8QVb*n46|5Nh2cDvtt!2Uq1^12#-|9A#8xATo|IuQ8dKmW~D zk)AI%{l8ZI(UvWM=WDjq@YkE_$U%3?Hny~|+50;W&VfM91?-m6?)*LMn)f^13UkS)-B?FP2mv^V(QnueC z5IQmh8fts4LCQUH2`VpLhjEk5*`c!lKt1)5muJED_l{XeXjj}0F>6!c=YRSaE6v>q z*?2ws)*`_7L={Ev(tmdUQ|T1{0RO$J663y&hu=QK4_B)BuErP5oClkec0g5Kja9^k z*e#Hif68N9`PPa|iVQ=O39XX~i)gY#vKO9t@NpOtG{j!ETl7m;OW|~pZe<*eAL;_P z1p86p0Gd(xd%%z1dc@#$6^9pJ{||Wi?RQm@+i`L){mu*b;q20TY6>Hb7#uPJmi>2% z5mx7Tu&<@8d=VPT&pTJnKe&H~?_$^12=+%Zm}5qqPfd@2Po93%V@On^y5hk#+jMqm z9;?tt1>X@i43ggbDUiLI+2PflU&D!#GF2=MBzI!JDOQPU9QzX4(XrL4nlEudz*7iJ zoG}`H^BHMg%?+KMH{t!)qM`aqkD4MRG#J*V{~kg|gqT)YOH&)XHSh0GSJSAkSjnLO96dFLN6I_y>p$+W+FQsZEABGZzXCcD218afz%3b!Ao}Bbr+Z{Ek%#yErt-xE zJuP{>v%8q+ZVo$4VL}r{gdkuDfy!dfd`HN=_Tr!NgB(o_E%5dFG(*p2l!=^j%=qCX 
zEjwxH7HWyz>3^#9j?NA^n3brKOp8A@XNm5*B-4cVQ$f=w-088!MD2|_ICf6gf71PT zF|l#~YP@_>e=mUX&LNA7@3A_mLWUg*^0MvT{jRb)6)!6;0J>YYzlR~AqdoSBx6NLG zgQ9dh8vyT7Xa|&dS2Vq}TuU-Fy)+`+?><(h0*Jx_anj?d6ZLB|a%>p|u*r;__Lx7A z*{Mj#f`E#^xJl-kKB=-tx=I*2d_2s4PtI)jaf0NU?94|~99tI)k##@481a7ppL1|qBkjwMG zJ+=r!hYWUHz=hn3hDM0hd%Qb_{Y1CDW<&gLr2Sc?j|m?RBgc8hgg~uF;{Jy~eRUHw zTx-Hp)wjaW-%}N^=3)^vBoG$8I1>)6&9n-xzt#wczRG~7-?XPpu(BG8Tsl_)g?bNr zoAJH7VM_qgJ8;~l>!9AZ^)FkqUiSsLUAf$Hjr576*du7=6w1TjzXx z1j<~$7l z_S74UCJAE*kURAHrUorD-Puj&Xe2m+fY_n*fVHM4`n@xSgngS;l3VepO&>u!u8P$# zb=li~x3Ey#?~n14lRLD{rcObZ1!%(tB=np{#KfkoGg=-=`#1vR{chW-JZ&qBkg!)v zbdY9So5i`6!RjQ;K{?411o|xk!M6;C1<%c3&sTNjIvj~Q&1kM9+S@uHSuZC8Cz;uf zC(lHUdr8N}=Y$32h^z zu*Y-pFS7D$K1hak}!tA$?vk@a&@_uc@2Jlbq8s*Op?|kAPWkP38^csp}w{n(v#Wd zo;A;z$ll16(AvUppNn|pR|EE-@<+SEaI4&%djXt5&o_BXPwqssJFUeNFfEy;)vCFwNJw6Q!0s`6~zvSOh{x4uuEs;IkeZ*tGG8)i)43o6DS3A+f8=I3n5d2Rj<{GE%nL`fS%fWL*t z%J?i7b|m_G?^GL@n{f;-7wI~k%Hl8{yKOadUro&9W}0q6lHflG&>`*ChH7ntX>!=x ztZtQBsI{X5$RtLB?<24-lUZX@gWcIZi&GJ6ZfFfGIV+}gDkc&im1vsz+~c+m0;?<-pY#5d(Y@OOkn_7v zjkf#6g(hxN{`KZxG!N$c$-QYhbp>bVS18XM~1;BG!J z4IMEa?wI?Wfzra_O9+(ePim9if$OqtH<<1aW6JX_Xb08V>%zpgFrf|%1nhXf+lez5 zSGUv1CWB3z#13j;^aXj!-Vu1@l^^KtB;WP%k=_exIB3J?@b)YX6-tnhs3jUmGg}(E z$230MHB}0EYfH*aVO37S`&RFpI*ZXHVGM!l`WiTque;(a8GnC!&Pa?2_H!;-P-r>; zb|^Yh%dUAyirWTltz<^vT-d*JE!?Q*2Y;qK{0kTqG{iYU!abl7xKULIHHB>VirPcE z)?VEqB2OFgvQ=sE^?Eo`ba_ByA`kBv0rGffBe=YKhW&_c1z-jgxS;4wU1sAEgd_;` zKLo~29sx5KPGk>8DvSIK=hok6Q;VRiNT*s}hwl$Nfz2wipV)?3Y@jAjYvgv@*${K` zpw2@ZsVM0BCkf6Yun+I~wedTA;JhR$p&}p)3Kfl>_!m70BhnJtp({1pX1t}T z5x(C3Im;V7>{giYz)OrK3ExMcG!5^O)oT=VnlJ}Qe2RIru&@SlV&L0hK~nqL-faPV z8v_74-tUg$1SieSw#TaFD++4^YRVc^lEejp0gJ%0*M6XT_NdkDe3~v_OE~nE?xZ#? zz&C06jHJ6ahS->2(@+a3dAf}zhvFJrn&4nISC)()ITjwed!EB`l4%=(rq*USbV{c+ zA|qoqKH4rVR7VKN;=(pMWbbrC&5Dn9ZcO7g z7#IkI-#rGV-gg^&Sh^>+IKPt7>?IoOTOcJiSDT%Hh#pd8XM1`v=1g$fVM{cH9ju1oOLKVPqFoMo(p26 zNPk9vJl=h!|6L35nRTmD7urFF;%o(xCwI%b0HYM?uLvxAm7g!bZ#hJNdeqS49@~2w zTAG+rn98j;RM-v^)_)bVwy4?U4m+<@cg2*-VvVZq`gP*^s5v6V+6#u1=X z?q6rH+wiC`nISv3V{E+2e#!ZVh+JGzdDcgOEH3Q-^xTdK`HZ^TPS_g>*Q&UOmC1`< z16p86@Gk_q%%FIaNejk4Hay#Iibh1Xn%I$(4=wG^+v(b^K{C9Kz^>f$W_Z7|*$$lO z+{6raE!4&Aa%gGPO{i>^7os50-w~Mhz#TAg#%T7KRGNp!R<~|kWO%&vh*jNJ+?%{sLilhD!L2K%;r!f0gjG-<&rj3x=+N1&my0BVb~wGG{h zeT$#q*K*iMV;TA&kt*F9$nA36`9f=2A_E%%`hhoPpVKyioKtMj65U8M6R7sKPWUdP z%nfl;R1oN22rNYh7Ha5d>wxIb9G|PWTu_AvyXlfA79w%j<#tCb+gJi+oI%b*DmOWv zuraY!S?G>hh)&zjXta=cV(z1iCJEyR#NZ)qO`uzNm?eNFUy@EBAPWk|VBvAmoSQ2l=9azCifDgv9Ja*$=RQ&Gs&^$Q-rQQEvuMAHZEA$A>~jsloj(8@Wgp9L2%euE009{1A%f}Otd$%wJ0^*jLD%+ zRU3)U@FWau1ad2@A+403!NL}iKNGI{G^d$trQ1;345j(H`}xhONu&q@UO?dfv;P&VfCor;U2nY?*%Z5z+Z3v#VBik=PS{>)7Gt4U1>x2h{!%mF?j{H zPWiebUdnh00UPQHdJ<1?z@>U(A`vkIgU`u&AxlQ&^+OQ`zj*-ag-E;GwWHd<_M}X8nHgFd2cg7(!#3?I` zUSPrr99dg3yOreq+jS?m(~s;&RmNE+ZNl8=A!Otoj3x=+N1(ne59%(T(Kd7w_9a?W zm=T!Tq8YD&+I{A{8FEF27Z4zicX8)0YHKEscPF((GtwxR9H6lMJHJ0S2C= zeAGt1*bP7f$SF(SP>V_2PY@`!eNy`i6Pv#aWwGOXk*D%-2S)o{G_ghz

Date: Tue, 7 Oct 2025 21:42:28 +0200
Subject: [PATCH 060/145] Start a more systematic approach to building the
 arrow table schema. NOTE: unfinished.

---
 d123/datasets/utils/arrow_schema.py | 22 ++++++++++++++++++++++
 1 file changed, 22 insertions(+)

diff --git a/d123/datasets/utils/arrow_schema.py b/d123/datasets/utils/arrow_schema.py
index e69de29b..208f4cb2 100644
--- a/d123/datasets/utils/arrow_schema.py
+++ b/d123/datasets/utils/arrow_schema.py
@@ -0,0 +1,22 @@
+import pyarrow as pa
+
+from d123.datatypes.vehicle_state.ego_state import EgoStateSE3Index
+from d123.geometry.geometry_index import BoundingBoxSE3Index, Vector3DIndex
+
+schema_column_list = [
+    ("token", pa.string()),
+    ("timestamp", pa.int64()),
+    ("detections_state", pa.list_(pa.list_(pa.float64(), len(BoundingBoxSE3Index)))),
+    ("detections_velocity", pa.list_(pa.list_(pa.float64(), len(Vector3DIndex)))),
+    ("detections_token", pa.list_(pa.string())),
+    ("detections_type", pa.list_(pa.int16())),
+    ("ego_states", pa.list_(pa.float64(), len(EgoStateSE3Index))),
+    ("traffic_light_ids", pa.list_(pa.int64())),
+    ("traffic_light_types", pa.list_(pa.int16())),
+    ("scenario_tag", pa.list_(pa.string())),
+    ("route_lane_group_ids", pa.list_(pa.int64())),
+]
+
+
+def get_default_arrow_schema() -> pa.schema:
+    return pa.schema(schema_column_list)

From 643a05a4b3e99eaf4967ed33d553f8b296554cdb Mon Sep 17 00:00:00 2001
From: DanielDauner
Date: Wed, 8 Oct 2025 14:20:47 +0200
Subject: [PATCH 061/145] Push a few helpers to refactor arrow storage.
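This adds zstd compression in write_arrow_table (readers are unaffected:
Arrow IPC records the codec per buffer, and pa.ipc.open_file decompresses
transparently) and composable add_*_schema helpers in arrow_schema.py, so a
converter can opt into only the columns it actually fills. A minimal sketch
of how the helpers are meant to compose (the copy() is only there to avoid
mutating the module-level default list):

    import pyarrow as pa

    from d123.datasets.utils.arrow_schema import (
        add_detection_schema,
        add_ego_state_schema,
        get_default_arrow_schema,
    )

    columns = get_default_arrow_schema().copy()  # shared (token, timestamp) columns
    add_ego_state_schema(columns)                # opt into the ego state column
    add_detection_schema(columns)                # opt into detection columns
    schema = pa.schema(columns)                  # pa.schema accepts (name, type) tuples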
--- d123/common/utils/arrow_helper.py | 6 +- d123/datasets/nuplan/nuplan_data_converter.py | 6 +- d123/datasets/utils/arrow_schema.py | 56 +++++++++++++++---- 3 files changed, 52 insertions(+), 16 deletions(-) diff --git a/d123/common/utils/arrow_helper.py b/d123/common/utils/arrow_helper.py index 54d231c7..693e8af4 100644 --- a/d123/common/utils/arrow_helper.py +++ b/d123/common/utils/arrow_helper.py @@ -1,6 +1,6 @@ from functools import lru_cache from pathlib import Path -from typing import Final, Union +from typing import Final, Literal, Optional, Union import pyarrow as pa @@ -15,8 +15,10 @@ def open_arrow_table(arrow_file_path: Union[str, Path]) -> pa.Table: def write_arrow_table(table: pa.Table, arrow_file_path: Union[str, Path]) -> None: + compression: Optional[Literal["lz4", "zstd"]] = "zstd" + options = pa.ipc.IpcWriteOptions(compression=compression) with pa.OSFile(str(arrow_file_path), "wb") as sink: - with pa.ipc.new_file(sink, table.schema) as writer: + with pa.ipc.new_file(sink, table.schema, options=options) as writer: writer.write_table(table) diff --git a/d123/datasets/nuplan/nuplan_data_converter.py b/d123/datasets/nuplan/nuplan_data_converter.py index 95067b32..e0029dd2 100644 --- a/d123/datasets/nuplan/nuplan_data_converter.py +++ b/d123/datasets/nuplan/nuplan_data_converter.py @@ -3,7 +3,7 @@ import pickle from functools import partial from pathlib import Path -from typing import Any, Dict, Final, List, Optional, Tuple, Union +from typing import Any, Dict, Final, List, Literal, Optional, Tuple, Union import numpy as np import pyarrow as pa @@ -300,9 +300,11 @@ def _write_recording_table( source_log_path: Path, data_converter_config: DataConverterConfig, ) -> None: + compression: Optional[Literal["lz4", "zstd"]] = "zstd" + options = pa.ipc.IpcWriteOptions(compression=compression) with pa.OSFile(str(log_file_path), "wb") as sink: - with pa.ipc.new_file(sink, recording_schema) as writer: + with pa.ipc.new_file(sink, recording_schema, options=options) as writer: step_interval: float = int(TARGET_DT / NUPLAN_DT) for lidar_pc in log_db.lidar_pc[::step_interval]: lidar_pc_token: str = lidar_pc.token diff --git a/d123/datasets/utils/arrow_schema.py b/d123/datasets/utils/arrow_schema.py index 208f4cb2..3df567f2 100644 --- a/d123/datasets/utils/arrow_schema.py +++ b/d123/datasets/utils/arrow_schema.py @@ -1,22 +1,54 @@ +from typing import List, Tuple + import pyarrow as pa from d123.datatypes.vehicle_state.ego_state import EgoStateSE3Index from d123.geometry.geometry_index import BoundingBoxSE3Index, Vector3DIndex -schema_column_list = [ +schema_list: List[Tuple[str, pa.DataType]] = [ ("token", pa.string()), ("timestamp", pa.int64()), - ("detections_state", pa.list_(pa.list_(pa.float64(), len(BoundingBoxSE3Index)))), - ("detections_velocity", pa.list_(pa.list_(pa.float64(), len(Vector3DIndex)))), - ("detections_token", pa.list_(pa.string())), - ("detections_type", pa.list_(pa.int16())), - ("ego_states", pa.list_(pa.float64(), len(EgoStateSE3Index))), - ("traffic_light_ids", pa.list_(pa.int64())), - ("traffic_light_types", pa.list_(pa.int16())), - ("scenario_tag", pa.list_(pa.string())), - ("route_lane_group_ids", pa.list_(pa.int64())), ] -def get_default_arrow_schema() -> pa.schema: - return pa.schema(schema_column_list) +def get_default_arrow_schema() -> List[Tuple[str, pa.DataType]]: + return schema_list + + +def add_detection_schema(schema_list: List[Tuple[str, pa.DataType]]) -> None: + detection_schema_addon: List[Tuple[str, pa.DataType]] = [ + ("detections_state", 
pa.list_(pa.list_(pa.float64(), len(BoundingBoxSE3Index)))),
+        ("detections_velocity", pa.list_(pa.list_(pa.float64(), len(Vector3DIndex)))),
+        ("detections_token", pa.list_(pa.string())),
+        ("detections_type", pa.list_(pa.int16())),
+    ]
+    schema_list.extend(detection_schema_addon)
+
+
+def add_traffic_light_schema(schema_list: List[Tuple[str, pa.DataType]]) -> None:
+    traffic_light_schema_addon: List[Tuple[str, pa.DataType]] = [
+        ("traffic_light_ids", pa.list_(pa.int64())),
+        ("traffic_light_types", pa.list_(pa.int16())),
+    ]
+    schema_list.extend(traffic_light_schema_addon)
+
+
+def add_ego_state_schema(schema_list: List[Tuple[str, pa.DataType]]) -> None:
+    ego_state_schema_addon: List[Tuple[str, pa.DataType]] = [
+        ("ego_state", pa.list_(pa.float64(), len(EgoStateSE3Index))),
+    ]
+    schema_list.extend(ego_state_schema_addon)
+
+
+def add_route_schema(schema_list: List[Tuple[str, pa.DataType]]) -> None:
+    route_schema_addon: List[Tuple[str, pa.DataType]] = [
+        ("route_lane_group_ids", pa.list_(pa.int64())),
+    ]
+    schema_list.extend(route_schema_addon)
+
+
+def add_scenario_tags_schema(schema_list: List[Tuple[str, pa.DataType]]) -> None:
+    scenario_tags_schema_addon: List[Tuple[str, pa.DataType]] = [
+        ("scenario_tag", pa.list_(pa.string())),
+    ]
+    schema_list.extend(scenario_tags_schema_addon)

From 65b7d30ed215aa5f8d0325356206f06aa0e345f3 Mon Sep 17 00:00:00 2001
From: Daniel Dauner
Date: Wed, 8 Oct 2025 23:20:12 +0200
Subject: [PATCH 062/145] Larger update on dataset conversion. Simpler usage
 and replaceable log writer. (#39)

---
 d123/common/utils/arrow_helper.py             |  10 +-
 d123/datasets/av2/av2_data_converter.py       | 231 +++----
 .../datasets/av2/av2_data_converter_delete.py | 527 ++++++++++++++++++
 d123/datasets/nuplan/nuplan_data_converter.py | 268 ++++-----
 .../nuplan/nuplan_data_converter_delete.py    | 505 +++++++++++++++++
 d123/datasets/raw_data_converter.py           |  37 +-
 d123/datasets/utils/arrow_ipc_writer.py       | 242 ++++++++
 d123/datasets/utils/arrow_schema.py           |  54 --
 d123/datatypes/detections/detection.py        |   4 +-
 .../scene/arrow/utils/arrow_getters.py        |  15 +-
 .../scene/arrow/utils/arrow_metadata_utils.py |   2 +-
 .../default_dataset_conversion.yaml           |   6 +-
 .../config/datasets/av2_sensor_dataset.yaml   |  24 +-
 .../script/config/datasets/carla_dataset.yaml |  24 +-
 .../config/datasets/nuplan_dataset.yaml       |  24 +-
 .../config/datasets/nuplan_mini_dataset.yaml  |  24 +-
 .../datasets/nuplan_private_dataset.yaml      |  24 +-
 d123/script/config/datasets/wopd_dataset.yaml |  24 +-
 test_viser.py                                 |   4 +-
 19 files changed, 1653 insertions(+), 396 deletions(-)
 create mode 100644 d123/datasets/av2/av2_data_converter_delete.py
 create mode 100644 d123/datasets/nuplan/nuplan_data_converter_delete.py
 create mode 100644 d123/datasets/utils/arrow_ipc_writer.py
 delete mode 100644 d123/datasets/utils/arrow_schema.py

diff --git a/d123/common/utils/arrow_helper.py b/d123/common/utils/arrow_helper.py
index 693e8af4..7e1b8e47 100644
--- a/d123/common/utils/arrow_helper.py
+++ b/d123/common/utils/arrow_helper.py
@@ -1,6 +1,6 @@
 from functools import lru_cache
 from pathlib import Path
-from typing import Final, Literal, Optional, Union
+from typing import Final, Union
 
 import pyarrow as pa
 
@@ -15,10 +15,12 @@ def open_arrow_table(arrow_file_path: Union[str, Path]) -> pa.Table:
 
 
 def write_arrow_table(table: pa.Table, arrow_file_path: Union[str, Path]) -> None:
-    compression: Optional[Literal["lz4", "zstd"]] = "zstd"
-    options = pa.ipc.IpcWriteOptions(compression=compression)
+    # compression: Optional[Literal["lz4", "zstd"]] = "lz4"
+    # codec = pa.Codec("zstd", compression_level=100) if compression is not None else None
+    # options = pa.ipc.IpcWriteOptions(compression=codec)
     with pa.OSFile(str(arrow_file_path), "wb") as sink:
-        with pa.ipc.new_file(sink, table.schema, options=options) as writer:
+        # with pa.ipc.new_file(sink, table.schema, options=options) as writer:
+        with pa.ipc.new_file(sink, table.schema) as writer:
             writer.write_table(table)
 
 
diff --git a/d123/datasets/av2/av2_data_converter.py b/d123/datasets/av2/av2_data_converter.py
index 63a5279d..881320f5 100644
--- a/d123/datasets/av2/av2_data_converter.py
+++ b/d123/datasets/av2/av2_data_converter.py
@@ -6,9 +6,10 @@
 
 import numpy as np
 import pandas as pd
-import pyarrow as pa
+from typing_extensions import Final
 
 from d123.common.multithreading.worker_utils import WorkerPool, worker_map
+from d123.common.utils.arrow_helper import open_arrow_table, write_arrow_table
 from d123.datasets.av2.av2_constants import (
     AV2_CAMERA_TYPE_MAPPING,
     AV2_TO_DETECTION_TYPE,
@@ -22,7 +23,9 @@
 )
 from d123.datasets.av2.av2_map_conversion import convert_av2_map
 from d123.datasets.raw_data_converter import DataConverterConfig, RawDataConverter
-from d123.datatypes.scene.arrow.utils.arrow_metadata_utils import add_log_metadata_to_arrow_schema
+from d123.datasets.utils.arrow_ipc_writer import ArrowLogWriter
+from d123.datatypes.detections.detection import BoxDetectionMetadata, BoxDetectionSE3, BoxDetectionWrapper
+from d123.datatypes.detections.detection_types import DetectionType
 from d123.datatypes.scene.scene_metadata import LogMetadata
 from d123.datatypes.sensors.camera.pinhole_camera import (
     PinholeCameraMetadata,
@@ -32,15 +35,17 @@
 )
 from d123.datatypes.sensors.lidar.lidar import LiDARMetadata, LiDARType
 from d123.datatypes.time.time_point import TimePoint
-from d123.datatypes.vehicle_state.ego_state import DynamicStateSE3, EgoStateSE3, EgoStateSE3Index
+from d123.datatypes.vehicle_state.ego_state import DynamicStateSE3, EgoStateSE3
 from d123.datatypes.vehicle_state.vehicle_parameters import (
     get_av2_ford_fusion_hybrid_parameters,
     rear_axle_se3_to_center_se3,
 )
 from d123.geometry import BoundingBoxSE3Index, StateSE3, Vector3D, Vector3DIndex
-from d123.geometry.geometry_index import StateSE3Index
+from d123.geometry.bounding_box import BoundingBoxSE3
 from d123.geometry.transform.transform_se3 import convert_relative_to_absolute_se3_array
 
+SORT_BY_TIMESTAMP: Final[bool] = True
+
 
 def create_token(input_data: str) -> str:
     # TODO: Refactor this function.
@@ -124,7 +129,6 @@ def convert_logs(self, worker: WorkerPool) -> None: for split, log_paths in self._log_paths_per_split.items() for log_path in log_paths ] - worker_map( worker, partial( @@ -186,51 +190,28 @@ def convert_av2_log_to_arrow( map_is_local=True, ) - schema_column_list = [ - ("token", pa.string()), - ("timestamp", pa.int64()), - ("detections_state", pa.list_(pa.list_(pa.float64(), len(BoundingBoxSE3Index)))), - ("detections_velocity", pa.list_(pa.list_(pa.float64(), len(Vector3DIndex)))), - ("detections_token", pa.list_(pa.string())), - ("detections_type", pa.list_(pa.int16())), - ("ego_states", pa.list_(pa.float64(), len(EgoStateSE3Index))), - ("traffic_light_ids", pa.list_(pa.int64())), - ("traffic_light_types", pa.list_(pa.int16())), - ("scenario_tag", pa.list_(pa.string())), - ("route_lane_group_ids", pa.list_(pa.int64())), - ] - if data_converter_config.lidar_store_option is not None: - for lidar_type in log_metadata.lidar_metadata.keys(): - if data_converter_config.lidar_store_option == "path": - schema_column_list.append((lidar_type.serialize(), pa.string())) - elif data_converter_config.lidar_store_option == "binary": - raise NotImplementedError("Binary lidar storage is not implemented.") - - if data_converter_config.camera_store_option is not None: - for camera_type in log_metadata.camera_metadata.keys(): - if data_converter_config.camera_store_option == "path": - schema_column_list.append((camera_type.serialize(), pa.string())) - - elif data_converter_config.camera_store_option == "binary": - schema_column_list.append((camera_type.serialize(), pa.binary())) - - schema_column_list.append( - (f"{camera_type.serialize()}_extrinsic", pa.list_(pa.float64(), len(StateSE3Index))) - ) - - recording_schema = pa.schema(schema_column_list) - recording_schema = add_log_metadata_to_arrow_schema(recording_schema, log_metadata) + log_writer = ArrowLogWriter( + log_path=log_file_path, + data_converter_config=data_converter_config, + log_metadata=log_metadata, + ) _write_recording_table( sensor_df, synchronization_df, - recording_schema, + log_writer, log_file_path, log_path, data_converter_config, ) - del recording_schema + del log_writer gc.collect() + + if SORT_BY_TIMESTAMP: + recording_table = open_arrow_table(log_file_path) + recording_table = recording_table.sort_by([("timestamp", "ascending")]) + write_arrow_table(recording_table, log_file_path) + return [] @@ -279,7 +260,7 @@ def get_av2_lidar_metadata(log_path: Path) -> Dict[LiDARType, LiDARMetadata]: def _write_recording_table( sensor_df: pd.DataFrame, synchronization_df: pd.DataFrame, - recording_schema: pa.schema, + log_writer: ArrowLogWriter, log_file_path: Path, source_log_path: Path, data_converter_config: DataConverterConfig, @@ -304,78 +285,40 @@ def _write_recording_table( else None ) - # with pa.ipc.new_stream(str(log_file_path), recording_schema) as writer: - with pa.OSFile(str(log_file_path), "wb") as sink: - with pa.ipc.new_file(sink, recording_schema) as writer: - - for lidar_timestamp_ns in lidar_timestamps_ns: - - ego_state_se3 = _extract_ego_state(city_se3_egovehicle_df, lidar_timestamp_ns) - ( - detections_state, - detections_velocity, - detections_token, - detections_types, - ) = _extract_box_detections(annotations_df, lidar_timestamp_ns, ego_state_se3) - traffic_light_ids, traffic_light_types = _extract_traffic_lights() - route_lane_group_ids = None # TODO: Add route lane group ids extraction ? 
- row_data = { - "token": [create_token(str(lidar_timestamp_ns))], - "timestamp": [TimePoint.from_ns(int(lidar_timestamp_ns)).time_us], - "detections_state": [detections_state], - "detections_velocity": [detections_velocity], - "detections_token": [detections_token], - "detections_type": [detections_types], - "ego_states": [ego_state_se3.array.tolist()], - "traffic_light_ids": [traffic_light_ids], - "traffic_light_types": [traffic_light_types], - "scenario_tag": [_extract_scenario_tag()], - "route_lane_group_ids": [route_lane_group_ids], - } - - # TODO: add lidar data - - # if data_converter_config.lidar_store_option is not None: - # lidar_data_dict = _extract_lidar(lidar_pc, data_converter_config) - # for lidar_type, lidar_data in lidar_data_dict.items(): - # if lidar_data is not None: - # row_data[lidar_type.serialize()] = [lidar_data] - # else: - # row_data[lidar_type.serialize()] = [None] - - if data_converter_config.camera_store_option is not None: - camera_data_dict = _extract_camera( - lidar_timestamp_ns, - city_se3_egovehicle_df, - egovehicle_se3_sensor_df, - ego_state_se3, - synchronization_df, - source_log_path, - data_converter_config, - ) - for camera_type, camera_data in camera_data_dict.items(): - if camera_data is not None: - row_data[camera_type.serialize()] = [camera_data[0]] - row_data[f"{camera_type.serialize()}_extrinsic"] = [camera_data[1]] - else: - row_data[camera_type.serialize()] = [None] - row_data[f"{camera_type.serialize()}_extrinsic"] = [None] - - batch = pa.record_batch(row_data, schema=recording_schema) - writer.write_batch(batch) - del batch, row_data, detections_state, detections_velocity, detections_token, detections_types - - -def _extract_box_detections( + for lidar_timestamp_ns in lidar_timestamps_ns: + + ego_state = _extract_av2_sensor_ego_state(city_se3_egovehicle_df, lidar_timestamp_ns) + log_writer.add_row( + token=create_token(str(lidar_timestamp_ns)), + timestamp=TimePoint.from_ns(int(lidar_timestamp_ns)), + ego_state=ego_state, + box_detections=_extract_av2_sensor_box_detections(annotations_df, lidar_timestamp_ns, ego_state), + traffic_lights=None, # NOTE: Traffic light information is not available in AV2 sensor dataset. + cameras=_extract_av2_sensor_camera( + lidar_timestamp_ns, + egovehicle_se3_sensor_df, + synchronization_df, + source_log_path, + data_converter_config, + ), + lidars=None, + scenario_tags=None, + route_lane_group_ids=None, # NOTE: Route information is not available in AV2 sensor dataset. + ) + + log_writer.close() + + +def _extract_av2_sensor_box_detections( annotations_df: Optional[pd.DataFrame], lidar_timestamp_ns: int, ego_state_se3: EgoStateSE3, -) -> Tuple[List[List[float]], List[List[float]], List[str], List[int]]: +) -> BoxDetectionWrapper: # TODO: Extract velocity from annotations_df if available. 
if annotations_df is None: - return [], [], [], [] + return BoxDetectionWrapper(box_detections=[]) annotations_slice = get_slice_with_timestamp_ns(annotations_df, lidar_timestamp_ns) num_detections = len(annotations_slice) @@ -383,7 +326,7 @@ def _extract_box_detections( detections_state = np.zeros((num_detections, len(BoundingBoxSE3Index)), dtype=np.float64) detections_velocity = np.zeros((num_detections, len(Vector3DIndex)), dtype=np.float64) detections_token: List[str] = annotations_slice["track_uuid"].tolist() - detections_types: List[int] = [] + detections_types: List[DetectionType] = [] for detection_idx, (_, row) in enumerate(annotations_slice.iterrows()): row = row.to_dict() @@ -393,17 +336,32 @@ def _extract_box_detections( detections_state[detection_idx, BoundingBoxSE3Index.EXTENT] = [row["length_m"], row["width_m"], row["height_m"]] av2_detection_type = AV2SensorBoxDetectionType.deserialize(row["category"]) - detections_types.append(int(AV2_TO_DETECTION_TYPE[av2_detection_type])) + detections_types.append(AV2_TO_DETECTION_TYPE[av2_detection_type]) detections_state[:, BoundingBoxSE3Index.STATE_SE3] = convert_relative_to_absolute_se3_array( origin=ego_state_se3.rear_axle_se3, se3_array=detections_state[:, BoundingBoxSE3Index.STATE_SE3], ) - return detections_state.tolist(), detections_velocity.tolist(), detections_token, detections_types + box_detections: List[BoxDetectionSE3] = [] + for detection_idx in range(num_detections): + box_detections.append( + BoxDetectionSE3( + metadata=BoxDetectionMetadata( + detection_type=int(detections_types[detection_idx]), + timepoint=None, + track_token=detections_token[detection_idx], + confidence=None, + ), + bounding_box_se3=BoundingBoxSE3.from_array(detections_state[detection_idx]), + velocity=Vector3D.from_array(detections_velocity[detection_idx]), + ) + ) + + return BoxDetectionWrapper(box_detections=box_detections) -def _extract_ego_state(city_se3_egovehicle_df: pd.DataFrame, lidar_timestamp_ns: int) -> EgoStateSE3: +def _extract_av2_sensor_ego_state(city_se3_egovehicle_df: pd.DataFrame, lidar_timestamp_ns: int) -> EgoStateSE3: ego_state_slice = get_slice_with_timestamp_ns(city_se3_egovehicle_df, lidar_timestamp_ns) assert ( len(ego_state_slice) == 1 @@ -425,21 +383,9 @@ def _extract_ego_state(city_se3_egovehicle_df: pd.DataFrame, lidar_timestamp_ns: # TODO: Add script to calculate the dynamic state from log sequence. 
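+    # The dynamic state is zero-filled for now; deriving velocity and
+    # acceleration from the pose sequence is left to the TODO above.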
dynamic_state = DynamicStateSE3( - velocity=Vector3D( - x=0.0, - y=0.0, - z=0.0, - ), - acceleration=Vector3D( - x=0.0, - y=0.0, - z=0.0, - ), - angular_velocity=Vector3D( - x=0.0, - y=0.0, - z=0.0, - ), + velocity=Vector3D(x=0.0, y=0.0, z=0.0), + acceleration=Vector3D(x=0.0, y=0.0, z=0.0), + angular_velocity=Vector3D(x=0.0, y=0.0, z=0.0), ) return EgoStateSE3( @@ -450,36 +396,20 @@ def _extract_ego_state(city_se3_egovehicle_df: pd.DataFrame, lidar_timestamp_ns: ) -def _extract_traffic_lights() -> Tuple[List[int], List[int]]: - return [], [] - - -def _extract_scenario_tag() -> List[str]: - return ["unknown"] - - -def _extract_camera( +def _extract_av2_sensor_camera( lidar_timestamp_ns: int, - city_se3_egovehicle_df: pd.DataFrame, egovehicle_se3_sensor_df: pd.DataFrame, - ego_state_se3: EgoStateSE3, synchronization_df: pd.DataFrame, source_log_path: Path, data_converter_config: DataConverterConfig, -) -> Dict[PinholeCameraType, Union[str, bytes]]: +) -> Dict[PinholeCameraType, Tuple[Union[str, bytes], StateSE3]]: - camera_dict: Dict[PinholeCameraType, Union[str, bytes]] = { - camera_type: None for camera_type in AV2_CAMERA_TYPE_MAPPING.values() - } + camera_dict: Dict[PinholeCameraType, Tuple[Union[str, bytes], StateSE3]] = {} split = source_log_path.parent.name log_id = source_log_path.name source_dataset_dir = source_log_path.parent.parent - rear_axle_se3 = ego_state_se3.rear_axle_se3 - ego_transform = rear_axle_se3.transformation_matrix - ego_transform # TODO: Refactor this file, ie. why is the ego transform calculated but not used? - for _, row in egovehicle_se3_sensor_df.iterrows(): row = row.to_dict() if row["sensor_name"] not in AV2_CAMERA_TYPE_MAPPING: @@ -496,9 +426,7 @@ def _extract_camera( target_sensor_name=camera_name, synchronization_df=synchronization_df, ) - if relative_image_path is None: - camera_dict[camera_type] = None - else: + if relative_image_path is not None: absolute_image_path = source_dataset_dir / relative_image_path assert absolute_image_path.exists() @@ -512,12 +440,13 @@ def _extract_camera( qy=row["qy"], qz=row["qz"], ) - + camera_data = None if data_converter_config.camera_store_option == "path": - camera_dict[camera_type] = (str(relative_image_path), camera_extrinsic.tolist()) + camera_data = str(relative_image_path) elif data_converter_config.camera_store_option == "binary": with open(absolute_image_path, "rb") as f: - camera_dict[camera_type] = (f.read(), camera_extrinsic.tolist()) + camera_data = f.read() + camera_dict[camera_type] = camera_data, camera_extrinsic return camera_dict diff --git a/d123/datasets/av2/av2_data_converter_delete.py b/d123/datasets/av2/av2_data_converter_delete.py new file mode 100644 index 00000000..63a5279d --- /dev/null +++ b/d123/datasets/av2/av2_data_converter_delete.py @@ -0,0 +1,527 @@ +import gc +import hashlib +from functools import partial +from pathlib import Path +from typing import Any, Dict, List, Optional, Tuple, Union + +import numpy as np +import pandas as pd +import pyarrow as pa + +from d123.common.multithreading.worker_utils import WorkerPool, worker_map +from d123.datasets.av2.av2_constants import ( + AV2_CAMERA_TYPE_MAPPING, + AV2_TO_DETECTION_TYPE, + AV2SensorBoxDetectionType, +) +from d123.datasets.av2.av2_helper import ( + build_sensor_dataframe, + build_synchronization_dataframe, + find_closest_target_fpath, + get_slice_with_timestamp_ns, +) +from d123.datasets.av2.av2_map_conversion import convert_av2_map +from d123.datasets.raw_data_converter import DataConverterConfig, RawDataConverter +from 
d123.datatypes.scene.arrow.utils.arrow_metadata_utils import add_log_metadata_to_arrow_schema +from d123.datatypes.scene.scene_metadata import LogMetadata +from d123.datatypes.sensors.camera.pinhole_camera import ( + PinholeCameraMetadata, + PinholeCameraType, + PinholeDistortion, + PinholeIntrinsics, +) +from d123.datatypes.sensors.lidar.lidar import LiDARMetadata, LiDARType +from d123.datatypes.time.time_point import TimePoint +from d123.datatypes.vehicle_state.ego_state import DynamicStateSE3, EgoStateSE3, EgoStateSE3Index +from d123.datatypes.vehicle_state.vehicle_parameters import ( + get_av2_ford_fusion_hybrid_parameters, + rear_axle_se3_to_center_se3, +) +from d123.geometry import BoundingBoxSE3Index, StateSE3, Vector3D, Vector3DIndex +from d123.geometry.geometry_index import StateSE3Index +from d123.geometry.transform.transform_se3 import convert_relative_to_absolute_se3_array + + +def create_token(input_data: str) -> str: + # TODO: Refactor this function. + # TODO: Add a general function to create tokens from arbitrary data. + if isinstance(input_data, str): + input_data = input_data.encode("utf-8") + + hash_obj = hashlib.sha256(input_data) + return hash_obj.hexdigest()[:16] + + +class AV2SensorDataConverter(RawDataConverter): + def __init__( + self, + splits: List[str], + log_path: Union[Path, str], + data_converter_config: DataConverterConfig, + ) -> None: + super().__init__(data_converter_config) + for split in splits: + assert ( + split in self.get_available_splits() + ), f"Split {split} is not available. Available splits: {self.available_splits}" + + self._splits: List[str] = splits + self._data_root: Path = Path(log_path) + self._log_paths_per_split: Dict[str, List[Path]] = self._collect_log_paths() + self._target_dt: float = 0.1 + + def _collect_log_paths(self) -> Dict[str, List[Path]]: + log_paths_per_split: Dict[str, List[Path]] = {} + + for split in self._splits: + subsplit = split.split("_")[-1] + assert subsplit in ["train", "val", "test"] + + if "av2_sensor" in split: + log_folder = self._data_root / "sensor" / subsplit + elif "av2_lidar" in split: + log_folder = self._data_root / "lidar" / subsplit + elif "av2_motion" in split: + log_folder = self._data_root / "motion-forecasting" / subsplit + elif "av2-sensor-mini" in split: + log_folder = self._data_root / "sensor_mini" / subsplit + + log_paths_per_split[split] = list(log_folder.iterdir()) + + return log_paths_per_split + + def get_available_splits(self) -> List[str]: + return [ + "av2-sensor_train", + "av2-sensor_val", + "av2-sensor_test", + "av2-sensor-mini_train", + "av2-sensor-mini_val", + "av2-sensor-mini_test", + ] + + def convert_maps(self, worker: WorkerPool) -> None: + log_args = [ + { + "log_path": log_path, + "split": split, + } + for split, log_paths in self._log_paths_per_split.items() + for log_path in log_paths + ] + worker_map( + worker, + partial(convert_av2_map_to_gpkg, data_converter_config=self.data_converter_config), + log_args, + ) + + def convert_logs(self, worker: WorkerPool) -> None: + log_args = [ + { + "log_path": log_path, + "split": split, + } + for split, log_paths in self._log_paths_per_split.items() + for log_path in log_paths + ] + + worker_map( + worker, + partial( + convert_av2_log_to_arrow, + data_converter_config=self.data_converter_config, + ), + log_args, + ) + + +def convert_av2_map_to_gpkg( + args: List[Dict[str, Union[List[str], List[Path]]]], + data_converter_config: DataConverterConfig, +) -> List[Any]: + for log_info in args: + source_log_path: Path = 
log_info["log_path"] + split: str = log_info["split"] + + source_log_name = source_log_path.name + + map_path = data_converter_config.output_path / "maps" / split / f"{source_log_name}.gpkg" + if data_converter_config.force_map_conversion or not map_path.exists(): + map_path.unlink(missing_ok=True) + convert_av2_map(source_log_path, map_path) + return [] + + +def convert_av2_log_to_arrow( + args: List[Dict[str, Union[List[str], List[Path]]]], + data_converter_config: DataConverterConfig, +) -> List[Any]: + for log_info in args: + log_path: Path = log_info["log_path"] + split: str = log_info["split"] + + if not log_path.exists(): + raise FileNotFoundError(f"Log path {log_path} does not exist.") + + log_file_path = data_converter_config.output_path / split / f"{log_path.stem}.arrow" + + if data_converter_config.force_log_conversion or not log_file_path.exists(): + log_file_path.unlink(missing_ok=True) + if not log_file_path.parent.exists(): + log_file_path.parent.mkdir(parents=True, exist_ok=True) + + sensor_df = build_sensor_dataframe(log_path) + synchronization_df = build_synchronization_dataframe(sensor_df) + + log_metadata = LogMetadata( + dataset="av2-sensor", + split=split, + log_name=log_path.name, + location=None, # TODO: Add location information. + timestep_seconds=0.1, + vehicle_parameters=get_av2_ford_fusion_hybrid_parameters(), + camera_metadata=get_av2_camera_metadata(log_path), + lidar_metadata=get_av2_lidar_metadata(log_path), + map_has_z=True, + map_is_local=True, + ) + + schema_column_list = [ + ("token", pa.string()), + ("timestamp", pa.int64()), + ("detections_state", pa.list_(pa.list_(pa.float64(), len(BoundingBoxSE3Index)))), + ("detections_velocity", pa.list_(pa.list_(pa.float64(), len(Vector3DIndex)))), + ("detections_token", pa.list_(pa.string())), + ("detections_type", pa.list_(pa.int16())), + ("ego_states", pa.list_(pa.float64(), len(EgoStateSE3Index))), + ("traffic_light_ids", pa.list_(pa.int64())), + ("traffic_light_types", pa.list_(pa.int16())), + ("scenario_tag", pa.list_(pa.string())), + ("route_lane_group_ids", pa.list_(pa.int64())), + ] + if data_converter_config.lidar_store_option is not None: + for lidar_type in log_metadata.lidar_metadata.keys(): + if data_converter_config.lidar_store_option == "path": + schema_column_list.append((lidar_type.serialize(), pa.string())) + elif data_converter_config.lidar_store_option == "binary": + raise NotImplementedError("Binary lidar storage is not implemented.") + + if data_converter_config.camera_store_option is not None: + for camera_type in log_metadata.camera_metadata.keys(): + if data_converter_config.camera_store_option == "path": + schema_column_list.append((camera_type.serialize(), pa.string())) + + elif data_converter_config.camera_store_option == "binary": + schema_column_list.append((camera_type.serialize(), pa.binary())) + + schema_column_list.append( + (f"{camera_type.serialize()}_extrinsic", pa.list_(pa.float64(), len(StateSE3Index))) + ) + + recording_schema = pa.schema(schema_column_list) + recording_schema = add_log_metadata_to_arrow_schema(recording_schema, log_metadata) + + _write_recording_table( + sensor_df, + synchronization_df, + recording_schema, + log_file_path, + log_path, + data_converter_config, + ) + del recording_schema + gc.collect() + return [] + + +def get_av2_camera_metadata(log_path: Path) -> Dict[PinholeCameraType, PinholeCameraMetadata]: + + intrinsics_file = log_path / "calibration" / "intrinsics.feather" + intrinsics_df = pd.read_feather(intrinsics_file) + + camera_metadata: 
Dict[PinholeCameraType, PinholeCameraMetadata] = {} + for _, row in intrinsics_df.iterrows(): + row = row.to_dict() + camera_type = AV2_CAMERA_TYPE_MAPPING[row["sensor_name"]] + camera_metadata[camera_type] = PinholeCameraMetadata( + camera_type=camera_type, + width=row["width_px"], + height=row["height_px"], + intrinsics=PinholeIntrinsics( + fx=row["fx_px"], + fy=row["fy_px"], + cx=row["cx_px"], + cy=row["cy_px"], + ), + distortion=PinholeDistortion( + k1=row["k1"], + k2=row["k2"], + p1=0.0, + p2=0.0, + k3=row["k3"], + ), + ) + + return camera_metadata + + +def get_av2_lidar_metadata(log_path: Path) -> Dict[LiDARType, LiDARMetadata]: + # metadata: Dict[LiDARType, LiDARMetadata] = {} + # metadata[LiDARType.LIDAR_MERGED] = LiDARMetadata( + # lidar_type=LiDARType.LIDAR_MERGED, + # lidar_index=NuplanLidarIndex, + # extrinsic=None, # NOTE: LiDAR extrinsic are unknown + # ) + # return metadata + return {} + + +def _write_recording_table( + sensor_df: pd.DataFrame, + synchronization_df: pd.DataFrame, + recording_schema: pa.schema, + log_file_path: Path, + source_log_path: Path, + data_converter_config: DataConverterConfig, +) -> None: + + # NOTE: Similar to other datasets, we use the lidar timestamps as reference timestamps. + lidar_sensor = sensor_df.xs(key="lidar", level=2) + lidar_timestamps_ns = np.sort([int(idx_tuple[2]) for idx_tuple in lidar_sensor.index]) + + # NOTE: The annotation dataframe is not available for the test split. + annotations_df = ( + pd.read_feather(source_log_path / "annotations.feather") + if (source_log_path / "annotations.feather").exists() + else None + ) + + city_se3_egovehicle_df = pd.read_feather(source_log_path / "city_SE3_egovehicle.feather") + + egovehicle_se3_sensor_df = ( + pd.read_feather(source_log_path / "calibration" / "egovehicle_SE3_sensor.feather") + if data_converter_config.camera_store_option is not None + else None + ) + + # with pa.ipc.new_stream(str(log_file_path), recording_schema) as writer: + with pa.OSFile(str(log_file_path), "wb") as sink: + with pa.ipc.new_file(sink, recording_schema) as writer: + + for lidar_timestamp_ns in lidar_timestamps_ns: + + ego_state_se3 = _extract_ego_state(city_se3_egovehicle_df, lidar_timestamp_ns) + ( + detections_state, + detections_velocity, + detections_token, + detections_types, + ) = _extract_box_detections(annotations_df, lidar_timestamp_ns, ego_state_se3) + traffic_light_ids, traffic_light_types = _extract_traffic_lights() + route_lane_group_ids = None # TODO: Add route lane group ids extraction ? 
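+                # Illustrative sketch (assumption, not exercised by this patch): with the av2-api
+                # map loaded, nearby lane segments could seed the route ids, e.g.:
+                #   from av2.map.map_api import ArgoverseStaticMap
+                #   avm = ArgoverseStaticMap.from_map_dir(source_log_path / "map", build_raster=False)
+                #   nearby = avm.get_nearby_lane_segments(ego_xy, search_radius_m=5.0)  # ego_xy: np.ndarray (2,)
+                #   route_lane_group_ids = [segment.id for segment in nearby]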
+ row_data = { + "token": [create_token(str(lidar_timestamp_ns))], + "timestamp": [TimePoint.from_ns(int(lidar_timestamp_ns)).time_us], + "detections_state": [detections_state], + "detections_velocity": [detections_velocity], + "detections_token": [detections_token], + "detections_type": [detections_types], + "ego_states": [ego_state_se3.array.tolist()], + "traffic_light_ids": [traffic_light_ids], + "traffic_light_types": [traffic_light_types], + "scenario_tag": [_extract_scenario_tag()], + "route_lane_group_ids": [route_lane_group_ids], + } + + # TODO: add lidar data + + # if data_converter_config.lidar_store_option is not None: + # lidar_data_dict = _extract_lidar(lidar_pc, data_converter_config) + # for lidar_type, lidar_data in lidar_data_dict.items(): + # if lidar_data is not None: + # row_data[lidar_type.serialize()] = [lidar_data] + # else: + # row_data[lidar_type.serialize()] = [None] + + if data_converter_config.camera_store_option is not None: + camera_data_dict = _extract_camera( + lidar_timestamp_ns, + city_se3_egovehicle_df, + egovehicle_se3_sensor_df, + ego_state_se3, + synchronization_df, + source_log_path, + data_converter_config, + ) + for camera_type, camera_data in camera_data_dict.items(): + if camera_data is not None: + row_data[camera_type.serialize()] = [camera_data[0]] + row_data[f"{camera_type.serialize()}_extrinsic"] = [camera_data[1]] + else: + row_data[camera_type.serialize()] = [None] + row_data[f"{camera_type.serialize()}_extrinsic"] = [None] + + batch = pa.record_batch(row_data, schema=recording_schema) + writer.write_batch(batch) + del batch, row_data, detections_state, detections_velocity, detections_token, detections_types + + +def _extract_box_detections( + annotations_df: Optional[pd.DataFrame], + lidar_timestamp_ns: int, + ego_state_se3: EgoStateSE3, +) -> Tuple[List[List[float]], List[List[float]], List[str], List[int]]: + + # TODO: Extract velocity from annotations_df if available. 
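+    # Sketch (assumption): velocities could be approximated by finite-differencing each track's
+    # translation between neighboring sweeps, e.g. for a track observed at t0 and t1 (both in ns):
+    #   velocity_mps = (xyz_t1 - xyz_t0) / ((t1 - t0) * 1e-9)
+    # Until this is verified against the AV2 annotation schema, velocities are written as zeros below.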
+
+    if annotations_df is None:
+        return [], [], [], []
+
+    annotations_slice = get_slice_with_timestamp_ns(annotations_df, lidar_timestamp_ns)
+    num_detections = len(annotations_slice)
+
+    detections_state = np.zeros((num_detections, len(BoundingBoxSE3Index)), dtype=np.float64)
+    detections_velocity = np.zeros((num_detections, len(Vector3DIndex)), dtype=np.float64)
+    detections_token: List[str] = annotations_slice["track_uuid"].tolist()
+    detections_types: List[int] = []
+
+    for detection_idx, (_, row) in enumerate(annotations_slice.iterrows()):
+        row = row.to_dict()
+
+        detections_state[detection_idx, BoundingBoxSE3Index.XYZ] = [row["tx_m"], row["ty_m"], row["tz_m"]]
+        detections_state[detection_idx, BoundingBoxSE3Index.QUATERNION] = [row["qw"], row["qx"], row["qy"], row["qz"]]
+        detections_state[detection_idx, BoundingBoxSE3Index.EXTENT] = [row["length_m"], row["width_m"], row["height_m"]]
+
+        av2_detection_type = AV2SensorBoxDetectionType.deserialize(row["category"])
+        detections_types.append(int(AV2_TO_DETECTION_TYPE[av2_detection_type]))
+
+    detections_state[:, BoundingBoxSE3Index.STATE_SE3] = convert_relative_to_absolute_se3_array(
+        origin=ego_state_se3.rear_axle_se3,
+        se3_array=detections_state[:, BoundingBoxSE3Index.STATE_SE3],
+    )
+
+    return detections_state.tolist(), detections_velocity.tolist(), detections_token, detections_types
+
+
+def _extract_ego_state(city_se3_egovehicle_df: pd.DataFrame, lidar_timestamp_ns: int) -> EgoStateSE3:
+    ego_state_slice = get_slice_with_timestamp_ns(city_se3_egovehicle_df, lidar_timestamp_ns)
+    assert (
+        len(ego_state_slice) == 1
+    ), f"Expected exactly one ego state for timestamp {lidar_timestamp_ns}, got {len(ego_state_slice)}."
+
+    ego_pose_dict = ego_state_slice.iloc[0].to_dict()
+    rear_axle_pose = StateSE3(
+        x=ego_pose_dict["tx_m"],
+        y=ego_pose_dict["ty_m"],
+        z=ego_pose_dict["tz_m"],
+        qw=ego_pose_dict["qw"],
+        qx=ego_pose_dict["qx"],
+        qy=ego_pose_dict["qy"],
+        qz=ego_pose_dict["qz"],
+    )
+
+    vehicle_parameters = get_av2_ford_fusion_hybrid_parameters()
+    center = rear_axle_se3_to_center_se3(rear_axle_se3=rear_axle_pose, vehicle_parameters=vehicle_parameters)
+
+    # TODO: Add script to calculate the dynamic state from log sequence.
+    dynamic_state = DynamicStateSE3(
+        velocity=Vector3D(x=0.0, y=0.0, z=0.0),
+        acceleration=Vector3D(x=0.0, y=0.0, z=0.0),
+        angular_velocity=Vector3D(x=0.0, y=0.0, z=0.0),
+    )
+
+    return EgoStateSE3(
+        center_se3=center,
+        dynamic_state_se3=dynamic_state,
+        vehicle_parameters=vehicle_parameters,
+        timepoint=None,
+    )
+
+
+def _extract_traffic_lights() -> Tuple[List[int], List[int]]:
+    return [], []
+
+
+def _extract_scenario_tag() -> List[str]:
+    return ["unknown"]
+
+
+def _extract_camera(
+    lidar_timestamp_ns: int,
+    city_se3_egovehicle_df: pd.DataFrame,
+    egovehicle_se3_sensor_df: pd.DataFrame,
+    ego_state_se3: EgoStateSE3,
+    synchronization_df: pd.DataFrame,
+    source_log_path: Path,
+    data_converter_config: DataConverterConfig,
+) -> Dict[PinholeCameraType, Optional[Tuple[Union[str, bytes], List[float]]]]:
+
+    camera_dict: Dict[PinholeCameraType, Optional[Tuple[Union[str, bytes], List[float]]]] = {
+        camera_type: None for camera_type in AV2_CAMERA_TYPE_MAPPING.values()
+    }
+    split = source_log_path.parent.name
+    log_id = source_log_path.name
+
+    source_dataset_dir = source_log_path.parent.parent
+
+    rear_axle_se3 = ego_state_se3.rear_axle_se3
+    ego_transform = rear_axle_se3.transformation_matrix  # TODO: Refactor this file; ego_transform is computed but currently unused.
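+    # Sketch (assumption): ego_transform is presumably intended for re-expressing the camera
+    # extrinsic at the image timestamp, by interpolating city_SE3_egovehicle, e.g.:
+    #   city_se3_ego_img = interpolate_pose(city_se3_egovehicle_df, image_timestamp_ns)  # hypothetical helper
+    #   camera_in_ego_at_lidar = np.linalg.inv(ego_transform) @ city_se3_ego_img @ ego_se3_camera
+    # compare the rolling-shutter correction in the nuPlan converter further below.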
+ + for _, row in egovehicle_se3_sensor_df.iterrows(): + row = row.to_dict() + if row["sensor_name"] not in AV2_CAMERA_TYPE_MAPPING: + continue + + camera_name = row["sensor_name"] + camera_type = AV2_CAMERA_TYPE_MAPPING[camera_name] + + relative_image_path = find_closest_target_fpath( + split=split, + log_id=log_id, + src_sensor_name="lidar", + src_timestamp_ns=lidar_timestamp_ns, + target_sensor_name=camera_name, + synchronization_df=synchronization_df, + ) + if relative_image_path is None: + camera_dict[camera_type] = None + else: + absolute_image_path = source_dataset_dir / relative_image_path + assert absolute_image_path.exists() + + # TODO: Adjust for finer IMU timestamps to correct the camera extrinsic. + camera_extrinsic = StateSE3( + x=row["tx_m"], + y=row["ty_m"], + z=row["tz_m"], + qw=row["qw"], + qx=row["qx"], + qy=row["qy"], + qz=row["qz"], + ) + + if data_converter_config.camera_store_option == "path": + camera_dict[camera_type] = (str(relative_image_path), camera_extrinsic.tolist()) + elif data_converter_config.camera_store_option == "binary": + with open(absolute_image_path, "rb") as f: + camera_dict[camera_type] = (f.read(), camera_extrinsic.tolist()) + + return camera_dict + + +def _extract_lidar(lidar_pc, data_converter_config: DataConverterConfig) -> Dict[LiDARType, Optional[str]]: + # TODO: Implement this function to extract lidar data. + return {} diff --git a/d123/datasets/nuplan/nuplan_data_converter.py b/d123/datasets/nuplan/nuplan_data_converter.py index e0029dd2..7beb3f73 100644 --- a/d123/datasets/nuplan/nuplan_data_converter.py +++ b/d123/datasets/nuplan/nuplan_data_converter.py @@ -3,22 +3,27 @@ import pickle from functools import partial from pathlib import Path -from typing import Any, Dict, Final, List, Literal, Optional, Tuple, Union +from typing import Any, Dict, Final, List, Optional, Tuple, Union import numpy as np -import pyarrow as pa import yaml import d123.datasets.nuplan.utils as nuplan_utils from d123.common.multithreading.worker_utils import WorkerPool, worker_map -from d123.common.utils.arrow_helper import open_arrow_table, write_arrow_table from d123.common.utils.dependencies import check_dependencies from d123.datasets.nuplan.nuplan_map_conversion import MAP_LOCATIONS, NuPlanMapConverter from d123.datasets.raw_data_converter import DataConverterConfig, RawDataConverter +from d123.datasets.utils.arrow_ipc_writer import ArrowLogWriter from d123.datasets.utils.sensor.lidar_index_registry import NuplanLidarIndex -from d123.datatypes.detections.detection import TrafficLightStatus +from d123.datatypes.detections.detection import ( + BoxDetectionMetadata, + BoxDetectionSE3, + BoxDetectionWrapper, + TrafficLightDetection, + TrafficLightDetectionWrapper, + TrafficLightStatus, +) from d123.datatypes.detections.detection_types import DetectionType -from d123.datatypes.scene.arrow.utils.arrow_metadata_utils import add_log_metadata_to_arrow_schema from d123.datatypes.scene.scene_metadata import LogMetadata from d123.datatypes.sensors.camera.pinhole_camera import ( PinholeCameraMetadata, @@ -28,13 +33,12 @@ ) from d123.datatypes.sensors.lidar.lidar import LiDARMetadata, LiDARType from d123.datatypes.time.time_point import TimePoint -from d123.datatypes.vehicle_state.ego_state import DynamicStateSE3, EgoStateSE3, EgoStateSE3Index +from d123.datatypes.vehicle_state.ego_state import DynamicStateSE3, EgoStateSE3 from d123.datatypes.vehicle_state.vehicle_parameters import ( get_nuplan_chrysler_pacifica_parameters, rear_axle_se3_to_center_se3, ) -from 
d123.geometry import BoundingBoxSE3, BoundingBoxSE3Index, StateSE3, Vector3D, Vector3DIndex -from d123.geometry.geometry_index import StateSE3Index +from d123.geometry import BoundingBoxSE3, StateSE3, Vector3D from d123.geometry.rotation import EulerAngles from d123.geometry.utils.constants import DEFAULT_PITCH, DEFAULT_ROLL @@ -191,10 +195,10 @@ def convert_nuplan_log_to_arrow( if not log_path.exists(): raise FileNotFoundError(f"Log path {log_path} does not exist.") - log_db = NuPlanDB(NUPLAN_DATA_ROOT, str(log_path), None) log_file_path = data_converter_config.output_path / split / f"{log_path.stem}.arrow" if data_converter_config.force_log_conversion or not log_file_path.exists(): + log_db = NuPlanDB(NUPLAN_DATA_ROOT, str(log_path), None) log_file_path.unlink(missing_ok=True) if not log_file_path.parent.exists(): @@ -213,45 +217,18 @@ def convert_nuplan_log_to_arrow( map_is_local=False, ) - schema_column_list = [ - ("token", pa.string()), - ("timestamp", pa.int64()), - ("detections_state", pa.list_(pa.list_(pa.float64(), len(BoundingBoxSE3Index)))), - ("detections_velocity", pa.list_(pa.list_(pa.float64(), len(Vector3DIndex)))), - ("detections_token", pa.list_(pa.string())), - ("detections_type", pa.list_(pa.int16())), - ("ego_states", pa.list_(pa.float64(), len(EgoStateSE3Index))), - ("traffic_light_ids", pa.list_(pa.int64())), - ("traffic_light_types", pa.list_(pa.int16())), - ("scenario_tag", pa.list_(pa.string())), - ("route_lane_group_ids", pa.list_(pa.int64())), - ] - if data_converter_config.lidar_store_option is not None: - for lidar_type in log_metadata.lidar_metadata.keys(): - if data_converter_config.lidar_store_option == "path": - schema_column_list.append((lidar_type.serialize(), pa.string())) - elif data_converter_config.lidar_store_option == "binary": - raise NotImplementedError("Binary lidar storage is not implemented.") - - if data_converter_config.camera_store_option is not None: - for camera_type in log_metadata.camera_metadata.keys(): - if data_converter_config.camera_store_option == "path": - schema_column_list.append((camera_type.serialize(), pa.string())) - schema_column_list.append( - (f"{camera_type.serialize()}_extrinsic", pa.list_(pa.float64(), len(StateSE3Index))) - ) - - elif data_converter_config.camera_store_option == "binary": - raise NotImplementedError("Binary camera storage is not implemented.") - - recording_schema = pa.schema(schema_column_list) - recording_schema = add_log_metadata_to_arrow_schema(recording_schema, log_metadata) - _write_recording_table(log_db, recording_schema, log_file_path, log_path, data_converter_config) + log_writer: ArrowLogWriter = ArrowLogWriter( + log_path=log_file_path, + data_converter_config=data_converter_config, + log_metadata=log_metadata, + ) + + _write_recording_table(log_db, log_writer, log_file_path, log_path, data_converter_config) # Detach and remove log_db, for memory management log_db.detach_tables() log_db.remove_ref() - del recording_schema, log_db + del log_writer, log_db gc.collect() return [] @@ -262,10 +239,10 @@ def get_nuplan_camera_metadata(log_path: Path) -> Dict[PinholeCameraType, Pinhol def _get_camera_metadata(camera_type: PinholeCameraType) -> PinholeCameraMetadata: cam = list(get_cameras(log_path, [str(NUPLAN_CAMERA_TYPES[camera_type].value)]))[0] - intrinsics_camera_matrix = np.array(pickle.loads(cam.intrinsic)) # array of shape (3, 3) + intrinsics_camera_matrix = np.array(pickle.loads(cam.intrinsic), dtype=np.float64) # array of shape (3, 3) intrinsic = 
PinholeIntrinsics.from_camera_matrix(intrinsics_camera_matrix) - distortion_array = np.array(pickle.loads(cam.distortion)) # array of shape (5,) + distortion_array = np.array(pickle.loads(cam.distortion), dtype=np.float64) # array of shape (5,) distortion = PinholeDistortion.from_array(distortion_array, copy=False) return PinholeCameraMetadata( @@ -295,103 +272,37 @@ def get_nuplan_lidar_metadata() -> Dict[LiDARType, LiDARMetadata]: def _write_recording_table( log_db: NuPlanDB, - recording_schema: pa.schema, + log_writer: ArrowLogWriter, log_file_path: Path, source_log_path: Path, data_converter_config: DataConverterConfig, ) -> None: - compression: Optional[Literal["lz4", "zstd"]] = "zstd" - options = pa.ipc.IpcWriteOptions(compression=compression) - - with pa.OSFile(str(log_file_path), "wb") as sink: - with pa.ipc.new_file(sink, recording_schema, options=options) as writer: - step_interval: float = int(TARGET_DT / NUPLAN_DT) - for lidar_pc in log_db.lidar_pc[::step_interval]: - lidar_pc_token: str = lidar_pc.token - ( - detections_state, - detections_velocity, - detections_token, - detections_types, - ) = _extract_detections(lidar_pc) - traffic_light_ids, traffic_light_types = _extract_traffic_lights(log_db, lidar_pc_token) - route_lane_group_ids = [ - int(roadblock_id) - for roadblock_id in str(lidar_pc.scene.roadblock_ids).split(" ") - if len(roadblock_id) > 0 - ] - - row_data = { - "token": [lidar_pc_token], - "timestamp": [lidar_pc.timestamp], - "detections_state": [detections_state], - "detections_velocity": [detections_velocity], - "detections_token": [detections_token], - "detections_type": [detections_types], - "ego_states": [_extract_ego_state(lidar_pc)], - "traffic_light_ids": [traffic_light_ids], - "traffic_light_types": [traffic_light_types], - "scenario_tag": [_extract_scenario_tag(log_db, lidar_pc_token)], - "route_lane_group_ids": [route_lane_group_ids], - } - - if data_converter_config.lidar_store_option is not None: - lidar_data_dict = _extract_lidar(lidar_pc, data_converter_config) - for lidar_type, lidar_data in lidar_data_dict.items(): - if lidar_data is not None: - row_data[lidar_type.serialize()] = [lidar_data] - else: - row_data[lidar_type.serialize()] = [None] - - if data_converter_config.camera_store_option is not None: - camera_data_dict = _extract_camera(log_db, lidar_pc, source_log_path, data_converter_config) - for camera_type, camera_data in camera_data_dict.items(): - if camera_data is not None: - row_data[camera_type.serialize()] = [camera_data[0]] - row_data[f"{camera_type.serialize()}_extrinsic"] = [camera_data[1]] - else: - row_data[camera_type.serialize()] = [None] - row_data[f"{camera_type.serialize()}_extrinsic"] = [None] - - batch = pa.record_batch(row_data, schema=recording_schema) - writer.write_batch(batch) - del batch, row_data, detections_state, detections_velocity, detections_token, detections_types - - if SORT_BY_TIMESTAMP: - recording_table = open_arrow_table(log_file_path) - recording_table = recording_table.sort_by([("timestamp", "ascending")]) - write_arrow_table(recording_table, log_file_path) - - -def _extract_detections(lidar_pc: LidarPc) -> Tuple[List[List[float]], List[List[float]], List[str], List[int]]: - detections_state: List[List[float]] = [] - detections_velocity: List[List[float]] = [] - detections_token: List[str] = [] - detections_types: List[int] = [] - for lidar_box in lidar_pc.lidar_boxes: - lidar_box: LidarBox - lidar_quaternion = EulerAngles(roll=DEFAULT_ROLL, pitch=DEFAULT_PITCH, yaw=lidar_box.yaw).quaternion - center 
= StateSE3(
-            x=lidar_box.x,
-            y=lidar_box.y,
-            z=lidar_box.z,
-            qw=lidar_quaternion.qw,
-            qx=lidar_quaternion.qx,
-            qy=lidar_quaternion.qy,
-            qz=lidar_quaternion.qz,
+    step_interval: int = int(TARGET_DT / NUPLAN_DT)
+    for lidar_pc in log_db.lidar_pc[::step_interval]:
+        lidar_pc_token: str = lidar_pc.token
+
+        log_writer.add_row(
+            token=lidar_pc_token,
+            timestamp=TimePoint.from_us(lidar_pc.timestamp),
+            ego_state=_extract_nuplan_ego_state(lidar_pc),
+            box_detections=_extract_nuplan_box_detections(lidar_pc),
+            traffic_lights=_extract_nuplan_traffic_lights(log_db, lidar_pc_token),
+            cameras=_extract_nuplan_cameras(
+                log_db=log_db,
+                lidar_pc=lidar_pc,
+                source_log_path=source_log_path,
+                data_converter_config=data_converter_config,
+            ),
+            lidars=_extract_nuplan_lidars(lidar_pc, data_converter_config),
+            scenario_tags=_extract_nuplan_scenario_tag(log_db, lidar_pc_token),
+            route_lane_group_ids=_extract_nuplan_route_lane_group_ids(lidar_pc),
         )
-        bounding_box_se3 = BoundingBoxSE3(center, lidar_box.length, lidar_box.width, lidar_box.height)
-        detections_state.append(bounding_box_se3.tolist())
-        detections_velocity.append(lidar_box.velocity)
-        detections_token.append(lidar_box.track_token)
-        detections_types.append(int(NUPLAN_DETECTION_NAME_DICT[lidar_box.category.name]))
+    log_writer.close()

-    return detections_state, detections_velocity, detections_token, detections_types
-

-def _extract_ego_state(lidar_pc: LidarPc) -> List[float]:
+def _extract_nuplan_ego_state(lidar_pc: LidarPc) -> EgoStateSE3:

     vehicle_parameters = get_nuplan_chrysler_pacifica_parameters()
     rear_axle_pose = StateSE3(
@@ -425,35 +336,63 @@ def _extract_ego_state(lidar_pc: LidarPc) -> List[float]:
         center_se3=center,
         dynamic_state_se3=dynamic_state,
         vehicle_parameters=vehicle_parameters,
-        timepoint=None,
-    ).array.tolist()
+        timepoint=None,  # NOTE: Timepoint is not needed during writing, set to None
+    )


-def _extract_traffic_lights(log_db: NuPlanDB, lidar_pc_token: str) -> Tuple[List[int], List[int]]:
-    traffic_light_ids: List[int] = []
-    traffic_light_types: List[int] = []
-    traffic_lights = log_db.traffic_light_status.select_many(lidar_pc_token=lidar_pc_token)
-    for traffic_light in traffic_lights:
-        traffic_light_ids.append(int(traffic_light.lane_connector_id))
-        traffic_light_types.append(int(NUPLAN_TRAFFIC_STATUS_DICT[traffic_light.status].value))
-    return traffic_light_ids, traffic_light_types
+def _extract_nuplan_box_detections(lidar_pc: LidarPc) -> BoxDetectionWrapper:
+    box_detections: List[BoxDetectionSE3] = []

+    for lidar_box in lidar_pc.lidar_boxes:
+        lidar_box: LidarBox

-def _extract_scenario_tag(log_db: NuPlanDB, lidar_pc_token: str) -> List[str]:
-    scenario_tags = [
-        scenario_tag.type for scenario_tag in log_db.scenario_tag.select_many(lidar_pc_token=lidar_pc_token)
+        box_quaternion = EulerAngles(roll=DEFAULT_ROLL, pitch=DEFAULT_PITCH, yaw=lidar_box.yaw).quaternion
+        box_center = StateSE3(
+            x=lidar_box.x,
+            y=lidar_box.y,
+            z=lidar_box.z,
+            qw=box_quaternion.qw,
+            qx=box_quaternion.qx,
+            qy=box_quaternion.qy,
+            qz=box_quaternion.qz,
+        )
+        bounding_box_se3 = BoundingBoxSE3(box_center, lidar_box.length, lidar_box.width, lidar_box.height)
+        box_detections.append(
+            BoxDetectionSE3(
+                metadata=BoxDetectionMetadata(
+                    detection_type=NUPLAN_DETECTION_NAME_DICT[lidar_box.category.name],
+                    timepoint=None,  # NOTE: Timepoint is not needed during writing, set to None
+                    track_token=lidar_box.track_token,  # NOTE: use the track token, not the per-frame box token
+                    confidence=None,  # NOTE: Not currently written, requires refactoring
+                ),
+                bounding_box_se3=bounding_box_se3,
+                velocity=Vector3D(x=lidar_box.vx, y=lidar_box.vy, z=lidar_box.vz),
+            )
+        )
+
+    return BoxDetectionWrapper(box_detections=box_detections)
+
+
+def _extract_nuplan_traffic_lights(log_db: NuPlanDB, lidar_pc_token: str) -> TrafficLightDetectionWrapper:
+
+    traffic_lights_detections: List[TrafficLightDetection] = [
+        TrafficLightDetection(
+            timepoint=None,  # NOTE: Timepoint is not needed during writing, set to None
+            lane_id=int(traffic_light.lane_connector_id),
+            status=NUPLAN_TRAFFIC_STATUS_DICT[traffic_light.status],
+        )
+        for traffic_light in log_db.traffic_light_status.select_many(lidar_pc_token=lidar_pc_token)
     ]
-    if len(scenario_tags) == 0:
-        scenario_tags = ["unknown"]
-    return scenario_tags
+    return TrafficLightDetectionWrapper(traffic_light_detections=traffic_lights_detections)


-def _extract_camera(
+
+def _extract_nuplan_cameras(
     log_db: NuPlanDB,
     lidar_pc: LidarPc,
     source_log_path: Path,
     data_converter_config: DataConverterConfig,
-) -> Dict[PinholeCameraType, Union[str, bytes]]:
+) -> Dict[PinholeCameraType, Tuple[Union[str, bytes], StateSE3]]:

     camera_dict: Dict[str, Union[str, bytes]] = {}
     sensor_root = NUPLAN_DATA_ROOT / "nuplan-v1.1" / "sensor_blobs"
@@ -461,8 +400,8 @@

     for camera_type, camera_channel in NUPLAN_CAMERA_TYPES.items():
         camera_data: Optional[Union[str, bytes]] = None
-        c2e: Optional[List[float]] = None
+        extrinsic: Optional[StateSE3] = None  # NOTE: initialized so the tuple below is well-defined when no image matches
         image_class = list(get_images_from_lidar_tokens(source_log_path, [lidar_pc.token], [str(camera_channel.value)]))
+
         if len(image_class) != 0:
             image = image_class[0]
             filename_jpg = sensor_root / image.filename_jpg
@@ -485,17 +424,19 @@
             extrinsic = StateSE3.from_transformation_matrix(c2e)

             if data_converter_config.camera_store_option == "path":
-                camera_data = str(filename_jpg), extrinsic.tolist()
+                camera_data = str(filename_jpg)
             elif data_converter_config.camera_store_option == "binary":
                 with open(filename_jpg, "rb") as f:
-                    camera_data = f.read(), extrinsic.tolist()
+                    camera_data = f.read()

-        camera_dict[camera_type] = camera_data
+        camera_dict[camera_type] = camera_data, extrinsic

     return camera_dict


-def _extract_lidar(lidar_pc: LidarPc, data_converter_config: DataConverterConfig) -> Dict[LiDARType, Optional[str]]:
+def _extract_nuplan_lidars(
+    lidar_pc: LidarPc, data_converter_config: DataConverterConfig
+) -> Dict[LiDARType, Optional[str]]:

     lidar: Optional[str] = None
     lidar_full_path = NUPLAN_DATA_ROOT / "nuplan-v1.1" / "sensor_blobs" / lidar_pc.filename
@@ -503,3 +444,16 @@
         lidar = lidar_pc.filename

     return {LiDARType.LIDAR_MERGED: lidar}
+
+
+def _extract_nuplan_scenario_tag(log_db: NuPlanDB, lidar_pc_token: str) -> List[str]:
+    scenario_tags = [
+        scenario_tag.type for scenario_tag in log_db.scenario_tag.select_many(lidar_pc_token=lidar_pc_token)
+    ]
+    if len(scenario_tags) == 0:
+        scenario_tags = ["unknown"]
+    return scenario_tags
+
+
+def _extract_nuplan_route_lane_group_ids(lidar_pc: LidarPc) -> List[int]:
+    return [int(roadblock_id) for roadblock_id in str(lidar_pc.scene.roadblock_ids).split(" ") if len(roadblock_id) > 0]
diff --git a/d123/datasets/nuplan/nuplan_data_converter_delete.py b/d123/datasets/nuplan/nuplan_data_converter_delete.py
new file mode 100644
index 00000000..e0029dd2
--- /dev/null
+++ b/d123/datasets/nuplan/nuplan_data_converter_delete.py
@@ -0,0 +1,505 @@
+import gc
+import os
+import pickle
+from functools import partial
+from pathlib import Path
+from typing import Any, Dict, Final, List, Literal, Optional, Tuple, 
Union + +import numpy as np +import pyarrow as pa +import yaml + +import d123.datasets.nuplan.utils as nuplan_utils +from d123.common.multithreading.worker_utils import WorkerPool, worker_map +from d123.common.utils.arrow_helper import open_arrow_table, write_arrow_table +from d123.common.utils.dependencies import check_dependencies +from d123.datasets.nuplan.nuplan_map_conversion import MAP_LOCATIONS, NuPlanMapConverter +from d123.datasets.raw_data_converter import DataConverterConfig, RawDataConverter +from d123.datasets.utils.sensor.lidar_index_registry import NuplanLidarIndex +from d123.datatypes.detections.detection import TrafficLightStatus +from d123.datatypes.detections.detection_types import DetectionType +from d123.datatypes.scene.arrow.utils.arrow_metadata_utils import add_log_metadata_to_arrow_schema +from d123.datatypes.scene.scene_metadata import LogMetadata +from d123.datatypes.sensors.camera.pinhole_camera import ( + PinholeCameraMetadata, + PinholeCameraType, + PinholeDistortion, + PinholeIntrinsics, +) +from d123.datatypes.sensors.lidar.lidar import LiDARMetadata, LiDARType +from d123.datatypes.time.time_point import TimePoint +from d123.datatypes.vehicle_state.ego_state import DynamicStateSE3, EgoStateSE3, EgoStateSE3Index +from d123.datatypes.vehicle_state.vehicle_parameters import ( + get_nuplan_chrysler_pacifica_parameters, + rear_axle_se3_to_center_se3, +) +from d123.geometry import BoundingBoxSE3, BoundingBoxSE3Index, StateSE3, Vector3D, Vector3DIndex +from d123.geometry.geometry_index import StateSE3Index +from d123.geometry.rotation import EulerAngles +from d123.geometry.utils.constants import DEFAULT_PITCH, DEFAULT_ROLL + +check_dependencies(["nuplan", "sqlalchemy"], "nuplan") +from nuplan.database.nuplan_db.nuplan_scenario_queries import get_cameras, get_images_from_lidar_tokens +from nuplan.database.nuplan_db_orm.ego_pose import EgoPose +from nuplan.database.nuplan_db_orm.lidar_box import LidarBox +from nuplan.database.nuplan_db_orm.lidar_pc import LidarPc +from nuplan.database.nuplan_db_orm.nuplandb import NuPlanDB +from nuplan.planning.simulation.observation.observation_type import CameraChannel +from sqlalchemy import func + +TARGET_DT: Final[float] = 0.1 +NUPLAN_DT: Final[float] = 0.05 +SORT_BY_TIMESTAMP: Final[bool] = True + +NUPLAN_TRAFFIC_STATUS_DICT: Final[Dict[str, TrafficLightStatus]] = { + "green": TrafficLightStatus.GREEN, + "red": TrafficLightStatus.RED, + "unknown": TrafficLightStatus.UNKNOWN, +} +NUPLAN_DETECTION_NAME_DICT = { + "vehicle": DetectionType.VEHICLE, + "bicycle": DetectionType.BICYCLE, + "pedestrian": DetectionType.PEDESTRIAN, + "traffic_cone": DetectionType.TRAFFIC_CONE, + "barrier": DetectionType.BARRIER, + "czone_sign": DetectionType.CZONE_SIGN, + "generic_object": DetectionType.GENERIC_OBJECT, +} + +NUPLAN_CAMERA_TYPES = { + PinholeCameraType.CAM_F0: CameraChannel.CAM_F0, + PinholeCameraType.CAM_B0: CameraChannel.CAM_B0, + PinholeCameraType.CAM_L0: CameraChannel.CAM_L0, + PinholeCameraType.CAM_L1: CameraChannel.CAM_L1, + PinholeCameraType.CAM_L2: CameraChannel.CAM_L2, + PinholeCameraType.CAM_R0: CameraChannel.CAM_R0, + PinholeCameraType.CAM_R1: CameraChannel.CAM_R1, + PinholeCameraType.CAM_R2: CameraChannel.CAM_R2, +} + +NUPLAN_DATA_ROOT = Path(os.environ["NUPLAN_DATA_ROOT"]) +NUPLAN_ROLLING_SHUTTER_S: Final[TimePoint] = TimePoint.from_s(1 / 60) + + +def create_splits_logs() -> Dict[str, List[str]]: + yaml_filepath = Path(nuplan_utils.__path__[0]) / "log_splits.yaml" + with open(yaml_filepath, "r") as stream: + splits = 
yaml.safe_load(stream) + + return splits["log_splits"] + + +class NuplanDataConverter(RawDataConverter): + def __init__( + self, + splits: List[str], + log_path: Union[Path, str], + data_converter_config: DataConverterConfig, + ) -> None: + super().__init__(data_converter_config) + for split in splits: + assert ( + split in self.get_available_splits() + ), f"Split {split} is not available. Available splits: {self.available_splits}" + + self._splits: List[str] = splits + self._log_path: Path = Path(log_path) + self._log_paths_per_split: Dict[str, List[Path]] = self._collect_log_paths() + self._target_dt: float = 0.1 + + def _collect_log_paths(self) -> Dict[str, List[Path]]: + # NOTE: the nuplan mini folder has an internal train, val, test structure, all stored in "mini". + # The complete dataset is saved in the "trainval" folder (train and val), or in the "test" folder (for test). + # subsplit_log_names: Dict[str, List[str]] = create_splits_logs() + log_paths_per_split: Dict[str, List[Path]] = {} + + for split in self._splits: + subsplit = split.split("_")[-1] + assert subsplit in ["train", "val", "test"] + if split in ["nuplan_train", "nuplan_val"]: + log_path = NUPLAN_DATA_ROOT / "nuplan-v1.1" / "splits" / "trainval" + elif split in ["nuplan_test"]: + log_path = NUPLAN_DATA_ROOT / "nuplan-v1.1" / "splits" / "test" + elif split in ["nuplan_mini_train", "nuplan_mini_val", "nuplan_mini_test"]: + log_path = NUPLAN_DATA_ROOT / "nuplan-v1.1" / "splits" / "mini" + elif split == "nuplan_private_test": + log_path = NUPLAN_DATA_ROOT / "nuplan-v1.1" / "splits" / "private_test" + + all_log_files_in_path = [log_file for log_file in log_path.glob("*.db")] + all_log_names = set([str(log_file.stem) for log_file in all_log_files_in_path]) + # set(subsplit_log_names[subsplit]) + # log_paths = [log_path / f"{log_name}.db" for log_name in list(all_log_names & split_log_names)] + log_paths = [log_path / f"{log_name}.db" for log_name in list(all_log_names)] + log_paths_per_split[split] = log_paths + + return log_paths_per_split + + def get_available_splits(self) -> List[str]: + return [ + "nuplan_train", + "nuplan_val", + "nuplan_test", + "nuplan_mini_train", + "nuplan_mini_val", + "nuplan_mini_test", + "nuplan_private_test", # TODO: remove, not publicly available + ] + + def convert_maps(self, worker: WorkerPool) -> None: + worker_map( + worker, + partial(convert_nuplan_map_to_gpkg, data_converter_config=self.data_converter_config), + list(MAP_LOCATIONS), + ) + + def convert_logs(self, worker: WorkerPool) -> None: + log_args = [ + { + "log_path": log_path, + "split": split, + } + for split, log_paths in self._log_paths_per_split.items() + for log_path in log_paths + ] + + worker_map( + worker, + partial( + convert_nuplan_log_to_arrow, + data_converter_config=self.data_converter_config, + ), + log_args, + ) + + +def convert_nuplan_map_to_gpkg(map_names: List[str], data_converter_config: DataConverterConfig) -> List[Any]: + for map_name in map_names: + map_path = data_converter_config.output_path / "maps" / f"nuplan_{map_name}.gpkg" + if data_converter_config.force_map_conversion or not map_path.exists(): + map_path.unlink(missing_ok=True) + NuPlanMapConverter(data_converter_config.output_path / "maps").convert(map_name=map_name) + return [] + + +def convert_nuplan_log_to_arrow( + args: List[Dict[str, Union[List[str], List[Path]]]], data_converter_config: DataConverterConfig +) -> List[Any]: + for log_info in args: + log_path: Path = log_info["log_path"] + split: str = log_info["split"] + + if not 
log_path.exists(): + raise FileNotFoundError(f"Log path {log_path} does not exist.") + + log_db = NuPlanDB(NUPLAN_DATA_ROOT, str(log_path), None) + log_file_path = data_converter_config.output_path / split / f"{log_path.stem}.arrow" + + if data_converter_config.force_log_conversion or not log_file_path.exists(): + + log_file_path.unlink(missing_ok=True) + if not log_file_path.parent.exists(): + log_file_path.parent.mkdir(parents=True, exist_ok=True) + + log_metadata = LogMetadata( + dataset="nuplan", + split=split, + log_name=log_db.log_name, + location=log_db.log.map_version, + timestep_seconds=TARGET_DT, + vehicle_parameters=get_nuplan_chrysler_pacifica_parameters(), + camera_metadata=get_nuplan_camera_metadata(log_path), + lidar_metadata=get_nuplan_lidar_metadata(), + map_has_z=False, + map_is_local=False, + ) + + schema_column_list = [ + ("token", pa.string()), + ("timestamp", pa.int64()), + ("detections_state", pa.list_(pa.list_(pa.float64(), len(BoundingBoxSE3Index)))), + ("detections_velocity", pa.list_(pa.list_(pa.float64(), len(Vector3DIndex)))), + ("detections_token", pa.list_(pa.string())), + ("detections_type", pa.list_(pa.int16())), + ("ego_states", pa.list_(pa.float64(), len(EgoStateSE3Index))), + ("traffic_light_ids", pa.list_(pa.int64())), + ("traffic_light_types", pa.list_(pa.int16())), + ("scenario_tag", pa.list_(pa.string())), + ("route_lane_group_ids", pa.list_(pa.int64())), + ] + if data_converter_config.lidar_store_option is not None: + for lidar_type in log_metadata.lidar_metadata.keys(): + if data_converter_config.lidar_store_option == "path": + schema_column_list.append((lidar_type.serialize(), pa.string())) + elif data_converter_config.lidar_store_option == "binary": + raise NotImplementedError("Binary lidar storage is not implemented.") + + if data_converter_config.camera_store_option is not None: + for camera_type in log_metadata.camera_metadata.keys(): + if data_converter_config.camera_store_option == "path": + schema_column_list.append((camera_type.serialize(), pa.string())) + schema_column_list.append( + (f"{camera_type.serialize()}_extrinsic", pa.list_(pa.float64(), len(StateSE3Index))) + ) + + elif data_converter_config.camera_store_option == "binary": + raise NotImplementedError("Binary camera storage is not implemented.") + + recording_schema = pa.schema(schema_column_list) + recording_schema = add_log_metadata_to_arrow_schema(recording_schema, log_metadata) + _write_recording_table(log_db, recording_schema, log_file_path, log_path, data_converter_config) + + # Detach and remove log_db, for memory management + log_db.detach_tables() + log_db.remove_ref() + del recording_schema, log_db + gc.collect() + + return [] + + +def get_nuplan_camera_metadata(log_path: Path) -> Dict[PinholeCameraType, PinholeCameraMetadata]: + + def _get_camera_metadata(camera_type: PinholeCameraType) -> PinholeCameraMetadata: + cam = list(get_cameras(log_path, [str(NUPLAN_CAMERA_TYPES[camera_type].value)]))[0] + + intrinsics_camera_matrix = np.array(pickle.loads(cam.intrinsic)) # array of shape (3, 3) + intrinsic = PinholeIntrinsics.from_camera_matrix(intrinsics_camera_matrix) + + distortion_array = np.array(pickle.loads(cam.distortion)) # array of shape (5,) + distortion = PinholeDistortion.from_array(distortion_array, copy=False) + + return PinholeCameraMetadata( + camera_type=camera_type, + width=cam.width, + height=cam.height, + intrinsics=intrinsic, + distortion=distortion, + ) + + log_cam_infos: Dict[str, PinholeCameraMetadata] = {} + for camera_type in 
NUPLAN_CAMERA_TYPES.keys(): + log_cam_infos[camera_type] = _get_camera_metadata(camera_type) + + return log_cam_infos + + +def get_nuplan_lidar_metadata() -> Dict[LiDARType, LiDARMetadata]: + metadata: Dict[LiDARType, LiDARMetadata] = {} + metadata[LiDARType.LIDAR_MERGED] = LiDARMetadata( + lidar_type=LiDARType.LIDAR_MERGED, + lidar_index=NuplanLidarIndex, + extrinsic=None, # NOTE: LiDAR extrinsic are unknown + ) + return metadata + + +def _write_recording_table( + log_db: NuPlanDB, + recording_schema: pa.schema, + log_file_path: Path, + source_log_path: Path, + data_converter_config: DataConverterConfig, +) -> None: + compression: Optional[Literal["lz4", "zstd"]] = "zstd" + options = pa.ipc.IpcWriteOptions(compression=compression) + + with pa.OSFile(str(log_file_path), "wb") as sink: + with pa.ipc.new_file(sink, recording_schema, options=options) as writer: + step_interval: float = int(TARGET_DT / NUPLAN_DT) + for lidar_pc in log_db.lidar_pc[::step_interval]: + lidar_pc_token: str = lidar_pc.token + ( + detections_state, + detections_velocity, + detections_token, + detections_types, + ) = _extract_detections(lidar_pc) + traffic_light_ids, traffic_light_types = _extract_traffic_lights(log_db, lidar_pc_token) + route_lane_group_ids = [ + int(roadblock_id) + for roadblock_id in str(lidar_pc.scene.roadblock_ids).split(" ") + if len(roadblock_id) > 0 + ] + + row_data = { + "token": [lidar_pc_token], + "timestamp": [lidar_pc.timestamp], + "detections_state": [detections_state], + "detections_velocity": [detections_velocity], + "detections_token": [detections_token], + "detections_type": [detections_types], + "ego_states": [_extract_ego_state(lidar_pc)], + "traffic_light_ids": [traffic_light_ids], + "traffic_light_types": [traffic_light_types], + "scenario_tag": [_extract_scenario_tag(log_db, lidar_pc_token)], + "route_lane_group_ids": [route_lane_group_ids], + } + + if data_converter_config.lidar_store_option is not None: + lidar_data_dict = _extract_lidar(lidar_pc, data_converter_config) + for lidar_type, lidar_data in lidar_data_dict.items(): + if lidar_data is not None: + row_data[lidar_type.serialize()] = [lidar_data] + else: + row_data[lidar_type.serialize()] = [None] + + if data_converter_config.camera_store_option is not None: + camera_data_dict = _extract_camera(log_db, lidar_pc, source_log_path, data_converter_config) + for camera_type, camera_data in camera_data_dict.items(): + if camera_data is not None: + row_data[camera_type.serialize()] = [camera_data[0]] + row_data[f"{camera_type.serialize()}_extrinsic"] = [camera_data[1]] + else: + row_data[camera_type.serialize()] = [None] + row_data[f"{camera_type.serialize()}_extrinsic"] = [None] + + batch = pa.record_batch(row_data, schema=recording_schema) + writer.write_batch(batch) + del batch, row_data, detections_state, detections_velocity, detections_token, detections_types + + if SORT_BY_TIMESTAMP: + recording_table = open_arrow_table(log_file_path) + recording_table = recording_table.sort_by([("timestamp", "ascending")]) + write_arrow_table(recording_table, log_file_path) + + +def _extract_detections(lidar_pc: LidarPc) -> Tuple[List[List[float]], List[List[float]], List[str], List[int]]: + detections_state: List[List[float]] = [] + detections_velocity: List[List[float]] = [] + detections_token: List[str] = [] + detections_types: List[int] = [] + + for lidar_box in lidar_pc.lidar_boxes: + lidar_box: LidarBox + lidar_quaternion = EulerAngles(roll=DEFAULT_ROLL, pitch=DEFAULT_PITCH, yaw=lidar_box.yaw).quaternion + center = StateSE3( + 
x=lidar_box.x, + y=lidar_box.y, + z=lidar_box.z, + qw=lidar_quaternion.qw, + qx=lidar_quaternion.qx, + qy=lidar_quaternion.qy, + qz=lidar_quaternion.qz, + ) + bounding_box_se3 = BoundingBoxSE3(center, lidar_box.length, lidar_box.width, lidar_box.height) + + detections_state.append(bounding_box_se3.tolist()) + detections_velocity.append(lidar_box.velocity) + detections_token.append(lidar_box.track_token) + detections_types.append(int(NUPLAN_DETECTION_NAME_DICT[lidar_box.category.name])) + + return detections_state, detections_velocity, detections_token, detections_types + + +def _extract_ego_state(lidar_pc: LidarPc) -> List[float]: + + vehicle_parameters = get_nuplan_chrysler_pacifica_parameters() + rear_axle_pose = StateSE3( + x=lidar_pc.ego_pose.x, + y=lidar_pc.ego_pose.y, + z=lidar_pc.ego_pose.z, + qw=lidar_pc.ego_pose.qw, + qx=lidar_pc.ego_pose.qx, + qy=lidar_pc.ego_pose.qy, + qz=lidar_pc.ego_pose.qz, + ) + center = rear_axle_se3_to_center_se3(rear_axle_se3=rear_axle_pose, vehicle_parameters=vehicle_parameters) + dynamic_state = DynamicStateSE3( + velocity=Vector3D( + x=lidar_pc.ego_pose.vx, + y=lidar_pc.ego_pose.vy, + z=lidar_pc.ego_pose.vz, + ), + acceleration=Vector3D( + x=lidar_pc.ego_pose.acceleration_x, + y=lidar_pc.ego_pose.acceleration_y, + z=lidar_pc.ego_pose.acceleration_z, + ), + angular_velocity=Vector3D( + x=lidar_pc.ego_pose.angular_rate_x, + y=lidar_pc.ego_pose.angular_rate_y, + z=lidar_pc.ego_pose.angular_rate_z, + ), + ) + return EgoStateSE3( + center_se3=center, + dynamic_state_se3=dynamic_state, + vehicle_parameters=vehicle_parameters, + timepoint=None, + ).array.tolist() + + +def _extract_traffic_lights(log_db: NuPlanDB, lidar_pc_token: str) -> Tuple[List[int], List[int]]: + traffic_light_ids: List[int] = [] + traffic_light_types: List[int] = [] + traffic_lights = log_db.traffic_light_status.select_many(lidar_pc_token=lidar_pc_token) + for traffic_light in traffic_lights: + traffic_light_ids.append(int(traffic_light.lane_connector_id)) + traffic_light_types.append(int(NUPLAN_TRAFFIC_STATUS_DICT[traffic_light.status].value)) + return traffic_light_ids, traffic_light_types + + +def _extract_scenario_tag(log_db: NuPlanDB, lidar_pc_token: str) -> List[str]: + scenario_tags = [ + scenario_tag.type for scenario_tag in log_db.scenario_tag.select_many(lidar_pc_token=lidar_pc_token) + ] + if len(scenario_tags) == 0: + scenario_tags = ["unknown"] + return scenario_tags + + +def _extract_camera( + log_db: NuPlanDB, + lidar_pc: LidarPc, + source_log_path: Path, + data_converter_config: DataConverterConfig, +) -> Dict[PinholeCameraType, Union[str, bytes]]: + + camera_dict: Dict[str, Union[str, bytes]] = {} + sensor_root = NUPLAN_DATA_ROOT / "nuplan-v1.1" / "sensor_blobs" + log_cam_infos = {camera.token: camera for camera in log_db.log.cameras} + + for camera_type, camera_channel in NUPLAN_CAMERA_TYPES.items(): + camera_data: Optional[Union[str, bytes]] = None + c2e: Optional[List[float]] = None + image_class = list(get_images_from_lidar_tokens(source_log_path, [lidar_pc.token], [str(camera_channel.value)])) + if len(image_class) != 0: + image = image_class[0] + filename_jpg = sensor_root / image.filename_jpg + if filename_jpg.exists(): + + # Code taken from MTGS + # https://github.com/OpenDriveLab/MTGS/blob/main/nuplan_scripts/utils/nuplan_utils_custom.py#L117 + # TODO: Refactor + timestamp = image.timestamp + NUPLAN_ROLLING_SHUTTER_S.time_us + img_ego_pose: EgoPose = ( + log_db.log._session.query(EgoPose).order_by(func.abs(EgoPose.timestamp - timestamp)).first() + ) + img_e2g = 
img_ego_pose.trans_matrix + g2e = lidar_pc.ego_pose.trans_matrix_inv + img_e2e = g2e @ img_e2g + cam_info = log_cam_infos[image.camera_token] + c2img_e = cam_info.trans_matrix + c2e = img_e2e @ c2img_e + + extrinsic = StateSE3.from_transformation_matrix(c2e) + + if data_converter_config.camera_store_option == "path": + camera_data = str(filename_jpg), extrinsic.tolist() + elif data_converter_config.camera_store_option == "binary": + with open(filename_jpg, "rb") as f: + camera_data = f.read(), extrinsic.tolist() + + camera_dict[camera_type] = camera_data + + return camera_dict + + +def _extract_lidar(lidar_pc: LidarPc, data_converter_config: DataConverterConfig) -> Dict[LiDARType, Optional[str]]: + + lidar: Optional[str] = None + lidar_full_path = NUPLAN_DATA_ROOT / "nuplan-v1.1" / "sensor_blobs" / lidar_pc.filename + if lidar_full_path.exists(): + lidar = lidar_pc.filename + + return {LiDARType.LIDAR_MERGED: lidar} diff --git a/d123/datasets/raw_data_converter.py b/d123/datasets/raw_data_converter.py index 179f9ef5..862edd09 100644 --- a/d123/datasets/raw_data_converter.py +++ b/d123/datasets/raw_data_converter.py @@ -1,7 +1,7 @@ import abc from dataclasses import dataclass from pathlib import Path -from typing import List, Literal, Optional, Union +from typing import List, Literal, Union from d123.common.multithreading.worker_utils import WorkerPool @@ -12,13 +12,44 @@ class DataConverterConfig: output_path: Union[str, Path] force_log_conversion: bool = False force_map_conversion: bool = False - camera_store_option: Optional[Literal["path", "binary"]] = None - lidar_store_option: Optional[Literal["path", "binary"]] = None + + # Ego + include_ego: bool = False + + # Box Detections + include_box_detections: bool = False + + # Traffic Lights + include_traffic_lights: bool = False + + # Cameras + include_cameras: bool = False + camera_store_option: Literal["path", "binary", "mp4"] = "path" + + # LiDARs + include_lidars: bool = False + lidar_store_option: Literal["path", "binary"] = "path" + + # Scenario tag / Route + # NOTE: These are only supported for nuPlan. Consider removing or expanding support. + include_scenario_tags: bool = False + include_route: bool = False def __post_init__(self): if isinstance(self.output_path, str): self.output_path = Path(self.output_path) + assert self.camera_store_option != "mp4", "MP4 format is not yet supported." + assert self.camera_store_option in [ + "path", + "binary", + ], f"Invalid camera store option, got {self.camera_store_option}." + + assert self.lidar_store_option in [ + "path", + "binary", + ], f"Invalid LiDAR store option, got {self.lidar_store_option}." 
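+        # Example (illustrative only; the path below is a placeholder):
+        #   config = DataConverterConfig(
+        #       output_path="/data/d123",
+        #       include_ego=True,
+        #       include_cameras=True,
+        #       camera_store_option="path",
+        #   )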
+ class RawDataConverter(abc.ABC): diff --git a/d123/datasets/utils/arrow_ipc_writer.py b/d123/datasets/utils/arrow_ipc_writer.py new file mode 100644 index 00000000..fa0ed439 --- /dev/null +++ b/d123/datasets/utils/arrow_ipc_writer.py @@ -0,0 +1,242 @@ +from pathlib import Path +from typing import Any, Dict, List, Optional, Tuple, Union + +import pyarrow as pa + +from d123.datasets.raw_data_converter import DataConverterConfig +from d123.datatypes.detections.detection import BoxDetectionWrapper, TrafficLightDetectionWrapper +from d123.datatypes.scene.arrow.utils.arrow_metadata_utils import add_log_metadata_to_arrow_schema +from d123.datatypes.scene.scene_metadata import LogMetadata +from d123.datatypes.sensors.camera.pinhole_camera import PinholeCameraType +from d123.datatypes.sensors.lidar.lidar import LiDARType +from d123.datatypes.time.time_point import TimePoint +from d123.datatypes.vehicle_state.ego_state import EgoStateSE3, EgoStateSE3Index +from d123.geometry import BoundingBoxSE3Index, StateSE3, StateSE3Index, Vector3DIndex + + +class ArrowLogWriter: + + def __init__( + self, + log_path: Union[str, Path], + data_converter_config: DataConverterConfig, + log_metadata: LogMetadata, + ) -> None: + + self._log_path = Path(log_path) + self._data_converter_config = data_converter_config + self._log_metadata = log_metadata + + self._schema: pa.Schema = self._build_schema() + + def _build_schema(self) -> pa.Schema: + + schema_list: List[Tuple[str, pa.DataType]] = [ + ("token", pa.string()), + ("timestamp", pa.int64()), + ] + + # -------------------------------------------------------------------------------------------------------------- + # Ego State + # -------------------------------------------------------------------------------------------------------------- + if self._data_converter_config.include_ego: + schema_list.extend( + [ + ("ego_state", pa.list_(pa.float64(), len(EgoStateSE3Index))), + ] + ) + + # -------------------------------------------------------------------------------------------------------------- + # Box Detections + # -------------------------------------------------------------------------------------------------------------- + if self._data_converter_config.include_box_detections: + schema_list.extend( + [ + ("box_detection_state", pa.list_(pa.list_(pa.float64(), len(BoundingBoxSE3Index)))), + ("box_detection_velocity", pa.list_(pa.list_(pa.float64(), len(Vector3DIndex)))), + ("box_detection_token", pa.list_(pa.string())), + ("box_detection_type", pa.list_(pa.int16())), + ] + ) + + # -------------------------------------------------------------------------------------------------------------- + # Traffic Lights + # -------------------------------------------------------------------------------------------------------------- + if self._data_converter_config.include_traffic_lights: + schema_list.extend( + [ + ("traffic_light_ids", pa.list_(pa.int64())), + ("traffic_light_types", pa.list_(pa.int16())), + ] + ) + + # -------------------------------------------------------------------------------------------------------------- + # Cameras + # -------------------------------------------------------------------------------------------------------------- + if self._data_converter_config.include_cameras: + for camera_type in self._log_metadata.camera_metadata.keys(): + camera_name = camera_type.serialize() + + # Depending on the storage option, define the schema for camera data + if self._data_converter_config.camera_store_option == "path": + 
schema_list.append((f"{camera_name}_data", pa.string())) + + elif self._data_converter_config.camera_store_option == "binary": + schema_list.append((f"{camera_name}_data", pa.binary())) + + # Add camera pose + schema_list.append((f"{camera_name}_extrinsic", pa.list_(pa.float64(), len(StateSE3Index)))) + + # -------------------------------------------------------------------------------------------------------------- + # LiDARs + # -------------------------------------------------------------------------------------------------------------- + if self._data_converter_config.include_lidars: + for lidar_type in self._log_metadata.lidar_metadata.keys(): + lidar_name = lidar_type.serialize() + + # Depending on the storage option, define the schema for LiDAR data + if self._data_converter_config.lidar_store_option == "path": + schema_list.append((f"{lidar_name}_data", pa.string())) + + elif self._data_converter_config.lidar_store_option == "binary": + schema_list.append((f"{lidar_name}_data", pa.binary())) + + # -------------------------------------------------------------------------------------------------------------- + # Miscellaneous (Scenario Tags / Route) + # -------------------------------------------------------------------------------------------------------------- + if self._data_converter_config.include_scenario_tags: + schema_list.append(("scenario_tags", pa.list_(pa.string()))) + + if self._data_converter_config.include_route: + schema_list.append(("route_lane_group_ids", pa.list_(pa.int64()))) + + return add_log_metadata_to_arrow_schema(pa.schema(schema_list), self._log_metadata) + + def add_row( + self, + token: str, + timestamp: TimePoint, + ego_state: Optional[EgoStateSE3] = None, + box_detections: Optional[BoxDetectionWrapper] = None, + traffic_lights: Optional[TrafficLightDetectionWrapper] = None, + cameras: Optional[Dict[PinholeCameraType, Tuple[Any, ...]]] = None, + lidars: Optional[Dict[LiDARType, Any]] = None, + scenario_tags: Optional[List[str]] = None, + route_lane_group_ids: Optional[List[int]] = None, + ) -> None: + if not hasattr(self, "_sink"): + self._sink = pa.OSFile(str(self._log_path), "wb") + self._writer = pa.ipc.new_file(self._sink, self._schema) + + record_batch_data = { + "token": [token], + "timestamp": [timestamp.time_us], + } + + # -------------------------------------------------------------------------------------------------------------- + # Ego State + # -------------------------------------------------------------------------------------------------------------- + if self._data_converter_config.include_ego: + assert ego_state is not None, "Ego state is required but not provided." + record_batch_data["ego_state"] = [ego_state.array] + + # -------------------------------------------------------------------------------------------------------------- + # Box Detections + # -------------------------------------------------------------------------------------------------------------- + if self._data_converter_config.include_box_detections: + assert box_detections is not None, "Box detections are required but not provided." + # TODO: Figure out more elegant way without for-loops. 
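+            # Sketch (assumption): a single zip-based pass could replace the per-field loops:
+            #   rows = [(d.bounding_box_se3.array, d.velocity.array, d.metadata.track_token,
+            #            int(d.metadata.detection_type)) for d in box_detections.box_detections]
+            #   state, velocity, token, types = map(list, zip(*rows)) if rows else ([], [], [], [])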
+
+            # Accumulate box detection data
+            box_detection_state = []
+            box_detection_velocity = []
+            box_detection_token = []
+            box_detection_type = []
+
+            # NOTE: iterate the wrapper's field explicitly; BoxDetectionWrapper itself is not assumed to be iterable.
+            for box_detection in box_detections.box_detections:
+                box_detection_state.append(box_detection.bounding_box_se3.array)
+                box_detection_velocity.append(box_detection.velocity.array)
+                box_detection_token.append(box_detection.metadata.track_token)
+                box_detection_type.append(int(box_detection.metadata.detection_type))
+
+            # Add to record batch data
+            record_batch_data["box_detection_state"] = [box_detection_state]
+            record_batch_data["box_detection_velocity"] = [box_detection_velocity]
+            record_batch_data["box_detection_token"] = [box_detection_token]
+            record_batch_data["box_detection_type"] = [box_detection_type]
+
+        # --------------------------------------------------------------------------------------------------------------
+        # Traffic Lights
+        # --------------------------------------------------------------------------------------------------------------
+        if self._data_converter_config.include_traffic_lights:
+            assert traffic_lights is not None, "Traffic light detections are required but not provided."
+            # TODO: Figure out more elegant way without for-loops.
+
+            # Accumulate traffic light data
+            traffic_light_ids = []
+            traffic_light_types = []
+
+            for traffic_light in traffic_lights.traffic_light_detections:
+                traffic_light_ids.append(traffic_light.lane_id)
+                traffic_light_types.append(int(traffic_light.status))
+
+            # Add to record batch data
+            record_batch_data["traffic_light_ids"] = [traffic_light_ids]
+            record_batch_data["traffic_light_types"] = [traffic_light_types]
+
+        # --------------------------------------------------------------------------------------------------------------
+        # Cameras
+        # --------------------------------------------------------------------------------------------------------------
+        if self._data_converter_config.include_cameras:
+            assert cameras is not None, "Camera data is required but not provided."
+            provided_cameras = set(cameras.keys())
+            expected_cameras = set(self._log_metadata.camera_metadata.keys())
+            for camera_type in expected_cameras:
+                camera_name = camera_type.serialize()
+
+                # NOTE: Missing cameras are allowed, e.g., for synchronization mismatches.
+                # In this case, we write None/null to the arrow table.
+                camera_data: Optional[Any] = None
+                camera_pose: Optional[StateSE3] = None
+                if camera_type in provided_cameras:
+                    camera_data, camera_pose = cameras[camera_type]
+
+                record_batch_data[f"{camera_name}_data"] = [camera_data]
+                record_batch_data[f"{camera_name}_extrinsic"] = [camera_pose.array if camera_pose else None]
+
+        # --------------------------------------------------------------------------------------------------------------
+        # LiDARs
+        # --------------------------------------------------------------------------------------------------------------
+        if self._data_converter_config.include_lidars:
+            assert lidars is not None, "LiDAR data is required but not provided."
+            provided_lidars = set(lidars.keys())
+            expected_lidars = set(self._log_metadata.lidar_metadata.keys())
+            for lidar_type in expected_lidars:
+                lidar_name = lidar_type.serialize()
+
+                # NOTE: Missing LiDARs are allowed, similar to cameras.
+                # In this case, we write None/null to the arrow table.
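+                # (Arrow detail: a Python None in these single-row column lists is written as a
+                #  null value in the corresponding pa.string()/pa.binary() column, so downstream
+                #  readers must null-check before decoding.)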
+ lidar_data: Optional[Any] = None + if lidar_type in provided_lidars: + lidar_data = lidars[lidar_type] + record_batch_data[f"{lidar_name}_data"] = [lidar_data] + + # -------------------------------------------------------------------------------------------------------------- + # Miscellaneous (Scenario Tags / Route) + # -------------------------------------------------------------------------------------------------------------- + if self._data_converter_config.include_scenario_tags: + assert scenario_tags is not None, "Scenario tags are required but not provided." + record_batch_data["scenario_tags"] = [scenario_tags] + + if self._data_converter_config.include_route: + assert route_lane_group_ids is not None, "Route lane group IDs are required but not provided." + record_batch_data["route_lane_group_ids"] = [route_lane_group_ids] + + record_batch = pa.record_batch(record_batch_data, schema=self._schema) + self._writer.write_batch(record_batch) + + def close(self) -> None: + if hasattr(self, "_writer"): + self._writer.close() + if hasattr(self, "_sink"): + self._sink.close() diff --git a/d123/datasets/utils/arrow_schema.py b/d123/datasets/utils/arrow_schema.py deleted file mode 100644 index 3df567f2..00000000 --- a/d123/datasets/utils/arrow_schema.py +++ /dev/null @@ -1,54 +0,0 @@ -from typing import List, Tuple - -import pyarrow as pa - -from d123.datatypes.vehicle_state.ego_state import EgoStateSE3Index -from d123.geometry.geometry_index import BoundingBoxSE3Index, Vector3DIndex - -schema_list: List[Tuple[str, pa.DataType]] = [ - ("token", pa.string()), - ("timestamp", pa.int64()), -] - - -def get_default_arrow_schema() -> List[Tuple[str, pa.DataType]]: - return schema_list - - -def add_detection_schema(schema_list: List[Tuple[str, pa.DataType]]) -> None: - detection_schema_addon: List[Tuple[str, pa.DataType]] = [ - ("detections_state", pa.list_(pa.list_(pa.float64(), len(BoundingBoxSE3Index)))), - ("detections_velocity", pa.list_(pa.list_(pa.float64(), len(Vector3DIndex)))), - ("detections_token", pa.list_(pa.string())), - ("detections_type", pa.list_(pa.int16())), - ] - schema_list.extend(detection_schema_addon) - - -def add_traffic_light_schema(schema_list: List[Tuple[str, pa.DataType]]) -> None: - traffic_light_schema_addon: List[Tuple[str, pa.DataType]] = [ - ("traffic_light_ids", pa.list_(pa.int64())), - ("traffic_light_types", pa.list_(pa.int16())), - ] - schema_list.extend(traffic_light_schema_addon) - - -def add_ego_state_schema(schema_list: List[Tuple[str, pa.DataType]]) -> None: - ego_state_schema_addon: List[Tuple[str, pa.DataType]] = [ - ("ego_state", pa.list_(pa.float64(), len(EgoStateSE3Index))), - ] - schema_list.extend(ego_state_schema_addon) - - -def add_route_schema(schema_list: List[Tuple[str, pa.DataType]]) -> None: - route_schema_addon: List[Tuple[str, pa.DataType]] = [ - ("route_lane_group_ids", pa.list_(pa.int64())), - ] - add_route_schema(route_schema_addon) - - -def add_scenario_tags_schema(schema_list: List[Tuple[str, pa.DataType]]) -> None: - scenario_tags_schema_addon: List[Tuple[str, pa.DataType]] = [ - ("scenario_tag", pa.list_(pa.string())), - ] - schema_list.extend(scenario_tags_schema_addon) diff --git a/d123/datatypes/detections/detection.py b/d123/datatypes/detections/detection.py index aa1cc679..3e7e09a2 100644 --- a/d123/datatypes/detections/detection.py +++ b/d123/datatypes/detections/detection.py @@ -14,7 +14,7 @@ class BoxDetectionMetadata: detection_type: DetectionType - timepoint: TimePoint + timepoint: TimePoint # TODO: Consider removing or 
making optional track_token: str confidence: Optional[float] = None @@ -125,7 +125,7 @@ class TrafficLightStatus(SerialIntEnum): @dataclass class TrafficLightDetection: - timepoint: TimePoint + timepoint: TimePoint # TODO: Consider removing or making optional lane_id: int status: TrafficLightStatus diff --git a/d123/datatypes/scene/arrow/utils/arrow_getters.py b/d123/datatypes/scene/arrow/utils/arrow_getters.py index ac86ca18..c70e9184 100644 --- a/d123/datatypes/scene/arrow/utils/arrow_getters.py +++ b/d123/datatypes/scene/arrow/utils/arrow_getters.py @@ -43,7 +43,7 @@ def get_ego_vehicle_state_from_arrow_table( ) -> EgoStateSE3: timepoint = get_timepoint_from_arrow_table(arrow_table, index) return EgoStateSE3.from_array( - array=pa.array(arrow_table["ego_states"][index]).to_numpy(), + array=pa.array(arrow_table["ego_state"][index]).to_numpy(), vehicle_parameters=vehicle_parameters, timepoint=timepoint, ) @@ -54,10 +54,10 @@ def get_box_detections_from_arrow_table(arrow_table: pa.Table, index: int) -> Bo box_detections: List[BoxDetection] = [] for detection_state, detection_velocity, detection_token, detection_type in zip( - arrow_table["detections_state"][index].as_py(), - arrow_table["detections_velocity"][index].as_py(), - arrow_table["detections_token"][index].as_py(), - arrow_table["detections_type"][index].as_py(), + arrow_table["box_detection_state"][index].as_py(), + arrow_table["box_detection_velocity"][index].as_py(), + arrow_table["box_detection_token"][index].as_py(), + arrow_table["box_detection_type"][index].as_py(), ): box_detection = BoxDetectionSE3( metadata=BoxDetectionMetadata( @@ -97,8 +97,9 @@ def get_camera_from_arrow_table( log_metadata: LogMetadata, ) -> PinholeCamera: - table_data = arrow_table[camera_type.serialize()][index].as_py() - extrinsic_values = arrow_table[f"{camera_type.serialize()}_extrinsic"][index].as_py() + camera_name = camera_type.serialize() + table_data = arrow_table[f"{camera_name}_data"][index].as_py() + extrinsic_values = arrow_table[f"{camera_name}_extrinsic"][index].as_py() extrinsic = StateSE3.from_list(extrinsic_values) if extrinsic_values is not None else None if table_data is None or extrinsic is None: diff --git a/d123/datatypes/scene/arrow/utils/arrow_metadata_utils.py b/d123/datatypes/scene/arrow/utils/arrow_metadata_utils.py index e392d337..b39d987d 100644 --- a/d123/datatypes/scene/arrow/utils/arrow_metadata_utils.py +++ b/d123/datatypes/scene/arrow/utils/arrow_metadata_utils.py @@ -14,6 +14,6 @@ def get_log_metadata_from_arrow(arrow_file_path: Union[Path, str]) -> LogMetadat return log_metadata -def add_log_metadata_to_arrow_schema(schema: pa.Schema, log_metadata: LogMetadata) -> pa.Schema: +def add_log_metadata_to_arrow_schema(schema: pa.schema, log_metadata: LogMetadata) -> pa.schema: schema = schema.with_metadata({"log_metadata": json.dumps(log_metadata.to_dict())}) return schema diff --git a/d123/script/config/dataset_conversion/default_dataset_conversion.yaml b/d123/script/config/dataset_conversion/default_dataset_conversion.yaml index 22743010..f9e0f5a6 100644 --- a/d123/script/config/dataset_conversion/default_dataset_conversion.yaml +++ b/d123/script/config/dataset_conversion/default_dataset_conversion.yaml @@ -12,11 +12,11 @@ defaults: - default_common - default_dataset_paths - datasets: - - nuplan_private_dataset + # - nuplan_private_dataset # - carla_dataset # - wopd_dataset - # - av2_sensor_dataset + - av2_sensor_dataset - _self_ -force_map_conversion: True +force_map_conversion: False force_log_conversion: True diff 
--git a/d123/script/config/datasets/av2_sensor_dataset.yaml b/d123/script/config/datasets/av2_sensor_dataset.yaml index d65947e4..93460579 100644 --- a/d123/script/config/datasets/av2_sensor_dataset.yaml +++ b/d123/script/config/datasets/av2_sensor_dataset.yaml @@ -12,5 +12,25 @@ av2_sensor_dataset: output_path: ${d123_data_root} force_log_conversion: ${force_log_conversion} force_map_conversion: ${force_map_conversion} - camera_store_option: "path" - lidar_store_option: "path" + + # Ego + include_ego: true + + # Box Detections + include_box_detections: true + + # Traffic Lights + include_traffic_lights: false + + # Cameras + include_cameras: true + camera_store_option: "binary" # "path", "binary", "mp4" + + # LiDARs + include_lidars: false + lidar_store_option: "path" # "path", "binary" + + # Scenario tag / Route + # NOTE: These are only supported for nuPlan. Consider removing or expanding support. + include_scenario_tags: false + include_route: false diff --git a/d123/script/config/datasets/carla_dataset.yaml b/d123/script/config/datasets/carla_dataset.yaml index 14160a0c..1f005578 100644 --- a/d123/script/config/datasets/carla_dataset.yaml +++ b/d123/script/config/datasets/carla_dataset.yaml @@ -12,5 +12,25 @@ carla_dataset: output_path: ${d123_data_root} force_log_conversion: ${force_log_conversion} force_map_conversion: ${force_map_conversion} - camera_store_option: "path" - lidar_store_option: "path" + + # Ego + include_ego: true + + # Box Detections + include_box_detections: true + + # Traffic Lights + include_traffic_lights: true + + # Cameras + include_cameras: true + camera_store_option: "path" # "path", "binary", "mp4" + + # LiDARs + include_lidars: true + lidar_store_option: "path" # "path", "binary" + + # Scenario tag / Route + # NOTE: These are only supported for nuPlan. Consider removing or expanding support. + include_scenario_tags: true + include_route: true diff --git a/d123/script/config/datasets/nuplan_dataset.yaml b/d123/script/config/datasets/nuplan_dataset.yaml index 6bcca6fb..3907f69c 100644 --- a/d123/script/config/datasets/nuplan_dataset.yaml +++ b/d123/script/config/datasets/nuplan_dataset.yaml @@ -12,5 +12,25 @@ nuplan_dataset: output_path: ${d123_data_root} force_log_conversion: ${force_log_conversion} force_map_conversion: ${force_map_conversion} - camera_store_option: "path" - lidar_store_option: "path" + + # Ego + include_ego: true + + # Box Detections + include_box_detections: true + + # Traffic Lights + include_traffic_lights: true + + # Cameras + include_cameras: true + camera_store_option: "path" # "path", "binary", "mp4" + + # LiDARs + include_lidars: true + lidar_store_option: "path" # "path", "binary" + + # Scenario tag / Route + # NOTE: These are only supported for nuPlan. Consider removing or expanding support. 
+ include_scenario_tags: true + include_route: true diff --git a/d123/script/config/datasets/nuplan_mini_dataset.yaml b/d123/script/config/datasets/nuplan_mini_dataset.yaml index a371d34d..7b509e55 100644 --- a/d123/script/config/datasets/nuplan_mini_dataset.yaml +++ b/d123/script/config/datasets/nuplan_mini_dataset.yaml @@ -13,5 +13,25 @@ nuplan_mini_dataset: output_path: ${d123_data_root} force_log_conversion: ${force_log_conversion} force_map_conversion: ${force_map_conversion} - camera_store_option: "path" - lidar_store_option: "path" + + # Ego + include_ego: true + + # Box Detections + include_box_detections: true + + # Traffic Lights + include_traffic_lights: true + + # Cameras + include_cameras: true + camera_store_option: "path" # "path", "binary", "mp4" + + # LiDARs + include_lidars: true + lidar_store_option: "path" # "path", "binary" + + # Scenario tag / Route + # NOTE: These are only supported for nuPlan. Consider removing or expanding support. + include_scenario_tags: true + include_route: true diff --git a/d123/script/config/datasets/nuplan_private_dataset.yaml b/d123/script/config/datasets/nuplan_private_dataset.yaml index 7062f38f..3f44e33a 100644 --- a/d123/script/config/datasets/nuplan_private_dataset.yaml +++ b/d123/script/config/datasets/nuplan_private_dataset.yaml @@ -12,5 +12,25 @@ nuplan_private_dataset: output_path: ${d123_data_root} force_log_conversion: ${force_log_conversion} force_map_conversion: ${force_map_conversion} - camera_store_option: "path" - lidar_store_option: "path" + + # Ego + include_ego: true + + # Box Detections + include_box_detections: true + + # Traffic Lights + include_traffic_lights: true + + # Cameras + include_cameras: true + camera_store_option: "path" # "path", "binary", "mp4" + + # LiDARs + include_lidars: true + lidar_store_option: "path" # "path", "binary" + + # Scenario tag / Route + # NOTE: These are only supported for nuPlan. Consider removing or expanding support. + include_scenario_tags: true + include_route: true diff --git a/d123/script/config/datasets/wopd_dataset.yaml b/d123/script/config/datasets/wopd_dataset.yaml index 906aef03..64ac38e3 100644 --- a/d123/script/config/datasets/wopd_dataset.yaml +++ b/d123/script/config/datasets/wopd_dataset.yaml @@ -12,5 +12,25 @@ wopd_dataset: output_path: ${d123_data_root} force_log_conversion: ${force_log_conversion} force_map_conversion: ${force_map_conversion} - camera_store_option: "binary" - lidar_store_option: "binary" + + # Ego + include_ego: true + + # Box Detections + include_box_detections: true + + # Traffic Lights + include_traffic_lights: true + + # Cameras + include_cameras: true + camera_store_option: "path" # "path", "binary", "mp4" + + # LiDARs + include_lidars: true + lidar_store_option: "path" # "path", "binary" + + # Scenario tag / Route + # NOTE: These are only supported for nuPlan. Consider removing or expanding support. + include_scenario_tags: false + include_route: false diff --git a/test_viser.py b/test_viser.py index d1449ec1..8b6f7019 100644 --- a/test_viser.py +++ b/test_viser.py @@ -10,8 +10,8 @@ # splits = ["nuplan_private_test"] # splits = ["carla"] - splits = ["wopd_train"] - # splits = ["av2-sensor-mini_train"] + # splits = ["wopd_train"] + splits = ["av2-sensor-mini_train"] log_names = None scene_tokens = None From 4e07a76f52e38434e83f37676fcb14d9d63b6091 Mon Sep 17 00:00:00 2001 From: Daniel Dauner Date: Thu, 9 Oct 2025 11:22:43 +0200 Subject: [PATCH 063/145] Refactor Waymo Open Dataset with new log writer structure. 
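This replaces the hand-rolled pyarrow schema and record-batch code in the WOPD
converter with the shared ArrowLogWriter. A minimal sketch of the per-frame flow
after this patch (illustrative only; names and signatures as introduced in the
diff below):

    log_writer = ArrowLogWriter(
        log_path=log_file_path,
        data_converter_config=data_converter_config,
        log_metadata=log_metadata,
    )
    for data in tf.data.TFRecordDataset(tf_record_path, compression_type=""):
        frame = dataset_pb2.Frame()
        frame.ParseFromString(data.numpy())
        log_writer.add_row(
            token=create_token(f"{frame.context.name}_{int(frame.timestamp_micros)}"),
            timestamp=TimePoint.from_us(frame.timestamp_micros),
            ego_state=_extract_wopd_ego_state(frame),
            box_detections=_extract_wopd_box_detections(frame),
            traffic_lights=None,  # WOPD has no traffic-light annotations
            cameras=_extract_wopd_cameras(frame, data_converter_config),
            lidars=_extract_wopd_lidars(frame, data_converter_config),
            scenario_tags=None,  # WOPD has no scenario tags
            route_lane_group_ids=None,  # WOPD has no route information
        )
    log_writer.close()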
--- d123/__init__.py | 14 + d123/datasets/av2/av2_data_converter.py | 8 +- d123/datasets/wopd/wopd_data_converter.py | 212 +++---- .../wopd/wopd_data_converter_delete.py | 521 ++++++++++++++++++ .../default_dataset_conversion.yaml | 4 +- d123/script/config/datasets/wopd_dataset.yaml | 8 +- d123/script/run_dataset_conversion.py | 2 + 7 files changed, 628 insertions(+), 141 deletions(-) create mode 100644 d123/datasets/wopd/wopd_data_converter_delete.py diff --git a/d123/__init__.py b/d123/__init__.py index e39cd70a..e118230f 100644 --- a/d123/__init__.py +++ b/d123/__init__.py @@ -10,4 +10,18 @@ # /___\_\/__/\/ /_____/ //__________\ \ \\ \___\/ / # \_________\/\________/ \_____________\/ \/_____/ +ascii_banner = r""" + _ _ _ _ + / /\ /\ \ /\ \ /\ \ + / / \ / \ \ / \ \ / \ \____ +/_/ /\ \ / /\ \ \ / /\ \ \ / /\ \_____\ +\_\/\ \ \ \/_/\ \ \ / / /\ \ \ / / /\/___ / + \ \ \ / / / \/_//_\ \ \ / / / / / / + \ \ \ / / / __\___ \ \ / / / / / / + \ \ \ / / / _ / /\ \ \ \ / / / / / / + __\ \ \___ / / /_/\_\ / /_/____\ \ \\ \ \__/ / / + /___\_\/__/\/ /_____/ //__________\ \ \\ \___\/ / + \_________\/\________/ \_____________\/ \/_____/ +""" + __version__ = "0.0.7" diff --git a/d123/datasets/av2/av2_data_converter.py b/d123/datasets/av2/av2_data_converter.py index 881320f5..48fc8cb9 100644 --- a/d123/datasets/av2/av2_data_converter.py +++ b/d123/datasets/av2/av2_data_converter.py @@ -10,11 +10,7 @@ from d123.common.multithreading.worker_utils import WorkerPool, worker_map from d123.common.utils.arrow_helper import open_arrow_table, write_arrow_table -from d123.datasets.av2.av2_constants import ( - AV2_CAMERA_TYPE_MAPPING, - AV2_TO_DETECTION_TYPE, - AV2SensorBoxDetectionType, -) +from d123.datasets.av2.av2_constants import AV2_CAMERA_TYPE_MAPPING, AV2_TO_DETECTION_TYPE, AV2SensorBoxDetectionType from d123.datasets.av2.av2_helper import ( build_sensor_dataframe, build_synchronization_dataframe, @@ -348,7 +344,7 @@ def _extract_av2_sensor_box_detections( box_detections.append( BoxDetectionSE3( metadata=BoxDetectionMetadata( - detection_type=int(detections_types[detection_idx]), + detection_type=detections_types[detection_idx], timepoint=None, track_token=detections_token[detection_idx], confidence=None, diff --git a/d123/datasets/wopd/wopd_data_converter.py b/d123/datasets/wopd/wopd_data_converter.py index ea418d57..c6a9f3a2 100644 --- a/d123/datasets/wopd/wopd_data_converter.py +++ b/d123/datasets/wopd/wopd_data_converter.py @@ -7,17 +7,17 @@ import numpy as np import numpy.typing as npt -import pyarrow as pa from d123.common.multithreading.worker_utils import WorkerPool, worker_map from d123.common.utils.dependencies import check_dependencies from d123.datasets.raw_data_converter import DataConverterConfig, RawDataConverter +from d123.datasets.utils.arrow_ipc_writer import ArrowLogWriter from d123.datasets.utils.sensor.camera_conventions import CameraConvention, convert_camera_convention from d123.datasets.utils.sensor.lidar_index_registry import WopdLidarIndex from d123.datasets.wopd.waymo_map_utils.wopd_map_utils import convert_wopd_map from d123.datasets.wopd.wopd_utils import parse_range_image_and_camera_projection +from d123.datatypes.detections.detection import BoxDetectionMetadata, BoxDetectionSE3, BoxDetectionWrapper from d123.datatypes.detections.detection_types import DetectionType -from d123.datatypes.scene.arrow.utils.arrow_metadata_utils import add_log_metadata_to_arrow_schema from d123.datatypes.scene.scene_metadata import LogMetadata from d123.datatypes.sensors.camera.pinhole_camera 
import ( PinholeCameraMetadata, @@ -26,10 +26,12 @@ PinholeIntrinsics, ) from d123.datatypes.sensors.lidar.lidar import LiDARMetadata, LiDARType -from d123.datatypes.vehicle_state.ego_state import DynamicStateSE3, EgoStateSE3, EgoStateSE3Index +from d123.datatypes.time.time_point import TimePoint +from d123.datatypes.vehicle_state.ego_state import DynamicStateSE3, EgoStateSE3 from d123.datatypes.vehicle_state.vehicle_parameters import get_wopd_chrysler_pacifica_parameters from d123.geometry import BoundingBoxSE3Index, EulerAngles, StateSE3, Vector3D, Vector3DIndex -from d123.geometry.geometry_index import EulerAnglesIndex, StateSE3Index +from d123.geometry.bounding_box import BoundingBoxSE3 +from d123.geometry.geometry_index import EulerAnglesIndex from d123.geometry.transform.transform_se3 import convert_relative_to_absolute_se3_array from d123.geometry.utils.constants import DEFAULT_PITCH, DEFAULT_ROLL from d123.geometry.utils.rotation_utils import ( @@ -230,43 +232,15 @@ def convert_wopd_tfrecord_log_to_arrow( map_is_local=True, ) - schema_column_list = [ - ("token", pa.string()), - ("timestamp", pa.int64()), - ("detections_state", pa.list_(pa.list_(pa.float64(), len(BoundingBoxSE3Index)))), - ("detections_velocity", pa.list_(pa.list_(pa.float64(), len(Vector3DIndex)))), - ("detections_token", pa.list_(pa.string())), - ("detections_type", pa.list_(pa.int16())), - ("ego_states", pa.list_(pa.float64(), len(EgoStateSE3Index))), - ("traffic_light_ids", pa.list_(pa.int64())), - ("traffic_light_types", pa.list_(pa.int16())), - ("scenario_tag", pa.list_(pa.string())), - ("route_lane_group_ids", pa.list_(pa.int64())), - ] - # TODO: Adjust how cameras are added - if data_converter_config.camera_store_option is not None: - for camera_type in log_metadata.camera_metadata.keys(): - if data_converter_config.camera_store_option == "path": - raise NotImplementedError("Path camera storage is not implemented.") - elif data_converter_config.camera_store_option == "binary": - schema_column_list.append((camera_type.serialize(), pa.binary())) - schema_column_list.append( - (f"{camera_type.serialize()}_extrinsic", pa.list_(pa.float64(), len(StateSE3Index))) - ) - - if data_converter_config.lidar_store_option is not None: - for lidar_type in log_metadata.lidar_metadata.keys(): - if data_converter_config.lidar_store_option == "path": - raise NotImplementedError("Filepath lidar storage is not implemented.") - elif data_converter_config.lidar_store_option == "binary": - schema_column_list.append((lidar_type.serialize(), (pa.list_(pa.float32())))) - - recording_schema = pa.schema(schema_column_list) - recording_schema = add_log_metadata_to_arrow_schema(recording_schema, log_metadata) - - _write_recording_table(dataset, recording_schema, log_file_path, tf_record_path, data_converter_config) - - del recording_schema, dataset + log_writer = ArrowLogWriter( + log_path=log_file_path, + data_converter_config=data_converter_config, + log_metadata=log_metadata, + ) + + _write_recording_table(dataset, log_writer, tf_record_path, data_converter_config) + + del dataset except Exception as e: import traceback @@ -327,63 +301,29 @@ def get_wopd_lidar_metadata( def _write_recording_table( dataset: tf.data.TFRecordDataset, - recording_schema: pa.schema, - log_file_path: Path, + log_writer: ArrowLogWriter, tf_record_path: Path, data_converter_config: DataConverterConfig, ) -> None: - with pa.OSFile(str(log_file_path), "wb") as sink: - with pa.ipc.new_file(sink, recording_schema) as writer: + dataset = 
tf.data.TFRecordDataset(tf_record_path, compression_type="") + for frame_idx, data in enumerate(dataset): + frame = dataset_pb2.Frame() + frame.ParseFromString(data.numpy()) + + log_writer.add_row( + token=create_token(f"{frame.context.name}_{int(frame.timestamp_micros)}"), + timestamp=TimePoint.from_us(frame.timestamp_micros), + ego_state=_extract_wopd_ego_state(frame), + box_detections=_extract_wopd_box_detections(frame), + traffic_lights=None, # NOTE: WOPD does not have traffic light information + cameras=_extract_wopd_cameras(frame, data_converter_config), + lidars=_extract_wopd_lidars(frame, data_converter_config), + scenario_tags=None, # NOTE: WOPD does not have scenario tags + route_lane_group_ids=None, # NOTE: WOPD does not have route information + ) - dataset = tf.data.TFRecordDataset(tf_record_path, compression_type="") - for frame_idx, data in enumerate(dataset): - frame = dataset_pb2.Frame() - frame.ParseFromString(data.numpy()) - - (detections_state, detections_velocity, detections_token, detections_types) = _extract_detections(frame) - - # TODO: Implement traffic light extraction - traffic_light_ids = [] - traffic_light_types = [] - - # TODO: Implement detections - row_data = { - "token": [create_token(f"{frame.context.name}_{int(frame.timestamp_micros)}")], - "timestamp": [int(frame.timestamp_micros)], - "detections_state": [detections_state], - "detections_velocity": [detections_velocity], - "detections_token": [detections_token], - "detections_type": [detections_types], - "ego_states": [_extract_ego_state(frame)], - "traffic_light_ids": [traffic_light_ids], - "traffic_light_types": [traffic_light_types], - "scenario_tag": ["unknown"], - "route_lane_group_ids": [None], - } - - # TODO: Implement lidar extraction - if data_converter_config.lidar_store_option is not None: - lidar_data_dict = _extract_lidar(frame, data_converter_config) - for lidar_type, lidar_data in lidar_data_dict.items(): - if lidar_data is not None: - row_data[lidar_type.serialize()] = [lidar_data.tolist()] - else: - row_data[lidar_type.serialize()] = [None] - - if data_converter_config.camera_store_option is not None: - camera_data_dict = _extract_camera(frame, data_converter_config) - for camera_type, camera_data in camera_data_dict.items(): - if camera_data is not None: - row_data[camera_type.serialize()] = [camera_data[0]] - row_data[f"{camera_type.serialize()}_extrinsic"] = [camera_data[1]] - else: - row_data[camera_type.serialize()] = [None] - row_data[f"{camera_type.serialize()}_extrinsic"] = [None] - - batch = pa.record_batch(row_data, schema=recording_schema) - writer.write_batch(batch) - del batch, row_data, detections_state, detections_velocity, detections_token, detections_types + log_writer.close() def _get_ego_pose_se3(frame: dataset_pb2.Frame) -> StateSE3: @@ -391,15 +331,35 @@ def _get_ego_pose_se3(frame: dataset_pb2.Frame) -> StateSE3: return StateSE3.from_transformation_matrix(ego_pose_matrix) -def _extract_detections(frame: dataset_pb2.Frame) -> Tuple[List[List[float]], List[List[float]], List[str], List[int]]: +def _extract_wopd_ego_state(frame: dataset_pb2.Frame) -> List[float]: + rear_axle_pose = _get_ego_pose_se3(frame) + + vehicle_parameters = get_wopd_chrysler_pacifica_parameters() + # FIXME: Find dynamic state in waymo open perception dataset + # https://github.com/waymo-research/waymo-open-dataset/issues/55#issuecomment-546152290 + dynamic_state = DynamicStateSE3( + velocity=Vector3D(*np.zeros(3)), + acceleration=Vector3D(*np.zeros(3)), + angular_velocity=Vector3D(*np.zeros(3)), 
+ ) + + return EgoStateSE3.from_rear_axle( + rear_axle_se3=rear_axle_pose, + dynamic_state_se3=dynamic_state, + vehicle_parameters=vehicle_parameters, + time_point=None, + ) + + +def _extract_wopd_box_detections(frame: dataset_pb2.Frame) -> BoxDetectionWrapper: ego_rear_axle = _get_ego_pose_se3(frame) num_detections = len(frame.laser_labels) detections_state = np.zeros((num_detections, len(BoundingBoxSE3Index)), dtype=np.float64) detections_velocity = np.zeros((num_detections, len(Vector3DIndex)), dtype=np.float64) - detections_token: List[str] = [] detections_types: List[int] = [] + detections_token: List[str] = [] for detection_idx, detection in enumerate(frame.laser_labels): if detection.type not in WOPD_DETECTION_NAME_DICT: @@ -427,8 +387,8 @@ def _extract_detections(frame: dataset_pb2.Frame) -> Tuple[List[List[float]], Li ).array # 3. Type and track token + detections_types.append(WOPD_DETECTION_NAME_DICT[detection.type]) detections_token.append(str(detection.id)) - detections_types.append(int(WOPD_DETECTION_NAME_DICT[detection.type])) detections_state[:, BoundingBoxSE3Index.STATE_SE3] = convert_relative_to_absolute_se3_array( origin=ego_rear_axle, se3_array=detections_state[:, BoundingBoxSE3Index.STATE_SE3] @@ -439,44 +399,37 @@ def _extract_detections(frame: dataset_pb2.Frame) -> Tuple[List[List[float]], Li euler_array[..., EulerAnglesIndex.PITCH] = DEFAULT_PITCH detections_state[..., BoundingBoxSE3Index.QUATERNION] = get_quaternion_array_from_euler_array(euler_array) - return detections_state.tolist(), detections_velocity.tolist(), detections_token, detections_types - - -def _extract_ego_state(frame: dataset_pb2.Frame) -> List[float]: - rear_axle_pose = _get_ego_pose_se3(frame) - - vehicle_parameters = get_wopd_chrysler_pacifica_parameters() - # FIXME: Find dynamic state in waymo open perception dataset - # https://github.com/waymo-research/waymo-open-dataset/issues/55#issuecomment-546152290 - dynamic_state = DynamicStateSE3( - velocity=Vector3D(*np.zeros(3)), - acceleration=Vector3D(*np.zeros(3)), - angular_velocity=Vector3D(*np.zeros(3)), - ) - - return EgoStateSE3.from_rear_axle( - rear_axle_se3=rear_axle_pose, - dynamic_state_se3=dynamic_state, - vehicle_parameters=vehicle_parameters, - time_point=None, - ).array.tolist() - + box_detections: List[BoxDetectionSE3] = [] + for detection_idx in range(num_detections): + box_detections.append( + BoxDetectionSE3( + metadata=BoxDetectionMetadata( + detection_type=detections_types[detection_idx], + timepoint=None, + track_token=detections_token[detection_idx], + confidence=None, + ), + bounding_box_se3=BoundingBoxSE3.from_array(detections_state[detection_idx]), + velocity=Vector3D.from_array(detections_velocity[detection_idx]), + ) + ) -def _extract_traffic_lights() -> Tuple[List[int], List[int]]: - pass + return BoxDetectionWrapper(box_detections=box_detections) -def _extract_camera( +def _extract_wopd_cameras( frame: dataset_pb2.Frame, data_converter_config: DataConverterConfig -) -> Dict[PinholeCameraType, Union[str, bytes]]: +) -> Dict[PinholeCameraType, Tuple[Union[str, bytes], StateSE3]]: + + # TODO: Implement option to store images as paths + assert data_converter_config.camera_store_option == "binary", "Camera store option must be 'binary' for WOPD." 
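+    # NOTE: Each returned entry maps a camera type to (encoded image bytes taken directly
+    # from the frame proto, fixed ego-to-camera extrinsic); WOPD ships images only as
+    # encoded bytes, hence the "binary" assert above.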
- camera_dict: Dict[str, Union[str, bytes]] = {} # TODO: Fix wrong type hint - np.array(frame.pose.transform).reshape(4, 4) + camera_dict: Dict[PinholeCameraType, Tuple[Union[str, bytes], StateSE3]] = {} # NOTE: The extrinsic matrix in frame.context.camera_calibration is fixed to model the ego to camera transformation. # The poses in frame.images[idx] are the motion compensated ego poses when the camera triggers. - context_extrinsic: Dict[str, StateSE3] = {} + camera_extrinsic: Dict[str, StateSE3] = {} for calibration in frame.context.camera_calibrations: camera_type = WOPD_CAMERA_TYPES[calibration.name] camera_transform = np.array(calibration.extrinsic.transform, dtype=np.float64).reshape(4, 4) @@ -488,20 +441,21 @@ def _extract_camera( from_convention=CameraConvention.pXpZmY, to_convention=CameraConvention.pZmYpX, ) - context_extrinsic[camera_type] = camera_pose + camera_extrinsic[camera_type] = camera_pose for image_proto in frame.images: camera_type = WOPD_CAMERA_TYPES[image_proto.name] - camera_bytes = image_proto.image - camera_dict[camera_type] = camera_bytes, context_extrinsic[camera_type].tolist() + camera_bytes: bytes = image_proto.image + camera_dict[camera_type] = camera_bytes, camera_extrinsic[camera_type] return camera_dict -def _extract_lidar( +def _extract_wopd_lidars( frame: dataset_pb2.Frame, data_converter_config: DataConverterConfig ) -> Dict[LiDARType, npt.NDArray[np.float32]]: + # TODO: Implement option to store point clouds as paths assert data_converter_config.lidar_store_option == "binary", "Lidar store option must be 'binary' for WOPD." (range_images, camera_projections, _, range_image_top_pose) = parse_range_image_and_camera_projection(frame) diff --git a/d123/datasets/wopd/wopd_data_converter_delete.py b/d123/datasets/wopd/wopd_data_converter_delete.py new file mode 100644 index 00000000..ea418d57 --- /dev/null +++ b/d123/datasets/wopd/wopd_data_converter_delete.py @@ -0,0 +1,521 @@ +import gc +import hashlib +import os +from functools import partial +from pathlib import Path +from typing import Any, Dict, Final, List, Optional, Tuple, Union + +import numpy as np +import numpy.typing as npt +import pyarrow as pa + +from d123.common.multithreading.worker_utils import WorkerPool, worker_map +from d123.common.utils.dependencies import check_dependencies +from d123.datasets.raw_data_converter import DataConverterConfig, RawDataConverter +from d123.datasets.utils.sensor.camera_conventions import CameraConvention, convert_camera_convention +from d123.datasets.utils.sensor.lidar_index_registry import WopdLidarIndex +from d123.datasets.wopd.waymo_map_utils.wopd_map_utils import convert_wopd_map +from d123.datasets.wopd.wopd_utils import parse_range_image_and_camera_projection +from d123.datatypes.detections.detection_types import DetectionType +from d123.datatypes.scene.arrow.utils.arrow_metadata_utils import add_log_metadata_to_arrow_schema +from d123.datatypes.scene.scene_metadata import LogMetadata +from d123.datatypes.sensors.camera.pinhole_camera import ( + PinholeCameraMetadata, + PinholeCameraType, + PinholeDistortion, + PinholeIntrinsics, +) +from d123.datatypes.sensors.lidar.lidar import LiDARMetadata, LiDARType +from d123.datatypes.vehicle_state.ego_state import DynamicStateSE3, EgoStateSE3, EgoStateSE3Index +from d123.datatypes.vehicle_state.vehicle_parameters import get_wopd_chrysler_pacifica_parameters +from d123.geometry import BoundingBoxSE3Index, EulerAngles, StateSE3, Vector3D, Vector3DIndex +from d123.geometry.geometry_index import EulerAnglesIndex, 
StateSE3Index +from d123.geometry.transform.transform_se3 import convert_relative_to_absolute_se3_array +from d123.geometry.utils.constants import DEFAULT_PITCH, DEFAULT_ROLL +from d123.geometry.utils.rotation_utils import ( + get_euler_array_from_quaternion_array, + get_quaternion_array_from_euler_array, +) + +check_dependencies(modules=["tensorflow", "waymo_open_dataset"], optional_name="waymo") +import tensorflow as tf +from waymo_open_dataset import dataset_pb2 +from waymo_open_dataset.utils import frame_utils + +# TODO: Make keep_polar_features an optional argument. +# With polar features, the lidar loading time is SIGNIFICANTLY higher. + +os.environ["CUDA_VISIBLE_DEVICES"] = "-1" +D123_MAPS_ROOT = Path(os.environ.get("D123_MAPS_ROOT")) + +TARGET_DT: Final[float] = 0.1 +SORT_BY_TIMESTAMP: Final[bool] = False + + +# https://github.com/waymo-research/waymo-open-dataset/blob/master/src/waymo_open_dataset/label.proto#L63 +WOPD_DETECTION_NAME_DICT: Dict[int, DetectionType] = { + 0: DetectionType.GENERIC_OBJECT, # TYPE_UNKNOWN + 1: DetectionType.VEHICLE, # TYPE_VEHICLE + 2: DetectionType.PEDESTRIAN, # TYPE_PEDESTRIAN + 3: DetectionType.SIGN, # TYPE_SIGN + 4: DetectionType.BICYCLE, # TYPE_CYCLIST +} + +# https://github.com/waymo-research/waymo-open-dataset/blob/master/src/waymo_open_dataset/dataset.proto#L50 +WOPD_CAMERA_TYPES: Dict[int, PinholeCameraType] = { + 1: PinholeCameraType.CAM_F0, # front_camera + 2: PinholeCameraType.CAM_L0, # front_left_camera + 3: PinholeCameraType.CAM_R0, # front_right_camera + 4: PinholeCameraType.CAM_L1, # left_camera + 5: PinholeCameraType.CAM_R1, # right_camera +} + +# https://github.com/waymo-research/waymo-open-dataset/blob/master/src/waymo_open_dataset/dataset.proto#L66 +WOPD_LIDAR_TYPES: Dict[int, LiDARType] = { + 0: LiDARType.LIDAR_UNKNOWN, # UNKNOWN + 1: LiDARType.LIDAR_TOP, # TOP + 2: LiDARType.LIDAR_FRONT, # FRONT + 3: LiDARType.LIDAR_SIDE_LEFT, # SIDE_LEFT + 4: LiDARType.LIDAR_SIDE_RIGHT, # SIDE_RIGHT + 5: LiDARType.LIDAR_BACK, # REAR +} + +WOPD_DATA_ROOT = Path("/media/nvme1/waymo_perception") # TODO: set as environment variable !!!! + +# Whether to use ego or zero roll and pitch values for bounding box detections (after global conversion) +ZERO_ROLL_PITCH: Final[bool] = True + + +def create_token(input_data: str) -> str: + # TODO: Refactor this function. + # TODO: Add a general function to create tokens from arbitrary data. + if isinstance(input_data, str): + input_data = input_data.encode("utf-8") + + hash_obj = hashlib.sha256(input_data) + return hash_obj.hexdigest()[:16] + + +class WOPDDataConverter(RawDataConverter): + def __init__( + self, + splits: List[str], + log_path: Union[Path, str], + data_converter_config: DataConverterConfig, + ) -> None: + super().__init__(data_converter_config) + for split in splits: + assert ( + split in self.get_available_splits() + ), f"Split {split} is not available. 
Available splits: {self.available_splits}" + + self._splits: List[str] = splits + self._tf_records_per_split: Dict[str, List[Path]] = self._collect_tf_records() + self._target_dt: float = 0.1 + + def _collect_tf_records(self) -> Dict[str, List[Path]]: + tf_records_per_split: Dict[str, List[Path]] = {} + + for split in self._splits: + if split in ["wopd_train"]: + log_path = WOPD_DATA_ROOT / "training" + else: + raise ValueError(f"Split {split} is not supported.") + + log_paths = [log_file for log_file in log_path.glob("*.tfrecord")] + tf_records_per_split[split] = log_paths + + return tf_records_per_split + + def get_available_splits(self) -> List[str]: + # TODO: Add more splits if available + return [ + "wopd_train", + ] + + def convert_maps(self, worker: WorkerPool) -> None: + log_args = [ + { + "tf_record": tf_record, + "split": split, + } + for split, tf_record_paths in self._tf_records_per_split.items() + for tf_record in tf_record_paths + ] + + worker_map( + worker, + partial(convert_wopd_tfrecord_map_to_gpkg, data_converter_config=self.data_converter_config), + log_args, + ) + + def convert_logs(self, worker: WorkerPool) -> None: + log_args = [ + { + "tf_record": tf_record, + "split": split, + } + for split, tf_record_paths in self._tf_records_per_split.items() + for tf_record in tf_record_paths + ] + + worker_map( + worker, + partial(convert_wopd_tfrecord_log_to_arrow, data_converter_config=self.data_converter_config), + log_args, + ) + + +def convert_wopd_tfrecord_map_to_gpkg( + args: List[Dict[str, Union[List[str], List[Path]]]], data_converter_config: DataConverterConfig +) -> List[Any]: + + for log_info in args: + tf_record_path: Path = log_info["tf_record"] + split: str = log_info["split"] + + if not tf_record_path.exists(): + raise FileNotFoundError(f"TFRecord path {tf_record_path} does not exist.") + + dataset = tf.data.TFRecordDataset(tf_record_path, compression_type="") + for data in dataset: + initial_frame = dataset_pb2.Frame() + initial_frame.ParseFromString(data.numpy()) + break + log_name = str(initial_frame.context.name) + map_file_path = D123_MAPS_ROOT / split / f"{log_name}.gpkg" + + if data_converter_config.force_map_conversion or not map_file_path.exists(): + map_file_path.unlink(missing_ok=True) + convert_wopd_map(initial_frame, map_file_path) + return [] + + +def convert_wopd_tfrecord_log_to_arrow( + args: List[Dict[str, Union[List[str], List[Path]]]], data_converter_config: DataConverterConfig +) -> List[Any]: + for log_info in args: + try: + + tf_record_path: Path = log_info["tf_record"] + split: str = log_info["split"] + + if not tf_record_path.exists(): + raise FileNotFoundError(f"TFRecord path {tf_record_path} does not exist.") + + dataset = tf.data.TFRecordDataset(tf_record_path, compression_type="") + for data in dataset: + initial_frame = dataset_pb2.Frame() + initial_frame.ParseFromString(data.numpy()) + break + + log_name = str(initial_frame.context.name) + log_file_path = data_converter_config.output_path / split / f"{log_name}.arrow" + + if data_converter_config.force_log_conversion or not log_file_path.exists(): + log_file_path.unlink(missing_ok=True) + if not log_file_path.parent.exists(): + log_file_path.parent.mkdir(parents=True, exist_ok=True) + + log_metadata = LogMetadata( + dataset="wopd", + split=split, + log_name=log_name, + location=None, # TODO: Add location information. 
+ timestep_seconds=TARGET_DT, + vehicle_parameters=get_wopd_chrysler_pacifica_parameters(), + camera_metadata=get_wopd_camera_metadata(initial_frame, data_converter_config), + lidar_metadata=get_wopd_lidar_metadata(initial_frame, data_converter_config), + map_has_z=True, + map_is_local=True, + ) + + schema_column_list = [ + ("token", pa.string()), + ("timestamp", pa.int64()), + ("detections_state", pa.list_(pa.list_(pa.float64(), len(BoundingBoxSE3Index)))), + ("detections_velocity", pa.list_(pa.list_(pa.float64(), len(Vector3DIndex)))), + ("detections_token", pa.list_(pa.string())), + ("detections_type", pa.list_(pa.int16())), + ("ego_states", pa.list_(pa.float64(), len(EgoStateSE3Index))), + ("traffic_light_ids", pa.list_(pa.int64())), + ("traffic_light_types", pa.list_(pa.int16())), + ("scenario_tag", pa.list_(pa.string())), + ("route_lane_group_ids", pa.list_(pa.int64())), + ] + # TODO: Adjust how cameras are added + if data_converter_config.camera_store_option is not None: + for camera_type in log_metadata.camera_metadata.keys(): + if data_converter_config.camera_store_option == "path": + raise NotImplementedError("Path camera storage is not implemented.") + elif data_converter_config.camera_store_option == "binary": + schema_column_list.append((camera_type.serialize(), pa.binary())) + schema_column_list.append( + (f"{camera_type.serialize()}_extrinsic", pa.list_(pa.float64(), len(StateSE3Index))) + ) + + if data_converter_config.lidar_store_option is not None: + for lidar_type in log_metadata.lidar_metadata.keys(): + if data_converter_config.lidar_store_option == "path": + raise NotImplementedError("Filepath lidar storage is not implemented.") + elif data_converter_config.lidar_store_option == "binary": + schema_column_list.append((lidar_type.serialize(), (pa.list_(pa.float32())))) + + recording_schema = pa.schema(schema_column_list) + recording_schema = add_log_metadata_to_arrow_schema(recording_schema, log_metadata) + + _write_recording_table(dataset, recording_schema, log_file_path, tf_record_path, data_converter_config) + + del recording_schema, dataset + except Exception as e: + import traceback + + print(f"Error processing log {str(tf_record_path)}: {e}") + traceback.print_exc() + gc.collect() + return [] + + +def get_wopd_camera_metadata( + initial_frame: dataset_pb2.Frame, data_converter_config: DataConverterConfig +) -> Dict[PinholeCameraType, PinholeCameraMetadata]: + + cam_metadatas: Dict[PinholeCameraType, PinholeCameraMetadata] = {} + if data_converter_config.camera_store_option is not None: + for calibration in initial_frame.context.camera_calibrations: + camera_type = WOPD_CAMERA_TYPES[calibration.name] + # https://github.com/waymo-research/waymo-open-dataset/blob/master/src/waymo_open_dataset/dataset.proto#L96 + # https://github.com/waymo-research/waymo-open-dataset/issues/834#issuecomment-2134995440 + fx, fy, cx, cy, k1, k2, p1, p2, k3 = calibration.intrinsic + intrinsics = PinholeIntrinsics(fx=fx, fy=fy, cx=cx, cy=cy) + distortion = PinholeDistortion(k1=k1, k2=k2, p1=p1, p2=p2, k3=k3) + if camera_type in WOPD_CAMERA_TYPES.values(): + cam_metadatas[camera_type] = PinholeCameraMetadata( + camera_type=camera_type, + width=calibration.width, + height=calibration.height, + intrinsics=intrinsics, + distortion=distortion, + ) + + return cam_metadatas + + +def get_wopd_lidar_metadata( + initial_frame: dataset_pb2.Frame, data_converter_config: DataConverterConfig +) -> Dict[LiDARType, LiDARMetadata]: + + laser_metadatas: Dict[LiDARType, LiDARMetadata] = {} + if 
data_converter_config.lidar_store_option is not None: + for laser_calibration in initial_frame.context.laser_calibrations: + + lidar_type = WOPD_LIDAR_TYPES[laser_calibration.name] + + extrinsic: Optional[StateSE3] = None + if laser_calibration.extrinsic: + extrinsic_transform = np.array(laser_calibration.extrinsic.transform, dtype=np.float64).reshape(4, 4) + extrinsic = StateSE3.from_transformation_matrix(extrinsic_transform) + + laser_metadatas[lidar_type] = LiDARMetadata( + lidar_type=lidar_type, + lidar_index=WopdLidarIndex, + extrinsic=extrinsic, + ) + + return laser_metadatas + + +def _write_recording_table( + dataset: tf.data.TFRecordDataset, + recording_schema: pa.schema, + log_file_path: Path, + tf_record_path: Path, + data_converter_config: DataConverterConfig, +) -> None: + + with pa.OSFile(str(log_file_path), "wb") as sink: + with pa.ipc.new_file(sink, recording_schema) as writer: + + dataset = tf.data.TFRecordDataset(tf_record_path, compression_type="") + for frame_idx, data in enumerate(dataset): + frame = dataset_pb2.Frame() + frame.ParseFromString(data.numpy()) + + (detections_state, detections_velocity, detections_token, detections_types) = _extract_detections(frame) + + # TODO: Implement traffic light extraction + traffic_light_ids = [] + traffic_light_types = [] + + # TODO: Implement detections + row_data = { + "token": [create_token(f"{frame.context.name}_{int(frame.timestamp_micros)}")], + "timestamp": [int(frame.timestamp_micros)], + "detections_state": [detections_state], + "detections_velocity": [detections_velocity], + "detections_token": [detections_token], + "detections_type": [detections_types], + "ego_states": [_extract_ego_state(frame)], + "traffic_light_ids": [traffic_light_ids], + "traffic_light_types": [traffic_light_types], + "scenario_tag": ["unknown"], + "route_lane_group_ids": [None], + } + + # TODO: Implement lidar extraction + if data_converter_config.lidar_store_option is not None: + lidar_data_dict = _extract_lidar(frame, data_converter_config) + for lidar_type, lidar_data in lidar_data_dict.items(): + if lidar_data is not None: + row_data[lidar_type.serialize()] = [lidar_data.tolist()] + else: + row_data[lidar_type.serialize()] = [None] + + if data_converter_config.camera_store_option is not None: + camera_data_dict = _extract_camera(frame, data_converter_config) + for camera_type, camera_data in camera_data_dict.items(): + if camera_data is not None: + row_data[camera_type.serialize()] = [camera_data[0]] + row_data[f"{camera_type.serialize()}_extrinsic"] = [camera_data[1]] + else: + row_data[camera_type.serialize()] = [None] + row_data[f"{camera_type.serialize()}_extrinsic"] = [None] + + batch = pa.record_batch(row_data, schema=recording_schema) + writer.write_batch(batch) + del batch, row_data, detections_state, detections_velocity, detections_token, detections_types + + +def _get_ego_pose_se3(frame: dataset_pb2.Frame) -> StateSE3: + ego_pose_matrix = np.array(frame.pose.transform, dtype=np.float64).reshape(4, 4) + return StateSE3.from_transformation_matrix(ego_pose_matrix) + + +def _extract_detections(frame: dataset_pb2.Frame) -> Tuple[List[List[float]], List[List[float]], List[str], List[int]]: + + ego_rear_axle = _get_ego_pose_se3(frame) + + num_detections = len(frame.laser_labels) + detections_state = np.zeros((num_detections, len(BoundingBoxSE3Index)), dtype=np.float64) + detections_velocity = np.zeros((num_detections, len(Vector3DIndex)), dtype=np.float64) + detections_token: List[str] = [] + detections_types: List[int] = [] + + for 
detection_idx, detection in enumerate(frame.laser_labels): + if detection.type not in WOPD_DETECTION_NAME_DICT: + continue + detection_quaternion = EulerAngles( + roll=DEFAULT_ROLL, + pitch=DEFAULT_PITCH, + yaw=detection.box.heading, + ).quaternion + + # 2. Fill SE3 Bounding Box + detections_state[detection_idx, BoundingBoxSE3Index.X] = detection.box.center_x + detections_state[detection_idx, BoundingBoxSE3Index.Y] = detection.box.center_y + detections_state[detection_idx, BoundingBoxSE3Index.Z] = detection.box.center_z + detections_state[detection_idx, BoundingBoxSE3Index.QUATERNION] = detection_quaternion + detections_state[detection_idx, BoundingBoxSE3Index.LENGTH] = detection.box.length + detections_state[detection_idx, BoundingBoxSE3Index.WIDTH] = detection.box.width + detections_state[detection_idx, BoundingBoxSE3Index.HEIGHT] = detection.box.height + + # 2. Velocity TODO: check if velocity needs to be rotated + detections_velocity[detection_idx] = Vector3D( + x=detection.metadata.speed_x, + y=detection.metadata.speed_y, + z=detection.metadata.speed_z, + ).array + + # 3. Type and track token + detections_token.append(str(detection.id)) + detections_types.append(int(WOPD_DETECTION_NAME_DICT[detection.type])) + + detections_state[:, BoundingBoxSE3Index.STATE_SE3] = convert_relative_to_absolute_se3_array( + origin=ego_rear_axle, se3_array=detections_state[:, BoundingBoxSE3Index.STATE_SE3] + ) + if ZERO_ROLL_PITCH: + euler_array = get_euler_array_from_quaternion_array(detections_state[:, BoundingBoxSE3Index.QUATERNION]) + euler_array[..., EulerAnglesIndex.ROLL] = DEFAULT_ROLL + euler_array[..., EulerAnglesIndex.PITCH] = DEFAULT_PITCH + detections_state[..., BoundingBoxSE3Index.QUATERNION] = get_quaternion_array_from_euler_array(euler_array) + + return detections_state.tolist(), detections_velocity.tolist(), detections_token, detections_types + + +def _extract_ego_state(frame: dataset_pb2.Frame) -> List[float]: + rear_axle_pose = _get_ego_pose_se3(frame) + + vehicle_parameters = get_wopd_chrysler_pacifica_parameters() + # FIXME: Find dynamic state in waymo open perception dataset + # https://github.com/waymo-research/waymo-open-dataset/issues/55#issuecomment-546152290 + dynamic_state = DynamicStateSE3( + velocity=Vector3D(*np.zeros(3)), + acceleration=Vector3D(*np.zeros(3)), + angular_velocity=Vector3D(*np.zeros(3)), + ) + + return EgoStateSE3.from_rear_axle( + rear_axle_se3=rear_axle_pose, + dynamic_state_se3=dynamic_state, + vehicle_parameters=vehicle_parameters, + time_point=None, + ).array.tolist() + + +def _extract_traffic_lights() -> Tuple[List[int], List[int]]: + pass + + +def _extract_camera( + frame: dataset_pb2.Frame, data_converter_config: DataConverterConfig +) -> Dict[PinholeCameraType, Union[str, bytes]]: + + camera_dict: Dict[str, Union[str, bytes]] = {} # TODO: Fix wrong type hint + np.array(frame.pose.transform).reshape(4, 4) + + # NOTE: The extrinsic matrix in frame.context.camera_calibration is fixed to model the ego to camera transformation. + # The poses in frame.images[idx] are the motion compensated ego poses when the camera triggers. + + context_extrinsic: Dict[str, StateSE3] = {} + for calibration in frame.context.camera_calibrations: + camera_type = WOPD_CAMERA_TYPES[calibration.name] + camera_transform = np.array(calibration.extrinsic.transform, dtype=np.float64).reshape(4, 4) + camera_pose = StateSE3.from_transformation_matrix(camera_transform) + # NOTE: WOPD uses a different camera convention than d123 + # https://arxiv.org/pdf/1912.04838 (Figure 1.) 
+ camera_pose = convert_camera_convention( + camera_pose, + from_convention=CameraConvention.pXpZmY, + to_convention=CameraConvention.pZmYpX, + ) + context_extrinsic[camera_type] = camera_pose + + for image_proto in frame.images: + camera_type = WOPD_CAMERA_TYPES[image_proto.name] + camera_bytes = image_proto.image + camera_dict[camera_type] = camera_bytes, context_extrinsic[camera_type].tolist() + + return camera_dict + + +def _extract_lidar( + frame: dataset_pb2.Frame, data_converter_config: DataConverterConfig +) -> Dict[LiDARType, npt.NDArray[np.float32]]: + + assert data_converter_config.lidar_store_option == "binary", "Lidar store option must be 'binary' for WOPD." + (range_images, camera_projections, _, range_image_top_pose) = parse_range_image_and_camera_projection(frame) + + points, cp_points = frame_utils.convert_range_image_to_point_cloud( + frame=frame, + range_images=range_images, + camera_projections=camera_projections, + range_image_top_pose=range_image_top_pose, + keep_polar_features=False, + ) + + lidar_data: Dict[LiDARType, npt.NDArray[np.float32]] = {} + for lidar_idx, frame_lidar in enumerate(frame.lasers): + lidar_type = WOPD_LIDAR_TYPES[frame_lidar.name] + lidar_data[lidar_type] = np.array(points[lidar_idx], dtype=np.float32).flatten() + + return lidar_data diff --git a/d123/script/config/dataset_conversion/default_dataset_conversion.yaml b/d123/script/config/dataset_conversion/default_dataset_conversion.yaml index f9e0f5a6..1d18b82a 100644 --- a/d123/script/config/dataset_conversion/default_dataset_conversion.yaml +++ b/d123/script/config/dataset_conversion/default_dataset_conversion.yaml @@ -14,8 +14,8 @@ defaults: - datasets: # - nuplan_private_dataset # - carla_dataset - # - wopd_dataset - - av2_sensor_dataset + - wopd_dataset + # - av2_sensor_dataset - _self_ force_map_conversion: False diff --git a/d123/script/config/datasets/wopd_dataset.yaml b/d123/script/config/datasets/wopd_dataset.yaml index 64ac38e3..2c1be19c 100644 --- a/d123/script/config/datasets/wopd_dataset.yaml +++ b/d123/script/config/datasets/wopd_dataset.yaml @@ -20,15 +20,15 @@ wopd_dataset: include_box_detections: true # Traffic Lights - include_traffic_lights: true + include_traffic_lights: false # Cameras include_cameras: true - camera_store_option: "path" # "path", "binary", "mp4" + camera_store_option: "binary" # "path", "binary", "mp4" # LiDARs - include_lidars: true - lidar_store_option: "path" # "path", "binary" + include_lidars: false + lidar_store_option: "binary" # "path", "binary" # Scenario tag / Route # NOTE: These are only supported for nuPlan. Consider removing or expanding support. diff --git a/d123/script/run_dataset_conversion.py b/d123/script/run_dataset_conversion.py index cdbee27d..042614f4 100644 --- a/d123/script/run_dataset_conversion.py +++ b/d123/script/run_dataset_conversion.py @@ -4,6 +4,7 @@ import hydra from omegaconf import DictConfig +from d123 import ascii_banner from d123.script.builders.data_converter_builder import RawDataConverter, build_data_converter from d123.script.builders.worker_pool_builder import build_worker @@ -19,6 +20,7 @@ def main(cfg: DictConfig) -> None: Main entrypoint for metric caching. 
:param cfg: omegaconf dictionary """ + logger.info(ascii_banner) # Build worker worker = build_worker(cfg) From a8cba362648d87bf12893a4f8017b6d813b0c2ab Mon Sep 17 00:00:00 2001 From: DanielDauner Date: Thu, 9 Oct 2025 14:59:15 +0200 Subject: [PATCH 064/145] Remove old data converter files for nuPlan, Waymo, AV2 --- .../datasets/av2/av2_data_converter_delete.py | 527 ------------------ .../nuplan/nuplan_data_converter_delete.py | 505 ----------------- .../wopd/wopd_data_converter_delete.py | 521 ----------------- 3 files changed, 1553 deletions(-) delete mode 100644 d123/datasets/av2/av2_data_converter_delete.py delete mode 100644 d123/datasets/nuplan/nuplan_data_converter_delete.py delete mode 100644 d123/datasets/wopd/wopd_data_converter_delete.py diff --git a/d123/datasets/av2/av2_data_converter_delete.py b/d123/datasets/av2/av2_data_converter_delete.py deleted file mode 100644 index 63a5279d..00000000 --- a/d123/datasets/av2/av2_data_converter_delete.py +++ /dev/null @@ -1,527 +0,0 @@ -import gc -import hashlib -from functools import partial -from pathlib import Path -from typing import Any, Dict, List, Optional, Tuple, Union - -import numpy as np -import pandas as pd -import pyarrow as pa - -from d123.common.multithreading.worker_utils import WorkerPool, worker_map -from d123.datasets.av2.av2_constants import ( - AV2_CAMERA_TYPE_MAPPING, - AV2_TO_DETECTION_TYPE, - AV2SensorBoxDetectionType, -) -from d123.datasets.av2.av2_helper import ( - build_sensor_dataframe, - build_synchronization_dataframe, - find_closest_target_fpath, - get_slice_with_timestamp_ns, -) -from d123.datasets.av2.av2_map_conversion import convert_av2_map -from d123.datasets.raw_data_converter import DataConverterConfig, RawDataConverter -from d123.datatypes.scene.arrow.utils.arrow_metadata_utils import add_log_metadata_to_arrow_schema -from d123.datatypes.scene.scene_metadata import LogMetadata -from d123.datatypes.sensors.camera.pinhole_camera import ( - PinholeCameraMetadata, - PinholeCameraType, - PinholeDistortion, - PinholeIntrinsics, -) -from d123.datatypes.sensors.lidar.lidar import LiDARMetadata, LiDARType -from d123.datatypes.time.time_point import TimePoint -from d123.datatypes.vehicle_state.ego_state import DynamicStateSE3, EgoStateSE3, EgoStateSE3Index -from d123.datatypes.vehicle_state.vehicle_parameters import ( - get_av2_ford_fusion_hybrid_parameters, - rear_axle_se3_to_center_se3, -) -from d123.geometry import BoundingBoxSE3Index, StateSE3, Vector3D, Vector3DIndex -from d123.geometry.geometry_index import StateSE3Index -from d123.geometry.transform.transform_se3 import convert_relative_to_absolute_se3_array - - -def create_token(input_data: str) -> str: - # TODO: Refactor this function. - # TODO: Add a general function to create tokens from arbitrary data. - if isinstance(input_data, str): - input_data = input_data.encode("utf-8") - - hash_obj = hashlib.sha256(input_data) - return hash_obj.hexdigest()[:16] - - -class AV2SensorDataConverter(RawDataConverter): - def __init__( - self, - splits: List[str], - log_path: Union[Path, str], - data_converter_config: DataConverterConfig, - ) -> None: - super().__init__(data_converter_config) - for split in splits: - assert ( - split in self.get_available_splits() - ), f"Split {split} is not available. 
Available splits: {self.available_splits}" - - self._splits: List[str] = splits - self._data_root: Path = Path(log_path) - self._log_paths_per_split: Dict[str, List[Path]] = self._collect_log_paths() - self._target_dt: float = 0.1 - - def _collect_log_paths(self) -> Dict[str, List[Path]]: - log_paths_per_split: Dict[str, List[Path]] = {} - - for split in self._splits: - subsplit = split.split("_")[-1] - assert subsplit in ["train", "val", "test"] - - if "av2_sensor" in split: - log_folder = self._data_root / "sensor" / subsplit - elif "av2_lidar" in split: - log_folder = self._data_root / "lidar" / subsplit - elif "av2_motion" in split: - log_folder = self._data_root / "motion-forecasting" / subsplit - elif "av2-sensor-mini" in split: - log_folder = self._data_root / "sensor_mini" / subsplit - - log_paths_per_split[split] = list(log_folder.iterdir()) - - return log_paths_per_split - - def get_available_splits(self) -> List[str]: - return [ - "av2-sensor_train", - "av2-sensor_val", - "av2-sensor_test", - "av2-sensor-mini_train", - "av2-sensor-mini_val", - "av2-sensor-mini_test", - ] - - def convert_maps(self, worker: WorkerPool) -> None: - log_args = [ - { - "log_path": log_path, - "split": split, - } - for split, log_paths in self._log_paths_per_split.items() - for log_path in log_paths - ] - worker_map( - worker, - partial(convert_av2_map_to_gpkg, data_converter_config=self.data_converter_config), - log_args, - ) - - def convert_logs(self, worker: WorkerPool) -> None: - log_args = [ - { - "log_path": log_path, - "split": split, - } - for split, log_paths in self._log_paths_per_split.items() - for log_path in log_paths - ] - - worker_map( - worker, - partial( - convert_av2_log_to_arrow, - data_converter_config=self.data_converter_config, - ), - log_args, - ) - - -def convert_av2_map_to_gpkg( - args: List[Dict[str, Union[List[str], List[Path]]]], - data_converter_config: DataConverterConfig, -) -> List[Any]: - for log_info in args: - source_log_path: Path = log_info["log_path"] - split: str = log_info["split"] - - source_log_name = source_log_path.name - - map_path = data_converter_config.output_path / "maps" / split / f"{source_log_name}.gpkg" - if data_converter_config.force_map_conversion or not map_path.exists(): - map_path.unlink(missing_ok=True) - convert_av2_map(source_log_path, map_path) - return [] - - -def convert_av2_log_to_arrow( - args: List[Dict[str, Union[List[str], List[Path]]]], - data_converter_config: DataConverterConfig, -) -> List[Any]: - for log_info in args: - log_path: Path = log_info["log_path"] - split: str = log_info["split"] - - if not log_path.exists(): - raise FileNotFoundError(f"Log path {log_path} does not exist.") - - log_file_path = data_converter_config.output_path / split / f"{log_path.stem}.arrow" - - if data_converter_config.force_log_conversion or not log_file_path.exists(): - log_file_path.unlink(missing_ok=True) - if not log_file_path.parent.exists(): - log_file_path.parent.mkdir(parents=True, exist_ok=True) - - sensor_df = build_sensor_dataframe(log_path) - synchronization_df = build_synchronization_dataframe(sensor_df) - - log_metadata = LogMetadata( - dataset="av2-sensor", - split=split, - log_name=log_path.name, - location=None, # TODO: Add location information. 
- timestep_seconds=0.1, - vehicle_parameters=get_av2_ford_fusion_hybrid_parameters(), - camera_metadata=get_av2_camera_metadata(log_path), - lidar_metadata=get_av2_lidar_metadata(log_path), - map_has_z=True, - map_is_local=True, - ) - - schema_column_list = [ - ("token", pa.string()), - ("timestamp", pa.int64()), - ("detections_state", pa.list_(pa.list_(pa.float64(), len(BoundingBoxSE3Index)))), - ("detections_velocity", pa.list_(pa.list_(pa.float64(), len(Vector3DIndex)))), - ("detections_token", pa.list_(pa.string())), - ("detections_type", pa.list_(pa.int16())), - ("ego_states", pa.list_(pa.float64(), len(EgoStateSE3Index))), - ("traffic_light_ids", pa.list_(pa.int64())), - ("traffic_light_types", pa.list_(pa.int16())), - ("scenario_tag", pa.list_(pa.string())), - ("route_lane_group_ids", pa.list_(pa.int64())), - ] - if data_converter_config.lidar_store_option is not None: - for lidar_type in log_metadata.lidar_metadata.keys(): - if data_converter_config.lidar_store_option == "path": - schema_column_list.append((lidar_type.serialize(), pa.string())) - elif data_converter_config.lidar_store_option == "binary": - raise NotImplementedError("Binary lidar storage is not implemented.") - - if data_converter_config.camera_store_option is not None: - for camera_type in log_metadata.camera_metadata.keys(): - if data_converter_config.camera_store_option == "path": - schema_column_list.append((camera_type.serialize(), pa.string())) - - elif data_converter_config.camera_store_option == "binary": - schema_column_list.append((camera_type.serialize(), pa.binary())) - - schema_column_list.append( - (f"{camera_type.serialize()}_extrinsic", pa.list_(pa.float64(), len(StateSE3Index))) - ) - - recording_schema = pa.schema(schema_column_list) - recording_schema = add_log_metadata_to_arrow_schema(recording_schema, log_metadata) - - _write_recording_table( - sensor_df, - synchronization_df, - recording_schema, - log_file_path, - log_path, - data_converter_config, - ) - del recording_schema - gc.collect() - return [] - - -def get_av2_camera_metadata(log_path: Path) -> Dict[PinholeCameraType, PinholeCameraMetadata]: - - intrinsics_file = log_path / "calibration" / "intrinsics.feather" - intrinsics_df = pd.read_feather(intrinsics_file) - - camera_metadata: Dict[PinholeCameraType, PinholeCameraMetadata] = {} - for _, row in intrinsics_df.iterrows(): - row = row.to_dict() - camera_type = AV2_CAMERA_TYPE_MAPPING[row["sensor_name"]] - camera_metadata[camera_type] = PinholeCameraMetadata( - camera_type=camera_type, - width=row["width_px"], - height=row["height_px"], - intrinsics=PinholeIntrinsics( - fx=row["fx_px"], - fy=row["fy_px"], - cx=row["cx_px"], - cy=row["cy_px"], - ), - distortion=PinholeDistortion( - k1=row["k1"], - k2=row["k2"], - p1=0.0, - p2=0.0, - k3=row["k3"], - ), - ) - - return camera_metadata - - -def get_av2_lidar_metadata(log_path: Path) -> Dict[LiDARType, LiDARMetadata]: - # metadata: Dict[LiDARType, LiDARMetadata] = {} - # metadata[LiDARType.LIDAR_MERGED] = LiDARMetadata( - # lidar_type=LiDARType.LIDAR_MERGED, - # lidar_index=NuplanLidarIndex, - # extrinsic=None, # NOTE: LiDAR extrinsic are unknown - # ) - # return metadata - return {} - - -def _write_recording_table( - sensor_df: pd.DataFrame, - synchronization_df: pd.DataFrame, - recording_schema: pa.schema, - log_file_path: Path, - source_log_path: Path, - data_converter_config: DataConverterConfig, -) -> None: - - # NOTE: Similar to other datasets, we use the lidar timestamps as reference timestamps. 
- lidar_sensor = sensor_df.xs(key="lidar", level=2) - lidar_timestamps_ns = np.sort([int(idx_tuple[2]) for idx_tuple in lidar_sensor.index]) - - # NOTE: The annotation dataframe is not available for the test split. - annotations_df = ( - pd.read_feather(source_log_path / "annotations.feather") - if (source_log_path / "annotations.feather").exists() - else None - ) - - city_se3_egovehicle_df = pd.read_feather(source_log_path / "city_SE3_egovehicle.feather") - - egovehicle_se3_sensor_df = ( - pd.read_feather(source_log_path / "calibration" / "egovehicle_SE3_sensor.feather") - if data_converter_config.camera_store_option is not None - else None - ) - - # with pa.ipc.new_stream(str(log_file_path), recording_schema) as writer: - with pa.OSFile(str(log_file_path), "wb") as sink: - with pa.ipc.new_file(sink, recording_schema) as writer: - - for lidar_timestamp_ns in lidar_timestamps_ns: - - ego_state_se3 = _extract_ego_state(city_se3_egovehicle_df, lidar_timestamp_ns) - ( - detections_state, - detections_velocity, - detections_token, - detections_types, - ) = _extract_box_detections(annotations_df, lidar_timestamp_ns, ego_state_se3) - traffic_light_ids, traffic_light_types = _extract_traffic_lights() - route_lane_group_ids = None # TODO: Add route lane group ids extraction ? - row_data = { - "token": [create_token(str(lidar_timestamp_ns))], - "timestamp": [TimePoint.from_ns(int(lidar_timestamp_ns)).time_us], - "detections_state": [detections_state], - "detections_velocity": [detections_velocity], - "detections_token": [detections_token], - "detections_type": [detections_types], - "ego_states": [ego_state_se3.array.tolist()], - "traffic_light_ids": [traffic_light_ids], - "traffic_light_types": [traffic_light_types], - "scenario_tag": [_extract_scenario_tag()], - "route_lane_group_ids": [route_lane_group_ids], - } - - # TODO: add lidar data - - # if data_converter_config.lidar_store_option is not None: - # lidar_data_dict = _extract_lidar(lidar_pc, data_converter_config) - # for lidar_type, lidar_data in lidar_data_dict.items(): - # if lidar_data is not None: - # row_data[lidar_type.serialize()] = [lidar_data] - # else: - # row_data[lidar_type.serialize()] = [None] - - if data_converter_config.camera_store_option is not None: - camera_data_dict = _extract_camera( - lidar_timestamp_ns, - city_se3_egovehicle_df, - egovehicle_se3_sensor_df, - ego_state_se3, - synchronization_df, - source_log_path, - data_converter_config, - ) - for camera_type, camera_data in camera_data_dict.items(): - if camera_data is not None: - row_data[camera_type.serialize()] = [camera_data[0]] - row_data[f"{camera_type.serialize()}_extrinsic"] = [camera_data[1]] - else: - row_data[camera_type.serialize()] = [None] - row_data[f"{camera_type.serialize()}_extrinsic"] = [None] - - batch = pa.record_batch(row_data, schema=recording_schema) - writer.write_batch(batch) - del batch, row_data, detections_state, detections_velocity, detections_token, detections_types - - -def _extract_box_detections( - annotations_df: Optional[pd.DataFrame], - lidar_timestamp_ns: int, - ego_state_se3: EgoStateSE3, -) -> Tuple[List[List[float]], List[List[float]], List[str], List[int]]: - - # TODO: Extract velocity from annotations_df if available. 
- - if annotations_df is None: - return [], [], [], [] - - annotations_slice = get_slice_with_timestamp_ns(annotations_df, lidar_timestamp_ns) - num_detections = len(annotations_slice) - - detections_state = np.zeros((num_detections, len(BoundingBoxSE3Index)), dtype=np.float64) - detections_velocity = np.zeros((num_detections, len(Vector3DIndex)), dtype=np.float64) - detections_token: List[str] = annotations_slice["track_uuid"].tolist() - detections_types: List[int] = [] - - for detection_idx, (_, row) in enumerate(annotations_slice.iterrows()): - row = row.to_dict() - - detections_state[detection_idx, BoundingBoxSE3Index.XYZ] = [row["tx_m"], row["ty_m"], row["tz_m"]] - detections_state[detection_idx, BoundingBoxSE3Index.QUATERNION] = [row["qw"], row["qx"], row["qy"], row["qz"]] - detections_state[detection_idx, BoundingBoxSE3Index.EXTENT] = [row["length_m"], row["width_m"], row["height_m"]] - - av2_detection_type = AV2SensorBoxDetectionType.deserialize(row["category"]) - detections_types.append(int(AV2_TO_DETECTION_TYPE[av2_detection_type])) - - detections_state[:, BoundingBoxSE3Index.STATE_SE3] = convert_relative_to_absolute_se3_array( - origin=ego_state_se3.rear_axle_se3, - se3_array=detections_state[:, BoundingBoxSE3Index.STATE_SE3], - ) - - return detections_state.tolist(), detections_velocity.tolist(), detections_token, detections_types - - -def _extract_ego_state(city_se3_egovehicle_df: pd.DataFrame, lidar_timestamp_ns: int) -> EgoStateSE3: - ego_state_slice = get_slice_with_timestamp_ns(city_se3_egovehicle_df, lidar_timestamp_ns) - assert ( - len(ego_state_slice) == 1 - ), f"Expected exactly one ego state for timestamp {lidar_timestamp_ns}, got {len(ego_state_slice)}." - - ego_pose_dict = ego_state_slice.iloc[0].to_dict() - rear_axle_pose = StateSE3( - x=ego_pose_dict["tx_m"], - y=ego_pose_dict["ty_m"], - z=ego_pose_dict["tz_m"], - qw=ego_pose_dict["qw"], - qx=ego_pose_dict["qx"], - qy=ego_pose_dict["qy"], - qz=ego_pose_dict["qz"], - ) - - vehicle_parameters = get_av2_ford_fusion_hybrid_parameters() - center = rear_axle_se3_to_center_se3(rear_axle_se3=rear_axle_pose, vehicle_parameters=vehicle_parameters) - - # TODO: Add script to calculate the dynamic state from log sequence. - dynamic_state = DynamicStateSE3( - velocity=Vector3D( - x=0.0, - y=0.0, - z=0.0, - ), - acceleration=Vector3D( - x=0.0, - y=0.0, - z=0.0, - ), - angular_velocity=Vector3D( - x=0.0, - y=0.0, - z=0.0, - ), - ) - - return EgoStateSE3( - center_se3=center, - dynamic_state_se3=dynamic_state, - vehicle_parameters=vehicle_parameters, - timepoint=None, - ) - - -def _extract_traffic_lights() -> Tuple[List[int], List[int]]: - return [], [] - - -def _extract_scenario_tag() -> List[str]: - return ["unknown"] - - -def _extract_camera( - lidar_timestamp_ns: int, - city_se3_egovehicle_df: pd.DataFrame, - egovehicle_se3_sensor_df: pd.DataFrame, - ego_state_se3: EgoStateSE3, - synchronization_df: pd.DataFrame, - source_log_path: Path, - data_converter_config: DataConverterConfig, -) -> Dict[PinholeCameraType, Union[str, bytes]]: - - camera_dict: Dict[PinholeCameraType, Union[str, bytes]] = { - camera_type: None for camera_type in AV2_CAMERA_TYPE_MAPPING.values() - } - split = source_log_path.parent.name - log_id = source_log_path.name - - source_dataset_dir = source_log_path.parent.parent - - rear_axle_se3 = ego_state_se3.rear_axle_se3 - ego_transform = rear_axle_se3.transformation_matrix - ego_transform # TODO: Refactor this file, ie. why is the ego transform calculated but not used? 
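For intuition, the convert_relative_to_absolute_se3_array call used for the detections above boils down to composing each box pose with the ego pose. A standalone sketch with 4x4 homogeneous matrices (quaternion handling omitted for brevity):

import numpy as np

def compose(origin, relative):
    """Express a pose given relative to `origin` in the global frame."""
    return origin @ relative

yaw = np.pi / 2  # ego rear axle at (10, 5, 0), yawed 90 degrees
origin = np.array(
    [
        [np.cos(yaw), -np.sin(yaw), 0.0, 10.0],
        [np.sin(yaw), np.cos(yaw), 0.0, 5.0],
        [0.0, 0.0, 1.0, 0.0],
        [0.0, 0.0, 0.0, 1.0],
    ]
)
relative = np.eye(4)
relative[0, 3] = 2.0  # a box 2 m ahead of the ego
print(compose(origin, relative)[:3, 3])  # ~[10. 7. 0.]: "ahead" maps to global +y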
- - for _, row in egovehicle_se3_sensor_df.iterrows(): - row = row.to_dict() - if row["sensor_name"] not in AV2_CAMERA_TYPE_MAPPING: - continue - - camera_name = row["sensor_name"] - camera_type = AV2_CAMERA_TYPE_MAPPING[camera_name] - - relative_image_path = find_closest_target_fpath( - split=split, - log_id=log_id, - src_sensor_name="lidar", - src_timestamp_ns=lidar_timestamp_ns, - target_sensor_name=camera_name, - synchronization_df=synchronization_df, - ) - if relative_image_path is None: - camera_dict[camera_type] = None - else: - absolute_image_path = source_dataset_dir / relative_image_path - assert absolute_image_path.exists() - - # TODO: Adjust for finer IMU timestamps to correct the camera extrinsic. - camera_extrinsic = StateSE3( - x=row["tx_m"], - y=row["ty_m"], - z=row["tz_m"], - qw=row["qw"], - qx=row["qx"], - qy=row["qy"], - qz=row["qz"], - ) - - if data_converter_config.camera_store_option == "path": - camera_dict[camera_type] = (str(relative_image_path), camera_extrinsic.tolist()) - elif data_converter_config.camera_store_option == "binary": - with open(absolute_image_path, "rb") as f: - camera_dict[camera_type] = (f.read(), camera_extrinsic.tolist()) - - return camera_dict - - -def _extract_lidar(lidar_pc, data_converter_config: DataConverterConfig) -> Dict[LiDARType, Optional[str]]: - # TODO: Implement this function to extract lidar data. - return {} diff --git a/d123/datasets/nuplan/nuplan_data_converter_delete.py b/d123/datasets/nuplan/nuplan_data_converter_delete.py deleted file mode 100644 index e0029dd2..00000000 --- a/d123/datasets/nuplan/nuplan_data_converter_delete.py +++ /dev/null @@ -1,505 +0,0 @@ -import gc -import os -import pickle -from functools import partial -from pathlib import Path -from typing import Any, Dict, Final, List, Literal, Optional, Tuple, Union - -import numpy as np -import pyarrow as pa -import yaml - -import d123.datasets.nuplan.utils as nuplan_utils -from d123.common.multithreading.worker_utils import WorkerPool, worker_map -from d123.common.utils.arrow_helper import open_arrow_table, write_arrow_table -from d123.common.utils.dependencies import check_dependencies -from d123.datasets.nuplan.nuplan_map_conversion import MAP_LOCATIONS, NuPlanMapConverter -from d123.datasets.raw_data_converter import DataConverterConfig, RawDataConverter -from d123.datasets.utils.sensor.lidar_index_registry import NuplanLidarIndex -from d123.datatypes.detections.detection import TrafficLightStatus -from d123.datatypes.detections.detection_types import DetectionType -from d123.datatypes.scene.arrow.utils.arrow_metadata_utils import add_log_metadata_to_arrow_schema -from d123.datatypes.scene.scene_metadata import LogMetadata -from d123.datatypes.sensors.camera.pinhole_camera import ( - PinholeCameraMetadata, - PinholeCameraType, - PinholeDistortion, - PinholeIntrinsics, -) -from d123.datatypes.sensors.lidar.lidar import LiDARMetadata, LiDARType -from d123.datatypes.time.time_point import TimePoint -from d123.datatypes.vehicle_state.ego_state import DynamicStateSE3, EgoStateSE3, EgoStateSE3Index -from d123.datatypes.vehicle_state.vehicle_parameters import ( - get_nuplan_chrysler_pacifica_parameters, - rear_axle_se3_to_center_se3, -) -from d123.geometry import BoundingBoxSE3, BoundingBoxSE3Index, StateSE3, Vector3D, Vector3DIndex -from d123.geometry.geometry_index import StateSE3Index -from d123.geometry.rotation import EulerAngles -from d123.geometry.utils.constants import DEFAULT_PITCH, DEFAULT_ROLL - -check_dependencies(["nuplan", "sqlalchemy"], "nuplan") 
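The check_dependencies call above guards the heavyweight nuplan and sqlalchemy imports that follow, so the rest of the package stays importable without them. A minimal sketch of such a guard built on importlib (the message wording is illustrative, not the actual d123 implementation):

import importlib.util
from typing import List

def check_dependencies(modules: List[str], optional_name: str) -> None:
    """Raise a readable error if an optional dependency group is not installed."""
    missing = [module for module in modules if importlib.util.find_spec(module) is None]
    if missing:
        raise ImportError(
            f"Missing optional dependencies {missing}; install the '{optional_name}' extra to use this converter."
        )

check_dependencies(["numpy"], "core")  # passes when numpy is installed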
-from nuplan.database.nuplan_db.nuplan_scenario_queries import get_cameras, get_images_from_lidar_tokens -from nuplan.database.nuplan_db_orm.ego_pose import EgoPose -from nuplan.database.nuplan_db_orm.lidar_box import LidarBox -from nuplan.database.nuplan_db_orm.lidar_pc import LidarPc -from nuplan.database.nuplan_db_orm.nuplandb import NuPlanDB -from nuplan.planning.simulation.observation.observation_type import CameraChannel -from sqlalchemy import func - -TARGET_DT: Final[float] = 0.1 -NUPLAN_DT: Final[float] = 0.05 -SORT_BY_TIMESTAMP: Final[bool] = True - -NUPLAN_TRAFFIC_STATUS_DICT: Final[Dict[str, TrafficLightStatus]] = { - "green": TrafficLightStatus.GREEN, - "red": TrafficLightStatus.RED, - "unknown": TrafficLightStatus.UNKNOWN, -} -NUPLAN_DETECTION_NAME_DICT = { - "vehicle": DetectionType.VEHICLE, - "bicycle": DetectionType.BICYCLE, - "pedestrian": DetectionType.PEDESTRIAN, - "traffic_cone": DetectionType.TRAFFIC_CONE, - "barrier": DetectionType.BARRIER, - "czone_sign": DetectionType.CZONE_SIGN, - "generic_object": DetectionType.GENERIC_OBJECT, -} - -NUPLAN_CAMERA_TYPES = { - PinholeCameraType.CAM_F0: CameraChannel.CAM_F0, - PinholeCameraType.CAM_B0: CameraChannel.CAM_B0, - PinholeCameraType.CAM_L0: CameraChannel.CAM_L0, - PinholeCameraType.CAM_L1: CameraChannel.CAM_L1, - PinholeCameraType.CAM_L2: CameraChannel.CAM_L2, - PinholeCameraType.CAM_R0: CameraChannel.CAM_R0, - PinholeCameraType.CAM_R1: CameraChannel.CAM_R1, - PinholeCameraType.CAM_R2: CameraChannel.CAM_R2, -} - -NUPLAN_DATA_ROOT = Path(os.environ["NUPLAN_DATA_ROOT"]) -NUPLAN_ROLLING_SHUTTER_S: Final[TimePoint] = TimePoint.from_s(1 / 60) - - -def create_splits_logs() -> Dict[str, List[str]]: - yaml_filepath = Path(nuplan_utils.__path__[0]) / "log_splits.yaml" - with open(yaml_filepath, "r") as stream: - splits = yaml.safe_load(stream) - - return splits["log_splits"] - - -class NuplanDataConverter(RawDataConverter): - def __init__( - self, - splits: List[str], - log_path: Union[Path, str], - data_converter_config: DataConverterConfig, - ) -> None: - super().__init__(data_converter_config) - for split in splits: - assert ( - split in self.get_available_splits() - ), f"Split {split} is not available. Available splits: {self.available_splits}" - - self._splits: List[str] = splits - self._log_path: Path = Path(log_path) - self._log_paths_per_split: Dict[str, List[Path]] = self._collect_log_paths() - self._target_dt: float = 0.1 - - def _collect_log_paths(self) -> Dict[str, List[Path]]: - # NOTE: the nuplan mini folder has an internal train, val, test structure, all stored in "mini". - # The complete dataset is saved in the "trainval" folder (train and val), or in the "test" folder (for test). 
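The folder layout described in the note above reduces to a split-to-folder mapping, as the branches below show. A compact sketch of that mapping (illustrative; the private_test split is omitted):

from pathlib import Path

NUPLAN_SPLIT_FOLDERS = {
    "nuplan_train": "trainval",  # train and val share the "trainval" folder
    "nuplan_val": "trainval",
    "nuplan_test": "test",
    "nuplan_mini_train": "mini",  # mini keeps its own internal train/val/test structure
    "nuplan_mini_val": "mini",
    "nuplan_mini_test": "mini",
}

def split_folder(data_root: Path, split: str) -> Path:
    return data_root / "nuplan-v1.1" / "splits" / NUPLAN_SPLIT_FOLDERS[split]

print(split_folder(Path("/data"), "nuplan_mini_val"))  # /data/nuplan-v1.1/splits/mini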
- # subsplit_log_names: Dict[str, List[str]] = create_splits_logs() - log_paths_per_split: Dict[str, List[Path]] = {} - - for split in self._splits: - subsplit = split.split("_")[-1] - assert subsplit in ["train", "val", "test"] - if split in ["nuplan_train", "nuplan_val"]: - log_path = NUPLAN_DATA_ROOT / "nuplan-v1.1" / "splits" / "trainval" - elif split in ["nuplan_test"]: - log_path = NUPLAN_DATA_ROOT / "nuplan-v1.1" / "splits" / "test" - elif split in ["nuplan_mini_train", "nuplan_mini_val", "nuplan_mini_test"]: - log_path = NUPLAN_DATA_ROOT / "nuplan-v1.1" / "splits" / "mini" - elif split == "nuplan_private_test": - log_path = NUPLAN_DATA_ROOT / "nuplan-v1.1" / "splits" / "private_test" - - all_log_files_in_path = [log_file for log_file in log_path.glob("*.db")] - all_log_names = set([str(log_file.stem) for log_file in all_log_files_in_path]) - # set(subsplit_log_names[subsplit]) - # log_paths = [log_path / f"{log_name}.db" for log_name in list(all_log_names & split_log_names)] - log_paths = [log_path / f"{log_name}.db" for log_name in list(all_log_names)] - log_paths_per_split[split] = log_paths - - return log_paths_per_split - - def get_available_splits(self) -> List[str]: - return [ - "nuplan_train", - "nuplan_val", - "nuplan_test", - "nuplan_mini_train", - "nuplan_mini_val", - "nuplan_mini_test", - "nuplan_private_test", # TODO: remove, not publicly available - ] - - def convert_maps(self, worker: WorkerPool) -> None: - worker_map( - worker, - partial(convert_nuplan_map_to_gpkg, data_converter_config=self.data_converter_config), - list(MAP_LOCATIONS), - ) - - def convert_logs(self, worker: WorkerPool) -> None: - log_args = [ - { - "log_path": log_path, - "split": split, - } - for split, log_paths in self._log_paths_per_split.items() - for log_path in log_paths - ] - - worker_map( - worker, - partial( - convert_nuplan_log_to_arrow, - data_converter_config=self.data_converter_config, - ), - log_args, - ) - - -def convert_nuplan_map_to_gpkg(map_names: List[str], data_converter_config: DataConverterConfig) -> List[Any]: - for map_name in map_names: - map_path = data_converter_config.output_path / "maps" / f"nuplan_{map_name}.gpkg" - if data_converter_config.force_map_conversion or not map_path.exists(): - map_path.unlink(missing_ok=True) - NuPlanMapConverter(data_converter_config.output_path / "maps").convert(map_name=map_name) - return [] - - -def convert_nuplan_log_to_arrow( - args: List[Dict[str, Union[List[str], List[Path]]]], data_converter_config: DataConverterConfig -) -> List[Any]: - for log_info in args: - log_path: Path = log_info["log_path"] - split: str = log_info["split"] - - if not log_path.exists(): - raise FileNotFoundError(f"Log path {log_path} does not exist.") - - log_db = NuPlanDB(NUPLAN_DATA_ROOT, str(log_path), None) - log_file_path = data_converter_config.output_path / split / f"{log_path.stem}.arrow" - - if data_converter_config.force_log_conversion or not log_file_path.exists(): - - log_file_path.unlink(missing_ok=True) - if not log_file_path.parent.exists(): - log_file_path.parent.mkdir(parents=True, exist_ok=True) - - log_metadata = LogMetadata( - dataset="nuplan", - split=split, - log_name=log_db.log_name, - location=log_db.log.map_version, - timestep_seconds=TARGET_DT, - vehicle_parameters=get_nuplan_chrysler_pacifica_parameters(), - camera_metadata=get_nuplan_camera_metadata(log_path), - lidar_metadata=get_nuplan_lidar_metadata(), - map_has_z=False, - map_is_local=False, - ) - - schema_column_list = [ - ("token", pa.string()), - ("timestamp", pa.int64()), 
- ("detections_state", pa.list_(pa.list_(pa.float64(), len(BoundingBoxSE3Index)))), - ("detections_velocity", pa.list_(pa.list_(pa.float64(), len(Vector3DIndex)))), - ("detections_token", pa.list_(pa.string())), - ("detections_type", pa.list_(pa.int16())), - ("ego_states", pa.list_(pa.float64(), len(EgoStateSE3Index))), - ("traffic_light_ids", pa.list_(pa.int64())), - ("traffic_light_types", pa.list_(pa.int16())), - ("scenario_tag", pa.list_(pa.string())), - ("route_lane_group_ids", pa.list_(pa.int64())), - ] - if data_converter_config.lidar_store_option is not None: - for lidar_type in log_metadata.lidar_metadata.keys(): - if data_converter_config.lidar_store_option == "path": - schema_column_list.append((lidar_type.serialize(), pa.string())) - elif data_converter_config.lidar_store_option == "binary": - raise NotImplementedError("Binary lidar storage is not implemented.") - - if data_converter_config.camera_store_option is not None: - for camera_type in log_metadata.camera_metadata.keys(): - if data_converter_config.camera_store_option == "path": - schema_column_list.append((camera_type.serialize(), pa.string())) - schema_column_list.append( - (f"{camera_type.serialize()}_extrinsic", pa.list_(pa.float64(), len(StateSE3Index))) - ) - - elif data_converter_config.camera_store_option == "binary": - raise NotImplementedError("Binary camera storage is not implemented.") - - recording_schema = pa.schema(schema_column_list) - recording_schema = add_log_metadata_to_arrow_schema(recording_schema, log_metadata) - _write_recording_table(log_db, recording_schema, log_file_path, log_path, data_converter_config) - - # Detach and remove log_db, for memory management - log_db.detach_tables() - log_db.remove_ref() - del recording_schema, log_db - gc.collect() - - return [] - - -def get_nuplan_camera_metadata(log_path: Path) -> Dict[PinholeCameraType, PinholeCameraMetadata]: - - def _get_camera_metadata(camera_type: PinholeCameraType) -> PinholeCameraMetadata: - cam = list(get_cameras(log_path, [str(NUPLAN_CAMERA_TYPES[camera_type].value)]))[0] - - intrinsics_camera_matrix = np.array(pickle.loads(cam.intrinsic)) # array of shape (3, 3) - intrinsic = PinholeIntrinsics.from_camera_matrix(intrinsics_camera_matrix) - - distortion_array = np.array(pickle.loads(cam.distortion)) # array of shape (5,) - distortion = PinholeDistortion.from_array(distortion_array, copy=False) - - return PinholeCameraMetadata( - camera_type=camera_type, - width=cam.width, - height=cam.height, - intrinsics=intrinsic, - distortion=distortion, - ) - - log_cam_infos: Dict[str, PinholeCameraMetadata] = {} - for camera_type in NUPLAN_CAMERA_TYPES.keys(): - log_cam_infos[camera_type] = _get_camera_metadata(camera_type) - - return log_cam_infos - - -def get_nuplan_lidar_metadata() -> Dict[LiDARType, LiDARMetadata]: - metadata: Dict[LiDARType, LiDARMetadata] = {} - metadata[LiDARType.LIDAR_MERGED] = LiDARMetadata( - lidar_type=LiDARType.LIDAR_MERGED, - lidar_index=NuplanLidarIndex, - extrinsic=None, # NOTE: LiDAR extrinsic are unknown - ) - return metadata - - -def _write_recording_table( - log_db: NuPlanDB, - recording_schema: pa.schema, - log_file_path: Path, - source_log_path: Path, - data_converter_config: DataConverterConfig, -) -> None: - compression: Optional[Literal["lz4", "zstd"]] = "zstd" - options = pa.ipc.IpcWriteOptions(compression=compression) - - with pa.OSFile(str(log_file_path), "wb") as sink: - with pa.ipc.new_file(sink, recording_schema, options=options) as writer: - step_interval: float = int(TARGET_DT / NUPLAN_DT) - for 
lidar_pc in log_db.lidar_pc[::step_interval]: - lidar_pc_token: str = lidar_pc.token - ( - detections_state, - detections_velocity, - detections_token, - detections_types, - ) = _extract_detections(lidar_pc) - traffic_light_ids, traffic_light_types = _extract_traffic_lights(log_db, lidar_pc_token) - route_lane_group_ids = [ - int(roadblock_id) - for roadblock_id in str(lidar_pc.scene.roadblock_ids).split(" ") - if len(roadblock_id) > 0 - ] - - row_data = { - "token": [lidar_pc_token], - "timestamp": [lidar_pc.timestamp], - "detections_state": [detections_state], - "detections_velocity": [detections_velocity], - "detections_token": [detections_token], - "detections_type": [detections_types], - "ego_states": [_extract_ego_state(lidar_pc)], - "traffic_light_ids": [traffic_light_ids], - "traffic_light_types": [traffic_light_types], - "scenario_tag": [_extract_scenario_tag(log_db, lidar_pc_token)], - "route_lane_group_ids": [route_lane_group_ids], - } - - if data_converter_config.lidar_store_option is not None: - lidar_data_dict = _extract_lidar(lidar_pc, data_converter_config) - for lidar_type, lidar_data in lidar_data_dict.items(): - if lidar_data is not None: - row_data[lidar_type.serialize()] = [lidar_data] - else: - row_data[lidar_type.serialize()] = [None] - - if data_converter_config.camera_store_option is not None: - camera_data_dict = _extract_camera(log_db, lidar_pc, source_log_path, data_converter_config) - for camera_type, camera_data in camera_data_dict.items(): - if camera_data is not None: - row_data[camera_type.serialize()] = [camera_data[0]] - row_data[f"{camera_type.serialize()}_extrinsic"] = [camera_data[1]] - else: - row_data[camera_type.serialize()] = [None] - row_data[f"{camera_type.serialize()}_extrinsic"] = [None] - - batch = pa.record_batch(row_data, schema=recording_schema) - writer.write_batch(batch) - del batch, row_data, detections_state, detections_velocity, detections_token, detections_types - - if SORT_BY_TIMESTAMP: - recording_table = open_arrow_table(log_file_path) - recording_table = recording_table.sort_by([("timestamp", "ascending")]) - write_arrow_table(recording_table, log_file_path) - - -def _extract_detections(lidar_pc: LidarPc) -> Tuple[List[List[float]], List[List[float]], List[str], List[int]]: - detections_state: List[List[float]] = [] - detections_velocity: List[List[float]] = [] - detections_token: List[str] = [] - detections_types: List[int] = [] - - for lidar_box in lidar_pc.lidar_boxes: - lidar_box: LidarBox - lidar_quaternion = EulerAngles(roll=DEFAULT_ROLL, pitch=DEFAULT_PITCH, yaw=lidar_box.yaw).quaternion - center = StateSE3( - x=lidar_box.x, - y=lidar_box.y, - z=lidar_box.z, - qw=lidar_quaternion.qw, - qx=lidar_quaternion.qx, - qy=lidar_quaternion.qy, - qz=lidar_quaternion.qz, - ) - bounding_box_se3 = BoundingBoxSE3(center, lidar_box.length, lidar_box.width, lidar_box.height) - - detections_state.append(bounding_box_se3.tolist()) - detections_velocity.append(lidar_box.velocity) - detections_token.append(lidar_box.track_token) - detections_types.append(int(NUPLAN_DETECTION_NAME_DICT[lidar_box.category.name])) - - return detections_state, detections_velocity, detections_token, detections_types - - -def _extract_ego_state(lidar_pc: LidarPc) -> List[float]: - - vehicle_parameters = get_nuplan_chrysler_pacifica_parameters() - rear_axle_pose = StateSE3( - x=lidar_pc.ego_pose.x, - y=lidar_pc.ego_pose.y, - z=lidar_pc.ego_pose.z, - qw=lidar_pc.ego_pose.qw, - qx=lidar_pc.ego_pose.qx, - qy=lidar_pc.ego_pose.qy, - qz=lidar_pc.ego_pose.qz, - ) - center 
= rear_axle_se3_to_center_se3(rear_axle_se3=rear_axle_pose, vehicle_parameters=vehicle_parameters) - dynamic_state = DynamicStateSE3( - velocity=Vector3D( - x=lidar_pc.ego_pose.vx, - y=lidar_pc.ego_pose.vy, - z=lidar_pc.ego_pose.vz, - ), - acceleration=Vector3D( - x=lidar_pc.ego_pose.acceleration_x, - y=lidar_pc.ego_pose.acceleration_y, - z=lidar_pc.ego_pose.acceleration_z, - ), - angular_velocity=Vector3D( - x=lidar_pc.ego_pose.angular_rate_x, - y=lidar_pc.ego_pose.angular_rate_y, - z=lidar_pc.ego_pose.angular_rate_z, - ), - ) - return EgoStateSE3( - center_se3=center, - dynamic_state_se3=dynamic_state, - vehicle_parameters=vehicle_parameters, - timepoint=None, - ).array.tolist() - - -def _extract_traffic_lights(log_db: NuPlanDB, lidar_pc_token: str) -> Tuple[List[int], List[int]]: - traffic_light_ids: List[int] = [] - traffic_light_types: List[int] = [] - traffic_lights = log_db.traffic_light_status.select_many(lidar_pc_token=lidar_pc_token) - for traffic_light in traffic_lights: - traffic_light_ids.append(int(traffic_light.lane_connector_id)) - traffic_light_types.append(int(NUPLAN_TRAFFIC_STATUS_DICT[traffic_light.status].value)) - return traffic_light_ids, traffic_light_types - - -def _extract_scenario_tag(log_db: NuPlanDB, lidar_pc_token: str) -> List[str]: - scenario_tags = [ - scenario_tag.type for scenario_tag in log_db.scenario_tag.select_many(lidar_pc_token=lidar_pc_token) - ] - if len(scenario_tags) == 0: - scenario_tags = ["unknown"] - return scenario_tags - - -def _extract_camera( - log_db: NuPlanDB, - lidar_pc: LidarPc, - source_log_path: Path, - data_converter_config: DataConverterConfig, -) -> Dict[PinholeCameraType, Union[str, bytes]]: - - camera_dict: Dict[str, Union[str, bytes]] = {} - sensor_root = NUPLAN_DATA_ROOT / "nuplan-v1.1" / "sensor_blobs" - log_cam_infos = {camera.token: camera for camera in log_db.log.cameras} - - for camera_type, camera_channel in NUPLAN_CAMERA_TYPES.items(): - camera_data: Optional[Union[str, bytes]] = None - c2e: Optional[List[float]] = None - image_class = list(get_images_from_lidar_tokens(source_log_path, [lidar_pc.token], [str(camera_channel.value)])) - if len(image_class) != 0: - image = image_class[0] - filename_jpg = sensor_root / image.filename_jpg - if filename_jpg.exists(): - - # Code taken from MTGS - # https://github.com/OpenDriveLab/MTGS/blob/main/nuplan_scripts/utils/nuplan_utils_custom.py#L117 - # TODO: Refactor - timestamp = image.timestamp + NUPLAN_ROLLING_SHUTTER_S.time_us - img_ego_pose: EgoPose = ( - log_db.log._session.query(EgoPose).order_by(func.abs(EgoPose.timestamp - timestamp)).first() - ) - img_e2g = img_ego_pose.trans_matrix - g2e = lidar_pc.ego_pose.trans_matrix_inv - img_e2e = g2e @ img_e2g - cam_info = log_cam_infos[image.camera_token] - c2img_e = cam_info.trans_matrix - c2e = img_e2e @ c2img_e - - extrinsic = StateSE3.from_transformation_matrix(c2e) - - if data_converter_config.camera_store_option == "path": - camera_data = str(filename_jpg), extrinsic.tolist() - elif data_converter_config.camera_store_option == "binary": - with open(filename_jpg, "rb") as f: - camera_data = f.read(), extrinsic.tolist() - - camera_dict[camera_type] = camera_data - - return camera_dict - - -def _extract_lidar(lidar_pc: LidarPc, data_converter_config: DataConverterConfig) -> Dict[LiDARType, Optional[str]]: - - lidar: Optional[str] = None - lidar_full_path = NUPLAN_DATA_ROOT / "nuplan-v1.1" / "sensor_blobs" / lidar_pc.filename - if lidar_full_path.exists(): - lidar = lidar_pc.filename - - return {LiDARType.LIDAR_MERGED: lidar} 
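The MTGS-derived block above compensates for ego motion between the lidar trigger and the rolling-shutter-delayed image trigger by chaining three rigid transforms into a camera-to-ego extrinsic. Schematically, with hypothetical 4x4 matrices:

import numpy as np

img_e2g = np.eye(4); img_e2g[0, 3] = 1.0  # ego-to-global at image time: ego moved 1 m forward
g2e = np.eye(4)                           # global-to-ego at lidar time (identity here)
c2img_e = np.eye(4); c2img_e[2, 3] = 1.5  # static camera-to-ego calibration: 1.5 m up

img_e2e = g2e @ img_e2g  # image-time ego frame expressed in the lidar-time ego frame
c2e = img_e2e @ c2img_e  # motion-compensated camera pose in the lidar-time ego frame
print(c2e[:3, 3])        # [1.  0.  1.5]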
diff --git a/d123/datasets/wopd/wopd_data_converter_delete.py b/d123/datasets/wopd/wopd_data_converter_delete.py deleted file mode 100644 index ea418d57..00000000 --- a/d123/datasets/wopd/wopd_data_converter_delete.py +++ /dev/null @@ -1,521 +0,0 @@ -import gc -import hashlib -import os -from functools import partial -from pathlib import Path -from typing import Any, Dict, Final, List, Optional, Tuple, Union - -import numpy as np -import numpy.typing as npt -import pyarrow as pa - -from d123.common.multithreading.worker_utils import WorkerPool, worker_map -from d123.common.utils.dependencies import check_dependencies -from d123.datasets.raw_data_converter import DataConverterConfig, RawDataConverter -from d123.datasets.utils.sensor.camera_conventions import CameraConvention, convert_camera_convention -from d123.datasets.utils.sensor.lidar_index_registry import WopdLidarIndex -from d123.datasets.wopd.waymo_map_utils.wopd_map_utils import convert_wopd_map -from d123.datasets.wopd.wopd_utils import parse_range_image_and_camera_projection -from d123.datatypes.detections.detection_types import DetectionType -from d123.datatypes.scene.arrow.utils.arrow_metadata_utils import add_log_metadata_to_arrow_schema -from d123.datatypes.scene.scene_metadata import LogMetadata -from d123.datatypes.sensors.camera.pinhole_camera import ( - PinholeCameraMetadata, - PinholeCameraType, - PinholeDistortion, - PinholeIntrinsics, -) -from d123.datatypes.sensors.lidar.lidar import LiDARMetadata, LiDARType -from d123.datatypes.vehicle_state.ego_state import DynamicStateSE3, EgoStateSE3, EgoStateSE3Index -from d123.datatypes.vehicle_state.vehicle_parameters import get_wopd_chrysler_pacifica_parameters -from d123.geometry import BoundingBoxSE3Index, EulerAngles, StateSE3, Vector3D, Vector3DIndex -from d123.geometry.geometry_index import EulerAnglesIndex, StateSE3Index -from d123.geometry.transform.transform_se3 import convert_relative_to_absolute_se3_array -from d123.geometry.utils.constants import DEFAULT_PITCH, DEFAULT_ROLL -from d123.geometry.utils.rotation_utils import ( - get_euler_array_from_quaternion_array, - get_quaternion_array_from_euler_array, -) - -check_dependencies(modules=["tensorflow", "waymo_open_dataset"], optional_name="waymo") -import tensorflow as tf -from waymo_open_dataset import dataset_pb2 -from waymo_open_dataset.utils import frame_utils - -# TODO: Make keep_polar_features an optional argument. -# With polar features, the lidar loading time is SIGNIFICANTLY higher. 
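One way to act on the keep_polar_features TODO above is to expose the flag as a parameter; a sketch reusing the call pattern from the converter below (the wrapper name is hypothetical):

from waymo_open_dataset.utils import frame_utils

def convert_frame_point_cloud(frame, range_images, camera_projections, range_image_top_pose, keep_polar_features=False):
    """Thin wrapper that makes the polar-feature tradeoff an explicit choice."""
    # keep_polar_features=True additionally decodes range/intensity/elongation,
    # at a significant loading cost, hence the cheap xyz-only default.
    return frame_utils.convert_range_image_to_point_cloud(
        frame=frame,
        range_images=range_images,
        camera_projections=camera_projections,
        range_image_top_pose=range_image_top_pose,
        keep_polar_features=keep_polar_features,
    )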
- -os.environ["CUDA_VISIBLE_DEVICES"] = "-1" -D123_MAPS_ROOT = Path(os.environ.get("D123_MAPS_ROOT")) - -TARGET_DT: Final[float] = 0.1 -SORT_BY_TIMESTAMP: Final[bool] = False - - -# https://github.com/waymo-research/waymo-open-dataset/blob/master/src/waymo_open_dataset/label.proto#L63 -WOPD_DETECTION_NAME_DICT: Dict[int, DetectionType] = { - 0: DetectionType.GENERIC_OBJECT, # TYPE_UNKNOWN - 1: DetectionType.VEHICLE, # TYPE_VEHICLE - 2: DetectionType.PEDESTRIAN, # TYPE_PEDESTRIAN - 3: DetectionType.SIGN, # TYPE_SIGN - 4: DetectionType.BICYCLE, # TYPE_CYCLIST -} - -# https://github.com/waymo-research/waymo-open-dataset/blob/master/src/waymo_open_dataset/dataset.proto#L50 -WOPD_CAMERA_TYPES: Dict[int, PinholeCameraType] = { - 1: PinholeCameraType.CAM_F0, # front_camera - 2: PinholeCameraType.CAM_L0, # front_left_camera - 3: PinholeCameraType.CAM_R0, # front_right_camera - 4: PinholeCameraType.CAM_L1, # left_camera - 5: PinholeCameraType.CAM_R1, # right_camera -} - -# https://github.com/waymo-research/waymo-open-dataset/blob/master/src/waymo_open_dataset/dataset.proto#L66 -WOPD_LIDAR_TYPES: Dict[int, LiDARType] = { - 0: LiDARType.LIDAR_UNKNOWN, # UNKNOWN - 1: LiDARType.LIDAR_TOP, # TOP - 2: LiDARType.LIDAR_FRONT, # FRONT - 3: LiDARType.LIDAR_SIDE_LEFT, # SIDE_LEFT - 4: LiDARType.LIDAR_SIDE_RIGHT, # SIDE_RIGHT - 5: LiDARType.LIDAR_BACK, # REAR -} - -WOPD_DATA_ROOT = Path("/media/nvme1/waymo_perception") # TODO: set as environment variable !!!! - -# Whether to use ego or zero roll and pitch values for bounding box detections (after global conversion) -ZERO_ROLL_PITCH: Final[bool] = True - - -def create_token(input_data: str) -> str: - # TODO: Refactor this function. - # TODO: Add a general function to create tokens from arbitrary data. - if isinstance(input_data, str): - input_data = input_data.encode("utf-8") - - hash_obj = hashlib.sha256(input_data) - return hash_obj.hexdigest()[:16] - - -class WOPDDataConverter(RawDataConverter): - def __init__( - self, - splits: List[str], - log_path: Union[Path, str], - data_converter_config: DataConverterConfig, - ) -> None: - super().__init__(data_converter_config) - for split in splits: - assert ( - split in self.get_available_splits() - ), f"Split {split} is not available. 
Available splits: {self.available_splits}" - - self._splits: List[str] = splits - self._tf_records_per_split: Dict[str, List[Path]] = self._collect_tf_records() - self._target_dt: float = 0.1 - - def _collect_tf_records(self) -> Dict[str, List[Path]]: - tf_records_per_split: Dict[str, List[Path]] = {} - - for split in self._splits: - if split in ["wopd_train"]: - log_path = WOPD_DATA_ROOT / "training" - else: - raise ValueError(f"Split {split} is not supported.") - - log_paths = [log_file for log_file in log_path.glob("*.tfrecord")] - tf_records_per_split[split] = log_paths - - return tf_records_per_split - - def get_available_splits(self) -> List[str]: - # TODO: Add more splits if available - return [ - "wopd_train", - ] - - def convert_maps(self, worker: WorkerPool) -> None: - log_args = [ - { - "tf_record": tf_record, - "split": split, - } - for split, tf_record_paths in self._tf_records_per_split.items() - for tf_record in tf_record_paths - ] - - worker_map( - worker, - partial(convert_wopd_tfrecord_map_to_gpkg, data_converter_config=self.data_converter_config), - log_args, - ) - - def convert_logs(self, worker: WorkerPool) -> None: - log_args = [ - { - "tf_record": tf_record, - "split": split, - } - for split, tf_record_paths in self._tf_records_per_split.items() - for tf_record in tf_record_paths - ] - - worker_map( - worker, - partial(convert_wopd_tfrecord_log_to_arrow, data_converter_config=self.data_converter_config), - log_args, - ) - - -def convert_wopd_tfrecord_map_to_gpkg( - args: List[Dict[str, Union[List[str], List[Path]]]], data_converter_config: DataConverterConfig -) -> List[Any]: - - for log_info in args: - tf_record_path: Path = log_info["tf_record"] - split: str = log_info["split"] - - if not tf_record_path.exists(): - raise FileNotFoundError(f"TFRecord path {tf_record_path} does not exist.") - - dataset = tf.data.TFRecordDataset(tf_record_path, compression_type="") - for data in dataset: - initial_frame = dataset_pb2.Frame() - initial_frame.ParseFromString(data.numpy()) - break - log_name = str(initial_frame.context.name) - map_file_path = D123_MAPS_ROOT / split / f"{log_name}.gpkg" - - if data_converter_config.force_map_conversion or not map_file_path.exists(): - map_file_path.unlink(missing_ok=True) - convert_wopd_map(initial_frame, map_file_path) - return [] - - -def convert_wopd_tfrecord_log_to_arrow( - args: List[Dict[str, Union[List[str], List[Path]]]], data_converter_config: DataConverterConfig -) -> List[Any]: - for log_info in args: - try: - - tf_record_path: Path = log_info["tf_record"] - split: str = log_info["split"] - - if not tf_record_path.exists(): - raise FileNotFoundError(f"TFRecord path {tf_record_path} does not exist.") - - dataset = tf.data.TFRecordDataset(tf_record_path, compression_type="") - for data in dataset: - initial_frame = dataset_pb2.Frame() - initial_frame.ParseFromString(data.numpy()) - break - - log_name = str(initial_frame.context.name) - log_file_path = data_converter_config.output_path / split / f"{log_name}.arrow" - - if data_converter_config.force_log_conversion or not log_file_path.exists(): - log_file_path.unlink(missing_ok=True) - if not log_file_path.parent.exists(): - log_file_path.parent.mkdir(parents=True, exist_ok=True) - - log_metadata = LogMetadata( - dataset="wopd", - split=split, - log_name=log_name, - location=None, # TODO: Add location information. 
- timestep_seconds=TARGET_DT, - vehicle_parameters=get_wopd_chrysler_pacifica_parameters(), - camera_metadata=get_wopd_camera_metadata(initial_frame, data_converter_config), - lidar_metadata=get_wopd_lidar_metadata(initial_frame, data_converter_config), - map_has_z=True, - map_is_local=True, - ) - - schema_column_list = [ - ("token", pa.string()), - ("timestamp", pa.int64()), - ("detections_state", pa.list_(pa.list_(pa.float64(), len(BoundingBoxSE3Index)))), - ("detections_velocity", pa.list_(pa.list_(pa.float64(), len(Vector3DIndex)))), - ("detections_token", pa.list_(pa.string())), - ("detections_type", pa.list_(pa.int16())), - ("ego_states", pa.list_(pa.float64(), len(EgoStateSE3Index))), - ("traffic_light_ids", pa.list_(pa.int64())), - ("traffic_light_types", pa.list_(pa.int16())), - ("scenario_tag", pa.list_(pa.string())), - ("route_lane_group_ids", pa.list_(pa.int64())), - ] - # TODO: Adjust how cameras are added - if data_converter_config.camera_store_option is not None: - for camera_type in log_metadata.camera_metadata.keys(): - if data_converter_config.camera_store_option == "path": - raise NotImplementedError("Path camera storage is not implemented.") - elif data_converter_config.camera_store_option == "binary": - schema_column_list.append((camera_type.serialize(), pa.binary())) - schema_column_list.append( - (f"{camera_type.serialize()}_extrinsic", pa.list_(pa.float64(), len(StateSE3Index))) - ) - - if data_converter_config.lidar_store_option is not None: - for lidar_type in log_metadata.lidar_metadata.keys(): - if data_converter_config.lidar_store_option == "path": - raise NotImplementedError("Filepath lidar storage is not implemented.") - elif data_converter_config.lidar_store_option == "binary": - schema_column_list.append((lidar_type.serialize(), (pa.list_(pa.float32())))) - - recording_schema = pa.schema(schema_column_list) - recording_schema = add_log_metadata_to_arrow_schema(recording_schema, log_metadata) - - _write_recording_table(dataset, recording_schema, log_file_path, tf_record_path, data_converter_config) - - del recording_schema, dataset - except Exception as e: - import traceback - - print(f"Error processing log {str(tf_record_path)}: {e}") - traceback.print_exc() - gc.collect() - return [] - - -def get_wopd_camera_metadata( - initial_frame: dataset_pb2.Frame, data_converter_config: DataConverterConfig -) -> Dict[PinholeCameraType, PinholeCameraMetadata]: - - cam_metadatas: Dict[PinholeCameraType, PinholeCameraMetadata] = {} - if data_converter_config.camera_store_option is not None: - for calibration in initial_frame.context.camera_calibrations: - camera_type = WOPD_CAMERA_TYPES[calibration.name] - # https://github.com/waymo-research/waymo-open-dataset/blob/master/src/waymo_open_dataset/dataset.proto#L96 - # https://github.com/waymo-research/waymo-open-dataset/issues/834#issuecomment-2134995440 - fx, fy, cx, cy, k1, k2, p1, p2, k3 = calibration.intrinsic - intrinsics = PinholeIntrinsics(fx=fx, fy=fy, cx=cx, cy=cy) - distortion = PinholeDistortion(k1=k1, k2=k2, p1=p1, p2=p2, k3=k3) - if camera_type in WOPD_CAMERA_TYPES.values(): - cam_metadatas[camera_type] = PinholeCameraMetadata( - camera_type=camera_type, - width=calibration.width, - height=calibration.height, - intrinsics=intrinsics, - distortion=distortion, - ) - - return cam_metadatas - - -def get_wopd_lidar_metadata( - initial_frame: dataset_pb2.Frame, data_converter_config: DataConverterConfig -) -> Dict[LiDARType, LiDARMetadata]: - - laser_metadatas: Dict[LiDARType, LiDARMetadata] = {} - if 
data_converter_config.lidar_store_option is not None: - for laser_calibration in initial_frame.context.laser_calibrations: - - lidar_type = WOPD_LIDAR_TYPES[laser_calibration.name] - - extrinsic: Optional[StateSE3] = None - if laser_calibration.extrinsic: - extrinsic_transform = np.array(laser_calibration.extrinsic.transform, dtype=np.float64).reshape(4, 4) - extrinsic = StateSE3.from_transformation_matrix(extrinsic_transform) - - laser_metadatas[lidar_type] = LiDARMetadata( - lidar_type=lidar_type, - lidar_index=WopdLidarIndex, - extrinsic=extrinsic, - ) - - return laser_metadatas - - -def _write_recording_table( - dataset: tf.data.TFRecordDataset, - recording_schema: pa.schema, - log_file_path: Path, - tf_record_path: Path, - data_converter_config: DataConverterConfig, -) -> None: - - with pa.OSFile(str(log_file_path), "wb") as sink: - with pa.ipc.new_file(sink, recording_schema) as writer: - - dataset = tf.data.TFRecordDataset(tf_record_path, compression_type="") - for frame_idx, data in enumerate(dataset): - frame = dataset_pb2.Frame() - frame.ParseFromString(data.numpy()) - - (detections_state, detections_velocity, detections_token, detections_types) = _extract_detections(frame) - - # TODO: Implement traffic light extraction - traffic_light_ids = [] - traffic_light_types = [] - - # TODO: Implement detections - row_data = { - "token": [create_token(f"{frame.context.name}_{int(frame.timestamp_micros)}")], - "timestamp": [int(frame.timestamp_micros)], - "detections_state": [detections_state], - "detections_velocity": [detections_velocity], - "detections_token": [detections_token], - "detections_type": [detections_types], - "ego_states": [_extract_ego_state(frame)], - "traffic_light_ids": [traffic_light_ids], - "traffic_light_types": [traffic_light_types], - "scenario_tag": ["unknown"], - "route_lane_group_ids": [None], - } - - # TODO: Implement lidar extraction - if data_converter_config.lidar_store_option is not None: - lidar_data_dict = _extract_lidar(frame, data_converter_config) - for lidar_type, lidar_data in lidar_data_dict.items(): - if lidar_data is not None: - row_data[lidar_type.serialize()] = [lidar_data.tolist()] - else: - row_data[lidar_type.serialize()] = [None] - - if data_converter_config.camera_store_option is not None: - camera_data_dict = _extract_camera(frame, data_converter_config) - for camera_type, camera_data in camera_data_dict.items(): - if camera_data is not None: - row_data[camera_type.serialize()] = [camera_data[0]] - row_data[f"{camera_type.serialize()}_extrinsic"] = [camera_data[1]] - else: - row_data[camera_type.serialize()] = [None] - row_data[f"{camera_type.serialize()}_extrinsic"] = [None] - - batch = pa.record_batch(row_data, schema=recording_schema) - writer.write_batch(batch) - del batch, row_data, detections_state, detections_velocity, detections_token, detections_types - - -def _get_ego_pose_se3(frame: dataset_pb2.Frame) -> StateSE3: - ego_pose_matrix = np.array(frame.pose.transform, dtype=np.float64).reshape(4, 4) - return StateSE3.from_transformation_matrix(ego_pose_matrix) - - -def _extract_detections(frame: dataset_pb2.Frame) -> Tuple[List[List[float]], List[List[float]], List[str], List[int]]: - - ego_rear_axle = _get_ego_pose_se3(frame) - - num_detections = len(frame.laser_labels) - detections_state = np.zeros((num_detections, len(BoundingBoxSE3Index)), dtype=np.float64) - detections_velocity = np.zeros((num_detections, len(Vector3DIndex)), dtype=np.float64) - detections_token: List[str] = [] - detections_types: List[int] = [] - - for 
detection_idx, detection in enumerate(frame.laser_labels): - if detection.type not in WOPD_DETECTION_NAME_DICT: - continue - detection_quaternion = EulerAngles( - roll=DEFAULT_ROLL, - pitch=DEFAULT_PITCH, - yaw=detection.box.heading, - ).quaternion - - # 2. Fill SE3 Bounding Box - detections_state[detection_idx, BoundingBoxSE3Index.X] = detection.box.center_x - detections_state[detection_idx, BoundingBoxSE3Index.Y] = detection.box.center_y - detections_state[detection_idx, BoundingBoxSE3Index.Z] = detection.box.center_z - detections_state[detection_idx, BoundingBoxSE3Index.QUATERNION] = detection_quaternion - detections_state[detection_idx, BoundingBoxSE3Index.LENGTH] = detection.box.length - detections_state[detection_idx, BoundingBoxSE3Index.WIDTH] = detection.box.width - detections_state[detection_idx, BoundingBoxSE3Index.HEIGHT] = detection.box.height - - # 2. Velocity TODO: check if velocity needs to be rotated - detections_velocity[detection_idx] = Vector3D( - x=detection.metadata.speed_x, - y=detection.metadata.speed_y, - z=detection.metadata.speed_z, - ).array - - # 3. Type and track token - detections_token.append(str(detection.id)) - detections_types.append(int(WOPD_DETECTION_NAME_DICT[detection.type])) - - detections_state[:, BoundingBoxSE3Index.STATE_SE3] = convert_relative_to_absolute_se3_array( - origin=ego_rear_axle, se3_array=detections_state[:, BoundingBoxSE3Index.STATE_SE3] - ) - if ZERO_ROLL_PITCH: - euler_array = get_euler_array_from_quaternion_array(detections_state[:, BoundingBoxSE3Index.QUATERNION]) - euler_array[..., EulerAnglesIndex.ROLL] = DEFAULT_ROLL - euler_array[..., EulerAnglesIndex.PITCH] = DEFAULT_PITCH - detections_state[..., BoundingBoxSE3Index.QUATERNION] = get_quaternion_array_from_euler_array(euler_array) - - return detections_state.tolist(), detections_velocity.tolist(), detections_token, detections_types - - -def _extract_ego_state(frame: dataset_pb2.Frame) -> List[float]: - rear_axle_pose = _get_ego_pose_se3(frame) - - vehicle_parameters = get_wopd_chrysler_pacifica_parameters() - # FIXME: Find dynamic state in waymo open perception dataset - # https://github.com/waymo-research/waymo-open-dataset/issues/55#issuecomment-546152290 - dynamic_state = DynamicStateSE3( - velocity=Vector3D(*np.zeros(3)), - acceleration=Vector3D(*np.zeros(3)), - angular_velocity=Vector3D(*np.zeros(3)), - ) - - return EgoStateSE3.from_rear_axle( - rear_axle_se3=rear_axle_pose, - dynamic_state_se3=dynamic_state, - vehicle_parameters=vehicle_parameters, - time_point=None, - ).array.tolist() - - -def _extract_traffic_lights() -> Tuple[List[int], List[int]]: - pass - - -def _extract_camera( - frame: dataset_pb2.Frame, data_converter_config: DataConverterConfig -) -> Dict[PinholeCameraType, Union[str, bytes]]: - - camera_dict: Dict[str, Union[str, bytes]] = {} # TODO: Fix wrong type hint - np.array(frame.pose.transform).reshape(4, 4) - - # NOTE: The extrinsic matrix in frame.context.camera_calibration is fixed to model the ego to camera transformation. - # The poses in frame.images[idx] are the motion compensated ego poses when the camera triggers. - - context_extrinsic: Dict[str, StateSE3] = {} - for calibration in frame.context.camera_calibrations: - camera_type = WOPD_CAMERA_TYPES[calibration.name] - camera_transform = np.array(calibration.extrinsic.transform, dtype=np.float64).reshape(4, 4) - camera_pose = StateSE3.from_transformation_matrix(camera_transform) - # NOTE: WOPD uses a different camera convention than d123 - # https://arxiv.org/pdf/1912.04838 (Figure 1.) 
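The convention change referenced above is a fixed axis permutation of the rotation part of the camera pose. An illustrative sketch for a WOPD-style frame (+x forward, +y left, +z up) re-expressed in the OpenCV-style frame (+x right, +y down, +z forward); this is a stand-in, not the actual d123 CameraConvention semantics:

import numpy as np

AXIS_SWAP = np.array(
    [
        [0.0, 0.0, 1.0],
        [-1.0, 0.0, 0.0],
        [0.0, -1.0, 0.0],
    ]
)  # columns: OpenCV x = -y_wopd, OpenCV y = -z_wopd, OpenCV z = +x_wopd

def wopd_to_opencv(rotation_world_from_camera: np.ndarray) -> np.ndarray:
    """Re-express a camera orientation in the OpenCV convention (translation is unchanged)."""
    return rotation_world_from_camera @ AXIS_SWAP

# Identity orientation: the camera looks along world +x in the WOPD convention,
# so the OpenCV forward axis (+z) must map to world +x after the swap.
print(wopd_to_opencv(np.eye(3)) @ np.array([0.0, 0.0, 1.0]))  # [1. 0. 0.]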
- camera_pose = convert_camera_convention( - camera_pose, - from_convention=CameraConvention.pXpZmY, - to_convention=CameraConvention.pZmYpX, - ) - context_extrinsic[camera_type] = camera_pose - - for image_proto in frame.images: - camera_type = WOPD_CAMERA_TYPES[image_proto.name] - camera_bytes = image_proto.image - camera_dict[camera_type] = camera_bytes, context_extrinsic[camera_type].tolist() - - return camera_dict - - -def _extract_lidar( - frame: dataset_pb2.Frame, data_converter_config: DataConverterConfig -) -> Dict[LiDARType, npt.NDArray[np.float32]]: - - assert data_converter_config.lidar_store_option == "binary", "Lidar store option must be 'binary' for WOPD." - (range_images, camera_projections, _, range_image_top_pose) = parse_range_image_and_camera_projection(frame) - - points, cp_points = frame_utils.convert_range_image_to_point_cloud( - frame=frame, - range_images=range_images, - camera_projections=camera_projections, - range_image_top_pose=range_image_top_pose, - keep_polar_features=False, - ) - - lidar_data: Dict[LiDARType, npt.NDArray[np.float32]] = {} - for lidar_idx, frame_lidar in enumerate(frame.lasers): - lidar_type = WOPD_LIDAR_TYPES[frame_lidar.name] - lidar_data[lidar_type] = np.array(points[lidar_idx], dtype=np.float32).flatten() - - return lidar_data From 79cb5cdb0b1e487f9ec2ff61fb0417f30d7c72cd Mon Sep 17 00:00:00 2001 From: jbwang <1159270049@qq.com> Date: Sat, 11 Oct 2025 13:32:34 +0800 Subject: [PATCH 065/145] merge dev_v0.0.7 into kitti360 --- d123/datasets/nuplan/nuplan_data_converter.py | 26 +++---------------- d123/datasets/wopd/wopd_data_converter.py | 18 ------------- 2 files changed, 3 insertions(+), 41 deletions(-) diff --git a/d123/datasets/nuplan/nuplan_data_converter.py b/d123/datasets/nuplan/nuplan_data_converter.py index fc0375c7..93da2a8e 100644 --- a/d123/datasets/nuplan/nuplan_data_converter.py +++ b/d123/datasets/nuplan/nuplan_data_converter.py @@ -25,11 +25,6 @@ ) from d123.datatypes.detections.detection_types import DetectionType from d123.datatypes.scene.scene_metadata import LogMetadata -<<<<<<< HEAD -from d123.datatypes.sensors.camera import PinholeCameraMetadata, CameraType, camera_metadata_dict_to_json -from d123.datatypes.sensors.lidar import LiDARMetadata, LiDARType, lidar_metadata_dict_to_json -from d123.datatypes.sensors.lidar_index import NuplanLidarIndex -======= from d123.datatypes.sensors.camera.pinhole_camera import ( PinholeCameraMetadata, PinholeCameraType, @@ -37,7 +32,6 @@ PinholeIntrinsics, ) from d123.datatypes.sensors.lidar.lidar import LiDARMetadata, LiDARType ->>>>>>> dev_v0.0.7 from d123.datatypes.time.time_point import TimePoint from d123.datatypes.vehicle_state.ego_state import DynamicStateSE3, EgoStateSE3 from d123.datatypes.vehicle_state.vehicle_parameters import ( @@ -240,29 +234,15 @@ def convert_nuplan_log_to_arrow( return [] -<<<<<<< HEAD -def get_nuplan_camera_metadata(log_path: Path) -> Dict[CameraType, PinholeCameraMetadata]: +def get_nuplan_camera_metadata(log_path: Path) -> Dict[CameraType, CameraMetadata]: - def _get_camera_metadata(camera_type: CameraType) -> PinholeCameraMetadata: + def _get_camera_metadata(camera_type: CameraType) -> CameraMetadata: cam = list(get_cameras(log_path, [str(NUPLAN_CAMERA_TYPES[camera_type].value)]))[0] intrinsic = np.array(pickle.loads(cam.intrinsic)) rotation = np.array(pickle.loads(cam.rotation)) rotation = Quaternion(rotation).rotation_matrix distortion = np.array(pickle.loads(cam.distortion)) -======= -def get_nuplan_camera_metadata(log_path: Path) -> 
Dict[PinholeCameraType, PinholeCameraMetadata]:
-
-    def _get_camera_metadata(camera_type: PinholeCameraType) -> PinholeCameraMetadata:
-        cam = list(get_cameras(log_path, [str(NUPLAN_CAMERA_TYPES[camera_type].value)]))[0]
-
-        intrinsics_camera_matrix = np.array(pickle.loads(cam.intrinsic), dtype=np.float64)  # array of shape (3, 3)
-        intrinsic = PinholeIntrinsics.from_camera_matrix(intrinsics_camera_matrix)
-
-        distortion_array = np.array(pickle.loads(cam.distortion), dtype=np.float64)  # array of shape (5,)
-        distortion = PinholeDistortion.from_array(distortion_array, copy=False)
-
->>>>>>> dev_v0.0.7
-        return PinholeCameraMetadata(
+        return CameraMetadata(
             camera_type=camera_type,
             width=cam.width,
             height=cam.height,
diff --git a/d123/datasets/wopd/wopd_data_converter.py b/d123/datasets/wopd/wopd_data_converter.py
index 700172bd..c6a9f3a2 100644
--- a/d123/datasets/wopd/wopd_data_converter.py
+++ b/d123/datasets/wopd/wopd_data_converter.py
@@ -7,11 +7,6 @@
 import numpy as np
 import numpy.typing as npt
-<<<<<<< HEAD
-import pyarrow as pa
-from d123.datatypes.detections.detection_types import DetectionType
-=======
->>>>>>> dev_v0.0.7
 from d123.common.multithreading.worker_utils import WorkerPool, worker_map
 from d123.common.utils.dependencies import check_dependencies
@@ -24,12 +19,6 @@
 from d123.datatypes.detections.detection import BoxDetectionMetadata, BoxDetectionSE3, BoxDetectionWrapper
 from d123.datatypes.detections.detection_types import DetectionType
 from d123.datatypes.scene.scene_metadata import LogMetadata
-<<<<<<< HEAD
-from d123.datatypes.sensors.camera import PinholeCameraMetadata, CameraType, camera_metadata_dict_to_json
-from d123.datatypes.sensors.lidar import LiDARMetadata, LiDARType, lidar_metadata_dict_to_json
-from d123.datatypes.sensors.lidar_index import WopdLidarIndex
-from d123.datatypes.vehicle_state.ego_state import DynamicStateSE3, EgoStateSE3, EgoStateSE3Index
-=======
 from d123.datatypes.sensors.camera.pinhole_camera import (
     PinholeCameraMetadata,
     PinholeCameraType,
@@ -39,7 +28,6 @@
 from d123.datatypes.sensors.lidar.lidar import LiDARMetadata, LiDARType
 from d123.datatypes.time.time_point import TimePoint
 from d123.datatypes.vehicle_state.ego_state import DynamicStateSE3, EgoStateSE3
->>>>>>> dev_v0.0.7
 from d123.datatypes.vehicle_state.vehicle_parameters import get_wopd_chrysler_pacifica_parameters
 from d123.geometry import BoundingBoxSE3Index, EulerAngles, StateSE3, Vector3D, Vector3DIndex
 from d123.geometry.bounding_box import BoundingBoxSE3
@@ -264,15 +252,9 @@ def convert_wopd_tfrecord_log_to_arrow(
 def get_wopd_camera_metadata(
     initial_frame: dataset_pb2.Frame, data_converter_config: DataConverterConfig
-<<<<<<< HEAD
-) -> Dict[CameraType, PinholeCameraMetadata]:
-
-    cam_metadatas: Dict[CameraType, PinholeCameraMetadata] = {}
-=======
 ) -> Dict[PinholeCameraType, PinholeCameraMetadata]:
 
     cam_metadatas: Dict[PinholeCameraType, PinholeCameraMetadata] = {}
->>>>>>> dev_v0.0.7
     if data_converter_config.camera_store_option is not None:
         for calibration in initial_frame.context.camera_calibrations:
             camera_type = WOPD_CAMERA_TYPES[calibration.name]

From 058198828808866c995928ef08d058ccaa9dee98 Mon Sep 17 00:00:00 2001
From: Daniel Dauner
Date: Sat, 11 Oct 2025 14:14:27 +0200
Subject: [PATCH 066/145] Intermediate state of dataset conversion refactoring. Note that the current commit has many errors and is not stable.
(#51) --- d123/common/visualization/matplotlib/plots.py | 2 +- .../visualization/viser/viser_config.py | 2 +- d123/{datasets => conversion}/__init__.py | 0 d123/conversion/abstract_dataset_converter.py | 38 ++ .../dataset_converter_config.py} | 33 +- .../carla => conversion/datasets}/__init__.py | 0 .../datasets/av2/av2_constants.py | 19 +- .../datasets/av2/av2_helper.py | 36 +- .../datasets/av2/av2_map_conversion.py | 8 +- .../datasets/av2/av2_sensor_converter.py} | 270 +++----- .../datasets/carla}/__init__.py | 0 .../datasets/carla/carla_data_converter.py | 66 +- .../datasets/carla/carla_load_sensor.py} | 2 +- .../datasets/kitti_360/.gitkeep | 0 .../datasets/nuplan}/__init__.py | 0 .../datasets/nuplan/nuplan_constants.py | 85 +++ .../datasets/nuplan/nuplan_converter.py | 607 ++++++++++++++++++ .../datasets/nuplan/nuplan_load_sensor.py | 0 .../datasets/nuplan/nuplan_map_conversion.py | 60 +- .../datasets/nuplan/utils/log_splits.yaml | 0 .../datasets/nuscenes/.gitkeep | 0 .../waymo_map_utils/womp_boundary_utils.py | 0 .../wopd/waymo_map_utils/wopd_map_utils.py | 2 +- .../datasets/wopd/wopd_data_converter.py | 62 +- .../datasets/wopd/wopd_utils.py | 0 .../log_writer}/__init__.py | 0 .../log_writer/abstract_log_writer.py | 48 ++ .../log_writer/arrow_log_writer.py} | 263 +++++--- .../map_writer/abstract_map_writer.py} | 0 .../map_writer/gpkg_map_writer.py} | 0 .../utils}/__init__.py | 0 .../utils/map_utils}/__init__.py | 0 .../map_utils/opendrive/__init__ copy.py} | 0 .../utils/map_utils/opendrive/__init__.py | 0 .../opendrive/opendrive_map_conversion.py | 16 +- .../map_utils/opendrive/parser/__init__.py | 0 .../map_utils}/opendrive/parser/elevation.py | 2 +- .../map_utils}/opendrive/parser/geometry.py | 0 .../utils/map_utils}/opendrive/parser/lane.py | 2 +- .../map_utils}/opendrive/parser/objects.py | 0 .../map_utils}/opendrive/parser/opendrive.py | 2 +- .../map_utils}/opendrive/parser/polynomial.py | 0 .../map_utils}/opendrive/parser/reference.py | 8 +- .../utils/map_utils}/opendrive/parser/road.py | 8 +- .../map_utils/opendrive/utils/__init__.py | 0 .../map_utils}/opendrive/utils/collection.py | 12 +- .../map_utils}/opendrive/utils/id_mapping.py | 0 .../map_utils}/opendrive/utils/id_system.py | 0 .../map_utils}/opendrive/utils/lane_helper.py | 8 +- .../opendrive/utils/objects_helper.py | 4 +- .../utils/map_utils/road_edge/__init__.py | 0 .../road_edge/road_edge_2d_utils.py | 0 .../road_edge/road_edge_3d_utils.py | 2 +- .../utils/sensor_utils}/camera_conventions.py | 0 .../sensor_utils}/lidar_index_registry.py | 2 +- d123/datasets/nuplan/nuplan_data_converter.py | 459 ------------- .../scene/arrow/utils/arrow_getters.py | 4 +- .../scene/arrow/utils/arrow_metadata_utils.py | 2 + d123/datatypes/scene/scene_metadata.py | 2 +- d123/datatypes/sensors/lidar/lidar.py | 2 +- d123/geometry/se.py | 5 +- .../script/builders/data_converter_builder.py | 22 - .../builders/dataset_converter_builder.py | 22 + d123/script/builders/log_writer_builder.py | 17 + .../config/common/default_dataset_paths.yaml | 10 +- d123/script/config/conversion/__init__.py | 0 .../config/conversion/datasets/__init__.py | 0 .../datasets/av2_sensor_dataset.yaml | 6 +- .../datasets/carla_dataset.yaml | 6 +- .../datasets/nuplan_dataset.yaml | 10 +- .../datasets/nuplan_mini_dataset.yaml | 11 +- .../datasets/nuplan_private_dataset.yaml | 10 +- .../datasets/wopd_dataset.yaml | 6 +- .../default_conversion.yaml} | 6 +- .../config/conversion/log_writer/__init__.py | 0 .../log_writer/arrow_ipc_log_writer.yaml | 5 + 
d123/script/run_conversion.py | 82 +++ d123/script/run_dataset_conversion.py | 45 -- d123/script/run_preprocessing.py | 58 -- d123/script/run_simulation.py | 83 --- d123/script/run_training.py | 80 --- d123/script/run_viser.py | 2 +- d123/simulation/agents/abstract_agents.py | 4 +- .../agents/constant_velocity_agents.py | 4 +- d123/simulation/agents/idm_agents.py | 4 +- d123/simulation/agents/path_following.py | 4 +- d123/simulation/agents/smart_agents.py | 4 +- .../controller/abstract_controller.py | 2 +- .../controller/action_controller.py | 2 +- d123/simulation/gym/demo_gym_env.py | 4 +- .../environment/helper/environment_cache.py | 6 +- d123/simulation/gym/gym_env.py | 4 +- d123/simulation/history/simulation_history.py | 2 +- .../history/simulation_history_buffer.py | 2 +- .../metrics/sim_agents/map_based.py | 6 +- .../metrics/sim_agents/sim_agents.py | 4 +- d123/simulation/metrics/sim_agents/utils.py | 2 +- .../observation/abstract_observation.py | 2 +- .../observation/agents_observation.py | 2 +- .../observation/log_replay_observation.py | 2 +- d123/simulation/planning/abstract_planner.py | 2 +- d123/simulation/simulation_2d.py | 2 +- .../abstract_time_controller.py | 2 +- .../time_controller/log_time_controller.py | 2 +- .../feature_builder/smart_feature_builder.py | 6 +- notebooks/av2/delete_me.ipynb | 8 +- notebooks/av2/delete_me_map.ipynb | 2 +- notebooks/deprecated/extraction_testing.ipynb | 2 +- .../test_nuplan_conversion.ipynb | 2 +- notebooks/deprecated/test_waypoints.ipynb | 2 +- notebooks/nuplan/nuplan_sensor_loading.ipynb | 4 +- notebooks/waymo_perception/map_testing.ipynb | 2 +- pyproject.toml | 1 + scripts/dataset/run_log_caching.sh | 3 +- test_viser.py | 13 +- 115 files changed, 1411 insertions(+), 1326 deletions(-) rename d123/{datasets => conversion}/__init__.py (100%) create mode 100644 d123/conversion/abstract_dataset_converter.py rename d123/{datasets/raw_data_converter.py => conversion/dataset_converter_config.py} (58%) rename d123/{datasets/carla => conversion/datasets}/__init__.py (100%) rename d123/{ => conversion}/datasets/av2/av2_constants.py (90%) rename d123/{ => conversion}/datasets/av2/av2_helper.py (88%) rename d123/{ => conversion}/datasets/av2/av2_map_conversion.py (98%) rename d123/{datasets/av2/av2_data_converter.py => conversion/datasets/av2/av2_sensor_converter.py} (55%) rename d123/{datasets/nuplan => conversion/datasets/carla}/__init__.py (100%) rename d123/{ => conversion}/datasets/carla/carla_data_converter.py (87%) rename d123/{datasets/carla/load_sensor.py => conversion/datasets/carla/carla_load_sensor.py} (80%) rename d123/{ => conversion}/datasets/kitti_360/.gitkeep (100%) rename d123/{datasets/utils => conversion/datasets/nuplan}/__init__.py (100%) create mode 100644 d123/conversion/datasets/nuplan/nuplan_constants.py create mode 100644 d123/conversion/datasets/nuplan/nuplan_converter.py rename d123/{ => conversion}/datasets/nuplan/nuplan_load_sensor.py (100%) rename d123/{ => conversion}/datasets/nuplan/nuplan_map_conversion.py (92%) rename d123/{ => conversion}/datasets/nuplan/utils/log_splits.yaml (100%) rename d123/{ => conversion}/datasets/nuscenes/.gitkeep (100%) rename d123/{ => conversion}/datasets/wopd/waymo_map_utils/womp_boundary_utils.py (100%) rename d123/{ => conversion}/datasets/wopd/waymo_map_utils/wopd_map_utils.py (99%) rename d123/{ => conversion}/datasets/wopd/wopd_data_converter.py (87%) rename d123/{ => conversion}/datasets/wopd/wopd_utils.py (100%) rename d123/{datasets/utils/maps => 
conversion/log_writer}/__init__.py (100%) create mode 100644 d123/conversion/log_writer/abstract_log_writer.py rename d123/{datasets/utils/arrow_ipc_writer.py => conversion/log_writer/arrow_log_writer.py} (68%) rename d123/{datasets/utils/maps/opendrive/__init__.py => conversion/map_writer/abstract_map_writer.py} (100%) rename d123/{datasets/utils/maps/opendrive/parser/__init__.py => conversion/map_writer/gpkg_map_writer.py} (100%) rename d123/{datasets/utils/maps/road_edge => conversion/utils}/__init__.py (100%) rename d123/{script/config/dataset_conversion => conversion/utils/map_utils}/__init__.py (100%) rename d123/{script/config/datasets/__init__.py => conversion/utils/map_utils/opendrive/__init__ copy.py} (100%) create mode 100644 d123/conversion/utils/map_utils/opendrive/__init__.py rename d123/{datasets/utils/maps => conversion/utils/map_utils}/opendrive/opendrive_map_conversion.py (95%) create mode 100644 d123/conversion/utils/map_utils/opendrive/parser/__init__.py rename d123/{datasets/utils/maps => conversion/utils/map_utils}/opendrive/parser/elevation.py (97%) rename d123/{datasets/utils/maps => conversion/utils/map_utils}/opendrive/parser/geometry.py (100%) rename d123/{datasets/utils/maps => conversion/utils/map_utils}/opendrive/parser/lane.py (98%) rename d123/{datasets/utils/maps => conversion/utils/map_utils}/opendrive/parser/objects.py (100%) rename d123/{datasets/utils/maps => conversion/utils/map_utils}/opendrive/parser/opendrive.py (99%) rename d123/{datasets/utils/maps => conversion/utils/map_utils}/opendrive/parser/polynomial.py (100%) rename d123/{datasets/utils/maps => conversion/utils/map_utils}/opendrive/parser/reference.py (94%) rename d123/{datasets/utils/maps => conversion/utils/map_utils}/opendrive/parser/road.py (93%) create mode 100644 d123/conversion/utils/map_utils/opendrive/utils/__init__.py rename d123/{datasets/utils/maps => conversion/utils/map_utils}/opendrive/utils/collection.py (96%) rename d123/{datasets/utils/maps => conversion/utils/map_utils}/opendrive/utils/id_mapping.py (100%) rename d123/{datasets/utils/maps => conversion/utils/map_utils}/opendrive/utils/id_system.py (100%) rename d123/{datasets/utils/maps => conversion/utils/map_utils}/opendrive/utils/lane_helper.py (96%) rename d123/{datasets/utils/maps => conversion/utils/map_utils}/opendrive/utils/objects_helper.py (94%) create mode 100644 d123/conversion/utils/map_utils/road_edge/__init__.py rename d123/{datasets/utils/maps => conversion/utils/map_utils}/road_edge/road_edge_2d_utils.py (100%) rename d123/{datasets/utils/maps => conversion/utils/map_utils}/road_edge/road_edge_3d_utils.py (99%) rename d123/{datasets/utils/sensor => conversion/utils/sensor_utils}/camera_conventions.py (100%) rename d123/{datasets/utils/sensor => conversion/utils/sensor_utils}/lidar_index_registry.py (96%) delete mode 100644 d123/datasets/nuplan/nuplan_data_converter.py delete mode 100644 d123/script/builders/data_converter_builder.py create mode 100644 d123/script/builders/dataset_converter_builder.py create mode 100644 d123/script/builders/log_writer_builder.py create mode 100644 d123/script/config/conversion/__init__.py create mode 100644 d123/script/config/conversion/datasets/__init__.py rename d123/script/config/{ => conversion}/datasets/av2_sensor_dataset.yaml (80%) rename d123/script/config/{ => conversion}/datasets/carla_dataset.yaml (80%) rename d123/script/config/{ => conversion}/datasets/nuplan_dataset.yaml (70%) rename d123/script/config/{ => conversion}/datasets/nuplan_mini_dataset.yaml (71%) 
rename d123/script/config/{ => conversion}/datasets/nuplan_private_dataset.yaml (70%) rename d123/script/config/{ => conversion}/datasets/wopd_dataset.yaml (81%) rename d123/script/config/{dataset_conversion/default_dataset_conversion.yaml => conversion/default_conversion.yaml} (75%) create mode 100644 d123/script/config/conversion/log_writer/__init__.py create mode 100644 d123/script/config/conversion/log_writer/arrow_ipc_log_writer.yaml create mode 100644 d123/script/run_conversion.py delete mode 100644 d123/script/run_dataset_conversion.py delete mode 100644 d123/script/run_preprocessing.py delete mode 100644 d123/script/run_simulation.py delete mode 100644 d123/script/run_training.py diff --git a/d123/common/visualization/matplotlib/plots.py b/d123/common/visualization/matplotlib/plots.py index b61b8ab2..3e297aac 100644 --- a/d123/common/visualization/matplotlib/plots.py +++ b/d123/common/visualization/matplotlib/plots.py @@ -11,7 +11,7 @@ add_ego_vehicle_to_ax, add_traffic_lights_to_ax, ) -from d123.datasets.scene.abstract_scene import AbstractScene +from d123.datatypes.scene.abstract_scene import AbstractScene def _plot_scene_on_ax(ax: plt.Axes, scene: AbstractScene, iteration: int = 0, radius: float = 80) -> plt.Axes: diff --git a/d123/common/visualization/viser/viser_config.py b/d123/common/visualization/viser/viser_config.py index f99f7823..fedb643b 100644 --- a/d123/common/visualization/viser/viser_config.py +++ b/d123/common/visualization/viser/viser_config.py @@ -44,7 +44,7 @@ class ViserConfig: # Map map_visible: bool = True - map_radius: float = 200.0 # [m] + map_radius: float = 1000.0 # [m] map_non_road_z_offset: float = 0.1 # small z-translation to place crosswalks, parking, etc. on top of the road # Bounding boxes diff --git a/d123/datasets/__init__.py b/d123/conversion/__init__.py similarity index 100% rename from d123/datasets/__init__.py rename to d123/conversion/__init__.py diff --git a/d123/conversion/abstract_dataset_converter.py b/d123/conversion/abstract_dataset_converter.py new file mode 100644 index 00000000..f0631a37 --- /dev/null +++ b/d123/conversion/abstract_dataset_converter.py @@ -0,0 +1,38 @@ +import abc + +from d123.conversion.dataset_converter_config import DatasetConverterConfig +from d123.conversion.log_writer.abstract_log_writer import AbstractLogWriter + + +class AbstractDatasetConverter(abc.ABC): + """Abstract base class for dataset converters. + + A dataset converter for implementing all dataset specific conversion logic. + + """ + + def __init__(self, dataset_converter_config: DatasetConverterConfig) -> None: + self.dataset_converter_config = dataset_converter_config + + @abc.abstractmethod + def get_number_of_maps(self) -> int: + """Returns the number of available raw data maps for conversion.""" + + @abc.abstractmethod + def get_number_of_logs(self) -> int: + """Returns the number of available raw data logs for conversion.""" + + @abc.abstractmethod + def convert_map(self, map_index: int) -> None: + """ + Convert a single map in raw data format to the uniform 123D format. + :param map_index: The index of the map to convert. + """ + + @abc.abstractmethod + def convert_log(self, log_index: int, log_writer: AbstractLogWriter) -> None: + """ + Convert a single log in raw data format to the uniform 123D format. + :param log_index: The index of the log to convert. + :param log_writer: The log writer to use for writing the converted log. 
+ """ diff --git a/d123/datasets/raw_data_converter.py b/d123/conversion/dataset_converter_config.py similarity index 58% rename from d123/datasets/raw_data_converter.py rename to d123/conversion/dataset_converter_config.py index 862edd09..018a9fd1 100644 --- a/d123/datasets/raw_data_converter.py +++ b/d123/conversion/dataset_converter_config.py @@ -1,13 +1,12 @@ -import abc +from __future__ import annotations + from dataclasses import dataclass from pathlib import Path -from typing import List, Literal, Union - -from d123.common.multithreading.worker_utils import WorkerPool +from typing import Literal, Union @dataclass -class DataConverterConfig: +class DatasetConverterConfig: output_path: Union[str, Path] force_log_conversion: bool = False @@ -49,27 +48,3 @@ def __post_init__(self): "path", "binary", ], f"Invalid LiDAR store option, got {self.lidar_store_option}." - - -class RawDataConverter(abc.ABC): - - def __init__(self, data_converter_config: DataConverterConfig) -> None: - self.data_converter_config = data_converter_config - - @abc.abstractmethod - def get_available_splits(self) -> List[str]: - """Returns a list of available raw data types.""" - - @abc.abstractmethod - def convert_maps(self, worker: WorkerPool) -> None: - """ - Convert maps in raw data format to the uniform 123D format. - :param worker: The worker pool to use for parallel processing. - """ - - @abc.abstractmethod - def convert_logs(self, worker: WorkerPool) -> None: - """ - Convert logs in raw data format to the uniform 123D format. - :param worker: The worker pool to use for parallel processing. - """ diff --git a/d123/datasets/carla/__init__.py b/d123/conversion/datasets/__init__.py similarity index 100% rename from d123/datasets/carla/__init__.py rename to d123/conversion/datasets/__init__.py diff --git a/d123/datasets/av2/av2_constants.py b/d123/conversion/datasets/av2/av2_constants.py similarity index 90% rename from d123/datasets/av2/av2_constants.py rename to d123/conversion/datasets/av2/av2_constants.py index 16dab0b3..fb61313d 100644 --- a/d123/datasets/av2/av2_constants.py +++ b/d123/conversion/datasets/av2/av2_constants.py @@ -1,8 +1,19 @@ +from typing import Dict, Final, Set + from d123.common.utils.enums import SerialIntEnum from d123.datatypes.detections.detection_types import DetectionType from d123.datatypes.maps.map_datatypes import RoadLineType from d123.datatypes.sensors.camera.pinhole_camera import PinholeCameraType +AV2_SENSOR_SPLITS: Set[str] = { + "av2-sensor_train", + "av2-sensor_val", + "av2-sensor_test", + "av2-sensor-mini_train", + "av2-sensor-mini_val", + "av2-sensor-mini_test", +} + class AV2SensorBoxDetectionType(SerialIntEnum): """Sensor dataset annotation categories.""" @@ -75,7 +86,7 @@ class AV2SensorBoxDetectionType(SerialIntEnum): } -AV2_CAMERA_TYPE_MAPPING = { +AV2_CAMERA_TYPE_MAPPING: Dict[str, PinholeCameraType] = { "ring_front_center": PinholeCameraType.CAM_F0, "ring_front_left": PinholeCameraType.CAM_L0, "ring_front_right": PinholeCameraType.CAM_R0, @@ -88,7 +99,7 @@ class AV2SensorBoxDetectionType(SerialIntEnum): } -AV2_ROAD_LINE_TYPE_MAPPING = { +AV2_ROAD_LINE_TYPE_MAPPING: Dict[str, RoadLineType] = { "NONE": RoadLineType.NONE, "UNKNOWN": RoadLineType.UNKNOWN, "DASH_SOLID_YELLOW": RoadLineType.DASH_SOLID_YELLOW, @@ -105,3 +116,7 @@ class AV2SensorBoxDetectionType(SerialIntEnum): "SOLID_DASH_YELLOW": RoadLineType.SOLID_DASH_YELLOW, "SOLID_BLUE": RoadLineType.SOLID_BLUE, } + + +AV2_SENSOR_CAM_SHUTTER_INTERVAL_MS: Final[float] = 50.0 
+AV2_SENSOR_LIDAR_SWEEP_INTERVAL_W_BUFFER_NS: Final[float] = 102000000.0 diff --git a/d123/datasets/av2/av2_helper.py b/d123/conversion/datasets/av2/av2_helper.py similarity index 88% rename from d123/datasets/av2/av2_helper.py rename to d123/conversion/datasets/av2/av2_helper.py index 5e130eeb..401a979e 100644 --- a/d123/datasets/av2/av2_helper.py +++ b/d123/conversion/datasets/av2/av2_helper.py @@ -1,25 +1,13 @@ from pathlib import Path -from typing import Final, List, Literal, Optional +from typing import List, Literal, Optional import pandas as pd -from d123.datasets.av2.av2_constants import AV2_CAMERA_TYPE_MAPPING - -AV2_SENSOR_CAM_SHUTTER_INTERVAL_MS: Final[float] = 50.0 -AV2_SENSOR_LIDAR_SWEEP_INTERVAL_W_BUFFER_NS: Final[float] = 102000000.0 - - -AV2_SENSOR_CAMERA_NAMES = [ - "ring_front_center", - "ring_front_left", - "ring_front_right", - "ring_rear_left", - "ring_rear_right", - "ring_side_left", - "ring_side_right", - "stereo_front_left", - "stereo_front_right", -] +from d123.conversion.datasets.av2.av2_constants import ( + AV2_CAMERA_TYPE_MAPPING, + AV2_SENSOR_CAM_SHUTTER_INTERVAL_MS, + AV2_SENSOR_LIDAR_SWEEP_INTERVAL_W_BUFFER_NS, +) def get_dataframe_from_file(file_path: Path) -> pd.DataFrame: @@ -40,15 +28,15 @@ def get_slice_with_timestamp_ns(dataframe: pd.DataFrame, timestamp_ns: int): return dataframe[dataframe["timestamp_ns"] == timestamp_ns] -def build_sensor_dataframe(raw_log_path: Path) -> pd.DataFrame: +def build_sensor_dataframe(source_log_path: Path) -> pd.DataFrame: # https://github.com/argoverse/av2-api/blob/main/src/av2/datasets/sensor/sensor_dataloader.py#L209 - split = raw_log_path.parent.name - log_id = raw_log_path.name + split = source_log_path.parent.name + log_id = source_log_path.name - lidar_path = raw_log_path / "sensors" / "lidar" - cameras_path = raw_log_path / "sensors" / "cameras" + lidar_path = source_log_path / "sensors" / "lidar" + cameras_path = source_log_path / "sensors" / "cameras" # Find all the lidar records and timestamps from file names. lidar_records = populate_sensor_records(lidar_path, split, log_id) @@ -56,7 +44,7 @@ def build_sensor_dataframe(raw_log_path: Path) -> pd.DataFrame: # Find all the camera records and timestamps from file names. 
camera_records = [] for camera_folder in cameras_path.iterdir(): - assert camera_folder.name in AV2_SENSOR_CAMERA_NAMES + assert camera_folder.name in AV2_CAMERA_TYPE_MAPPING.keys() camera_record = populate_sensor_records(camera_folder, split, log_id) camera_records.append(camera_record) diff --git a/d123/datasets/av2/av2_map_conversion.py b/d123/conversion/datasets/av2/av2_map_conversion.py similarity index 98% rename from d123/datasets/av2/av2_map_conversion.py rename to d123/conversion/datasets/av2/av2_map_conversion.py index d158007d..9aae5385 100644 --- a/d123/datasets/av2/av2_map_conversion.py +++ b/d123/conversion/datasets/av2/av2_map_conversion.py @@ -10,9 +10,11 @@ import shapely import shapely.geometry as geom -from d123.datasets.av2.av2_constants import AV2_ROAD_LINE_TYPE_MAPPING -from d123.datasets.utils.maps.road_edge.road_edge_2d_utils import split_line_geometry_by_max_length -from d123.datasets.utils.maps.road_edge.road_edge_3d_utils import get_road_edges_3d_from_generic_drivable_area_df +from d123.conversion.datasets.av2.av2_constants import AV2_ROAD_LINE_TYPE_MAPPING +from d123.conversion.utils.map_utils.road_edge.road_edge_2d_utils import split_line_geometry_by_max_length +from d123.conversion.utils.map_utils.road_edge.road_edge_3d_utils import ( + get_road_edges_3d_from_generic_drivable_area_df, +) from d123.datatypes.maps.map_datatypes import MapLayer, RoadEdgeType from d123.geometry import OccupancyMap2D, Point3DIndex, Polyline2D, Polyline3D diff --git a/d123/datasets/av2/av2_data_converter.py b/d123/conversion/datasets/av2/av2_sensor_converter.py similarity index 55% rename from d123/datasets/av2/av2_data_converter.py rename to d123/conversion/datasets/av2/av2_sensor_converter.py index 48fc8cb9..f3ed06af 100644 --- a/d123/datasets/av2/av2_data_converter.py +++ b/d123/conversion/datasets/av2/av2_sensor_converter.py @@ -1,25 +1,28 @@ -import gc import hashlib -from functools import partial from pathlib import Path -from typing import Any, Dict, List, Optional, Tuple, Union +from typing import Dict, List, Optional, Tuple, Union import numpy as np import pandas as pd -from typing_extensions import Final -from d123.common.multithreading.worker_utils import WorkerPool, worker_map -from d123.common.utils.arrow_helper import open_arrow_table, write_arrow_table -from d123.datasets.av2.av2_constants import AV2_CAMERA_TYPE_MAPPING, AV2_TO_DETECTION_TYPE, AV2SensorBoxDetectionType -from d123.datasets.av2.av2_helper import ( +from d123.conversion.abstract_dataset_converter import ( + AbstractDatasetConverter, + AbstractLogWriter, + DatasetConverterConfig, +) +from d123.conversion.datasets.av2.av2_constants import ( + AV2_CAMERA_TYPE_MAPPING, + AV2_SENSOR_SPLITS, + AV2_TO_DETECTION_TYPE, + AV2SensorBoxDetectionType, +) +from d123.conversion.datasets.av2.av2_helper import ( build_sensor_dataframe, build_synchronization_dataframe, find_closest_target_fpath, get_slice_with_timestamp_ns, ) -from d123.datasets.av2.av2_map_conversion import convert_av2_map -from d123.datasets.raw_data_converter import DataConverterConfig, RawDataConverter -from d123.datasets.utils.arrow_ipc_writer import ArrowLogWriter +from d123.conversion.datasets.av2.av2_map_conversion import convert_av2_map from d123.datatypes.detections.detection import BoxDetectionMetadata, BoxDetectionSE3, BoxDetectionWrapper from d123.datatypes.detections.detection_types import DetectionType from d123.datatypes.scene.scene_metadata import LogMetadata @@ -40,8 +43,6 @@ from d123.geometry.bounding_box import BoundingBoxSE3 from 
d123.geometry.transform.transform_se3 import convert_relative_to_absolute_se3_array
 
-SORT_BY_TIMESTAMP: Final[bool] = True
-
 
 def create_token(input_data: str) -> str:
     # TODO: Refactor this function.
@@ -53,26 +54,25 @@ def create_token(input_data: str) -> str:
     return hash_obj.hexdigest()[:16]
 
 
-class AV2SensorDataConverter(RawDataConverter):
+class AV2SensorConverter(AbstractDatasetConverter):
 
     def __init__(
         self,
         splits: List[str],
         log_path: Union[Path, str],
-        data_converter_config: DataConverterConfig,
+        dataset_converter_config: DatasetConverterConfig,
     ) -> None:
-        super().__init__(data_converter_config)
+        super().__init__(dataset_converter_config)
         for split in splits:
             assert (
-                split in self.get_available_splits()
+                split in AV2_SENSOR_SPLITS
-            ), f"Split {split} is not available. Available splits: {self.available_splits}"
+            ), f"Split {split} is not available. Available splits: {AV2_SENSOR_SPLITS}"
 
         self._splits: List[str] = splits
         self._data_root: Path = Path(log_path)
-        self._log_paths_per_split: Dict[str, List[Path]] = self._collect_log_paths()
-        self._target_dt: float = 0.1
+        self._log_paths_and_split: List[Tuple[Path, str]] = self._collect_log_paths()
 
-    def _collect_log_paths(self) -> Dict[str, List[Path]]:
+    def _collect_log_paths(self) -> List[Tuple[Path, str]]:
-        log_paths_per_split: Dict[str, List[Path]] = {}
+        log_paths_and_split: List[Tuple[Path, str]] = []
 
         for split in self._splits:
             subsplit = split.split("_")[-1]
@@ -87,133 +87,95 @@ def _collect_log_paths(self) -> Dict[str, List[Path]]:
             elif "av2-sensor-mini" in split:
                 log_folder = self._data_root / "sensor_mini" / subsplit
 
-            log_paths_per_split[split] = list(log_folder.iterdir())
-
-        return log_paths_per_split
-
-    def get_available_splits(self) -> List[str]:
-        return [
-            "av2-sensor_train",
-            "av2-sensor_val",
-            "av2-sensor_test",
-            "av2-sensor-mini_train",
-            "av2-sensor-mini_val",
-            "av2-sensor-mini_test",
-        ]
-
-    def convert_maps(self, worker: WorkerPool) -> None:
-        log_args = [
-            {
-                "log_path": log_path,
-                "split": split,
-            }
-            for split, log_paths in self._log_paths_per_split.items()
-            for log_path in log_paths
-        ]
-        worker_map(
-            worker,
-            partial(convert_av2_map_to_gpkg, data_converter_config=self.data_converter_config),
-            log_args,
-        )
+            log_paths_and_split.extend([(log_path, split) for log_path in log_folder.iterdir()])
 
-    def convert_logs(self, worker: WorkerPool) -> None:
-        log_args = [
-            {
-                "log_path": log_path,
-                "split": split,
-            }
-            for split, log_paths in self._log_paths_per_split.items()
-            for log_path in log_paths
-        ]
-        worker_map(
-            worker,
-            partial(
-                convert_av2_log_to_arrow,
-                data_converter_config=self.data_converter_config,
-            ),
-            log_args,
-        )
+        return log_paths_and_split
 
+    def get_number_of_maps(self) -> int:
+        """Inherited, see superclass."""
+        return len(self._log_paths_and_split)
 
-def convert_av2_map_to_gpkg(
-    args: List[Dict[str, Union[List[str], List[Path]]]],
-    data_converter_config: DataConverterConfig,
-) -> List[Any]:
-    for log_info in args:
-        source_log_path: Path = log_info["log_path"]
-        split: str = log_info["split"]
+    def get_number_of_logs(self) -> int:
+        """Inherited, see superclass."""
+        return len(self._log_paths_and_split)
 
-        source_log_name = source_log_path.name
+    def convert_map(self, map_index: int) -> None:
+        """Inherited, see superclass."""
 
-        map_path = data_converter_config.output_path / "maps" / split / f"{source_log_name}.gpkg"
-        if data_converter_config.force_map_conversion or not map_path.exists():
+        source_log_path, split = self._log_paths_and_split[map_index]
+        log_name = source_log_path.name
+        map_path = self.dataset_converter_config.output_path / "maps" / split / f"{log_name}.gpkg"
+        if
self.dataset_converter_config.force_map_conversion or not map_path.exists(): map_path.unlink(missing_ok=True) + if not map_path.parent.exists(): + map_path.parent.mkdir(parents=True, exist_ok=True) convert_av2_map(source_log_path, map_path) - return [] + def convert_log(self, log_index: int, log_writer: AbstractLogWriter) -> None: + """Inherited, see superclass.""" -def convert_av2_log_to_arrow( - args: List[Dict[str, Union[List[str], List[Path]]]], - data_converter_config: DataConverterConfig, -) -> List[Any]: - for log_info in args: - log_path: Path = log_info["log_path"] - split: str = log_info["split"] + source_log_path, split = self._log_paths_and_split[log_index] - if not log_path.exists(): - raise FileNotFoundError(f"Log path {log_path} does not exist.") + # 1. Initialize Metadata + log_metadata = LogMetadata( + dataset="av2-sensor", + split=split, + log_name=source_log_path.name, + location=None, # TODO: Add location information. + timestep_seconds=0.1, + vehicle_parameters=get_av2_ford_fusion_hybrid_parameters(), + camera_metadata=get_av2_camera_metadata(source_log_path), + lidar_metadata=get_av2_lidar_metadata(source_log_path), + map_has_z=True, + map_is_local=True, + ) - log_file_path = data_converter_config.output_path / split / f"{log_path.stem}.arrow" + # 2. Prepare log writer + overwrite_log = log_writer.reset(self.dataset_converter_config, log_metadata) - if data_converter_config.force_log_conversion or not log_file_path.exists(): - log_file_path.unlink(missing_ok=True) - if not log_file_path.parent.exists(): - log_file_path.parent.mkdir(parents=True, exist_ok=True) + if overwrite_log: - sensor_df = build_sensor_dataframe(log_path) + # 3. Process source log data + sensor_df = build_sensor_dataframe(source_log_path) synchronization_df = build_synchronization_dataframe(sensor_df) - log_metadata = LogMetadata( - dataset="av2-sensor", - split=split, - log_name=log_path.name, - location=None, # TODO: Add location information. 
- timestep_seconds=0.1, - vehicle_parameters=get_av2_ford_fusion_hybrid_parameters(), - camera_metadata=get_av2_camera_metadata(log_path), - lidar_metadata=get_av2_lidar_metadata(log_path), - map_has_z=True, - map_is_local=True, - ) + lidar_sensor = sensor_df.xs(key="lidar", level=2) + lidar_timestamps_ns = np.sort([int(idx_tuple[2]) for idx_tuple in lidar_sensor.index]) - log_writer = ArrowLogWriter( - log_path=log_file_path, - data_converter_config=data_converter_config, - log_metadata=log_metadata, + annotations_df = ( + pd.read_feather(source_log_path / "annotations.feather") + if (source_log_path / "annotations.feather").exists() + else None ) - - _write_recording_table( - sensor_df, - synchronization_df, - log_writer, - log_file_path, - log_path, - data_converter_config, + city_se3_egovehicle_df = pd.read_feather(source_log_path / "city_SE3_egovehicle.feather") + egovehicle_se3_sensor_df = ( + pd.read_feather(source_log_path / "calibration" / "egovehicle_SE3_sensor.feather") + if self.dataset_converter_config.camera_store_option is not None + else None ) - del log_writer - gc.collect() - if SORT_BY_TIMESTAMP: - recording_table = open_arrow_table(log_file_path) - recording_table = recording_table.sort_by([("timestamp", "ascending")]) - write_arrow_table(recording_table, log_file_path) + for lidar_timestamp_ns in lidar_timestamps_ns: + ego_state = _extract_av2_sensor_ego_state(city_se3_egovehicle_df, lidar_timestamp_ns) + log_writer.write( + token=create_token(str(lidar_timestamp_ns)), + timestamp=TimePoint.from_ns(int(lidar_timestamp_ns)), + ego_state=ego_state, + box_detections=_extract_av2_sensor_box_detections(annotations_df, lidar_timestamp_ns, ego_state), + cameras=_extract_av2_sensor_camera( + lidar_timestamp_ns, + egovehicle_se3_sensor_df, + synchronization_df, + source_log_path, + self.dataset_converter_config, + ), + ) - return [] + log_writer.close() -def get_av2_camera_metadata(log_path: Path) -> Dict[PinholeCameraType, PinholeCameraMetadata]: +def get_av2_camera_metadata(source_log_path: Path) -> Dict[PinholeCameraType, PinholeCameraMetadata]: - intrinsics_file = log_path / "calibration" / "intrinsics.feather" + intrinsics_file = source_log_path / "calibration" / "intrinsics.feather" intrinsics_df = pd.read_feather(intrinsics_file) camera_metadata: Dict[PinholeCameraType, PinholeCameraMetadata] = {} @@ -253,58 +215,6 @@ def get_av2_lidar_metadata(log_path: Path) -> Dict[LiDARType, LiDARMetadata]: return {} -def _write_recording_table( - sensor_df: pd.DataFrame, - synchronization_df: pd.DataFrame, - log_writer: ArrowLogWriter, - log_file_path: Path, - source_log_path: Path, - data_converter_config: DataConverterConfig, -) -> None: - - # NOTE: Similar to other datasets, we use the lidar timestamps as reference timestamps. - lidar_sensor = sensor_df.xs(key="lidar", level=2) - lidar_timestamps_ns = np.sort([int(idx_tuple[2]) for idx_tuple in lidar_sensor.index]) - - # NOTE: The annotation dataframe is not available for the test split. 
- annotations_df = ( - pd.read_feather(source_log_path / "annotations.feather") - if (source_log_path / "annotations.feather").exists() - else None - ) - - city_se3_egovehicle_df = pd.read_feather(source_log_path / "city_SE3_egovehicle.feather") - - egovehicle_se3_sensor_df = ( - pd.read_feather(source_log_path / "calibration" / "egovehicle_SE3_sensor.feather") - if data_converter_config.camera_store_option is not None - else None - ) - - for lidar_timestamp_ns in lidar_timestamps_ns: - - ego_state = _extract_av2_sensor_ego_state(city_se3_egovehicle_df, lidar_timestamp_ns) - log_writer.add_row( - token=create_token(str(lidar_timestamp_ns)), - timestamp=TimePoint.from_ns(int(lidar_timestamp_ns)), - ego_state=ego_state, - box_detections=_extract_av2_sensor_box_detections(annotations_df, lidar_timestamp_ns, ego_state), - traffic_lights=None, # NOTE: Traffic light information is not available in AV2 sensor dataset. - cameras=_extract_av2_sensor_camera( - lidar_timestamp_ns, - egovehicle_se3_sensor_df, - synchronization_df, - source_log_path, - data_converter_config, - ), - lidars=None, - scenario_tags=None, - route_lane_group_ids=None, # NOTE: Route information is not available in AV2 sensor dataset. - ) - - log_writer.close() - - def _extract_av2_sensor_box_detections( annotations_df: Optional[pd.DataFrame], lidar_timestamp_ns: int, @@ -397,7 +307,7 @@ def _extract_av2_sensor_camera( egovehicle_se3_sensor_df: pd.DataFrame, synchronization_df: pd.DataFrame, source_log_path: Path, - data_converter_config: DataConverterConfig, + dataset_converter_config: DatasetConverterConfig, ) -> Dict[PinholeCameraType, Tuple[Union[str, bytes], StateSE3]]: camera_dict: Dict[PinholeCameraType, Tuple[Union[str, bytes], StateSE3]] = {} @@ -437,9 +347,9 @@ def _extract_av2_sensor_camera( qz=row["qz"], ) camera_data = None - if data_converter_config.camera_store_option == "path": + if dataset_converter_config.camera_store_option == "path": camera_data = str(relative_image_path) - elif data_converter_config.camera_store_option == "binary": + elif dataset_converter_config.camera_store_option == "binary": with open(absolute_image_path, "rb") as f: camera_data = f.read() camera_dict[camera_type] = camera_data, camera_extrinsic @@ -447,6 +357,6 @@ def _extract_av2_sensor_camera( return camera_dict -def _extract_lidar(lidar_pc, data_converter_config: DataConverterConfig) -> Dict[LiDARType, Optional[str]]: +def _extract_lidar(lidar_pc, dataset_converter_config: DatasetConverterConfig) -> Dict[LiDARType, Optional[str]]: # TODO: Implement this function to extract lidar data. 
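+    # A path-based sketch of what this could look like (assumptions, not part of the
+    # AV2 API: one feather file per sweep under sensors/lidar/<timestamp_ns>.feather,
+    # and a signature receiving the sweep timestamp rather than a point cloud):
+    #
+    #   relative_path = f"sensors/lidar/{lidar_timestamp_ns}.feather"
+    #   if dataset_converter_config.lidar_store_option == "path":
+    #       return {lidar_type: relative_path}  # for an appropriate LiDARType member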
    return {}
diff --git a/d123/datasets/nuplan/__init__.py b/d123/conversion/datasets/carla/__init__.py
similarity index 100%
rename from d123/datasets/nuplan/__init__.py
rename to d123/conversion/datasets/carla/__init__.py
diff --git a/d123/datasets/carla/carla_data_converter.py b/d123/conversion/datasets/carla/carla_data_converter.py
similarity index 87%
rename from d123/datasets/carla/carla_data_converter.py
rename to d123/conversion/datasets/carla/carla_data_converter.py
index c38fc83a..fedb027e 100644
--- a/d123/datasets/carla/carla_data_converter.py
+++ b/d123/conversion/datasets/carla/carla_data_converter.py
@@ -13,9 +13,9 @@
 
 from d123.common.multithreading.worker_utils import WorkerPool, worker_map
 from d123.common.utils.arrow_helper import open_arrow_table, write_arrow_table
-from d123.datasets.raw_data_converter import DataConverterConfig, RawDataConverter
-from d123.datasets.utils.maps.opendrive.opendrive_map_conversion import convert_from_xodr
-from d123.datasets.utils.sensor.lidar_index_registry import CarlaLidarIndex
+from d123.conversion.abstract_dataset_converter import AbstractDatasetConverter, DatasetConverterConfig
+from d123.conversion.utils.map_utils.opendrive.opendrive_map_conversion import convert_from_xodr
+from d123.conversion.utils.sensor_utils.lidar_index_registry import CarlaLidarIndex
 from d123.datatypes.maps.abstract_map import AbstractMap, MapLayer
 from d123.datatypes.maps.abstract_map_objects import AbstractLane
 from d123.datatypes.maps.gpkg.gpkg_map import get_global_map_api
@@ -76,15 +76,15 @@ def create_token(input_data: str) -> str:
     return hash_obj.hexdigest()[:16]
 
 
-class CarlaDataConverter(RawDataConverter):
+class CarlaDataConverter(AbstractDatasetConverter):
 
     def __init__(
         self,
         splits: List[str],
         log_path: Union[Path, str],
-        data_converter_config: DataConverterConfig,
+        dataset_converter_config: DatasetConverterConfig,
     ) -> None:
-        super().__init__(data_converter_config)
+        super().__init__(dataset_converter_config)
         for split in splits:
             assert (
                 split in self.get_available_splits()
@@ -112,7 +112,7 @@ def convert_maps(self, worker: WorkerPool) -> None:
             worker,
             partial(
                 convert_carla_map_to_gpkg,
-                data_converter_config=self.data_converter_config,
+                dataset_converter_config=self.dataset_converter_config,
             ),
             list(AVAILABLE_CARLA_MAP_LOCATIONS),
         )
@@ -126,18 +126,20 @@ def convert_logs(self, worker: WorkerPool) -> None:
         ]
 
         worker_map(
-            worker, partial(convert_carla_log_to_arrow, data_converter_config=self.data_converter_config), log_args
+            worker,
+            partial(convert_carla_log_to_arrow, dataset_converter_config=self.dataset_converter_config),
+            log_args,
         )
 
 
-def convert_carla_map_to_gpkg(map_names: List[str], data_converter_config: DataConverterConfig) -> List[Any]:
+def convert_carla_map_to_gpkg(map_names: List[str], dataset_converter_config: DatasetConverterConfig) -> List[Any]:
     # TODO: add to config
     _interpolation_step_size = 0.5  # [m]
     _connection_distance_threshold = 0.1  # [m]
 
     for map_name in map_names:
-        map_path = data_converter_config.output_path / "maps" / f"carla_{map_name.lower()}.gpkg"
-        if data_converter_config.force_map_conversion or not map_path.exists():
+        map_path = dataset_converter_config.output_path / "maps" / f"carla_{map_name.lower()}.gpkg"
+        if dataset_converter_config.force_map_conversion or not map_path.exists():
             map_path.unlink(missing_ok=True)
             assert os.environ["CARLA_ROOT"] is not None
             CARLA_ROOT = Path(os.environ["CARLA_ROOT"])
@@ -160,17 +162,17 @@ def
convert_carla_log_to_arrow( - args: List[Dict[str, Union[List[str], List[Path]]]], data_converter_config: DataConverterConfig + args: List[Dict[str, Union[List[str], List[Path]]]], dataset_converter_config: DatasetConverterConfig ) -> List[Any]: def convert_log_internal(args: List[Dict[str, Union[List[str], List[Path]]]]) -> None: for log_info in args: log_path: Path = log_info["log_path"] split: str = log_info["split"] - output_path: Path = data_converter_config.output_path + output_path: Path = dataset_converter_config.output_path log_file_path = output_path / split / f"{log_path.stem}.arrow" - if data_converter_config.force_log_conversion or not log_file_path.exists(): + if dataset_converter_config.force_log_conversion or not log_file_path.exists(): log_file_path.unlink(missing_ok=True) if not log_file_path.parent.exists(): log_file_path.parent.mkdir(parents=True, exist_ok=True) @@ -198,22 +200,22 @@ def convert_log_internal(args: List[Dict[str, Union[List[str], List[Path]]]]) -> ("scenario_tag", pa.list_(pa.string())), ("route_lane_group_ids", pa.list_(pa.int64())), ] - if data_converter_config.lidar_store_option is not None: + if dataset_converter_config.lidar_store_option is not None: for lidar_type in lidar_metadata.keys(): - if data_converter_config.lidar_store_option == "path": + if dataset_converter_config.lidar_store_option == "path": schema_column_list.append((lidar_type.serialize(), pa.string())) - elif data_converter_config.lidar_store_option == "binary": + elif dataset_converter_config.lidar_store_option == "binary": raise NotImplementedError("Binary lidar storage is not implemented.") # TODO: Adjust how cameras are added - if data_converter_config.camera_store_option is not None: + if dataset_converter_config.camera_store_option is not None: for camera_type in camera_metadata.keys(): - if data_converter_config.camera_store_option == "path": + if dataset_converter_config.camera_store_option == "path": schema_column_list.append((camera_type.serialize(), pa.string())) schema_column_list.append( (f"{camera_type.serialize()}_extrinsic", pa.list_(pa.float64(), 16)) ) - elif data_converter_config.camera_store_option == "binary": + elif dataset_converter_config.camera_store_option == "binary": raise NotImplementedError("Binary camera storage is not implemented.") recording_schema = pa.schema(schema_column_list) @@ -231,7 +233,7 @@ def convert_log_internal(args: List[Dict[str, Union[List[str], List[Path]]]]) -> map_api, recording_schema, log_file_path, - data_converter_config, + dataset_converter_config, ) gc.collect() @@ -289,7 +291,7 @@ def _write_recording_table( map_api: AbstractMap, recording_schema: pa.Schema, log_file_path: Path, - data_converter_config: DataConverterConfig, + dataset_converter_config: DatasetConverterConfig, ) -> pa.Table: # TODO: Refactor this function to be more readable log_name = str(bounding_box_paths[0].parent.parent.stem) @@ -324,13 +326,13 @@ def _write_recording_table( "scenario_tag": [data["scenario_tag"]], "route_lane_group_ids": [route_lane_group_ids], } - if data_converter_config.lidar_store_option is not None: - lidar_data_dict = _extract_lidar(log_name, sample_name, data_converter_config) + if dataset_converter_config.lidar_store_option is not None: + lidar_data_dict = _extract_lidar(log_name, sample_name, dataset_converter_config) for lidar_type, lidar_data in lidar_data_dict.items(): row_data[lidar_type.serialize()] = [lidar_data] - if data_converter_config.camera_store_option is not None: - camera_data_dict = _extract_cameras(data, 
log_name, sample_name, data_converter_config) + if dataset_converter_config.camera_store_option is not None: + camera_data_dict = _extract_cameras(data, log_name, sample_name, dataset_converter_config) for camera_type, camera_data in camera_data_dict.items(): if camera_data is not None: row_data[camera_type.serialize()] = [camera_data[0]] @@ -420,20 +422,20 @@ def _extract_route_lane_group_ids(route: List[List[float]], map_api: AbstractMap def _extract_cameras( - data: Dict[str, Any], log_name: str, sample_name: str, data_converter_config: DataConverterConfig + data: Dict[str, Any], log_name: str, sample_name: str, dataset_converter_config: DatasetConverterConfig ) -> Dict[PinholeCameraType, Optional[str]]: camera_dict: Dict[str, Union[str, bytes]] = {} for camera_type in CARLA_CAMERA_TYPES: camera_full_path = CARLA_DATA_ROOT / "sensor_blobs" / log_name / camera_type.name / f"{sample_name}.jpg" if camera_full_path.exists(): - if data_converter_config.camera_store_option == "path": + if dataset_converter_config.camera_store_option == "path": path = f"{log_name}/{camera_type.name}/{sample_name}.jpg" extrinsics = data.get(f"{camera_type.serialize()}_transform", None) camera_dict[camera_type] = path, ( np.array(extrinsics, dtype=np.float64).flatten() if extrinsics is not None else None ) - elif data_converter_config.camera_store_option == "binary": + elif dataset_converter_config.camera_store_option == "binary": raise NotImplementedError("Binary camera storage is not implemented.") else: camera_dict[camera_type] = None @@ -441,15 +443,15 @@ def _extract_cameras( def _extract_lidar( - log_name: str, sample_name: str, data_converter_config: DataConverterConfig + log_name: str, sample_name: str, dataset_converter_config: DatasetConverterConfig ) -> Dict[LiDARType, Optional[str]]: lidar: Optional[str] = None lidar_full_path = CARLA_DATA_ROOT / "sensor_blobs" / log_name / "lidar" / f"{sample_name}.npy" if lidar_full_path.exists(): - if data_converter_config.lidar_store_option == "path": + if dataset_converter_config.lidar_store_option == "path": lidar = f"{log_name}/lidar/{sample_name}.npy" - elif data_converter_config.lidar_store_option == "binary": + elif dataset_converter_config.lidar_store_option == "binary": raise NotImplementedError("Binary lidar storage is not implemented.") else: raise FileNotFoundError(f"LiDAR file not found: {lidar_full_path}") diff --git a/d123/datasets/carla/load_sensor.py b/d123/conversion/datasets/carla/carla_load_sensor.py similarity index 80% rename from d123/datasets/carla/load_sensor.py rename to d123/conversion/datasets/carla/carla_load_sensor.py index 5fcbc890..464fecef 100644 --- a/d123/datasets/carla/load_sensor.py +++ b/d123/conversion/datasets/carla/carla_load_sensor.py @@ -2,7 +2,7 @@ import numpy as np -from d123.datatypes.sensors.lidar import LiDAR, LiDARMetadata +from d123.datatypes.sensors.lidar.lidar import LiDAR, LiDARMetadata def load_carla_lidar_from_path(filepath: Path, lidar_metadata: LiDARMetadata) -> LiDAR: diff --git a/d123/datasets/kitti_360/.gitkeep b/d123/conversion/datasets/kitti_360/.gitkeep similarity index 100% rename from d123/datasets/kitti_360/.gitkeep rename to d123/conversion/datasets/kitti_360/.gitkeep diff --git a/d123/datasets/utils/__init__.py b/d123/conversion/datasets/nuplan/__init__.py similarity index 100% rename from d123/datasets/utils/__init__.py rename to d123/conversion/datasets/nuplan/__init__.py diff --git a/d123/conversion/datasets/nuplan/nuplan_constants.py b/d123/conversion/datasets/nuplan/nuplan_constants.py new 
file mode 100644
index 00000000..d68bf672
--- /dev/null
+++ b/d123/conversion/datasets/nuplan/nuplan_constants.py
@@ -0,0 +1,85 @@
+from enum import IntEnum
+from typing import Dict, Final, List, Set
+
+from d123.datatypes.detections.detection import TrafficLightStatus
+from d123.datatypes.detections.detection_types import DetectionType
+from d123.datatypes.time.time_point import TimePoint
+
+
+class NuPlanBoxDetectionType(IntEnum):
+
+    VEHICLE = 0
+    BICYCLE = 1
+    PEDESTRIAN = 2
+    TRAFFIC_CONE = 3
+    BARRIER = 4
+    CZONE_SIGN = 5
+    GENERIC_OBJECT = 6
+
+
+NUPLAN_DEFAULT_DT: Final[float] = 0.05
+
+NUPLAN_TRAFFIC_STATUS_DICT: Final[Dict[str, TrafficLightStatus]] = {
+    "green": TrafficLightStatus.GREEN,
+    "red": TrafficLightStatus.RED,
+    "unknown": TrafficLightStatus.UNKNOWN,
+}
+
+
+NUPLAN_DETECTION_NAME_DICT: Final[Dict[str, DetectionType]] = {
+    "vehicle": DetectionType.VEHICLE,
+    "bicycle": DetectionType.BICYCLE,
+    "pedestrian": DetectionType.PEDESTRIAN,
+    "traffic_cone": DetectionType.TRAFFIC_CONE,
+    "barrier": DetectionType.BARRIER,
+    "czone_sign": DetectionType.CZONE_SIGN,
+    "generic_object": DetectionType.GENERIC_OBJECT,
+}
+
+
+NUPLAN_DATA_SPLITS: Set[str] = {
+    "nuplan_train",
+    "nuplan_val",
+    "nuplan_test",
+    "nuplan_mini_train",
+    "nuplan_mini_val",
+    "nuplan_mini_test",
+    "nuplan_private_test",  # TODO: remove, not publicly available
+}
+
+NUPLAN_MAP_LOCATIONS: List[str] = [
+    "sg-one-north",
+    "us-ma-boston",
+    "us-nv-las-vegas-strip",
+    "us-pa-pittsburgh-hazelwood",
+]
+
+NUPLAN_MAP_LOCATION_FILES: Dict[str, str] = {
+    "sg-one-north": "sg-one-north/9.17.1964/map.gpkg",
+    "us-ma-boston": "us-ma-boston/9.12.1817/map.gpkg",
+    "us-nv-las-vegas-strip": "us-nv-las-vegas-strip/9.15.1915/map.gpkg",
+    "us-pa-pittsburgh-hazelwood": "us-pa-pittsburgh-hazelwood/9.17.1937/map.gpkg",
+}
+
+
+NUPLAN_MAP_GPKG_LAYERS: Set[str] = {
+    "baseline_paths",
+    "carpark_areas",
+    "generic_drivable_areas",
+    "dubins_nodes",
+    "lane_connectors",
+    "intersections",
+    "boundaries",
+    "crosswalks",
+    "lanes_polygons",
+    "lane_group_connectors",
+    "lane_groups_polygons",
+    "road_segments",
+    "stop_polygons",
+    "traffic_lights",
+    "walkways",
+    "gen_lane_connectors_scaled_width_polygons",
+}
+
+
+NUPLAN_ROLLING_SHUTTER_S: Final[TimePoint] = TimePoint.from_s(1 / 60)
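As a rough sketch of the intended lookup pattern for these mappings (the helper
names below are illustrative only, not part of this patch):

    from d123.conversion.datasets.nuplan.nuplan_constants import (
        NUPLAN_DETECTION_NAME_DICT,
        NUPLAN_TRAFFIC_STATUS_DICT,
    )
    from d123.datatypes.detections.detection import TrafficLightStatus

    def to_detection_type(category_name: str):
        # Raw nuPlan category strings such as "vehicle" resolve to DetectionType members.
        return NUPLAN_DETECTION_NAME_DICT[category_name]

    def to_traffic_light_status(raw_status: str) -> TrafficLightStatus:
        # Fall back to UNKNOWN for any status string the mapping does not cover.
        return NUPLAN_TRAFFIC_STATUS_DICT.get(raw_status, TrafficLightStatus.UNKNOWN)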
diff --git a/d123/conversion/datasets/nuplan/nuplan_converter.py b/d123/conversion/datasets/nuplan/nuplan_converter.py
new file mode 100644
index 00000000..fda5d2dd
--- /dev/null
+++ b/d123/conversion/datasets/nuplan/nuplan_converter.py
@@ -0,0 +1,607 @@
+import logging
+import pickle
+from pathlib import Path
+from typing import Dict, Final, Generator, List, Optional, Tuple, Union
+
+import numpy as np
+import yaml
+
+import d123.conversion.datasets.nuplan.utils as nuplan_utils
+from d123.common.utils.dependencies import check_dependencies
+from d123.common.utils.timer import Timer
+from d123.conversion.abstract_dataset_converter import AbstractDatasetConverter
+from d123.conversion.dataset_converter_config import DatasetConverterConfig
+from d123.conversion.datasets.nuplan.nuplan_constants import (
+    NUPLAN_DATA_SPLITS,
+    NUPLAN_DEFAULT_DT,
+    NUPLAN_DETECTION_NAME_DICT,
+    NUPLAN_MAP_LOCATIONS,
+    NUPLAN_ROLLING_SHUTTER_S,
+    NUPLAN_TRAFFIC_STATUS_DICT,
+)
+from d123.conversion.datasets.nuplan.nuplan_map_conversion import NuPlanMapConverter
+from d123.conversion.log_writer.abstract_log_writer import AbstractLogWriter
+from d123.conversion.utils.sensor_utils.lidar_index_registry import NuPlanLidarIndex
+from d123.datatypes.detections.detection import (
+    BoxDetectionMetadata,
+    BoxDetectionSE3,
+    BoxDetectionWrapper,
+    TrafficLightDetection,
+    TrafficLightDetectionWrapper,
+)
+from d123.datatypes.scene.scene_metadata import LogMetadata
+from d123.datatypes.sensors.camera.pinhole_camera import (
+    PinholeCameraMetadata,
+    PinholeCameraType,
+    PinholeDistortion,
+    PinholeIntrinsics,
+)
+from d123.datatypes.sensors.lidar.lidar import LiDARMetadata, LiDARType
+from d123.datatypes.time.time_point import TimePoint
+from d123.datatypes.vehicle_state.ego_state import DynamicStateSE3, EgoStateSE3
+from d123.datatypes.vehicle_state.vehicle_parameters import (
+    get_nuplan_chrysler_pacifica_parameters,
+    rear_axle_se3_to_center_se3,
+)
+from d123.geometry import BoundingBoxSE3, EulerAngles, StateSE3, Vector3D
+from d123.geometry.utils.constants import DEFAULT_PITCH, DEFAULT_ROLL
+
+check_dependencies(["nuplan", "sqlalchemy"], "nuplan")
+from nuplan.database.nuplan_db.nuplan_scenario_queries import (
+    get_cameras,
+    get_images_from_lidar_tokens,
+)
+from nuplan.database.nuplan_db.query_session import execute_many, execute_one
+from nuplan.database.nuplan_db_orm.lidar_pc import LidarPc
+from nuplan.database.nuplan_db_orm.nuplandb import NuPlanDB
+from nuplan.planning.simulation.observation.observation_type import CameraChannel
+
+logging.basicConfig(level=logging.INFO)
+logger = logging.getLogger(__name__)
+
+
+# NOTE: Leaving this constant here, to avoid having a nuplan dependency in nuplan_constants.py
+NUPLAN_CAMERA_MAPPING = {
+    PinholeCameraType.CAM_F0: CameraChannel.CAM_F0,
+    PinholeCameraType.CAM_B0: CameraChannel.CAM_B0,
+    PinholeCameraType.CAM_L0: CameraChannel.CAM_L0,
+    PinholeCameraType.CAM_L1: CameraChannel.CAM_L1,
+    PinholeCameraType.CAM_L2: CameraChannel.CAM_L2,
+    PinholeCameraType.CAM_R0: CameraChannel.CAM_R0,
+    PinholeCameraType.CAM_R1: CameraChannel.CAM_R1,
+    PinholeCameraType.CAM_R2: CameraChannel.CAM_R2,
+}
+
+TARGET_DT: Final[float] = 0.1  # TODO: make configurable
+
+
+def create_splits_logs() -> Dict[str, List[str]]:
+    # NOTE: nuPlan stores the training and validation log names in log_splits.yaml.
+    yaml_filepath = Path(nuplan_utils.__path__[0]) / "log_splits.yaml"
+    with open(yaml_filepath, "r") as stream:
+        splits = yaml.safe_load(stream)
+
+    return splits["log_splits"]
+
+
+class NuPlanConverter(AbstractDatasetConverter):
+    def __init__(
+        self,
+        splits: List[str],
+        nuplan_data_root: Union[Path, str],
+        nuplan_map_root: Union[Path, str],
+        nuplan_sensor_root: Union[Path, str],
+        dataset_converter_config: DatasetConverterConfig,
+    ) -> None:
+        super().__init__(dataset_converter_config)
+
+        for split in splits:
+            assert (
+                split in NUPLAN_DATA_SPLITS
+            ), f"Split {split} is not available. Available splits: {NUPLAN_DATA_SPLITS}"
+
+        self._splits: List[str] = splits
+        self._nuplan_data_root: Path = Path(nuplan_data_root)
+        self._nuplan_map_root: Path = Path(nuplan_map_root)
+        self._nuplan_sensor_root: Path = Path(nuplan_sensor_root)
+
+        self._split_log_path_pairs: List[Tuple[str, Path]] = self._collect_split_log_path_pairs()
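+    # A rough usage sketch (the paths, config, and writer are illustrative; the
+    # actual entry point is the hydra-driven d123/script/run_conversion.py):
+    #
+    #   converter = NuPlanConverter(
+    #       splits=["nuplan_mini_val"],
+    #       nuplan_data_root="/data/nuplan",
+    #       nuplan_map_root="/data/nuplan/maps",
+    #       nuplan_sensor_root="/data/nuplan/sensor_blobs",
+    #       dataset_converter_config=config,
+    #   )
+    #   for log_index in range(converter.get_number_of_logs()):
+    #       converter.convert_log(log_index, log_writer)
+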
+    def _collect_split_log_path_pairs(self) -> List[Tuple[str, Path]]:
+        # NOTE: the nuplan mini folder has an internal train, val, test structure, all stored in "mini".
+        # The complete dataset is saved in the "trainval" folder (train and val), or in the "test" folder (for test).
+        split_log_path_pairs: List[Tuple[str, Path]] = []
+
+        for split in self._splits:
+            split_type = split.split("_")[-1]
+            assert split_type in ["train", "val", "test"]
+
+            if split in ["nuplan_train", "nuplan_val"]:
+                nuplan_split_folder = self._nuplan_data_root / "nuplan-v1.1" / "splits" / "trainval"
+            elif split in ["nuplan_test"]:
+                nuplan_split_folder = self._nuplan_data_root / "nuplan-v1.1" / "splits" / "test"
+            elif split in ["nuplan_mini_train", "nuplan_mini_val", "nuplan_mini_test"]:
+                nuplan_split_folder = self._nuplan_data_root / "nuplan-v1.1" / "splits" / "mini"
+            elif split == "nuplan_private_test":
+                nuplan_split_folder = self._nuplan_data_root / "nuplan-v1.1" / "splits" / "private_test"
+
+            all_log_names = set(str(log_file.stem) for log_file in nuplan_split_folder.glob("*.db"))
+
+            for log_name in list(all_log_names):
+                log_path = nuplan_split_folder / f"{log_name}.db"
+                split_log_path_pairs.append((split, log_path))
+
+        return split_log_path_pairs
+
+    def get_number_of_maps(self) -> int:
+        """Inherited, see superclass."""
+        return len(NUPLAN_MAP_LOCATIONS)
+
+    def get_number_of_logs(self) -> int:
+        """Inherited, see superclass."""
+        return len(self._split_log_path_pairs)
+
+    def convert_map(self, map_index: int) -> None:
+        """Inherited, see superclass."""
+        map_name = NUPLAN_MAP_LOCATIONS[map_index]
+        map_path = self.dataset_converter_config.output_path / "maps" / f"nuplan_{map_name}.gpkg"
+        if self.dataset_converter_config.force_map_conversion or not map_path.exists():
+            map_path.unlink(missing_ok=True)
+            NuPlanMapConverter(
+                nuplan_map_root=self._nuplan_map_root,
+                map_path=self.dataset_converter_config.output_path / "maps",
+            ).convert(map_name=map_name)
+
+    def convert_log(self, log_index: int, log_writer: AbstractLogWriter) -> None:
+        """Inherited, see superclass."""
+        split, source_log_path = self._split_log_path_pairs[log_index]
+
+        nuplan_log_db = NuPlanDB(self._nuplan_data_root, str(source_log_path), None)
+        log_name = nuplan_log_db.log_name
+
+        # 1. Initialize log metadata
+        log_metadata = LogMetadata(
+            dataset="nuplan",
+            split=split,
+            log_name=log_name,
+            location=nuplan_log_db.log.map_version,
+            timestep_seconds=TARGET_DT,
+            vehicle_parameters=get_nuplan_chrysler_pacifica_parameters(),
+            camera_metadata=_get_nuplan_camera_metadata(source_log_path, self.dataset_converter_config),
+            lidar_metadata=_get_nuplan_lidar_metadata(
+                self._nuplan_sensor_root, log_name, self.dataset_converter_config
+            ),
+            map_has_z=False,
+            map_is_local=False,
+        )
+
+        # 2. Prepare log writer
+        overwrite_log = log_writer.reset(self.dataset_converter_config, log_metadata)
+        timer = Timer()
+        camera_timer = Timer()
+
+        if overwrite_log:
+            step_interval: int = int(TARGET_DT / NUPLAN_DEFAULT_DT)
+            for nuplan_lidar_pc in nuplan_log_db.lidar_pc[::step_interval]:
+
+                timer.start()
+                lidar_pc_token: str = nuplan_lidar_pc.token
+                timer.log("1. lidar_pc_token")
+
+                timestamp = TimePoint.from_us(nuplan_lidar_pc.timestamp)
+                timer.log("1. time point")
+
+                ego_state = _extract_nuplan_ego_state(nuplan_lidar_pc)
+                timer.log("1. ego_state")
+
+                box_detections = _extract_nuplan_box_detections(nuplan_lidar_pc, source_log_path)
+                timer.log("1. box_detections")
+
+                traffic_lights = _extract_nuplan_traffic_lights(nuplan_log_db, lidar_pc_token)
+                timer.log("1. traffic_lights")
+
+                cameras = _extract_nuplan_cameras(
+                    nuplan_log_db=nuplan_log_db,
+                    nuplan_lidar_pc=nuplan_lidar_pc,
+                    source_log_path=source_log_path,
+                    nuplan_sensor_root=self._nuplan_sensor_root,
+                    dataset_converter_config=self.dataset_converter_config,
+                    timer=camera_timer,
+                )
+                timer.log("1. cameras")
+
+                lidars = _extract_nuplan_lidars(
+                    nuplan_lidar_pc=nuplan_lidar_pc,
+                    nuplan_sensor_root=self._nuplan_sensor_root,
+                    dataset_converter_config=self.dataset_converter_config,
+                )
+                timer.log("1. lidars")
+
+                scenario_tags = _extract_nuplan_scenario_tag(nuplan_log_db, lidar_pc_token)
+                timer.log("1. scenario_tags")
+
+                route_lane_group_ids = _extract_nuplan_route_lane_group_ids(nuplan_lidar_pc)
+                timer.log("1. route_lane_group_ids")
+
+                log_writer.write(
+                    token=lidar_pc_token,
+                    timestamp=timestamp,
+                    ego_state=ego_state,
+                    box_detections=box_detections,
+                    traffic_lights=traffic_lights,
+                    cameras=cameras,
+                    lidars=lidars,
+                    scenario_tags=scenario_tags,
+                    route_lane_group_ids=route_lane_group_ids,
+                )
+                timer.log("2. Write Data")
+                timer.end()
+
+                del nuplan_lidar_pc
+
+            logger.info(camera_timer)
+            log_writer.close()
+
+        nuplan_log_db.detach_tables()
+        nuplan_log_db.remove_ref()
+        assert nuplan_log_db._refcount == 0, "NuPlanDB still has references, potential memory leak."
+
+        del nuplan_log_db
+
+
+def _get_nuplan_camera_metadata(
+    source_log_path: Path,
+    dataset_converter_config: DatasetConverterConfig,
+) -> Dict[PinholeCameraType, PinholeCameraMetadata]:
+
+    def _get_camera_metadata(camera_type: PinholeCameraType) -> PinholeCameraMetadata:
+        cam = list(get_cameras(source_log_path, [str(NUPLAN_CAMERA_MAPPING[camera_type].value)]))[0]
+
+        intrinsics_camera_matrix = np.array(pickle.loads(cam.intrinsic), dtype=np.float64)  # array of shape (3, 3)
+        intrinsic = PinholeIntrinsics.from_camera_matrix(intrinsics_camera_matrix)
+
+        distortion_array = np.array(pickle.loads(cam.distortion), dtype=np.float64)  # array of shape (5,)
+        distortion = PinholeDistortion.from_array(distortion_array, copy=False)
+
+        return PinholeCameraMetadata(
+            camera_type=camera_type,
+            width=cam.width,
+            height=cam.height,
+            intrinsics=intrinsic,
+            distortion=distortion,
+        )
+
+    camera_metadata: Dict[PinholeCameraType, PinholeCameraMetadata] = {}
+    if dataset_converter_config.include_cameras:
+        for camera_type in NUPLAN_CAMERA_MAPPING.keys():
+            camera_metadata[camera_type] = _get_camera_metadata(camera_type)
+
+    return camera_metadata
+
+
+def _get_nuplan_lidar_metadata(
+    nuplan_sensor_root: Path,
+    log_name: str,
+    dataset_converter_config: DatasetConverterConfig,
+) -> Dict[LiDARType, LiDARMetadata]:
+
+    metadata: Dict[LiDARType, LiDARMetadata] = {}
+    log_lidar_folder = nuplan_sensor_root / log_name / "MergedPointCloud"
+
+    # NOTE: We first need to check if the LiDAR folder exists, as not all logs have LiDAR data
+    if log_lidar_folder.exists() and log_lidar_folder.is_dir() and dataset_converter_config.include_lidars:
+        metadata[LiDARType.LIDAR_MERGED] = LiDARMetadata(
+            lidar_type=LiDARType.LIDAR_MERGED,
+            lidar_index=NuPlanLidarIndex,
+            extrinsic=None,  # NOTE: LiDAR extrinsics are unknown
+        )
+    return metadata
+
+
+def _extract_nuplan_ego_state(nuplan_lidar_pc: LidarPc) -> EgoStateSE3:
+
+    vehicle_parameters = get_nuplan_chrysler_pacifica_parameters()
+    rear_axle_pose = StateSE3(
+        x=nuplan_lidar_pc.ego_pose.x,
+        y=nuplan_lidar_pc.ego_pose.y,
+        z=nuplan_lidar_pc.ego_pose.z,
+        qw=nuplan_lidar_pc.ego_pose.qw,
+        qx=nuplan_lidar_pc.ego_pose.qx,
+        qy=nuplan_lidar_pc.ego_pose.qy,
+        qz=nuplan_lidar_pc.ego_pose.qz,
+    )
+    center = rear_axle_se3_to_center_se3(rear_axle_se3=rear_axle_pose, vehicle_parameters=vehicle_parameters)
+    dynamic_state = DynamicStateSE3(
+        velocity=Vector3D(
+            x=nuplan_lidar_pc.ego_pose.vx,
+            y=nuplan_lidar_pc.ego_pose.vy,
+            z=nuplan_lidar_pc.ego_pose.vz,
+        ),
+        acceleration=Vector3D(
+            x=nuplan_lidar_pc.ego_pose.acceleration_x,
+            y=nuplan_lidar_pc.ego_pose.acceleration_y,
+            z=nuplan_lidar_pc.ego_pose.acceleration_z,
+        ),
+        angular_velocity=Vector3D(
+            x=nuplan_lidar_pc.ego_pose.angular_rate_x,
+            y=nuplan_lidar_pc.ego_pose.angular_rate_y,
+            z=nuplan_lidar_pc.ego_pose.angular_rate_z,
+        ),
+    )
+    return EgoStateSE3(
+        center_se3=center,
+        dynamic_state_se3=dynamic_state,
+        vehicle_parameters=vehicle_parameters,
+        timepoint=None,  # NOTE: Timepoint is not needed during writing, set to None
+    )
+
+
+def _extract_nuplan_box_detections(lidar_pc: LidarPc, source_log_file: Path) -> BoxDetectionWrapper:
+    box_detections: List[BoxDetectionSE3] = list(
+        get_box_detections_for_lidarpc_token_from_db(source_log_file, lidar_pc.token)
+    )
+    return BoxDetectionWrapper(box_detections=box_detections)
+
+
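+# _extract_nuplan_box_detections materializes the SQL-backed generator defined at the
+# bottom of this file; a sketch of equivalent standalone usage (the log file path is
+# illustrative only):
+#
+#   boxes = list(get_box_detections_for_lidarpc_token_from_db("/data/log.db", token))
+#   vehicles = [b for b in boxes if b.metadata.detection_type == DetectionType.VEHICLE]
+#
+# where DetectionType comes from d123.datatypes.detections.detection_types.
+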
+def _extract_nuplan_traffic_lights(log_db: NuPlanDB, lidar_pc_token: str) -> TrafficLightDetectionWrapper:
+
+    traffic_lights_detections: List[TrafficLightDetection] = [
+        TrafficLightDetection(
+            timepoint=None,  # NOTE: Timepoint is not needed during writing, set to None
+            lane_id=int(traffic_light.lane_connector_id),
+            status=NUPLAN_TRAFFIC_STATUS_DICT[traffic_light.status],
+        )
+        for traffic_light in log_db.traffic_light_status.select_many(lidar_pc_token=lidar_pc_token)
+    ]
+
+    return TrafficLightDetectionWrapper(traffic_light_detections=traffic_lights_detections)
+
+
+def _extract_nuplan_cameras(
+    nuplan_log_db: NuPlanDB,
+    nuplan_lidar_pc: LidarPc,
+    source_log_path: Path,
+    nuplan_sensor_root: Path,
+    dataset_converter_config: DatasetConverterConfig,
+    timer: Timer,
+) -> Dict[PinholeCameraType, Tuple[Union[str, bytes], StateSE3]]:
+
+    camera_dict: Dict[PinholeCameraType, Tuple[Union[str, bytes], StateSE3]] = {}
+    log_cam_infos = {camera.token: camera for camera in nuplan_log_db.log.cameras}
+
+    for camera_type, camera_channel in NUPLAN_CAMERA_MAPPING.items():
+        timer.start()
+        camera_data: Optional[Union[str, bytes]] = None
+        image_class = list(
+            get_images_from_lidar_tokens(source_log_path, [nuplan_lidar_pc.token], [str(camera_channel.value)])
+        )
+        timer.log("0. get image from lidar token")
+
+        if len(image_class) != 0:
+            image = image_class[0]
+            filename_jpg = nuplan_sensor_root / image.filename_jpg
+            if filename_jpg.exists() and filename_jpg.is_file():
+
+                # Pose handling adapted from MTGS:
+                # https://github.com/OpenDriveLab/MTGS/blob/main/nuplan_scripts/utils/nuplan_utils_custom.py#L117
+                # TODO: Refactor. The rolling-shutter-corrected image timestamp
+                # (image.timestamp + NUPLAN_ROLLING_SHUTTER_S.time_us) is not applied yet;
+                # the ego pose linked to the lidar_pc token is queried instead.
+                ego_pose = get_ego_pose_for_lidarpc_token_from_db(source_log_path, nuplan_lidar_pc.token)
+                timer.log("0. ego_pose")
+
+                img_e2g = ego_pose.transformation_matrix
+                g2e = nuplan_lidar_pc.ego_pose.trans_matrix_inv
+                img_e2e = g2e @ img_e2g
+                cam_info = log_cam_infos[image.camera_token]
+                c2img_e = cam_info.trans_matrix
+                c2e = img_e2e @ c2img_e
+                timer.log("0. matrix multiplications")
+
+                extrinsic = StateSE3.from_transformation_matrix(c2e)
+
+                if dataset_converter_config.camera_store_option == "path":
+                    camera_data = str(filename_jpg)
+                elif dataset_converter_config.camera_store_option == "binary":
+                    with open(filename_jpg, "rb") as f:
+                        camera_data = f.read()
+
+                camera_dict[camera_type] = camera_data, extrinsic
+                timer.log("0. store camera entry")
+        timer.end()
+
+    return camera_dict
+
+
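+# The extrinsic chain above composes three homogeneous transforms: c2img_e (camera ->
+# ego frame at image time), img_e2g (ego frame at image time -> global), and g2e
+# (global -> ego frame at lidar time). A minimal standalone sanity check of the same
+# composition (4x4 matrices; the variable names are illustrative):
+#
+#   import numpy as np
+#   c2e = g2e @ (img_e2g @ c2img_e)   # camera -> ego frame at lidar time
+#   assert np.allclose(c2e[3, :], np.array([0.0, 0.0, 0.0, 1.0]))
+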
big for loop") + # timer.end() + return camera_dict + + +def _extract_nuplan_lidars( + nuplan_lidar_pc: LidarPc, + nuplan_sensor_root: Path, + dataset_converter_config: DatasetConverterConfig, +) -> Dict[LiDARType, Optional[str]]: + + lidar: Optional[str] = None + lidar_full_path = nuplan_sensor_root / nuplan_lidar_pc.filename + if lidar_full_path.exists() and lidar_full_path.is_file(): + lidar = nuplan_lidar_pc.filename + + return {LiDARType.LIDAR_MERGED: lidar} + + +def _extract_nuplan_scenario_tag(nuplan_log_db: NuPlanDB, lidar_pc_token: str) -> List[str]: + scenario_tags = [ + scenario_tag.type for scenario_tag in nuplan_log_db.scenario_tag.select_many(lidar_pc_token=lidar_pc_token) + ] + if len(scenario_tags) == 0: + scenario_tags = ["unknown"] + return scenario_tags + + +def _extract_nuplan_route_lane_group_ids(nuplan_lidar_pc: LidarPc) -> List[int]: + return [ + int(roadblock_id) + for roadblock_id in str(nuplan_lidar_pc.scene.roadblock_ids).split(" ") + if len(roadblock_id) > 0 + ] + + +def get_ego_pose_for_lidarpc_token_from_db(log_file: str, token: str) -> StateSE3: + """ + Get the ego state associated with an individual lidar_pc token from the db. + + :param log_file: The log file to query. + :param token: The lidar_pc token to query. + :return: The EgoState associated with the LidarPC. + """ + query = """ + SELECT ep.x, + ep.y, + ep.z, + ep.qw, + ep.qx, + ep.qy, + ep.qz, + -- ego_pose and lidar_pc timestamps are not the same, even when linked by token! + -- use lidar_pc timestamp for backwards compatibility. + lp.timestamp, + ep.vx, + ep.vy, + ep.acceleration_x, + ep.acceleration_y + FROM ego_pose AS ep + INNER JOIN lidar_pc AS lp + ON lp.ego_pose_token = ep.token + WHERE lp.token = ? + """ + + row = execute_one(query, (bytearray.fromhex(token),), log_file) + if row is None: + return None + + # q = Quaternion(row["qw"], row["qx"], row["qy"], row["qz"]) + # return EgoState.build_from_rear_axle( + # StateSE2(row["x"], row["y"], q.yaw_pitch_roll[0]), + # tire_steering_angle=0.0, + # vehicle_parameters=get_pacifica_parameters(), + # time_point=TimePoint(row["timestamp"]), + # rear_axle_velocity_2d=StateVector2D(row["vx"], y=row["vy"]), + # rear_axle_acceleration_2d=StateVector2D(x=row["acceleration_x"], y=row["acceleration_y"]), + # ) + + return StateSE3(x=row["x"], y=row["y"], z=row["z"], qw=row["qw"], qx=row["qx"], qy=row["qy"], qz=row["qz"]) + + +def get_box_detections_for_lidarpc_token_from_db(log_file: str, token: str) -> Generator[BoxDetectionSE3, None, None]: + """ + Get all tracked objects for a given lidar_pc. + This includes both agents and static objects. + The values are returned in random order. + + For agents, this query will not obtain the future waypoints. + For that, call `get_future_waypoints_for_agents_from_db()` + with the tokens of the agents of interest. + + :param log_file: The log file to query. + :param token: The lidar_pc token for which to obtain the objects. + :return: The tracked objects associated with the token. + """ + query = """ + SELECT c.name AS category_name, + lb.x, + lb.y, + lb.z, + lb.yaw, + lb.width, + lb.length, + lb.height, + lb.vx, + lb.vy, + lb.vz, + lb.token, + lb.track_token, + lp.timestamp + FROM lidar_box AS lb + INNER JOIN track AS t + ON t.token = lb.track_token + INNER JOIN category AS c + ON c.token = t.category_token + INNER JOIN lidar_pc AS lp + ON lp.token = lb.lidar_pc_token + WHERE lp.token = ? 
+ """ + + for row in execute_many(query, (bytearray.fromhex(token),), log_file): + quaternion = EulerAngles(roll=DEFAULT_ROLL, pitch=DEFAULT_PITCH, yaw=row["yaw"]).quaternion + bounding_box = BoundingBoxSE3( + center=StateSE3( + x=row["x"], + y=row["y"], + z=row["z"], + qw=quaternion.qw, + qx=quaternion.qx, + qy=quaternion.qy, + qz=quaternion.qz, + ), + length=row["length"], # nuPlan uses length, + width=row["width"], # width, + height=row["height"], # height + ) + box_detection = BoxDetectionSE3( + metadata=BoxDetectionMetadata( + detection_type=NUPLAN_DETECTION_NAME_DICT[row["category_name"]], + timepoint=None, # NOTE: Timepoint is not needed during writing, set to None + track_token=row["track_token"].hex(), + confidence=None, # NOTE: Not currently written, requires refactoring + ), + bounding_box_se3=bounding_box, + velocity=Vector3D(x=row["vx"], y=row["vy"], z=row["vz"]), + ) + yield box_detection diff --git a/d123/datasets/nuplan/nuplan_load_sensor.py b/d123/conversion/datasets/nuplan/nuplan_load_sensor.py similarity index 100% rename from d123/datasets/nuplan/nuplan_load_sensor.py rename to d123/conversion/datasets/nuplan/nuplan_load_sensor.py diff --git a/d123/datasets/nuplan/nuplan_map_conversion.py b/d123/conversion/datasets/nuplan/nuplan_map_conversion.py similarity index 92% rename from d123/datasets/nuplan/nuplan_map_conversion.py rename to d123/conversion/datasets/nuplan/nuplan_map_conversion.py index 7b444e5c..7152f7da 100644 --- a/d123/datasets/nuplan/nuplan_map_conversion.py +++ b/d123/conversion/datasets/nuplan/nuplan_map_conversion.py @@ -1,9 +1,8 @@ # TODO: Refactor this mess. -import os import warnings from pathlib import Path -from typing import Dict, List, Optional +from typing import Dict, Optional, Union import geopandas as gpd import numpy as np @@ -11,41 +10,17 @@ import pyogrio from shapely.geometry import LineString -from d123.datasets.utils.maps.road_edge.road_edge_2d_utils import ( +# Suppress numpy runtime warnings for casting operations +np.seterr(invalid="ignore") + +from d123.conversion.datasets.nuplan.nuplan_constants import NUPLAN_MAP_GPKG_LAYERS, NUPLAN_MAP_LOCATION_FILES +from d123.conversion.utils.map_utils.road_edge.road_edge_2d_utils import ( get_road_edge_linear_rings, split_line_geometry_by_max_length, ) from d123.datatypes.maps.gpkg.utils import get_all_rows_with_value, get_row_with_value from d123.datatypes.maps.map_datatypes import MapLayer, RoadEdgeType, RoadLineType -MAP_FILES = { - "sg-one-north": "sg-one-north/9.17.1964/map.gpkg", - "us-ma-boston": "us-ma-boston/9.12.1817/map.gpkg", - "us-nv-las-vegas-strip": "us-nv-las-vegas-strip/9.15.1915/map.gpkg", - "us-pa-pittsburgh-hazelwood": "us-pa-pittsburgh-hazelwood/9.17.1937/map.gpkg", -} - -NUPLAN_MAPS_ROOT = os.environ["NUPLAN_MAPS_ROOT"] -MAP_LOCATIONS = {"sg-one-north", "us-ma-boston", "us-nv-las-vegas-strip", "us-pa-pittsburgh-hazelwood"} -GPKG_LAYERS: List[str] = [ - "baseline_paths", - "carpark_areas", - "generic_drivable_areas", - "dubins_nodes", - "lane_connectors", - "intersections", - "boundaries", - "crosswalks", - "lanes_polygons", - "lane_group_connectors", - "lane_groups_polygons", - "road_segments", - "stop_polygons", - "traffic_lights", - "walkways", - "gen_lane_connectors_scaled_width_polygons", -] - # 0: generic lane I guess. # 1: ending? # 3: bike lanes. 
@@ -60,15 +35,16 @@ class NuPlanMapConverter: - def __init__(self, map_path: Path) -> None: + def __init__(self, nuplan_map_root: Union[str, Path], map_path: Path) -> None: self._map_path: Path = map_path + self._nuplan_maps_root: Path = Path(nuplan_map_root) self._gdf: Optional[Dict[str, gpd.GeoDataFrame]] = None def convert(self, map_name: str = "us-pa-pittsburgh-hazelwood") -> None: - assert map_name in MAP_LOCATIONS, f"Map name {map_name} is not supported." + assert map_name in NUPLAN_MAP_LOCATION_FILES.keys(), f"Map name {map_name} is not supported." - map_file_path = Path(NUPLAN_MAPS_ROOT) / MAP_FILES[map_name] + map_file_path = self._nuplan_maps_root / NUPLAN_MAP_LOCATION_FILES[map_name] self._load_dataframes(map_file_path) dataframes: Dict[MapLayer, gpd.GeoDataFrame] = {} @@ -85,11 +61,15 @@ def convert(self, map_name: str = "us-pa-pittsburgh-hazelwood") -> None: if not self._map_path.exists(): self._map_path.mkdir(parents=True, exist_ok=True) - map_file_name = self._map_path / f"nuplan_{map_name}.gpkg" - with warnings.catch_warnings(): - warnings.filterwarnings("ignore", message="'crs' was not provided") - for layer, gdf in dataframes.items(): - gdf.to_file(map_file_name, layer=layer.serialize(), driver="GPKG", mode="a") + try: + map_file_name = self._map_path / f"nuplan_{map_name}.gpkg" + with warnings.catch_warnings(): + warnings.filterwarnings("ignore", message="'crs' was not provided") + for layer, gdf in dataframes.items(): + gdf.to_file(map_file_name, layer=layer.serialize(), driver="GPKG", mode="a") + except Exception as e: + print(f"Error occurred while converting map {map_name}: {e}") + print(map_file_name, map_file_path) def _load_dataframes(self, map_file_path: Path) -> None: @@ -98,7 +78,7 @@ def _load_dataframes(self, map_file_path: Path) -> None: projection_system = map_meta[map_meta["key"] == "projectedCoordSystem"]["value"].iloc[0] self._gdf = {} - for layer_name in GPKG_LAYERS: + for layer_name in NUPLAN_MAP_GPKG_LAYERS: with warnings.catch_warnings(): # Suppress the warnings from the GPKG operations below so that they don't spam the training logs. 
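+                # NOTE: the blanket filter only applies inside this catch_warnings block,
+                # so warnings emitted elsewhere are unaffected.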
warnings.filterwarnings("ignore") diff --git a/d123/datasets/nuplan/utils/log_splits.yaml b/d123/conversion/datasets/nuplan/utils/log_splits.yaml similarity index 100% rename from d123/datasets/nuplan/utils/log_splits.yaml rename to d123/conversion/datasets/nuplan/utils/log_splits.yaml diff --git a/d123/datasets/nuscenes/.gitkeep b/d123/conversion/datasets/nuscenes/.gitkeep similarity index 100% rename from d123/datasets/nuscenes/.gitkeep rename to d123/conversion/datasets/nuscenes/.gitkeep diff --git a/d123/datasets/wopd/waymo_map_utils/womp_boundary_utils.py b/d123/conversion/datasets/wopd/waymo_map_utils/womp_boundary_utils.py similarity index 100% rename from d123/datasets/wopd/waymo_map_utils/womp_boundary_utils.py rename to d123/conversion/datasets/wopd/waymo_map_utils/womp_boundary_utils.py diff --git a/d123/datasets/wopd/waymo_map_utils/wopd_map_utils.py b/d123/conversion/datasets/wopd/waymo_map_utils/wopd_map_utils.py similarity index 99% rename from d123/datasets/wopd/waymo_map_utils/wopd_map_utils.py rename to d123/conversion/datasets/wopd/waymo_map_utils/wopd_map_utils.py index 561d83f0..d98a0691 100644 --- a/d123/datasets/wopd/waymo_map_utils/wopd_map_utils.py +++ b/d123/conversion/datasets/wopd/waymo_map_utils/wopd_map_utils.py @@ -9,7 +9,7 @@ import shapely.geometry as geom from d123.common.utils.dependencies import check_dependencies -from d123.datasets.wopd.waymo_map_utils.womp_boundary_utils import extract_lane_boundaries +from d123.conversion.wopd.waymo_map_utils.womp_boundary_utils import extract_lane_boundaries from d123.datatypes.maps.map_datatypes import MapLayer, RoadEdgeType, RoadLineType from d123.geometry import Point3DIndex, Polyline3D from d123.geometry.utils.units import mph_to_mps diff --git a/d123/datasets/wopd/wopd_data_converter.py b/d123/conversion/datasets/wopd/wopd_data_converter.py similarity index 87% rename from d123/datasets/wopd/wopd_data_converter.py rename to d123/conversion/datasets/wopd/wopd_data_converter.py index c6a9f3a2..80f32d27 100644 --- a/d123/datasets/wopd/wopd_data_converter.py +++ b/d123/conversion/datasets/wopd/wopd_data_converter.py @@ -10,12 +10,12 @@ from d123.common.multithreading.worker_utils import WorkerPool, worker_map from d123.common.utils.dependencies import check_dependencies -from d123.datasets.raw_data_converter import DataConverterConfig, RawDataConverter -from d123.datasets.utils.arrow_ipc_writer import ArrowLogWriter -from d123.datasets.utils.sensor.camera_conventions import CameraConvention, convert_camera_convention -from d123.datasets.utils.sensor.lidar_index_registry import WopdLidarIndex -from d123.datasets.wopd.waymo_map_utils.wopd_map_utils import convert_wopd_map -from d123.datasets.wopd.wopd_utils import parse_range_image_and_camera_projection +from d123.conversion.abstract_dataset_converter import AbstractDataConverter, DatasetConverterConfig +from d123.conversion.utils.log_writer.arrow_ipc_writer import ArrowLogWriter +from d123.conversion.utils.sensor.camera_conventions import CameraConvention, convert_camera_convention +from d123.conversion.utils.sensor.lidar_index_registry import WopdLidarIndex +from d123.conversion.wopd.waymo_map_utils.wopd_map_utils import convert_wopd_map +from d123.conversion.wopd.wopd_utils import parse_range_image_and_camera_projection from d123.datatypes.detections.detection import BoxDetectionMetadata, BoxDetectionSE3, BoxDetectionWrapper from d123.datatypes.detections.detection_types import DetectionType from d123.datatypes.scene.scene_metadata import LogMetadata @@ -98,14 
+98,14 @@ def create_token(input_data: str) -> str: return hash_obj.hexdigest()[:16] -class WOPDDataConverter(RawDataConverter): +class WOPDDataConverter(AbstractDataConverter): def __init__( self, splits: List[str], log_path: Union[Path, str], - data_converter_config: DataConverterConfig, + dataset_converter_config: DatasetConverterConfig, ) -> None: - super().__init__(data_converter_config) + super().__init__(dataset_converter_config) for split in splits: assert ( split in self.get_available_splits() @@ -147,7 +147,7 @@ def convert_maps(self, worker: WorkerPool) -> None: worker_map( worker, - partial(convert_wopd_tfrecord_map_to_gpkg, data_converter_config=self.data_converter_config), + partial(convert_wopd_tfrecord_map_to_gpkg, dataset_converter_config=self.dataset_converter_config), log_args, ) @@ -163,13 +163,13 @@ def convert_logs(self, worker: WorkerPool) -> None: worker_map( worker, - partial(convert_wopd_tfrecord_log_to_arrow, data_converter_config=self.data_converter_config), + partial(convert_wopd_tfrecord_log_to_arrow, dataset_converter_config=self.dataset_converter_config), log_args, ) def convert_wopd_tfrecord_map_to_gpkg( - args: List[Dict[str, Union[List[str], List[Path]]]], data_converter_config: DataConverterConfig + args: List[Dict[str, Union[List[str], List[Path]]]], dataset_converter_config: DatasetConverterConfig ) -> List[Any]: for log_info in args: @@ -187,14 +187,14 @@ def convert_wopd_tfrecord_map_to_gpkg( log_name = str(initial_frame.context.name) map_file_path = D123_MAPS_ROOT / split / f"{log_name}.gpkg" - if data_converter_config.force_map_conversion or not map_file_path.exists(): + if dataset_converter_config.force_map_conversion or not map_file_path.exists(): map_file_path.unlink(missing_ok=True) convert_wopd_map(initial_frame, map_file_path) return [] def convert_wopd_tfrecord_log_to_arrow( - args: List[Dict[str, Union[List[str], List[Path]]]], data_converter_config: DataConverterConfig + args: List[Dict[str, Union[List[str], List[Path]]]], dataset_converter_config: DatasetConverterConfig ) -> List[Any]: for log_info in args: try: @@ -212,9 +212,9 @@ def convert_wopd_tfrecord_log_to_arrow( break log_name = str(initial_frame.context.name) - log_file_path = data_converter_config.output_path / split / f"{log_name}.arrow" + log_file_path = dataset_converter_config.output_path / split / f"{log_name}.arrow" - if data_converter_config.force_log_conversion or not log_file_path.exists(): + if dataset_converter_config.force_log_conversion or not log_file_path.exists(): log_file_path.unlink(missing_ok=True) if not log_file_path.parent.exists(): log_file_path.parent.mkdir(parents=True, exist_ok=True) @@ -226,19 +226,19 @@ def convert_wopd_tfrecord_log_to_arrow( location=None, # TODO: Add location information. 
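+                    # NOTE (assumption, not confirmed by this hunk): WOPD exposes a coarse
+                    # location via frame.context.stats.location, which could fill this field.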
timestep_seconds=TARGET_DT, vehicle_parameters=get_wopd_chrysler_pacifica_parameters(), - camera_metadata=get_wopd_camera_metadata(initial_frame, data_converter_config), - lidar_metadata=get_wopd_lidar_metadata(initial_frame, data_converter_config), + camera_metadata=get_wopd_camera_metadata(initial_frame, dataset_converter_config), + lidar_metadata=get_wopd_lidar_metadata(initial_frame, dataset_converter_config), map_has_z=True, map_is_local=True, ) log_writer = ArrowLogWriter( log_path=log_file_path, - data_converter_config=data_converter_config, + dataset_converter_config=dataset_converter_config, log_metadata=log_metadata, ) - _write_recording_table(dataset, log_writer, tf_record_path, data_converter_config) + _write_recording_table(dataset, log_writer, tf_record_path, dataset_converter_config) del dataset except Exception as e: @@ -251,11 +251,11 @@ def convert_wopd_tfrecord_log_to_arrow( def get_wopd_camera_metadata( - initial_frame: dataset_pb2.Frame, data_converter_config: DataConverterConfig + initial_frame: dataset_pb2.Frame, dataset_converter_config: DatasetConverterConfig ) -> Dict[PinholeCameraType, PinholeCameraMetadata]: cam_metadatas: Dict[PinholeCameraType, PinholeCameraMetadata] = {} - if data_converter_config.camera_store_option is not None: + if dataset_converter_config.camera_store_option is not None: for calibration in initial_frame.context.camera_calibrations: camera_type = WOPD_CAMERA_TYPES[calibration.name] # https://github.com/waymo-research/waymo-open-dataset/blob/master/src/waymo_open_dataset/dataset.proto#L96 @@ -276,11 +276,11 @@ def get_wopd_camera_metadata( def get_wopd_lidar_metadata( - initial_frame: dataset_pb2.Frame, data_converter_config: DataConverterConfig + initial_frame: dataset_pb2.Frame, dataset_converter_config: DatasetConverterConfig ) -> Dict[LiDARType, LiDARMetadata]: laser_metadatas: Dict[LiDARType, LiDARMetadata] = {} - if data_converter_config.lidar_store_option is not None: + if dataset_converter_config.lidar_store_option is not None: for laser_calibration in initial_frame.context.laser_calibrations: lidar_type = WOPD_LIDAR_TYPES[laser_calibration.name] @@ -303,7 +303,7 @@ def _write_recording_table( dataset: tf.data.TFRecordDataset, log_writer: ArrowLogWriter, tf_record_path: Path, - data_converter_config: DataConverterConfig, + dataset_converter_config: DatasetConverterConfig, ) -> None: dataset = tf.data.TFRecordDataset(tf_record_path, compression_type="") @@ -317,8 +317,8 @@ def _write_recording_table( ego_state=_extract_wopd_ego_state(frame), box_detections=_extract_wopd_box_detections(frame), traffic_lights=None, # NOTE: WOPD does not have traffic light information - cameras=_extract_wopd_cameras(frame, data_converter_config), - lidars=_extract_wopd_lidars(frame, data_converter_config), + cameras=_extract_wopd_cameras(frame, dataset_converter_config), + lidars=_extract_wopd_lidars(frame, dataset_converter_config), scenario_tags=None, # NOTE: WOPD does not have scenario tags route_lane_group_ids=None, # NOTE: WOPD does not have route information ) @@ -418,11 +418,11 @@ def _extract_wopd_box_detections(frame: dataset_pb2.Frame) -> BoxDetectionWrappe def _extract_wopd_cameras( - frame: dataset_pb2.Frame, data_converter_config: DataConverterConfig + frame: dataset_pb2.Frame, dataset_converter_config: DatasetConverterConfig ) -> Dict[PinholeCameraType, Tuple[Union[str, bytes], StateSE3]]: # TODO: Implement option to store images as paths - assert data_converter_config.camera_store_option == "binary", "Camera store option must be 'binary' 
for WOPD." + assert dataset_converter_config.camera_store_option == "binary", "Camera store option must be 'binary' for WOPD." camera_dict: Dict[PinholeCameraType, Tuple[Union[str, bytes], StateSE3]] = {} @@ -452,11 +452,11 @@ def _extract_wopd_cameras( def _extract_wopd_lidars( - frame: dataset_pb2.Frame, data_converter_config: DataConverterConfig + frame: dataset_pb2.Frame, dataset_converter_config: DatasetConverterConfig ) -> Dict[LiDARType, npt.NDArray[np.float32]]: # TODO: Implement option to store point clouds as paths - assert data_converter_config.lidar_store_option == "binary", "Lidar store option must be 'binary' for WOPD." + assert dataset_converter_config.lidar_store_option == "binary", "Lidar store option must be 'binary' for WOPD." (range_images, camera_projections, _, range_image_top_pose) = parse_range_image_and_camera_projection(frame) points, cp_points = frame_utils.convert_range_image_to_point_cloud( diff --git a/d123/datasets/wopd/wopd_utils.py b/d123/conversion/datasets/wopd/wopd_utils.py similarity index 100% rename from d123/datasets/wopd/wopd_utils.py rename to d123/conversion/datasets/wopd/wopd_utils.py diff --git a/d123/datasets/utils/maps/__init__.py b/d123/conversion/log_writer/__init__.py similarity index 100% rename from d123/datasets/utils/maps/__init__.py rename to d123/conversion/log_writer/__init__.py diff --git a/d123/conversion/log_writer/abstract_log_writer.py b/d123/conversion/log_writer/abstract_log_writer.py new file mode 100644 index 00000000..4b606ec9 --- /dev/null +++ b/d123/conversion/log_writer/abstract_log_writer.py @@ -0,0 +1,48 @@ +import abc +from typing import Any, Dict, List, Optional, Tuple + +from d123.conversion.dataset_converter_config import DatasetConverterConfig +from d123.datatypes.detections.detection import BoxDetectionWrapper, TrafficLightDetectionWrapper +from d123.datatypes.scene.scene_metadata import LogMetadata +from d123.datatypes.sensors.camera.pinhole_camera import PinholeCameraType +from d123.datatypes.sensors.lidar.lidar import LiDARType +from d123.datatypes.time.time_point import TimePoint +from d123.datatypes.vehicle_state.ego_state import EgoStateSE3 + + +class AbstractLogWriter(abc.ABC): + """Abstract base class for log writers. + + A log writer is responsible specifying the output format of a converted log. + This includes how data is organized, how it is serialized, and how it is stored. + """ + + @abc.abstractmethod + def reset( + self, + dataset_converter_config: DatasetConverterConfig, + log_metadata: LogMetadata, + ) -> None: + """ + Reset the log writer for a new log. 
+ """ + + @abc.abstractmethod + def write( + self, + token: str, + timestamp: TimePoint, + ego_state: Optional[EgoStateSE3] = None, + box_detections: Optional[BoxDetectionWrapper] = None, + traffic_lights: Optional[TrafficLightDetectionWrapper] = None, + cameras: Optional[Dict[PinholeCameraType, Tuple[Any, ...]]] = None, + lidars: Optional[Dict[LiDARType, Any]] = None, + scenario_tags: Optional[List[str]] = None, + route_lane_group_ids: Optional[List[int]] = None, + **kwargs, + ) -> None: + pass + + @abc.abstractmethod + def close(self) -> None: + pass diff --git a/d123/datasets/utils/arrow_ipc_writer.py b/d123/conversion/log_writer/arrow_log_writer.py similarity index 68% rename from d123/datasets/utils/arrow_ipc_writer.py rename to d123/conversion/log_writer/arrow_log_writer.py index fa0ed439..5fdac367 100644 --- a/d123/datasets/utils/arrow_ipc_writer.py +++ b/d123/conversion/log_writer/arrow_log_writer.py @@ -1,9 +1,9 @@ from pathlib import Path -from typing import Any, Dict, List, Optional, Tuple, Union +from typing import Any, Dict, List, Literal, Optional, Tuple import pyarrow as pa -from d123.datasets.raw_data_converter import DataConverterConfig +from d123.conversion.abstract_dataset_converter import AbstractLogWriter, DatasetConverterConfig from d123.datatypes.detections.detection import BoxDetectionWrapper, TrafficLightDetectionWrapper from d123.datatypes.scene.arrow.utils.arrow_metadata_utils import add_log_metadata_to_arrow_schema from d123.datatypes.scene.scene_metadata import LogMetadata @@ -14,105 +14,60 @@ from d123.geometry import BoundingBoxSE3Index, StateSE3, StateSE3Index, Vector3DIndex -class ArrowLogWriter: +class ArrowLogWriter(AbstractLogWriter): def __init__( self, - log_path: Union[str, Path], - data_converter_config: DataConverterConfig, - log_metadata: LogMetadata, + compression: Optional[Literal["lz4", "zstd"]] = None, + compression_level: Optional[int] = None, ) -> None: - self._log_path = Path(log_path) - self._data_converter_config = data_converter_config - self._log_metadata = log_metadata - - self._schema: pa.Schema = self._build_schema() - - def _build_schema(self) -> pa.Schema: - - schema_list: List[Tuple[str, pa.DataType]] = [ - ("token", pa.string()), - ("timestamp", pa.int64()), - ] - - # -------------------------------------------------------------------------------------------------------------- - # Ego State - # -------------------------------------------------------------------------------------------------------------- - if self._data_converter_config.include_ego: - schema_list.extend( - [ - ("ego_state", pa.list_(pa.float64(), len(EgoStateSE3Index))), - ] + self._compression = compression + self._compression_level = compression_level + + # Loaded during .reset() and cleared during .close() + self._dataset_converter_config: Optional[DatasetConverterConfig] = None + self._log_metadata: Optional[LogMetadata] = None + self._schema: Optional[LogMetadata] = None + self._source: Optional[pa.NativeFile] = None + self._record_batch_writer: Optional[pa.ipc.RecordBatchWriter] = None + + def reset(self, dataset_converter_config: DatasetConverterConfig, log_metadata: LogMetadata) -> bool: + + overwrite_log: bool = False + sink_log_path: Path = ( + dataset_converter_config.output_path / log_metadata.split / f"{log_metadata.log_name}.arrow" + ) + + # Check if the log file already exists or needs to be overwritten + if not sink_log_path.exists() or dataset_converter_config.force_log_conversion: + overwrite_log = True + + # Delete the file if it exists (no error 
if it doesn't) + sink_log_path.unlink(missing_ok=True) + if not sink_log_path.parent.exists(): + sink_log_path.parent.mkdir(parents=True, exist_ok=True) + + # Load config and metadata + self._dataset_converter_config = dataset_converter_config + self._log_metadata = log_metadata + self._schema = self._build_schema(dataset_converter_config, log_metadata) + + # Initialize Arrow IPC writer, optionally with compression + # NOTE @DanielDauner: I tried some compression settings, which did not lead to significant reductions. + compression = ( + pa.Codec(self._compression, compression_level=self._compression_level) + if self._compression is not None + else None ) - # -------------------------------------------------------------------------------------------------------------- - # Box Detections - # -------------------------------------------------------------------------------------------------------------- - if self._data_converter_config.include_box_detections: - schema_list.extend( - [ - ("box_detection_state", pa.list_(pa.list_(pa.float64(), len(BoundingBoxSE3Index)))), - ("box_detection_velocity", pa.list_(pa.list_(pa.float64(), len(Vector3DIndex)))), - ("box_detection_token", pa.list_(pa.string())), - ("box_detection_type", pa.list_(pa.int16())), - ] - ) + options = pa.ipc.IpcWriteOptions(compression=compression) + self._source = pa.OSFile(str(sink_log_path), "wb") + self._record_batch_writer = pa.ipc.new_file(self._source, schema=self._schema, options=options) - # -------------------------------------------------------------------------------------------------------------- - # Traffic Lights - # -------------------------------------------------------------------------------------------------------------- - if self._data_converter_config.include_traffic_lights: - schema_list.extend( - [ - ("traffic_light_ids", pa.list_(pa.int64())), - ("traffic_light_types", pa.list_(pa.int16())), - ] - ) + return overwrite_log - # -------------------------------------------------------------------------------------------------------------- - # Cameras - # -------------------------------------------------------------------------------------------------------------- - if self._data_converter_config.include_cameras: - for camera_type in self._log_metadata.camera_metadata.keys(): - camera_name = camera_type.serialize() - - # Depending on the storage option, define the schema for camera data - if self._data_converter_config.camera_store_option == "path": - schema_list.append((f"{camera_name}_data", pa.string())) - - elif self._data_converter_config.camera_store_option == "binary": - schema_list.append((f"{camera_name}_data", pa.binary())) - - # Add camera pose - schema_list.append((f"{camera_name}_extrinsic", pa.list_(pa.float64(), len(StateSE3Index)))) - - # -------------------------------------------------------------------------------------------------------------- - # LiDARs - # -------------------------------------------------------------------------------------------------------------- - if self._data_converter_config.include_lidars: - for lidar_type in self._log_metadata.lidar_metadata.keys(): - lidar_name = lidar_type.serialize() - - # Depending on the storage option, define the schema for LiDAR data - if self._data_converter_config.lidar_store_option == "path": - schema_list.append((f"{lidar_name}_data", pa.string())) - - elif self._data_converter_config.lidar_store_option == "binary": - schema_list.append((f"{lidar_name}_data", pa.binary())) - - # 
-------------------------------------------------------------------------------------------------------------- - # Miscellaneous (Scenario Tags / Route) - # -------------------------------------------------------------------------------------------------------------- - if self._data_converter_config.include_scenario_tags: - schema_list.append(("scenario_tags", pa.list_(pa.string()))) - - if self._data_converter_config.include_route: - schema_list.append(("route_lane_group_ids", pa.list_(pa.int64()))) - - return add_log_metadata_to_arrow_schema(pa.schema(schema_list), self._log_metadata) - - def add_row( + def write( self, token: str, timestamp: TimePoint, @@ -124,9 +79,12 @@ def add_row( scenario_tags: Optional[List[str]] = None, route_lane_group_ids: Optional[List[int]] = None, ) -> None: - if not hasattr(self, "_sink"): - self._sink = pa.OSFile(str(self._log_path), "wb") - self._writer = pa.ipc.new_file(self._sink, self._schema) + + assert self._dataset_converter_config is not None, "Log writer is not initialized." + assert self._log_metadata is not None, "Log writer is not initialized." + assert self._schema is not None, "Log writer is not initialized." + assert self._record_batch_writer is not None, "Log writer is not initialized." + assert self._source is not None, "Log writer is not initialized." record_batch_data = { "token": [token], @@ -136,14 +94,14 @@ def add_row( # -------------------------------------------------------------------------------------------------------------- # Ego State # -------------------------------------------------------------------------------------------------------------- - if self._data_converter_config.include_ego: + if self._dataset_converter_config.include_ego: assert ego_state is not None, "Ego state is required but not provided." record_batch_data["ego_state"] = [ego_state.array] # -------------------------------------------------------------------------------------------------------------- # Box Detections # -------------------------------------------------------------------------------------------------------------- - if self._data_converter_config.include_box_detections: + if self._dataset_converter_config.include_box_detections: assert box_detections is not None, "Box detections are required but not provided." # TODO: Figure out more elegant way without for-loops. @@ -168,7 +126,7 @@ def add_row( # -------------------------------------------------------------------------------------------------------------- # Traffic Lights # -------------------------------------------------------------------------------------------------------------- - if self._data_converter_config.include_traffic_lights: + if self._dataset_converter_config.include_traffic_lights: assert traffic_lights is not None, "Traffic light detections are required but not provided." # TODO: Figure out more elegant way without for-loops. @@ -187,7 +145,7 @@ def add_row( # -------------------------------------------------------------------------------------------------------------- # Cameras # -------------------------------------------------------------------------------------------------------------- - if self._data_converter_config.include_cameras: + if self._dataset_converter_config.include_cameras: assert cameras is not None, "Camera data is required but not provided." 
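+            # Check that the provided cameras match the cameras declared in the log metadata.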
provided_cameras = set(cameras.keys()) expected_cameras = set(self._log_metadata.camera_metadata.keys()) @@ -207,7 +165,7 @@ def add_row( # -------------------------------------------------------------------------------------------------------------- # LiDARs # -------------------------------------------------------------------------------------------------------------- - if self._data_converter_config.include_lidars: + if self._dataset_converter_config.include_lidars: assert lidars is not None, "LiDAR data is required but not provided." provided_lidars = set(lidars.keys()) expected_lidars = set(self._log_metadata.lidar_metadata.keys()) @@ -224,19 +182,110 @@ def add_row( # -------------------------------------------------------------------------------------------------------------- # Miscellaneous (Scenario Tags / Route) # -------------------------------------------------------------------------------------------------------------- - if self._data_converter_config.include_scenario_tags: + if self._dataset_converter_config.include_scenario_tags: assert scenario_tags is not None, "Scenario tags are required but not provided." record_batch_data["scenario_tags"] = [scenario_tags] - if self._data_converter_config.include_route: + if self._dataset_converter_config.include_route: assert route_lane_group_ids is not None, "Route lane group IDs are required but not provided." record_batch_data["route_lane_group_ids"] = [route_lane_group_ids] record_batch = pa.record_batch(record_batch_data, schema=self._schema) - self._writer.write_batch(record_batch) + self._record_batch_writer.write_batch(record_batch) def close(self) -> None: - if hasattr(self, "_writer"): - self._writer.close() - if hasattr(self, "_sink"): - self._sink.close() + if self._record_batch_writer is not None: + self._record_batch_writer.close() + self._record_batch_writer: Optional[pa.ipc.RecordBatchWriter] = None + + if self._source is not None: + self._source.close() + self._source: Optional[pa.NativeFile] = None + + self._dataset_converter_config: Optional[DatasetConverterConfig] = None + self._log_metadata: Optional[LogMetadata] = None + self._schema: Optional[LogMetadata] = None + + @staticmethod + def _build_schema(dataset_converter_config: DatasetConverterConfig, log_metadata: LogMetadata) -> pa.Schema: + + schema_list: List[Tuple[str, pa.DataType]] = [ + ("token", pa.string()), + ("timestamp", pa.int64()), + ] + + # -------------------------------------------------------------------------------------------------------------- + # Ego State + # -------------------------------------------------------------------------------------------------------------- + if dataset_converter_config.include_ego: + schema_list.extend( + [ + ("ego_state", pa.list_(pa.float64(), len(EgoStateSE3Index))), + ] + ) + + # -------------------------------------------------------------------------------------------------------------- + # Box Detections + # -------------------------------------------------------------------------------------------------------------- + if dataset_converter_config.include_box_detections: + schema_list.extend( + [ + ("box_detection_state", pa.list_(pa.list_(pa.float64(), len(BoundingBoxSE3Index)))), + ("box_detection_velocity", pa.list_(pa.list_(pa.float64(), len(Vector3DIndex)))), + ("box_detection_token", pa.list_(pa.string())), + ("box_detection_type", pa.list_(pa.int16())), + ] + ) + + # -------------------------------------------------------------------------------------------------------------- + # Traffic Lights + # 
-------------------------------------------------------------------------------------------------------------- + if dataset_converter_config.include_traffic_lights: + schema_list.extend( + [ + ("traffic_light_ids", pa.list_(pa.int64())), + ("traffic_light_types", pa.list_(pa.int16())), + ] + ) + + # -------------------------------------------------------------------------------------------------------------- + # Cameras + # -------------------------------------------------------------------------------------------------------------- + if dataset_converter_config.include_cameras: + for camera_type in log_metadata.camera_metadata.keys(): + camera_name = camera_type.serialize() + + # Depending on the storage option, define the schema for camera data + if dataset_converter_config.camera_store_option == "path": + schema_list.append((f"{camera_name}_data", pa.string())) + + elif dataset_converter_config.camera_store_option == "binary": + schema_list.append((f"{camera_name}_data", pa.binary())) + + # Add camera pose + schema_list.append((f"{camera_name}_extrinsic", pa.list_(pa.float64(), len(StateSE3Index)))) + + # -------------------------------------------------------------------------------------------------------------- + # LiDARs + # -------------------------------------------------------------------------------------------------------------- + if dataset_converter_config.include_lidars: + for lidar_type in log_metadata.lidar_metadata.keys(): + lidar_name = lidar_type.serialize() + + # Depending on the storage option, define the schema for LiDAR data + if dataset_converter_config.lidar_store_option == "path": + schema_list.append((f"{lidar_name}_data", pa.string())) + + elif dataset_converter_config.lidar_store_option == "binary": + schema_list.append((f"{lidar_name}_data", pa.binary())) + + # -------------------------------------------------------------------------------------------------------------- + # Miscellaneous (Scenario Tags / Route) + # -------------------------------------------------------------------------------------------------------------- + if dataset_converter_config.include_scenario_tags: + schema_list.append(("scenario_tags", pa.list_(pa.string()))) + + if dataset_converter_config.include_route: + schema_list.append(("route_lane_group_ids", pa.list_(pa.int64()))) + + return add_log_metadata_to_arrow_schema(pa.schema(schema_list), log_metadata) diff --git a/d123/datasets/utils/maps/opendrive/__init__.py b/d123/conversion/map_writer/abstract_map_writer.py similarity index 100% rename from d123/datasets/utils/maps/opendrive/__init__.py rename to d123/conversion/map_writer/abstract_map_writer.py diff --git a/d123/datasets/utils/maps/opendrive/parser/__init__.py b/d123/conversion/map_writer/gpkg_map_writer.py similarity index 100% rename from d123/datasets/utils/maps/opendrive/parser/__init__.py rename to d123/conversion/map_writer/gpkg_map_writer.py diff --git a/d123/datasets/utils/maps/road_edge/__init__.py b/d123/conversion/utils/__init__.py similarity index 100% rename from d123/datasets/utils/maps/road_edge/__init__.py rename to d123/conversion/utils/__init__.py diff --git a/d123/script/config/dataset_conversion/__init__.py b/d123/conversion/utils/map_utils/__init__.py similarity index 100% rename from d123/script/config/dataset_conversion/__init__.py rename to d123/conversion/utils/map_utils/__init__.py diff --git a/d123/script/config/datasets/__init__.py b/d123/conversion/utils/map_utils/opendrive/__init__ copy.py similarity index 100% rename from 
d123/script/config/datasets/__init__.py rename to d123/conversion/utils/map_utils/opendrive/__init__ copy.py diff --git a/d123/conversion/utils/map_utils/opendrive/__init__.py b/d123/conversion/utils/map_utils/opendrive/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/d123/datasets/utils/maps/opendrive/opendrive_map_conversion.py b/d123/conversion/utils/map_utils/opendrive/opendrive_map_conversion.py similarity index 95% rename from d123/datasets/utils/maps/opendrive/opendrive_map_conversion.py rename to d123/conversion/utils/map_utils/opendrive/opendrive_map_conversion.py index c011a07f..662d08cc 100644 --- a/d123/datasets/utils/maps/opendrive/opendrive_map_conversion.py +++ b/d123/conversion/utils/map_utils/opendrive/opendrive_map_conversion.py @@ -10,19 +10,19 @@ import shapely from shapely.ops import polygonize, unary_union -from d123.datasets.maps.map_datatypes import MapLayer, RoadEdgeType, RoadLineType -from d123.datasets.utils.maps.opendrive.parser.opendrive import Junction, OpenDrive -from d123.datasets.utils.maps.opendrive.utils.collection import collect_element_helpers -from d123.datasets.utils.maps.opendrive.utils.id_mapping import IntIDMapping -from d123.datasets.utils.maps.opendrive.utils.lane_helper import ( +from d123.conversion.maps.map_datatypes import MapLayer, RoadEdgeType, RoadLineType +from d123.conversion.utils.map_utils.opendrive.parser.opendrive import Junction, OpenDrive +from d123.conversion.utils.map_utils.opendrive.utils.collection import collect_element_helpers +from d123.conversion.utils.map_utils.opendrive.utils.id_mapping import IntIDMapping +from d123.conversion.utils.map_utils.opendrive.utils.lane_helper import ( OpenDriveLaneGroupHelper, OpenDriveLaneHelper, ) -from d123.datasets.utils.maps.opendrive.utils.objects_helper import ( +from d123.conversion.utils.map_utils.opendrive.utils.objects_helper import ( OpenDriveObjectHelper, ) -from d123.datasets.utils.maps.road_edge.road_edge_2d_utils import split_line_geometry_by_max_length -from d123.datasets.utils.maps.road_edge.road_edge_3d_utils import get_road_edges_3d_from_gdf +from d123.conversion.utils.map_utils.road_edge.road_edge_2d_utils import split_line_geometry_by_max_length +from d123.conversion.utils.map_utils.road_edge.road_edge_3d_utils import get_road_edges_3d_from_gdf logger = logging.getLogger(__name__) D123_MAPS_ROOT = Path(os.environ.get("D123_MAPS_ROOT")) diff --git a/d123/conversion/utils/map_utils/opendrive/parser/__init__.py b/d123/conversion/utils/map_utils/opendrive/parser/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/d123/datasets/utils/maps/opendrive/parser/elevation.py b/d123/conversion/utils/map_utils/opendrive/parser/elevation.py similarity index 97% rename from d123/datasets/utils/maps/opendrive/parser/elevation.py rename to d123/conversion/utils/map_utils/opendrive/parser/elevation.py index a529bc0b..c9339091 100644 --- a/d123/datasets/utils/maps/opendrive/parser/elevation.py +++ b/d123/conversion/utils/map_utils/opendrive/parser/elevation.py @@ -4,7 +4,7 @@ from typing import List, Optional from xml.etree.ElementTree import Element -from d123.datasets.utils.maps.opendrive.parser.polynomial import Polynomial +from d123.conversion.utils.map_utils.opendrive.parser.polynomial import Polynomial @dataclass diff --git a/d123/datasets/utils/maps/opendrive/parser/geometry.py b/d123/conversion/utils/map_utils/opendrive/parser/geometry.py similarity index 100% rename from d123/datasets/utils/maps/opendrive/parser/geometry.py rename to 
d123/conversion/utils/map_utils/opendrive/parser/geometry.py diff --git a/d123/datasets/utils/maps/opendrive/parser/lane.py b/d123/conversion/utils/map_utils/opendrive/parser/lane.py similarity index 98% rename from d123/datasets/utils/maps/opendrive/parser/lane.py rename to d123/conversion/utils/map_utils/opendrive/parser/lane.py index e27de490..4a8341a3 100644 --- a/d123/datasets/utils/maps/opendrive/parser/lane.py +++ b/d123/conversion/utils/map_utils/opendrive/parser/lane.py @@ -4,7 +4,7 @@ from typing import List, Optional from xml.etree.ElementTree import Element -from d123.datasets.utils.maps.opendrive.parser.polynomial import Polynomial +from d123.conversion.utils.map_utils.opendrive.parser.polynomial import Polynomial @dataclass diff --git a/d123/datasets/utils/maps/opendrive/parser/objects.py b/d123/conversion/utils/map_utils/opendrive/parser/objects.py similarity index 100% rename from d123/datasets/utils/maps/opendrive/parser/objects.py rename to d123/conversion/utils/map_utils/opendrive/parser/objects.py diff --git a/d123/datasets/utils/maps/opendrive/parser/opendrive.py b/d123/conversion/utils/map_utils/opendrive/parser/opendrive.py similarity index 99% rename from d123/datasets/utils/maps/opendrive/parser/opendrive.py rename to d123/conversion/utils/map_utils/opendrive/parser/opendrive.py index 4ed7bb33..586a72d8 100644 --- a/d123/datasets/utils/maps/opendrive/parser/opendrive.py +++ b/d123/conversion/utils/map_utils/opendrive/parser/opendrive.py @@ -6,7 +6,7 @@ from typing import List, Literal, Optional from xml.etree.ElementTree import Element, parse -from d123.datasets.utils.maps.opendrive.parser.road import Road +from d123.conversion.utils.map_utils.opendrive.parser.road import Road @dataclass diff --git a/d123/datasets/utils/maps/opendrive/parser/polynomial.py b/d123/conversion/utils/map_utils/opendrive/parser/polynomial.py similarity index 100% rename from d123/datasets/utils/maps/opendrive/parser/polynomial.py rename to d123/conversion/utils/map_utils/opendrive/parser/polynomial.py diff --git a/d123/datasets/utils/maps/opendrive/parser/reference.py b/d123/conversion/utils/map_utils/opendrive/parser/reference.py similarity index 94% rename from d123/datasets/utils/maps/opendrive/parser/reference.py rename to d123/conversion/utils/map_utils/opendrive/parser/reference.py index ed19a98d..b94ddc5a 100644 --- a/d123/datasets/utils/maps/opendrive/parser/reference.py +++ b/d123/conversion/utils/map_utils/opendrive/parser/reference.py @@ -9,10 +9,10 @@ import numpy as np import numpy.typing as npt -from d123.datasets.utils.maps.opendrive.parser.elevation import Elevation -from d123.datasets.utils.maps.opendrive.parser.geometry import Arc, Geometry, Line, Spiral -from d123.datasets.utils.maps.opendrive.parser.lane import LaneOffset, Width -from d123.datasets.utils.maps.opendrive.parser.polynomial import Polynomial +from d123.conversion.utils.map_utils.opendrive.parser.elevation import Elevation +from d123.conversion.utils.map_utils.opendrive.parser.geometry import Arc, Geometry, Line, Spiral +from d123.conversion.utils.map_utils.opendrive.parser.lane import LaneOffset, Width +from d123.conversion.utils.map_utils.opendrive.parser.polynomial import Polynomial from d123.geometry import Point3DIndex, StateSE2Index TOLERANCE: Final[float] = 1e-3 diff --git a/d123/datasets/utils/maps/opendrive/parser/road.py b/d123/conversion/utils/map_utils/opendrive/parser/road.py similarity index 93% rename from d123/datasets/utils/maps/opendrive/parser/road.py rename to 
d123/conversion/utils/map_utils/opendrive/parser/road.py index 28b5b679..a763911b 100644 --- a/d123/datasets/utils/maps/opendrive/parser/road.py +++ b/d123/conversion/utils/map_utils/opendrive/parser/road.py @@ -4,10 +4,10 @@ from typing import List, Optional from xml.etree.ElementTree import Element -from d123.datasets.utils.maps.opendrive.parser.elevation import ElevationProfile, LateralProfile -from d123.datasets.utils.maps.opendrive.parser.lane import Lanes -from d123.datasets.utils.maps.opendrive.parser.objects import Object -from d123.datasets.utils.maps.opendrive.parser.reference import PlanView +from d123.conversion.utils.map_utils.opendrive.parser.elevation import ElevationProfile, LateralProfile +from d123.conversion.utils.map_utils.opendrive.parser.lane import Lanes +from d123.conversion.utils.map_utils.opendrive.parser.objects import Object +from d123.conversion.utils.map_utils.opendrive.parser.reference import PlanView @dataclass diff --git a/d123/conversion/utils/map_utils/opendrive/utils/__init__.py b/d123/conversion/utils/map_utils/opendrive/utils/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/d123/datasets/utils/maps/opendrive/utils/collection.py b/d123/conversion/utils/map_utils/opendrive/utils/collection.py similarity index 96% rename from d123/datasets/utils/maps/opendrive/utils/collection.py rename to d123/conversion/utils/map_utils/opendrive/utils/collection.py index bf28c997..6b78f3b1 100644 --- a/d123/datasets/utils/maps/opendrive/utils/collection.py +++ b/d123/conversion/utils/map_utils/opendrive/utils/collection.py @@ -3,21 +3,21 @@ import numpy as np -from d123.datasets.utils.maps.opendrive.parser.opendrive import Junction, OpenDrive -from d123.datasets.utils.maps.opendrive.parser.reference import ReferenceLine -from d123.datasets.utils.maps.opendrive.parser.road import Road -from d123.datasets.utils.maps.opendrive.utils.id_system import ( +from d123.conversion.utils.map_utils.opendrive.parser.opendrive import Junction, OpenDrive +from d123.conversion.utils.map_utils.opendrive.parser.reference import ReferenceLine +from d123.conversion.utils.map_utils.opendrive.parser.road import Road +from d123.conversion.utils.map_utils.opendrive.utils.id_system import ( build_lane_id, derive_lane_section_id, lane_group_id_from_lane_id, road_id_from_lane_group_id, ) -from d123.datasets.utils.maps.opendrive.utils.lane_helper import ( +from d123.conversion.utils.map_utils.opendrive.utils.lane_helper import ( OpenDriveLaneGroupHelper, OpenDriveLaneHelper, lane_section_to_lane_helpers, ) -from d123.datasets.utils.maps.opendrive.utils.objects_helper import OpenDriveObjectHelper, get_object_helper +from d123.conversion.utils.map_utils.opendrive.utils.objects_helper import OpenDriveObjectHelper, get_object_helper logger = logging.getLogger(__name__) diff --git a/d123/datasets/utils/maps/opendrive/utils/id_mapping.py b/d123/conversion/utils/map_utils/opendrive/utils/id_mapping.py similarity index 100% rename from d123/datasets/utils/maps/opendrive/utils/id_mapping.py rename to d123/conversion/utils/map_utils/opendrive/utils/id_mapping.py diff --git a/d123/datasets/utils/maps/opendrive/utils/id_system.py b/d123/conversion/utils/map_utils/opendrive/utils/id_system.py similarity index 100% rename from d123/datasets/utils/maps/opendrive/utils/id_system.py rename to d123/conversion/utils/map_utils/opendrive/utils/id_system.py diff --git a/d123/datasets/utils/maps/opendrive/utils/lane_helper.py b/d123/conversion/utils/map_utils/opendrive/utils/lane_helper.py 
similarity index 96% rename from d123/datasets/utils/maps/opendrive/utils/lane_helper.py rename to d123/conversion/utils/map_utils/opendrive/utils/lane_helper.py index 34d57054..76ef62fa 100644 --- a/d123/datasets/utils/maps/opendrive/utils/lane_helper.py +++ b/d123/conversion/utils/map_utils/opendrive/utils/lane_helper.py @@ -6,10 +6,10 @@ import numpy.typing as npt import shapely -from d123.datasets.utils.maps.opendrive.parser.lane import Lane, LaneSection -from d123.datasets.utils.maps.opendrive.parser.reference import ReferenceLine -from d123.datasets.utils.maps.opendrive.parser.road import RoadType -from d123.datasets.utils.maps.opendrive.utils.id_system import ( +from d123.conversion.utils.map_utils.opendrive.parser.lane import Lane, LaneSection +from d123.conversion.utils.map_utils.opendrive.parser.reference import ReferenceLine +from d123.conversion.utils.map_utils.opendrive.parser.road import RoadType +from d123.conversion.utils.map_utils.opendrive.utils.id_system import ( derive_lane_group_id, derive_lane_id, lane_group_id_from_lane_id, diff --git a/d123/datasets/utils/maps/opendrive/utils/objects_helper.py b/d123/conversion/utils/map_utils/opendrive/utils/objects_helper.py similarity index 94% rename from d123/datasets/utils/maps/opendrive/utils/objects_helper.py rename to d123/conversion/utils/map_utils/opendrive/utils/objects_helper.py index ff478149..88a11bcf 100644 --- a/d123/datasets/utils/maps/opendrive/utils/objects_helper.py +++ b/d123/conversion/utils/map_utils/opendrive/utils/objects_helper.py @@ -5,8 +5,8 @@ import numpy.typing as npt import shapely -from d123.datasets.utils.maps.opendrive.parser.objects import Object -from d123.datasets.utils.maps.opendrive.parser.reference import ReferenceLine +from d123.conversion.utils.map_utils.opendrive.parser.objects import Object +from d123.conversion.utils.map_utils.opendrive.parser.reference import ReferenceLine from d123.geometry import Point2D, Point3D, Point3DIndex, StateSE2 from d123.geometry.transform.tranform_2d import translate_along_yaw from d123.geometry.utils.rotation_utils import normalize_angle diff --git a/d123/conversion/utils/map_utils/road_edge/__init__.py b/d123/conversion/utils/map_utils/road_edge/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/d123/datasets/utils/maps/road_edge/road_edge_2d_utils.py b/d123/conversion/utils/map_utils/road_edge/road_edge_2d_utils.py similarity index 100% rename from d123/datasets/utils/maps/road_edge/road_edge_2d_utils.py rename to d123/conversion/utils/map_utils/road_edge/road_edge_2d_utils.py diff --git a/d123/datasets/utils/maps/road_edge/road_edge_3d_utils.py b/d123/conversion/utils/map_utils/road_edge/road_edge_3d_utils.py similarity index 99% rename from d123/datasets/utils/maps/road_edge/road_edge_3d_utils.py rename to d123/conversion/utils/map_utils/road_edge/road_edge_3d_utils.py index ceeee62a..2cf843a3 100644 --- a/d123/datasets/utils/maps/road_edge/road_edge_3d_utils.py +++ b/d123/conversion/utils/map_utils/road_edge/road_edge_3d_utils.py @@ -9,7 +9,7 @@ import shapely from shapely.geometry import LineString -from d123.datasets.utils.maps.road_edge.road_edge_2d_utils import get_road_edge_linear_rings +from d123.conversion.utils.map_utils.road_edge.road_edge_2d_utils import get_road_edge_linear_rings from d123.geometry import Point3DIndex from d123.geometry.occupancy_map import OccupancyMap2D diff --git a/d123/datasets/utils/sensor/camera_conventions.py b/d123/conversion/utils/sensor_utils/camera_conventions.py similarity index 100% rename 
from d123/datasets/utils/sensor/camera_conventions.py rename to d123/conversion/utils/sensor_utils/camera_conventions.py diff --git a/d123/datasets/utils/sensor/lidar_index_registry.py b/d123/conversion/utils/sensor_utils/lidar_index_registry.py similarity index 96% rename from d123/datasets/utils/sensor/lidar_index_registry.py rename to d123/conversion/utils/sensor_utils/lidar_index_registry.py index 0df92cff..ff76c19c 100644 --- a/d123/datasets/utils/sensor/lidar_index_registry.py +++ b/d123/conversion/utils/sensor_utils/lidar_index_registry.py @@ -35,7 +35,7 @@ class DefaultLidarIndex(LiDARIndex): @register_lidar_index -class NuplanLidarIndex(LiDARIndex): +class NuPlanLidarIndex(LiDARIndex): X = 0 Y = 1 Z = 2 diff --git a/d123/datasets/nuplan/nuplan_data_converter.py b/d123/datasets/nuplan/nuplan_data_converter.py deleted file mode 100644 index 7beb3f73..00000000 --- a/d123/datasets/nuplan/nuplan_data_converter.py +++ /dev/null @@ -1,459 +0,0 @@ -import gc -import os -import pickle -from functools import partial -from pathlib import Path -from typing import Any, Dict, Final, List, Optional, Tuple, Union - -import numpy as np -import yaml - -import d123.datasets.nuplan.utils as nuplan_utils -from d123.common.multithreading.worker_utils import WorkerPool, worker_map -from d123.common.utils.dependencies import check_dependencies -from d123.datasets.nuplan.nuplan_map_conversion import MAP_LOCATIONS, NuPlanMapConverter -from d123.datasets.raw_data_converter import DataConverterConfig, RawDataConverter -from d123.datasets.utils.arrow_ipc_writer import ArrowLogWriter -from d123.datasets.utils.sensor.lidar_index_registry import NuplanLidarIndex -from d123.datatypes.detections.detection import ( - BoxDetectionMetadata, - BoxDetectionSE3, - BoxDetectionWrapper, - TrafficLightDetection, - TrafficLightDetectionWrapper, - TrafficLightStatus, -) -from d123.datatypes.detections.detection_types import DetectionType -from d123.datatypes.scene.scene_metadata import LogMetadata -from d123.datatypes.sensors.camera.pinhole_camera import ( - PinholeCameraMetadata, - PinholeCameraType, - PinholeDistortion, - PinholeIntrinsics, -) -from d123.datatypes.sensors.lidar.lidar import LiDARMetadata, LiDARType -from d123.datatypes.time.time_point import TimePoint -from d123.datatypes.vehicle_state.ego_state import DynamicStateSE3, EgoStateSE3 -from d123.datatypes.vehicle_state.vehicle_parameters import ( - get_nuplan_chrysler_pacifica_parameters, - rear_axle_se3_to_center_se3, -) -from d123.geometry import BoundingBoxSE3, StateSE3, Vector3D -from d123.geometry.rotation import EulerAngles -from d123.geometry.utils.constants import DEFAULT_PITCH, DEFAULT_ROLL - -check_dependencies(["nuplan", "sqlalchemy"], "nuplan") -from nuplan.database.nuplan_db.nuplan_scenario_queries import get_cameras, get_images_from_lidar_tokens -from nuplan.database.nuplan_db_orm.ego_pose import EgoPose -from nuplan.database.nuplan_db_orm.lidar_box import LidarBox -from nuplan.database.nuplan_db_orm.lidar_pc import LidarPc -from nuplan.database.nuplan_db_orm.nuplandb import NuPlanDB -from nuplan.planning.simulation.observation.observation_type import CameraChannel -from sqlalchemy import func - -TARGET_DT: Final[float] = 0.1 -NUPLAN_DT: Final[float] = 0.05 -SORT_BY_TIMESTAMP: Final[bool] = True - -NUPLAN_TRAFFIC_STATUS_DICT: Final[Dict[str, TrafficLightStatus]] = { - "green": TrafficLightStatus.GREEN, - "red": TrafficLightStatus.RED, - "unknown": TrafficLightStatus.UNKNOWN, -} -NUPLAN_DETECTION_NAME_DICT = { - "vehicle": DetectionType.VEHICLE, - 
"bicycle": DetectionType.BICYCLE, - "pedestrian": DetectionType.PEDESTRIAN, - "traffic_cone": DetectionType.TRAFFIC_CONE, - "barrier": DetectionType.BARRIER, - "czone_sign": DetectionType.CZONE_SIGN, - "generic_object": DetectionType.GENERIC_OBJECT, -} - -NUPLAN_CAMERA_TYPES = { - PinholeCameraType.CAM_F0: CameraChannel.CAM_F0, - PinholeCameraType.CAM_B0: CameraChannel.CAM_B0, - PinholeCameraType.CAM_L0: CameraChannel.CAM_L0, - PinholeCameraType.CAM_L1: CameraChannel.CAM_L1, - PinholeCameraType.CAM_L2: CameraChannel.CAM_L2, - PinholeCameraType.CAM_R0: CameraChannel.CAM_R0, - PinholeCameraType.CAM_R1: CameraChannel.CAM_R1, - PinholeCameraType.CAM_R2: CameraChannel.CAM_R2, -} - -NUPLAN_DATA_ROOT = Path(os.environ["NUPLAN_DATA_ROOT"]) -NUPLAN_ROLLING_SHUTTER_S: Final[TimePoint] = TimePoint.from_s(1 / 60) - - -def create_splits_logs() -> Dict[str, List[str]]: - yaml_filepath = Path(nuplan_utils.__path__[0]) / "log_splits.yaml" - with open(yaml_filepath, "r") as stream: - splits = yaml.safe_load(stream) - - return splits["log_splits"] - - -class NuplanDataConverter(RawDataConverter): - def __init__( - self, - splits: List[str], - log_path: Union[Path, str], - data_converter_config: DataConverterConfig, - ) -> None: - super().__init__(data_converter_config) - for split in splits: - assert ( - split in self.get_available_splits() - ), f"Split {split} is not available. Available splits: {self.available_splits}" - - self._splits: List[str] = splits - self._log_path: Path = Path(log_path) - self._log_paths_per_split: Dict[str, List[Path]] = self._collect_log_paths() - self._target_dt: float = 0.1 - - def _collect_log_paths(self) -> Dict[str, List[Path]]: - # NOTE: the nuplan mini folder has an internal train, val, test structure, all stored in "mini". - # The complete dataset is saved in the "trainval" folder (train and val), or in the "test" folder (for test). 
- # subsplit_log_names: Dict[str, List[str]] = create_splits_logs() - log_paths_per_split: Dict[str, List[Path]] = {} - - for split in self._splits: - subsplit = split.split("_")[-1] - assert subsplit in ["train", "val", "test"] - if split in ["nuplan_train", "nuplan_val"]: - log_path = NUPLAN_DATA_ROOT / "nuplan-v1.1" / "splits" / "trainval" - elif split in ["nuplan_test"]: - log_path = NUPLAN_DATA_ROOT / "nuplan-v1.1" / "splits" / "test" - elif split in ["nuplan_mini_train", "nuplan_mini_val", "nuplan_mini_test"]: - log_path = NUPLAN_DATA_ROOT / "nuplan-v1.1" / "splits" / "mini" - elif split == "nuplan_private_test": - log_path = NUPLAN_DATA_ROOT / "nuplan-v1.1" / "splits" / "private_test" - - all_log_files_in_path = [log_file for log_file in log_path.glob("*.db")] - all_log_names = set([str(log_file.stem) for log_file in all_log_files_in_path]) - # set(subsplit_log_names[subsplit]) - # log_paths = [log_path / f"{log_name}.db" for log_name in list(all_log_names & split_log_names)] - log_paths = [log_path / f"{log_name}.db" for log_name in list(all_log_names)] - log_paths_per_split[split] = log_paths - - return log_paths_per_split - - def get_available_splits(self) -> List[str]: - return [ - "nuplan_train", - "nuplan_val", - "nuplan_test", - "nuplan_mini_train", - "nuplan_mini_val", - "nuplan_mini_test", - "nuplan_private_test", # TODO: remove, not publicly available - ] - - def convert_maps(self, worker: WorkerPool) -> None: - worker_map( - worker, - partial(convert_nuplan_map_to_gpkg, data_converter_config=self.data_converter_config), - list(MAP_LOCATIONS), - ) - - def convert_logs(self, worker: WorkerPool) -> None: - log_args = [ - { - "log_path": log_path, - "split": split, - } - for split, log_paths in self._log_paths_per_split.items() - for log_path in log_paths - ] - - worker_map( - worker, - partial( - convert_nuplan_log_to_arrow, - data_converter_config=self.data_converter_config, - ), - log_args, - ) - - -def convert_nuplan_map_to_gpkg(map_names: List[str], data_converter_config: DataConverterConfig) -> List[Any]: - for map_name in map_names: - map_path = data_converter_config.output_path / "maps" / f"nuplan_{map_name}.gpkg" - if data_converter_config.force_map_conversion or not map_path.exists(): - map_path.unlink(missing_ok=True) - NuPlanMapConverter(data_converter_config.output_path / "maps").convert(map_name=map_name) - return [] - - -def convert_nuplan_log_to_arrow( - args: List[Dict[str, Union[List[str], List[Path]]]], data_converter_config: DataConverterConfig -) -> List[Any]: - for log_info in args: - log_path: Path = log_info["log_path"] - split: str = log_info["split"] - - if not log_path.exists(): - raise FileNotFoundError(f"Log path {log_path} does not exist.") - - log_file_path = data_converter_config.output_path / split / f"{log_path.stem}.arrow" - - if data_converter_config.force_log_conversion or not log_file_path.exists(): - log_db = NuPlanDB(NUPLAN_DATA_ROOT, str(log_path), None) - - log_file_path.unlink(missing_ok=True) - if not log_file_path.parent.exists(): - log_file_path.parent.mkdir(parents=True, exist_ok=True) - - log_metadata = LogMetadata( - dataset="nuplan", - split=split, - log_name=log_db.log_name, - location=log_db.log.map_version, - timestep_seconds=TARGET_DT, - vehicle_parameters=get_nuplan_chrysler_pacifica_parameters(), - camera_metadata=get_nuplan_camera_metadata(log_path), - lidar_metadata=get_nuplan_lidar_metadata(), - map_has_z=False, - map_is_local=False, - ) - - log_writer: ArrowLogWriter = ArrowLogWriter( - log_path=log_file_path, - 
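Both the map and log converters above guard their work with a force flag plus an existence check, so re-runs only redo missing or explicitly forced outputs. The pattern, condensed (`convert_fn` is a stand-in for the actual conversion call):

```python
# Idempotent conversion guard, as used by convert_nuplan_map_to_gpkg and
# convert_nuplan_log_to_arrow above. convert_fn is an illustrative stand-in.
from pathlib import Path
from typing import Callable

def convert_if_needed(output_path: Path, force: bool, convert_fn: Callable[[], None]) -> None:
    if force or not output_path.exists():
        output_path.unlink(missing_ok=True)                   # drop stale output
        output_path.parent.mkdir(parents=True, exist_ok=True) # ensure target dir
        convert_fn()                                          # write fresh artifact
```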
data_converter_config=data_converter_config, - log_metadata=log_metadata, - ) - - _write_recording_table(log_db, log_writer, log_file_path, log_path, data_converter_config) - - # Detach and remove log_db, for memory management - log_db.detach_tables() - log_db.remove_ref() - del log_writer, log_db - gc.collect() - - return [] - - -def get_nuplan_camera_metadata(log_path: Path) -> Dict[PinholeCameraType, PinholeCameraMetadata]: - - def _get_camera_metadata(camera_type: PinholeCameraType) -> PinholeCameraMetadata: - cam = list(get_cameras(log_path, [str(NUPLAN_CAMERA_TYPES[camera_type].value)]))[0] - - intrinsics_camera_matrix = np.array(pickle.loads(cam.intrinsic), dtype=np.float64) # array of shape (3, 3) - intrinsic = PinholeIntrinsics.from_camera_matrix(intrinsics_camera_matrix) - - distortion_array = np.array(pickle.loads(cam.distortion), dtype=np.float64) # array of shape (5,) - distortion = PinholeDistortion.from_array(distortion_array, copy=False) - - return PinholeCameraMetadata( - camera_type=camera_type, - width=cam.width, - height=cam.height, - intrinsics=intrinsic, - distortion=distortion, - ) - - log_cam_infos: Dict[str, PinholeCameraMetadata] = {} - for camera_type in NUPLAN_CAMERA_TYPES.keys(): - log_cam_infos[camera_type] = _get_camera_metadata(camera_type) - - return log_cam_infos - - -def get_nuplan_lidar_metadata() -> Dict[LiDARType, LiDARMetadata]: - metadata: Dict[LiDARType, LiDARMetadata] = {} - metadata[LiDARType.LIDAR_MERGED] = LiDARMetadata( - lidar_type=LiDARType.LIDAR_MERGED, - lidar_index=NuplanLidarIndex, - extrinsic=None, # NOTE: LiDAR extrinsic are unknown - ) - return metadata - - -def _write_recording_table( - log_db: NuPlanDB, - log_writer: ArrowLogWriter, - log_file_path: Path, - source_log_path: Path, - data_converter_config: DataConverterConfig, -) -> None: - - step_interval: float = int(TARGET_DT / NUPLAN_DT) - for lidar_pc in log_db.lidar_pc[::step_interval]: - lidar_pc_token: str = lidar_pc.token - - log_writer.add_row( - token=lidar_pc_token, - timestamp=TimePoint.from_us(lidar_pc.timestamp), - ego_state=_extract_nuplan_ego_state(lidar_pc), - box_detections=_extract_nuplan_box_detections(lidar_pc), - traffic_lights=_extract_nuplan_traffic_lights(log_db, lidar_pc_token), - cameras=_extract_nuplan_cameras( - log_db=log_db, - lidar_pc=lidar_pc, - source_log_path=source_log_path, - data_converter_config=data_converter_config, - ), - lidars=_extract_nuplan_lidars(lidar_pc, data_converter_config), - scenario_tags=_extract_nuplan_scenario_tag(log_db, lidar_pc_token), - route_lane_group_ids=_extract_nuplan_route_lane_group_ids(lidar_pc), - ) - - log_writer.close() - - -def _extract_nuplan_ego_state(lidar_pc: LidarPc) -> EgoStateSE3: - - vehicle_parameters = get_nuplan_chrysler_pacifica_parameters() - rear_axle_pose = StateSE3( - x=lidar_pc.ego_pose.x, - y=lidar_pc.ego_pose.y, - z=lidar_pc.ego_pose.z, - qw=lidar_pc.ego_pose.qw, - qx=lidar_pc.ego_pose.qx, - qy=lidar_pc.ego_pose.qy, - qz=lidar_pc.ego_pose.qz, - ) - center = rear_axle_se3_to_center_se3(rear_axle_se3=rear_axle_pose, vehicle_parameters=vehicle_parameters) - dynamic_state = DynamicStateSE3( - velocity=Vector3D( - x=lidar_pc.ego_pose.vx, - y=lidar_pc.ego_pose.vy, - z=lidar_pc.ego_pose.vz, - ), - acceleration=Vector3D( - x=lidar_pc.ego_pose.acceleration_x, - y=lidar_pc.ego_pose.acceleration_y, - z=lidar_pc.ego_pose.acceleration_z, - ), - angular_velocity=Vector3D( - x=lidar_pc.ego_pose.angular_rate_x, - y=lidar_pc.ego_pose.angular_rate_y, - z=lidar_pc.ego_pose.angular_rate_z, - ), - ) - return 
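`_write_recording_table` above downsamples nuPlan's 20 Hz `lidar_pc` stream to the 10 Hz target by slicing with a fixed stride; note the original annotates `step_interval` as `float` even though it holds an `int`. A self-contained sketch of the stride computation:

```python
# Temporal downsampling by stride, mirroring TARGET_DT / NUPLAN_DT above.
TARGET_DT = 0.1   # output timestep [s]
NUPLAN_DT = 0.05  # source timestep [s]

def downsample(records: list, source_dt: float = NUPLAN_DT, target_dt: float = TARGET_DT) -> list:
    stride = round(target_dt / source_dt)  # 2 for 0.1 / 0.05
    assert stride >= 1, "target rate must not exceed source rate"
    return records[::stride]

assert downsample(list(range(10))) == [0, 2, 4, 6, 8]
```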
EgoStateSE3( - center_se3=center, - dynamic_state_se3=dynamic_state, - vehicle_parameters=vehicle_parameters, - timepoint=None, # NOTE: Timepoint is not needed during writing, set to None - ) - - -def _extract_nuplan_box_detections(lidar_pc: LidarPc) -> BoxDetectionWrapper: - - box_detections: List[BoxDetectionSE3] = [] - for lidar_box in lidar_pc.lidar_boxes: - lidar_box: LidarBox - - box_quaternion = EulerAngles(roll=DEFAULT_ROLL, pitch=DEFAULT_PITCH, yaw=lidar_box.yaw).quaternion - box_center = StateSE3( - x=lidar_box.x, - y=lidar_box.y, - z=lidar_box.z, - qw=box_quaternion.qw, - qx=box_quaternion.qx, - qy=box_quaternion.qy, - qz=box_quaternion.qz, - ) - bounding_box_se3 = BoundingBoxSE3(box_center, lidar_box.length, lidar_box.width, lidar_box.height) - box_detections.append( - BoxDetectionSE3( - metadata=BoxDetectionMetadata( - detection_type=NUPLAN_DETECTION_NAME_DICT[lidar_box.category.name], - timepoint=None, # NOTE: Timepoint is not needed during writing, set to None - track_token=lidar_box.token, - confidence=None, # NOTE: Not currently written, requires refactoring - ), - bounding_box_se3=bounding_box_se3, - velocity=Vector3D(x=lidar_box.vx, y=lidar_box.vy, z=lidar_box.vz), - ) - ) - - return BoxDetectionWrapper(box_detections=box_detections) - - -def _extract_nuplan_traffic_lights(log_db: NuPlanDB, lidar_pc_token: str) -> TrafficLightDetectionWrapper: - - traffic_lights_detections: List[TrafficLightDetection] = [ - TrafficLightDetection( - timepoint=None, # NOTE: Timepoint is not needed during writing, set to None - lane_id=int(traffic_light.lane_connector_id), - status=NUPLAN_TRAFFIC_STATUS_DICT[traffic_light.status], - ) - for traffic_light in log_db.traffic_light_status.select_many(lidar_pc_token=lidar_pc_token) - ] - - return TrafficLightDetectionWrapper(traffic_light_detections=traffic_lights_detections) - - -def _extract_nuplan_cameras( - log_db: NuPlanDB, - lidar_pc: LidarPc, - source_log_path: Path, - data_converter_config: DataConverterConfig, -) -> Dict[PinholeCameraType, Tuple[Union[str, bytes], StateSE3]]: - - camera_dict: Dict[str, Union[str, bytes]] = {} - sensor_root = NUPLAN_DATA_ROOT / "nuplan-v1.1" / "sensor_blobs" - log_cam_infos = {camera.token: camera for camera in log_db.log.cameras} - - for camera_type, camera_channel in NUPLAN_CAMERA_TYPES.items(): - camera_data: Optional[Union[str, bytes]] = None - image_class = list(get_images_from_lidar_tokens(source_log_path, [lidar_pc.token], [str(camera_channel.value)])) - - if len(image_class) != 0: - image = image_class[0] - filename_jpg = sensor_root / image.filename_jpg - if filename_jpg.exists(): - - # Code taken from MTGS - # https://github.com/OpenDriveLab/MTGS/blob/main/nuplan_scripts/utils/nuplan_utils_custom.py#L117 - # TODO: Refactor - timestamp = image.timestamp + NUPLAN_ROLLING_SHUTTER_S.time_us - img_ego_pose: EgoPose = ( - log_db.log._session.query(EgoPose).order_by(func.abs(EgoPose.timestamp - timestamp)).first() - ) - img_e2g = img_ego_pose.trans_matrix - g2e = lidar_pc.ego_pose.trans_matrix_inv - img_e2e = g2e @ img_e2g - cam_info = log_cam_infos[image.camera_token] - c2img_e = cam_info.trans_matrix - c2e = img_e2e @ c2img_e - - extrinsic = StateSE3.from_transformation_matrix(c2e) - - if data_converter_config.camera_store_option == "path": - camera_data = str(filename_jpg) - elif data_converter_config.camera_store_option == "binary": - with open(filename_jpg, "rb") as f: - camera_data = f.read() - - camera_dict[camera_type] = camera_data, extrinsic - - return camera_dict - - -def 
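The camera-extrinsic block above chains homogeneous transforms to express each camera in the ego frame at lidar time: camera-to-ego at image time, ego-to-global at image time, then global-to-ego at lidar time. A small numpy sketch of that chain (matrix names assumed from the snippet):

```python
# Pose chain from the rolling-shutter camera extraction above. All inputs are
# 4x4 homogeneous transforms; names (c2img_e, img_e2g, g2e) follow the snippet.
import numpy as np

def camera_to_lidar_ego(c2img_e: np.ndarray, img_e2g: np.ndarray, g2e: np.ndarray) -> np.ndarray:
    img_e2e = g2e @ img_e2g   # image-time ego expressed in lidar-time ego frame
    return img_e2e @ c2img_e  # camera expressed in lidar-time ego frame

identity = np.eye(4)
assert np.allclose(camera_to_lidar_ego(identity, identity, identity), identity)
```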
_extract_nuplan_lidars( - lidar_pc: LidarPc, data_converter_config: DataConverterConfig -) -> Dict[LiDARType, Optional[str]]: - - lidar: Optional[str] = None - lidar_full_path = NUPLAN_DATA_ROOT / "nuplan-v1.1" / "sensor_blobs" / lidar_pc.filename - if lidar_full_path.exists(): - lidar = lidar_pc.filename - - return {LiDARType.LIDAR_MERGED: lidar} - - -def _extract_nuplan_scenario_tag(log_db: NuPlanDB, lidar_pc_token: str) -> List[str]: - scenario_tags = [ - scenario_tag.type for scenario_tag in log_db.scenario_tag.select_many(lidar_pc_token=lidar_pc_token) - ] - if len(scenario_tags) == 0: - scenario_tags = ["unknown"] - return scenario_tags - - -def _extract_nuplan_route_lane_group_ids(lidar_pc: LidarPc) -> List[int]: - return [int(roadblock_id) for roadblock_id in str(lidar_pc.scene.roadblock_ids).split(" ") if len(roadblock_id) > 0] diff --git a/d123/datatypes/scene/arrow/utils/arrow_getters.py b/d123/datatypes/scene/arrow/utils/arrow_getters.py index c70e9184..600c1444 100644 --- a/d123/datatypes/scene/arrow/utils/arrow_getters.py +++ b/d123/datatypes/scene/arrow/utils/arrow_getters.py @@ -145,11 +145,11 @@ def get_lidar_from_arrow_table( # NOTE: We move data specific import into if-else block, to avoid data specific import errors if log_metadata.dataset == "nuplan": - from d123.datasets.nuplan.nuplan_load_sensor import load_nuplan_lidar_from_path + from d123.conversion.nuplan.nuplan_load_sensor import load_nuplan_lidar_from_path lidar = load_nuplan_lidar_from_path(full_lidar_path, lidar_metadata) elif log_metadata.dataset == "carla": - from d123.datasets.carla.load_sensor import load_carla_lidar_from_path + from d123.conversion.carla.load_sensor import load_carla_lidar_from_path lidar = load_carla_lidar_from_path(full_lidar_path, lidar_metadata) elif log_metadata.dataset == "wopd": diff --git a/d123/datatypes/scene/arrow/utils/arrow_metadata_utils.py b/d123/datatypes/scene/arrow/utils/arrow_metadata_utils.py index b39d987d..bcca116e 100644 --- a/d123/datatypes/scene/arrow/utils/arrow_metadata_utils.py +++ b/d123/datatypes/scene/arrow/utils/arrow_metadata_utils.py @@ -1,4 +1,5 @@ import json +from functools import lru_cache from pathlib import Path from typing import Union @@ -8,6 +9,7 @@ from d123.datatypes.scene.scene_metadata import LogMetadata +@lru_cache(maxsize=10000) def get_log_metadata_from_arrow(arrow_file_path: Union[Path, str]) -> LogMetadata: table = get_lru_cached_arrow_table(arrow_file_path) log_metadata = LogMetadata.from_dict(json.loads(table.schema.metadata[b"log_metadata"].decode())) diff --git a/d123/datatypes/scene/scene_metadata.py b/d123/datatypes/scene/scene_metadata.py index ae40aa4d..59389d13 100644 --- a/d123/datatypes/scene/scene_metadata.py +++ b/d123/datatypes/scene/scene_metadata.py @@ -29,7 +29,7 @@ class LogMetadata: @classmethod def from_dict(cls, data_dict: Dict) -> LogMetadata: - data_dict["vehicle_parameters"] = VehicleParameters(**data_dict["vehicle_parameters"]) + data_dict["vehicle_parameters"] = VehicleParameters.from_dict(data_dict["vehicle_parameters"]) data_dict["camera_metadata"] = { PinholeCameraType.deserialize(key): PinholeCameraMetadata.from_dict(value) for key, value in data_dict.get("camera_metadata", {}).items() diff --git a/d123/datatypes/sensors/lidar/lidar.py b/d123/datatypes/sensors/lidar/lidar.py index d32d73fd..e057c46c 100644 --- a/d123/datatypes/sensors/lidar/lidar.py +++ b/d123/datatypes/sensors/lidar/lidar.py @@ -7,7 +7,7 @@ import numpy.typing as npt from d123.common.utils.enums import SerialIntEnum -from 
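The `@lru_cache` added to `get_log_metadata_from_arrow` above memoizes one JSON parse per arrow file. A hedged, runnable sketch of the same idea, using the `b"log_metadata"` schema key shown in the diff; `load_log_metadata` is an illustrative name, not the repo function:

```python
# Memoized metadata read: one disk access per unique path, dictionary lookups
# afterwards. The b"log_metadata" schema key mirrors the snippet above.
import json
from functools import lru_cache

import pyarrow as pa

@lru_cache(maxsize=10000)
def load_log_metadata(arrow_file_path: str) -> dict:
    reader = pa.ipc.open_file(arrow_file_path)
    return json.loads(reader.schema.metadata[b"log_metadata"].decode())
```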
d123.datasets.utils.sensor.lidar_index_registry import LIDAR_INDEX_REGISTRY, LiDARIndex +from d123.conversion.utils.sensor_utils.lidar_index_registry import LIDAR_INDEX_REGISTRY, LiDARIndex from d123.geometry import StateSE3 diff --git a/d123/geometry/se.py b/d123/geometry/se.py index 0efc8898..3d328f4b 100644 --- a/d123/geometry/se.py +++ b/d123/geometry/se.py @@ -106,10 +106,7 @@ def shapely_point(self) -> geom.Point: class StateSE3(ArrayMixin): - """Class representing a quaternion in SE3 space. - - TODO: Implement and replace StateSE3. - """ + """Class representing a quaternion in SE3 space.""" _array: npt.NDArray[np.float64] diff --git a/d123/script/builders/data_converter_builder.py b/d123/script/builders/data_converter_builder.py deleted file mode 100644 index cb7cc83b..00000000 --- a/d123/script/builders/data_converter_builder.py +++ /dev/null @@ -1,22 +0,0 @@ -import logging -from typing import List - -from hydra.utils import instantiate -from omegaconf import DictConfig - -from d123.datasets.raw_data_converter import RawDataConverter -from d123.script.builders.utils.utils_type import validate_type - -logger = logging.getLogger(__name__) - - -def build_data_converter(cfg: DictConfig) -> List[RawDataConverter]: - logger.info("Building RawDataProcessor...") - instantiated_datasets: List[RawDataConverter] = [] - for dataset_type in cfg.values(): - processor: RawDataConverter = instantiate(dataset_type) - validate_type(processor, RawDataConverter) - instantiated_datasets.append(processor) - - logger.info("Building RawDataProcessor...DONE!") - return instantiated_datasets diff --git a/d123/script/builders/dataset_converter_builder.py b/d123/script/builders/dataset_converter_builder.py new file mode 100644 index 00000000..1c95cb61 --- /dev/null +++ b/d123/script/builders/dataset_converter_builder.py @@ -0,0 +1,22 @@ +import logging +from typing import List + +from hydra.utils import instantiate +from omegaconf import DictConfig + +from d123.conversion.abstract_dataset_converter import AbstractDatasetConverter +from d123.script.builders.utils.utils_type import validate_type + +logger = logging.getLogger(__name__) + + +def build_dataset_converters(cfg: DictConfig) -> List[AbstractDatasetConverter]: + logger.info("Building AbstractDatasetConverter...") + instantiated_datasets: List[AbstractDatasetConverter] = [] + for dataset_type in cfg.values(): + processor: AbstractDatasetConverter = instantiate(dataset_type) + validate_type(processor, AbstractDatasetConverter) + instantiated_datasets.append(processor) + + logger.info("Building AbstractDatasetConverter...DONE!") + return instantiated_datasets diff --git a/d123/script/builders/log_writer_builder.py b/d123/script/builders/log_writer_builder.py new file mode 100644 index 00000000..d0c3a394 --- /dev/null +++ b/d123/script/builders/log_writer_builder.py @@ -0,0 +1,17 @@ +import logging + +from hydra.utils import instantiate +from omegaconf import DictConfig + +from d123.conversion.abstract_dataset_converter import AbstractLogWriter +from d123.script.builders.utils.utils_type import validate_type + +logger = logging.getLogger(__name__) + + +def build_log_writer(cfg: DictConfig) -> AbstractLogWriter: + logger.info("Building AbstractLogWriter...") + log_writer: AbstractLogWriter = instantiate(cfg) + validate_type(log_writer, AbstractLogWriter) + logger.info("Building AbstractLogWriter...DONE!") + return log_writer diff --git a/d123/script/config/common/default_dataset_paths.yaml b/d123/script/config/common/default_dataset_paths.yaml index 
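The two new builders follow one pattern: instantiate every node of a Hydra config, then assert the result type, mirroring `validate_type`. Condensed sketch (`expected_type` stands in for `AbstractDatasetConverter` or `AbstractLogWriter`):

```python
# Generic form of the builder pattern used by build_dataset_converters and
# build_log_writer above: Hydra instantiation plus a type check.
from hydra.utils import instantiate
from omegaconf import DictConfig

def build_all(cfg: DictConfig, expected_type: type) -> list:
    built = []
    for node in cfg.values():          # one node per configured dataset
        obj = instantiate(node)        # resolves the node's _target_
        if not isinstance(obj, expected_type):
            raise TypeError(f"Expected {expected_type}, got {type(obj)}")
        built.append(obj)
    return built
```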
28c5151b..a18ecb77 100644 --- a/d123/script/config/common/default_dataset_paths.yaml +++ b/d123/script/config/common/default_dataset_paths.yaml @@ -1,11 +1,11 @@ -# D123 +# 123D Defaults d123_devkit_root: ${oc.env:D123_DEVKIT_ROOT} -d123_maps_root: ${oc.env:D123_MAPS_ROOT} d123_data_root: ${oc.env:D123_DATA_ROOT} +d123_maps_root: ${oc.env:D123_MAPS_ROOT} -# nuplan -nuplan_devkit_root: ${oc.env:NUPLAN_DEVKIT_ROOT} -nuplan_maps_root: ${oc.env:NUPLAN_MAPS_ROOT} +# nuPlan defaults nuplan_data_root: ${oc.env:NUPLAN_DATA_ROOT} +nuplan_maps_root: ${oc.env:NUPLAN_MAPS_ROOT} +nuplan_sensor_root: ${oc.env:NUPLAN_DATA_ROOT}/nuplan-v1.1/sensor_blobs diff --git a/d123/script/config/conversion/__init__.py b/d123/script/config/conversion/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/d123/script/config/conversion/datasets/__init__.py b/d123/script/config/conversion/datasets/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/d123/script/config/datasets/av2_sensor_dataset.yaml b/d123/script/config/conversion/datasets/av2_sensor_dataset.yaml similarity index 80% rename from d123/script/config/datasets/av2_sensor_dataset.yaml rename to d123/script/config/conversion/datasets/av2_sensor_dataset.yaml index 93460579..0f2d4d9f 100644 --- a/d123/script/config/datasets/av2_sensor_dataset.yaml +++ b/d123/script/config/conversion/datasets/av2_sensor_dataset.yaml @@ -1,12 +1,12 @@ av2_sensor_dataset: - _target_: d123.datasets.av2.av2_data_converter.AV2SensorDataConverter + _target_: d123.conversion.datasets.av2.av2_sensor_converter.AV2SensorDataConverter _convert_: 'all' splits: ["av2-sensor-mini_train"] log_path: "/media/nvme1/argoverse" - data_converter_config: - _target_: d123.datasets.raw_data_converter.DataConverterConfig + dataset_converter_config: + _target_: d123.conversion.dataset_converter_config.DatasetConverterConfig _convert_: 'all' output_path: ${d123_data_root} diff --git a/d123/script/config/datasets/carla_dataset.yaml b/d123/script/config/conversion/datasets/carla_dataset.yaml similarity index 80% rename from d123/script/config/datasets/carla_dataset.yaml rename to d123/script/config/conversion/datasets/carla_dataset.yaml index 1f005578..a6540ba3 100644 --- a/d123/script/config/datasets/carla_dataset.yaml +++ b/d123/script/config/conversion/datasets/carla_dataset.yaml @@ -1,12 +1,12 @@ carla_dataset: - _target_: d123.datasets.carla.carla_data_converter.CarlaDataConverter + _target_: d123.conversion.datasets.carla.carla_data_converter.CarlaDataConverter _convert_: 'all' splits: ["carla"] log_path: "${oc.env:HOME}/carla_workspace/data" - data_converter_config: - _target_: d123.datasets.raw_data_converter.DataConverterConfig + dataset_converter_config: + _target_: d123.conversion.dataset_converter_config.DatasetConverterConfig _convert_: 'all' output_path: ${d123_data_root} diff --git a/d123/script/config/datasets/nuplan_dataset.yaml b/d123/script/config/conversion/datasets/nuplan_dataset.yaml similarity index 70% rename from d123/script/config/datasets/nuplan_dataset.yaml rename to d123/script/config/conversion/datasets/nuplan_dataset.yaml index 3907f69c..e744a1ad 100644 --- a/d123/script/config/datasets/nuplan_dataset.yaml +++ b/d123/script/config/conversion/datasets/nuplan_dataset.yaml @@ -1,12 +1,14 @@ nuplan_dataset: - _target_: d123.datasets.nuplan.nuplan_data_converter.NuplanDataConverter + _target_: d123.conversion.datasets.nuplan.nuplan_converter.NuPlanConverter _convert_: 'all' splits: ["nuplan_train", "nuplan_val", "nuplan_test"] - log_path: 
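The `${oc.env:...}` entries in `default_dataset_paths.yaml` resolve environment variables lazily, at access time. A runnable OmegaConf sketch of that behavior:

```python
# OmegaConf resolves ${oc.env:...} interpolations when the key is accessed,
# so the environment variable only needs to exist at read time.
import os
from omegaconf import OmegaConf

os.environ.setdefault("NUPLAN_DATA_ROOT", "/tmp/nuplan")
cfg = OmegaConf.create(
    {"nuplan_sensor_root": "${oc.env:NUPLAN_DATA_ROOT}/nuplan-v1.1/sensor_blobs"}
)
print(cfg.nuplan_sensor_root)  # /tmp/nuplan/nuplan-v1.1/sensor_blobs
```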
${oc.env:NUPLAN_DATA_ROOT}/nuplan-v1.1/splits # NOTE: folder including [mini, trainval, test], sometimes not inside "splits" folder + nuplan_data_root: ${nuplan_data_root} + nuplan_map_root: ${nuplan_maps_root} + nuplan_sensor_root: ${nuplan_sensor_root} - data_converter_config: - _target_: d123.datasets.raw_data_converter.DataConverterConfig + dataset_converter_config: + _target_: d123.conversion.dataset_converter_config.DatasetConverterConfig _convert_: 'all' output_path: ${d123_data_root} diff --git a/d123/script/config/datasets/nuplan_mini_dataset.yaml b/d123/script/config/conversion/datasets/nuplan_mini_dataset.yaml similarity index 71% rename from d123/script/config/datasets/nuplan_mini_dataset.yaml rename to d123/script/config/conversion/datasets/nuplan_mini_dataset.yaml index 7b509e55..92b86322 100644 --- a/d123/script/config/datasets/nuplan_mini_dataset.yaml +++ b/d123/script/config/conversion/datasets/nuplan_mini_dataset.yaml @@ -1,13 +1,14 @@ nuplan_mini_dataset: - _target_: d123.datasets.nuplan.nuplan_data_converter.NuplanDataConverter + _target_: d123.conversion.datasets.nuplan.nuplan_converter.NuPlanConverter _convert_: 'all' - splits: ["nuplan_mini_train", "nuplan_mini_val", "nuplan_mini_test"] - log_path: ${oc.env:NUPLAN_DATA_ROOT}/nuplan-v1.1/splits # NOTE: folder including [mini, trainval, test], sometimes not inside "splits" folder + nuplan_data_root: ${nuplan_data_root} + nuplan_map_root: ${nuplan_maps_root} + nuplan_sensor_root: ${nuplan_sensor_root} - data_converter_config: - _target_: d123.datasets.raw_data_converter.DataConverterConfig + dataset_converter_config: + _target_: d123.conversion.dataset_converter_config.DatasetConverterConfig _convert_: 'all' output_path: ${d123_data_root} diff --git a/d123/script/config/datasets/nuplan_private_dataset.yaml b/d123/script/config/conversion/datasets/nuplan_private_dataset.yaml similarity index 70% rename from d123/script/config/datasets/nuplan_private_dataset.yaml rename to d123/script/config/conversion/datasets/nuplan_private_dataset.yaml index 3f44e33a..bd5caaec 100644 --- a/d123/script/config/datasets/nuplan_private_dataset.yaml +++ b/d123/script/config/conversion/datasets/nuplan_private_dataset.yaml @@ -1,12 +1,14 @@ nuplan_private_dataset: - _target_: d123.datasets.nuplan.nuplan_data_converter.NuplanDataConverter + _target_: d123.conversion.datasets.nuplan.nuplan_converter.NuPlanConverter _convert_: 'all' splits: ["nuplan_private_test"] - log_path: ${oc.env:NUPLAN_DATA_ROOT}/nuplan-v1.1/splits # NOTE: folder including [mini, trainval, test], sometimes not inside "splits" folder + nuplan_data_root: ${nuplan_data_root} + nuplan_map_root: ${nuplan_maps_root} + nuplan_sensor_root: ${nuplan_sensor_root} - data_converter_config: - _target_: d123.datasets.raw_data_converter.DataConverterConfig + dataset_converter_config: + _target_: d123.conversion.dataset_converter_config.DatasetConverterConfig _convert_: 'all' output_path: ${d123_data_root} diff --git a/d123/script/config/datasets/wopd_dataset.yaml b/d123/script/config/conversion/datasets/wopd_dataset.yaml similarity index 81% rename from d123/script/config/datasets/wopd_dataset.yaml rename to d123/script/config/conversion/datasets/wopd_dataset.yaml index 2c1be19c..a5069b8b 100644 --- a/d123/script/config/datasets/wopd_dataset.yaml +++ b/d123/script/config/conversion/datasets/wopd_dataset.yaml @@ -1,12 +1,12 @@ wopd_dataset: - _target_: d123.datasets.wopd.wopd_data_converter.WOPDDataConverter + _target_: d123.conversion.datasets.wopd.wopd_converter.WOPDConverter _convert_: 
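Each dataset YAML now targets a shared `DatasetConverterConfig`. A sketch of its likely shape; only `output_path` is visible in these hunks, and the remaining fields are inferred from flags used elsewhere in the series, so they may not match the real class exactly:

```python
# Hedged sketch of DatasetConverterConfig. Fields beyond output_path
# (force flags, camera_store_option) are assumptions inferred from usage.
from dataclasses import dataclass
from pathlib import Path
from typing import Literal

@dataclass
class DatasetConverterConfig:
    output_path: Path
    force_map_conversion: bool = False
    force_log_conversion: bool = False
    camera_store_option: Literal["path", "binary"] = "path"  # store refs or blobs
```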
'all'

   splits: ["wopd_train"]
   log_path: null # TODO: implement

-  data_converter_config:
-    _target_: d123.datasets.raw_data_converter.DataConverterConfig
+  dataset_converter_config:
+    _target_: d123.conversion.dataset_converter_config.DatasetConverterConfig
     _convert_: 'all'

     output_path: ${d123_data_root}
diff --git a/d123/script/config/dataset_conversion/default_dataset_conversion.yaml b/d123/script/config/conversion/default_conversion.yaml
similarity index 75%
rename from d123/script/config/dataset_conversion/default_dataset_conversion.yaml
rename to d123/script/config/conversion/default_conversion.yaml
index 1d18b82a..a53e4939 100644
--- a/d123/script/config/dataset_conversion/default_dataset_conversion.yaml
+++ b/d123/script/config/conversion/default_conversion.yaml
@@ -11,12 +11,14 @@ hydra:
 defaults:
   - default_common
   - default_dataset_paths
+  - log_writer: arrow_ipc_log_writer
   - datasets:
+    - nuplan_mini_dataset
     # - nuplan_private_dataset
     # - carla_dataset
-    - wopd_dataset
+    # - wopd_dataset
     # - av2_sensor_dataset
   - _self_

-force_map_conversion: False
+force_map_conversion: True
 force_log_conversion: True
diff --git a/d123/script/config/conversion/log_writer/__init__.py b/d123/script/config/conversion/log_writer/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/d123/script/config/conversion/log_writer/arrow_ipc_log_writer.yaml b/d123/script/config/conversion/log_writer/arrow_ipc_log_writer.yaml
new file mode 100644
index 00000000..d74a121a
--- /dev/null
+++ b/d123/script/config/conversion/log_writer/arrow_ipc_log_writer.yaml
@@ -0,0 +1,5 @@
+_target_: d123.conversion.log_writer.arrow_log_writer.ArrowLogWriter
+_convert_: 'all'
+
+compression: null # Compression method for ipc files. Options: null, 'lz4', 'zstd'
+compression_level: null # Compression level for ipc files. Options: null, or an integer supported by the chosen method
diff --git a/d123/script/run_conversion.py b/d123/script/run_conversion.py
new file mode 100644
index 00000000..dc33653a
--- /dev/null
+++ b/d123/script/run_conversion.py
@@ -0,0 +1,77 @@
+import gc
+import logging
+from functools import partial
+from typing import Dict, List
+
+import hydra
+from omegaconf import DictConfig
+
+from d123 import ascii_banner
+from d123.common.multithreading.worker_utils import worker_map
+from d123.script.builders.dataset_converter_builder import AbstractDatasetConverter, build_dataset_converters
+from d123.script.builders.log_writer_builder import build_log_writer
+from d123.script.builders.worker_pool_builder import build_worker
+
+logging.basicConfig(level=logging.INFO)
+logger = logging.getLogger(__name__)
+
+CONFIG_PATH = "config/conversion"
+CONFIG_NAME = "default_conversion"
+
+
+@hydra.main(config_path=CONFIG_PATH, config_name=CONFIG_NAME, version_base=None)
+def main(cfg: DictConfig) -> None:
+    """
+    Main entrypoint for dataset conversion.
+    :param cfg: omegaconf dictionary
+    """
+    logger.info(ascii_banner)
+
+    # A fresh worker is built per dataset inside the loop below.
+
+    # Convert all configured datasets into the unified log format.
+    logger.info("Starting dataset conversion...")
+    dataset_converters: List[AbstractDatasetConverter] = build_dataset_converters(cfg.datasets)
+
+    for dataset_converter in dataset_converters:
+
+        worker = build_worker(cfg)
+
+        logger.info(f"Processing dataset: {dataset_converter.__class__.__name__}")
+
+        # Map conversion is temporarily disabled here:
+        # map_args = [{"map_index": i} for i in range(dataset_converter.get_number_of_maps())]
+        # worker_map(worker, partial(_convert_maps, cfg=cfg, dataset_converter=dataset_converter), map_args)
+        # logger.info(f"Finished maps: {dataset_converter.__class__.__name__}")
+
+        log_args = [{"log_index": i} for i in range(dataset_converter.get_number_of_logs())]
+        worker_map(worker, partial(_convert_logs, cfg=cfg, dataset_converter=dataset_converter), log_args)
+
+        logger.info(f"Finished logs: {dataset_converter.__class__.__name__}")
+        logger.info(f"Finished processing dataset: {dataset_converter.__class__.__name__}")
+
+
+def _convert_maps(args: List[Dict[str, int]], cfg: DictConfig, dataset_converter: AbstractDatasetConverter) -> List:
+    for arg in args:
+        dataset_converter.convert_map(arg["map_index"])
+    return []
+
+
+def _convert_logs(args: List[Dict[str, int]], cfg: DictConfig, dataset_converter: AbstractDatasetConverter) -> List:
+
+    def _internal_convert_log(args: List[Dict[str, int]], dataset_converter_: AbstractDatasetConverter) -> None:
+        # One writer per worker invocation, released before the next batch.
+        log_writer = build_log_writer(cfg.log_writer)
+        for arg in args:
+            dataset_converter_.convert_log(arg["log_index"], log_writer)
+        del log_writer
+        gc.collect()
+
+    # Convert the assigned logs, then force a garbage-collection pass.
+    _internal_convert_log(args, dataset_converter)
+    gc.collect()
+    return []
+
+
+if __name__ == "__main__":
+    main()
diff --git a/d123/script/run_dataset_conversion.py b/d123/script/run_dataset_conversion.py
deleted file mode 100644
index 042614f4..00000000
--- a/d123/script/run_dataset_conversion.py
+++ /dev/null
@@ -1,45 +0,0 @@
-import logging
-from typing import List
-
-import hydra
-from omegaconf import DictConfig
-
-from d123 import ascii_banner
-from d123.script.builders.data_converter_builder import RawDataConverter, build_data_converter
-from d123.script.builders.worker_pool_builder import build_worker
-
-logger = logging.getLogger(__name__)
-
-CONFIG_PATH = "config/dataset_conversion"
-CONFIG_NAME = "default_dataset_conversion"
-
-
-@hydra.main(config_path=CONFIG_PATH, config_name=CONFIG_NAME, version_base=None)
-def main(cfg: DictConfig) -> None:
-    """
-    Main entrypoint for metric caching.
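The new entrypoint builds a fresh log writer per worker invocation and forces garbage collection between logs, which bounds peak memory during long conversions. The bare pattern (`build_writer` and `convert` are illustrative stand-ins):

```python
# Per-log writer lifecycle from run_conversion.py, reduced to its core shape.
import gc
from typing import Callable, List

def convert_logs(log_indices: List[int], build_writer: Callable[[], object],
                 convert: Callable[[int, object], None]) -> None:
    for log_index in log_indices:
        writer = build_writer()  # new writer per log, no shared state
        convert(log_index, writer)
        del writer
        gc.collect()             # release Arrow buffers before the next log
```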
- :param cfg: omegaconf dictionary - """ - logger.info(ascii_banner) - - # Build worker - worker = build_worker(cfg) - - # Precompute and cache all features - logger.info("Starting Dataset Caching...") - data_processors: List[RawDataConverter] = build_data_converter(cfg.datasets) - for data_processor in data_processors: - - logger.info(f"Processing dataset: {data_processor.__class__.__name__}") - - data_processor.convert_maps(worker=worker) - logger.info(f"Finished maps: {data_processor.__class__.__name__}") - - data_processor.convert_logs(worker=worker) - logger.info(f"Finished logs: {data_processor.__class__.__name__}") - - logger.info(f"Finished processing dataset: {data_processor.__class__.__name__}") - - -if __name__ == "__main__": - main() diff --git a/d123/script/run_preprocessing.py b/d123/script/run_preprocessing.py deleted file mode 100644 index 9e77c514..00000000 --- a/d123/script/run_preprocessing.py +++ /dev/null @@ -1,58 +0,0 @@ -import logging -import pickle -from functools import partial -from pathlib import Path -from typing import List - -import hydra -import lightning as L -from omegaconf import DictConfig - -from d123.common.multithreading.worker_utils import worker_map -from d123.datatypes.scene.abstract_scene import AbstractScene -from d123.script.builders.scene_builder_builder import build_scene_builder -from d123.script.builders.scene_filter_builder import build_scene_filter -from d123.script.run_dataset_conversion import build_worker -from d123.training.feature_builder.smart_feature_builder import SMARTFeatureBuilder - -logger = logging.getLogger(__name__) - -CONFIG_PATH = "config/preprocessing" -CONFIG_NAME = "default_preprocessing" - - -@hydra.main(config_path=CONFIG_PATH, config_name=CONFIG_NAME, version_base=None) -def main(cfg: DictConfig) -> None: - - L.seed_everything(cfg.seed, workers=True) - - worker = build_worker(cfg) - scene_filter = build_scene_filter(cfg.scene_filter) - scene_builder = build_scene_builder(cfg.scene_builder) - - scenes = scene_builder.get_scenes(scene_filter, worker=worker) - logger.info(f"Found {len(scenes)} scenes.") - - cache_path = Path(cfg.cache_path) - cache_path.mkdir(parents=True, exist_ok=True) - - feature_builder = SMARTFeatureBuilder() - - worker_map(worker, partial(_apply_feature_builder, feature_builder=feature_builder, cache_path=cache_path), scenes) - - -def _apply_feature_builder(scenes: List[AbstractScene], feature_builder: SMARTFeatureBuilder, cache_path: Path): - - for scene in scenes: - scene.open() - feature_dict = feature_builder.build_features(scene=scene) - output_file = cache_path / f"{feature_dict['scenario_id']}.pkl" - with open(output_file, "wb") as f: - pickle.dump(feature_dict, f) - scene.close() - - return [] - - -if __name__ == "__main__": - main() diff --git a/d123/script/run_simulation.py b/d123/script/run_simulation.py deleted file mode 100644 index 76ed5e50..00000000 --- a/d123/script/run_simulation.py +++ /dev/null @@ -1,83 +0,0 @@ -import logging -import traceback -from pathlib import Path -from typing import Dict, List - -import hydra -import lightning as L -import pandas as pd -from omegaconf import DictConfig -from tqdm import tqdm - -from d123.common.multithreading.worker_utils import worker_map -from d123.datatypes.scene.abstract_scene import AbstractScene -from d123.script.builders.scene_builder_builder import build_scene_builder -from d123.script.builders.scene_filter_builder import build_scene_filter -from d123.script.run_dataset_conversion import build_worker -from 
d123.simulation.gym.demo_gym_env import DemoGymEnv -from d123.simulation.metrics.sim_agents.sim_agents import get_sim_agents_metrics - -logger = logging.getLogger(__name__) - -CONFIG_PATH = "config/preprocessing" -CONFIG_NAME = "default_preprocessing" - - -@hydra.main(config_path=CONFIG_PATH, config_name=CONFIG_NAME, version_base=None) -def main(cfg: DictConfig) -> None: - - L.seed_everything(cfg.seed, workers=True) - - worker = build_worker(cfg) - scene_filter = build_scene_filter(cfg.scene_filter) - scene_builder = build_scene_builder(cfg.scene_builder) - - scenes = scene_builder.get_scenes(scene_filter, worker=worker) - logger.info(f"Found {len(scenes)} scenes.") - - results = worker_map(worker, _run_simulation, scenes) - - df = pd.DataFrame(results) - avg_row = df.drop(columns=["token"]).mean(numeric_only=True) - avg_row["token"] = "average" - df = pd.concat([df, pd.DataFrame([avg_row])], ignore_index=True) - - output_dir = Path(cfg.output_dir) - df.to_csv(output_dir / "sim_agent_results.csv") - - -def _run_simulation(scenes: List[AbstractScene]) -> List[Dict[str, float]]: - - action = [1.0, 0.1] # Placeholder action, replace with actual action logic - env = DemoGymEnv(scenes) - - results = [] - - for scene in tqdm(scenes): - try: - - agent_rollouts = [] - - map_api, ego_state, detection_observation, current_scene = env.reset(scene) - agent_rollouts.append(detection_observation.box_detections) - - result = {} - result["token"] = scene.token - for i in range(150): - ego_state, detection_observation, end = env.step(action) - agent_rollouts.append(detection_observation.box_detections) - if end: - break - result.update(get_sim_agents_metrics(current_scene, agent_rollouts)) - results.append(result) - except Exception: - print(current_scene.token) - traceback.print_exc() - continue - - scene.close() - return results - - -if __name__ == "__main__": - main() diff --git a/d123/script/run_training.py b/d123/script/run_training.py deleted file mode 100644 index c42a4b44..00000000 --- a/d123/script/run_training.py +++ /dev/null @@ -1,80 +0,0 @@ -import logging -from typing import List - -import hydra -import lightning as L -from lightning import Callback, LightningDataModule, LightningModule, Trainer -from omegaconf import DictConfig - -logger = logging.getLogger(__name__) - -CONFIG_PATH = "config/lightning_training" -CONFIG_NAME = "default_lightning_training" - - -def instantiate_callbacks(callbacks_cfg: DictConfig) -> List[Callback]: - """Instantiates callbacks from config. - - :param callbacks_cfg: A DictConfig object containing callback configurations. - :return: A list of instantiated callbacks. - """ - callbacks: List[Callback] = [] - - if not callbacks_cfg: - logger.warning("No callback configs found! 
Skipping..") - return callbacks - - if not isinstance(callbacks_cfg, DictConfig): - raise TypeError("Callbacks config must be a DictConfig!") - - for _, cb_conf in callbacks_cfg.items(): - if isinstance(cb_conf, DictConfig) and "_target_" in cb_conf: - logger.info(f"Instantiating callback <{cb_conf._target_}>") - callbacks.append(hydra.utils.instantiate(cb_conf)) - - return callbacks - - -@hydra.main(config_path=CONFIG_PATH, config_name=CONFIG_NAME, version_base=None) -def main(cfg: DictConfig) -> None: - L.seed_everything(cfg.seed, workers=True) - - logger.info(f"Instantiating datamodule <{cfg.data._target_}>") - datamodule: LightningDataModule = hydra.utils.instantiate(cfg.data) - - logger.info(f"Instantiating model <{cfg.model._target_}>") - model: LightningModule = hydra.utils.instantiate(cfg.model) - - logger.info("Instantiating callbacks...") - callbacks: List[Callback] = instantiate_callbacks(cfg.callbacks) - - # logger.info(f"Instantiating loggers...") - # logger: List[Logger] = instantiate_loggers(cfg.get("logger")) - # # setup model watching - # for _logger in logger: - # if isinstance(_logger, WandbLogger): - # _logger.watch(model, log="all") - - logger.info(f"Instantiating trainer <{cfg.trainer._target_}>") - trainer: Trainer = hydra.utils.instantiate(cfg.trainer, callbacks=callbacks) - - logger.info(f"Resuming from ckpt: cfg.ckpt_path={cfg.ckpt_path}") - if cfg.action == "fit": - logger.info("Starting training!") - trainer.fit(model=model, datamodule=datamodule, ckpt_path=cfg.get("ckpt_path")) - # elif cfg.action == "finetune": - # logger.info("Starting finetuning!") - # model.load_state_dict(torch.load(cfg.ckpt_path)["state_dict"], strict=False) - # trainer.fit(model=model, datamodule=datamodule) - # elif cfg.action == "validate": - # logger.info("Starting validating!") - # trainer.validate( - # model=model, datamodule=datamodule, ckpt_path=cfg.get("ckpt_path") - # ) - # elif cfg.action == "test": - # logger.info("Starting testing!") - # trainer.test(model=model, datamodule=datamodule, ckpt_path=cfg.get("ckpt_path")) - - -if __name__ == "__main__": - main() diff --git a/d123/script/run_viser.py b/d123/script/run_viser.py index 87c3218a..04c900c7 100644 --- a/d123/script/run_viser.py +++ b/d123/script/run_viser.py @@ -6,7 +6,7 @@ from d123.common.visualization.viser.viser_viewer import ViserViewer from d123.script.builders.scene_builder_builder import build_scene_builder from d123.script.builders.scene_filter_builder import build_scene_filter -from d123.script.run_dataset_conversion import build_worker +from d123.script.run_conversion import build_worker logger = logging.getLogger(__name__) diff --git a/d123/simulation/agents/abstract_agents.py b/d123/simulation/agents/abstract_agents.py index 14c0f3b3..390bbab2 100644 --- a/d123/simulation/agents/abstract_agents.py +++ b/d123/simulation/agents/abstract_agents.py @@ -2,8 +2,8 @@ from typing import List, Optional from d123.common.datatypes.detection.detection import BoxDetection -from d123.datasets.maps.abstract_map import AbstractMap -from d123.datasets.scene.abstract_scene import AbstractScene +from d123.conversion.maps.abstract_map import AbstractMap +from d123.conversion.scene.abstract_scene import AbstractScene class AbstractAgents: diff --git a/d123/simulation/agents/constant_velocity_agents.py b/d123/simulation/agents/constant_velocity_agents.py index 57c48d01..13b4beff 100644 --- a/d123/simulation/agents/constant_velocity_agents.py +++ b/d123/simulation/agents/constant_velocity_agents.py @@ -3,8 +3,8 @@ from typing 
import List, Optional from d123.common.datatypes.detection.detection import BoxDetection, BoxDetectionSE2 -from d123.datasets.maps.abstract_map import AbstractMap -from d123.datasets.scene.abstract_scene import AbstractScene +from d123.conversion.maps.abstract_map import AbstractMap +from d123.conversion.scene.abstract_scene import AbstractScene from d123.geometry.bounding_box import BoundingBoxSE2 from d123.geometry.point import Point2D from d123.geometry.transform.tranform_2d import translate_along_yaw diff --git a/d123/simulation/agents/idm_agents.py b/d123/simulation/agents/idm_agents.py index 5227c10d..95d8263a 100644 --- a/d123/simulation/agents/idm_agents.py +++ b/d123/simulation/agents/idm_agents.py @@ -7,8 +7,8 @@ from shapely.geometry import CAP_STYLE, Polygon from d123.common.datatypes.detection.detection import BoxDetection, BoxDetectionSE2 -from d123.datasets.maps.abstract_map import AbstractMap -from d123.datasets.scene.abstract_scene import AbstractScene +from d123.conversion.maps.abstract_map import AbstractMap +from d123.conversion.scene.abstract_scene import AbstractScene from d123.datatypes.scene.arrow.utils.arrow_getters import BoxDetectionWrapper from d123.geometry.bounding_box import BoundingBoxSE2 from d123.geometry.point import Point2D diff --git a/d123/simulation/agents/path_following.py b/d123/simulation/agents/path_following.py index 661d4178..e6357a61 100644 --- a/d123/simulation/agents/path_following.py +++ b/d123/simulation/agents/path_following.py @@ -3,8 +3,8 @@ from typing import Dict, List, Optional from d123.common.datatypes.detection.detection import BoxDetection, BoxDetectionSE2 -from d123.datasets.maps.abstract_map import AbstractMap -from d123.datasets.scene.abstract_scene import AbstractScene +from d123.conversion.maps.abstract_map import AbstractMap +from d123.conversion.scene.abstract_scene import AbstractScene from d123.geometry.bounding_box import BoundingBoxSE2 from d123.geometry.point import Point2D from d123.geometry.polyline import PolylineSE2 diff --git a/d123/simulation/agents/smart_agents.py b/d123/simulation/agents/smart_agents.py index 778b144a..d160c2e7 100644 --- a/d123/simulation/agents/smart_agents.py +++ b/d123/simulation/agents/smart_agents.py @@ -6,8 +6,8 @@ from torch_geometric.data import HeteroData from d123.common.datatypes.detection.detection import BoxDetection, BoxDetectionSE2 -from d123.datasets.maps.abstract_map import AbstractMap -from d123.datasets.scene.abstract_scene import AbstractScene +from d123.conversion.maps.abstract_map import AbstractMap +from d123.conversion.scene.abstract_scene import AbstractScene from d123.datatypes.scene.arrow.utils.arrow_getters import BoxDetectionWrapper, DetectionType from d123.geometry.bounding_box import BoundingBoxSE2 from d123.geometry.se import StateSE2 diff --git a/d123/simulation/controller/abstract_controller.py b/d123/simulation/controller/abstract_controller.py index eaa5aed5..db0315e6 100644 --- a/d123/simulation/controller/abstract_controller.py +++ b/d123/simulation/controller/abstract_controller.py @@ -1,7 +1,7 @@ import abc from d123.common.datatypes.vehicle_state.ego_state import EgoStateSE2 -from d123.datasets.scene.abstract_scene import AbstractScene +from d123.conversion.scene.abstract_scene import AbstractScene from d123.simulation.planning.planner_output.abstract_planner_output import AbstractPlannerOutput from d123.simulation.time_controller.simulation_iteration import SimulationIteration diff --git a/d123/simulation/controller/action_controller.py 
b/d123/simulation/controller/action_controller.py index 5e924c78..a598358c 100644 --- a/d123/simulation/controller/action_controller.py +++ b/d123/simulation/controller/action_controller.py @@ -1,7 +1,7 @@ from typing import Optional from d123.common.datatypes.vehicle_state.ego_state import EgoStateSE2 -from d123.datasets.scene.abstract_scene import AbstractScene +from d123.conversion.scene.abstract_scene import AbstractScene from d123.simulation.controller.abstract_controller import AbstractEgoController from d123.simulation.controller.motion_model.abstract_motion_model import AbstractMotionModel from d123.simulation.planning.planner_output.abstract_planner_output import AbstractPlannerOutput diff --git a/d123/simulation/gym/demo_gym_env.py b/d123/simulation/gym/demo_gym_env.py index 030c0f69..6497b825 100644 --- a/d123/simulation/gym/demo_gym_env.py +++ b/d123/simulation/gym/demo_gym_env.py @@ -9,8 +9,8 @@ from nuplan.planning.simulation.controller.motion_model.kinematic_bicycle import KinematicBicycleModel from d123.common.datatypes.recording.detection_recording import DetectionRecording -from d123.datasets.maps.abstract_map import AbstractMap -from d123.datasets.scene.abstract_scene import AbstractScene +from d123.conversion.maps.abstract_map import AbstractMap +from d123.conversion.scene.abstract_scene import AbstractScene from d123.datatypes.scene.arrow.utils.arrow_getters import EgoStateSE3 from d123.simulation.observation.abstract_observation import AbstractObservation from d123.simulation.observation.agents_observation import AgentsObservation diff --git a/d123/simulation/gym/environment/helper/environment_cache.py b/d123/simulation/gym/environment/helper/environment_cache.py index 00672828..29f2d4c0 100644 --- a/d123/simulation/gym/environment/helper/environment_cache.py +++ b/d123/simulation/gym/environment/helper/environment_cache.py @@ -14,8 +14,8 @@ from d123.common.datatypes.detection.detection_types import DetectionType from d123.common.datatypes.recording.detection_recording import DetectionRecording from d123.common.datatypes.vehicle_state.ego_state import EgoStateSE2 -from d123.datasets.maps.abstract_map import AbstractMap -from d123.datasets.maps.abstract_map_objects import ( +from d123.conversion.maps.abstract_map import AbstractMap +from d123.conversion.maps.abstract_map_objects import ( AbstractCarpark, AbstractCrosswalk, AbstractIntersection, @@ -23,7 +23,7 @@ AbstractLaneGroup, AbstractStopLine, ) -from d123.datasets.maps.map_datatypes import MapLayer +from d123.conversion.maps.map_datatypes import MapLayer from d123.geometry.occupancy_map import OccupancyMap2D from d123.geometry.se import StateSE2 from d123.simulation.gym.environment.helper.environment_area import AbstractEnvironmentArea diff --git a/d123/simulation/gym/gym_env.py b/d123/simulation/gym/gym_env.py index cf9621c4..d539df8e 100644 --- a/d123/simulation/gym/gym_env.py +++ b/d123/simulation/gym/gym_env.py @@ -5,8 +5,8 @@ from d123.common.datatypes.recording.detection_recording import DetectionRecording from d123.common.datatypes.vehicle_state.ego_state import DynamicStateSE2, EgoStateSE2 -from d123.datasets.maps.abstract_map import AbstractMap -from d123.datasets.scene.abstract_scene import AbstractScene +from d123.conversion.maps.abstract_map import AbstractMap +from d123.conversion.scene.abstract_scene import AbstractScene from d123.geometry.vector import Vector2D from d123.simulation.controller.motion_model.kinematic_bicycle_model import KinematicBicycleModel from 
d123.simulation.observation.abstract_observation import AbstractObservation diff --git a/d123/simulation/history/simulation_history.py b/d123/simulation/history/simulation_history.py index b5488780..22f779b7 100644 --- a/d123/simulation/history/simulation_history.py +++ b/d123/simulation/history/simulation_history.py @@ -5,7 +5,7 @@ from d123.common.datatypes.recording.detection_recording import DetectionRecording from d123.common.datatypes.vehicle_state.ego_state import EgoStateSE2 -from d123.datasets.scene.abstract_scene import AbstractScene +from d123.conversion.scene.abstract_scene import AbstractScene from d123.simulation.planning.planner_output.abstract_planner_output import AbstractPlannerOutput from d123.simulation.time_controller.simulation_iteration import SimulationIteration diff --git a/d123/simulation/history/simulation_history_buffer.py b/d123/simulation/history/simulation_history_buffer.py index 21bfdf7d..f98f3844 100644 --- a/d123/simulation/history/simulation_history_buffer.py +++ b/d123/simulation/history/simulation_history_buffer.py @@ -6,7 +6,7 @@ from d123.common.datatypes.recording.abstract_recording import Recording from d123.common.datatypes.recording.detection_recording import DetectionRecording from d123.common.datatypes.vehicle_state.ego_state import EgoStateSE2 -from d123.datasets.scene.abstract_scene import AbstractScene +from d123.conversion.scene.abstract_scene import AbstractScene class Simulation2DHistoryBuffer: diff --git a/d123/simulation/metrics/sim_agents/map_based.py b/d123/simulation/metrics/sim_agents/map_based.py index d204eeb3..8c5c0673 100644 --- a/d123/simulation/metrics/sim_agents/map_based.py +++ b/d123/simulation/metrics/sim_agents/map_based.py @@ -4,9 +4,9 @@ import numpy.typing as npt import shapely -from d123.datasets.maps.abstract_map import AbstractMap -from d123.datasets.maps.abstract_map_objects import AbstractLane -from d123.datasets.maps.map_datatypes import MapLayer +from d123.conversion.maps.abstract_map import AbstractMap +from d123.conversion.maps.abstract_map_objects import AbstractLane +from d123.conversion.maps.map_datatypes import MapLayer from d123.geometry.geometry_index import BoundingBoxSE2Index, Corners2DIndex, StateSE2Index from d123.geometry.se import StateSE2 from d123.geometry.utils.bounding_box_utils import bbse2_array_to_corners_array diff --git a/d123/simulation/metrics/sim_agents/sim_agents.py b/d123/simulation/metrics/sim_agents/sim_agents.py index 5c9f3f63..a06b7435 100644 --- a/d123/simulation/metrics/sim_agents/sim_agents.py +++ b/d123/simulation/metrics/sim_agents/sim_agents.py @@ -5,8 +5,8 @@ import numpy.typing as npt from d123.common.datatypes.detection.detection import BoxDetection, BoxDetectionWrapper, DetectionType -from d123.datasets.maps.abstract_map import AbstractMap -from d123.datasets.scene.abstract_scene import AbstractScene +from d123.conversion.maps.abstract_map import AbstractMap +from d123.conversion.scene.abstract_scene import AbstractScene from d123.geometry.geometry_index import BoundingBoxSE2Index from d123.simulation.metrics.sim_agents.histogram_metric import ( BinaryHistogramIntersectionMetric, diff --git a/d123/simulation/metrics/sim_agents/utils.py b/d123/simulation/metrics/sim_agents/utils.py index a08af428..2d1c6ded 100644 --- a/d123/simulation/metrics/sim_agents/utils.py +++ b/d123/simulation/metrics/sim_agents/utils.py @@ -4,7 +4,7 @@ import numpy.typing as npt from d123.common.datatypes.detection.detection import BoxDetectionWrapper -from d123.datasets.scene.abstract_scene import 
AbstractScene +from d123.conversion.scene.abstract_scene import AbstractScene from d123.geometry.geometry_index import BoundingBoxSE2Index diff --git a/d123/simulation/observation/abstract_observation.py b/d123/simulation/observation/abstract_observation.py index ae8f0293..3d2ad387 100644 --- a/d123/simulation/observation/abstract_observation.py +++ b/d123/simulation/observation/abstract_observation.py @@ -5,7 +5,7 @@ from d123.common.datatypes.recording.abstract_recording import Recording from d123.common.datatypes.recording.detection_recording import DetectionRecording from d123.common.datatypes.vehicle_state.ego_state import EgoStateSE2 -from d123.datasets.scene.abstract_scene import AbstractScene +from d123.conversion.scene.abstract_scene import AbstractScene from d123.simulation.time_controller.simulation_iteration import SimulationIteration diff --git a/d123/simulation/observation/agents_observation.py b/d123/simulation/observation/agents_observation.py index d42d2aeb..c63fe222 100644 --- a/d123/simulation/observation/agents_observation.py +++ b/d123/simulation/observation/agents_observation.py @@ -5,7 +5,7 @@ from d123.common.datatypes.recording.abstract_recording import Recording from d123.common.datatypes.recording.detection_recording import DetectionRecording from d123.common.datatypes.vehicle_state.ego_state import EgoStateSE2 -from d123.datasets.scene.abstract_scene import AbstractScene +from d123.conversion.scene.abstract_scene import AbstractScene from d123.datatypes.scene.arrow.utils.arrow_getters import BoxDetectionWrapper from d123.simulation.agents.abstract_agents import AbstractAgents diff --git a/d123/simulation/observation/log_replay_observation.py b/d123/simulation/observation/log_replay_observation.py index 0a986ca3..b9f41fae 100644 --- a/d123/simulation/observation/log_replay_observation.py +++ b/d123/simulation/observation/log_replay_observation.py @@ -3,7 +3,7 @@ from d123.common.datatypes.recording.abstract_recording import Recording from d123.common.datatypes.recording.detection_recording import DetectionRecording from d123.common.datatypes.vehicle_state.ego_state import EgoStateSE2 -from d123.datasets.scene.abstract_scene import AbstractScene +from d123.conversion.scene.abstract_scene import AbstractScene from d123.simulation.observation.abstract_observation import AbstractObservation from d123.simulation.time_controller.simulation_iteration import SimulationIteration diff --git a/d123/simulation/planning/abstract_planner.py b/d123/simulation/planning/abstract_planner.py index 3a0f486d..c7a56569 100644 --- a/d123/simulation/planning/abstract_planner.py +++ b/d123/simulation/planning/abstract_planner.py @@ -4,7 +4,7 @@ from dataclasses import dataclass from typing import List -from d123.datasets.maps.abstract_map import AbstractMap +from d123.conversion.maps.abstract_map import AbstractMap from d123.simulation.history.simulation_history_buffer import Simulation2DHistoryBuffer from d123.simulation.time_controller.simulation_iteration import SimulationIteration diff --git a/d123/simulation/simulation_2d.py b/d123/simulation/simulation_2d.py index ab0a81c8..2c15dcd0 100644 --- a/d123/simulation/simulation_2d.py +++ b/d123/simulation/simulation_2d.py @@ -3,7 +3,7 @@ import logging from typing import Any, Optional, Tuple, Type -from d123.datasets.scene.abstract_scene import AbstractScene +from d123.conversion.scene.abstract_scene import AbstractScene from d123.simulation.callback.abstract_callback import AbstractCallback from d123.simulation.callback.multi_callback 
import MultiCallback from d123.simulation.history.simulation_history import Simulation2DHistory, Simulation2DHistorySample diff --git a/d123/simulation/time_controller/abstract_time_controller.py b/d123/simulation/time_controller/abstract_time_controller.py index 1da25641..135da686 100644 --- a/d123/simulation/time_controller/abstract_time_controller.py +++ b/d123/simulation/time_controller/abstract_time_controller.py @@ -3,7 +3,7 @@ import abc from typing import Tuple -from d123.datasets.scene.abstract_scene import AbstractScene +from d123.conversion.scene.abstract_scene import AbstractScene from d123.simulation.time_controller.simulation_iteration import SimulationIteration diff --git a/d123/simulation/time_controller/log_time_controller.py b/d123/simulation/time_controller/log_time_controller.py index cb853418..3be18673 100644 --- a/d123/simulation/time_controller/log_time_controller.py +++ b/d123/simulation/time_controller/log_time_controller.py @@ -1,6 +1,6 @@ from typing import Optional, Tuple -from d123.datasets.scene.abstract_scene import AbstractScene +from d123.conversion.scene.abstract_scene import AbstractScene from d123.simulation.time_controller.abstract_time_controller import ( AbstractTimeController, ) diff --git a/d123/training/feature_builder/smart_feature_builder.py b/d123/training/feature_builder/smart_feature_builder.py index f4e7e10b..0dc94d03 100644 --- a/d123/training/feature_builder/smart_feature_builder.py +++ b/d123/training/feature_builder/smart_feature_builder.py @@ -8,14 +8,14 @@ from d123.common.datatypes.detection.detection import BoxDetection, BoxDetectionWrapper from d123.common.datatypes.detection.detection_types import DetectionType from d123.common.visualization.color.default import TrafficLightStatus -from d123.datasets.maps.abstract_map import MapLayer -from d123.datasets.maps.abstract_map_objects import ( +from d123.conversion.maps.abstract_map import MapLayer +from d123.conversion.maps.abstract_map_objects import ( AbstractCarpark, AbstractCrosswalk, AbstractGenericDrivable, AbstractLaneGroup, ) -from d123.datasets.scene.abstract_scene import AbstractScene +from d123.conversion.scene.abstract_scene import AbstractScene from d123.geometry import BoundingBoxSE2, PolylineSE2, StateSE2 from d123.geometry.geometry_index import StateSE2Index from d123.geometry.transform.transform_se2 import convert_absolute_to_relative_se2_array diff --git a/notebooks/av2/delete_me.ipynb b/notebooks/av2/delete_me.ipynb index 8d224181..c3f0188f 100644 --- a/notebooks/av2/delete_me.ipynb +++ b/notebooks/av2/delete_me.ipynb @@ -126,8 +126,8 @@ "# # # 4. 
sensors\n", "# # print(_ls(log_folder))\n", "\n", - "# # from d123.datasets.av2.av2_data_converter import AV2SensorDataConverter\n", - "# from d123.datasets.av2.av2_data_converter import AV2SensorDataConverter\n", + "# # from d123.conversion.av2.av2_data_converter import AV2SensorDataConverter\n", + "# from d123.conversion.av2.av2_data_converter import AV2SensorDataConverter\n", "\n", "# # AV2SensorDataConverter([])" ] @@ -197,8 +197,8 @@ "# Testing sensor syn dataframes\n", "\n", "from typing import Optional\n", - "from d123.datasets.av2.av2_constants import AV2_CAMERA_TYPE_MAPPING\n", - "from d123.datasets.av2.av2_helper import build_sensor_dataframe, build_synchronization_dataframe\n", + "from d123.conversion.av2.av2_constants import AV2_CAMERA_TYPE_MAPPING\n", + "from d123.conversion.av2.av2_helper import build_sensor_dataframe, build_synchronization_dataframe\n", "\n", "\n", "sensor_df = build_sensor_dataframe(log_folder)\n", diff --git a/notebooks/av2/delete_me_map.ipynb b/notebooks/av2/delete_me_map.ipynb index 2e8dba42..ce3ca200 100644 --- a/notebooks/av2/delete_me_map.ipynb +++ b/notebooks/av2/delete_me_map.ipynb @@ -59,7 +59,7 @@ "from typing import Dict, List\n", "\n", "from d123.geometry.line.polylines import Polyline3D\n", - "from d123.datasets.av2.av2_map_conversion import _extract_lane_group_dict\n", + "from d123.conversion.av2.av2_map_conversion import _extract_lane_group_dict\n", "\n", "\n", "def _extract_polyline(data: List[Dict[str, float]], close: bool = False) -> Polyline3D:\n", diff --git a/notebooks/deprecated/extraction_testing.ipynb b/notebooks/deprecated/extraction_testing.ipynb index b4c2d5bb..6b08c67f 100644 --- a/notebooks/deprecated/extraction_testing.ipynb +++ b/notebooks/deprecated/extraction_testing.ipynb @@ -17,7 +17,7 @@ "from d123.common.multithreading.worker_pool import WorkerPool\n", "\n", "from d123.dataset.arrow.helper import open_arrow_arrow_table\n", - "from d123.datasets.nuplan.nuplan_data_processor import worker_map\n", + "from d123.conversion.nuplan.nuplan_data_processor import worker_map\n", "from d123.dataset.logs.log_metadata import LogMetadata\n", "from d123.dataset.scene.abstract_scene import AbstractScene\n", "from d123.dataset.scene.arrow_scene import ArrowScene, SceneExtractionInfo\n", diff --git a/notebooks/deprecated/map_conversion/test_nuplan_conversion.ipynb b/notebooks/deprecated/map_conversion/test_nuplan_conversion.ipynb index 8c9339ad..780092ce 100644 --- a/notebooks/deprecated/map_conversion/test_nuplan_conversion.ipynb +++ b/notebooks/deprecated/map_conversion/test_nuplan_conversion.ipynb @@ -7,7 +7,7 @@ "outputs": [], "source": [ "from pathlib import Path\n", - "from d123.datasets.nuplan.nuplan_map_conversion import NuPlanMapConverter, MAP_LOCATIONS\n", + "from d123.conversion.nuplan.nuplan_map_conversion import NuPlanMapConverter, MAP_LOCATIONS\n", "\n", "\n", "\n", diff --git a/notebooks/deprecated/test_waypoints.ipynb b/notebooks/deprecated/test_waypoints.ipynb index cdafea69..08e21ba0 100644 --- a/notebooks/deprecated/test_waypoints.ipynb +++ b/notebooks/deprecated/test_waypoints.ipynb @@ -60,7 +60,7 @@ "from shapely.geometry import LineString\n", "import numpy as np\n", "from matplotlib import pyplot as plt\n", - "from d123.datasets.carla.carla_data_processor import _load_json_gz \n", + "from d123.conversion.carla.carla_data_processor import _load_json_gz \n", "from d123.common.visualization.matplotlib.plots import _plot_scene_on_ax\n", "json_dict = 
_load_json_gz(\"/home/daniel/carla_workspace/data/_Rep0_longest1_route0_06_13_17_21_21/boxes/0000000002.json.gz\")\n", "json_dict\n", diff --git a/notebooks/nuplan/nuplan_sensor_loading.ipynb b/notebooks/nuplan/nuplan_sensor_loading.ipynb index 939097bf..c1e419a9 100644 --- a/notebooks/nuplan/nuplan_sensor_loading.ipynb +++ b/notebooks/nuplan/nuplan_sensor_loading.ipynb @@ -35,7 +35,7 @@ "metadata": {}, "outputs": [], "source": [ - "from d123.datasets.nuplan.nuplan_data_converter import NuplanDataConverter" + "from d123.conversion.nuplan.nuplan_converter import NuplanDataConverter" ] }, { @@ -266,7 +266,7 @@ "outputs": [], "source": [ "from nuplan.database.nuplan_db_orm.nuplandb import NuPlanDB\n", - "\n", + "import os\n", "NUPLAN_DATA_ROOT = Path(os.environ[\"NUPLAN_DATA_ROOT\"])\n", "NUPLAN_SENSOR_ROOT = Path(\"/media/nvme1/nuplan/dataset/nuplan-v1.1/sensor_blobs\")\n", "\n", diff --git a/notebooks/waymo_perception/map_testing.ipynb b/notebooks/waymo_perception/map_testing.ipynb index ed242b08..6bb42684 100644 --- a/notebooks/waymo_perception/map_testing.ipynb +++ b/notebooks/waymo_perception/map_testing.ipynb @@ -241,7 +241,7 @@ "metadata": {}, "outputs": [], "source": [ - "from d123.datasets.wopd.wopd_map_utils import extract_lane_boundaries\n", + "from d123.conversion.wopd.wopd_map_utils import extract_lane_boundaries\n", "\n", "\n", "left_boundaries, right_boundaries = extract_lane_boundaries(\n", diff --git a/pyproject.toml b/pyproject.toml index 4e2da9e5..2e5e5149 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -51,6 +51,7 @@ dependencies = [ [project.scripts] d123-viser = "d123.script.run_viser:main" +d123-conversion = "d123.script.run_conversion:main" [project.optional-dependencies] diff --git a/scripts/dataset/run_log_caching.sh b/scripts/dataset/run_log_caching.sh index 923aaeca..834d98fc 100644 --- a/scripts/dataset/run_log_caching.sh +++ b/scripts/dataset/run_log_caching.sh @@ -1,5 +1,4 @@ -python $D123_DEVKIT_ROOT/d123/script/run_dataset_conversion.py \ -worker.threads_per_node=32 +python $D123_DEVKIT_ROOT/d123/script/run_conversion.py diff --git a/test_viser.py b/test_viser.py index 8b6f7019..abdeac38 100644 --- a/test_viser.py +++ b/test_viser.py @@ -4,14 +4,14 @@ from d123.common.visualization.viser.viser_viewer import ViserViewer from d123.datatypes.scene.arrow.arrow_scene_builder import ArrowSceneBuilder from d123.datatypes.scene.scene_filter import SceneFilter -from d123.datatypes.sensors.camera.pinhole_camera import PinholeCameraType if __name__ == "__main__": + splits = ["nuplan_mini_test", "nuplan_mini_train", "nuplan_mini_val"] # splits = ["nuplan_private_test"] # splits = ["carla"] # splits = ["wopd_train"] - splits = ["av2-sensor-mini_train"] + # splits = ["av2-sensor-mini_train"] log_names = None scene_tokens = None @@ -19,11 +19,12 @@ split_names=splits, log_names=log_names, scene_tokens=scene_tokens, - duration_s=None, - history_s=None, - timestamp_threshold_s=None, + duration_s=10.0, + history_s=0.0, + timestamp_threshold_s=10.0, shuffle=True, - camera_types=[PinholeCameraType.CAM_F0], + # camera_types=[PinholeCameraType.CAM_F0], + camera_types=None, ) scene_builder = ArrowSceneBuilder(os.environ["D123_DATA_ROOT"]) worker = Sequential() From 35100b922fd59d5427f1142f26f029fbd819198b Mon Sep 17 00:00:00 2001 From: Daniel Dauner Date: Sat, 11 Oct 2025 16:26:40 +0200 Subject: [PATCH 067/145] Make some adjustments to the nuPlan converter, mostly concerning speed and memory usage. 
(#52) --- .../datasets/carla/carla_data_converter.py | 4 +- .../datasets/nuplan/nuplan_converter.py | 365 +++++------------- .../datasets/nuplan/utils/__init__.py | 0 .../nuplan/utils/nuplan_sql_helper.py | 130 +++++++ .../sensor_utils/lidar_index_registry.py | 4 +- .../datasets/nuplan_mini_dataset.yaml | 4 +- .../config/conversion/default_conversion.yaml | 8 +- 7 files changed, 229 insertions(+), 286 deletions(-) create mode 100644 d123/conversion/datasets/nuplan/utils/__init__.py create mode 100644 d123/conversion/datasets/nuplan/utils/nuplan_sql_helper.py diff --git a/d123/conversion/datasets/carla/carla_data_converter.py b/d123/conversion/datasets/carla/carla_data_converter.py index fedb027e..11820c0f 100644 --- a/d123/conversion/datasets/carla/carla_data_converter.py +++ b/d123/conversion/datasets/carla/carla_data_converter.py @@ -15,7 +15,7 @@ from d123.common.utils.arrow_helper import open_arrow_table, write_arrow_table from d123.conversion.abstract_dataset_converter import AbstractDataConverter, DatasetConverterConfig from d123.conversion.utils.map_utils.opendrive.opendrive_map_conversion import convert_from_xodr -from d123.conversion.utils.sensor.lidar_index_registry import CarlaLidarIndex +from d123.conversion.utils.sensor.lidar_index_registry import CARLALidarIndex from d123.datatypes.maps.abstract_map import AbstractMap, MapLayer from d123.datatypes.maps.abstract_map_objects import AbstractLane from d123.datatypes.maps.gpkg.gpkg_map import get_global_map_api @@ -279,7 +279,7 @@ def get_carla_lidar_metadata(first_log_dict: Dict[str, Any]) -> Dict[LiDARType, lidar_metadata = { LiDARType.LIDAR_TOP: LiDARMetadata( lidar_type=LiDARType.LIDAR_TOP, - lidar_index=CarlaLidarIndex, + lidar_index=CARLALidarIndex, extrinsic=None, ) } diff --git a/d123/conversion/datasets/nuplan/nuplan_converter.py b/d123/conversion/datasets/nuplan/nuplan_converter.py index fda5d2dd..4201dede 100644 --- a/d123/conversion/datasets/nuplan/nuplan_converter.py +++ b/d123/conversion/datasets/nuplan/nuplan_converter.py @@ -1,4 +1,3 @@ -import logging import os import pickle import uuid @@ -7,26 +6,26 @@ import numpy as np import yaml -from pyparsing import Generator import d123.conversion.datasets.nuplan.utils as nuplan_utils from d123.common.utils.dependencies import check_dependencies -from d123.common.utils.timer import Timer from d123.conversion.abstract_dataset_converter import AbstractDatasetConverter from d123.conversion.dataset_converter_config import DatasetConverterConfig from d123.conversion.datasets.nuplan.nuplan_constants import ( NUPLAN_DATA_SPLITS, NUPLAN_DEFAULT_DT, - NUPLAN_DETECTION_NAME_DICT, NUPLAN_MAP_LOCATIONS, NUPLAN_ROLLING_SHUTTER_S, NUPLAN_TRAFFIC_STATUS_DICT, ) from d123.conversion.datasets.nuplan.nuplan_map_conversion import NuPlanMapConverter +from d123.conversion.datasets.nuplan.utils.nuplan_sql_helper import ( + get_box_detections_for_lidarpc_token_from_db, + get_nearest_ego_pose_for_timestamp_from_db, +) from d123.conversion.log_writer.abstract_log_writer import AbstractLogWriter from d123.conversion.utils.sensor_utils.lidar_index_registry import NuPlanLidarIndex from d123.datatypes.detections.detection import ( - BoxDetectionMetadata, BoxDetectionSE3, BoxDetectionWrapper, TrafficLightDetection, @@ -46,23 +45,14 @@ get_nuplan_chrysler_pacifica_parameters, rear_axle_se3_to_center_se3, ) -from d123.geometry import BoundingBoxSE3, EulerAngles, StateSE3, Vector3D -from d123.geometry.utils.constants import DEFAULT_PITCH, DEFAULT_ROLL +from d123.geometry import StateSE3, Vector3D 
check_dependencies(["nuplan", "sqlalchemy"], "nuplan") -from nuplan.database.nuplan_db.nuplan_scenario_queries import ( - get_cameras, - get_images_from_lidar_tokens, -) -from nuplan.database.nuplan_db.query_session import execute_many, execute_one +from nuplan.database.nuplan_db.nuplan_scenario_queries import get_cameras, get_images_from_lidar_tokens from nuplan.database.nuplan_db_orm.lidar_pc import LidarPc from nuplan.database.nuplan_db_orm.nuplandb import NuPlanDB from nuplan.planning.simulation.observation.observation_type import CameraChannel -logging.basicConfig(level=logging.INFO) -logger = logging.getLogger(__name__) - - # NOTE: Leaving this constant here, to avoid having a nuplan dependency in nuplan_constants.py NUPLAN_CAMERA_MAPPING = { PinholeCameraType.CAM_F0: CameraChannel.CAM_F0, @@ -113,8 +103,9 @@ def __init__( def _collect_split_log_path_pairs(self) -> List[Tuple[str, List[Path]]]: # NOTE: the nuplan mini folder has an internal train, val, test structure, all stored in "mini". # The complete dataset is saved in the "trainval" folder (train and val), or in the "test" folder (for test). + # Thus, we need filter the logs in a split, based on the internal nuPlan configuration. split_log_path_pairs: List[Tuple[str, List[Path]]] = [] - create_splits_logs() + log_names_per_split = create_splits_logs() for split in self._splits: split_type = split.split("_")[-1] @@ -127,15 +118,21 @@ def _collect_split_log_path_pairs(self) -> List[Tuple[str, List[Path]]]: elif split in ["nuplan_mini_train", "nuplan_mini_val", "nuplan_mini_test"]: nuplan_split_folder = self._nuplan_data_root / "nuplan-v1.1" / "splits" / "mini" elif split == "nuplan_private_test": + # TODO: Remove private split nuplan_split_folder = self._nuplan_data_root / "nuplan-v1.1" / "splits" / "private_test" - # set(split_type_log_names[split_type]) - # [log_path / f"{log_name}.db" for log_name in list(all_log_names & split_log_names)] - all_log_files_in_path = [log_file for log_file in nuplan_split_folder.glob("*.db")] - all_log_names = set([str(log_file.stem) for log_file in all_log_files_in_path]) - for log_name in list(all_log_names): + if split == "nuplan_private_test": + # TODO: Remove private split + valid_log_names = [str(log_file.stem) for log_file in all_log_files_in_path] + else: + all_log_files_in_path = [log_file for log_file in nuplan_split_folder.glob("*.db")] + all_log_names = set([str(log_file.stem) for log_file in all_log_files_in_path]) + log_names_in_split = set(log_names_per_split[split_type]) + valid_log_names = list(all_log_names & log_names_in_split) + + for log_name in valid_log_names: log_path = nuplan_split_folder / f"{log_name}.db" split_log_path_pairs.append((split, log_path)) @@ -189,103 +186,41 @@ def convert_log(self, log_index: int, log_writer: AbstractLogWriter) -> None: # 2. Prepare log writer overwrite_log = log_writer.reset(self.dataset_converter_config, log_metadata) - timer = Timer() - camera_timer = Timer() if overwrite_log: - counter = 0 step_interval: float = int(TARGET_DT / NUPLAN_DEFAULT_DT) - total = len(nuplan_log_db.lidar_pc[::step_interval]) for nuplan_lidar_pc in nuplan_log_db.lidar_pc[::step_interval]: - timer.start() lidar_pc_token: str = nuplan_lidar_pc.token - token = lidar_pc_token - timer.log("1. lidar_pc_token") - - timestamp = TimePoint.from_us(nuplan_lidar_pc.timestamp) - timer.log("1. time point") - - ego_state = _extract_nuplan_ego_state(nuplan_lidar_pc) - timer.log("1. 
ego_state") - - box_detections = _extract_nuplan_box_detections(nuplan_lidar_pc, source_log_path) - timer.log("1. box_detections") - - traffic_lights = _extract_nuplan_traffic_lights(nuplan_log_db, lidar_pc_token) - timer.log("1. traffic_lights") - - cameras = _extract_nuplan_cameras( - nuplan_log_db=nuplan_log_db, - nuplan_lidar_pc=nuplan_lidar_pc, - source_log_path=source_log_path, - nuplan_sensor_root=self._nuplan_sensor_root, - dataset_converter_config=self.dataset_converter_config, - timer=camera_timer, - ) - timer.log("1. cameras") - - lidars = _extract_nuplan_lidars( - nuplan_lidar_pc=nuplan_lidar_pc, - nuplan_sensor_root=self._nuplan_sensor_root, - dataset_converter_config=self.dataset_converter_config, - ) - timer.log("1. lidars") - - scenario_tags = _extract_nuplan_scenario_tag(nuplan_log_db, lidar_pc_token) - timer.log("1. scenario_tags") - - route_lane_group_ids = _extract_nuplan_route_lane_group_ids(nuplan_lidar_pc) - timer.log("1. route_lane_group_ids") - log_writer.write( - token=token, - timestamp=timestamp, - ego_state=ego_state, - box_detections=box_detections, - traffic_lights=traffic_lights, - cameras=cameras, - lidars=lidars, - scenario_tags=scenario_tags, - route_lane_group_ids=route_lane_group_ids, + token=lidar_pc_token, + timestamp=TimePoint.from_us(nuplan_lidar_pc.timestamp), + ego_state=_extract_nuplan_ego_state(nuplan_lidar_pc), + box_detections=_extract_nuplan_box_detections(nuplan_lidar_pc, source_log_path), + traffic_lights=_extract_nuplan_traffic_lights(nuplan_log_db, lidar_pc_token), + cameras=_extract_nuplan_cameras( + nuplan_log_db=nuplan_log_db, + nuplan_lidar_pc=nuplan_lidar_pc, + source_log_path=source_log_path, + nuplan_sensor_root=self._nuplan_sensor_root, + dataset_converter_config=self.dataset_converter_config, + ), + lidars=_extract_nuplan_lidars( + nuplan_lidar_pc=nuplan_lidar_pc, + nuplan_sensor_root=self._nuplan_sensor_root, + dataset_converter_config=self.dataset_converter_config, + ), + scenario_tags=_extract_nuplan_scenario_tag(nuplan_log_db, lidar_pc_token), + route_lane_group_ids=_extract_nuplan_route_lane_group_ids(nuplan_lidar_pc), ) - timer.log("2. Write Data") - timer.end() - - # log_writer.write( - # token=lidar_pc_token, - # timestamp=TimePoint.from_us(nuplan_lidar_pc.timestamp), - # ego_state=_extract_nuplan_ego_state(nuplan_lidar_pc), - # box_detections=_extract_nuplan_box_detections(nuplan_lidar_pc), - # traffic_lights=_extract_nuplan_traffic_lights(nuplan_log_db, lidar_pc_token), - # cameras=_extract_nuplan_cameras( - # nuplan_log_db=nuplan_log_db, - # nuplan_lidar_pc=nuplan_lidar_pc, - # source_log_path=source_log_path, - # nuplan_sensor_root=self._nuplan_sensor_root, - # dataset_converter_config=self.dataset_converter_config, - # ), - # lidars=_extract_nuplan_lidars( - # nuplan_lidar_pc=nuplan_lidar_pc, - # nuplan_sensor_root=self._nuplan_sensor_root, - # dataset_converter_config=self.dataset_converter_config, - # ), - # scenario_tags=_extract_nuplan_scenario_tag(nuplan_log_db, lidar_pc_token), - # route_lane_group_ids=_extract_nuplan_route_lane_group_ids(nuplan_lidar_pc), - # ) del nuplan_lidar_pc - # logger.info(f"Finished processing scenarios for thread_id={thread_id}, {counter + 1}/{total}") - counter += 1 - - # logger.info(timer) - logger.info(camera_timer) log_writer.close() + # NOTE: The nuPlanDB class has several internal references, which makes memory management tricky. + # We need to ensure all references are released properly. It is not always working with just del. 
nuplan_log_db.detach_tables() nuplan_log_db.remove_ref() - assert nuplan_log_db._refcount == 0, "NuPlanDB still has references, potential memory leak." - del nuplan_log_db @@ -376,17 +311,14 @@ def _extract_nuplan_ego_state(nuplan_lidar_pc: LidarPc) -> EgoStateSE3: ) -def _extract_nuplan_box_detections(lidar_pc: LidarPc, source_log_file: Path) -> BoxDetectionWrapper: - # tracked_objects = list(get_tracked_objects_for_lidarpc_token_from_db(source_log_file, lidar_pc.token)) - +def _extract_nuplan_box_detections(lidar_pc: LidarPc, source_log_path: Path) -> BoxDetectionWrapper: box_detections: List[BoxDetectionSE3] = list( - get_box_detections_for_lidarpc_token_from_db(source_log_file, lidar_pc.token) + get_box_detections_for_lidarpc_token_from_db(source_log_path, lidar_pc.token) ) return BoxDetectionWrapper(box_detections=box_detections) def _extract_nuplan_traffic_lights(log_db: NuPlanDB, lidar_pc_token: str) -> TrafficLightDetectionWrapper: - traffic_lights_detections: List[TrafficLightDetection] = [ TrafficLightDetection( timepoint=None, # NOTE: Timepoint is not needed during writing, set to None @@ -395,7 +327,6 @@ def _extract_nuplan_traffic_lights(log_db: NuPlanDB, lidar_pc_token: str) -> Tra ) for traffic_light in log_db.traffic_light_status.select_many(lidar_pc_token=lidar_pc_token) ] - return TrafficLightDetectionWrapper(traffic_light_detections=traffic_lights_detections) @@ -405,60 +336,56 @@ def _extract_nuplan_cameras( source_log_path: Path, nuplan_sensor_root: Path, dataset_converter_config: DatasetConverterConfig, - timer: Timer, ) -> Dict[PinholeCameraType, Tuple[Union[str, bytes], StateSE3]]: camera_dict: Dict[str, Union[str, bytes]] = {} - log_cam_infos = {camera.token: camera for camera in nuplan_log_db.log.cameras} - # timer.log("0. get camera infos") - - for camera_type, camera_channel in NUPLAN_CAMERA_MAPPING.items(): - timer.start() - camera_data: Optional[Union[str, bytes]] = None - image_class = list( - get_images_from_lidar_tokens(source_log_path, [nuplan_lidar_pc.token], [str(camera_channel.value)]) - ) - timer.log("0. get image from lidar token") - - if len(image_class) != 0: - image = image_class[0] - filename_jpg = nuplan_sensor_root / image.filename_jpg - if filename_jpg.exists() and filename_jpg.is_file(): - - # Code taken from MTGS - # https://github.com/OpenDriveLab/MTGS/blob/main/nuplan_scripts/utils/nuplan_utils_custom.py#L117 - # TODO: Refactor - image.timestamp + NUPLAN_ROLLING_SHUTTER_S.time_us - timer.log("0. Misc") - - # img_ego_pose: EgoPose = ( - # nuplan_log_db.log._session.query(EgoPose).order_by(func.abs(EgoPose.timestamp - timestamp)).first() - # ) - ego_pose = get_ego_pose_for_lidarpc_token_from_db(source_log_path, nuplan_lidar_pc.token) - - timer.log("0. img_ego_pose") - img_e2g = ego_pose.transformation_matrix - g2e = nuplan_lidar_pc.ego_pose.trans_matrix_inv - img_e2e = g2e @ img_e2g - cam_info = log_cam_infos[image.camera_token] - c2img_e = cam_info.trans_matrix - c2e = img_e2e @ c2img_e - timer.log("0. matrix multiplications") - - extrinsic = StateSE3.from_transformation_matrix(c2e) - - if dataset_converter_config.camera_store_option == "path": - camera_data = str(filename_jpg) - elif dataset_converter_config.camera_store_option == "binary": - with open(filename_jpg, "rb") as f: - camera_data = f.read() - - camera_dict[camera_type] = camera_data, extrinsic - timer.log("0. my bullshit") - timer.end() - - # timer.log("1. 
big for loop\") - # timer.end() + + if dataset_converter_config.include_cameras: + log_cam_infos = {camera.token: camera for camera in nuplan_log_db.log.cameras} + for camera_type, camera_channel in NUPLAN_CAMERA_MAPPING.items(): + camera_data: Optional[Union[str, bytes]] = None + image_class = list( + get_images_from_lidar_tokens(source_log_path, [nuplan_lidar_pc.token], [str(camera_channel.value)]) + ) + + if len(image_class) != 0: + image = image_class[0] + filename_jpg = nuplan_sensor_root / image.filename_jpg + if filename_jpg.exists() and filename_jpg.is_file(): + + # NOTE: This part is modified from the MTGS code. + # In MTGS, a slower method is used to find the nearest ego pose. + # The code below uses a direct SQL query to find the nearest ego pose within a given window. + # https://github.com/OpenDriveLab/MTGS/blob/main/nuplan_scripts/utils/nuplan_utils_custom.py#L117 + + # Query nearest ego pose for the image timestamp + timestamp = image.timestamp + NUPLAN_ROLLING_SHUTTER_S.time_us + nearest_ego_pose = get_nearest_ego_pose_for_timestamp_from_db( + source_log_path, + timestamp, + [nuplan_lidar_pc.token], + ) + + # Compute camera to ego transformation, given the nearest ego pose + img_e2g = nearest_ego_pose.transformation_matrix + g2e = nuplan_lidar_pc.ego_pose.trans_matrix_inv + img_e2e = g2e @ img_e2g + cam_info = log_cam_infos[image.camera_token] + c2img_e = cam_info.trans_matrix + c2e = img_e2e @ c2img_e + extrinsic = StateSE3.from_transformation_matrix(c2e) + + # Store camera data, either as path or binary + camera_data: Optional[Union[str, bytes]] = None + if dataset_converter_config.camera_store_option == "path": + camera_data = str(filename_jpg) + elif dataset_converter_config.camera_store_option == "binary": + with open(filename_jpg, "rb") as f: + camera_data = f.read() + + # Store in dictionary + camera_dict[camera_type] = camera_data, extrinsic + return camera_dict @@ -491,117 +418,3 @@ def _extract_nuplan_route_lane_group_ids(nuplan_lidar_pc: LidarPc) -> List[int]: for roadblock_id in str(nuplan_lidar_pc.scene.roadblock_ids).split(" ") if len(roadblock_id) > 0 ] - - -def get_ego_pose_for_lidarpc_token_from_db(log_file: str, token: str) -> StateSE3: - """ - Get the ego state associated with an individual lidar_pc token from the db. - - :param log_file: The log file to query. - :param token: The lidar_pc token to query. - :return: The EgoState associated with the LidarPC. - """ - query = """ - SELECT ep.x, - ep.y, - ep.z, - ep.qw, - ep.qx, - ep.qy, - ep.qz, - -- ego_pose and lidar_pc timestamps are not the same, even when linked by token! - -- use lidar_pc timestamp for backwards compatibility. - lp.timestamp, - ep.vx, - ep.vy, - ep.acceleration_x, - ep.acceleration_y - FROM ego_pose AS ep - INNER JOIN lidar_pc AS lp - ON lp.ego_pose_token = ep.token - WHERE lp.token = ? 
- """ - - row = execute_one(query, (bytearray.fromhex(token),), log_file) - if row is None: - return None - - # q = Quaternion(row["qw"], row["qx"], row["qy"], row["qz"]) - # return EgoState.build_from_rear_axle( - # StateSE2(row["x"], row["y"], q.yaw_pitch_roll[0]), - # tire_steering_angle=0.0, - # vehicle_parameters=get_pacifica_parameters(), - # time_point=TimePoint(row["timestamp"]), - # rear_axle_velocity_2d=StateVector2D(row["vx"], y=row["vy"]), - # rear_axle_acceleration_2d=StateVector2D(x=row["acceleration_x"], y=row["acceleration_y"]), - # ) - - return StateSE3(x=row["x"], y=row["y"], z=row["z"], qw=row["qw"], qx=row["qx"], qy=row["qy"], qz=row["qz"]) - - -def get_box_detections_for_lidarpc_token_from_db(log_file: str, token: str) -> Generator[BoxDetectionSE3, None, None]: - """ - Get all tracked objects for a given lidar_pc. - This includes both agents and static objects. - The values are returned in random order. - - For agents, this query will not obtain the future waypoints. - For that, call `get_future_waypoints_for_agents_from_db()` - with the tokens of the agents of interest. - - :param log_file: The log file to query. - :param token: The lidar_pc token for which to obtain the objects. - :return: The tracked objects associated with the token. - """ - query = """ - SELECT c.name AS category_name, - lb.x, - lb.y, - lb.z, - lb.yaw, - lb.width, - lb.length, - lb.height, - lb.vx, - lb.vy, - lb.vz, - lb.token, - lb.track_token, - lp.timestamp - FROM lidar_box AS lb - INNER JOIN track AS t - ON t.token = lb.track_token - INNER JOIN category AS c - ON c.token = t.category_token - INNER JOIN lidar_pc AS lp - ON lp.token = lb.lidar_pc_token - WHERE lp.token = ? - """ - - for row in execute_many(query, (bytearray.fromhex(token),), log_file): - quaternion = EulerAngles(roll=DEFAULT_ROLL, pitch=DEFAULT_PITCH, yaw=row["yaw"]).quaternion - bounding_box = BoundingBoxSE3( - center=StateSE3( - x=row["x"], - y=row["y"], - z=row["z"], - qw=quaternion.qw, - qx=quaternion.qx, - qy=quaternion.qy, - qz=quaternion.qz, - ), - length=row["length"], # nuPlan uses length, - width=row["width"], # width, - height=row["height"], # height - ) - box_detection = BoxDetectionSE3( - metadata=BoxDetectionMetadata( - detection_type=NUPLAN_DETECTION_NAME_DICT[row["category_name"]], - timepoint=None, # NOTE: Timepoint is not needed during writing, set to None - track_token=row["track_token"].hex(), - confidence=None, # NOTE: Not currently written, requires refactoring - ), - bounding_box_se3=bounding_box, - velocity=Vector3D(x=row["vx"], y=row["vy"], z=row["vz"]), - ) - yield box_detection diff --git a/d123/conversion/datasets/nuplan/utils/__init__.py b/d123/conversion/datasets/nuplan/utils/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/d123/conversion/datasets/nuplan/utils/nuplan_sql_helper.py b/d123/conversion/datasets/nuplan/utils/nuplan_sql_helper.py new file mode 100644 index 00000000..903fbeb2 --- /dev/null +++ b/d123/conversion/datasets/nuplan/utils/nuplan_sql_helper.py @@ -0,0 +1,130 @@ +from typing import List + +from d123.common.utils.dependencies import check_dependencies +from d123.conversion.datasets.nuplan.nuplan_constants import NUPLAN_DETECTION_NAME_DICT +from d123.datatypes.detections.detection import BoxDetectionMetadata, BoxDetectionSE3 +from d123.geometry import BoundingBoxSE3, EulerAngles, StateSE3, Vector3D +from d123.geometry.utils.constants import DEFAULT_PITCH, DEFAULT_ROLL + +check_dependencies(modules=["nuplan"], optional_name="nuplan") +from 
nuplan.database.nuplan_db.query_session import execute_many, execute_one + + +def get_box_detections_for_lidarpc_token_from_db(log_file: str, token: str) -> List[BoxDetectionSE3]: + + query = """ + SELECT c.name AS category_name, + lb.x, + lb.y, + lb.z, + lb.yaw, + lb.width, + lb.length, + lb.height, + lb.vx, + lb.vy, + lb.vz, + lb.token, + lb.track_token, + lp.timestamp + FROM lidar_box AS lb + INNER JOIN track AS t + ON t.token = lb.track_token + INNER JOIN category AS c + ON c.token = t.category_token + INNER JOIN lidar_pc AS lp + ON lp.token = lb.lidar_pc_token + WHERE lp.token = ? + """ + + box_detections: List[BoxDetectionSE3] = [] + + for row in execute_many(query, (bytearray.fromhex(token),), log_file): + quaternion = EulerAngles(roll=DEFAULT_ROLL, pitch=DEFAULT_PITCH, yaw=row["yaw"]).quaternion + bounding_box = BoundingBoxSE3( + center=StateSE3( + x=row["x"], + y=row["y"], + z=row["z"], + qw=quaternion.qw, + qx=quaternion.qx, + qy=quaternion.qy, + qz=quaternion.qz, + ), + length=row["length"], # nuPlan uses length, + width=row["width"], # width, + height=row["height"], # height + ) + box_detection = BoxDetectionSE3( + metadata=BoxDetectionMetadata( + detection_type=NUPLAN_DETECTION_NAME_DICT[row["category_name"]], + timepoint=None, # NOTE: Timepoint is not needed during writing, set to None + track_token=row["track_token"].hex(), + confidence=None, # NOTE: Not currently written, requires refactoring + ), + bounding_box_se3=bounding_box, + velocity=Vector3D(x=row["vx"], y=row["vy"], z=row["vz"]), + ) + box_detections.append(box_detection) + + return box_detections + + +def get_ego_pose_for_timestamp_from_db(log_file: str, timestamp: int) -> StateSE3: + + query = """ + SELECT ep.x, + ep.y, + ep.z, + ep.qw, + ep.qx, + ep.qy, + ep.qz, + ep.timestamp, + ep.vx, + ep.vy, + ep.acceleration_x, + ep.acceleration_y + FROM ego_pose AS ep + ORDER BY ABS(ep.timestamp - ?) + LIMIT 1 + """ + + row = execute_one(query, (timestamp,), log_file) + if row is None: + return None + + return StateSE3(x=row["x"], y=row["y"], z=row["z"], qw=row["qw"], qx=row["qx"], qy=row["qy"], qz=row["qz"]) + + +def get_nearest_ego_pose_for_timestamp_from_db( + log_file: str, + timestamp: int, + tokens: List[str], + lookahead_window_us: int = 50000, + lookback_window_us: int = 50000, +) -> StateSE3: + + query = f""" + SELECT ep.x, + ep.y, + ep.z, + ep.qw, + ep.qx, + ep.qy, + ep.qz + FROM ego_pose AS ep + INNER JOIN lidar_pc AS lpc + ON ep.timestamp <= lpc.timestamp + ? + AND ep.timestamp >= lpc.timestamp - ? + WHERE lpc.token IN ({('?,'*len(tokens))[:-1]}) + ORDER BY ABS(ep.timestamp - ?) 
+ LIMIT 1 + """ # noqa: E226 + + args = [lookahead_window_us, lookback_window_us] + args += [bytearray.fromhex(t) for t in tokens] + args += [timestamp] + + for row in execute_many(query, args, log_file): + return StateSE3(x=row["x"], y=row["y"], z=row["z"], qw=row["qw"], qx=row["qx"], qy=row["qy"], qz=row["qz"]) diff --git a/d123/conversion/utils/sensor_utils/lidar_index_registry.py b/d123/conversion/utils/sensor_utils/lidar_index_registry.py index ff76c19c..ad90b334 100644 --- a/d123/conversion/utils/sensor_utils/lidar_index_registry.py +++ b/d123/conversion/utils/sensor_utils/lidar_index_registry.py @@ -45,7 +45,7 @@ class NuPlanLidarIndex(LiDARIndex): @register_lidar_index -class CarlaLidarIndex(LiDARIndex): +class CARLALidarIndex(LiDARIndex): X = 0 Y = 1 Z = 2 @@ -53,7 +53,7 @@ class CarlaLidarIndex(LiDARIndex): @register_lidar_index -class WopdLidarIndex(LiDARIndex): +class WOPDLidarIndex(LiDARIndex): RANGE = 0 INTENSITY = 1 ELONGATION = 2 diff --git a/d123/script/config/conversion/datasets/nuplan_mini_dataset.yaml b/d123/script/config/conversion/datasets/nuplan_mini_dataset.yaml index 92b86322..adff591a 100644 --- a/d123/script/config/conversion/datasets/nuplan_mini_dataset.yaml +++ b/d123/script/config/conversion/datasets/nuplan_mini_dataset.yaml @@ -25,11 +25,11 @@ nuplan_mini_dataset: include_traffic_lights: true # Cameras - include_cameras: true + include_cameras: false camera_store_option: "path" # "path", "binary", "mp4" # LiDARs - include_lidars: true + include_lidars: false lidar_store_option: "path" # "path", "binary" # Scenario tag / Route diff --git a/d123/script/config/conversion/default_conversion.yaml b/d123/script/config/conversion/default_conversion.yaml index a53e4939..9834174a 100644 --- a/d123/script/config/conversion/default_conversion.yaml +++ b/d123/script/config/conversion/default_conversion.yaml @@ -14,10 +14,10 @@ defaults: - log_writer: arrow_ipc_log_writer - datasets: - nuplan_mini_dataset - # - nuplan_private_dataset - # - carla_dataset - # - wopd_dataset - # - av2_sensor_dataset + # - nuplan_private_dataset + # - carla_dataset + # - wopd_dataset + # - av2_sensor_dataset - _self_ force_map_conversion: True From 7bf6608aa42be24a744ceb0de083386874cdedda Mon Sep 17 00:00:00 2001 From: Daniel Dauner Date: Sat, 11 Oct 2025 16:48:25 +0200 Subject: [PATCH 068/145] Fix a bug in the scene builder --- d123/common/visualization/viser/viser_config.py | 2 +- d123/datatypes/scene/arrow/arrow_scene_builder.py | 2 +- .../config/conversion/datasets/nuplan_mini_dataset.yaml | 4 ++-- test_viser.py | 4 ++-- 4 files changed, 6 insertions(+), 6 deletions(-) diff --git a/d123/common/visualization/viser/viser_config.py b/d123/common/visualization/viser/viser_config.py index fedb643b..f99f7823 100644 --- a/d123/common/visualization/viser/viser_config.py +++ b/d123/common/visualization/viser/viser_config.py @@ -44,7 +44,7 @@ class ViserConfig: # Map map_visible: bool = True - map_radius: float = 1000.0 # [m] + map_radius: float = 200.0 # [m] map_non_road_z_offset: float = 0.1 # small z-translation to place crosswalks, parking, etc. 
on top of the road # Bounding boxes diff --git a/d123/datatypes/scene/arrow/arrow_scene_builder.py b/d123/datatypes/scene/arrow/arrow_scene_builder.py index bf48c96b..4acb53d6 100644 --- a/d123/datatypes/scene/arrow/arrow_scene_builder.py +++ b/d123/datatypes/scene/arrow/arrow_scene_builder.py @@ -145,7 +145,7 @@ def _get_scene_extraction_metadatas(log_path: Union[str, Path], filter: SceneFil # NOTE: We only check camera availability at the initial index of the scene. if filter.camera_types is not None: cameras_available = [ - recording_table[camera_type.serialize()][start_idx].as_py() is not None + recording_table[f"{camera_type.serialize()}_data"][start_idx].as_py() is not None for camera_type in filter.camera_types ] if not all(cameras_available): diff --git a/d123/script/config/conversion/datasets/nuplan_mini_dataset.yaml b/d123/script/config/conversion/datasets/nuplan_mini_dataset.yaml index adff591a..92b86322 100644 --- a/d123/script/config/conversion/datasets/nuplan_mini_dataset.yaml +++ b/d123/script/config/conversion/datasets/nuplan_mini_dataset.yaml @@ -25,11 +25,11 @@ nuplan_mini_dataset: include_traffic_lights: true # Cameras - include_cameras: false + include_cameras: true camera_store_option: "path" # "path", "binary", "mp4" # LiDARs - include_lidars: false + include_lidars: true lidar_store_option: "path" # "path", "binary" # Scenario tag / Route diff --git a/test_viser.py b/test_viser.py index abdeac38..9661edde 100644 --- a/test_viser.py +++ b/test_viser.py @@ -4,6 +4,7 @@ from d123.common.visualization.viser.viser_viewer import ViserViewer from d123.datatypes.scene.arrow.arrow_scene_builder import ArrowSceneBuilder from d123.datatypes.scene.scene_filter import SceneFilter +from d123.datatypes.sensors.camera.pinhole_camera import PinholeCameraType if __name__ == "__main__": @@ -23,8 +24,7 @@ history_s=0.0, timestamp_threshold_s=10.0, shuffle=True, - # camera_types=[PinholeCameraType.CAM_F0], - camera_types=None, + camera_types=[PinholeCameraType.CAM_F0], ) scene_builder = ArrowSceneBuilder(os.environ["D123_DATA_ROOT"]) worker = Sequential() From 284bb9b1bbb124f95b01b87f5e2e7cffb9f05198 Mon Sep 17 00:00:00 2001 From: Daniel Dauner Date: Sat, 11 Oct 2025 16:53:41 +0200 Subject: [PATCH 069/145] First removal of simulation/training/carl/smart and other deprecated implementations (#54) --- .../config/lightning_training/__init__.py | 0 .../callbacks/default_callbacks.yaml | 14 - .../callbacks/learning_rate_monitor.yaml | 4 - .../callbacks/model_checkpoint.yaml | 17 - .../callbacks/model_summary.yaml | 5 - .../config/lightning_training/data/waymo.yaml | 15 - .../default_lightning_training.yaml | 29 - .../lightning_training/model/smart.yaml | 77 -- .../model/smart_mini_3M.yaml | 77 -- .../model/smart_nano_1M.yaml | 77 -- .../lightning_training/trainer/ddp.yaml | 13 - .../trainer/default_trainer.yaml | 27 - .../preprocessing/default_preprocessing.yaml | 20 - .../config/simulation/default_simulation.yaml | 20 - d123/simulation/__init__.py | 0 d123/simulation/agents/__init__.py | 0 d123/simulation/agents/abstract_agents.py | 27 - .../agents/constant_velocity_agents.py | 67 - d123/simulation/agents/idm_agents.py | 196 --- d123/simulation/agents/path_following.py | 91 -- d123/simulation/agents/smart_agents.py | 146 --- d123/simulation/callback/abstract_callback.py | 79 -- d123/simulation/callback/multi_callback.py | 74 -- d123/simulation/controller/__init__.py | 0 .../controller/abstract_controller.py | 42 - .../controller/action_controller.py | 55 - 
.../controller/motion_model/__init__.py | 0 .../motion_model/abstract_motion_model.py | 25 - .../motion_model/kinematic_bicycle_model.py | 166 --- .../simulation/controller/tracker/__init__.py | 0 d123/simulation/gym/__init__.py | 0 d123/simulation/gym/demo_gym_env.py | 122 -- .../gym/environment/environment_wrapper.py | 191 --- .../environment/gym_observation/__init__.py | 0 .../abstract_gym_observation.py | 36 - .../raster/raster_gym_observation.py | 276 ----- .../gym_observation/raster/raster_renderer.py | 505 -------- .../environment/helper/environment_area.py | 78 -- .../environment/helper/environment_cache.py | 229 ---- .../environment/output_converter/__init__.py | 0 .../abstract_output_converter.py | 32 - .../action_output_converter.py | 209 ---- .../environment/reward_builder/__init__.py | 0 .../reward_builder/abstract_reward_builder.py | 24 - .../reward_builder/components/__init__.py | 0 .../reward_builder/components/collision.py | 103 -- .../reward_builder/components/comfort.py | 145 --- .../reward_builder/components/off_route.py | 98 -- .../reward_builder/components/progress.py | 120 -- .../components/time_to_collision.py | 289 ----- .../reward_builder/default_reward_builder.py | 753 ----------- .../environment/scenario_sampler/__init__.py | 0 .../abstract_scenario_sampler.py | 25 - .../cache_scenario_sampler.py | 61 - .../simulation_builder/__init__.py | 0 .../abstract_simulation_builder.py | 16 - .../default_simulation_builder.py | 91 -- .../gym/environment/simulation_wrapper.py | 160 --- d123/simulation/gym/gym_env.py | 91 -- d123/simulation/gym/policy/__init__.py | 0 d123/simulation/gym/policy/ppo/__init__.py | 0 d123/simulation/gym/policy/ppo/ppo_config.py | 282 ----- .../gym/policy/ppo/ppo_distributions.py | 371 ------ d123/simulation/gym/policy/ppo/ppo_model.py | 916 -------------- d123/simulation/gym/training.py | 1097 ----------------- d123/simulation/history/simulation_history.py | 95 -- .../history/simulation_history_buffer.py | 194 --- d123/simulation/metrics/__init__.py | 0 .../simulation/metrics/sim_agents/__init__.py | 0 .../metrics/sim_agents/histogram_metric.py | 218 ---- .../metrics/sim_agents/interaction_based.py | 75 -- .../metrics/sim_agents/kinematics.py | 165 --- .../metrics/sim_agents/map_based.py | 151 --- .../metrics/sim_agents/sim_agents.py | 225 ---- d123/simulation/metrics/sim_agents/utils.py | 54 - d123/simulation/observation/__init__.py | 0 .../observation/abstract_observation.py | 33 - .../observation/agents_observation.py | 87 -- .../observation/log_replay_observation.py | 47 - d123/simulation/planning/.gitkeep | 0 d123/simulation/planning/__init__.py | 0 d123/simulation/planning/abstract_planner.py | 40 - .../planning/planner_output/__init__.py | 0 .../planner_output/abstract_planner_output.py | 8 - .../planner_output/action_planner_output.py | 29 - .../trajectory_planner_output.py | 7 - d123/simulation/simulation_2d.py | 171 --- d123/simulation/simulation_2d_setup.py | 26 - d123/simulation/time_controller/__init__.py | 0 .../abstract_time_controller.py | 47 - .../time_controller/log_time_controller.py | 44 - .../time_controller/simulation_iteration.py | 34 - d123/tests/.gitkeep | 0 d123/training/__init__.py | 0 .../feature_builder/smart_feature_builder.py | 344 ------ d123/training/models/__init__.py | 0 .../models/motion_forecasting/__init__.py | 0 d123/training/models/sim_agent/__init__.py | 0 .../models/sim_agent/smart/__init__.py | 0 .../sim_agent/smart/datamodules/__init__.py | 0 .../smart/datamodules/scalable_datamodule.py | 95 -- 
.../smart/datamodules/target_builder.py | 62 - .../sim_agent/smart/datasets/__init__.py | 0 .../smart/datasets/scalable_dataset.py | 40 - .../models/sim_agent/smart/layers/__init__.py | 0 .../sim_agent/smart/layers/attention_layer.py | 113 -- .../smart/layers/fourier_embedding.py | 88 -- .../sim_agent/smart/layers/mlp_layer.py | 19 - .../sim_agent/smart/metrics/__init__.py | 1 - .../sim_agent/smart/metrics/cross_entropy.py | 104 -- .../models/sim_agent/smart/metrics/ego_nll.py | 120 -- .../models/sim_agent/smart/metrics/gmm_ade.py | 33 - .../models/sim_agent/smart/metrics/min_ade.py | 28 - .../sim_agent/smart/metrics/next_token_cls.py | 27 - .../models/sim_agent/smart/metrics/utils.py | 70 -- .../sim_agent/smart/modules/__init__.py | 0 .../sim_agent/smart/modules/smart_decoder.py | 745 ----------- d123/training/models/sim_agent/smart/smart.py | 176 --- .../models/sim_agent/smart/smart_config.py | 68 - .../models/sim_agent/smart/tokens/__init__.py | 0 .../sim_agent/smart/tokens/token_processor.py | 339 ----- .../sim_agent/smart/tokens/traj_clustering.py | 155 --- .../models/sim_agent/smart/utils/__init__.py | 0 .../models/sim_agent/smart/utils/finetune.py | 33 - .../models/sim_agent/smart/utils/geometry.py | 14 - .../sim_agent/smart/utils/preprocess.py | 150 --- .../models/sim_agent/smart/utils/rollout.py | 260 ---- .../sim_agent/smart/utils/weight_init.py | 69 -- scripts/install/install_smart.sh | 17 - scripts/preprocessing/preprocess_smart.sh | 17 - scripts/simulation/run_sim_agents.sh | 7 - scripts/training/train_smart.sh | 6 - 132 files changed, 12633 deletions(-) delete mode 100644 d123/script/config/lightning_training/__init__.py delete mode 100644 d123/script/config/lightning_training/callbacks/default_callbacks.yaml delete mode 100644 d123/script/config/lightning_training/callbacks/learning_rate_monitor.yaml delete mode 100644 d123/script/config/lightning_training/callbacks/model_checkpoint.yaml delete mode 100644 d123/script/config/lightning_training/callbacks/model_summary.yaml delete mode 100644 d123/script/config/lightning_training/data/waymo.yaml delete mode 100644 d123/script/config/lightning_training/default_lightning_training.yaml delete mode 100644 d123/script/config/lightning_training/model/smart.yaml delete mode 100644 d123/script/config/lightning_training/model/smart_mini_3M.yaml delete mode 100644 d123/script/config/lightning_training/model/smart_nano_1M.yaml delete mode 100644 d123/script/config/lightning_training/trainer/ddp.yaml delete mode 100644 d123/script/config/lightning_training/trainer/default_trainer.yaml delete mode 100644 d123/script/config/preprocessing/default_preprocessing.yaml delete mode 100644 d123/script/config/simulation/default_simulation.yaml delete mode 100644 d123/simulation/__init__.py delete mode 100644 d123/simulation/agents/__init__.py delete mode 100644 d123/simulation/agents/abstract_agents.py delete mode 100644 d123/simulation/agents/constant_velocity_agents.py delete mode 100644 d123/simulation/agents/idm_agents.py delete mode 100644 d123/simulation/agents/path_following.py delete mode 100644 d123/simulation/agents/smart_agents.py delete mode 100644 d123/simulation/callback/abstract_callback.py delete mode 100644 d123/simulation/callback/multi_callback.py delete mode 100644 d123/simulation/controller/__init__.py delete mode 100644 d123/simulation/controller/abstract_controller.py delete mode 100644 d123/simulation/controller/action_controller.py delete mode 100644 d123/simulation/controller/motion_model/__init__.py delete mode 100644 
d123/simulation/controller/motion_model/abstract_motion_model.py delete mode 100644 d123/simulation/controller/motion_model/kinematic_bicycle_model.py delete mode 100644 d123/simulation/controller/tracker/__init__.py delete mode 100644 d123/simulation/gym/__init__.py delete mode 100644 d123/simulation/gym/demo_gym_env.py delete mode 100644 d123/simulation/gym/environment/environment_wrapper.py delete mode 100644 d123/simulation/gym/environment/gym_observation/__init__.py delete mode 100644 d123/simulation/gym/environment/gym_observation/abstract_gym_observation.py delete mode 100644 d123/simulation/gym/environment/gym_observation/raster/raster_gym_observation.py delete mode 100644 d123/simulation/gym/environment/gym_observation/raster/raster_renderer.py delete mode 100644 d123/simulation/gym/environment/helper/environment_area.py delete mode 100644 d123/simulation/gym/environment/helper/environment_cache.py delete mode 100644 d123/simulation/gym/environment/output_converter/__init__.py delete mode 100644 d123/simulation/gym/environment/output_converter/abstract_output_converter.py delete mode 100644 d123/simulation/gym/environment/output_converter/action_output_converter.py delete mode 100644 d123/simulation/gym/environment/reward_builder/__init__.py delete mode 100644 d123/simulation/gym/environment/reward_builder/abstract_reward_builder.py delete mode 100644 d123/simulation/gym/environment/reward_builder/components/__init__.py delete mode 100644 d123/simulation/gym/environment/reward_builder/components/collision.py delete mode 100644 d123/simulation/gym/environment/reward_builder/components/comfort.py delete mode 100644 d123/simulation/gym/environment/reward_builder/components/off_route.py delete mode 100644 d123/simulation/gym/environment/reward_builder/components/progress.py delete mode 100644 d123/simulation/gym/environment/reward_builder/components/time_to_collision.py delete mode 100644 d123/simulation/gym/environment/reward_builder/default_reward_builder.py delete mode 100644 d123/simulation/gym/environment/scenario_sampler/__init__.py delete mode 100644 d123/simulation/gym/environment/scenario_sampler/abstract_scenario_sampler.py delete mode 100644 d123/simulation/gym/environment/scenario_sampler/cache_scenario_sampler.py delete mode 100644 d123/simulation/gym/environment/simulation_builder/__init__.py delete mode 100644 d123/simulation/gym/environment/simulation_builder/abstract_simulation_builder.py delete mode 100644 d123/simulation/gym/environment/simulation_builder/default_simulation_builder.py delete mode 100644 d123/simulation/gym/environment/simulation_wrapper.py delete mode 100644 d123/simulation/gym/gym_env.py delete mode 100644 d123/simulation/gym/policy/__init__.py delete mode 100644 d123/simulation/gym/policy/ppo/__init__.py delete mode 100644 d123/simulation/gym/policy/ppo/ppo_config.py delete mode 100644 d123/simulation/gym/policy/ppo/ppo_distributions.py delete mode 100644 d123/simulation/gym/policy/ppo/ppo_model.py delete mode 100644 d123/simulation/gym/training.py delete mode 100644 d123/simulation/history/simulation_history.py delete mode 100644 d123/simulation/history/simulation_history_buffer.py delete mode 100644 d123/simulation/metrics/__init__.py delete mode 100644 d123/simulation/metrics/sim_agents/__init__.py delete mode 100644 d123/simulation/metrics/sim_agents/histogram_metric.py delete mode 100644 d123/simulation/metrics/sim_agents/interaction_based.py delete mode 100644 d123/simulation/metrics/sim_agents/kinematics.py delete mode 100644 
d123/simulation/metrics/sim_agents/map_based.py delete mode 100644 d123/simulation/metrics/sim_agents/sim_agents.py delete mode 100644 d123/simulation/metrics/sim_agents/utils.py delete mode 100644 d123/simulation/observation/__init__.py delete mode 100644 d123/simulation/observation/abstract_observation.py delete mode 100644 d123/simulation/observation/agents_observation.py delete mode 100644 d123/simulation/observation/log_replay_observation.py delete mode 100644 d123/simulation/planning/.gitkeep delete mode 100644 d123/simulation/planning/__init__.py delete mode 100644 d123/simulation/planning/abstract_planner.py delete mode 100644 d123/simulation/planning/planner_output/__init__.py delete mode 100644 d123/simulation/planning/planner_output/abstract_planner_output.py delete mode 100644 d123/simulation/planning/planner_output/action_planner_output.py delete mode 100644 d123/simulation/planning/planner_output/trajectory_planner_output.py delete mode 100644 d123/simulation/simulation_2d.py delete mode 100644 d123/simulation/simulation_2d_setup.py delete mode 100644 d123/simulation/time_controller/__init__.py delete mode 100644 d123/simulation/time_controller/abstract_time_controller.py delete mode 100644 d123/simulation/time_controller/log_time_controller.py delete mode 100644 d123/simulation/time_controller/simulation_iteration.py delete mode 100644 d123/tests/.gitkeep delete mode 100644 d123/training/__init__.py delete mode 100644 d123/training/feature_builder/smart_feature_builder.py delete mode 100644 d123/training/models/__init__.py delete mode 100644 d123/training/models/motion_forecasting/__init__.py delete mode 100644 d123/training/models/sim_agent/__init__.py delete mode 100644 d123/training/models/sim_agent/smart/__init__.py delete mode 100644 d123/training/models/sim_agent/smart/datamodules/__init__.py delete mode 100644 d123/training/models/sim_agent/smart/datamodules/scalable_datamodule.py delete mode 100644 d123/training/models/sim_agent/smart/datamodules/target_builder.py delete mode 100644 d123/training/models/sim_agent/smart/datasets/__init__.py delete mode 100644 d123/training/models/sim_agent/smart/datasets/scalable_dataset.py delete mode 100644 d123/training/models/sim_agent/smart/layers/__init__.py delete mode 100644 d123/training/models/sim_agent/smart/layers/attention_layer.py delete mode 100644 d123/training/models/sim_agent/smart/layers/fourier_embedding.py delete mode 100644 d123/training/models/sim_agent/smart/layers/mlp_layer.py delete mode 100644 d123/training/models/sim_agent/smart/metrics/__init__.py delete mode 100644 d123/training/models/sim_agent/smart/metrics/cross_entropy.py delete mode 100644 d123/training/models/sim_agent/smart/metrics/ego_nll.py delete mode 100644 d123/training/models/sim_agent/smart/metrics/gmm_ade.py delete mode 100644 d123/training/models/sim_agent/smart/metrics/min_ade.py delete mode 100644 d123/training/models/sim_agent/smart/metrics/next_token_cls.py delete mode 100644 d123/training/models/sim_agent/smart/metrics/utils.py delete mode 100644 d123/training/models/sim_agent/smart/modules/__init__.py delete mode 100644 d123/training/models/sim_agent/smart/modules/smart_decoder.py delete mode 100644 d123/training/models/sim_agent/smart/smart.py delete mode 100644 d123/training/models/sim_agent/smart/smart_config.py delete mode 100644 d123/training/models/sim_agent/smart/tokens/__init__.py delete mode 100644 d123/training/models/sim_agent/smart/tokens/token_processor.py delete mode 100644 
d123/training/models/sim_agent/smart/tokens/traj_clustering.py delete mode 100644 d123/training/models/sim_agent/smart/utils/__init__.py delete mode 100644 d123/training/models/sim_agent/smart/utils/finetune.py delete mode 100644 d123/training/models/sim_agent/smart/utils/geometry.py delete mode 100644 d123/training/models/sim_agent/smart/utils/preprocess.py delete mode 100644 d123/training/models/sim_agent/smart/utils/rollout.py delete mode 100644 d123/training/models/sim_agent/smart/utils/weight_init.py delete mode 100644 scripts/install/install_smart.sh delete mode 100644 scripts/preprocessing/preprocess_smart.sh delete mode 100644 scripts/simulation/run_sim_agents.sh delete mode 100644 scripts/training/train_smart.sh diff --git a/d123/script/config/lightning_training/__init__.py b/d123/script/config/lightning_training/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/d123/script/config/lightning_training/callbacks/default_callbacks.yaml b/d123/script/config/lightning_training/callbacks/default_callbacks.yaml deleted file mode 100644 index 5809cfa9..00000000 --- a/d123/script/config/lightning_training/callbacks/default_callbacks.yaml +++ /dev/null @@ -1,14 +0,0 @@ -defaults: - - model_checkpoint - - model_summary - - learning_rate_monitor - - _self_ - -model_checkpoint: - dirpath: ${output_dir}/checkpoints - filename: "epoch_{epoch:03d}" - save_last: link - auto_insert_metric_name: false - -model_summary: - max_depth: -1 diff --git a/d123/script/config/lightning_training/callbacks/learning_rate_monitor.yaml b/d123/script/config/lightning_training/callbacks/learning_rate_monitor.yaml deleted file mode 100644 index 8cf24c03..00000000 --- a/d123/script/config/lightning_training/callbacks/learning_rate_monitor.yaml +++ /dev/null @@ -1,4 +0,0 @@ -# https://lightning.ai/docs/pytorch/stable/api/lightning.pytorch.callbacks.LearningRateMonitor.html -learning_rate_monitor: - _target_: lightning.pytorch.callbacks.LearningRateMonitor - logging_interval: epoch diff --git a/d123/script/config/lightning_training/callbacks/model_checkpoint.yaml b/d123/script/config/lightning_training/callbacks/model_checkpoint.yaml deleted file mode 100644 index b6858737..00000000 --- a/d123/script/config/lightning_training/callbacks/model_checkpoint.yaml +++ /dev/null @@ -1,17 +0,0 @@ -# https://lightning.ai/docs/pytorch/stable/api/lightning.pytorch.callbacks.ModelCheckpoint.html - -model_checkpoint: - _target_: lightning.pytorch.callbacks.ModelCheckpoint - dirpath: null # directory to save the model file - filename: null # checkpoint filename - monitor: null # name of the logged metric which determines when model is improving - verbose: false # verbosity mode - save_last: null # additionally always save an exact copy of the last checkpoint to a file last.ckpt - save_top_k: 1 # save k best models (determined by above metric) - mode: "min" # "max" means higher metric value is better, can be also "min" - auto_insert_metric_name: true # when True, the checkpoints filenames will contain the metric name - save_weights_only: false # if True, then only the model’s weights will be saved - every_n_train_steps: null # number of training steps between checkpoints - train_time_interval: null # checkpoints are monitored at the specified time interval - every_n_epochs: 1 # number of epochs between checkpoints - save_on_train_epoch_end: null # whether to run checkpointing at the end of the training epoch or the end of validation diff --git a/d123/script/config/lightning_training/callbacks/model_summary.yaml 
b/d123/script/config/lightning_training/callbacks/model_summary.yaml deleted file mode 100644 index b75981d8..00000000 --- a/d123/script/config/lightning_training/callbacks/model_summary.yaml +++ /dev/null @@ -1,5 +0,0 @@ -# https://lightning.ai/docs/pytorch/stable/api/lightning.pytorch.callbacks.RichModelSummary.html - -model_summary: - _target_: lightning.pytorch.callbacks.RichModelSummary - max_depth: 1 # the maximum depth of layer nesting that the summary will include diff --git a/d123/script/config/lightning_training/data/waymo.yaml b/d123/script/config/lightning_training/data/waymo.yaml deleted file mode 100644 index fae54e8a..00000000 --- a/d123/script/config/lightning_training/data/waymo.yaml +++ /dev/null @@ -1,15 +0,0 @@ -_target_: d123.training.models.sim_agent.smart.datamodules.scalable_datamodule.MultiDataModule -_convert_: 'all' - -train_batch_size: 4 -val_batch_size: 4 -test_batch_size: 4 -num_workers: 4 -shuffle: true -pin_memory: true -persistent_workers: true -train_raw_dir: ${paths.cache_root}/training -val_raw_dir: ${paths.cache_root}/validation -val_tfrecords_splitted: ${paths.cache_root}/validation_tfrecords_splitted -test_raw_dir: ${paths.cache_root}/testing -train_max_num: 32 diff --git a/d123/script/config/lightning_training/default_lightning_training.yaml b/d123/script/config/lightning_training/default_lightning_training.yaml deleted file mode 100644 index 3ff3b2ce..00000000 --- a/d123/script/config/lightning_training/default_lightning_training.yaml +++ /dev/null @@ -1,29 +0,0 @@ -hydra: - run: - dir: ${output_dir} - output_subdir: ${output_dir}/code/hydra # Store hydra's config breakdown here for debugging - searchpath: # Only in these paths are discoverable - - pkg://d123.script.config.common - - pkg://d123.script.config.lightning_training - job: - chdir: False - -defaults: - - default_common - - default_experiment - - data: waymo - - model: smart_nano_1M_forecasting - - callbacks: default_callbacks - - trainer: default_trainer - # - default_dataset_paths - - _self_ - # - datasets: - # - nuplan_mini_dataset - # - carla_dataset - -# force_feature_computation: True - -ckpt_path: null -action: fit -paths: - cache_root: /home/daniel/nuplan_cache diff --git a/d123/script/config/lightning_training/model/smart.yaml b/d123/script/config/lightning_training/model/smart.yaml deleted file mode 100644 index 4c05352b..00000000 --- a/d123/script/config/lightning_training/model/smart.yaml +++ /dev/null @@ -1,77 +0,0 @@ - -_target_: d123.training.models.sim_agent.smart.smart.SMART -_convert_: 'all' - -model_config: - _target_: d123.training.models.sim_agent.smart.smart_config.SMARTConfig - _convert_: 'all' - - lr: 0.0005 - lr_warmup_steps: 0 - lr_total_steps: 100000 - lr_min_ratio: 0.05 - - val_open_loop: True - val_closed_loop: True - - # Tokenizer - map_token_file: "map_traj_token5.pkl" - agent_token_file: "agent_vocab_555_s2.pkl" - - map_token_sampling: - _target_: d123.training.models.sim_agent.smart.smart_config.SMARTRolloutSampling - _convert_: 'all' - num_k: 1 - temp: 1.0 - criteria: null - - agent_token_sampling: - _target_: d123.training.models.sim_agent.smart.smart_config.SMARTRolloutSampling - _convert_: 'all' - num_k: 1 - temp: 1.0 - criteria: null - - # Rollout Sampling - validation_rollout_sampling: - _target_: d123.training.models.sim_agent.smart.smart_config.SMARTRolloutSampling - _convert_: 'all' - num_k: 5 - temp: 1.0 - criteria: "topk_prob" - - training_rollout_sampling: - _target_: d123.training.models.sim_agent.smart.smart_config.SMARTRolloutSampling - 
_convert_: 'all' - num_k: -1 - temp: 1.0 - criteria: "topk_prob" - - - # Decoder - hidden_dim: 128 - num_freq_bands: 64 - num_heads: 8 - head_dim: 16 - dropout: 0.1 - hist_drop_prob: 0.1 - num_map_layers: 3 - num_agent_layers: 6 - pl2pl_radius: 10 - pl2a_radius: 30 - a2a_radius: 60 - time_span: 30 - num_historical_steps: 11 - num_future_steps: 80 - - # train loss - use_gt_raw: True - gt_thresh_scale_length: -1.0 # {"veh": 4.8, "cyc": 2.0, "ped": 1.0} - label_smoothing: 0.1 - rollout_as_gt: False - - # else: - n_rollout_closed_val: 10 - n_vis_batch: 2 - n_vis_scenario: 2 - n_vis_rollout: 5 diff --git a/d123/script/config/lightning_training/model/smart_mini_3M.yaml b/d123/script/config/lightning_training/model/smart_mini_3M.yaml deleted file mode 100644 index e24ae979..00000000 --- a/d123/script/config/lightning_training/model/smart_mini_3M.yaml +++ /dev/null @@ -1,77 +0,0 @@ - -_target_: d123.training.models.sim_agent.smart.smart.SMART -_convert_: 'all' - -model_config: - _target_: d123.training.models.sim_agent.smart.smart_config.SMARTConfig - _convert_: 'all' - - lr: 0.0005 - lr_warmup_steps: 0 - lr_total_steps: 100000 - lr_min_ratio: 0.05 - - val_open_loop: True - val_closed_loop: True - - # Tokenizer - map_token_file: "map_traj_token5.pkl" - agent_token_file: "agent_vocab_555_s2.pkl" - - map_token_sampling: - _target_: d123.training.models.sim_agent.smart.smart_config.SMARTRolloutSampling - _convert_: 'all' - num_k: 1 - temp: 1.0 - criteria: null - - agent_token_sampling: - _target_: d123.training.models.sim_agent.smart.smart_config.SMARTRolloutSampling - _convert_: 'all' - num_k: 1 - temp: 1.0 - criteria: null - - # Rollout Sampling - validation_rollout_sampling: - _target_: d123.training.models.sim_agent.smart.smart_config.SMARTRolloutSampling - _convert_: 'all' - num_k: 5 - temp: 1.0 - criteria: "topk_prob" - - training_rollout_sampling: - _target_: d123.training.models.sim_agent.smart.smart_config.SMARTRolloutSampling - _convert_: 'all' - num_k: -1 - temp: 1.0 - criteria: "topk_prob" - - - # Decoder - hidden_dim: 128 - num_freq_bands: 64 - num_heads: 4 - head_dim: 8 - dropout: 0.1 - hist_drop_prob: 0.1 - num_map_layers: 2 - num_agent_layers: 4 - pl2pl_radius: 10 - pl2a_radius: 30 - a2a_radius: 60 - time_span: 30 - num_historical_steps: 11 - num_future_steps: 80 - - # train loss - use_gt_raw: True - gt_thresh_scale_length: -1.0 # {"veh": 4.8, "cyc": 2.0, "ped": 1.0} - label_smoothing: 0.1 - rollout_as_gt: False - - # else: - n_rollout_closed_val: 10 - n_vis_batch: 2 - n_vis_scenario: 2 - n_vis_rollout: 5 diff --git a/d123/script/config/lightning_training/model/smart_nano_1M.yaml b/d123/script/config/lightning_training/model/smart_nano_1M.yaml deleted file mode 100644 index 1fa717e0..00000000 --- a/d123/script/config/lightning_training/model/smart_nano_1M.yaml +++ /dev/null @@ -1,77 +0,0 @@ - -_target_: d123.training.models.sim_agent.smart.smart.SMART -_convert_: 'all' - -model_config: - _target_: d123.training.models.sim_agent.smart.smart_config.SMARTConfig - _convert_: 'all' - - lr: 0.0005 - lr_warmup_steps: 0 - lr_total_steps: 100000 - lr_min_ratio: 0.05 - - val_open_loop: True - val_closed_loop: True - - # Tokenizer - map_token_file: "map_traj_token5.pkl" - agent_token_file: "agent_vocab_555_s2.pkl" - - map_token_sampling: - _target_: d123.training.models.sim_agent.smart.smart_config.SMARTRolloutSampling - _convert_: 'all' - num_k: 1 - temp: 1.0 - criteria: null - - agent_token_sampling: - _target_: d123.training.models.sim_agent.smart.smart_config.SMARTRolloutSampling - 
_convert_: 'all' - num_k: 1 - temp: 1.0 - criteria: null - - # Rollout Sampling - validation_rollout_sampling: - _target_: d123.training.models.sim_agent.smart.smart_config.SMARTRolloutSampling - _convert_: 'all' - num_k: 5 - temp: 1.0 - criteria: "topk_prob" - - training_rollout_sampling: - _target_: d123.training.models.sim_agent.smart.smart_config.SMARTRolloutSampling - _convert_: 'all' - num_k: -1 - temp: 1.0 - criteria: "topk_prob" - - - # Decoder - hidden_dim: 64 - num_freq_bands: 64 - num_heads: 4 - head_dim: 8 - dropout: 0.1 - hist_drop_prob: 0.1 - num_map_layers: 2 - num_agent_layers: 4 - pl2pl_radius: 10 - pl2a_radius: 20 - a2a_radius: 20 - time_span: 20 - num_historical_steps: 11 - num_future_steps: 80 - - # train loss - use_gt_raw: True - gt_thresh_scale_length: -1.0 # {"veh": 4.8, "cyc": 2.0, "ped": 1.0} - label_smoothing: 0.1 - rollout_as_gt: False - - # else: - n_rollout_closed_val: 10 - n_vis_batch: 2 - n_vis_scenario: 2 - n_vis_rollout: 5 diff --git a/d123/script/config/lightning_training/trainer/ddp.yaml b/d123/script/config/lightning_training/trainer/ddp.yaml deleted file mode 100644 index 20fe3133..00000000 --- a/d123/script/config/lightning_training/trainer/ddp.yaml +++ /dev/null @@ -1,13 +0,0 @@ -defaults: - - default_trainer - -strategy: - _target_: lightning.pytorch.strategies.DDPStrategy - find_unused_parameters: false - gradient_as_bucket_view: true - -accelerator: gpu -devices: -1 -num_nodes: 1 -sync_batchnorm: true -log_every_n_steps: 20 diff --git a/d123/script/config/lightning_training/trainer/default_trainer.yaml b/d123/script/config/lightning_training/trainer/default_trainer.yaml deleted file mode 100644 index 7496cc30..00000000 --- a/d123/script/config/lightning_training/trainer/default_trainer.yaml +++ /dev/null @@ -1,27 +0,0 @@ -_target_: lightning.pytorch.trainer.Trainer - -default_root_dir: ${output_dir} - -limit_train_batches: null -limit_val_batches: null -limit_test_batches: 1.0 - -# max_steps: 25000 -# val_check_interval: 0.5 - -max_epochs: 100 - -accelerator: gpu -devices: -1 - -precision: 32-true -check_val_every_n_epoch: 1 - -# set True to ensure deterministic results -# makes training slower but gives more reproducibility than just setting seeds -deterministic: false -gradient_clip_val: 0.5 -num_sanity_val_steps: 0 -accumulate_grad_batches: 1 -log_every_n_steps: 1 -strategy: auto diff --git a/d123/script/config/preprocessing/default_preprocessing.yaml b/d123/script/config/preprocessing/default_preprocessing.yaml deleted file mode 100644 index 7b9bc0c5..00000000 --- a/d123/script/config/preprocessing/default_preprocessing.yaml +++ /dev/null @@ -1,20 +0,0 @@ -hydra: - run: - dir: ${output_dir} - output_subdir: ${output_dir}/code/hydra # Store hydra's config breakdown here for debugging - searchpath: # Only in these paths are discoverable - - pkg://d123.script.config - - pkg://d123.script.config.common - - pkg://d123.script.config.preprocessing - job: - chdir: False - -defaults: - - default_common - - default_experiment - - default_dataset_paths - - _self_ - - - -cache_path: ???
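For orientation, the deleted YAML files above are standard Hydra config groups: every _target_ names a class to instantiate, and the defaults list stitches data, model, callbacks, and trainer together. A minimal sketch of how such a tree is typically consumed; the entry-point function below and the version_base choice are illustrative assumptions, not code from this repository:

import hydra
from hydra.utils import instantiate
from omegaconf import DictConfig


@hydra.main(
    config_path="d123/script/config/lightning_training",
    config_name="default_lightning_training",
    version_base=None,
)
def main(cfg: DictConfig) -> None:
    # Hydra resolves the defaults list, then instantiate() builds each
    # object from its _target_ entry.
    datamodule = instantiate(cfg.data)  # e.g. MultiDataModule (data/waymo.yaml)
    model = instantiate(cfg.model)  # e.g. SMART (model/smart_nano_1M.yaml)
    trainer = instantiate(cfg.trainer)  # lightning.pytorch.trainer.Trainer
    trainer.fit(model, datamodule=datamodule, ckpt_path=cfg.ckpt_path)


if __name__ == "__main__":
    main()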
diff --git a/d123/script/config/simulation/default_simulation.yaml b/d123/script/config/simulation/default_simulation.yaml deleted file mode 100644 index 0be7fd09..00000000 --- a/d123/script/config/simulation/default_simulation.yaml +++ /dev/null @@ -1,20 +0,0 @@ -hydra: - run: - dir: ${output_dir} - output_subdir: ${output_dir}/code/hydra # Store hydra's config breakdown here for debugging - searchpath: # Only in these paths are discoverable - - pkg://d123.script.config - - pkg://d123.script.config.common - - pkg://d123.script.config.preprocessing - job: - chdir: False - -defaults: - - default_common - - default_experiment - - default_dataset_paths - - _self_ - - -paths: - cache_root: /home/daniel/waymo_training_catk diff --git a/d123/simulation/__init__.py b/d123/simulation/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/d123/simulation/agents/__init__.py b/d123/simulation/agents/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/d123/simulation/agents/abstract_agents.py b/d123/simulation/agents/abstract_agents.py deleted file mode 100644 index 390bbab2..00000000 --- a/d123/simulation/agents/abstract_agents.py +++ /dev/null @@ -1,27 +0,0 @@ -from abc import abstractmethod -from typing import List, Optional - -from d123.common.datatypes.detection.detection import BoxDetection -from d123.conversion.maps.abstract_map import AbstractMap -from d123.conversion.scene.abstract_scene import AbstractScene - - -class AbstractAgents: - - # Whether the agent class requires the scenario object to be passed at construction time. - # This can be set to true only for oracle planners and cannot be used for submissions. - requires_scene: bool = True - - @abstractmethod - def reset( - self, - map_api: AbstractMap, - target_agents: List[BoxDetection], - non_target_agents: List[BoxDetection], - scene: Optional[AbstractScene] = None, - ) -> List[BoxDetection]: - raise NotImplementedError - - @abstractmethod - def step(self, non_target_agents: List[BoxDetection]) -> List[BoxDetection]: - raise NotImplementedError diff --git a/d123/simulation/agents/constant_velocity_agents.py b/d123/simulation/agents/constant_velocity_agents.py deleted file mode 100644 index 13b4beff..00000000 --- a/d123/simulation/agents/constant_velocity_agents.py +++ /dev/null @@ -1,67 +0,0 @@ -import copy -from abc import abstractmethod -from typing import List, Optional - -from d123.common.datatypes.detection.detection import BoxDetection, BoxDetectionSE2 -from d123.conversion.maps.abstract_map import AbstractMap -from d123.conversion.scene.abstract_scene import AbstractScene -from d123.geometry.bounding_box import BoundingBoxSE2 -from d123.geometry.point import Point2D -from d123.geometry.transform.tranform_2d import translate_along_yaw -from d123.simulation.agents.abstract_agents import AbstractAgents - - -class ConstantVelocityAgents(AbstractAgents): - - # Whether the agent class requires the scenario object to be passed at construction time. - # This can be set to true only for oracle planners and cannot be used for submissions. - requires_scene: bool = False - - def __init__(self) -> None: - """ - Initialize the constant velocity agents. 
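- Each agent keeps its initial speed and heading: step() translates the box along its yaw by speed * elapsed_time, leaving bounding box size and velocity unchanged.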
- """ - super().__init__() - self._timestep_s: float = 0.1 - self._current_iteration: int = 0 - self._map_api: AbstractMap = None - - self._initial_target_agents: List[BoxDetection] = [] - - @abstractmethod - def reset( - self, - map_api: AbstractMap, - target_agents: List[BoxDetection], - non_target_agents: List[BoxDetection], - scene: Optional[AbstractScene] = None, - ) -> List[BoxDetection]: - assert scene is None - - self._map_api = map_api - self._current_iteration = 0 - self._initial_target_agents = [copy.deepcopy(agent) for agent in target_agents] - return self._initial_target_agents - - def step(self, non_target_agents: List[BoxDetection]): - self._current_iteration += 1 - - time_delta_s = self._timestep_s * self._current_iteration - current_target_agents = [] - for initial_agent in self._initial_target_agents: - speed: float = float(initial_agent.velocity.vector_2d.magnitude) - - propagated_center = translate_along_yaw(initial_agent.center, Point2D(speed * time_delta_s, 0.0)) - propagated_bounding_box = BoundingBoxSE2( - propagated_center, - initial_agent.bounding_box_se3.length, - initial_agent.bounding_box_se3.width, - ) - propagated_agent: BoxDetectionSE2 = BoxDetectionSE2( - metadata=initial_agent.metadata, - bounding_box_se2=propagated_bounding_box, - velocity=initial_agent.velocity, - ) - current_target_agents.append(propagated_agent) - - return current_target_agents diff --git a/d123/simulation/agents/idm_agents.py b/d123/simulation/agents/idm_agents.py deleted file mode 100644 index 95d8263a..00000000 --- a/d123/simulation/agents/idm_agents.py +++ /dev/null @@ -1,196 +0,0 @@ -import copy -from abc import abstractmethod -from dataclasses import dataclass -from typing import Dict, List, Optional, Tuple - -import numpy as np -from shapely.geometry import CAP_STYLE, Polygon - -from d123.common.datatypes.detection.detection import BoxDetection, BoxDetectionSE2 -from d123.conversion.maps.abstract_map import AbstractMap -from d123.conversion.scene.abstract_scene import AbstractScene -from d123.datatypes.scene.arrow.utils.arrow_getters import BoxDetectionWrapper -from d123.geometry.bounding_box import BoundingBoxSE2 -from d123.geometry.point import Point2D -from d123.geometry.polyline import PolylineSE2 -from d123.geometry.se import StateSE2 -from d123.geometry.transform.tranform_2d import translate_along_yaw -from d123.geometry.vector import Vector2D -from d123.simulation.agents.abstract_agents import AbstractAgents - - -@dataclass -class IDMConfig: - target_velocity: float = 10.0 # [m/s] - min_gap_to_lead_agent: float = 1.0 # [m] - headway_time: float = 1.5 # [s] - accel_max: float = 1.0 # [m/s^2] - decel_max: float = 2.0 # [m/s^2] - acceleration_exponent: float = 4.0 # Usually set to 4 - - -class IDMAgents(AbstractAgents): - - # Whether the agent class requires the scenario object to be passed at construction time. - # This can be set to true only for oracle planners and cannot be used for submissions. - requires_scene: bool = True - - def __init__(self) -> None: - """ - Initialize the constant velocity agents. 
- """ - super().__init__() - self._timestep_s: float = 0.1 - self._current_iteration: int = 0 - self._map_api: AbstractMap = None - - self._idm_config: IDMConfig = IDMConfig(target_velocity=5.0, accel_max=1.0, decel_max=2.0) - - self._initial_target_agents: List[BoxDetection] = [] - self._past_target_agents: List[BoxDetection] = [] - self._agent_paths: Dict[str, PolylineSE2] = {} - self._agent_paths_buffer: Dict[str, Polygon] = {} - self._agent_initial_vel: Dict[str, float] = {} - self._extend_path_length: float = 100 - - @abstractmethod - def reset( - self, - map_api: AbstractMap, - target_agents: List[BoxDetection], - non_target_agents: List[BoxDetection], - scene: Optional[AbstractScene] = None, - ) -> List[BoxDetection]: - assert scene is not None - - self._map_api = map_api - self._current_iteration = 0 - self._initial_target_agents = [copy.deepcopy(agent) for agent in target_agents] - - future_box_detections = [ - scene.get_box_detections_at_iteration(iteration) for iteration in range(0, scene.number_of_iterations) - ] - - # TODO: refactor or move for general use - for agent in self._initial_target_agents: - future_trajectory: List[StateSE2] = [] - for box_detections in future_box_detections: - agent_at_iteration = box_detections.get_detection_by_track_token(agent.metadata.track_token) - if agent_at_iteration is None: - break - - future_trajectory.append(agent_at_iteration.center.state_se2) - - if len(future_trajectory) < 2: - future_trajectory = [agent.center.state_se2, translate_along_yaw(agent.center, Point2D(0.1, 0.0))] - - future_trajectory.append(translate_along_yaw(future_trajectory[-1], Point2D(self._extend_path_length, 0.0))) - - polyline_se2 = PolylineSE2.from_discrete_se2(future_trajectory) - self._agent_paths[agent.metadata.track_token] = polyline_se2 - self._agent_paths_buffer[agent.metadata.track_token] = polyline_se2.linestring.buffer( - agent.bounding_box_se2.width / 2, cap_style=CAP_STYLE.square - ) - self._agent_initial_vel[agent.metadata.track_token] = float(agent.velocity.vector_2d.magnitude) - - self._past_target_agents = self._initial_target_agents - return self._initial_target_agents - - def step(self, non_target_agents: List[BoxDetection]): - self._current_iteration += 1 - - box_detections = BoxDetectionWrapper(box_detections=non_target_agents + self._past_target_agents) - occupancy_map = box_detections.occupancy_map - - # time_delta_s = self._timestep_s * self._current_iteration - current_target_agents = [] - for past_agent in self._past_target_agents: - agent_velocity: float = float(past_agent.velocity.vector_2d.magnitude) - - agent_path = self._agent_paths[past_agent.metadata.track_token] - agent_path_buffer = self._agent_paths_buffer[past_agent.metadata.track_token] - agent_distance_on_path = agent_path.project(past_agent.center.point_2d) - - track_token_in_path: List[str] = occupancy_map.intersects(agent_path_buffer) - - leading_agent: Optional[BoxDetection] = None - leading_agent_distance_on_path: float = float("inf") - for track_token in track_token_in_path: - if track_token == past_agent.metadata.track_token: - continue - - other_agent = box_detections.get_detection_by_track_token(track_token) - if other_agent is None: - continue - - other_agent_distance_on_path = agent_path.project(other_agent.center.point_2d) - if other_agent_distance_on_path < agent_distance_on_path: - continue - - if other_agent_distance_on_path < leading_agent_distance_on_path: - leading_agent = other_agent - leading_agent_distance_on_path = other_agent_distance_on_path - - if 
leading_agent is not None: - distance_to_lead_agent = past_agent.shapely_polygon.distance(leading_agent.shapely_polygon) - lead_agent_velocity = float(leading_agent.velocity.vector_2d.magnitude) - else: - distance_to_lead_agent = float( - np.clip(agent_path.length - agent_distance_on_path, a_min=0.0, a_max=None) - ) - lead_agent_velocity = 0.0 - - # propagate the agent using IDM - self._idm_config.target_velocity = self._agent_initial_vel[past_agent.metadata.track_token] + 0.01 - x_dot, v_agent_dot = _propagate_idm( - agent_velocity, lead_agent_velocity, distance_to_lead_agent, self._idm_config - ) - - v_agent_dot = min(max(-self._idm_config.decel_max, v_agent_dot), self._idm_config.accel_max) - propagate_distance = agent_distance_on_path + x_dot * self._timestep_s - propagated_center = agent_path.interpolate(propagate_distance) - propagated_bounding_box = BoundingBoxSE2( - propagated_center, - past_agent.bounding_box_se2.length, - past_agent.bounding_box_se2.width, - ) - new_velocity = Vector2D(agent_velocity + v_agent_dot * self._timestep_s, 0.0) - propagated_agent: BoxDetectionSE2 = BoxDetectionSE2( - metadata=past_agent.metadata, - bounding_box_se2=propagated_bounding_box, - velocity=new_velocity, - ) - current_target_agents.append(propagated_agent) - - self._past_target_agents = current_target_agents - return current_target_agents - - -def _propagate_idm( - agent_velocity: float, lead_velocity: float, agent_lead_distance: float, idm_config: IDMConfig -) -> Tuple[float, float]: - - # convenience definitions - s_star = ( - idm_config.min_gap_to_lead_agent - + agent_velocity * idm_config.headway_time - + (agent_velocity * (agent_velocity - lead_velocity)) - / (2 * np.sqrt(idm_config.accel_max * idm_config.decel_max)) - ) - s_alpha = max(agent_lead_distance, idm_config.min_gap_to_lead_agent) # clamp to avoid zero division - - # differential equations - x_dot = agent_velocity - try: - v_agent_dot = idm_config.accel_max * ( - 1 - - (agent_velocity / idm_config.target_velocity) ** idm_config.acceleration_exponent - - (s_star / s_alpha) ** 2 - ) - except: # noqa: E722 - print("input", agent_velocity, lead_velocity, agent_lead_distance) - print("s_star", s_star) - print("s_alpha", s_alpha) - print("x_dot", x_dot) - v_agent_dot = 0.0 - return x_dot, v_agent_dot diff --git a/d123/simulation/agents/path_following.py b/d123/simulation/agents/path_following.py deleted file mode 100644 index e6357a61..00000000 --- a/d123/simulation/agents/path_following.py +++ /dev/null @@ -1,91 +0,0 @@ -import copy -from abc import abstractmethod -from typing import Dict, List, Optional - -from d123.common.datatypes.detection.detection import BoxDetection, BoxDetectionSE2 -from d123.conversion.maps.abstract_map import AbstractMap -from d123.conversion.scene.abstract_scene import AbstractScene -from d123.geometry.bounding_box import BoundingBoxSE2 -from d123.geometry.point import Point2D -from d123.geometry.polyline import PolylineSE2 -from d123.geometry.se import StateSE2 -from d123.geometry.transform.tranform_2d import translate_along_yaw -from d123.simulation.agents.abstract_agents import AbstractAgents - - -class PathFollowingAgents(AbstractAgents): - - # Whether the agent class requires the scenario object to be passed at construction time. - # This can be set to true only for oracle planners and cannot be used for submissions. - requires_scene: bool = True - - def __init__(self) -> None: - """ - Initialize the path-following agents.
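- Each agent replays its logged future path at its initial speed, ignoring the recorded timing of the log.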
- """ - super().__init__() - self._timestep_s: float = 0.1 - self._current_iteration: int = 0 - self._map_api: AbstractMap = None - - self._initial_target_agents: List[BoxDetection] = [] - self._agent_paths: Dict[str, PolylineSE2] = {} - self._extend_path_length: float = 0.1 - - @abstractmethod - def reset( - self, - map_api: AbstractMap, - target_agents: List[BoxDetection], - non_target_agents: List[BoxDetection], - scene: Optional[AbstractScene] = None, - ) -> List[BoxDetection]: - assert scene is not None - - self._map_api = map_api - self._current_iteration = 0 - self._initial_target_agents = [copy.deepcopy(agent) for agent in target_agents] - - future_box_detections = [ - scene.get_box_detections_at_iteration(iteration) for iteration in range(0, scene.number_of_iterations) - ] - - # TODO: refactor or move for general use - for agent in self._initial_target_agents: - future_trajectory: List[StateSE2] = [] - for box_detections in future_box_detections: - agent_at_iteration = box_detections.get_detection_by_track_token(agent.metadata.track_token) - if agent_at_iteration is None: - break - - future_trajectory.append(agent_at_iteration.center.state_se2) - - future_trajectory.append(translate_along_yaw(future_trajectory[-1], Point2D(self._extend_path_length, 0.0))) - - self._agent_paths[agent.metadata.track_token] = PolylineSE2.from_discrete_se2(future_trajectory) - - return self._initial_target_agents - - def step(self, non_target_agents: List[BoxDetection]): - self._current_iteration += 1 - - time_delta_s = self._timestep_s * self._current_iteration - current_target_agents = [] - for initial_agent in self._initial_target_agents: - speed: float = float(initial_agent.velocity.vector_2d.magnitude) - - propagate_distance = speed * time_delta_s - propagated_center = self._agent_paths[initial_agent.metadata.track_token].interpolate(propagate_distance) - propagated_bounding_box = BoundingBoxSE2( - propagated_center, - initial_agent.bounding_box_se3.length, - initial_agent.bounding_box_se3.width, - ) - propagated_agent: BoxDetectionSE2 = BoxDetectionSE2( - metadata=initial_agent.metadata, - bounding_box_se2=propagated_bounding_box, - velocity=initial_agent.velocity, - ) - current_target_agents.append(propagated_agent) - - return current_target_agents diff --git a/d123/simulation/agents/smart_agents.py b/d123/simulation/agents/smart_agents.py deleted file mode 100644 index d160c2e7..00000000 --- a/d123/simulation/agents/smart_agents.py +++ /dev/null @@ -1,146 +0,0 @@ -from abc import abstractmethod -from pathlib import Path -from typing import List, Optional - -import torch -from torch_geometric.data import HeteroData - -from d123.common.datatypes.detection.detection import BoxDetection, BoxDetectionSE2 -from d123.conversion.maps.abstract_map import AbstractMap -from d123.conversion.scene.abstract_scene import AbstractScene -from d123.datatypes.scene.arrow.utils.arrow_getters import BoxDetectionWrapper, DetectionType -from d123.geometry.bounding_box import BoundingBoxSE2 -from d123.geometry.se import StateSE2 -from d123.geometry.transform.transform_se2 import convert_relative_to_absolute_point_2d_array -from d123.geometry.utils.rotation_utils import normalize_angle -from d123.simulation.agents.abstract_agents import AbstractAgents -from d123.training.feature_builder.smart_feature_builder import SMARTFeatureBuilder -from d123.training.models.sim_agent.smart.datamodules.target_builder import _numpy_dict_to_torch -from d123.training.models.sim_agent.smart.smart import SMART -from 
d123.training.models.sim_agent.smart.smart_config import SMARTConfig - - -class SMARTAgents(AbstractAgents): - - # Whether the agent class requires the scenario object to be passed at construction time. - # This can be set to true only for oracle planners and cannot be used for submissions. - requires_scene: bool = True - - def __init__(self) -> None: - """ - Initialize the SMART agents. - """ - super().__init__() - self._timestep_s: float = 0.1 - self._current_iteration: int = 0 - self._map_api: AbstractMap = None - - checkpoint_path = Path( - "/home/daniel/d123_workspace/exp/smart_mini_run/2025.06.23.20.45.20/checkpoints/epoch_050.ckpt" - ) - # checkpoint_path = Path("/home/daniel/epoch_050.ckpt") - # checkpoint_path = Path("/home/daniel/epoch_027.ckpt") - # checkpoint_path = Path("/home/daniel/epoch_008.ckpt") - config = SMARTConfig( - hidden_dim=64, - num_freq_bands=64, - num_heads=4, - head_dim=8, - dropout=0.1, - hist_drop_prob=0.1, - num_map_layers=2, - num_agent_layers=4, - pl2pl_radius=10, - pl2a_radius=20, - a2a_radius=20, - time_span=20, - num_historical_steps=11, - num_future_steps=90, - ) - - self._device = "cuda:0" if torch.cuda.is_available() else "cpu" - self._smart_model = SMART.load_from_checkpoint( - checkpoint_path, config=config, strict=False, map_location=self._device - ) - self._smart_model.eval() - self._smart_model.to(self._device) - - self._smart_model.encoder.agent_encoder.num_future_steps = 150 - self._smart_model.validation_rollout_sampling.num_k = 1 - - self._initial_box_detections: Optional[BoxDetectionWrapper] = None - self._agent_indices: List[int] = [] - - @abstractmethod - def reset( - self, - map_api: AbstractMap, - target_agents: List[BoxDetection], - non_target_agents: List[BoxDetection], - scene: Optional[AbstractScene] = None, - ) -> List[BoxDetection]: - assert scene is not None - self._current_iteration = 0 - - feature_builder = SMARTFeatureBuilder() - features = feature_builder.build_features(scene) - self._agent_indices = features["agent"]["id"].tolist() - _numpy_dict_to_torch(features, device="cpu") - torch_features = HeteroData(features) - from torch_geometric.loader import DataLoader - - # Wrap the single scene sample in a one-element dataset for the DataLoader - dataset = [torch_features] # List with single sample - loader = DataLoader(dataset, batch_size=1, shuffle=False) - with torch.no_grad(): - for batch in loader: - batch.to(self._device) - pred_traj, pred_z, pred_head = self._smart_model.test_step(batch, 0) - break - - origin = scene.get_ego_state_at_iteration(0).bounding_box.center.state_se2 - - self._pred_traj = convert_relative_to_absolute_point_2d_array(origin, pred_traj.cpu().numpy()) - self._pred_z = pred_z.cpu().numpy() - self._pred_head = normalize_angle(pred_head.cpu().numpy() + origin.yaw) - - self._initial_box_detections = scene.get_box_detections_at_iteration(0) - - # self._initial_target_agents = [copy.deepcopy(agent) for agent in target_agents] - return target_agents - - def step(self, non_target_agents: List[BoxDetection]): - - # pred_traj: (num_agents, num_rollouts, num_steps, 2), e.g. (16, 10, 80, 2); keep the first rollout - pred_traj = self._pred_traj[:, 0] - pred_head = self._pred_head[:, 0] - - current_target_agents = [] - for agent_idx, agent_id in enumerate(self._agent_indices): - if agent_id == -1: - continue - - initial_agent = self._initial_box_detections[agent_id] - if initial_agent.metadata.detection_type != DetectionType.VEHICLE: - continue - - new_center = StateSE2( - x=pred_traj[agent_idx, self._current_iteration, 0], - y=pred_traj[agent_idx, self._current_iteration, 1], - yaw=pred_head[agent_idx, self._current_iteration], -
) - propagated_bounding_box = BoundingBoxSE2( - new_center, - initial_agent.bounding_box_se2.length, - initial_agent.bounding_box_se2.width, - ) - new_velocity = initial_agent.velocity - propagated_agent: BoxDetectionSE2 = BoxDetectionSE2( - metadata=initial_agent.metadata, - bounding_box_se2=propagated_bounding_box, - velocity=new_velocity, - ) - current_target_agents.append(propagated_agent) - - self._current_iteration += 1 - return current_target_agents diff --git a/d123/simulation/callback/abstract_callback.py b/d123/simulation/callback/abstract_callback.py deleted file mode 100644 index bd22c678..00000000 --- a/d123/simulation/callback/abstract_callback.py +++ /dev/null @@ -1,79 +0,0 @@ -from abc import ABC, abstractmethod - -from d123.simulation.history.simulation_history import Simulation2DHistory, Simulation2DHistorySample -from d123.simulation.planning.abstract_planner import AbstractPlanner -from d123.simulation.planning.planner_output.abstract_planner_output import AbstractPlannerOutput -from d123.simulation.simulation_2d_setup import Simulation2DSetup - - -class AbstractCallback(ABC): - """ - Base class for simulation callbacks. - """ - - @abstractmethod - def on_initialization_start(self, setup: Simulation2DSetup, planner: AbstractPlanner) -> None: - """ - Called when initialization of simulation starts. - :param setup: simulation setup - :param planner: planner before initialization - """ - - @abstractmethod - def on_initialization_end(self, setup: Simulation2DSetup, planner: AbstractPlanner) -> None: - """ - Called when initialization of simulation ends. - :param setup: simulation setup - :param planner: planner after initialization - """ - - @abstractmethod - def on_step_start(self, setup: Simulation2DSetup, planner: AbstractPlanner) -> None: - """ - Called when simulation step starts. - :param setup: simulation setup - :param planner: planner at start of a step - """ - - @abstractmethod - def on_step_end( - self, setup: Simulation2DSetup, planner: AbstractPlanner, sample: Simulation2DHistorySample - ) -> None: - """ - Called when simulation step ends. - :param setup: simulation setup - :param planner: planner at end of a step - :param sample: result of a step - """ - - @abstractmethod - def on_planner_start(self, setup: Simulation2DSetup, planner: AbstractPlanner) -> None: - """ - Called when planner starts to compute trajectory. - :param setup: simulation setup - :param planner: planner before planner.compute_trajectory() is called - """ - - @abstractmethod - def on_planner_end( - self, setup: Simulation2DSetup, planner: AbstractPlanner, planner_output: AbstractPlannerOutput - ) -> None: - pass - - @abstractmethod - def on_simulation_start(self, setup: Simulation2DSetup) -> None: - """ - Called when simulation starts. - :param setup: simulation setup - """ - - @abstractmethod - def on_simulation_end( - self, setup: Simulation2DSetup, planner: AbstractPlanner, history: Simulation2DHistory - ) -> None: - """ - Called when simulation ends. 
- :param setup: simulation setup - :param planner: planner when simulation ends - :param history: resulting from simulation - """ diff --git a/d123/simulation/callback/multi_callback.py b/d123/simulation/callback/multi_callback.py deleted file mode 100644 index f1dd1d58..00000000 --- a/d123/simulation/callback/multi_callback.py +++ /dev/null @@ -1,74 +0,0 @@ -from typing import List - -from d123.simulation.callback.abstract_callback import AbstractCallback -from d123.simulation.history.simulation_history import Simulation2DHistory, Simulation2DHistorySample -from d123.simulation.planning.abstract_planner import AbstractPlanner -from d123.simulation.planning.planner_output.abstract_planner_output import AbstractPlannerOutput -from d123.simulation.simulation_2d_setup import Simulation2DSetup - - -class MultiCallback(AbstractCallback): - """ - This class simply calls many callbacks for simplified code. - """ - - def __init__(self, callbacks: List[AbstractCallback]): - """ - Initialize with multiple callbacks. - :param callbacks: all callbacks that will be called sequentially. - """ - self._callbacks = callbacks - - @property - def callbacks(self) -> List[AbstractCallback]: - """ - Property to access callbacks. - :return: list of callbacks this MultiCallback runs. - """ - return self._callbacks - - def on_initialization_start(self, setup: Simulation2DSetup, planner: AbstractPlanner) -> None: - """Inherited, see superclass.""" - for callback in self._callbacks: - callback.on_initialization_start(setup, planner) - - def on_initialization_end(self, setup: Simulation2DSetup, planner: AbstractPlanner) -> None: - """Inherited, see superclass.""" - for callback in self._callbacks: - callback.on_initialization_end(setup, planner) - - def on_step_start(self, setup: Simulation2DSetup, planner: AbstractPlanner) -> None: - """Inherited, see superclass.""" - for callback in self._callbacks: - callback.on_step_start(setup, planner) - - def on_step_end( - self, setup: Simulation2DSetup, planner: AbstractPlanner, sample: Simulation2DHistorySample - ) -> None: - """Inherited, see superclass.""" - for callback in self._callbacks: - callback.on_step_end(setup, planner, sample) - - def on_planner_start(self, setup: Simulation2DSetup, planner: AbstractPlanner) -> None: - """Inherited, see superclass.""" - for callback in self._callbacks: - callback.on_planner_start(setup, planner) - - def on_planner_end( - self, setup: Simulation2DSetup, planner: AbstractPlanner, planner_output: AbstractPlannerOutput - ) -> None: - """Inherited, see superclass.""" - for callback in self._callbacks: - callback.on_planner_end(setup, planner, planner_output) - - def on_simulation_start(self, setup: Simulation2DSetup) -> None: - """Inherited, see superclass.""" - for callback in self._callbacks: - callback.on_simulation_start(setup) - - def on_simulation_end( - self, setup: Simulation2DSetup, planner: AbstractPlanner, history: Simulation2DHistory - ) -> None: - """Inherited, see superclass.""" - for callback in self._callbacks: - callback.on_simulation_end(setup, planner, history) diff --git a/d123/simulation/controller/__init__.py b/d123/simulation/controller/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/d123/simulation/controller/abstract_controller.py b/d123/simulation/controller/abstract_controller.py deleted file mode 100644 index db0315e6..00000000 --- a/d123/simulation/controller/abstract_controller.py +++ /dev/null @@ -1,42 +0,0 @@ -import abc - -from d123.common.datatypes.vehicle_state.ego_state 
import EgoStateSE2 -from d123.conversion.scene.abstract_scene import AbstractScene -from d123.simulation.planning.planner_output.abstract_planner_output import AbstractPlannerOutput -from d123.simulation.time_controller.simulation_iteration import SimulationIteration - - -class AbstractEgoController(abc.ABC): - """ - Interface for generic ego controllers. - """ - - @abc.abstractmethod - def get_state(self) -> EgoStateSE2: - """ - Returns the current ego state. - :return: The current ego state. - """ - - @abc.abstractmethod - def reset(self, scene: AbstractScene) -> EgoStateSE2: - """ - Reset the controller (all internal states should be reset, if any). - """ - - @abc.abstractmethod - def step( - self, - current_iteration: SimulationIteration, - next_iteration: SimulationIteration, - ego_state: EgoStateSE2, - planner_output: AbstractPlannerOutput, - ) -> EgoStateSE2: - """ - Update the ego state based on the planner output and the current state. - :param current_iteration: The current simulation iteration. - :param next_iteration: The next simulation iteration after propagation. - :param ego_state: The current ego state. - :param planner_output: The output of a planner, e.g. action or trajectory. - :return: The updated ego state. - """ diff --git a/d123/simulation/controller/action_controller.py b/d123/simulation/controller/action_controller.py deleted file mode 100644 index a598358c..00000000 --- a/d123/simulation/controller/action_controller.py +++ /dev/null @@ -1,55 +0,0 @@ -from typing import Optional - -from d123.common.datatypes.vehicle_state.ego_state import EgoStateSE2 -from d123.conversion.scene.abstract_scene import AbstractScene -from d123.simulation.controller.abstract_controller import AbstractEgoController -from d123.simulation.controller.motion_model.abstract_motion_model import AbstractMotionModel -from d123.simulation.planning.planner_output.abstract_planner_output import AbstractPlannerOutput -from d123.simulation.planning.planner_output.action_planner_output import ActionPlannerOutput -from d123.simulation.time_controller.simulation_iteration import SimulationIteration - - -class ActionController(AbstractEgoController): - - def __init__(self, motion_model: AbstractMotionModel): - - self._motion_model = motion_model - - # lazy loaded - self._scene: Optional[AbstractScene] = None - self._current_state: Optional[EgoStateSE2] = None - - def get_state(self) -> EgoStateSE2: - """Inherited, see superclass.""" - if self._current_state is None: - self._current_state = self._scene.get_ego_state_at_iteration(0).ego_state_se2 - return self._current_state - - def reset(self, scene: AbstractScene) -> EgoStateSE2: - """Inherited, see superclass.""" - self._current_state = None - self._scene = scene - return self.get_state() - - def step( - self, - current_iteration: SimulationIteration, - next_iteration: SimulationIteration, - ego_state: EgoStateSE2, - planner_output: AbstractPlannerOutput, - ) -> EgoStateSE2: - """Inherited, see superclass.""" - - assert isinstance(planner_output, ActionPlannerOutput) - action: ActionPlannerOutput = planner_output - - # Compute the dynamic state to propagate the model - dynamic_state = action.dynamic_state_se2 - - # Propagate ego state using the motion model - self._current_state = self._motion_model.step( - ego_state=ego_state, - ideal_dynamic_state=dynamic_state, - next_timepoint=next_iteration.time_point, - ) - return self._current_state diff --git a/d123/simulation/controller/motion_model/__init__.py
b/d123/simulation/controller/motion_model/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/d123/simulation/controller/motion_model/abstract_motion_model.py b/d123/simulation/controller/motion_model/abstract_motion_model.py deleted file mode 100644 index 22862c5a..00000000 --- a/d123/simulation/controller/motion_model/abstract_motion_model.py +++ /dev/null @@ -1,25 +0,0 @@ -import abc - -from d123.common.datatypes.time.time_point import TimePoint -from d123.common.datatypes.vehicle_state.ego_state import DynamicStateSE2, EgoStateSE2 - - -class AbstractMotionModel(abc.ABC): - """ - Interface for generic ego motion model. - """ - - @abc.abstractmethod - def step( - self, - ego_state: EgoStateSE2, - ideal_dynamic_state: DynamicStateSE2, - next_timepoint: TimePoint, - ) -> EgoStateSE2: - """ - Propagate the ego state using the ideal dynamic state and next timepoint. - :param ego_state: The current ego state. - :param ideal_dynamic_state: The ideal dynamic state to propagate. - :param next_timepoint: The next timepoint for propagation. - :return: The updated ego state after propagation. - """ diff --git a/d123/simulation/controller/motion_model/kinematic_bicycle_model.py b/d123/simulation/controller/motion_model/kinematic_bicycle_model.py deleted file mode 100644 index 6ef5893c..00000000 --- a/d123/simulation/controller/motion_model/kinematic_bicycle_model.py +++ /dev/null @@ -1,166 +0,0 @@ -import numpy as np -from nuplan.common.geometry.compute import principal_value - -from d123.common.datatypes.time.time_point import TimeDuration, TimePoint -from d123.common.datatypes.vehicle_state.ego_state import DynamicStateSE2, EgoStateSE2 -from d123.geometry.se import StateSE2 -from d123.geometry.vector import Vector2D -from d123.simulation.controller.motion_model.abstract_motion_model import AbstractMotionModel - - -def forward_integrate(init: float, delta: float, sampling_duration: TimeDuration) -> float: - return float(init + delta * sampling_duration.time_s) - - -class KinematicBicycleModel(AbstractMotionModel): - - def __init__( - self, - max_steering_angle: float = np.pi / 3, - acceleration_time_constant: float = 0.2, - steering_angle_time_constant: float = 0.05, - acceleration_low_pass_filter: bool = True, - steering_angle_low_pass_filter: bool = True, - ): - self._max_steering_angle = max_steering_angle - self._acceleration_time_constant = acceleration_time_constant - self._steering_angle_time_constant = steering_angle_time_constant - self._acceleration_low_pass_filter = acceleration_low_pass_filter - self._steering_angle_low_pass_filter = steering_angle_low_pass_filter - - def get_state_dot(self, state: EgoStateSE2) -> EgoStateSE2: - - long_speed = state.dynamic_state_se2.velocity.x - wheel_base = state.vehicle_parameters.wheel_base - x_dot = long_speed * np.cos(state.rear_axle.yaw) - y_dot = long_speed * np.sin(state.rear_axle.yaw) - yaw_dot = long_speed * np.tan(state.tire_steering_angle) / wheel_base - - return EgoStateSE2.from_rear_axle( - rear_axle_se2=StateSE2(x=x_dot, y=y_dot, yaw=yaw_dot), - dynamic_state_se2=DynamicStateSE2( - velocity=state.dynamic_state_se2.acceleration, - acceleration=Vector2D(0.0, 0.0), - angular_velocity=0.0, - tire_steering_rate=0.0, - ), - vehicle_parameters=state.vehicle_parameters, - time_point=state.timepoint, - tire_steering_angle=state.dynamic_state_se2.tire_steering_rate, - ) - - def _update_commands( - self, - ego_state: EgoStateSE2, - ideal_dynamic_state: DynamicStateSE2, - step_duration: TimeDuration, - ) -> EgoStateSE2: - - 
dt_control = step_duration.time_s - long_acceleration = ego_state.dynamic_state_se2.acceleration.x - tire_steering_angle = ego_state.tire_steering_angle - - ideal_long_acceleration = ideal_dynamic_state.acceleration.x - ideal_steering_angle = dt_control * ideal_dynamic_state.tire_steering_rate + tire_steering_angle - - if self._acceleration_low_pass_filter: - updated_long_acceleration = ( - dt_control - / (dt_control + self._acceleration_time_constant) - * (ideal_long_acceleration - long_acceleration) - + long_acceleration - ) - else: - updated_long_acceleration = ideal_long_acceleration - - if self._steering_angle_low_pass_filter: - updated_steering_angle = ( - dt_control - / (dt_control + self._steering_angle_time_constant) - * (ideal_steering_angle - tire_steering_angle) - + tire_steering_angle - ) - else: - updated_steering_angle = ideal_steering_angle - - updated_steering_rate = (updated_steering_angle - tire_steering_angle) / dt_control - dynamic_state = DynamicStateSE2( - velocity=ego_state.dynamic_state_se2.velocity, - acceleration=Vector2D(updated_long_acceleration, 0.0), - angular_velocity=0.0, - tire_steering_rate=updated_steering_rate, - angular_acceleration=0.0, - ) - propagating_state = EgoStateSE2( - center_se2=ego_state.center_se2, - dynamic_state_se2=dynamic_state, - vehicle_parameters=ego_state.vehicle_parameters, - timepoint=ego_state.timepoint, - tire_steering_angle=ego_state.tire_steering_angle, - ) - return propagating_state - - def step( - self, - ego_state: EgoStateSE2, - ideal_dynamic_state: DynamicStateSE2, - next_timepoint: TimePoint, - ) -> EgoStateSE2: - - vehicle_parameters = ego_state.vehicle_parameters - - # step_duration = ego_state.timepoint.diff(sampling_time) - step_duration = next_timepoint.diff(ego_state.timepoint) - propagating_state = self._update_commands(ego_state, ideal_dynamic_state, step_duration) - - # Compute state derivatives - state_dot = self.get_state_dot(propagating_state) - - # Integrate position and heading - next_x = forward_integrate(propagating_state.rear_axle.x, state_dot.rear_axle.x, step_duration) - next_y = forward_integrate(propagating_state.rear_axle.y, state_dot.rear_axle.y, step_duration) - next_yaw = forward_integrate(propagating_state.rear_axle.yaw, state_dot.rear_axle.yaw, step_duration) - # Wrap angle between [-pi, pi] - next_yaw = principal_value(next_yaw) - - # Compute rear axle velocity in car frame - next_point_velocity_x = forward_integrate( - propagating_state.dynamic_state_se2.velocity.x, - state_dot.dynamic_state_se2.velocity.x, - step_duration, - ) - next_point_velocity_y = 0.0 # Lateral velocity is always zero in kinematic bicycle model - - # Integrate steering angle and clip to bounds - next_point_tire_steering_angle = np.clip( - forward_integrate(propagating_state.tire_steering_angle, state_dot.tire_steering_angle, step_duration), - -self._max_steering_angle, - self._max_steering_angle, - ) - - # Compute angular velocity - next_point_angular_velocity = ( - next_point_velocity_x * np.tan(next_point_tire_steering_angle) / vehicle_parameters.wheel_base - ) - - rear_axle_accel = [ - state_dot.dynamic_state_se2.velocity.x, - state_dot.dynamic_state_se2.velocity.y, - ] - angular_accel = ( - next_point_angular_velocity - ego_state.dynamic_state_se2.angular_velocity - ) / step_duration.time_s - - return EgoStateSE2.from_rear_axle( - rear_axle_se2=StateSE2(next_x, next_y, next_yaw), - dynamic_state_se2=DynamicStateSE2( - velocity=Vector2D(next_point_velocity_x, next_point_velocity_y), - 
acceleration=Vector2D(rear_axle_accel[0], rear_axle_accel[1]), - tire_steering_rate=state_dot.tire_steering_angle, - angular_velocity=next_point_angular_velocity, - angular_acceleration=angular_accel, - ), - vehicle_parameters=vehicle_parameters, - time_point=next_timepoint, - tire_steering_angle=float(next_point_tire_steering_angle), - ) diff --git a/d123/simulation/controller/tracker/__init__.py b/d123/simulation/controller/tracker/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/d123/simulation/gym/__init__.py b/d123/simulation/gym/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/d123/simulation/gym/demo_gym_env.py b/d123/simulation/gym/demo_gym_env.py deleted file mode 100644 index 6497b825..00000000 --- a/d123/simulation/gym/demo_gym_env.py +++ /dev/null @@ -1,122 +0,0 @@ -from typing import List, Optional, Tuple - -import numpy as np -import numpy.typing as npt -from nuplan.common.actor_state.dynamic_car_state import DynamicCarState -from nuplan.common.actor_state.ego_state import EgoState -from nuplan.common.actor_state.state_representation import StateSE2, StateVector2D, TimePoint -from nuplan.common.geometry.compute import get_pacifica_parameters -from nuplan.planning.simulation.controller.motion_model.kinematic_bicycle import KinematicBicycleModel - -from d123.common.datatypes.recording.detection_recording import DetectionRecording -from d123.conversion.maps.abstract_map import AbstractMap -from d123.conversion.scene.abstract_scene import AbstractScene -from d123.datatypes.scene.arrow.utils.arrow_getters import EgoStateSE3 -from d123.simulation.observation.abstract_observation import AbstractObservation -from d123.simulation.observation.agents_observation import AgentsObservation - -# from d123.simulation.observation.log_replay_observation import LogReplayObservation - - -class DemoGymEnv: - """ - A simple demo environment for testing purposes, providing a minimal - reset/step loop over recorded scenes. - """ - - def __init__(self, scenes: List[AbstractScene]) -> None: - - self._scenes = scenes - self._current_iteration = 0 - self._current_scene: Optional[AbstractScene] = None - self._current_ego_vehicle_state: Optional[EgoState] = None - - self._observation: AbstractObservation = AgentsObservation(None) - # self._observation: AbstractObservation = LogReplayObservation() - self._observation.initialize() - - self._ego_replay: bool = True - - def reset(self, scene: Optional[AbstractScene]) -> Tuple[AbstractMap, EgoState, DetectionRecording, AbstractScene]: - """ - Reset the environment to the initial state. - Returns a tuple containing the map, ego vehicle state, detection observation, and scene.
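- If no scene is provided, one is sampled uniformly at random from the available scenes.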
- """ - if scene is not None: - self._current_scene = scene - else: - self._current_scene = np.random.choice(self._scenes) - - self._current_scene_index = 0 - self._current_ego_vehicle_state = to_nuplan_ego_vehicle_state( - self._current_scene.get_ego_vehicle_state_at_iteration(self._current_scene_index) - ) - # detection_observation = DetectionRecording( - # box_detections=self._current_scene.get_box_detections_at_iteration(self._current_scene_index), - # traffic_light_detections=self._current_scene.get_traffic_light_detections_at_iteration( - # self._current_scene_index - # ), - # ) - detection_observation = self._observation.reset(self._current_scene) - - return ( - self._current_scene.get_map_api(), - self._current_ego_vehicle_state, - detection_observation, - self._current_scene, - ) - - def step(self, action: npt.NDArray[np.float64]) -> Tuple[EgoState, DetectionRecording, bool]: - self._current_scene_index += 1 - if self._ego_replay: - ego_vehicle_state = self._current_scene.get_ego_vehicle_state_at_iteration(self._current_scene_index) - self._current_ego_vehicle_state = to_nuplan_ego_vehicle_state(ego_vehicle_state) - else: - dynamic_car_state = get_dynamic_car_state(ego_state=self._current_ego_vehicle_state, action=action) - self._current_ego_vehicle_state = KinematicBicycleModel(get_pacifica_parameters()).propagate_state( - self._current_ego_vehicle_state, dynamic_car_state, TimePoint(int(0.1 * int(1e6))) - ) - - detection_observation = self._observation.step() - is_done = self._current_scene_index == self._current_scene.number_of_iterations - 1 - - return self._current_ego_vehicle_state, detection_observation, is_done - - -def to_nuplan_ego_vehicle_state(ego_vehicle_state: EgoStateSE3) -> EgoState: - """ - Convert a custom EgoVehicleState to a NuPlan EgoVehicleState. - This is a placeholder function and should be implemented based on the actual structure of EgoVehicleState. - """ - - # Assuming EgoVehicleState has attributes like position, velocity, heading, etc. 
- return EgoState.build_from_rear_axle( - rear_axle_pose=StateSE2( - ego_vehicle_state.bounding_box.center.x, - ego_vehicle_state.bounding_box.center.y, - ego_vehicle_state.bounding_box.center.yaw, - ), - rear_axle_velocity_2d=StateVector2D( - ego_vehicle_state.dynamic_state.velocity.x, ego_vehicle_state.dynamic_state.velocity.y - ), - rear_axle_acceleration_2d=StateVector2D( - ego_vehicle_state.dynamic_state.acceleration.x, ego_vehicle_state.dynamic_state.acceleration.y - ), - tire_steering_angle=0.0, - time_point=TimePoint(0), - vehicle_parameters=get_pacifica_parameters(), - is_in_auto_mode=True, - angular_vel=ego_vehicle_state.dynamic_state.angular_velocity.z, - angular_accel=0.0, - tire_steering_rate=0.0, - ) - - -def get_dynamic_car_state(ego_state: EgoState, action: npt.NDArray[np.float64]) -> DynamicCarState: - acceleration, steering_rate = action[0], action[1] - return DynamicCarState.build_from_rear_axle( - rear_axle_to_center_dist=ego_state.car_footprint.rear_axle_to_center_dist, - rear_axle_velocity_2d=ego_state.dynamic_car_state.rear_axle_velocity_2d, - rear_axle_acceleration_2d=StateVector2D(acceleration, 0), - tire_steering_rate=steering_rate, - ) diff --git a/d123/simulation/gym/environment/environment_wrapper.py b/d123/simulation/gym/environment/environment_wrapper.py deleted file mode 100644 index fe3cd5b9..00000000 --- a/d123/simulation/gym/environment/environment_wrapper.py +++ /dev/null @@ -1,191 +0,0 @@ -import logging -import sys -import traceback -from threading import Timer -from typing import Any, Dict, Optional - -import gymnasium as gym -import numpy as np -import numpy.typing as npt - -from d123.simulation.gym.environment.gym_observation.abstract_gym_observation import ( - AbstractGymObservation, -) -from d123.simulation.gym.environment.output_converter.abstract_output_converter import ( - AbstractOutputConverter, -) -from d123.simulation.gym.environment.reward_builder.abstract_reward_builder import AbstractRewardBuilder -from d123.simulation.gym.environment.scenario_sampler.abstract_scenario_sampler import AbstractScenarioSampler -from d123.simulation.gym.environment.simulation_builder.abstract_simulation_builder import ( - AbstractSimulationBuilder, -) -from d123.simulation.gym.environment.simulation_wrapper import SimulationWrapper - -logger = logging.getLogger(__name__) - - -class EnvironmentWrapper(gym.Env): - """ - Gymnasium environment class interface. Wraps the simulation, trajectory builder, observation builder, and reward builder. - """ - - metadata = {"render_modes": ["rgb_array"]} # TODO: Figure out the purpose of this metadata. - - def __init__( - self, - scenario_sampler: AbstractScenarioSampler, - simulation_builder: AbstractSimulationBuilder, - output_converter: AbstractOutputConverter, - observation_builder: AbstractGymObservation, - reward_builder: AbstractRewardBuilder, - terminate_on_failure: bool = False, - ): - """ - Initializes the EnvironmentWrapper. - :param scenario_sampler: Scenario sampler to sample scenarios for the environment. - :param simulation_builder: Simulation builder to create the simulation from the sampled scenario. - :param trajectory_builder: Trajectory builder to create trajectories based on actions. - :param observation_builder: Observation builder to create observations from the simulation state. - :param reward_builder: Reward builder to create rewards based on the simulation state and actions. 
- :param terminate_on_failure: Whether to terminate during an error of the simulation, defaults to False - """ - - self._scenario_sampler = scenario_sampler - self._simulation_builder = simulation_builder - self._trajectory_builder = output_converter - self._observation_builder = observation_builder - self._reward_builder = reward_builder - - # lazy loaded - self._simulation_wrapper: Optional[SimulationWrapper] = None - - # timer - # TODO: Consider removing the timers. - self._reset_timer = Timer(end_key="reset_total") - self._step_timer = Timer(end_key="step_total") - - self._terminate_on_error = terminate_on_failure - - # Set for super class - self.observation_space = observation_builder.get_observation_space() - self.action_space = output_converter.get_action_space() - - def reset(self, seed: Optional[int] = None, options: Optional[dict] = None): - """Inherited, see superclass.""" - super().reset(seed=seed) - info: Dict[str, Any] = {} - - try: - - self._reward_builder.reset() - self._observation_builder.reset() - - self._reset_timer.flush() - self._reset_timer.start() - - scenario = self._scenario_sampler.sample(seed) - self._reset_timer.log("reset_1_sample_scenario") - - simulation = self._simulation_builder.build_simulation(scenario) - self._reset_timer.log("reset_2_build_simulation") - - self._simulation_wrapper = SimulationWrapper(simulation) - self._reset_timer.log("reset_3_wrap_simulation") - - ( - planner_input, - planner_initialization, - ) = self._simulation_wrapper.initialize() - self._reset_timer.log("reset_4_init_wrapper") - - observation = self._observation_builder.get_gym_observation(planner_input, planner_initialization, info) - self._reset_timer.log("reset_5_build_observation") - self._reset_timer.end() - - info["timing"] = self._reset_timer.info() - - except Exception: - logger.warning(f"{type(self).__name__} failed during .reset() with the following exception:") - traceback.print_exc() - - if self._terminate_on_error: - sys.exit(1) - else: - observation = create_zero_like_observation(self.observation_space) - - return observation, info - - def step(self, action): - """Inherited, see superclass.""" - info: Dict[str, Any] = {} - - try: - assert self._simulation_wrapper is not None - - self._step_timer.flush() - self._step_timer.start() - - trajectory = self._trajectory_builder.build_trajectory( - action, self._simulation_wrapper.current_ego_state, info - ) - self._step_timer.log("step_1_build_trajectory") - - planner_input, is_simulation_running = self._simulation_wrapper.step(trajectory) - self._step_timer.log("step_2_simulation_step") - - reward, termination, truncation = self._reward_builder.build_reward(self._simulation_wrapper, info) - termination = termination or not is_simulation_running - self._step_timer.log("step_3_build_reward") - - observation = self._observation_builder.get_gym_observation( - planner_input, self._simulation_wrapper.planner_initialization, info - ) - - self._step_timer.log("step_4_build_observation") - self._step_timer.end() - - info["timing"] = self._step_timer.info() - - except Exception: - logger.warning(f"{type(self).__name__} failed during .step() with the following exception:") - traceback.print_exc() - - if self._terminate_on_error: - sys.exit(1) # Exit with error code 1 - else: - observation = create_zero_like_observation(self.observation_space) - reward = 0.0 - termination = truncation = True - - return observation, reward, termination, truncation, info - - def close(self): - """Inherited, see superclass.""" - # TODO: Figure out the 
purpose of this function. :D - logger.info("EnvironmentWrapper close!") - - -def create_zero_like_observation( - observation_space: gym.spaces, -) -> Dict[str, npt.NDArray]: - """ - TODO: Consider moving elsewhere. - Creates a zero-like gymnasium observation given the observation space. - :param observation_space: Gymnasium observation space. - :raises TypeError: Invalid observation space. - :return: zero-like gymnasium observation - """ - if isinstance(observation_space, gym.spaces.Discrete): - return 0 - elif isinstance(observation_space, gym.spaces.Box): - return np.zeros(observation_space.shape, dtype=observation_space.dtype) - elif isinstance(observation_space, gym.spaces.MultiBinary): - return np.zeros(observation_space.shape, dtype=np.int8) - elif isinstance(observation_space, gym.spaces.MultiDiscrete): - return np.zeros(observation_space.shape, dtype=np.int64) - elif isinstance(observation_space, gym.spaces.Dict): - return {key: create_zero_like_observation(subspace) for key, subspace in observation_space.spaces.items()} - elif isinstance(observation_space, gym.spaces.Tuple): - return tuple(create_zero_like_observation(subspace) for subspace in observation_space.spaces) - else: - raise TypeError(f"Unsupported space type: {type(observation_space)}") diff --git a/d123/simulation/gym/environment/gym_observation/__init__.py b/d123/simulation/gym/environment/gym_observation/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/d123/simulation/gym/environment/gym_observation/abstract_gym_observation.py b/d123/simulation/gym/environment/gym_observation/abstract_gym_observation.py deleted file mode 100644 index 819e5052..00000000 --- a/d123/simulation/gym/environment/gym_observation/abstract_gym_observation.py +++ /dev/null @@ -1,36 +0,0 @@ -from abc import ABC, abstractmethod -from typing import Any, Dict - -from gymnasium import spaces - -from d123.simulation.planning.abstract_planner import PlannerInitialization, PlannerInput - - -class AbstractGymObservation(ABC): - """Abstract class for building observations in a gym environment.""" - - @abstractmethod - def reset(self) -> None: - """Reset the observation builder.""" - - @abstractmethod - def get_observation_space(self) -> spaces.Space: - """ - Get the observation space of the environment. - :return: Observation space as a gymnasium Space. - """ - - @abstractmethod - def get_gym_observation( - self, - planner_input: PlannerInput, - planner_initialization: PlannerInitialization, - info: Dict[str, Any], - ) -> Dict[str, Any]: - """ - Build an observation from the planner input and initialization. - :param planner_input: Planner input as defined in the d123 interface. - :param planner_initialization: Planner initialization as defined in the d123 interface. - :param info: Arbitrary information dictionary, for passing information between modules. - :return: Observation as a named dictionary. 
- """ diff --git a/d123/simulation/gym/environment/gym_observation/raster/raster_gym_observation.py b/d123/simulation/gym/environment/gym_observation/raster/raster_gym_observation.py deleted file mode 100644 index 51072e1f..00000000 --- a/d123/simulation/gym/environment/gym_observation/raster/raster_gym_observation.py +++ /dev/null @@ -1,276 +0,0 @@ -from __future__ import annotations - -import math -from enum import IntEnum -from typing import Any, Dict, List, Optional - -import numpy as np -import numpy.typing as npt -from gymnasium import spaces - -from d123.simulation.gym.environment.gym_observation.abstract_gym_observation import AbstractGymObservation -from d123.simulation.gym.environment.gym_observation.raster.raster_renderer import RasterRenderer -from d123.simulation.gym.environment.helper.environment_area import AbstractEnvironmentArea -from d123.simulation.gym.environment.helper.environment_cache import ( - BoxDetectionCache, - MapCache, - build_environment_caches, -) -from d123.simulation.planning.abstract_planner import PlannerInitialization, PlannerInput - - -def del_keys_in_dict(info: Dict[str, Any], keys: List[str]) -> None: - """ - Deletes specified keys from the info dictionary if they exist. - :param info: Dictionary from which keys will be deleted. - :param keys: List of keys to delete from the dictionary. - """ - for key in keys: - if key in info.keys(): - del info[key] - - -class RasterObservationType(IntEnum): - """Enum to represent behavior at different stages in the RasterGymObservation.""" - - INFERENCE = 0 - RESET = 1 - STEP = 2 - - -class RasterGymObservation(AbstractGymObservation): - """Default raster observation builder for the CaRL model.""" - - def __init__( - self, - environment_area: AbstractEnvironmentArea, - renderer: RasterRenderer, - obs_num_measurements: int = 10, - num_value_measurements: int = 4, - action_space_dim: int = 2, - inference: bool = False, - ) -> None: - """ - Initializes the RasterGymObservation. - :param environment_area: Environment area to be used for rendering. - :param renderer: Renderer class, see implementation of DefaultRenderer. 
- :param obs_num_measurements: number of observation measurements passed to the policy, defaults to 10 - :param num_value_measurements: number of value measurements passed to the value network, defaults to 4 - :param action_space_dim: dimension of action space (steering and acceleration), defaults to 2 - :param inference: Whether the observation builder is used during inference, defaults to False - """ - - self._environment_area = environment_area - self._renderer = renderer - - self._obs_num_measurements = obs_num_measurements - self._num_value_measurements = num_value_measurements - self._action_space_dim = action_space_dim - - self._inference = inference - - # lazy loaded during inference - # NOTE: route roadblocks of current scenario are stored, as they may require correction during inference - self._route_lane_group_ids: Optional[List[str]] = None - - def reset(self) -> None: - """Inherited, see superclass.""" - self._route_lane_group_ids = None - - def get_observation_space(self) -> spaces.Space: - """Inherited, see superclass.""" - return spaces.Dict( - { - "bev_semantics": spaces.Box( - 0, - 255, - shape=self._renderer.shape, - dtype=np.uint8, - ), - "measurements": spaces.Box( - -math.inf, - math.inf, - shape=(self._obs_num_measurements,), - dtype=np.float32, - ), - "value_measurements": spaces.Box( - -math.inf, - math.inf, - shape=(self._num_value_measurements,), - dtype=np.float32, - ), - } - ) - - def get_gym_observation( - self, - planner_input: PlannerInput, - planner_initialization: PlannerInitialization, - info: Dict[str, Any], - ) -> Dict[str, Any]: - """Inherited, see superclass.""" - - if self._inference: - observation_type = RasterObservationType.INFERENCE - elif planner_input.iteration.index == 0: - observation_type = RasterObservationType.RESET - else: - observation_type = RasterObservationType.STEP - - observation = {} - observation["bev_semantics"] = self._get_bev_semantics( - planner_input, - planner_initialization, - observation_type, - info, - ) - observation["measurements"] = self._get_build_measurements(planner_input, observation_type, info) - observation["value_measurements"] = self._get_value_measurements(observation_type, info) - - return observation - - def _get_bev_semantics( - self, - planner_input: PlannerInput, - planner_initialization: PlannerInitialization, - observation_type: RasterObservationType, - info: Dict[str, Any], - ) -> npt.NDArray[np.uint8]: - """ - Helper function to build BEV raster of the current environment step. - :param planner_input: Planner input interface of d123. - :param planner_initialization: Planner initialization interface of d123. - :param observation_type: Enum whether to render for inference, reset or step. - :param info: Arbitrary information dictionary, for passing information between modules. - :raises ValueError: If the DefaultObservationType is invalid. - :return: BEV raster as a numpy array. 
- """ - # FIXME: - # if observation_type == RasterObservationType.INFERENCE: - - if observation_type in [RasterObservationType.INFERENCE, RasterObservationType.RESET]: - map_cache, detection_cache = build_environment_caches( - planner_input, planner_initialization, self._environment_area - ) - elif observation_type == RasterObservationType.STEP: - assert "map_cache" in info.keys() - assert "detection_cache" in info.keys() - assert isinstance(info["map_cache"], MapCache) - assert isinstance(info["detection_cache"], BoxDetectionCache) - map_cache, detection_cache = info["map_cache"], info["detection_cache"] - else: - raise ValueError("RasterObservationType is invalid") - - del_keys_in_dict(info, ["map_cache", "detection_cache"]) - return self._renderer.render(map_cache, detection_cache) - - def _get_build_measurements( - self, - planner_input: PlannerInput, - observation_type: RasterObservationType, - info: Dict[str, Any], - ) -> npt.NDArray[np.float32]: - """ - Helper function to build measurements of the current environment step. - :param planner_input: Planner input interface of d123. - :param observation_type: Enum whether to render for inference, reset or step. - :param info: Arbitrary information dictionary, for passing information between modules. - :return: Ego measurements as a numpy array. - """ - - if "last_action" in info.keys(): - assert observation_type in [ - RasterObservationType.INFERENCE, - RasterObservationType.STEP, - ] - assert len(info["last_action"]) == self._action_space_dim - last_action = info["last_action"] - else: - assert observation_type in [ - RasterObservationType.INFERENCE, - RasterObservationType.RESET, - ] - last_action = np.zeros(self._action_space_dim, dtype=np.float32) - - ego_state = planner_input.history.current_state[0] - last_acceleration, last_steering_rate = last_action[0], last_action[1] - - # FIXME: rear axle to center conversion of kinematic states - # state_array = ego_state_to_center_state_array(ego_state) - # observation_measurements = np.array( - # [ - # last_acceleration, - # last_steering_rate, - # state_array[StateIndex.VELOCITY_X], - # state_array[StateIndex.VELOCITY_Y], - # state_array[StateIndex.ACCELERATION_X], - # state_array[StateIndex.ACCELERATION_Y], - # state_array[StateIndex.STEERING_ANGLE], - # state_array[StateIndex.STEERING_RATE], - # state_array[StateIndex.ANGULAR_VELOCITY], - # state_array[StateIndex.ANGULAR_ACCELERATION], - # ], - # dtype=np.float32, - # ) - observation_measurements = np.array( - [ - last_acceleration, - last_steering_rate, - ego_state.dynamic_state_se2.velocity.x, - ego_state.dynamic_state_se2.velocity.y, - ego_state.dynamic_state_se2.acceleration.x, - ego_state.dynamic_state_se2.acceleration.y, - ego_state.tire_steering_angle, - ego_state.dynamic_state_se2.tire_steering_rate, - ego_state.dynamic_state_se2.angular_velocity, - ego_state.dynamic_state_se2.angular_acceleration, - ], - dtype=np.float32, - ) - del_keys_in_dict(info, ["last_action"]) - return observation_measurements - - def _get_value_measurements( - self, - observation_type: RasterObservationType, - info: Dict[str, Any], - ) -> npt.NDArray[np.float32]: - """ - Helper function to build value measurements of the current environment step. - :param observation_type: Enum whether to render for inference, reset or step. - :param info: Arbitrary information dictionary, for passing information between modules. - :raises ValueError: If the DefaultObservationType is invalid. - :return: Value measurements as a numpy array. 
- """ - - if observation_type in [ - RasterObservationType.INFERENCE, - RasterObservationType.RESET, - ]: - remaining_time = 1.0 - remaining_progress = 1.0 - comfort_score = 1.0 - ttc_score = 1.0 - elif observation_type == RasterObservationType.STEP: - assert "remaining_time" in info.keys() - assert "remaining_progress" in info.keys() - assert "comfort_score" in info.keys() - assert "ttc_score" in info.keys() - remaining_time = info["remaining_time"] - remaining_progress = info["remaining_progress"] - comfort_score = info["comfort_score"] - ttc_score = info["ttc_score"] - else: - raise ValueError("DefaultObservationType is invalid") - - value_measurements = np.array( - [ - remaining_time, - remaining_progress, - comfort_score, - ttc_score, - ], - dtype=np.float32, - ) - del_keys_in_dict(info, ["remaining_time", "remaining_progress", "comfort_score", "ttc_score"]) - return value_measurements diff --git a/d123/simulation/gym/environment/gym_observation/raster/raster_renderer.py b/d123/simulation/gym/environment/gym_observation/raster/raster_renderer.py deleted file mode 100644 index 52da337c..00000000 --- a/d123/simulation/gym/environment/gym_observation/raster/raster_renderer.py +++ /dev/null @@ -1,505 +0,0 @@ -from __future__ import annotations - -import itertools -from enum import IntEnum -from functools import cached_property -from typing import Callable, Dict, Final, List, Optional, Tuple - -import cv2 -import numpy as np -import numpy.typing as npt -from shapely import LineString, Polygon, union_all -from shapely.affinity import scale as shapely_scale - -from d123.common.datatypes.detection.detection import BoxDetectionSE2, TrafficLightStatus -from d123.geometry.se import StateSE2 -from d123.geometry.transform.tranform_2d import translate_along_yaw -from d123.geometry.transform.transform_se2 import convert_absolute_to_relative_point_2d_array -from d123.geometry.vector import Vector2D -from d123.simulation.gym.environment.helper.environment_area import AbstractEnvironmentArea, RectangleEnvironmentArea -from d123.simulation.gym.environment.helper.environment_cache import BoxDetectionCache, MapCache - -# TODO: add to config -MIN_VALUE: Final[int] = 0 # Lowest value for a pixel in the raster -MAX_VALUE: Final[int] = 255 # Highest value for a pixel in the raster -LINE_THICKNESS: int = 1 # Width of the lines in pixels -TRAFFIC_LIGHT_VALUE: Dict[TrafficLightStatus, int] = { - TrafficLightStatus.GREEN: 80, - TrafficLightStatus.YELLOW: 170, - TrafficLightStatus.RED: 255, -} -UNIONIZE: Final[bool] = False # Whether to unionize polygons before rendering - - -class PolygonRenderType(IntEnum): - CONVEX_SINGLE = 0 - NON_CONVEX_SINGLE = 1 - NON_CONVEX_BATCH = 2 - - -RenderFunc = Callable[[npt.NDArray[np.uint8], List[npt.NDArray[np.int32]], int], None] - - -def _render_polygon_convex_single( - raster: npt.NDArray[np.uint8], pixel_exteriors: List[npt.NDArray[np.int32]], color: int -) -> None: - """ - Renders a list of convex polygons on the raster. - :param raster: uint8 numpy array representing the raster to render on. - :param pixel_exteriors: List of pixel exteriors of the polygons to render. - :param color: Color to render the polygons in, as an integer value. - """ - for pixel_exterior in pixel_exteriors: - cv2.fillConvexPoly(raster, pixel_exterior, color=color) - - -def _render_polygon_non_convex_single( - raster: npt.NDArray[np.uint8], pixel_exteriors: List[npt.NDArray[np.int32]], color: int -) -> None: - """ - Renders a list of non-convex polygons on the raster. 
- :param raster: uint8 numpy array representing the raster to render on. - :param pixel_exteriors: List of pixel exteriors of the polygons to render. - :param color: Color to render the polygons in, as an integer value. - """ - for pixel_exterior in pixel_exteriors: - cv2.fillPoly(raster, [pixel_exterior], color=color) - - -def _render_polygon_non_convex_batch( - raster: npt.NDArray[np.uint8], pixel_exteriors: List[npt.NDArray[np.int32]], color: int -) -> None: - """ - Renders a list of non-convex polygons on the raster batch-wise. - :param raster: uint8 numpy array representing the raster to render on. - :param pixel_exteriors: List of pixel exteriors of the polygons to render. - :param color: Color to render the polygons in, as an integer value. - """ - cv2.fillPoly(raster, pixel_exteriors, color=color) - - -POLYGON_RENDER_FUNCTIONS: Dict[PolygonRenderType, RenderFunc] = { - PolygonRenderType.CONVEX_SINGLE: _render_polygon_convex_single, - PolygonRenderType.NON_CONVEX_SINGLE: _render_polygon_non_convex_single, - PolygonRenderType.NON_CONVEX_BATCH: _render_polygon_non_convex_batch, -} - - -def unionize_polygons(polygons: List[Polygon], grid_size: Optional[float] = None) -> List[Polygon]: - """ - Unionizes a list of polygons into a single polygon or multiple polygons if they are disjoint. - :param polygons: List of polygons to unionize. - :param grid_size: Precision grid size for union call, defaults to None - :return: List of polygon(s) after unionization. - """ - unionized_polygons: List[Polygon] = [] - if len(polygons) == 1: - unionized_polygons.append(polygons[0]) - elif len(polygons) > 1: - union_polygon = union_all(polygons, grid_size=grid_size) - if union_polygon.geom_type == "Polygon": - unionized_polygons.append(union_polygon) - elif union_polygon.geom_type == "MultiPolygon": - for polygon in union_polygon.geoms: - unionized_polygons.append(polygon) - return unionized_polygons - - -class RasterRenderer: - """Renderer class for observation used in CaRL.""" - - def __init__( - self, - environment_area: AbstractEnvironmentArea, - pixel_per_meter: float = 2.0, - max_vehicle_speed: float = 30.0, - max_pedestrian_speed: float = 4.0, - vehicle_scaling: float = 1.0, - pedestrian_scaling: float = 1.0, - static_scaling: float = 1.0, - include_speed_line: bool = False, - ) -> None: - """ - Initializes the DefaultRenderer object. - :param environment_area: Area to render the observation in (should be rectangular). - :param pixel_per_meter: number of pixels that should represent a meter in raster, defaults to 2.0 - :param max_vehicle_speed: Max vehicle speed after clipping for rendering the color, defaults to 30.0 - :param max_pedestrian_speed: Max pedestrian speed after clipping for rendering the color, defaults to 4.0 - :param vehicle_scaling: Factor to scale size of vehicle bounding boxes, defaults to 1.0 - :param pedestrian_scaling: Factor to scale size of pedestrian bounding boxes, defaults to 1.0 - :param static_scaling: Factor to scale size of static object bounding boxes, defaults to 1.0 - :param include_speed_line: Whether to include the constant velocity speed line into the raster, defaults to False - """ - - assert isinstance( - environment_area, RectangleEnvironmentArea - ), "DefaultRendering requires a rectangular environment area!" 
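A quick self-check of the union helper above (shapely's box is used for brevity): two overlapping squares collapse into a single polygon, while disjoint squares come back separately:

from shapely.geometry import box

assert len(unionize_polygons([box(0, 0, 2, 2), box(1, 1, 3, 3)])) == 1
assert len(unionize_polygons([box(0, 0, 1, 1), box(5, 5, 6, 6)])) == 2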
- - self._environment_area = environment_area - self._pixel_per_meter = pixel_per_meter # [ppm] - - self._max_vehicle_speed = max_vehicle_speed # [m/s] - self._max_pedestrian_speed = max_pedestrian_speed # [m/s] - - self._vehicle_scaling = vehicle_scaling - self._pedestrian_scaling = pedestrian_scaling - self._static_scaling = static_scaling - - self._include_speed_line = include_speed_line - - # maybe remove: - self._polygon_render_type = PolygonRenderType.NON_CONVEX_SINGLE - - @cached_property - def pixel_frame(self) -> Tuple[int, int]: - """ - :return: Width and height of the pixel frame in pixels. - """ - width, height = self._environment_area.frame - return int(width * self._pixel_per_meter), int(height * self._pixel_per_meter) - - @property - def shape(self) -> Tuple[int, int]: - """ - :return: Shape of the raster (including channel, width, height). - """ - width, height = self.pixel_frame - return (9, width, height) - - @property - def _meter_per_pixel(self) -> float: - """ - :return: Meters per pixel, i.e., the inverse of pixel_per_meter. - """ - return 1 / self._pixel_per_meter - - def _scale_to_color(self, value: Optional[float], max_value: float) -> int: - """ - Scales a value to a color in the range [0, 255]. - :param value: Value to scale, if None, max_value is used instead. - :param max_value: Maximum value to scale to color. - :return: Scaled color value in the range [0, 255]. - """ - _value = value - if value is None: - _value = max_value - normed = np.clip(_value / max_value, 0.0, 1.0) - normed_color = np.clip(int((MAX_VALUE / 2) * normed + (MAX_VALUE / 2)), MIN_VALUE, MAX_VALUE) - return int(normed_color) - - def _scale_polygon(self, polygon: Polygon, factor: float) -> Polygon: - """ - Scales a polygon in size by a factor. - :param polygon: shapely polygon to scale. - :param factor: Scaling factor, e.g., 1.0 for no scaling, 0.5 for half size. - :return: Scaled polygon. - """ - if factor != 1.0: - polygon = shapely_scale(polygon, xfact=factor, yfact=factor, origin="centroid") - return polygon - - def _local_coords_to_pixel(self, coords: npt.NDArray[np.float32]) -> npt.NDArray[np.int32]: - """ - Converts local coordinates to pixel coordinates. - :param coords: Local coordinates to convert, shape (N, 2). - :return: Integer pixel coordinates, shape (N, 2). - """ - pixel_width, pixel_height = self.pixel_frame - pixel_center = np.array([[pixel_height, pixel_width]]) / 2.0 - pixel_coords = (coords * self._pixel_per_meter) + pixel_center - return pixel_coords.astype(np.int32) - - def _global_coords_to_pixel(self, origin: StateSE2, coords: npt.NDArray[np.float64]) -> npt.NDArray[np.int32]: - """ - Converts global coordinates to pixel coordinates. - :param origin: SE2 of origin, i.e. the center of the raster in global coordinates. - :param coords: Global coordinates to convert, shape (N, 2). - :return: Integer pixel coordinates, shape (N, 2). - """ - local_coords = convert_absolute_to_relative_point_2d_array(origin, coords) - return self._local_coords_to_pixel(local_coords) - - def _global_polygon_to_pixel(self, origin: StateSE2, polygon: Polygon) -> npt.NDArray[np.int32]: - """ - Converts a global polygon to pixel coordinates. - :param origin: SE2 of origin, i.e. the center of the raster in global coordinates. - :param polygon: Shapely polygon to convert. - :return: Integer pixel coordinates of the polygon exterior, shape (N, 1, 2). 
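Worked numbers for the color scaling above, restated as a standalone function (MIN_VALUE=0 and MAX_VALUE=255 as defined at the top of this file):

import numpy as np

def scale_to_color(value, max_value):
    # standalone restatement of RasterRenderer._scale_to_color
    value = max_value if value is None else value
    normed = np.clip(value / max_value, 0.0, 1.0)
    return int(np.clip(int(127.5 * normed + 127.5), 0, 255))

assert scale_to_color(0.0, 30.0) == 127   # stationary -> mid gray
assert scale_to_color(15.0, 30.0) == 191  # half of the max speed
assert scale_to_color(60.0, 30.0) == 255  # clipped at the max speed
assert scale_to_color(None, 30.0) == 255  # None (e.g. no speed limit) -> max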
- """ - exterior = np.array(polygon.exterior.coords).reshape((-1, 1, 2)) - return self._global_coords_to_pixel(origin, exterior) - - def _global_linestring_to_pixel(self, origin: StateSE2, linestring: LineString) -> npt.NDArray[np.int32]: - """ - Converts a global linestring to pixel coordinates. - :param origin: SE2 of origin, i.e. the center of the raster in global coordinates. - :param linestring: Shapely linestring to convert. - :return: Integer pixel coordinates of the linestring, shape (N, 1, 2). - """ - coords = np.array(linestring.coords).reshape((-1, 1, 2)) - return self._global_coords_to_pixel(origin, coords) - - def _render_polygons( - self, - raster: npt.NDArray[np.uint8], - origin: StateSE2, - polygons: List[Polygon], - color: int = MAX_VALUE, - ) -> None: - """ - Renders a list of arbitrary polygons on the raster. - :param raster: uint8 numpy array representing the raster to render on. - :param origin: SE2 of origin, i.e. the center of the raster in global coordinates. - :param polygons: List of shapely polygons to render. - :param color: Integer value of color, defaults to MAX_VALUE - """ - if len(polygons) > 0: - pixel_exteriors: List[npt.NDArray[np.int32]] = [] - if UNIONIZE: - polygons = unionize_polygons(polygons, grid_size=None) - - for polygon in polygons: - pixel_exteriors.append(self._global_polygon_to_pixel(origin, polygon)) - POLYGON_RENDER_FUNCTIONS[PolygonRenderType.NON_CONVEX_SINGLE](raster, pixel_exteriors, color) - - def _render_convex_polygons( - self, - raster: npt.NDArray[np.uint8], - origin: StateSE2, - polygons: List[Polygon], - color: int = MAX_VALUE, - ) -> None: - """ - Renders a list of convex polygons on the raster. - :param raster: uint8 numpy array representing the raster to render on. - :param origin: SE2 of origin, i.e. the center of the raster in global coordinates. - :param polygons: List of shapely polygons to render. - :param color: Integer value of color, defaults to MAX_VALUE - """ - if len(polygons) > 0: - pixel_exteriors: List[npt.NDArray[np.int32]] = [] - for polygon in polygons: - pixel_exteriors.append(self._global_polygon_to_pixel(origin, polygon)) - POLYGON_RENDER_FUNCTIONS[PolygonRenderType.CONVEX_SINGLE](raster, pixel_exteriors, color) - - def _render_linestrings( - self, - raster: npt.NDArray[np.uint8], - origin: StateSE2, - linestrings: List[LineString], - color: int = MAX_VALUE, - ) -> None: - """ - Renders a list of linestrings on the raster. - :param raster: uint8 numpy array representing the raster to render on. - :param origin: SE2 of origin, i.e. the center of the raster in global coordinates. - :param linestrings: List of shapely linestrings to render. - :param color: Integer value of color, defaults to MAX_VALUE - """ - if len(linestrings) > 0: - pixel_linestrings: List[npt.NDArray[np.int32]] = [] - for linestring in linestrings: - pixel_linestrings.append(self._global_linestring_to_pixel(origin, linestring)) - cv2.polylines( - raster, - pixel_linestrings, - isClosed=False, - color=color, - thickness=LINE_THICKNESS, - ) - - def _render_speed_line( - self, raster: npt.NDArray[np.uint8], origin: StateSE2, box_detection: BoxDetectionSE2, color: int - ) -> None: - """ - Renders a speed line for the agent on the raster. - :param raster: uint8 numpy array representing the raster to render on. - :param origin: SE2 of origin, i.e. the center of the raster in global coordinates. - :param agent: Agent object containing the state and velocity. 
- :param color: Integer value of color - """ - if box_detection.velocity.magnitude > self._meter_per_pixel: - future = translate_along_yaw( - pose=box_detection.center, - translation=Vector2D( - x=box_detection.bounding_box_se2.half_length + box_detection.velocity.magnitude, - y=0.0, - ), - ) - linestring = LineString( - [ - [box_detection.center.x, box_detection.center.y], - [future.x, future.y], - ] - ) - self._render_linestrings(raster, origin, [linestring], color=color) - - def _get_empty_raster(self) -> npt.NDArray[np.uint8]: - """ - Helper function to create an empty raster with the shape of the pixel frame. - :return: Empty raster with the shape of the pixel frame. - """ - pixel_width, pixel_height = self.pixel_frame - return np.zeros((pixel_width, pixel_height), dtype=np.uint8) - - def _render_map_from_cache(self, map_cache: MapCache) -> List[npt.NDArray[np.uint8]]: - """ - Renders the map from the map cache into a list of rasters. - :param map_cache: MapCache object containing the map data. - :return: List of rasters representing the map data. - """ - - # 1. Drivable Area (Lane group, Intersection, Car-Park), Polygon - # 2. Route (Lane group), Polygon - # 3. Lane Boundaries (Lane), Polyline - # 6. Traffic Light (Lane), Polygon - # 7. Stop-Signs (Stop-Signs), Polygon FIXME - # 8. Speed-Signs (Lane), Polygon FIXME - drivable_area_raster = self._get_empty_raster() - route_raster = self._get_empty_raster() - lane_boundary_raster = self._get_empty_raster() - traffic_light_raster = self._get_empty_raster() - stop_sign_raster = self._get_empty_raster() - speed_raster = self._get_empty_raster() - - mask = self._get_empty_raster() - drivable_area_polygons: List[Polygon] = [] - stop_sign_polygons: List[Polygon] = [] - lane_boundary_linestrings: List[LineString] = [] - - for lane_group_id, lane_group in map_cache.lane_groups.items(): - # Roadblock: (1) drivable_area_raster, (2) route_raster - not_intersection = lane_group.intersection is None - on_route = lane_group_id in map_cache.route_lane_group_ids - - if not_intersection or on_route: - self._render_polygons(mask, map_cache.origin, [lane_group.shapely_polygon], color=MAX_VALUE) - - if not_intersection: - # Lane group without intersection: (1) drivable_area_raster - drivable_area_raster[mask == MAX_VALUE] = MAX_VALUE - - if on_route: - route_raster[mask == MAX_VALUE] = MAX_VALUE - mask.fill(0) - - for lane_id, lane in map_cache.lanes.items(): - # Lane: (3) lane_boundary_raster, (4) traffic_light_raster, (6) speed_raster - # - (3) lane_boundary_raster - if lane.lane_group.intersection is None: - lane_boundary_linestrings.extend( - [lane.left_boundary.polyline_2d.linestring, lane.right_boundary.polyline_2d.linestring] - ) - - # (4) traffic_light_raster - self._render_linestrings(mask, map_cache.origin, [lane.centerline.polyline_2d.linestring], color=MAX_VALUE) - if lane_id in map_cache.traffic_lights.keys(): - traffic_light_status = map_cache.traffic_lights[lane_id] - traffic_light_raster[mask == MAX_VALUE] = TRAFFIC_LIGHT_VALUE[traffic_light_status] - - # (6) speed_raster - speed_raster[mask == MAX_VALUE] = self._scale_to_color(lane.speed_limit_mps, self._max_vehicle_speed) - mask.fill(0) - - for drivable_area_element in itertools.chain(map_cache.intersections.values(), map_cache.car_parks.values()): - # Intersections & Carparks: (1) drivable_area_raster - drivable_area_polygons.append(drivable_area_element.shapely_polygon) - - for stop_sign in map_cache.stop_lines.values(): - # Stop Signs: (1) stop_sign_raster - 
stop_sign_polygons.append(stop_sign.shapely_polygon) - - self._render_polygons(drivable_area_raster, map_cache.origin, drivable_area_polygons, color=MAX_VALUE) - self._render_polygons(stop_sign_raster, map_cache.origin, stop_sign_polygons, color=MAX_VALUE) - self._render_linestrings(lane_boundary_raster, map_cache.origin, lane_boundary_linestrings, color=MAX_VALUE) - - return [ - drivable_area_raster, - route_raster, - lane_boundary_raster, - traffic_light_raster, - stop_sign_raster, - speed_raster, - ] - - def _render_detections_from_cache(self, box_detection_cache: BoxDetectionCache) -> List[npt.NDArray[np.uint8]]: - """ - Renders the detections from the detection cache into a list of rasters. - :param detection_cache: DetectionCache object containing the detection data. - :return: List of rasters representing the detection data. - """ - - mask = self._get_empty_raster() - - # 1. Vehicles (Vehicles, Bicycles), Polygon, LineString - # 2. Pedestrians+Static (Pedestrians, Static objects), Polygon - vehicles_raster = self._get_empty_raster() - pedestrians_raster = self._get_empty_raster() - ego_raster = self._get_empty_raster() - - # 1. Vehicles - for vehicle in box_detection_cache.vehicles: - if self._include_speed_line: - self._render_speed_line(mask, box_detection_cache.origin, vehicle, color=MAX_VALUE) - - polygon: Polygon = self._scale_polygon(vehicle.bounding_box_se2.shapely_polygon, self._vehicle_scaling) - self._render_convex_polygons(mask, box_detection_cache.origin, [polygon], color=MAX_VALUE) - vehicles_raster[mask > 0] = self._scale_to_color( - vehicle.velocity.magnitude, - self._max_vehicle_speed, - ) - mask.fill(0) - - # 2. Pedestrian - for pedestrian in box_detection_cache.pedestrians: - if self._include_speed_line: - self._render_speed_line(mask, box_detection_cache.origin, pedestrian, color=MAX_VALUE) - - polygon: Polygon = self._scale_polygon( - pedestrian.bounding_box_se2.shapely_polygon, self._pedestrian_scaling - ) - self._render_convex_polygons(mask, box_detection_cache.origin, [polygon], color=MAX_VALUE) - pedestrians_raster[mask > 0] = self._scale_to_color( - pedestrian.velocity.magnitude, - self._max_pedestrian_speed, - ) - mask.fill(0) - - # 3. Static Objects - static_polygons: List[Polygon] = [] - for static_object in box_detection_cache.static_objects: - polygon: Polygon = self._scale_polygon(static_object.bounding_box_se2.shapely_polygon, self._static_scaling) - static_polygons.append(polygon) - self._render_convex_polygons( - pedestrians_raster, - box_detection_cache.origin, - static_polygons, - color=self._scale_to_color(0.0, self._max_vehicle_speed), - ) - - # 4. Ego Vehicle - ego_detection = box_detection_cache.ego_state.box_detection_se2 - if self._include_speed_line: - self._render_speed_line(mask, box_detection_cache.origin, ego_detection, color=MAX_VALUE) - - ego_polygon: Polygon = self._scale_polygon(ego_detection.shapely_polygon, self._vehicle_scaling) - self._render_convex_polygons(mask, box_detection_cache.origin, [ego_polygon], color=MAX_VALUE) - ego_raster[mask > 0] = self._scale_to_color(ego_detection.velocity.magnitude, self._max_vehicle_speed) - mask.fill(0) - - return [vehicles_raster, pedestrians_raster, ego_raster] - - def render(self, map_cache: MapCache, detection_cache: BoxDetectionCache) -> npt.NDArray[np.uint8]: - """ - Renders the map and detections from the caches into a single raster. - :param map_cache: MapCache object containing the map data. - :param detection_cache: DetectionCache object containing the detection data. 
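For reference, the channel order of the final (9, W, H) raster, read off from the return values of the two render helpers above:

# 0: drivable area     1: route            2: lane boundaries
# 3: traffic lights    4: stop signs       5: speed limits
# 6: vehicles          7: pedestrians + static objects
# 8: ego vehicle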
- :return: Raster representing the map and detections. - """ - map_raster = self._render_map_from_cache(map_cache) - detection_raster = self._render_detections_from_cache(detection_cache) - - raster: npt.NDArray[np.uint8] = np.concatenate( - [channel[None, ...] for channel in (map_raster + detection_raster)], axis=0 - ) - return raster diff --git a/d123/simulation/gym/environment/helper/environment_area.py b/d123/simulation/gym/environment/helper/environment_area.py deleted file mode 100644 index 90f5a0c9..00000000 --- a/d123/simulation/gym/environment/helper/environment_area.py +++ /dev/null @@ -1,78 +0,0 @@ -from abc import ABC, abstractmethod -from typing import Tuple - -from shapely import Polygon - -from d123.geometry.se import StateSE2 -from d123.geometry.transform.tranform_2d import translate_along_yaw -from d123.geometry.vector import Vector2D - - -class AbstractEnvironmentArea(ABC): - """ - Abstract class for defining an environment area in a Gym simulation. - The area defines the area used for the observation, reward, and other simulation components. - """ - - @abstractmethod - def get_global_origin(self, ego_pose: StateSE2) -> StateSE2: - """ - Given the global ego pose, returns the global origin of the environment area. - :param ego_pose: Global ego pose in the environment. - :return: Global origin of the environment area. - """ - - @abstractmethod - def get_global_polygon(self, ego_pose: StateSE2) -> Polygon: - """ - Given the global ego pose, returns the environment area as 2D polygon. - :param ego_pose: Global ego pose in the environment. - :return: 2D polygon representing the environment area. - """ - - -class RectangleEnvironmentArea(AbstractEnvironmentArea): - def __init__( - self, - front: float = 78.0, - back: float = 50.0, - left: float = 64.0, - right: float = 64.0, - ) -> None: - """ - Initializes a rectangular environment area. - :param front: extent of area in the front of the ego vehicle [m], defaults to 78.0 - :param back: extent of area in back of the ego vehicle [m], defaults to 50.0 - :param left: extent of area to the left of the ego vehicle [m], defaults to 64.0 - :param right: extent of area to the right of the ego vehicle [m], defaults to 64.0 - """ - self._front = front - self._back = back - self._left = left - self._right = right - - @property - def frame(self) -> Tuple[float, float]: - """ - Returns the dimensions of the rectangle as a tuple (width, height). - :return: Tuple of width and height of the rectangle. 
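Worked numbers for the default rectangle (front=78, back=50, left=right=64), including the origin offset computed just below:

# width  = left + right = 64 + 64 = 128.0 m
# height = front + back = 78 + 50 = 128.0 m
# longitudinal offset = height / 2 - back  = 64 - 50 = 14.0 m
# lateral offset      = width / 2  - right = 64 - 64 =  0.0 m
# i.e. the area's origin sits 14 m ahead of the ego pose along its heading,
# reflecting the larger extent in front of the vehicle.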
- """ - return (self._left + self._right), (self._front + self._back) - - def get_global_origin(self, ego_pose: StateSE2) -> StateSE2: - """Inherited, see superclass.""" - width, height = self.frame - longitudinal_offset = (height / 2.0) - self._back - lateral_offset = (width / 2.0) - self._right - return translate_along_yaw(ego_pose, Vector2D(longitudinal_offset, lateral_offset)) - - def get_global_polygon(self, ego_pose: StateSE2) -> Polygon: - """Inherited, see superclass.""" - return Polygon( - [ - tuple(translate_along_yaw(ego_pose, Vector2D(self._front, self._left)).point_2d.array), # front left - tuple(translate_along_yaw(ego_pose, Vector2D(self._front, -self._right)).point_2d.array), # front right - tuple(translate_along_yaw(ego_pose, Vector2D(-self._back, -self._right)).point_2d.array), # rear right - tuple(translate_along_yaw(ego_pose, Vector2D(-self._back, self._left)).point_2d.array), # rear left - ] - ) diff --git a/d123/simulation/gym/environment/helper/environment_cache.py b/d123/simulation/gym/environment/helper/environment_cache.py deleted file mode 100644 index 29f2d4c0..00000000 --- a/d123/simulation/gym/environment/helper/environment_cache.py +++ /dev/null @@ -1,229 +0,0 @@ -from __future__ import annotations - -from functools import cached_property -from typing import Dict, List, Optional, Tuple - -from shapely import Polygon - -from d123.common.datatypes.detection.detection import ( - BoxDetectionSE2, - BoxDetectionWrapper, - TrafficLightDetectionWrapper, - TrafficLightStatus, -) -from d123.common.datatypes.detection.detection_types import DetectionType -from d123.common.datatypes.recording.detection_recording import DetectionRecording -from d123.common.datatypes.vehicle_state.ego_state import EgoStateSE2 -from d123.conversion.maps.abstract_map import AbstractMap -from d123.conversion.maps.abstract_map_objects import ( - AbstractCarpark, - AbstractCrosswalk, - AbstractIntersection, - AbstractLane, - AbstractLaneGroup, - AbstractStopLine, -) -from d123.conversion.maps.map_datatypes import MapLayer -from d123.geometry.occupancy_map import OccupancyMap2D -from d123.geometry.se import StateSE2 -from d123.simulation.gym.environment.helper.environment_area import AbstractEnvironmentArea -from d123.simulation.planning.abstract_planner import PlannerInitialization, PlannerInput - - -class MapCache: - """ - Helper class to save and load map-related data for the current environment area. - NOTE: This class helps to avoid Map API calls during observation and reward computation. - """ - - def __init__( - self, - ego_state: EgoStateSE2, - map_api: AbstractMap, - environment_area: AbstractEnvironmentArea, - traffic_lights: TrafficLightDetectionWrapper, - route_lane_group_ids: List[str], - load_crosswalks: bool = False, - load_stop_lines: bool = False, - ) -> None: - """ - Initializes the MapCache object. - :param ego_state: Current ego state in the environment. - :param map_api: Map interface of nuPlan maps. - :param environment_area: Area to cache map data for. - :param traffic_light_status: Current traffic light status data. - :param route_lane_group_ids: List of lane group ids for the ego route. 
- :param load_crosswalks: whether to load crosswalks, defaults to False - :param load_stop_lines: whether to load stop lines, defaults to False - """ - - self.ego_state = ego_state - self.map_api = map_api - self.environment_area = environment_area - self.load_crosswalks = load_crosswalks - self.load_stop_lines = load_stop_lines - - self.route_lane_group_ids = route_lane_group_ids - self.traffic_lights: Dict[str, TrafficLightStatus] = {str(data.lane_id): data.status for data in traffic_lights} - - self.lane_groups: Dict[str, AbstractLaneGroup] = {} - self.lanes: Dict[str, AbstractLane] = {} - - self.intersections: Dict[str, AbstractIntersection] = {} - self.stop_lines: Dict[str, AbstractStopLine] = {} - self.car_parks: Dict[str, AbstractCarpark] = {} - self.crosswalks: Dict[str, AbstractCrosswalk] = {} - self._load_cache() - - def _load_cache(self) -> None: - - query_map_layers = [MapLayer.LANE_GROUP, MapLayer.CARPARK] - # FIXME: Add stop lines and crosswalks to the map layers if needed - # if self.load_crosswalks: - # query_map_layers.append(MapLayer.CROSSWALK) - # if self.load_stop_lines: - # query_map_layers.append(MapLayer.STOP_LINE) - - map_object_dict = self.map_api.query( - geometry=self.environment_area.get_global_polygon(self.ego_state.center), - layers=query_map_layers, - predicate="intersects", - ) - - # 1. load (1.1) lane groups, (1.2) lanes, (1.3) intersections - for lane_group in map_object_dict[MapLayer.LANE_GROUP]: - lane_group: AbstractLaneGroup - self.lane_groups[lane_group.id] = lane_group - for lane in lane_group.lanes: - self.lanes[lane.id] = lane - optional_intersection = lane_group.intersection - if optional_intersection is not None: - self.intersections[optional_intersection.id] = optional_intersection - - # 2. load car parks - for car_park in map_object_dict[MapLayer.CARPARK]: - car_park: AbstractCarpark - self.car_parks[car_park.id] = car_park - - # FIXME: Add stop lines and crosswalks to the map layers if needed - # if self.load_crosswalks: - # for crosswalk in map_object_dict[MapLayer.CROSSWALK]: - # crosswalk: AbstractCarpark - # self.crosswalks[crosswalk.id] = crosswalk - - # if self.load_stop_lines: - # for stop_line in map_object_dict[MapLayer.STOP_LINE]: - # stop_line: AbstractStopLine - # self.stop_lines[stop_line.id] = stop_line - - @property - def drivable_area_map(self) -> OccupancyMap2D: - - tokens: List[str] = [] - polygons: List[Polygon] = [] - - # FIXME: Remove lane groups on intersections - for element_dict in [self.intersections, self.lane_groups, self.car_parks]: - for token, element in element_dict.items(): - tokens.append(token) - polygons.append(element.polygon) - return OccupancyMap2D(polygons, tokens) - - @cached_property - def origin(self) -> StateSE2: - """ - Returns the global origin of the environment area based on the ego state. - :return: Global origin of the environment area as StateSE2. - """ - return self.environment_area.get_global_origin(self.ego_state.center) - - -class BoxDetectionCache: - """Helper class to save and load detection-related data for the current environment area.""" - - def __init__( - self, - ego_state: EgoStateSE2, - box_detections: BoxDetectionWrapper, - environment_area: AbstractEnvironmentArea, - ) -> None: - """ - Initializes the BoxDetectionCache object. - :param ego_state: Ego vehicle state in the environment. - :param tracked_objects: Tracked objects wrapper of nuPlan. - :param environment_area: Area to cache detection data for. 
- """ - - self.ego_state = ego_state - self.environment_area = environment_area - self.tracked_objects = box_detections - - self.vehicles: List[BoxDetectionSE2] = [] - self.pedestrians: List[BoxDetectionSE2] = [] - self.static_objects: List[BoxDetectionSE2] = [] - self._load_cache(box_detections) - - def _load_cache(self, box_detections: BoxDetectionWrapper) -> None: - global_area_polygon = self.environment_area.get_global_polygon(self.ego_state.center) - - for box_detection in box_detections: - if global_area_polygon.contains(box_detection.center.shapely_point): - if box_detection.metadata.detection_type in [DetectionType.VEHICLE, DetectionType.BICYCLE]: - self.vehicles.append(box_detection) - elif box_detection.metadata.detection_type in [DetectionType.PEDESTRIAN]: - self.pedestrians.append(box_detection) - elif box_detection.metadata.detection_type in [ - DetectionType.CZONE_SIGN, - DetectionType.BARRIER, - DetectionType.TRAFFIC_CONE, - DetectionType.GENERIC_OBJECT, - ]: - self.static_objects.append(box_detection) - - @cached_property - def origin(self) -> StateSE2: - """ - Returns the global origin of the environment area based on the ego state. - :return: Global origin of the environment area as StateSE2. - """ - return self.environment_area.get_global_origin(self.ego_state.center) - - -def build_environment_caches( - planner_input: PlannerInput, - planner_initialization: PlannerInitialization, - environment_area: AbstractEnvironmentArea, - route_lane_group_ids: Optional[List[str]] = None, -) -> Tuple[MapCache, BoxDetectionCache]: - """ - Helper function to build the environment caches for the current planner input and initialization. - :param planner_input: Planner input interface of nuPlan, ego, detection, and traffic light data. - :param planner_initialization: Planner initialization interface of nuPlan, map API and route lane group ids. - :param environment_area: Area object used to cache the map and detection data. - :param route_lane_group_ids: Optional route lane group ids, to overwrite the planner initialization, defaults to None - :return: Tuple of MapCache and DetectionCache objects. - """ - - ego_state, detection_recording = planner_input.history.current_state - assert isinstance(detection_recording, DetectionRecording), "Recording must be of type DetectionRecording" - - # TODO: Add route correction? - route_lane_group_ids = planner_initialization.route_lane_group_ids - - # TODO: Add box detection filtering? 
- box_detections = detection_recording.box_detections - - map_cache = MapCache( - ego_state=ego_state, - map_api=planner_initialization.map_api, - environment_area=environment_area, - traffic_lights=detection_recording.traffic_light_detections, - route_lane_group_ids=route_lane_group_ids, - ) - detection_cache = BoxDetectionCache( - ego_state=ego_state, - box_detections=box_detections, - environment_area=environment_area, - ) - - return map_cache, detection_cache diff --git a/d123/simulation/gym/environment/output_converter/__init__.py b/d123/simulation/gym/environment/output_converter/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/d123/simulation/gym/environment/output_converter/abstract_output_converter.py b/d123/simulation/gym/environment/output_converter/abstract_output_converter.py deleted file mode 100644 index 94f3ef0c..00000000 --- a/d123/simulation/gym/environment/output_converter/abstract_output_converter.py +++ /dev/null @@ -1,32 +0,0 @@ -from abc import ABC, abstractmethod -from typing import Any, Dict - -import numpy as np -import numpy.typing as npt -from gymnasium import spaces - -from d123.common.datatypes.vehicle_state.ego_state import EgoStateSE2 -from d123.simulation.planning.planner_output.abstract_planner_output import AbstractPlannerOutput - - -class AbstractOutputConverter(ABC): - """Abstract class for building trajectories (nuPlan interface) in a Gym simulation environment.""" - - @abstractmethod - def get_action_space(self) -> spaces.Space: - """ - Returns the action space of the gym environment. - :return: gymnasium action space. - """ - - @abstractmethod - def get_planner_output( - self, action: npt.NDArray[np.float32], ego_state: EgoStateSE2, info: Dict[str, Any] - ) -> AbstractPlannerOutput: - """ - Builds a planner output based on the action and the current ego state. - :param action: Action taken by the agent, typically a numpy array. - :param ego_state: Current state of the ego vehicle. - :param info: Arbitrary information dictionary, for passing information between modules. - :return: Planner output object. - """ diff --git a/d123/simulation/gym/environment/output_converter/action_output_converter.py b/d123/simulation/gym/environment/output_converter/action_output_converter.py deleted file mode 100644 index ed8aff94..00000000 --- a/d123/simulation/gym/environment/output_converter/action_output_converter.py +++ /dev/null @@ -1,209 +0,0 @@ -from typing import Any, Dict, Optional - -import numpy as np -import numpy.typing as npt -from gymnasium import spaces - -from d123.common.datatypes.vehicle_state.ego_state import EgoStateSE2 -from d123.simulation.gym.environment.output_converter.abstract_output_converter import AbstractOutputConverter -from d123.simulation.gym.policy.ppo.ppo_config import GlobalConfig -from d123.simulation.planning.planner_output.action_planner_output import ActionPlannerOutput - - -class ActionOutputConverter(AbstractOutputConverter): - """ - Default action trajectory builder for training CaRL. - TODO: Refactor this class - NOTE @DanielDauner: - We do an unclean hack here use an action (acceleration, steering) but package it into a Trajectory interface according to nuPlan. - The nuPlan simulation strictly requires a trajectory. We use a OneStageController to skip the controller and directly propagate the bicycle model. - You can create a new TrajectoryBuilder and SimulationBuilder to use a TwoStageController if you want to use a trajectory action. 
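The resulting action interface, written out with assumed GlobalConfig values (action_space_min=-1.0, action_space_max=1.0, action_space_dim=2; the actual values live in ppo_config.py and are not shown in this diff):

import numpy as np
import gymnasium as gym

action_space = gym.spaces.Box(-1.0, 1.0, shape=(2,), dtype=np.float32)
action = action_space.sample()  # (normed acceleration, normed steering angle)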
- """ - - def __init__( - self, - scale_max_acceleration: float = 2.4, - scale_max_deceleration: float = 3.2, - scale_max_steering_angle: float = 0.83775804096, - clip_max_abs_steering_rate: Optional[float] = None, - clip_max_abs_lon_jerk: Optional[float] = None, - clip_max_abs_yaw_accel: Optional[float] = None, - clip_angular_adjustment: bool = False, - convert_low_pass_acceleration: bool = False, - convert_low_pass_steering: bool = False, - disable_reverse_driving: bool = True, - ): - """ - Initializes the ActionTrajectoryBuilder. - :param scale_max_acceleration: max acceleration used for scaling the normed action [m/s^2], defaults to 2.4 - :param scale_max_deceleration: max deceleration (positive) used for scaling the normed action [m/s^2], defaults to 3.2 - :param scale_max_steering_angle: max absolute steering angle used for scaling the normed action [rad], defaults to 0.83775804096 - :param clip_max_abs_steering_rate: optional value to clip the steering rate [rad/s], defaults to None - :param clip_max_abs_lon_jerk: optional value to clip the longitudinal jerk [m/s^3], defaults to None - :param clip_max_abs_yaw_accel: optional value to clip the yaw acceleration [rad/s^2], defaults to None - :param clip_angular_adjustment: Whether to adjust the longitudinal acceleration for lower rotation, defaults to False - :param convert_low_pass_acceleration: Undo the low pass filtering of nuPlan's bicycle model, defaults to False - :param convert_low_pass_steering: Undo the low pass filtering of nuPlan's bicycle model, defaults to False - :param disable_reverse_driving: Whether to disable reverse driving with a controller, defaults to True - """ - self._scale_max_acceleration = scale_max_acceleration # [m/s^2] - self._scale_max_deceleration = scale_max_deceleration # [m/s^2] - self._scale_max_steering_angle = scale_max_steering_angle # [rad] - - self._clip_max_abs_steering_rate = clip_max_abs_steering_rate # [rad/s] - self._clip_max_abs_lon_jerk = clip_max_abs_lon_jerk # [m/s^3] - self._clip_max_abs_yaw_accel = clip_max_abs_yaw_accel # [rad/s^2] - self._clip_angular_adjustment = clip_angular_adjustment - - self._convert_low_pass_acceleration = convert_low_pass_acceleration - self._convert_low_pass_steering = convert_low_pass_steering - self._disable_reverse_driving = disable_reverse_driving - - self._config = GlobalConfig() - self._dt_control = 0.1 # [s] - self._accel_time_constant = 0.2 # [s] - self._steering_angle_time_constant = 0.05 # [s] - - def get_action_space(self) -> spaces.Space: - """Inherited, see superclass.""" - return spaces.Box( - self._config.action_space_min, - self._config.action_space_max, - shape=(self._config.action_space_dim,), - dtype=np.float32, - ) - - def get_planner_output( - self, action: npt.NDArray[np.float32], ego_state: EgoStateSE2, info: Dict[str, Any] - ) -> ActionPlannerOutput: - """Inherited, see superclass.""" - assert len(action) == self._config.action_space_dim - info["last_action"] = action - - action_acceleration_normed, action_steering_angle_normed = action - - target_steering_rate = self._scale_steering(action_steering_angle_normed, ego_state.tire_steering_angle) - target_acceleration = self._scale_acceleration( - action_acceleration_normed, ego_state.dynamic_state_se2.acceleration.x - ) - clipped_steering_rate = self._clip_steering(target_acceleration, target_steering_rate, ego_state) - clipped_acceleration = self._clip_acceleration(target_acceleration, clipped_steering_rate, ego_state) - return ActionPlannerOutput(clipped_acceleration, 
clipped_steering_rate, ego_state) - - def _scale_steering(self, action_steering_angle_normed: float, current_steering_angle: float) -> float: - """ - Scales the steering angle based on the action and current steering angle. - :param action_steering_angle_normed: Normalized steering angle action, typically in [-1, 1]. - :param current_steering_angle: Current steering angle of the ego vehicle. - :return: Scaled steering rate based on the action and current steering angle. - """ - - target_steering_angle = action_steering_angle_normed * self._scale_max_steering_angle - if self._convert_low_pass_steering: - factor = (self._dt_control + self._steering_angle_time_constant) / self._dt_control - target_steering_angle = (target_steering_angle - current_steering_angle) * factor + current_steering_angle - target_steering_rate = (target_steering_angle - current_steering_angle) / self._dt_control - return target_steering_rate - - def _scale_acceleration(self, action_acceleration_normed: float, current_acceleration: float) -> float: - """ - Scales the acceleration based on the action. - :param action_acceleration_normed: Normalized acceleration action, typically in [-1, 1]. - :param current_acceleration: Current acceleration of the ego vehicle. - :return: Scaled acceleration based on the action. - """ - if action_acceleration_normed >= 0: - target_acceleration = self._scale_max_acceleration * action_acceleration_normed - else: - target_acceleration = self._scale_max_deceleration * action_acceleration_normed - if self._convert_low_pass_acceleration: - factor = self._dt_control / (self._dt_control + self._accel_time_constant) - target_acceleration = (target_acceleration - current_acceleration) / factor + current_acceleration - return target_acceleration - - def _clip_acceleration( - self, target_acceleration: float, target_steering_rate: float, ego_state: EgoStateSE2 - ) -> float: - """ - Clips the acceleration based on the target acceleration, steering rate, and current ego state. - :param target_acceleration: Acceleration as targeted by the agent. - :param target_steering_rate: Steering rate as targeted by the agent. - :param ego_state: Current state of the ego vehicle. - :return: Clipped acceleration based on the target acceleration and steering rate. 
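Worked numbers for the two scaling helpers above, with the default limits, dt = 0.1 s, and the low-pass conversions disabled:

# action = (0.5, -0.1), current steering angle = 0.0 rad
# acceleration: 0.5 >= 0 -> 0.5 * 2.4 = 1.2 m/s^2
# steering:     -0.1 * 0.83775804096 ~= -0.08378 rad target angle,
#               rate = (-0.08378 - 0.0) / 0.1 ~= -0.8378 rad/s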
- """ - - current_acceleration = ego_state.dynamic_state_se2.acceleration.x - - if self._disable_reverse_driving: - speed = ego_state.dynamic_state_se2.velocity.x - updated_speed = speed + target_acceleration * self._dt_control - # * self._dt_control - if updated_speed < 0: - k_p, k_d = 1.0, 0.75 - error = -speed - dt_error = -current_acceleration - target_acceleration = k_p * error + k_d * dt_error - - if self._clip_max_abs_lon_jerk is not None: - max_acceleration_change = self._clip_max_abs_lon_jerk * self._dt_control - target_acceleration = np.clip( - target_acceleration, - current_acceleration - max_acceleration_change, - current_acceleration + max_acceleration_change, - ) - - _max_acceleration = self._scale_max_acceleration - if self._clip_angular_adjustment: - rear_axle_to_center_dist = ego_state.vehicle_parameters.rear_axle_to_center_longitudinal - - next_point_velocity_x = ego_state.dynamic_state_se2.velocity.x + target_acceleration * self._dt_control - next_point_tire_steering_angle = ego_state.tire_steering_angle + target_steering_rate * self._dt_control - next_point_angular_velocity = ( - next_point_velocity_x * np.tan(next_point_tire_steering_angle) / ego_state.vehicle_parameters.wheel_base - ) - next_point_angular_acceleration = ( - next_point_angular_velocity - ego_state.dynamic_state_se2.angular_velocity - ) / self._dt_control - - centripetal_acceleration_term = rear_axle_to_center_dist * (next_point_angular_velocity) ** 2 - angular_acceleration_term = rear_axle_to_center_dist * (next_point_angular_acceleration) - _max_acceleration -= centripetal_acceleration_term + angular_acceleration_term - - target_acceleration = np.clip(target_acceleration, -self._scale_max_deceleration, _max_acceleration) - return target_acceleration - - def _clip_steering(self, target_acceleration: float, target_steering_rate: float, ego_state: EgoStateSE2) -> float: - """ - Clips the steering rate based on the target acceleration and current ego state. - :param target_acceleration: Acceleration as targeted by the agent. - :param target_steering_rate: Steering rate as targeted by the agent. - :param ego_state: Current state of the ego vehicle. - :return: Clipped steering rate based on the target acceleration and steering rate. 
- """ - - current_steering_angle = ego_state.tire_steering_angle - target_steering_angle = current_steering_angle + target_steering_rate * self._dt_control - - if self._clip_max_abs_yaw_accel is not None: - wheel_base = ego_state.vehicle_parameters.wheel_base - target_velocity = ego_state.dynamic_state_se2.acceleration.x + target_acceleration * self._dt_control - - current_angular_velocity = ego_state.dynamic_state_se2.angular_velocity - max_abs_yaw_velocity = self._clip_max_abs_yaw_accel * self._dt_control - - min_angular_velocity = current_angular_velocity - max_abs_yaw_velocity - max_angular_velocity = current_angular_velocity + max_abs_yaw_velocity - - min_tire_steering_angle = np.arctan((min_angular_velocity * wheel_base) / target_velocity) - max_tire_steering_angle = np.arctan((max_angular_velocity * wheel_base) / target_velocity) - target_steering_angle = np.clip(target_steering_angle, min_tire_steering_angle, max_tire_steering_angle) - target_steering_rate = (target_steering_angle - current_steering_angle) / self._dt_control - - if self._clip_max_abs_steering_rate is not None: - target_steering_rate = np.clip( - target_steering_rate, - -self._clip_max_abs_steering_rate, - self._clip_max_abs_steering_rate, - ) - - return target_steering_rate diff --git a/d123/simulation/gym/environment/reward_builder/__init__.py b/d123/simulation/gym/environment/reward_builder/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/d123/simulation/gym/environment/reward_builder/abstract_reward_builder.py b/d123/simulation/gym/environment/reward_builder/abstract_reward_builder.py deleted file mode 100644 index bbf01920..00000000 --- a/d123/simulation/gym/environment/reward_builder/abstract_reward_builder.py +++ /dev/null @@ -1,24 +0,0 @@ -from abc import ABC, abstractmethod -from typing import Any, Dict, Tuple - -from d123.simulation.gym.environment.simulation_wrapper import SimulationWrapper - - -class AbstractRewardBuilder(ABC): - """Abstract class for building rewards in a Gym simulation environment.""" - - @abstractmethod - def reset(self) -> None: - """Reset the reward builder to its initial state.""" - - @abstractmethod - def build_reward(self, simulation_wrapper: SimulationWrapper, info: Dict[str, Any]) -> Tuple[float, bool, bool]: - """ - Build the reward based on the current simulation state and additional information. - :param simulation_wrapper: Wrapper object containing complete nuPlan simulation. - :param info: Arbitrary information dictionary, for passing information between modules. - :return: A tuple containing: - - reward: The calculated reward value. - - termination: Whether the simulation terminates in the current step. - - truncation: Whether the simulation is truncated in the current step. 
- """ diff --git a/d123/simulation/gym/environment/reward_builder/components/__init__.py b/d123/simulation/gym/environment/reward_builder/components/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/d123/simulation/gym/environment/reward_builder/components/collision.py b/d123/simulation/gym/environment/reward_builder/components/collision.py deleted file mode 100644 index 7bd00e50..00000000 --- a/d123/simulation/gym/environment/reward_builder/components/collision.py +++ /dev/null @@ -1,103 +0,0 @@ -from typing import Dict, Final, List, Tuple - -from nuplan.common.actor_state.ego_state import EgoState -from nuplan.common.actor_state.oriented_box import in_collision -from nuplan.common.actor_state.tracked_objects import TrackedObject, TrackedObjects -from nuplan.planning.metrics.evaluation_metrics.common.no_ego_at_fault_collisions import ( - _get_collision_type, -) -from nuplan.planning.metrics.utils.collision_utils import CollisionType - -STOPPED_SPEED_THRESHOLD: Final[float] = 5e-02 # Threshold of ego considered stationary [mps] - - -def _get_collisions( - ego_state: EgoState, - tracked_objects: TrackedObjects, - ignore_track_tokens: List[str] = [], -) -> Dict[str, TrackedObject]: - """ - Helper function to get all tracked objects that ego collides with. - Ignores agents with tokens in ignore_track_tokens (similar to the nuPlan collision function) - :param ego_state: Ego state object of nuPlan. - :param tracked_objects: Tracked object wrapper of nuPlan. - :param ignore_track_tokens: list of tokens (str) ego collided before, defaults to [] - :return: dictionary of tokens and tracked objects. - """ - collided_track_dict: Dict[str, TrackedObject] = {} - for tracked_object in tracked_objects: - tracked_object: TrackedObject - if (tracked_object.track_token not in ignore_track_tokens) and in_collision( - ego_state.car_footprint.oriented_box, tracked_object.box - ): - collided_track_dict[tracked_object.track_token] = tracked_object - return collided_track_dict - - -def calculate_all_collisions( - ego_state: EgoState, - tracked_objects: TrackedObjects, - prev_collided_track_tokens: List[str] = [], -) -> Tuple[bool, List[str]]: - """ - Reward term for ego collision. Considers all collision types. - :param ego_state: Ego state object of nuPlan. - :param tracked_objects: Tracked object wrapper of nuPlan. - :param prev_collided_track_tokens: list of tokens (str) ego collided before, defaults to [] - :return: whether ego collides and corresponding detection tokens. - """ - collided_track_dict = _get_collisions(ego_state, tracked_objects, prev_collided_track_tokens) - collided_track_tokens = list(collided_track_dict.keys()) - return len(collided_track_tokens) > 0, collided_track_tokens - - -def calculate_non_stationary_collisions( - ego_state: EgoState, - tracked_objects: TrackedObjects, - prev_collided_track_tokens: List[str], -) -> Tuple[bool, List[str]]: - """ - Reward term for ego collision. Ignores collision when ego stationary. - :param ego_state: Ego state object of nuPlan. - :param tracked_objects: Tracked object wrapper of nuPlan. - :param prev_collided_track_tokens: list of tokens (str) ego collided before, defaults to [] - :return: whether ego collides and corresponding detection tokens. 
- """ - collided_track_dict = _get_collisions(ego_state, tracked_objects, prev_collided_track_tokens) - collided_track_tokens = list(collided_track_dict.keys()) - ego_stationary = ego_state.dynamic_car_state.speed < STOPPED_SPEED_THRESHOLD - return (len(collided_track_tokens) > 0 and not ego_stationary), collided_track_tokens - - -def calculate_at_fault_collision( - ego_state: EgoState, - tracked_objects: TrackedObjects, - prev_collided_track_tokens: List[str], - in_multiple_lanes_or_offroad: bool = False, -) -> Tuple[bool, List[str]]: - """ - Reward term for ego collision. Ignores non-at-fault collisions. - https://github.com/motional/nuplan-devkit/blob/master/nuplan/planning/metrics/evaluation_metrics/common/no_ego_at_fault_collisions.py - :param ego_state: Ego state object of nuPlan. - :param tracked_objects: Tracked object wrapper of nuPlan. - :param prev_collided_track_tokens: list of tokens (str) ego collided before, defaults to [] - :param in_multiple_lanes_or_offroad: whether ego is in multiple roads of off road. - :return: whether ego collides and corresponding detection tokens. - """ - - collided_track_dict = _get_collisions(ego_state, tracked_objects, prev_collided_track_tokens) - collided_track_tokens = list(collided_track_dict.keys()) - - at_fault_collision: bool = False - for tracked_object in collided_track_dict.values(): - collision_type = _get_collision_type(ego_state, tracked_object) - collisions_at_stopped_track_or_active_front: bool = collision_type in [ - CollisionType.ACTIVE_FRONT_COLLISION, - CollisionType.STOPPED_TRACK_COLLISION, - ] - collision_at_lateral: bool = collision_type == CollisionType.ACTIVE_LATERAL_COLLISION - if collisions_at_stopped_track_or_active_front or (in_multiple_lanes_or_offroad and collision_at_lateral): - at_fault_collision = True - break - - return at_fault_collision, collided_track_tokens diff --git a/d123/simulation/gym/environment/reward_builder/components/comfort.py b/d123/simulation/gym/environment/reward_builder/components/comfort.py deleted file mode 100644 index 04ea40a7..00000000 --- a/d123/simulation/gym/environment/reward_builder/components/comfort.py +++ /dev/null @@ -1,145 +0,0 @@ -""" -NOTE @DanielDauner: - -Learning comfortable driving behavior—according to nuPlan's comfort metrics proved to be challenging for a PPO policy operating directly in action space. -In contrast, the default nuPlan controllers (e.g., LQR, iLQR) explicitly optimize for comfort, so trajectory-based policies tend to achieve high comfort scores. -For example, Gigaflow generates full agent rollouts as trajectories and then uses the nuPlan controller to execute them, which likely mitigates comfort issues. - -Designing effective comfort reward terms (see below) was one of the key challenges in adapting CaRL to nuPlan. A few more comments: -- nuPlan's comfort metrics apply Savitzky-Golay filtering, which can be noisy with short history windows. This also requires a min history length of 4. -- The metrics compute accelerations and velocities using the agent's center, not the rear axle. This mismatch previously introduced bugs in our PDM planners. -- The PPO policy requires a very large number of environment steps to appropriately balance the comfort term against others (e.g., collision avoidance). -- We spent a lot of time trying to make the comfort term work and expected bugs in the reward. In hindsight, we did not train for enough steps required for the comfort terms. 
- -All comfort-related reward terms remain in the codebase but may be refactored in the future. We used `calculate_kinematics_comfort`. -""" - -from typing import Tuple - -import numpy as np -import numpy.typing as npt -from carl_nuplan.planning.simulation.planner.pdm_planner.scoring.pdm_comfort_metrics import ego_is_comfortable -from carl_nuplan.planning.simulation.planner.pdm_planner.scoring.pdm_comfort_metrics_debug import ( - ego_is_comfortable_debug, -) -from carl_nuplan.planning.simulation.planner.pdm_planner.utils.pdm_array_representation import ( - ego_states_to_state_array, -) - -from d123.simulation.gym.environment.simulation_wrapper import SimulationWrapper - - -def calculate_action_delta_comfort(simulation_wrapper: SimulationWrapper, max_change: float = 0.25) -> float: - """ - Calculate the comfort score based on the change in actions between the last two time steps. - :param simulation_wrapper: Wrapper object containing complete nuPlan simulation. - :param max_change: max change considered comfortable in action space, defaults to 0.25 - :return: float values between 0.0 and 1.0, where 1.0 is most comfortable and 0.0 is least comfortable. - """ - history_trajectories = simulation_wrapper.history_trajectories - if len(history_trajectories) >= 2: - - current_action = simulation_wrapper.history_trajectories[-1]._raw_action - previous_action = simulation_wrapper.history_trajectories[-2]._raw_action - comfort = np.abs(np.array(current_action) - np.array(previous_action)) > max_change - - if np.any(comfort): - return 0.5 - - return 1.0 - - -def calculate_kinematics_comfort(simulation_wrapper: SimulationWrapper) -> Tuple[float, npt.NDArray[np.bool_]]: - """ - Calculate the comfort score based on the six kinematic metrics of nuPlan. - NOTE: uses the debugged version of the comfort metrics using the center coordinate of the ego vehicle. - :param simulation_wrapper: Complete simulation wrapper object used for gym simulation. - :return: Whether the ego vehicle is comfortable and the comfort scores. - """ - history_ego_states = simulation_wrapper.simulation_ego_states - is_comfortable: npt.NDArray[np.bool_] = np.zeros((6), dtype=np.bool_) - comfort_score: float = 1.0 - - if len(history_ego_states) >= 4: - history_states_array = ego_states_to_state_array(history_ego_states)[None, ...] - time_points = np.array( - [ego_state.time_point.time_s for ego_state in history_ego_states], - dtype=np.float64, - ) - is_comfortable = ego_is_comfortable_debug(history_states_array, time_points)[0] - comfort_score = is_comfortable.sum() / len(is_comfortable) - - return comfort_score, is_comfortable - - -def calculate_kinematics_history_comfort(simulation_wrapper: SimulationWrapper) -> Tuple[float, npt.NDArray[np.bool_]]: - """ - Calculate the comfort score based on the six kinematic metrics of nuPlan. Includes the ego history for calculation. - NOTE: Adds the ego history of the logs to the comfort metrics. Was not relevant. - :param simulation_wrapper: Complete simulation wrapper object used for gym simulation. - :return: Whether the ego vehicle is comfortable and the comfort scores. - """ - history_ego_states = simulation_wrapper.current_planner_input.history.ego_states - assert len(history_ego_states) >= 4 - is_comfortable: npt.NDArray[np.bool_] = np.zeros((6), dtype=np.bool_) - - history_states_array = ego_states_to_state_array(history_ego_states)[None, ...] 
- time_points = np.array( - [ego_state.time_point.time_s for ego_state in history_ego_states], - dtype=np.float64, - ) - is_comfortable = ego_is_comfortable_debug(history_states_array, time_points)[0] - comfort_score = is_comfortable.sum() / len(is_comfortable) - - return comfort_score, is_comfortable - - -def calculate_kinematics_comfort_legacy(simulation_wrapper: SimulationWrapper) -> Tuple[float, npt.NDArray[np.bool_]]: - """ - Calculate the comfort score based on the six kinematic metrics of nuPlan. - NOTE: Uses the rear-axle instead of center coordinate of the ego vehicle. Leads to slight mismatch to nuPlan metric. - :param simulation_wrapper: Complete simulation wrapper object used for gym simulation. - :return: Whether the ego vehicle is comfortable and the comfort scores. - """ - history_ego_states = simulation_wrapper.simulation_ego_states - is_comfortable: npt.NDArray[np.bool_] = np.zeros((6), dtype=np.bool_) - - if len(history_ego_states) >= 4: - history_states_array = ego_states_to_state_array(history_ego_states)[None, ...] - time_points = np.array( - [ego_state.time_point.time_s for ego_state in history_ego_states], - dtype=np.float64, - ) - is_comfortable = ego_is_comfortable(history_states_array, time_points)[0] - if not is_comfortable.all(): - return 0.5, is_comfortable - - return 1.0, is_comfortable - - -def calculate_kinematics_comfort_fixed(simulation_wrapper: SimulationWrapper) -> Tuple[float, npt.NDArray[np.bool_]]: - """ - Calculate the comfort score based on the six kinematic metrics of nuPlan. - NOTE: Ignores certain jerk metrics that are noisy initially. - :param simulation_wrapper: Complete simulation wrapper object used for gym simulation. - :return: Whether the ego vehicle is comfortable and the comfort scores. - """ - history_ego_states = simulation_wrapper.simulation_ego_states - is_comfortable: npt.NDArray[np.bool_] = np.zeros((6), dtype=np.bool_) - - if len(history_ego_states) >= 4: - history_states_array = ego_states_to_state_array(history_ego_states)[None, ...] - time_points = np.array( - [ego_state.time_point.time_s for ego_state in history_ego_states], - dtype=np.float64, - ) - is_comfortable = ego_is_comfortable_debug(history_states_array, time_points)[0] - - if len(history_ego_states) < 15: - is_comfortable[2] = True # NOTE: jerk metric is trash in first few frames - is_comfortable[3] = True # NOTE: lon jerk metric is trash in first few frames - - if not is_comfortable.all(): - return 0.5, is_comfortable - - return 1.0, is_comfortable diff --git a/d123/simulation/gym/environment/reward_builder/components/off_route.py b/d123/simulation/gym/environment/reward_builder/components/off_route.py deleted file mode 100644 index 1615bd7a..00000000 --- a/d123/simulation/gym/environment/reward_builder/components/off_route.py +++ /dev/null @@ -1,98 +0,0 @@ -from typing import Dict - -import numpy as np -from carl_nuplan.planning.simulation.planner.pdm_planner.observation.pdm_occupancy_map import ( - PDMOccupancyMap, -) -from nuplan.common.maps.abstract_map_objects import LaneGraphEdgeMapObject -from shapely import Polygon - -from d123.simulation.gym.environment.helper.environment_cache import MapCache -from d123.simulation.gym.environment.simulation_wrapper import SimulationWrapper - - -def calculate_off_route_v1(simulation_wrapper: SimulationWrapper, map_cache: MapCache) -> float: - """ - Calculate the off-route based on the polygons of the route roadblocks and roadblock connectors polygons. 
-    NOTE: The route roadblock connector polygons often have a strange shape. We suspect the ego learned to exploit this
-    during strange overtaking maneuvers. We fixed this in the v2 version below.
-    TODO: Refactor or remove this implementation.
-    :param simulation_wrapper: Complete simulation wrapper object used for gym simulation.
-    :param map_cache: Map cache object storing relevant nearby map objects.
-    :return: 1.0 if the ego is on route, 0.0 if the ego is off route.
-    """
-    iteration = simulation_wrapper.current_planner_input.iteration.index
-
-    ego_state = simulation_wrapper.current_ego_state
-    expert_ego_state = simulation_wrapper.scenario.get_ego_state_at_iteration(iteration)
-
-    route_roadblocks: Dict[str, Polygon] = {}
-    for route_roadblock_id in map_cache.route_lane_group_ids:
-        if route_roadblock_id in map_cache.lane_groups:
-            route_roadblocks[route_roadblock_id] = map_cache.lane_groups[route_roadblock_id].polygon
-        if route_roadblock_id in map_cache.roadblock_connectors:
-            route_roadblocks[route_roadblock_id] = map_cache.roadblock_connectors[route_roadblock_id].polygon
-
-    route_map = PDMOccupancyMap(list(route_roadblocks.keys()), list(route_roadblocks.values()))
-
-    points = np.array(
-        [
-            [point.x, point.y]
-            for point in [
-                ego_state.center.point,
-                expert_ego_state.center.point,
-            ]
-        ],
-        dtype=np.float64,
-    )
-    points_in_polygon = route_map.points_in_polygons(points)
-    on_route = points_in_polygon.sum(axis=0) > 0
-    ego_on_route, expert_on_route = on_route[0], on_route[1]
-
-    if not ego_on_route and expert_on_route:
-        return 0.0
-
-    return 1.0
-
-
-def calculate_off_route_v2(simulation_wrapper: SimulationWrapper, map_cache: MapCache) -> float:
-    """
-    Calculate the off-route term based on the polygons of the route lanes and lane connectors.
-    NOTE: This implementation uses the lane/lane-connector polygons instead of the roadblock/roadblock-connector polygons.
-    :param simulation_wrapper: Complete simulation wrapper object used for gym simulation.
-    :param map_cache: Map cache object storing relevant nearby map objects.
-    :return: 1.0 if the ego is on route, 0.0 if the ego is off route.
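Both variants reduce to the same membership test, sketched here with plain shapely polygons (geometry invented; the real code batches this via PDMOccupancyMap):

# Ego is penalized only if its center leaves all route polygons while the
# expert center is still inside one of them at the same iteration.
from shapely.geometry import Point, Polygon

route_polygons = [
    Polygon([(0, 0), (10, 0), (10, 4), (0, 4)]),    # hypothetical lane 1
    Polygon([(10, 0), (20, 0), (20, 4), (10, 4)]),  # hypothetical lane 2
]

def on_route(x: float, y: float) -> bool:
    point = Point(x, y)
    return any(polygon.contains(point) for polygon in route_polygons)

def off_route_reward(ego_xy, expert_xy) -> float:
    ego_on, expert_on = on_route(*ego_xy), on_route(*expert_xy)
    return 0.0 if (not ego_on and expert_on) else 1.0

print(off_route_reward((5.0, 6.0), (5.0, 2.0)))  # 0.0: ego off route, expert on
print(off_route_reward((5.0, 2.0), (5.0, 2.0)))  # 1.0: both on route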
- """ - iteration = simulation_wrapper.current_planner_input.iteration.index - - ego_state = simulation_wrapper.current_ego_state - expert_ego_state = simulation_wrapper.scenario.get_ego_state_at_iteration(iteration) - - route_lane_polygons: Dict[str, Polygon] = {} - - for lane_dict in [map_cache.lanes, map_cache.lane_connectors]: - lane_dict: Dict[str, LaneGraphEdgeMapObject] - for lane_id, lane in lane_dict.items(): - if lane.get_roadblock_id() in map_cache.route_lane_group_ids: - route_lane_polygons[lane_id] = lane.polygon - - route_map = PDMOccupancyMap(list(route_lane_polygons.keys()), list(route_lane_polygons.values())) - - points = np.array( - [ - [point.x, point.y] - for point in [ - ego_state.center.point, - expert_ego_state.center.point, - ] - ], - dtype=np.float64, - ) - points_in_polygon = route_map.points_in_polygons(points) - on_route = points_in_polygon.sum(axis=0) > 0 - ego_on_route, expert_on_route = on_route[0], on_route[1] - - if not ego_on_route and expert_on_route: - return 0.0 - - return 1.0 diff --git a/d123/simulation/gym/environment/reward_builder/components/progress.py b/d123/simulation/gym/environment/reward_builder/components/progress.py deleted file mode 100644 index c8bc6b55..00000000 --- a/d123/simulation/gym/environment/reward_builder/components/progress.py +++ /dev/null @@ -1,120 +0,0 @@ -from dataclasses import dataclass -from typing import List, Optional - -import numpy as np -from carl_nuplan.planning.simulation.planner.pdm_planner.utils.pdm_path import PDMPath -from nuplan.common.actor_state.ego_state import EgoState -from nuplan.planning.metrics.evaluation_metrics.common.ego_progress_along_expert_route import ( - PerFrameProgressAlongRouteComputer, -) -from nuplan.planning.metrics.utils.route_extractor import ( - RouteRoadBlockLinkedList, - get_route, - get_route_baseline_roadblock_linkedlist, - get_route_simplified, -) -from nuplan.planning.metrics.utils.state_extractors import extract_ego_center -from shapely import Point - -from d123.simulation.gym.environment.simulation_wrapper import SimulationWrapper - - -@dataclass -class ProgressCache: - - expert_route_roadblocks: Optional[RouteRoadBlockLinkedList] - expert_progress: float # [m] - progress_computer: Optional[PerFrameProgressAlongRouteComputer] = None - - -def calculate_route_completion_human(ego_states: List[EgoState], scenario_simulation: SimulationWrapper) -> float: - """ - Calculates route completion relative to the human trajectory from the logs. Used as default. - :param ego_states: List of ego states from the simulation. - :param scenario_simulation: Simulation wrapper object containing the scenario simulation. - :return: Route completion delta, which is the difference in route completion from the last time step to the current one (normalized). - """ - assert len(ego_states) > 2 - current_ego_state = ego_states[-1] - ego_linestring = scenario_simulation.ego_linestring - current_route_completion = ego_linestring.project(Point(*current_ego_state.center.array), normalized=True) - past_route_completion = scenario_simulation.route_completion - route_completion_delta = np.maximum(0.0, current_route_completion - past_route_completion) - scenario_simulation.update_route_completion(current_route_completion) - return route_completion_delta - - -def calculate_route_completion_mean(ego_states: List[EgoState], scenario_simulation: SimulationWrapper) -> float: - """ - Calculates route completion relative to the overall average in the logs (i.e. 62 meter) - NOTE: This lead to aggressive ego behavior. 
-    :param ego_states: List of ego states from the simulation.
-    :param scenario_simulation: Simulation wrapper object containing the scenario simulation.
-    :return: Route completion delta, which is the difference in route completion from the last time step to the current one (normalized).
-    """
-    MEAN_ROUTE_COMPLETION: float = 62.0  # [m]
-    ego_linestring = PDMPath([ego_state.center for ego_state in scenario_simulation.simulation_ego_states]).linestring
-    current_route_completion = np.clip(ego_linestring.length / MEAN_ROUTE_COMPLETION, 0.0, 1.0)
-    past_route_completion = scenario_simulation.route_completion
-    route_completion_delta = np.clip(current_route_completion - past_route_completion, 0.0, 1.0)
-    scenario_simulation.update_route_completion(current_route_completion)
-    return route_completion_delta
-
-
-def calculate_route_completion_nuplan(
-    ego_states: List[EgoState],
-    scenario_simulation: SimulationWrapper,
-    progress_cache: Optional[ProgressCache] = None,
-    score_progress_threshold: float = 0.001,
-) -> Tuple[float, ProgressCache]:
-    """
-    Calculates route completion based on the nuPlan progress metric.
-    NOTE: This function worked okay, but did not lead to better results compared to the human trajectory.
-    The implementation is also more complex. We might remove it in the future.
-    :param ego_states: List of ego states from the simulation.
-    :param scenario_simulation: Simulation wrapper object containing the scenario simulation.
-    :param progress_cache: Cached expert route and progress from previous iterations, defaults to None
-    :param score_progress_threshold: Minimum progress [m] used to avoid division by zero, defaults to 0.001
-    :return: Route completion delta (normalized, as above) and the updated progress cache.
-    """
-
-    first_iteration = progress_cache is None
-
-    # 1. Calculate expert route and progress
-    if first_iteration:
-        scenario = scenario_simulation.scenario
-        expert_states = scenario.get_expert_ego_trajectory()
-        expert_poses = extract_ego_center(expert_states)
-
-        expert_route = get_route(map_api=scenario.map_api, poses=expert_poses)
-        expert_route_simplified = get_route_simplified(expert_route)
-        expert_route_roadblocks = get_route_baseline_roadblock_linkedlist(scenario.map_api, expert_route_simplified)
-
-        if expert_route_roadblocks.head is None:
-            progress_cache = ProgressCache(expert_route_roadblocks, 0.0)
-        else:
-            expert_progress_computer = PerFrameProgressAlongRouteComputer(expert_route_roadblocks)
-            expert_progress = np.sum(expert_progress_computer(ego_poses=expert_poses))
-            ego_progress_computer = PerFrameProgressAlongRouteComputer(expert_route_roadblocks)
-            progress_cache = ProgressCache(expert_route_roadblocks, expert_progress, ego_progress_computer)

-    # 2. Depending on whether a valid route was found:
-    #    - return the default values (no progress), or
-    #    - calculate the new route completion.
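For reference, the human-relative variant above boils down to projecting the ego center onto the recorded driving log and rewarding only forward progress. A minimal sketch with an invented log (shapely only):

# Normalized forward progress along a hypothetical 80 m human driving log.
from shapely.geometry import LineString, Point

human_log = LineString([(0.0, 0.0), (50.0, 0.0), (50.0, 30.0)])  # invented log

def route_completion_delta(ego_xy, previous_completion: float):
    """Normalized forward progress along the log since the last step."""
    current = human_log.project(Point(*ego_xy), normalized=True)
    return max(0.0, current - previous_completion), current

delta, completion = route_completion_delta((10.0, 1.0), 0.0)
print(round(delta, 3))  # 0.125: one eighth of the 80 m log covered
delta, completion = route_completion_delta((5.0, 1.0), completion)
print(delta)            # 0.0: moving backwards along the route earns nothing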
- if progress_cache.expert_route_roadblocks.head is None: - scenario_simulation.update_route_completion(1.0) - route_completion_delta = 0.0 - - else: - ego_poses = extract_ego_center(ego_states[-2:]) - ego_progress = np.sum(progress_cache.progress_computer(ego_poses=ego_poses)) - - current_route_completion = np.clip( - max(ego_progress, score_progress_threshold) / max(progress_cache.expert_progress, score_progress_threshold), - 0.0, - 1.0, - ) - past_route_completion = scenario_simulation.route_completion - - route_completion_delta = np.clip(current_route_completion - past_route_completion, 0.0, 1.0) - scenario_simulation.update_route_completion(current_route_completion) - - return route_completion_delta, progress_cache diff --git a/d123/simulation/gym/environment/reward_builder/components/time_to_collision.py b/d123/simulation/gym/environment/reward_builder/components/time_to_collision.py deleted file mode 100644 index 7c3e0883..00000000 --- a/d123/simulation/gym/environment/reward_builder/components/time_to_collision.py +++ /dev/null @@ -1,289 +0,0 @@ -from typing import Dict, Final, List, Optional, Tuple - -import numpy as np -import numpy.typing as npt -from carl_nuplan.planning.simulation.planner.pdm_planner.observation.pdm_occupancy_map import ( - PDMOccupancyMap, -) -from carl_nuplan.planning.simulation.planner.pdm_planner.utils.pdm_enums import ( - BBCoordsIndex, -) -from nuplan.common.actor_state.agent import Agent -from nuplan.common.actor_state.ego_state import EgoState, StateSE2 -from nuplan.common.actor_state.oriented_box import OrientedBox, in_collision -from nuplan.common.actor_state.tracked_objects import TrackedObject, TrackedObjects -from nuplan.common.maps.abstract_map import AbstractMap, SemanticMapLayer -from nuplan.planning.metrics.evaluation_metrics.common.time_to_collision_within_bound import ( - _get_ego_tracks_displacement_info, - _get_relevant_tracks, -) -from nuplan.planning.simulation.observation.idm.utils import ( - is_agent_ahead, - is_agent_behind, -) -from nuplan.planning.simulation.observation.observation_type import DetectionsTracks -from shapely import creation - -from d123.simulation.gym.environment.simulation_wrapper import SimulationWrapper - -# TODO: Add to config. -STOPPED_SPEED_THRESHOLD: Final[float] = 5e-03 # [m/s] (ttc) -SUCCESS_TTC: Final[float] = 1.0 -FAIL_TTC: Final[float] = 0.5 - - -def _get_coords_array(oriented_box: OrientedBox) -> npt.NDArray[np.float64]: - """ - Helper function to get corner coordinates of an oriented box. - :param oriented_box: OrientedBox object from nuPlan. - :return: numpy array with shape (5, 2) containing the closed corner coordinates of the oriented box. - """ - coords_array = np.zeros((len(BBCoordsIndex), 2), dtype=np.float64) - corners = oriented_box.all_corners() - coords_array[BBCoordsIndex.FRONT_LEFT] = corners[0].array - coords_array[BBCoordsIndex.REAR_LEFT] = corners[1].array - coords_array[BBCoordsIndex.REAR_RIGHT] = corners[2].array - coords_array[BBCoordsIndex.FRONT_RIGHT] = corners[3].array - coords_array[BBCoordsIndex.CENTER] = corners[0].array # close polygon - return coords_array - - -def _get_dxy(heading: float, velocity: float) -> npt.NDArray[np.float64]: - """ - Get the displacement vector (global x,y) to propagate a bounding box along its heading with a given velocity. - :param heading: Heading angle of the bounding box [rad]. - :param velocity: Velocity of the bounding box [m/s]. - :return: Displacement vector (x, y) to propagate the bounding box. 
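A self-contained numpy sketch of this constant-velocity propagation; the corner layout and array shapes follow the comments in the original code, while the two boxes, headings, and speeds are invented:

# Shift closed corner rings by (cos(heading), sin(heading)) * speed * dt.
import numpy as np

def get_dxy(heading: np.ndarray, velocity: np.ndarray) -> np.ndarray:
    """Per-second displacement (x, y) of boxes moving along their headings."""
    return np.stack([np.cos(heading) * velocity, np.sin(heading) * velocity], axis=-1)

# Two unit boxes as closed rings of 5 corners each: shape (2, 5, 2).
corners = np.tile(
    np.array([[0, 0], [1, 0], [1, 1], [0, 1], [0, 0]], dtype=np.float64),
    (2, 1, 1),
)
headings = np.array([0.0, np.pi / 2])  # east, north
speeds = np.array([2.0, 1.0])          # [m/s]

dxy = get_dxy(headings, speeds)        # (2, 2)
time_deltas = np.arange(1, 4) * 0.1    # forecast 0.1 s, 0.2 s, 0.3 s ahead

# Broadcast to (num_steps, num_boxes, 5, 2): every corner shifted by dxy * dt.
projected = corners[None, ...] + time_deltas[:, None, None, None] * dxy[None, :, None, :]
print(projected.shape)  # (3, 2, 5, 2)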
- """ - dxy = np.stack( - [ - np.cos(heading) * velocity, - np.sin(heading) * velocity, - ], - axis=-1, - ) - return dxy - - -def calculate_ttc_v1(simulation_wrapper: SimulationWrapper, resolution: int = 2) -> float: - """ - Calculate the time to collision (TTC) of the ego vehicle, based on the constant velocity forecast. - NOTE: Uses less complex logic than the v2 version. TTC required many steps to converge. We are unsure if v2 was required. - TODO: Refactor or remove this implementation. - :param simulation_wrapper: Simulation wrapper object containing the scenario simulation. - :param resolution: The temporal resolution to check collisions (i.e. every two steps), defaults to 2 - :return: 1.0 if no collision is expected, 0.5 if a collision is expected. - """ - - future_time_indices = np.arange(1, 10, resolution, dtype=int) - future_time_deltas = future_time_indices * simulation_wrapper.scenario.database_interval - - ( - ego_state, - observation, - ) = simulation_wrapper.current_planner_input.history.current_state - assert isinstance(observation, DetectionsTracks) - tracked_objects = observation.tracked_objects - ego_speed = ego_state.dynamic_car_state.center_velocity_2d.magnitude - if len(tracked_objects) == 0 or ego_speed < STOPPED_SPEED_THRESHOLD: - return SUCCESS_TTC - - unique_tracked_objects: Dict[str, TrackedObject] = { - tracked_object.track_token: tracked_object for tracked_object in tracked_objects - } - map_api = simulation_wrapper.scenario.map_api - ego_in_intersection = map_api.is_in_layer(ego_state.rear_axle, layer=SemanticMapLayer.INTERSECTION) - - def _add_object(tracked_object: TrackedObject) -> bool: - if is_agent_ahead(ego_state.rear_axle, tracked_object.center) or ( - ego_in_intersection and not is_agent_behind(ego_state.rear_axle, tracked_object.center) - ): - return True - return False - - # extract static object polygons - static_object_tokens, static_object_coords_list = [], [] - for static_object in tracked_objects.get_static_objects(): - if _add_object(static_object): - static_object_tokens.append(static_object.track_token) - static_object_coords_list.append(_get_coords_array(static_object.box)) - static_object_coords_array = np.array(static_object_coords_list, dtype=np.float64) # (num_agents, 5, 2) - if len(static_object_tokens) == 0: - static_object_polygons = np.array([], dtype=np.object_) - else: - static_object_polygons = creation.polygons(static_object_coords_array) - - # extract agents - agent_tokens, agent_coords_list, agent_dxy = [], [], [] - for agent in tracked_objects.get_agents(): - if _add_object(agent): - agent_tokens.append(agent.track_token) - agent_coords_list.append(_get_coords_array(agent.box)) - agent_dxy.append(_get_dxy(agent.box.center.heading, agent.velocity.magnitude)) - agent_coords_array = np.array(agent_coords_list, dtype=np.float64) # (num_agents, 5, 2) - agent_dxy = np.array(agent_dxy, dtype=np.float64) # (num_agents, 2) - if len(agent_tokens) == 0: - projected_agent_polygons = np.array([], dtype=np.object_) - - # extract ego - ego_coords_array = _get_coords_array(ego_state.car_footprint.oriented_box) # (5, 2) - ego_dxy = _get_dxy(ego_state.center.heading, ego_speed) - ego_displacements = future_time_deltas[:, None, None] * ego_dxy # (num_steps, 1, 2) - projected_ego_coords = ego_coords_array[None, ...] 
+ ego_displacements # (num_steps, 5, 2) - projected_ego_polygons = creation.polygons(projected_ego_coords) - - for time_delta, ego_polygon in zip(future_time_deltas, projected_ego_polygons): - - # project agents - if len(agent_tokens) > 0: - agent_displacements = agent_dxy * time_delta - projected_agent_coords = agent_coords_array + agent_displacements[:, None, :] - projected_agent_polygons = creation.polygons(projected_agent_coords) - - polygons = np.concatenate([static_object_polygons, projected_agent_polygons], axis=0) - occupancy_map = PDMOccupancyMap(tokens=static_object_tokens + agent_tokens, geometries=polygons) - - # check for collisions - ego_collision = occupancy_map.intersects(ego_polygon) - if len(ego_collision) > 0: - for ego_collision_token in ego_collision: - track_state = unique_tracked_objects[ego_collision_token].center - if is_agent_ahead(ego_state.rear_axle, track_state) or ( - (map_api.is_in_layer(ego_state.rear_axle, layer=SemanticMapLayer.INTERSECTION)) - and not is_agent_behind(ego_state.rear_axle, track_state) - ): - return FAIL_TTC - - return SUCCESS_TTC - - -def calculate_ttc_v2( - simulation_wrapper: SimulationWrapper, - collided_track_tokens: List[str], - in_multiple_lanes_or_offroad: bool = False, - resolution: int = 2, -) -> float: - """ - Calculate the time to collision (TTC) of the ego vehicle, based on the constant velocity forecast. - NOTE: Uses TTC logic closely aligned to nuPlan's implementation, i.e. first extract relevant tracks, then compute TTC. - :param simulation_wrapper: Simulation wrapper object containing the scenario simulation. - :param collided_track_tokens: Detection track tokens that are already collided (ignored). - :param in_multiple_lanes_or_offroad: Whether the ego agent is in multiple lanes or offroad, defaults to False - :param resolution: The temporal resolution to check collisions (TODO: remove or implement). - :return: 1.0 if no collision is expected, 0.5 if a collision is expected. 
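Conceptually, both TTC variants step the footprints forward under a constant-velocity assumption and look for the first intersection. A toy shapely version, simplified to a single track with invented geometry:

# Return the first projected time at which the two footprints intersect.
from typing import Optional, Tuple

import numpy as np
from shapely.affinity import translate
from shapely.geometry import Polygon

def first_collision_time(
    ego: Polygon, ego_velocity: Tuple[float, float],
    track: Polygon, track_velocity: Tuple[float, float],
    dt: float = 0.2, horizon: float = 1.0,
) -> Optional[float]:
    for t in np.arange(dt, horizon + 1e-9, dt):
        ego_t = translate(ego, ego_velocity[0] * t, ego_velocity[1] * t)
        track_t = translate(track, track_velocity[0] * t, track_velocity[1] * t)
        if ego_t.intersects(track_t):
            return float(t)
    return None

ego_box = Polygon([(0, 0), (4, 0), (4, 2), (0, 2)])
lead_box = Polygon([(8, 0), (12, 0), (12, 2), (8, 2)])  # stopped, 4 m gap ahead
print(first_collision_time(ego_box, (10.0, 0.0), lead_box, (0.0, 0.0)))  # 0.4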
- """ - ( - ego_state, - observation, - ) = simulation_wrapper.current_planner_input.history.current_state - assert isinstance(observation, DetectionsTracks) - tracked_objects = observation.tracked_objects - - # Early non-violation conditions - if len(tracked_objects) == 0 or ego_state.dynamic_car_state.speed <= STOPPED_SPEED_THRESHOLD: - return SUCCESS_TTC - - ( - tracks_poses, - tracks_speed, - tracks_boxes, - ) = _extract_tracks_info_excluding_collided_tracks( - ego_state, - simulation_wrapper.scenario.map_api, - tracked_objects, - collided_track_tokens, - in_multiple_lanes_or_offroad, - ) - tracks_poses = np.array(tracks_poses, dtype=np.float64) - tracks_speed = np.array(tracks_speed, dtype=np.float64) - tracks_boxes = np.array(tracks_boxes) - - ttc_at_index = _compute_time_to_collision_at_timestamp(ego_state, tracks_poses, tracks_speed, tracks_boxes) - if ttc_at_index is None: - return SUCCESS_TTC - - return FAIL_TTC - - -def _extract_tracks_info_excluding_collided_tracks( - ego_state: EgoState, - map_api: AbstractMap, - tracked_objects: TrackedObjects, - collided_track_tokens: List[str], - in_multiple_lanes_or_offroad: bool = False, -) -> Tuple[List[List[float]], List[float], List[OrientedBox]]: - - ego_in_intersection = map_api.is_in_layer(ego_state.rear_axle, layer=SemanticMapLayer.INTERSECTION) - - relevant_tracked_objects: List[TrackedObject] = [] - for tracked_object in tracked_objects: - tracked_object: TrackedObject - if tracked_object.track_token not in collided_track_tokens: - if is_agent_ahead(ego_state.rear_axle, tracked_object.center) or ( - (in_multiple_lanes_or_offroad or ego_in_intersection) - and not is_agent_behind(ego_state.rear_axle, tracked_object.center) - ): - relevant_tracked_objects.append(tracked_object) - - tracks_poses: List[List[float]] = [[*tracked_object.center] for tracked_object in tracked_objects] - tracks_speed: List[float] = [ - tracked_object.velocity.magnitude if isinstance(tracked_object, Agent) else 0 - for tracked_object in tracked_objects - ] - tracks_boxes: List[OrientedBox] = [tracked_object.box for tracked_object in tracked_objects] - return tracks_poses, tracks_speed, tracks_boxes - - -def _compute_time_to_collision_at_timestamp( - ego_state: EgoState, - tracks_poses: npt.NDArray[np.float64], - tracks_speed: npt.NDArray[np.float64], - tracks_boxes: List[OrientedBox], - time_step_start: float = 0.1, - time_step_size: float = 0.2, - time_horizon: float = 1.0, -) -> Optional[float]: - - ego_speed = ego_state.dynamic_car_state.speed - - # Remain default if we don't have any agents or ego is stopped - if len(tracks_poses) == 0 or ego_speed <= STOPPED_SPEED_THRESHOLD: - return None - - displacement_info = _get_ego_tracks_displacement_info( - ego_state, ego_speed, tracks_poses, tracks_speed, time_step_size - ) - relevant_tracks_mask = _get_relevant_tracks( - displacement_info.ego_pose, - displacement_info.ego_box, - displacement_info.ego_dx, - displacement_info.ego_dy, - tracks_poses, - tracks_boxes, - displacement_info.tracks_dxy, - time_step_size, - time_horizon, - ) - - # If there is no relevant track affecting TTC, remain default - if not len(relevant_tracks_mask): - return None - - # Find TTC for relevant tracks by projecting ego and tracks boxes with time_step_size - for time_to_collision in np.arange(time_step_start, time_horizon, time_step_size): - # project ego's center pose and footprint with a fixed speed - displacement_info.ego_pose[:2] += ( - displacement_info.ego_dx, - displacement_info.ego_dy, - ) - projected_ego_box = 
OrientedBox.from_new_pose( - displacement_info.ego_box, StateSE2(*(displacement_info.ego_pose)) - ) - # project tracks's center pose and footprint with a fixed speed - tracks_poses[:, :2] += displacement_info.tracks_dxy - for track_box, track_pose in zip(tracks_boxes[relevant_tracks_mask], tracks_poses[relevant_tracks_mask]): - projected_track_box = OrientedBox.from_new_pose(track_box, StateSE2(*track_pose)) - if in_collision(projected_ego_box, projected_track_box): - return float(time_to_collision) - - return None diff --git a/d123/simulation/gym/environment/reward_builder/default_reward_builder.py b/d123/simulation/gym/environment/reward_builder/default_reward_builder.py deleted file mode 100644 index ac18ffaa..00000000 --- a/d123/simulation/gym/environment/reward_builder/default_reward_builder.py +++ /dev/null @@ -1,753 +0,0 @@ -""" -NOTE @DanielDauner: - -This file may be cleaned up in the future. - -""" - -from dataclasses import dataclass -from typing import Any, Dict, Final, List, Optional, Tuple, Union - -import numpy as np -import numpy.typing as npt -from carl_nuplan.planning.simulation.planner.pdm_planner.observation.pdm_occupancy_map import PDMOccupancyMap -from carl_nuplan.planning.simulation.planner.pdm_planner.utils.pdm_array_representation import states_se2_to_array -from carl_nuplan.planning.simulation.planner.pdm_planner.utils.pdm_geometry_utils import normalize_angle -from nuplan.common.actor_state.ego_state import EgoState, StateSE2 -from nuplan.common.maps.abstract_map import Lane -from nuplan.common.maps.abstract_map_objects import LaneConnector, LaneGraphEdgeMapObject -from nuplan.common.maps.maps_datatypes import TrafficLightStatusType -from shapely import Point, Polygon - -from d123.simulation.gym.environment.helper.environment_area import AbstractEnvironmentArea -from d123.simulation.gym.environment.helper.environment_cache import ( - BoxDetectionCache, - MapCache, - environment_cache_manager, -) -from d123.simulation.gym.environment.reward_builder.abstract_reward_builder import AbstractRewardBuilder -from d123.simulation.gym.environment.reward_builder.components.collision import ( - calculate_all_collisions, - calculate_at_fault_collision, - calculate_non_stationary_collisions, -) -from d123.simulation.gym.environment.reward_builder.components.comfort import ( - calculate_action_delta_comfort, - calculate_kinematics_comfort, - calculate_kinematics_comfort_fixed, - calculate_kinematics_comfort_legacy, - calculate_kinematics_history_comfort, -) -from d123.simulation.gym.environment.reward_builder.components.off_route import ( - calculate_off_route_v1, - calculate_off_route_v2, -) -from d123.simulation.gym.environment.reward_builder.components.progress import ( - ProgressCache, - calculate_route_completion_human, - calculate_route_completion_mean, - calculate_route_completion_nuplan, -) -from d123.simulation.gym.environment.reward_builder.components.time_to_collision import ( - FAIL_TTC, - SUCCESS_TTC, - calculate_ttc_v1, - calculate_ttc_v2, -) -from d123.simulation.gym.environment.simulation_wrapper import SimulationWrapper - -NUM_SCENARIO_ITERATIONS: Final[int] = 150 # TODO: Remove this constant. 
- - -@dataclass -class DefaultRewardComponents: - """Dataclass to store the components of the default reward builder.""" - - route_completion: float = 0.0 - - # hard constraints - blocked: bool = False # not implemented - red_light: bool = False - collision: bool = False - stop_sign: bool = False # not implemented - off_road: bool = False - - # soft constraints - lane_distance: float = 1.0 - too_fast: float = 1.0 - off_route: float = 1.0 - comfort: float = 1.0 - ttc: float = 1.0 - - @property - def hard_constraints(self) -> List[bool]: - """ - :return: boolean values of the hard constraints, i.e. collision, that lead to termination. - """ - return [ - self.blocked, - self.red_light, - self.collision, - self.stop_sign, - self.off_road, - ] - - @property - def soft_constraints(self) -> List[float]: - """ - :return: float values of the soft constraints. - """ - return [ - self.lane_distance, - self.too_fast, - self.off_route, - self.comfort, - self.ttc, - ] - - -@dataclass -class DefaultRewardConfig: - """Configuration for the default reward builder.""" - - route_completion_type: Optional[str] = "human" - collision_type: Optional[str] = "non_stationary" - ttc_type: Optional[str] = "v2" - red_light_type: Optional[str] = None - lane_distance_type: Optional[str] = "v1" - off_route_type: Optional[str] = "v1" - comfort_type: Optional[str] = "kinematics" - - comfort_accumulation: str = "value" - ttc_accumulation: str = "value" - reward_accumulation: str = "regular" - - terminal_penalty: float = 0.0 - collision_terminal_penalty: float = 0.0 - off_road_violation_threshold: float = 0.0 - lane_distance_violation_threshold: float = 0.5 - survival_ratio: float = 0.6 - - reward_factor: float = 100.0 - - def __post_init__(self): - assert self.route_completion_type is None or self.route_completion_type in [ - "human", - "mean", - "nuplan", - ] - assert self.collision_type is None or self.collision_type in [ - "all", - "non_stationary", - "at_fault", - ] - assert self.ttc_type is None or self.ttc_type in ["v1", "v2"] - assert self.red_light_type is None or self.red_light_type in ["v1"] - assert self.lane_distance_type is None or self.lane_distance_type in ["v1"] - assert self.off_route_type is None or self.off_route_type in ["v1", "v2"] - assert self.comfort_type is None or self.comfort_type in [ - "action_delta", - "kinematics", - "kinematics_legacy", - "kinematics_history", - "kinematics_fixed", - ] - assert self.comfort_accumulation in ["terminal", "value"] - assert self.ttc_accumulation in ["terminal", "value"] - assert self.reward_accumulation in ["regular", "nuplan", "survival"] - - -class DefaultRewardBuilder(AbstractRewardBuilder): - """Default reward builder for the Gym simulation environment.""" - - def __init__(self, environment_area: AbstractEnvironmentArea, config: DefaultRewardConfig) -> None: - """ - Initializes the default reward builder. - :param environment_area: Environment area class that defines the map patch to calculate the reward. - :param config: Configuration for the default reward builder. 
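The option validation in DefaultRewardConfig above relies on dataclasses calling __post_init__ after field assignment, so invalid option strings fail at construction time. A hypothetical, trimmed stand-in (not the real config) showing the pattern:

# Minimal dataclass validation sketch.
from dataclasses import dataclass

@dataclass
class MiniRewardConfig:  # trimmed stand-in for DefaultRewardConfig
    collision_type: str = "non_stationary"
    reward_accumulation: str = "regular"

    def __post_init__(self) -> None:
        assert self.collision_type in ["all", "non_stationary", "at_fault"]
        assert self.reward_accumulation in ["regular", "nuplan", "survival"]

MiniRewardConfig(collision_type="at_fault")         # ok
try:
    MiniRewardConfig(reward_accumulation="banana")  # raises AssertionError
except AssertionError:
    print("invalid reward_accumulation rejected at construction")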
- """ - - self._environment_area = environment_area - self._config = config - - # lazy loaded - self._reward_history: List[DefaultRewardComponents] = [] - self._prev_collided_track_tokens: List[str] = [] - self._expert_red_light_infractions: List[str] = [] - self._comfort_values: List[npt.NDArray[np.bool_]] = [] - self._progress_cache: Optional[ProgressCache] = None - - def reset(self) -> None: - """Inherited, see superclass.""" - self._reward_history: List[DefaultRewardComponents] = [] - self._prev_collided_track_tokens: List[str] = [] - self._expert_red_light_infractions: List[str] = [] - self._comfort_values: List[npt.NDArray[np.bool_]] = [] - self._progress_cache: Optional[ProgressCache] = None - - def build_reward(self, simulation_wrapper: SimulationWrapper, info: Dict[str, Any]) -> Tuple[float, bool, bool]: - """Inherited, see superclass.""" - - map_cache, detection_cache = environment_cache_manager.build_environment_caches( - planner_input=simulation_wrapper.current_planner_input, - planner_initialization=simulation_wrapper.planner_initialization, - environment_area=self._environment_area, - ) - info["map_cache"] = map_cache - info["detection_cache"] = detection_cache - - reward_components = self._calculate_reward_components(simulation_wrapper, map_cache, detection_cache) - - if self._config.reward_accumulation == "nuplan": - reward, termination, truncation = self._nuplan_accumulate(reward_components) - elif self._config.reward_accumulation == "survival": - reward, termination, truncation = self._survival_accumulate(reward_components) - else: - reward, termination, truncation = self._regular_accumulate(reward_components) - self._reward_history.append(reward_components) - - if termination or truncation or not simulation_wrapper.is_simulation_running(): - info["reward"] = self._accumulate_info() - info["comfort"] = self._accumulate_info_comfort() - - self._add_value_measurements(simulation_wrapper, info) - return reward, termination, truncation - - def _regular_accumulate(self, reward_components: DefaultRewardComponents) -> Tuple[float, bool, bool]: - """ - Accumulate the reward components into a single reward value, as described in CaRL paper. - TODO: Refactor this method. - :param reward_components: Dataclass storing reward components. - :return: - - reward: The accumulated reward value. - - termination: Whether the simulation terminates in the current step. - - truncation: Whether the simulation is truncated in the current step. - """ - termination = any(reward_components.hard_constraints) - if self._config.comfort_accumulation == "terminal": - termination = termination or reward_components.comfort < 1.0 - if self._config.ttc_accumulation == "terminal": - termination = termination or reward_components.ttc < 1.0 - truncation = termination - terminal_penalty = ( - self._config.collision_terminal_penalty if reward_components.collision else self._config.terminal_penalty - ) - terminate_factor = 0.0 if termination else 1.0 - reward = ( - reward_components.route_completion * np.prod(reward_components.soft_constraints) * terminate_factor - + terminal_penalty - ) - return reward * self._config.reward_factor, termination, truncation - - def _survival_accumulate(self, reward_components: DefaultRewardComponents) -> Tuple[float, bool, bool]: - """ - Accumulate the reward components into a single reward value, and adding a survival bonus. - TODO: Refactor this method. - :param reward_components: Dataclass storing reward components. - :return: - - reward: The accumulated reward value. 
- - termination: Whether the simulation terminates in the current step. - - truncation: Whether the simulation is truncated in the current step. - """ - termination = any(reward_components.hard_constraints) - truncation = termination - terminal_penalty = ( - self._config.collision_terminal_penalty if reward_components.collision else self._config.terminal_penalty - ) - terminate_factor = 0.0 if termination else 1.0 - raw_reward = (1 - self._config.survival_ratio) * reward_components.route_completion * np.prod( - reward_components.soft_constraints - ) + (self._config.survival_ratio / NUM_SCENARIO_ITERATIONS) - - reward = raw_reward * terminate_factor + terminal_penalty - return reward * self._config.reward_factor, termination, truncation - - def _nuplan_accumulate(self, reward_components: DefaultRewardComponents) -> Tuple[float, bool, bool]: - """ - Accumulate the reward components into a single reward value, using a weighted combination similar to nuPlan. - TODO: Refactor this method. - :param reward_components: Dataclass storing reward components. - :return: - - reward: The accumulated reward value. - - termination: Whether the simulation terminates in the current step. - - truncation: Whether the simulation is truncated in the current step. - """ - - termination = any(reward_components.hard_constraints) - truncation = termination - reward = 0.0 - - if not termination: - - progress = reward_components.route_completion - - ttc = 1.0 if reward_components.ttc == 1.0 else 0.0 - speed = reward_components.too_fast - comfort = 1.0 if reward_components.comfort == 1.0 else 0.0 - - ttc /= NUM_SCENARIO_ITERATIONS - speed /= NUM_SCENARIO_ITERATIONS - comfort /= NUM_SCENARIO_ITERATIONS - - reward = (5 * progress + 5 * ttc + 4 * speed + 2 * comfort) / 16 - reward = reward * reward_components.off_route * reward_components.lane_distance * self._config.reward_factor - - return reward, termination, truncation - - def _accumulate_info(self) -> Dict[str, float]: - """ - Helper function to log the accumulated reward information. - TODO: Remove this method. - """ - reward_info: Dict[str, float] = {} - reward_info["reward_progress"] = np.sum([reward.route_completion for reward in self._reward_history]) - - # reward_info["reward_blocked"] = not np.any([reward.blocked for reward in self._reward_history]) - reward_info["reward_red_light"] = not np.any([reward.red_light for reward in self._reward_history]) - reward_info["reward_collision"] = not np.any([reward.collision for reward in self._reward_history]) - # reward_info["reward_stop_sign"] = not np.any([reward.stop_sign for reward in self._reward_history]) - reward_info["reward_off_road"] = not np.any([reward.off_road for reward in self._reward_history]) - - reward_info["reward_lane_distance"] = np.mean([reward.lane_distance for reward in self._reward_history]) - reward_info["reward_too_fast"] = np.mean([reward.too_fast for reward in self._reward_history]) - reward_info["reward_off_route"] = np.mean([reward.off_route for reward in self._reward_history]) - reward_info["reward_comfort"] = not np.any([self._reward_history[-1].comfort < 1.0]) - reward_info["reward_ttc"] = not np.any([reward.ttc < 1.0 for reward in self._reward_history]) - - for key, value in reward_info.items(): - reward_info[key] = float(value) - - return reward_info - - def _accumulate_info_comfort(self) -> Dict[str, float]: - """ - Helper function to log the accumulated comfort information. - TODO: Remove this method. 
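The arithmetic of the "regular" and "survival" accumulations above, with illustrative per-step values (only NUM_SCENARIO_ITERATIONS = 150 is taken from the module constant):

# Arithmetic-only sketch; termination gating and penalties omitted for brevity.
import numpy as np

NUM_SCENARIO_ITERATIONS = 150
route_completion = 0.004                        # per-step normalized progress
soft_constraints = [1.0, 1.0, 1.0, 0.5, 1.0]    # e.g. comfort currently at 0.5
reward_factor = 100.0
survival_ratio = 0.6

# "regular" (CaRL-style): progress gated multiplicatively by soft constraints.
regular = route_completion * np.prod(soft_constraints) * reward_factor
print(f"regular:  {regular:.3f}")   # 0.200

# "survival": blend the same shaped term with a constant per-step bonus.
survival = (
    (1 - survival_ratio) * route_completion * np.prod(soft_constraints)
    + survival_ratio / NUM_SCENARIO_ITERATIONS
) * reward_factor
print(f"survival: {survival:.3f}")  # 0.480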
- """ - comfort_info: Dict[str, float] = {} - comfort = np.array(self._comfort_values, dtype=np.bool_) - - comfort_info["comfort_lon_acceleration"] = comfort[-1, 0] - comfort_info["comfort_lat_acceleration"] = comfort[-1, 1] - comfort_info["comfort_jerk_metric"] = comfort[-1, 2] - comfort_info["comfort_lon_jerk_metric"] = comfort[-1, 3] - comfort_info["comfort_yaw_accel"] = comfort[-1, 4] - comfort_info["comfort_yaw_rate"] = comfort[-1, 5] - - for key, value in comfort_info.items(): - comfort_info[key] = float(value) - - return comfort_info - - def _add_value_measurements(self, simulation_wrapper: SimulationWrapper, info: Dict[str, Any]) -> None: - """ - Pass some information for the value observation. - TODO: DEBUG/REMOVE. - :param simulation_wrapper: Complete simulation wrapper object used for gym simulation. - :param info: Arbitrary information dictionary, for passing information between modules. - """ - assert len(self._reward_history) > 0 - - current_iteration = simulation_wrapper.current_planner_input.iteration.index - num_simulation_iterations = simulation_wrapper.scenario.number_of_iterations - - remaining_time = 1 - (current_iteration / num_simulation_iterations) - remaining_progress = 1 - simulation_wrapper.route_completion - comfort_score = self._reward_history[-1].comfort - ttc_score = self._reward_history[-1].ttc - - info["remaining_time"] = remaining_time - info["remaining_progress"] = remaining_progress - info["comfort_score"] = comfort_score - info["ttc_score"] = ttc_score - - def _calculate_reward_components( - self, simulation_wrapper: SimulationWrapper, map_cache: MapCache, detection_cache: BoxDetectionCache - ) -> DefaultRewardComponents: - """ - Internal method to calculate the reward components based on the current simulation state. - :param simulation_wrapper: Complete simulation wrapper object used for gym simulation. - :param map_cache: Cache map elements in the environment area. - :param detection_cache: Cached objects for detection tracks in the current simulation step. - :return: dataclass containing the reward components. - """ - ego_states = simulation_wrapper.current_planner_input.history.ego_states - current_ego_state = ego_states[-1] - - component_dict: Dict[str, Union[bool, float]] = {} - - current_lane, intersecting_lanes = _find_current_and_intersecting_lanes(current_ego_state, map_cache) - - # ---------------- Route Completion ---------------- - if self._config.route_completion_type is not None: - if self._config.route_completion_type == "human": - component_dict["route_completion"] = calculate_route_completion_human(ego_states, simulation_wrapper) - elif self._config.route_completion_type == "mean": - component_dict["route_completion"] = calculate_route_completion_mean(ego_states, simulation_wrapper) - elif self._config.route_completion_type == "nuplan": - ( - component_dict["route_completion"], - progress_cache, - ) = calculate_route_completion_nuplan(ego_states, simulation_wrapper, self._progress_cache) - self._progress_cache = progress_cache - else: - raise ValueError(f"Invalid route completion type: {self._config.route_completion_type}") - - # ---------------- Hard Constraints ---------------- - - # 5. Off road - component_dict["off_road"], in_multiple_lanes = _calculate_off_road( - current_ego_state, - map_cache, - intersecting_lanes, - self._config.off_road_violation_threshold, - ) - in_multiple_lanes_or_offroad = in_multiple_lanes or component_dict["off_road"] - - # 1. 
Is ego blocked for 90s - # component_dict["blocked"] = _calculate_blocked() # Not implemented - - # 2. Red light infraction - if self._config.red_light_type is not None: - if self._config.red_light_type == "v1": - ( - component_dict["red_light"], - self._expert_red_light_infractions, - ) = _calculate_red_light( - simulation_wrapper, - map_cache, - current_lane, - self._expert_red_light_infractions, - ) - else: - raise ValueError(f"Invalid red light type: {self._config.red_light_type}") - - # 3. Collision - if self._config.collision_type is not None: - if self._config.collision_type == "all": - collision, collided_track_tokens = calculate_all_collisions( - current_ego_state, - detection_cache.tracked_objects, - self._prev_collided_track_tokens, - ) - elif self._config.collision_type == "non_stationary": - collision, collided_track_tokens = calculate_non_stationary_collisions( - current_ego_state, - detection_cache.tracked_objects, - self._prev_collided_track_tokens, - ) - elif self._config.collision_type == "at_fault": - collision, collided_track_tokens = calculate_at_fault_collision( - current_ego_state, - detection_cache.tracked_objects, - self._prev_collided_track_tokens, - in_multiple_lanes_or_offroad, - ) - else: - raise ValueError(f"Invalid collision type: {self._config.collision_type}") - - component_dict["collision"] = collision - self._prev_collided_track_tokens.extend(collided_track_tokens) - - # 4. Stop signs - # component_dict["stop_sign"] = _calculate_stop_sign() # Not implemented - - # ---------------- Soft Constraints ---------------- - - # 1. Lane Distance - if self._config.lane_distance_type is not None: - if self._config.lane_distance_type == "v1": - component_dict["lane_distance"] = _calculate_lane_distance( - current_ego_state, - current_lane, - self._config.lane_distance_violation_threshold, - ) - else: - raise ValueError(f"Invalid lane distance type: {self._config.lane_distance_type}") - - # 2. Driving too fast - component_dict["too_fast"] = _calculate_too_fast(current_ego_state, current_lane) - - # 3. Driving off route - if self._config.off_route_type is not None: - if self._config.off_route_type == "v1": - component_dict["off_route"] = calculate_off_route_v1(simulation_wrapper, map_cache) - elif self._config.off_route_type == "v2": - component_dict["off_route"] = calculate_off_route_v2(simulation_wrapper, map_cache) - else: - raise ValueError(f"Invalid off route type: {self._config.off_route_type}") - - # 4. comfort - if self._config.comfort_type is not None: - comfort_results = None - if self._config.comfort_type == "action_delta": - component_dict["comfort"] = calculate_action_delta_comfort(simulation_wrapper) - elif self._config.comfort_type == "kinematics": - ( - component_dict["comfort"], - comfort_results, - ) = calculate_kinematics_comfort(simulation_wrapper) - elif self._config.comfort_type == "kinematics_legacy": - ( - component_dict["comfort"], - comfort_results, - ) = calculate_kinematics_comfort_legacy(simulation_wrapper) - elif self._config.comfort_type == "kinematics_history": - ( - component_dict["comfort"], - comfort_results, - ) = calculate_kinematics_history_comfort(simulation_wrapper) - elif self._config.comfort_type == "kinematics_fixed": - ( - component_dict["comfort"], - comfort_results, - ) = calculate_kinematics_comfort_fixed(simulation_wrapper) - else: - raise ValueError(f"Invalid comfort type: {self._config.comfort_type}") - - if comfort_results is not None: - self._comfort_values.append(comfort_results) - - # 5. 
Time to collision
-        if self._config.ttc_type is not None:
-            ttc_failed_previously = any([reward.ttc < SUCCESS_TTC for reward in self._reward_history])
-            if ttc_failed_previously:
-                component_dict["ttc"] = FAIL_TTC
-            elif self._config.ttc_type == "v1":
-                component_dict["ttc"] = calculate_ttc_v1(simulation_wrapper)
-            elif self._config.ttc_type == "v2":
-                component_dict["ttc"] = calculate_ttc_v2(
-                    simulation_wrapper,
-                    self._prev_collided_track_tokens,
-                    in_multiple_lanes_or_offroad,
-                )
-            else:
-                raise ValueError(f"Invalid ttc type: {self._config.ttc_type}")
-
-        return DefaultRewardComponents(**component_dict)
-
-
-def _calculate_blocked() -> bool:
-    """Placeholder for blocked calculation. TODO: remove."""
-    return False
-
-
-def _calculate_red_light(
-    simulation_wrapper: SimulationWrapper,
-    map_cache: MapCache,
-    current_lane: Optional[LaneGraphEdgeMapObject],
-    expert_red_light_infractions: List[str],
-) -> Tuple[bool, List[str]]:
-    """
-    Calculates the red light infraction based on the current iteration.
-    TODO: Refactor this method.
-    :param simulation_wrapper: Wrapper object containing complete nuPlan simulation.
-    :param map_cache: Cached map elements in the environment area.
-    :param current_lane: Lane object aligned to the ego vehicle in the current iteration.
-    :param expert_red_light_infractions: List of traffic light infractions of the human expert.
-    :return: Whether the ego vehicle is violating a red light and the updated list of expert red light infractions.
-    """
-
-    STOPPED_SPEED_THRESHOLD: float = 5e-02
-
-    iteration = simulation_wrapper.current_planner_input.iteration.index
-    ego_state = simulation_wrapper.current_ego_state
-    expert_ego_state = simulation_wrapper.scenario.get_ego_state_at_iteration(iteration)
-
-    # Red lights can only be violated while moving on a lane connector.
-    ego_on_lane = current_lane is None or isinstance(current_lane, Lane)
-    ego_stopped = ego_state.dynamic_car_state.speed < STOPPED_SPEED_THRESHOLD
-
-    if ego_on_lane or ego_stopped:
-        return False, expert_red_light_infractions
-
-    # Only consider red-light connectors that are on the route.
-    red_connectors: Dict[str, LaneConnector] = {}
-    for connector_id, connector in map_cache.lane_connectors.items():
-        if (
-            (connector.get_roadblock_id() in map_cache.route_lane_group_ids)
-            and (connector_id in map_cache.traffic_lights.keys())
-            and (map_cache.traffic_lights[connector_id] == TrafficLightStatusType.RED)
-        ):
-            red_connectors[connector_id] = connector
-
-    red_connector_map = PDMOccupancyMap(
-        list(red_connectors.keys()),
-        [connector.polygon for connector in red_connectors.values()],
-    )
-    ego_center_point = Point(*ego_state.center.array)
-    expert_center_point = Point(*expert_ego_state.center.array)
-
-    ego_intersecting_connectors = red_connector_map.intersects(ego_center_point)
-    expert_intersecting_connectors = red_connector_map.intersects(expert_center_point)
-    expert_red_light_infractions.extend(expert_intersecting_connectors)
-
-    # Ignore connectors where the human expert also ran the red light.
-    non_covered_infractions = list(set(ego_intersecting_connectors) - set(expert_red_light_infractions))
-    if len(non_covered_infractions) > 0:
-        return True, expert_red_light_infractions
-
-    return False, expert_red_light_infractions
-
-
-def _calculate_stop_sign() -> bool:
-    """Placeholder for stop sign infraction. TODO: remove."""
-    return False
-
-
-def _calculate_off_road(
-    ego_state: EgoState,
-    map_cache: MapCache,
-    intersecting_lanes: List[LaneGraphEdgeMapObject],
-    violation_threshold: float,
-) -> Tuple[bool, bool]:
-    """
-    Calculates whether the ego vehicle is off-road based on its corners and the drivable area map.
- :param ego_state: Ego vehicle state of the current iteration. - :param map_cache: Cache map elements in the environment area. - :param intersecting_lanes: List of lanes that intersect with the ego vehicle's position. - :param violation_threshold: Threshold distance to consider a corner as off-road. - :return: Whether the ego vehicle is off-road and whether it is in multiple lanes. - """ - - drivable_area_map = map_cache.drivable_area_map - ego_corners = np.array( - [[point.x, point.y] for point in ego_state.agent.box.all_corners()], - dtype=np.float64, - ) - corner_in_polygons = drivable_area_map.contains_vectorized(ego_corners) # (geom, 4) - - polygon_indices = np.where(corner_in_polygons.sum(axis=-1) > 0)[0] - corners_dwithin_polygons = corner_in_polygons.sum(axis=0) > 0 - - if violation_threshold > 0.0 and not np.all(corners_dwithin_polygons): - ego_polygons = [drivable_area_map.geometries[idx] for idx in polygon_indices] - ego_polygons.extend([lane.polygon for lane in intersecting_lanes]) - - for corner_idx in np.where(~corners_dwithin_polygons)[0]: - distances = [polygon.distance(Point(*ego_corners[corner_idx])) for polygon in ego_polygons] - if len(distances) > 0 and min(distances) < violation_threshold: - corners_dwithin_polygons[corner_idx] = True - - off_road = not np.all(corners_dwithin_polygons) - in_multiple_lanes = len(polygon_indices) > 1 - - return off_road, in_multiple_lanes - - -def _find_current_and_intersecting_lanes( - ego_state: EgoState, - map_cache: MapCache, -) -> Tuple[Optional[LaneGraphEdgeMapObject], List[LaneGraphEdgeMapObject]]: - """ - Helper function to find the current lane and intersecting lanes based on the ego vehicle's state. - TODO: Refactor this method. - :param ego_state: Ego vehicle state of the current iteration. - :param map_cache: Cache map elements in the environment area. - :return: Tuple of - - current_lane: The lane that the ego vehicle is currently on, or None if not found. - - intersecting_lanes: List of lanes that intersect with the ego vehicle's position. - """ - - current_lane: Optional[LaneGraphEdgeMapObject] = None - - # store lanes and lane connectors in common dict - lanes_dict: Dict[str, LaneGraphEdgeMapObject] = { - lane.id: lane for lane in list(map_cache.lanes.values()) + list(map_cache.lane_connectors.values()) - } - - # find intersecting lanes - lane_polygons: List[Polygon] = [lane.polygon for lane in lanes_dict.values()] - lane_map = PDMOccupancyMap(list(lanes_dict.keys()), lane_polygons) - ego_center_point = Point(ego_state.center.x, ego_state.center.y) - intersecting_lanes_ids = lane_map.intersects(ego_center_point) - intersecting_lanes = [lanes_dict[lane_id] for lane_id in intersecting_lanes_ids] - - def _calculate_heading_error(ego_pose: StateSE2, lane: LaneGraphEdgeMapObject) -> float: - """Calculate the heading error of the ego vehicle with respect to the lane.""" - - # calculate nearest state on baseline - lane_se2_array = states_se2_to_array(lane.baseline_path.discrete_path) - lane_distances = np.linalg.norm(ego_pose.point.array[None, ...] 
- lane_se2_array[..., :2], axis=-1) - - # calculate heading error - heading_error = lane.baseline_path.discrete_path[np.argmin(lane_distances)].heading - ego_pose.heading - heading_error = np.abs(normalize_angle(heading_error)) - - return heading_error - - if len(intersecting_lanes_ids) > 0: - lane_route_errors: Dict[str, float] = {} - lane_errors: Dict[str, float] = {} - - for lane_id in intersecting_lanes_ids: - lane = lanes_dict[lane_id] - heading_error = _calculate_heading_error(ego_state.center, lane) - if lane.get_roadblock_id() in map_cache.route_lane_group_ids: - lane_route_errors[lane_id] = heading_error - lane_errors[lane_id] = heading_error - - # Search for lanes on route first - if len(lane_route_errors) > 0: - current_lane = lanes_dict[min(lane_route_errors, key=lane_route_errors.get)] - - else: # Fallback to all intersecting lanes - current_lane = lanes_dict[min(lane_errors, key=lane_errors.get)] - - return current_lane, intersecting_lanes - - -def _calculate_lane_distance( - ego_state: EgoState, - current_lane: Optional[LaneGraphEdgeMapObject], - lane_distance_violation_threshold: float = 0.5, -) -> float: - """ - Calculates the distance of the ego vehicle to the center of the current lane. - Normalizes the distance to a value between 0 and 1. - :param ego_state: Ego vehicle state of the current iteration. - :param current_lane: Lane object aligned to the ego vehicle in the current iteration. - :param lane_distance_violation_threshold: Normed distance for which the ego is not penalized, defaults to 0.5 - :return: Normed reward distance between 0 and 1, where 1 is the best value. - """ - - if current_lane is not None and isinstance(current_lane, Lane): - ego_center_point = Point(ego_state.center.x, ego_state.center.y) - center_distance, left_distance, right_distance = ( - ego_center_point.distance(current_lane.baseline_path.linestring), - ego_center_point.distance(current_lane.left_boundary.linestring), - ego_center_point.distance(current_lane.right_boundary.linestring), - ) - - # assumes that the ego center is in lane polygon - center_distance_norm = (center_distance - lane_distance_violation_threshold) / ( - (center_distance + np.minimum(left_distance, right_distance) - lane_distance_violation_threshold) + 1e-6 - ) - center_distance_norm = np.clip(center_distance_norm, 0, 1) - return 1.0 - (center_distance_norm * 0.5) - - return 1.0 - - -def _calculate_too_fast( - ego_state: EgoState, - current_lane: Optional[LaneGraphEdgeMapObject], - max_overspeed_value_threshold: float = 2.23, -) -> float: - """ - Calculates the speed of the ego vehicle in relation to the speed limit of the current lane. - :param ego_state: Ego vehicle state of the current iteration. - :param current_lane: Lane object aligned to the ego vehicle in the current iteration. - :param max_overspeed_value_threshold: max exceeding value for linear penalty, defaults to 2.23 - :return: Reward value between 0 and 1, where 1 is the best value.
- """ - - # Adding a small tolerance to handle cases where max_overspeed_value_threshold is specified as 0 - max_overspeed_value_threshold_ = max(max_overspeed_value_threshold, 1e-3) - - if current_lane is not None: - speed_limit = current_lane.speed_limit_mps - if speed_limit is not None: - exceeding_speed = ego_state.dynamic_car_state.speed - speed_limit - if exceeding_speed > 0.0: - violation_loss = exceeding_speed / max_overspeed_value_threshold_ - return float(max(0.0, 1.0 - violation_loss)) - return 1.0 diff --git a/d123/simulation/gym/environment/scenario_sampler/__init__.py b/d123/simulation/gym/environment/scenario_sampler/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/d123/simulation/gym/environment/scenario_sampler/abstract_scenario_sampler.py b/d123/simulation/gym/environment/scenario_sampler/abstract_scenario_sampler.py deleted file mode 100644 index 849163ae..00000000 --- a/d123/simulation/gym/environment/scenario_sampler/abstract_scenario_sampler.py +++ /dev/null @@ -1,25 +0,0 @@ -from abc import ABC, abstractmethod -from typing import List, Optional - -from nuplan.planning.scenario_builder.abstract_scenario import AbstractScenario - - -class AbstractScenarioSampler(ABC): - """Abstract class for sampling scenarios in a Gym simulation environment.""" - - @abstractmethod - def sample(self, seed: Optional[int] = None) -> AbstractScenario: - """ - Samples a single scenario. - :param seed: Optional seed used for sampling, defaults to None - :return: Scenario interface of nuPlan. - """ - - @abstractmethod - def sample_batch(self, batch_size: int, seed: Optional[int] = None) -> List[AbstractScenario]: - """ - Samples a batch of scenarios. - :param batch_size: number of scenarios to sample. - :param seed: Optional seed used for sampling, defaults to None - :return: List of scenario interfaces of nuPlan. - """ diff --git a/d123/simulation/gym/environment/scenario_sampler/cache_scenario_sampler.py b/d123/simulation/gym/environment/scenario_sampler/cache_scenario_sampler.py deleted file mode 100644 index b1febeb5..00000000 --- a/d123/simulation/gym/environment/scenario_sampler/cache_scenario_sampler.py +++ /dev/null @@ -1,61 +0,0 @@ -from logging import getLogger -from typing import List, Optional - -import numpy as np -from nuplan.planning.scenario_builder.abstract_scenario import AbstractScenario - -from d123.simulation.gym.cache.gym_scenario_cache import GymScenarioCache -from d123.simulation.gym.environment.scenario_sampler.abstract_scenario_sampler import AbstractScenarioSampler - -logger = getLogger(__name__) - - -class CacheScenarioSampler(AbstractScenarioSampler): - """ - Scenario sampler that loads scenarios from the Gym cache structure. - NOTE: It is possible to implement a scenario sampler from nuPlan SQL database, but not included in the code. - We tried that. It was too slow. - """ - - def __init__( - self, log_names: List[str], cache_path: str, format: str = "gz", ignore_log_names: bool = False - ) -> None: - """ - Initializes the CacheScenarioSampler. - :param log_names: Log names to include during training. - :param cache_path: Path to the cache directory where scenarios are saved. - :param format: Format of the scenario cache (i.e. gzip), defaults to "gz" - """ - - self._log_names = log_names - self._scenario_cache = GymScenarioCache(cache_path, format) - - # NOTE: Additional conditions (e.g. 
depending on scenario type) could be added here - if ignore_log_names: - logger.warning( - "Ignoring provided log names: all scenarios from the cache will be loaded. " - "This may lead to training on validation/test splits if the cache was not properly filtered. " - "Ensure your cache only contains appropriate training scenarios." - ) - self._file_paths = self._scenario_cache.file_paths - else: - self._file_paths = [ - file_path - for file_path, log_name in zip(self._scenario_cache.file_paths, self._scenario_cache.log_names) - if log_name in self._log_names - ] - - def sample(self, seed: Optional[int] = None) -> AbstractScenario: - """Inherited, see superclass.""" - return self.sample_batch(1, seed=seed)[0] - - def sample_batch(self, batch_size: int, seed: Optional[int] = None) -> List[AbstractScenario]: - """Inherited, see superclass.""" - rng = np.random.default_rng(seed=seed) - indices = rng.choice(len(self._file_paths), size=batch_size) - - scenarios: List[AbstractScenario] = [] - for idx in indices: - file_path = self._file_paths[idx] - scenarios.append(self._scenario_cache.load_scenario(file_path)) - return scenarios diff --git a/d123/simulation/gym/environment/simulation_builder/__init__.py b/d123/simulation/gym/environment/simulation_builder/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/d123/simulation/gym/environment/simulation_builder/abstract_simulation_builder.py b/d123/simulation/gym/environment/simulation_builder/abstract_simulation_builder.py deleted file mode 100644 index 9aed2f8f..00000000 --- a/d123/simulation/gym/environment/simulation_builder/abstract_simulation_builder.py +++ /dev/null @@ -1,16 +0,0 @@ -from abc import ABC, abstractmethod - -from nuplan.planning.scenario_builder.abstract_scenario import AbstractScenario -from nuplan.planning.simulation.simulation import Simulation - - -class AbstractSimulationBuilder(ABC): - """Abstract class for building a nuPlan simulation object, which includes background traffic, etc.""" - - @abstractmethod - def build_simulation(self, scenario: AbstractScenario) -> Simulation: - """ - Builds a nuPlan simulation object. - :param scenario: Scenario interface of nuPlan. - :return: Simulation object of nuPlan. - """ diff --git a/d123/simulation/gym/environment/simulation_builder/default_simulation_builder.py b/d123/simulation/gym/environment/simulation_builder/default_simulation_builder.py deleted file mode 100644 index 8c8818b8..00000000 --- a/d123/simulation/gym/environment/simulation_builder/default_simulation_builder.py +++ /dev/null @@ -1,91 +0,0 @@ -import random - -from carl_nuplan.planning.simulation.controller.one_stage_controller import OneStageController - -# TODO: refactor for general motion models, observations, etc.
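For orientation, a minimal usage sketch for the CacheScenarioSampler removed above; the cache path and log name below are placeholders, not values from this repository:

from d123.simulation.gym.environment.scenario_sampler.cache_scenario_sampler import CacheScenarioSampler

sampler = CacheScenarioSampler(
    log_names=["example_train_log"],  # placeholder log name
    cache_path="/path/to/gym_cache",  # placeholder cache directory
    format="gz",
)
single_scenario = sampler.sample(seed=0)  # one AbstractScenario, reproducible via the seed
scenario_batch = sampler.sample_batch(batch_size=4, seed=0)  # list of AbstractScenario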
-from nuplan.common.actor_state.vehicle_parameters import get_pacifica_parameters -from nuplan.planning.scenario_builder.abstract_scenario import AbstractScenario -from nuplan.planning.simulation.controller.motion_model.kinematic_bicycle import KinematicBicycleModel -from nuplan.planning.simulation.observation.idm_agents import IDMAgents -from nuplan.planning.simulation.observation.tracks_observation import TracksObservation -from nuplan.planning.simulation.simulation import Simulation -from nuplan.planning.simulation.simulation_setup import SimulationSetup -from nuplan.planning.simulation.simulation_time_controller.step_simulation_time_controller import ( - StepSimulationTimeController, -) - -from d123.simulation.gym.environment.simulation_builder.abstract_simulation_builder import ( - AbstractSimulationBuilder, -) - - -class DefaultSimulationBuilder(AbstractSimulationBuilder): - """Default simulation builder for CaRL.""" - - def __init__(self, agent_type: str = "tracks") -> None: - """ - Initializes the DefaultSimulationBuilder. - NOTE: Using "tracks" is by far the fastest option and recommended for testing and experimentation. - The IDM implementation of nuPlan is very slow but could be improved if required. - :param agent_type: whether to use tracks (log-replay), idm agents, a mixture, or no background, defaults to "tracks" - """ - # TODO: use Literal typing. - assert agent_type in ["tracks", "idm_agents", "mixed", "no_tracks"] - - self._agent_type = agent_type - self._callback = None - - self._idm_agents_probability = 0.5 - self._history_buffer_duration = 1.0 # [s] - - def build_simulation(self, scenario: AbstractScenario) -> Simulation: - """Inherited, see superclass.""" - simulation_setup = self._build_simulation_setup(scenario) - return Simulation( - simulation_setup=simulation_setup, - callback=self._callback, - simulation_history_buffer_duration=self._history_buffer_duration, - ) - - def _build_simulation_setup(self, scenario: AbstractScenario) -> SimulationSetup: - """ - Helper function to build the simulation setup from a scenario. - :param scenario: Scenario interface of nuPlan. - :return: SimulationSetup object of nuPlan. 
- """ - - time_controller = StepSimulationTimeController(scenario) - - if self._agent_type == "mixed": - use_idm_agents = random.random() < self._idm_agents_probability - agent_type = "idm_agents" if use_idm_agents else "tracks" - else: - agent_type = self._agent_type - - if agent_type == "tracks": - observations = TracksObservation(scenario) - elif agent_type == "idm_agents": - observations = IDMAgents( - scenario=scenario, - target_velocity=10, - min_gap_to_lead_agent=1.0, - headway_time=1.5, - accel_max=1.0, - decel_max=2.0, - open_loop_detections_types=[ - "PEDESTRIAN", - "BARRIER", - "CZONE_SIGN", - "TRAFFIC_CONE", - "GENERIC_OBJECT", - ], - minimum_path_length=20, - planned_trajectory_samples=None, - planned_trajectory_sample_interval=None, - radius=100, - ) - - motion_model = KinematicBicycleModel(get_pacifica_parameters()) - ego_controller = OneStageController(scenario, motion_model) - - return SimulationSetup(time_controller, observations, ego_controller, scenario) diff --git a/d123/simulation/gym/environment/simulation_wrapper.py b/d123/simulation/gym/environment/simulation_wrapper.py deleted file mode 100644 index 4ffa8de6..00000000 --- a/d123/simulation/gym/environment/simulation_wrapper.py +++ /dev/null @@ -1,160 +0,0 @@ -# TODO: refactor and maybe move in environment wrapper -from functools import cached_property -from typing import List, Optional, Tuple - -import numpy as np -from nuplan.common.actor_state.ego_state import EgoState -from nuplan.planning.scenario_builder.abstract_scenario import AbstractScenario -from nuplan.planning.simulation.history.simulation_history import AbstractTrajectory -from nuplan.planning.simulation.planner.abstract_planner import ( - PlannerInitialization, - PlannerInput, -) -from nuplan.planning.simulation.simulation import Simulation -from shapely.creation import linestrings -from shapely.geometry import LineString - - -class SimulationWrapper: - """ - Helper object to wrap the nuPlan simulation and provide additional functionality. - TODO: - - Refactor this class. - - Move route completion logic into the reward builder. - """ - - def __init__(self, simulation: Simulation): - """ - Initializes the SimulationWrapper. - :param simulation: Simulation object of nuPlan to wrap.I - """ - - self._simulation: Simulation = simulation - self._route_completion: float = 0.0 - self._history_trajectories: List[AbstractTrajectory] = [] - - # lazy loaded - self._planner_initialization: Optional[PlannerInitialization] - self._planner_input: Optional[PlannerInput] - self._simulation_ego_states: List[EgoState] = [] - - def initialize(self) -> Tuple[PlannerInput, PlannerInitialization]: - """ - Initializes the simulation and returns the planner input and initialization. - :return: Tuple containing the planner input and initialization according to the nuPlan interface. - """ - self._planner_initialization = self._simulation.initialize() - self._planner_input = self._simulation.get_planner_input() - self._simulation_ego_states.append(self._planner_input.history.current_state[0]) - return self._planner_input, self._planner_initialization - - def step(self, trajectory: AbstractTrajectory) -> Tuple[PlannerInput, bool]: - """ - Propagates the simulation and returns the new planner input. - :return: Tuple containing the planner input and whether the simulation is running. - """ - assert self._planner_initialization is not None, "SimulationManager: Call .initialize() first!" 
- self._history_trajectories.append(trajectory) - self._simulation.propagate(trajectory) - self._planner_input = self._simulation.get_planner_input() - self._simulation_ego_states.append(self._planner_input.history.current_state[0]) - output = self._planner_input, self.is_simulation_running() - return output - - def is_simulation_running(self) -> bool: - """ - Checks if the simulation is still running. - :return: boolean. - """ - iteration = self._simulation._time_controller.get_iteration().index - num_iterations = self._simulation._time_controller.number_of_iterations() - return iteration != num_iterations - 2 - - @property - def simulation(self) -> Simulation: - """ - :return: Simulation used by the SimulationRunner - """ - assert self._simulation is not None - return self._simulation - - @property - def history_trajectories(self) -> List[AbstractTrajectory]: - """ - :return: List of trajectories executed in the simulation so far. - """ - assert len(self._history_trajectories) > 0 - return self._history_trajectories - - @property - def scenario(self) -> AbstractScenario: - """ - :return: Get the scenario relative to the simulation. - """ - assert self._simulation is not None - return self._simulation.scenario - - @property - def planner_initialization(self) -> PlannerInitialization: - """ - :return: Get the current planner initialization object. - """ - assert self._planner_initialization is not None - return self._planner_initialization - - @property - def simulation_ego_states(self) -> List[EgoState]: - """ - :return: list of ego states from the simulation - """ - return self._simulation_ego_states - - @property - def current_planner_input(self) -> PlannerInput: - """ - :return: Get the current planner input object. - """ - assert self._planner_input is not None - return self._planner_input - - @property - def current_ego_state(self) -> EgoState: - """ - :return: Current ego state from the simulation. - """ - assert self._planner_input is not None - ego_state, _ = self._planner_input.history.current_state - return ego_state - - @property - def initial_ego_state(self) -> EgoState: - """ - :return: Initial ego state from the simulation. - """ - return self.scenario.initial_ego_state - - @cached_property - def ego_linestring(self) -> LineString: - """ - Creates a linestring from the human ego states of the simulation. - TODO: remove this function. - :return: Shapely linestring of the human ego states. - """ - ego_states = list(self.scenario.get_expert_ego_trajectory()) - ego_centers = np.array([ego_state.center.array for ego_state in ego_states]) - return linestrings(ego_centers) - - @property - def route_completion(self): - """ - TODO: remove this function. Move to reward builder. - :return: The current route completion of the simulation in [0, 1]. - """ - return self._route_completion - - def update_route_completion(self, new_route_completion: float) -> None: - """ - TODO: remove this function. Move to reward builder.
- """ - assert 0 <= new_route_completion <= 1 - self._route_completion = max(self._route_completion, new_route_completion) diff --git a/d123/simulation/gym/gym_env.py b/d123/simulation/gym/gym_env.py deleted file mode 100644 index d539df8e..00000000 --- a/d123/simulation/gym/gym_env.py +++ /dev/null @@ -1,91 +0,0 @@ -from typing import List, Optional, Tuple - -import numpy as np -import numpy.typing as npt - -from d123.common.datatypes.recording.detection_recording import DetectionRecording -from d123.common.datatypes.vehicle_state.ego_state import DynamicStateSE2, EgoStateSE2 -from d123.conversion.maps.abstract_map import AbstractMap -from d123.conversion.scene.abstract_scene import AbstractScene -from d123.geometry.vector import Vector2D -from d123.simulation.controller.motion_model.kinematic_bicycle_model import KinematicBicycleModel -from d123.simulation.observation.abstract_observation import AbstractObservation -from d123.simulation.observation.log_replay_observation import LogReplayObservation - - -class GymEnvironment: - """ - A simple demo environment for testing purposes. - This class is a placeholder and does not implement any specific functionality. - """ - - def __init__(self, scenes: List[AbstractScene]) -> None: - - self._scenes = scenes - self._current_iteration = 0 - self._current_scene: Optional[AbstractScene] = None - self._current_ego_state_se2: Optional[EgoStateSE2] = None - - # self._observation: AbstractObservation = AgentsObservation(None) - self._observation: AbstractObservation = LogReplayObservation() - self._observation.initialize() - - self._ego_replay: bool = False - - def reset(self, scene: Optional[AbstractScene]) -> Tuple[AbstractMap, EgoStateSE2, DetectionRecording]: - """ - Reset the environment to the initial state. - Returns a tuple containing the map, ego vehicle state, and detection observation. - """ - if scene is not None: - self._current_scene = scene - else: - self._current_scene = np.random.choice(self._scenes) - - self._current_scene_index = 0 - self._current_ego_state_se2 = self._current_scene.get_ego_state_at_iteration( - self._current_scene_index - ).ego_state_se2 - detection_observation = self._observation.reset(self._current_scene) - - return ( - self._current_scene.get_map_api(), - self._current_ego_state_se2, - detection_observation, - self._current_scene, - ) - - def step(self, action: npt.NDArray[np.float64]) -> Tuple[EgoStateSE2, DetectionRecording, bool]: - self._current_scene_index += 1 - if self._ego_replay: - self._current_ego_state_se2 = self._current_scene.get_ego_state_at_iteration( - self._current_scene_index - ).ego_state_se2 - else: - dynamic_car_state = dynamic_state_from_action(ego_state=self._current_ego_state_se2, action=action) - next_timepoint = self._current_scene.get_timepoint_at_iteration(self._current_scene_index) - self._current_ego_state_se2 = KinematicBicycleModel().step( - self._current_ego_state_se2, dynamic_car_state, next_timepoint - ) - - detection_observation = self._observation.step() - is_done = self._current_scene_index == self._current_scene.number_of_iterations - 1 - - return self._current_ego_state_se2, detection_observation, is_done - - -def dynamic_state_from_action(ego_state: EgoStateSE2, action: npt.NDArray[np.float64]) -> DynamicStateSE2: - """ - Convert the action to a dynamic car state. 
- """ - # Assuming action is in the form [acceleration, steering_angle] - long_acceleration = action[0] - tire_steering_rate = action[1] - - return DynamicStateSE2( - velocity=ego_state.dynamic_state_se2.velocity, - acceleration=Vector2D(long_acceleration, 0.0), - angular_velocity=ego_state.dynamic_state_se2.angular_velocity, - tire_steering_rate=tire_steering_rate, - angular_acceleration=0.0, - ) diff --git a/d123/simulation/gym/policy/__init__.py b/d123/simulation/gym/policy/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/d123/simulation/gym/policy/ppo/__init__.py b/d123/simulation/gym/policy/ppo/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/d123/simulation/gym/policy/ppo/ppo_config.py b/d123/simulation/gym/policy/ppo/ppo_config.py deleted file mode 100644 index 9606031c..00000000 --- a/d123/simulation/gym/policy/ppo/ppo_config.py +++ /dev/null @@ -1,282 +0,0 @@ -""" -Config class that contains all the hyperparameters needed to build any model. -""" - -import numpy as np - - -class GlobalConfig: - """ - Config class that contains all the hyperparameters needed to build any model. - """ - - def __init__(self): - self.frame_rate = 10.0 # Frames per second of the CARLA simulator - self.time_interval = 1.0 / self.frame_rate # ms per step in CARLA time. - - self.pixels_per_meter = 5.0 # 1 / pixels_per_meter = size of pixel in meters - self.bev_semantics_width = 192 # Numer of pixels the bev_semantics is wide - self.pixels_ev_to_bottom = 40 - self.bev_semantics_height = 192 # Numer of pixels the bev_semantics is high - # Distance of traffic lights considered relevant (in meters) - self.light_radius = 15.0 - self.debug = False # Whether to turn on debugging functions, like visualizations. - self.logging_freq = 10 # Log every 10 th frame - self.logger_region_of_interest = 30.0 # Meters around the car that will be logged. - self.route_points = 10 # Number of route points to render in logger - - half_second = int(self.frame_rate * 0.5) - # Roach ObsManager config. - self.bev_semantics_obs_config = { - "width_in_pixels": self.bev_semantics_width, - "pixels_ev_to_bottom": self.pixels_ev_to_bottom, - "pixels_per_meter": self.pixels_per_meter, - "history_idx": [ - -3 * half_second - 1, - -2 * half_second - 1, - -1 * half_second - 1, - -0 * half_second - 1, - ], - "scale_bbox": True, - "scale_mask_col": 1.0, - "map_folder": "maps_low_res", - } - self.num_route_points_rendered = 80 # Number of route points rendered into the BEV seg observation. - - # Color format BGR - self.bev_classes_list = ( - (0, 0, 0), # unlabeled - (150, 150, 150), # road - (255, 255, 255), # route - (255, 255, 0), # lane marking - (0, 0, 255), # vehicle - (0, 255, 255), # pedestrian - (255, 255, 0), # traffic light - (160, 160, 0), # stop sign - (0, 255, 0), # speed sign - ) - - # New bev observation parameters - self.use_new_bev_obs = False # Whether to use bev_observation.py instead of chauffeurnet.py - self.bev_semantics_width2 = 192 # Width and height of bev_semantic image - self.pixels_ev_to_bottom2 = 40 # Number of pixels from the ego vehicle to the bottom of the input. - self.pixels_per_meter2 = 5.0 # 1 / pixels_per_meter = size of pixel in meters - self.route_width = 16 # Width of the rendered route in pixel. - self.red_light_thickness = 3 # Width of the red light line - self.use_extra_control_inputs = False # Whether to use extra control inputs such as integral of past steering. 
- # Rough avg steering angle in degrees that the wheel can be set to - # The steering angle is +-70° for one wheel and +-48° for the other wheel, respectively - self.max_avg_steer_angle = 60.0 - self.condition_outside_junction = True # Whether to render the route outside junctions. - self.use_target_point = False # Whether to input a target point in the measurements. - - self.scale_bbox2 = True # Whether to scale up the bounding box extents: 1.0 for vehicles, 2.0 for pedestrians, 0.8 minimum extent - self.scale_factor_vehicle = 1.0 - self.scale_factor_walker = 2.0 - self.min_ext_bounding_box = 0.8 - self.scale_mask_col2 = 1.0 # Scaling factor for ego vehicle bounding box. - self.map_folder2 = "maps_low_res" # Map folder for the preprocessed map data - self.max_speed_actor = 33.33 # In m/s maximum speed we expect from other actors. = 120 km/h - self.min_speed_actor = -2.67 # In m/s minimum speed we expect from other actors. = -10 km/h - - # Extent of the ego vehicles bounding box - self.ego_extent_x = 2.44619083404541 - self.ego_extent_y = 0.9183566570281982 - self.ego_extent_z = 0.7451388239860535 - - # Roach reward hyperparameters. rr stands for roach reward - self.reward_type = "roach" # Reward function to be used during training. Options: roach, simple_reward - self.use_exploration_suggest = False # Whether to use the exploration loss from roach. - self.rr_maximum_speed = 6.0 # Maximum speed in m/s encouraged by the roach reward function. - self.vehicle_distance_threshold = 15 # Distance in meters within which vehicles are considered for the reward. - self.max_vehicle_detection_number = 10 # Maximum number of vehicles considered for the roach reward. - self.rr_vehicle_proximity_threshold = ( - 9.5 # Threshold within which vehicles are considered hazard in the reward. - ) - # Distance in meters within which pedestrians are considered for the reward. - self.pedestrian_distance_threshold = 15 - self.max_pedestrian_detection_number = 10 # Maximum number of pedestrians considered for the roach reward. - # Threshold within which pedestrians are considered hazard in the reward. - self.rr_pedestrian_proximity_threshold = 9.5 - self.rr_tl_offset = -0.8 * self.ego_extent_x # Probably offset to be kept to the entrance of the intersection. - self.rr_tl_dist_threshold = 18.0 # Distance at which traffic lights are considered for the speed reward. - # Meters. If the agent is farther away from the centerline (laterally) it counts as route deviation in the reward - self.min_thresh_lat_dist = 3.5 - self.eval_time = ( - 1200.0 # Seconds. After this time a timeout is triggered in the reward which counts as truncation. - ) - # Number of frames before the end of the episode where the exploration loss is applied. - self.n_step_exploration = 100 - # If true rr_maximum_speed will be overwritten to the current speed limit affecting the ego vehicle. - self.use_speed_limit_as_max_speed = False - - # Simple reward hyperparameters - self.consider_tl = True # If set to false traffic light infractions are turned off. Used in simple reward - self.terminal_reward = 0.0 # Reward at the end of the episode - self.terminal_hint = 10.0 # Penalty subtracted from the reward at the end of the episode when colliding. - self.normalize_rewards = False # Whether to use gymnasium's reward normalization. - self.speeding_infraction = False # Whether to terminate the route if the agent drives too fast.
- self.use_comfort_infraction = False # Whether to apply a soft penalty if comfort limits are exceeded - # These values are tuned for the nuPlan dataset - self.max_abs_lon_jerk = 4.13 # m/s^3 Comfort limit for longitudinal jerk - self.max_abs_mag_jerk = 8.37 # m/s^3 Comfort limit for jerk magnitude - self.min_lon_accel = -4.05 # m/s^2 Comfort limit for longitudinal acceleration - self.max_lon_accel = 2.40 # m/s^2 Comfort limit for longitudinal acceleration - self.max_abs_lat_accel = 4.89 # m/s^2 Comfort limit for lateral acceleration - self.max_abs_yaw_rate = 0.95 # rad/s Comfort limit for angular velocity - self.max_abs_yaw_accel = 1.93 # rad/s^2 Comfort limit for angular yaw acceleration - self.use_vehicle_close_penalty = False # Whether to use a penalty for being too close to the front vehicle. - # Whether to give a penalty depending on vehicle speed when crashing or running a red light - self.use_termination_hint = False - self.ego_forecast_time = 1.0 # Number of seconds that the ego agent is forecasted. - self.ego_forecast_min_speed = 2.5 # In m/s. Minimum speed in the ego forecast. - self.use_perc_progress = False # Whether to multiply RC reward by percentage away from lane center. - self.use_min_speed_infraction = ( - False # Whether to penalize the agent for driving slower than other agents on avg. - ) - self.use_leave_route_done = True # Whether to terminate the route when leaving the precomputed path. - self.use_outside_route_lanes = ( - False # Whether to terminate the route when invading opposing lanes or sidewalks. - ) - self.use_max_change_penalty = False # Whether to apply a soft penalty when the action changes too fast. - self.max_change = 0.25 # Maximum change in action allowed compared to last frame before a penalty is applied - self.penalize_yellow_light = True # Whether to penalize running a yellow light. - - # How often an action is repeated. - self.action_repeat = 1 - - self.num_value_measurements = 4 # Number of measurements exclusive to the value head. - - # Action and observation space - self.obs_num_measurements = 10 # Number of scalar measurements in observation. - self.obs_num_channels = 9 # Number of channels in the bev observation. - - # ###### Distribution parameters ############ - self.distribution = "beta" # Distribution used for the action space. Options: beta, normal, beta_uni_mix - # Minimum value for a, b of the beta distribution that the model can predict. Gets added to the softplus output. - self.beta_min_a_b_value = 1.0 - - self.normal_dist_init = ( - (0, -2), - (0, -2), - ) # Initial bias parameters of the normal distribution - self.normal_dist_action_dep_std = True # Whether the std of the normal distribution is dependent on the input - - self.uniform_percentage_z = 0.03 # Mixing percentage of uniform distribution in beta_uni_mix - - # We have 2 actions, corresponding to left/right steering and negative to positive acceleration.
- self.action_space_dim = 2 - self.action_space_min = -1.0 # Minimum value of the action space - self.action_space_max = 1.0 # Maximum value of the action space - # Number of frames at the beginning before learning starts, return brake - self.start_delay_frames = int(2.0 / self.time_interval + 0.5) - - # PPO training hyperparameters - self.experiment_name = "PPO_000" # the name of this experiment - self.gym_id = "NuPlanEnv-v0" # the id of the gym environment - self.learning_rate = 1.0e-5 # the learning rate of the optimizer - self.seed = 1 # seed of the experiment - self.total_timesteps = 10_000_000 # total time steps of the experiments - self.torch_deterministic = True # if toggled, `torch.backends.cudnn.deterministic=False` - self.cuda = True # if toggled, cuda will be enabled by default - self.track = False # if toggled, this experiment will be tracked with Weights and Biases - self.wandb_project_name = "ppo-roach" # the wandb project name - self.wandb_entity = None # the entity (team) of wandb project - self.capture_video = False # whether to capture videos of the agent performances (check out `videos` folder) - self.num_envs = 5 # the number of parallel game environments - self.lr_schedule = "kl" # Which lr schedule to use. Options: (linear, kl, none, step, cosine, cosine_restart) - self.gae = True # Use GAE for advantage computation - self.gamma = 0.99 # the discount factor gamma - self.gae_lambda = 0.95 # the lambda for the general advantage estimation - self.update_epochs = 4 # the K epochs to update the policy - self.norm_adv = False # Toggles advantages normalization - self.clip_coef = 0.1 # the surrogate clipping coefficient - self.clip_vloss = False # Toggles whether to use a clipped loss for the value function, as per the paper. - self.ent_coef = 0.01 # coefficient of the entropy - self.vf_coef = 0.5 # coefficient of the value function - self.max_grad_norm = 0.5 # the maximum norm for the gradient clipping - self.target_kl = 0.015 # the target KL divergence threshold - self.visualize = False # if toggled, Game will render on screen - self.logdir = "" # The directory to log the data into. - self.load_file = None # model weights for initialization - # Ports of the carla_gym wrapper to connect to. One port must be provided per environment (len(ports) == num_envs). - self.ports = (1111, 1112, 1113, 1114, 1115) - self.train_gpu_ids = (0,) # Which GPUs to train on. Index 0 indicates GPU for rank 0 etc. - self.compile_model = False # Whether to use torch compile on the model. - self.total_batch_size = 512 # The total amount of data collected at every step across all environments - self.total_minibatch_size = 256 # The total minibatch size used for training (across all environments) - self.expl_coef = 0.05 # Weight / coefficient of the exploration loss - self.lr_schedule_step = 8 # Number of times the KL divergence can be triggered before the lr reduces. - self.current_learning_rate = self.learning_rate # Learning rate at the latest iteration. - self.kl_early_stop = 0 # Counter that reduces lr once it reaches lr_schedule_step - self.schedule_free = False - self.adam_eps = 1e-5 # Adam optimizer epsilon parameter. Standard PPO value is 1e-5 - # Did not observe a significant speedup with these so we turn them off for better numerical precision. - self.allow_tf32 = False # Whether to use tf32 format, which has better speed but lower numeric precision.
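A back-of-the-envelope check of how the batch settings above interact, assuming the usual PPO bookkeeping (exact rounding in the trainer may differ):

num_envs = 5
total_batch_size = 512                  # transitions collected per rollout across all envs
total_minibatch_size = 256
update_epochs = 4
steps_per_env = total_batch_size // num_envs                      # 102 steps per env per rollout
minibatches_per_epoch = total_batch_size // total_minibatch_size  # 2 minibatches per epoch
gradient_steps = minibatches_per_epoch * update_epochs            # 8 gradient steps per rollout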
- self.benchmark = False # Whether to use cudnn benchmarking - self.matmul_precision = "highest" # Options: highest (float32), high (tf32), medium (bfloat16) - # Whether to collect data on cpu. This can be a bit faster, since it avoids CPU-GPU ping-pong, - # at the cost of running the model on the CPU during data collection. - self.cpu_collect = False - # Robust policy optimization https://arxiv.org/abs/2212.07536 - self.use_rpo = False - self.rpo_alpha = 0.5 # Size of the uniform random value that gets added to a, b - self.use_green_wave = False # If true in some routes all TL that the agent encounters are set to green. - self.green_wave_prob = 0.05 # Probability of a route using green wave (if use_green_wave=True) - # You should pick tiny networks for efficiency e.g. convnext_atto.d2_in1k - self.image_encoder = "roach_ln2" # Which image cnn encoder to use. Either roach, or timm model name - self.use_layer_norm = True # Whether to use LayerNorm before ReLU in MLPs. - # Applicable if use_layer_norm=True, whether to also apply layernorm to the policy head. - # Can be useful to remove to allow the policy to predict large values (for a, b of Beta). - self.use_layer_norm_policy_head = True - self.features_dim = 256 # Dimension of features produced by the state encoder - self.use_lstm = False # Whether to use an LSTM after the feature encoder. - self.num_lstm_layers = 1 # How many LSTM layers to use. - - # Whether to let the model predict the next frame as an auxiliary task during training - self.use_world_model_loss = False - # Number of frames to predict ahead with the world model loss. - self.num_future_prediction = int(0.5 * self.frame_rate) - self.world_model_loss_weight = 1.0 # Weight of the world model loss. - self.render_green_tl = True # Whether to render green traffic lights into the observation. - self.lr_schedule_step_factor = 0.1 # Multiplier when doing a step decrease in learning rate - self.lr_schedule_step_perc = ( - 0.5, - 0.75, - ) # Percentage of training run after which the lr is decayed - self.weight_decay = 0.0 # Weight decay applied to optimizer. AdamW is used when > 0.0 - self.lr_schedule_cosine_restarts = ( - 0.0, - 0.25, - 0.50, - 0.75, - 1.0, - ) # Percentage of training to do a restart - # https://arxiv.org/abs/1911.00357 - self.use_dd_ppo_preempt = False # Whether to use the dd-ppo preemption technique to early stop stragglers - self.dd_ppo_preempt_threshold = 0.6 # Percentage of nodes that need to be finished before the rest is stopped. - self.dd_ppo_min_perc = 0.25 # Minimum percentage of data points that need to be collected before preemption. - self.num_envs_per_gpu = 5 # Number of environments to put on one GPU. Only considered for dd_ppo.py - # Percentage of training at which the model is evaluated - self.eval_intervals = (0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9) - self.current_eval_interval_idx = 0 # Helper variable to remember which model to save next. - self.use_temperature = False # Whether the output distribution parameters are divided by a learned temperature - self.min_temperature = 0.1 # Minimum value the learned temperature can take. - - # Whether to use the histogram loss gauss to train the value head via classification (instead of regression + L2) - self.use_hl_gauss_value_loss = False - self.hl_gauss_std = 0.75 # Standard deviation used for the gaussian histogram loss - self.hl_gauss_vmin = -10.0 # Min value of the histogram in HL_Gauss.
Tune to be in return range - self.hl_gauss_vmax = 30.0 # Max value of the histogram in HL_Gauss. Tune to be in return range - self.hl_gauss_bucket_size = 1.0 # Size of each bucket in the HL_Gauss histogram. - self.hl_gauss_num_classes = int((self.hl_gauss_vmax - self.hl_gauss_vmin) / self.hl_gauss_bucket_size) + 1 - - self.global_step = 0 # Current iteration of the training - self.max_training_score = -np.inf # Highest training score achieved so far - self.best_iteration = 0 # Iteration of the best model - self.latest_iteration = 0 # Iteration of the latest model - - def initialize(self, **kwargs): - for k, v in kwargs.items(): - if hasattr(self, k): - setattr(self, k, v) diff --git a/d123/simulation/gym/policy/ppo/ppo_distributions.py b/d123/simulation/gym/policy/ppo/ppo_distributions.py deleted file mode 100644 index 5b8321e6..00000000 --- a/d123/simulation/gym/policy/ppo/ppo_distributions.py +++ /dev/null @@ -1,371 +0,0 @@ -""" -Contains various classes for different probability distributions that sample the actions. -E.g. Gaussian, Beta, Uniform+Beta -""" - -from typing import Tuple - -import torch -from torch import nn -from torch.distributions import Beta, Normal - - -def sum_independent_dims(tensor: torch.Tensor) -> torch.Tensor: - if len(tensor.shape) > 1: - tensor = tensor.sum(dim=1) - else: - tensor = tensor.sum() - return tensor - - -class DiagGaussianDistribution(nn.Module): - """ - Wrapper around the torch Normal distribution with some additional functionality. - """ - - def __init__(self, action_dim: int, dist_init=None, action_dependent_std=False): - super().__init__() - assert action_dim == 2 - - self.distribution = None - self.action_dim = action_dim - self.dist_init = dist_init - self.action_dependent_std = action_dependent_std - - self.low = None - self.high = None - self.log_std_max = 2 - self.log_std_min = -20 - - self.suggest_go = nn.Parameter(torch.FloatTensor([0.66, -3]), requires_grad=False) - self.suggest_stop = nn.Parameter(torch.FloatTensor([-0.66, -3]), requires_grad=False) - self.suggest_turn = nn.Parameter(torch.FloatTensor([0.0, -1]), requires_grad=False) - self.suggest_straight = nn.Parameter(torch.FloatTensor([3.0, 3.0]), requires_grad=False) - - def proba_distribution_net(self, latent_dim: int) -> Tuple[nn.Module, nn.Parameter]: - mean_actions = nn.Linear(latent_dim, self.action_dim) - if self.action_dependent_std: - log_std = nn.Linear(latent_dim, self.action_dim) - else: - log_std = nn.Parameter(-2.0 * torch.ones(self.action_dim), requires_grad=True) - - if self.dist_init is not None: - # log_std.weight.data.fill_(0.01) - # mean_actions.weight.data.fill_(0.01) - # acc/steer - mean_actions.bias.data[0] = self.dist_init[0][0] - mean_actions.bias.data[1] = self.dist_init[1][0] - if self.action_dependent_std: - log_std.bias.data[0] = self.dist_init[0][1] - log_std.bias.data[1] = self.dist_init[1][1] - else: - init_tensor = torch.FloatTensor([self.dist_init[0][1], self.dist_init[1][1]]) - log_std = nn.Parameter(init_tensor, requires_grad=True) - - return mean_actions, log_std - - def proba_distribution(self, mean_actions: torch.Tensor, log_std: torch.Tensor) -> "DiagGaussianDistribution": - if self.action_dependent_std: - log_std = torch.clamp(log_std, self.log_std_min, self.log_std_max) - action_std = torch.ones_like(mean_actions) * log_std.exp() - self.distribution = Normal(mean_actions, action_std) - return self - - def log_prob(self, actions: torch.Tensor) -> torch.Tensor: - log_prob = self.distribution.log_prob(actions) - return 
sum_independent_dims(log_prob) - - def entropy(self) -> torch.Tensor: - return self.distribution.entropy() - - def exploration_loss(self, exploration_suggests) -> torch.Tensor: - # [('stop'/'go'/None, 'turn'/'straight'/None)] - # (batch_size, action_dim) - mu = self.distribution.loc.detach().clone() - sigma = self.distribution.scale.detach().clone() - - # 0: '', '' - # 1: 'go', '' - # 2: 'go', 'turn' - # 3: 'stop', '' - # TODO see if this can be vectorized - for i, suggest_indx in enumerate(exploration_suggests): - # Index 0 means original - if suggest_indx == 1: # Blocked - # No steer suggest - mu[i, 1] = self.suggest_go[0] - sigma[i, 1] = self.suggest_go[1] - elif suggest_indx == 2: # Route deviation - mu[i, 0] = self.suggest_turn[0] - sigma[i, 0] = self.suggest_turn[1] - - mu[i, 1] = self.suggest_go[0] - sigma[i, 1] = self.suggest_go[1] - elif suggest_indx == 3: # Collision, red light, stop sign - mu[i, 1] = self.suggest_stop[0] - sigma[i, 1] = self.suggest_stop[1] - - dist_ent = Normal(mu, sigma) - - exploration_loss = torch.distributions.kl_divergence(dist_ent, self.distribution) - return torch.mean(exploration_loss) - - def sample(self) -> torch.Tensor: - return self.distribution.rsample() - - def mode(self) -> torch.Tensor: - return self.distribution.mean - - def get_actions(self, deterministic: bool = False) -> torch.Tensor: - if deterministic: - return self.mode() - return self.sample() - - -class BetaDistribution(nn.Module): - """ - Wrapper around the torch Beta distribution with some additional functionality. - """ - - def __init__(self, action_dim=2, dist_init=None): - super().__init__() - assert action_dim == 2 - - self.distribution = None - self.action_dim = action_dim - self.dist_init = dist_init - self.low = 0.0 - self.high = 1.0 - - # [alpha, beta], [0, 1] # Changed order from original repo to have alpha first. 
- self.suggest_go = nn.Parameter(torch.FloatTensor([2.5, 1.0]), requires_grad=False) - self.suggest_stop = nn.Parameter(torch.FloatTensor([1.0, 1.5]), requires_grad=False) - self.suggest_turn = nn.Parameter(torch.FloatTensor([1.0, 1.0]), requires_grad=False) - - def proba_distribution_net(self, latent_dim: int) -> Tuple[nn.Module, nn.Module]: - - linear_alpha = nn.Linear(latent_dim, self.action_dim) - linear_beta = nn.Linear(latent_dim, self.action_dim) - - if self.dist_init is not None: - # acc - linear_alpha.bias.data[0] = self.dist_init[0][1] - linear_beta.bias.data[0] = self.dist_init[0][0] - # steer - linear_alpha.bias.data[1] = self.dist_init[1][1] - linear_beta.bias.data[1] = self.dist_init[1][0] - - alpha = nn.Sequential(linear_alpha) - beta = nn.Sequential(linear_beta) - return alpha, beta - - def proba_distribution(self, alpha, beta): - self.distribution = Beta(alpha, beta) - return self - - def log_prob(self, actions: torch.Tensor) -> torch.Tensor: - log_prob = self.distribution.log_prob(actions) - return sum_independent_dims(log_prob) - - def entropy(self): - return self.distribution.entropy() - - def exploration_loss(self, exploration_suggests) -> torch.Tensor: - alpha = self.distribution.concentration1.detach().clone() - beta = self.distribution.concentration0.detach().clone() - - # 0: '', '' - # 1: 'go', '' - # 2: 'go', 'turn' - # 3: 'stop', '' - # TODO see if this can be vectorized - for i, suggest_indx in enumerate(exploration_suggests): - # Index 0 means original - if suggest_indx == 1: # Blocked - # No steer suggest - alpha[i, 1] = self.suggest_go[0] - beta[i, 1] = self.suggest_go[1] - elif suggest_indx == 2: # Route deviation - alpha[i, 0] = self.suggest_turn[0] - beta[i, 0] = self.suggest_turn[1] - - alpha[i, 1] = self.suggest_go[0] - beta[i, 1] = self.suggest_go[1] - elif suggest_indx == 3: # Collision, red light, stop sign - alpha[i, 1] = self.suggest_stop[0] - beta[i, 1] = self.suggest_stop[1] - - dist_ent = Beta(alpha, beta) - - exploration_loss = torch.distributions.kl_divergence(self.distribution, dist_ent) - return torch.mean(exploration_loss) - - def sample(self) -> torch.Tensor: - # Reparametrization trick to pass gradients - return self.distribution.rsample() - - def mode(self) -> torch.Tensor: - alpha = self.distribution.concentration1 - beta = self.distribution.concentration0 - x = torch.zeros_like(alpha) - x[:, 1] += 0.5 - mask1 = (alpha > 1) & (beta > 1) - x[mask1] = (alpha[mask1] - 1) / (alpha[mask1] + beta[mask1] - 2) - - mask2 = (alpha <= 1) & (beta > 1) - x[mask2] = 0.0 - - mask3 = (alpha > 1) & (beta <= 1) - x[mask3] = 1.0 - - # mean - mask4 = (alpha <= 1) & (beta <= 1) - x[mask4] = self.distribution.mean[mask4] - - return x - - def evaluate_mean(self) -> torch.Tensor: - return self.distribution.mean - - def get_actions(self, deterministic: bool = False) -> torch.Tensor: - if deterministic: - # return self.mode() # TODO cleanup - return self.evaluate_mean() - return self.sample() - - -class BetaUniformMixtureDistribution(nn.Module): - """ - A Mixture of the Beta and Uniform Distribution. Meant to ease exploration because the distribution has heavier tails - than the beta distribution. In deterministic mode / inference the action is only dependent on the beta distribution. - The KL divergence between this distribution and past ones is computed based only on the Beta part.
- PDF = (1-z) * Beta(a,b) + z * Uniform(0,1) - """ - - def __init__(self, action_dim=2, dist_init=None, uniform_percentage_z=0.1): - super().__init__() - assert action_dim == 2 - - self.distribution = None # The current beta distribution - self.uniform_distribution = None - self.action_dim = action_dim - self.dist_init = dist_init - self.low = 0.0 - self.high = 1.0 - self.beta_perc = 1.0 - uniform_percentage_z - self.uniform_perc = uniform_percentage_z - - # [alpha, beta], [0, 1] # Changed order from original repo to have alpha first. - self.suggest_go = nn.Parameter(torch.FloatTensor([2.5, 1.0]), requires_grad=False) - self.suggest_stop = nn.Parameter(torch.FloatTensor([1.0, 1.5]), requires_grad=False) - self.suggest_turn = nn.Parameter(torch.FloatTensor([1.0, 1.0]), requires_grad=False) - - def proba_distribution_net(self, latent_dim: int) -> Tuple[nn.Module, nn.Module]: - - linear_alpha = nn.Linear(latent_dim, self.action_dim) - linear_beta = nn.Linear(latent_dim, self.action_dim) - - if self.dist_init is not None: - # acc - linear_alpha.bias.data[0] = self.dist_init[0][1] - linear_beta.bias.data[0] = self.dist_init[0][0] - # steer - linear_alpha.bias.data[1] = self.dist_init[1][1] - linear_beta.bias.data[1] = self.dist_init[1][0] - - alpha = nn.Sequential(linear_alpha) - beta = nn.Sequential(linear_beta) - return alpha, beta - - def proba_distribution(self, alpha, beta): - self.action_shape = alpha.shape - lower_bound = torch.zeros_like(alpha, requires_grad=False, device=alpha.device) - upper_bound = torch.ones_like(alpha, requires_grad=False, device=alpha.device) - self.uniform_distribution = torch.distributions.uniform.Uniform(lower_bound, upper_bound) - self.distribution = Beta(alpha, beta) - return self - - def log_prob(self, actions: torch.Tensor) -> torch.Tensor: - uniform_pdf = torch.ones_like(actions, device=actions.device, requires_grad=False) - pdf = self.beta_perc * self.distribution.log_prob(actions).exp() + self.uniform_perc * uniform_pdf - log_prob = torch.log(pdf) - return sum_independent_dims(log_prob) - - def entropy(self): - # TODO Since the uniform is constant, maximizing the entropy of the beta is the same as maximizing the entropy of - # the BetaUniMix. But compute real entropy later. - return self.distribution.entropy() - - def exploration_loss(self, exploration_suggests) -> torch.Tensor: - """ - We ignore the uniform in this computation since its constant. - """ - alpha = self.distribution.concentration1.detach().clone() - beta = self.distribution.concentration0.detach().clone() - - # 0: '', '' - # 1: 'go', '' - # 2: 'go', 'turn' - # 3: 'stop', '' - # TODO see if this can be vectorized - for i, suggest_indx in enumerate(exploration_suggests): - # Index 0 means original - if suggest_indx == 1: # Blocked - # No steer suggest - alpha[i, 1] = self.suggest_go[0] - beta[i, 1] = self.suggest_go[1] - elif suggest_indx == 2: # Route deviation - alpha[i, 0] = self.suggest_turn[0] - beta[i, 0] = self.suggest_turn[1] - - alpha[i, 1] = self.suggest_go[0] - beta[i, 1] = self.suggest_go[1] - elif suggest_indx == 3: # Collision, red light, stop sign - alpha[i, 1] = self.suggest_stop[0] - beta[i, 1] = self.suggest_stop[1] - - dist_ent = Beta(alpha, beta) - - exploration_loss = torch.distributions.kl_divergence(self.distribution, dist_ent) - return torch.mean(exploration_loss) - - def sample(self) -> torch.Tensor: - """ - We sample from the mixture distribution by first drawing a uniform random variable in range [0,1]. 
- If it is > uniform_perc we draw from the Beta; otherwise we draw from the uniform distribution. - This is not differentiable, but it doesn't have to be for PPO and other stochastic policy gradient methods. - """ - prob = torch.rand(1) - if prob < self.uniform_perc: - return self.uniform_distribution.rsample() # Uniform - else: - return self.distribution.rsample() # Beta - - def mode(self) -> torch.Tensor: - """ - Uniform is ignored when computing the mode of the distribution. We effectively want to remove the uniform during - inference, since it is only meant to help exploration during training. - """ - alpha = self.distribution.concentration1 - beta = self.distribution.concentration0 - x = torch.zeros_like(alpha) - x[:, 1] += 0.5 - mask1 = (alpha > 1) & (beta > 1) - x[mask1] = (alpha[mask1] - 1) / (alpha[mask1] + beta[mask1] - 2) - - mask2 = (alpha <= 1) & (beta > 1) - x[mask2] = 0.0 - - mask3 = (alpha > 1) & (beta <= 1) - x[mask3] = 1.0 - - # mean - mask4 = (alpha <= 1) & (beta <= 1) - x[mask4] = self.distribution.mean[mask4] - - return x - - def get_actions(self, deterministic: bool = False) -> torch.Tensor: - if deterministic: - return self.mode() - return self.sample() diff --git a/d123/simulation/gym/policy/ppo/ppo_model.py b/d123/simulation/gym/policy/ppo/ppo_model.py deleted file mode 100644 index 911e78f4..00000000 --- a/d123/simulation/gym/policy/ppo/ppo_model.py +++ /dev/null @@ -1,916 +0,0 @@ -""" -Agent architecture from https://github.com/zhejz/carla-roach -""" - -from copy import deepcopy -from typing import Dict, Optional - -import cv2 -import gym -import numpy as np - -# import timm -import torch -from torch import nn - -from d123.simulation.gym.policy.ppo.ppo_config import GlobalConfig -from d123.simulation.gym.policy.ppo.ppo_distributions import ( - BetaDistribution, - BetaUniformMixtureDistribution, - DiagGaussianDistribution, - ) - -# class CustomCnn(nn.Module): -# """ -# A custom CNN with timm backbone extractors. -# """ - -# def __init__(self, config, n_input_channels): -# super().__init__() -# self.config = config -# self.image_encoder = timm.create_model( -# config.image_encoder, -# in_chans=n_input_channels, -# pretrained=False, -# features_only=True, -# ) -# final_width = int(self.config.bev_semantics_width / self.image_encoder.feature_info.info[-1]["reduction"]) -# final_height = int(self.config.bev_semantics_height / self.image_encoder.feature_info.info[-1]["reduction"]) -# final_total_pixels = final_height * final_width -# # We want to output roughly the same amount of features as the roach encoder.
-# self.out_channels = int(1024 / final_total_pixels) -# self.change_channel = nn.Conv2d( -# self.image_encoder.feature_info.info[-1]["num_chs"], -# self.out_channels, -# kernel_size=1, -# ) - -# def forward(self, x): -# x = self.image_encoder(x) -# x = x[-1] -# x = self.change_channel(x) -# x = torch.flatten(x, start_dim=1) -# return x - - -# Input image feature extractor class -class XtMaCNN(nn.Module): - """ - Inspired by https://github.com/xtma/pytorch_car_caring - """ - - def __init__(self, observation_space, states_neurons, config): - super().__init__() - self.features_dim = config.features_dim - self.config = config - - n_input_channels = observation_space["bev_semantics"].shape[0] - - if self.config.image_encoder == "roach": - self.cnn = nn.Sequential( # in [B, 15, 192, 192] - nn.Conv2d(n_input_channels, 8, kernel_size=5, stride=2), # -> [B, 8, 94, 94] - nn.ReLU(), - nn.Conv2d(8, 16, kernel_size=5, stride=2), # -> [B, 16, 45, 45] - nn.ReLU(), - nn.Conv2d(16, 32, kernel_size=5, stride=2), # -> [B, 32, 21, 21] - nn.ReLU(), - nn.Conv2d(32, 64, kernel_size=3, stride=2), # -> [B, 64, 10, 10] - nn.ReLU(), - nn.Conv2d(64, 128, kernel_size=3, stride=2), # -> [B, 128, 4, 4] - nn.ReLU(), - nn.Conv2d(128, 256, kernel_size=3, stride=1), # -> [B, 256, 2, 2] - nn.ReLU(), - ) - elif self.config.image_encoder == "roach_ln": - self.cnn = nn.Sequential( # in [B, 15, 192, 192] - nn.Conv2d(n_input_channels, 8, kernel_size=5, stride=2), # -> [B, 8, 94, 94] - nn.LayerNorm((8, 94, 94)), - nn.ReLU(), - nn.Conv2d(8, 16, kernel_size=5, stride=2), # -> [B, 16, 45, 45] - nn.LayerNorm((16, 45, 45)), - nn.ReLU(), - nn.Conv2d(16, 32, kernel_size=5, stride=2), # -> [B, 32, 21, 21] - nn.LayerNorm((32, 21, 21)), - nn.ReLU(), - nn.Conv2d(32, 64, kernel_size=3, stride=2), # -> [B, 64, 10, 10] - nn.LayerNorm((64, 10, 10)), - nn.ReLU(), - nn.Conv2d(64, 128, kernel_size=3, stride=2), # -> [B, 128, 4, 4] - nn.LayerNorm((128, 4, 4)), - nn.ReLU(), - nn.Conv2d(128, 256, kernel_size=3, stride=1), # -> [B, 256, 2, 2] - nn.LayerNorm((256, 2, 2)), - nn.ReLU(), - ) - elif self.config.image_encoder == "roach_ln2": # input is expected to be [B, C, 256, 256] - self.cnn = nn.Sequential( - nn.Conv2d(n_input_channels, 8, kernel_size=5, stride=2), # -> [B, 8, 126, 126] - nn.LayerNorm((8, 126, 126)), - nn.ReLU(), - nn.Conv2d(8, 16, kernel_size=5, stride=2), # -> [B, 16, 61, 61] - nn.LayerNorm((16, 61, 61)), - nn.ReLU(), - nn.Conv2d(16, 24, kernel_size=5, stride=2), # -> [B, 24, 29, 29] - nn.LayerNorm((24, 29, 29)), - nn.ReLU(), - nn.Conv2d(24, 32, kernel_size=5, stride=2), # -> [B, 32, 13, 13] - nn.LayerNorm((32, 13, 13)), - nn.ReLU(), - nn.Conv2d(32, 64, kernel_size=3, stride=2), # -> [B, 64, 6, 6] - nn.LayerNorm((64, 6, 6)), - nn.ReLU(), - nn.Conv2d(64, 128, kernel_size=3, stride=1), # -> [B, 128, 4, 4] - nn.LayerNorm((128, 4, 4)), - nn.ReLU(), - nn.Conv2d(128, 256, kernel_size=3, stride=1), # -> [B, 256, 2, 2] - nn.LayerNorm((256, 2, 2)), - nn.ReLU(), - ) - # else: - # self.cnn = CustomCnn(config, n_input_channels) - - # Compute shape by doing one forward pass - with torch.no_grad(): - self.cnn_out_shape = self.cnn( - torch.as_tensor(observation_space["bev_semantics"].sample()[None]).float() - ).shape - self.n_flatten = self.cnn_out_shape[1] * self.cnn_out_shape[2] * self.cnn_out_shape[3] - - self.states_neurons = states_neurons[-1] - - if self.config.use_layer_norm: - self.linear = nn.Sequential( - nn.Linear(self.n_flatten + states_neurons[-1], 512), - nn.LayerNorm(512), - nn.ReLU(), - nn.Linear(512, config.features_dim),
nn.LayerNorm(config.features_dim), - nn.ReLU(), - ) - else: - self.linear = nn.Sequential( - nn.Linear(self.n_flatten + states_neurons[-1], 512), - nn.ReLU(), - nn.Linear(512, config.features_dim), - nn.ReLU(), - ) - - states_neurons = [observation_space["measurements"].shape[0]] + list(states_neurons) - self.state_linear = [] - for i in range(len(states_neurons) - 1): - self.state_linear.append(nn.Linear(states_neurons[i], states_neurons[i + 1])) - if self.config.use_layer_norm: - self.state_linear.append(nn.LayerNorm(states_neurons[i + 1])) - self.state_linear.append(nn.ReLU()) - self.state_linear = nn.Sequential(*self.state_linear) - - if self.config.image_encoder == "roach": - self.apply(self._weights_init) - - @staticmethod - def _weights_init(m): - if isinstance(m, nn.Conv2d): - nn.init.xavier_uniform_(m.weight, gain=nn.init.calculate_gain("relu")) - nn.init.constant_(m.bias, 0.1) - - def forward(self, bev_semantics, measurements): - x = self.cnn(bev_semantics) - x = torch.flatten(x, start_dim=1) - latent_state = self.state_linear(measurements) - - x = torch.cat((x, latent_state), dim=1) - x = self.linear(x) - return x - - -class WorldModelDecoder(nn.Module): - """ - Decoder that predicts a next state given features - """ - - def __init__(self, cnn_out_shape, cnn_n_flatten, states_neurons, features_dim, config): - super().__init__() - self.cnn_out_shape = cnn_out_shape - self.cnn_n_flatten = cnn_n_flatten - self.states_neurons = states_neurons - self.features_dim = features_dim - self.config = config - - if self.config.use_layer_norm: - self.linear_decoder = nn.Sequential( - nn.Linear(features_dim, 512), - nn.LayerNorm(512), - nn.ReLU(), - nn.Linear(512, cnn_n_flatten + states_neurons), - nn.LayerNorm(cnn_n_flatten + states_neurons), - nn.ReLU(), - ) - else: - self.linear_decoder = nn.Sequential( - nn.Linear(features_dim, 512), - nn.ReLU(), - nn.Linear(512, cnn_n_flatten + states_neurons), - nn.ReLU(), - ) - - self.bev_semantic_decoder = nn.Sequential( - nn.Conv2d(self.cnn_out_shape[1], 128, (1, 1)), - nn.ReLU(inplace=True), - nn.Upsample(scale_factor=2, mode="bilinear", align_corners=False), - nn.Conv2d(128, 64, (3, 3), padding=1), - nn.ReLU(inplace=True), - nn.Upsample( - size=( - self.config.bev_semantics_height // 4, - self.config.bev_semantics_width // 4, - ), - mode="bilinear", - align_corners=False, - ), - nn.Conv2d(64, 32, (3, 3), padding=1), - nn.ReLU(inplace=True), - nn.Conv2d(32, 16, (3, 3), padding=1), - nn.ReLU(inplace=True), - nn.Conv2d(16, self.config.obs_num_channels + 1, kernel_size=(1, 1)), # + 1 = Background class - nn.Upsample( - size=( - self.config.bev_semantics_height, - self.config.bev_semantics_width, - ), - mode="bilinear", - align_corners=False, - ), - ) - - self.measurement_decoder = nn.Linear(states_neurons, self.config.obs_num_measurements) - - def forward(self, features): - features = self.linear_decoder(features) - features_cnn = features[:, : self.cnn_n_flatten] - features_measurements = features[:, self.cnn_n_flatten :] - features_cnn = features_cnn.view(-1, self.cnn_out_shape[1], self.cnn_out_shape[2], self.cnn_out_shape[3]) - - pred_semantic = self.bev_semantic_decoder(features_cnn) - pred_measurement = self.measurement_decoder(features_measurements) - - return pred_semantic, pred_measurement - - -class PPOPolicy(nn.Module): - """ - Neural network policy designed for driving and training with the PPO algorithm. 
- """ - - def __init__( - self, - observation_space: gym.spaces.Space, - action_space: gym.spaces.Space, - policy_head_arch=(256, 256), - value_head_arch=(256, 256), - states_neurons=(256, 256), - config: Optional[GlobalConfig] = None, - ): - - super().__init__() - self.observation_space = observation_space - self.action_space = action_space - self.config: GlobalConfig = config - - self.features_extractor = XtMaCNN(observation_space, config=config, states_neurons=states_neurons) - - if self.config.use_lstm: - self.lstm = nn.LSTM( - config.features_dim, - config.features_dim, - num_layers=config.num_lstm_layers, - ) - for name, param in self.lstm.named_parameters(): - if "bias" in name: - nn.init.constant_(param, 0) - elif "weight" in name: - nn.init.orthogonal_(param, 1.0) - - if self.config.use_world_model_loss: - self.feature_decoder = WorldModelDecoder( - self.features_extractor.cnn_out_shape, - self.features_extractor.n_flatten, - self.features_extractor.states_neurons, - self.features_extractor.features_dim, - self.config, - ) - - if self.config.distribution == "beta": - self.action_dist = BetaDistribution(int(np.prod(action_space.shape))) - elif self.config.distribution == "normal": - # Hyperparameters are from roach - self.action_dist = DiagGaussianDistribution( - int(np.prod(action_space.shape)), - dist_init=self.config.normal_dist_init, - action_dependent_std=self.config.normal_dist_action_dep_std, - ) - elif self.config.distribution == "beta_uni_mix": - self.action_dist = BetaUniformMixtureDistribution( - int(np.prod(action_space.shape)), - uniform_percentage_z=self.config.uniform_percentage_z, - ) - else: - raise ValueError("Distribution selected that is not implemented. Options: beta, normal, beta_uni_mix") - - self.policy_head_arch = list(policy_head_arch) - self.value_head_arch = list(value_head_arch) - self.activation_fn = nn.ReLU - - self.action_space_low = nn.Parameter(torch.from_numpy(self.action_space.low), requires_grad=False) - self.action_space_high = nn.Parameter(torch.from_numpy(self.action_space.high), requires_grad=False) - - self.build() - - def build(self) -> None: - last_layer_dim_pi = self.features_extractor.features_dim - policy_net = [] - for layer_size in self.policy_head_arch: - policy_net.append(nn.Linear(last_layer_dim_pi, layer_size)) - if self.config.use_layer_norm and self.config.use_layer_norm_policy_head: - policy_net.append(nn.LayerNorm(layer_size)) - policy_net.append(self.activation_fn()) - last_layer_dim_pi = layer_size - - self.policy_head = nn.Sequential(*policy_net) - # mu->alpha/mean, sigma->beta/log_std (nn.Module, nn.Parameter) - self.dist_mu, self.dist_sigma = self.action_dist.proba_distribution_net(last_layer_dim_pi) - - if self.config.use_temperature: - # * 2 for a and b assuming beta distribution - self.temperature_layer = nn.Sequential( - nn.Linear(last_layer_dim_pi, self.action_dist.action_dim * 2), - nn.Sigmoid(), - ) - - last_layer_dim_vf = self.features_extractor.features_dim + self.config.num_value_measurements - value_net = [] - for layer_size in self.value_head_arch: - value_net.append(nn.Linear(last_layer_dim_vf, layer_size)) - if self.config.use_layer_norm: - value_net.append(nn.LayerNorm(layer_size)) - value_net.append(self.activation_fn()) - last_layer_dim_vf = layer_size - - if self.config.use_hl_gauss_value_loss: - value_net.append(nn.Linear(last_layer_dim_vf, self.config.hl_gauss_num_classes)) - else: - value_net.append(nn.Linear(last_layer_dim_vf, 1)) - self.value_head = nn.Sequential(*value_net) - - def 
get_features(self, observations) -> torch.Tensor:
- """
- :param observations: Dict with "bev_semantics", torch.Tensor (num_envs, frame_stack*channel, height, width),
- and "measurements", torch.Tensor (num_envs, state_dim).
- """
- bev_semantics = observations["bev_semantics"]
- measurements = observations["measurements"]
- birdview = bev_semantics / 255.0
- features = self.features_extractor(birdview, measurements)
- return features
-
- def get_action_dist_from_features(self, features: torch.Tensor, actions=None):
- latent_pi = self.policy_head(features)
- mu = self.dist_mu(latent_pi)
- sigma = self.dist_sigma(latent_pi)
-
- if actions is not None and self.config.use_rpo:
- # Sample again to add stochasticity to the policy (Robust Policy Optimization, https://arxiv.org/abs/2212.07536).
- # The Beta distribution requires parameters > 0, so we add the random number before the
- # activation function. We add the random number only to alpha, which should have a similar effect of shifting the
- # mean as for the originally proposed Gaussian distribution.
- z = torch.zeros(mu.shape, dtype=torch.float32, device=mu.device).uniform_(
- -self.config.rpo_alpha, self.config.rpo_alpha
- )
- mu = mu + z
-
- # We don't need an activation function for the normal distribution because std is predicted in log space.
- if self.config.distribution in ("beta", "beta_uni_mix"):
- mu = nn.functional.softplus(mu)
- sigma = nn.functional.softplus(sigma)
- # NOTE adding the nugget to mu only makes sense with the beta distribution.
- mu = mu + self.config.beta_min_a_b_value
- sigma = sigma + self.config.beta_min_a_b_value
-
- if self.config.use_temperature:
- temperature = self.temperature_layer(latent_pi)
- mu_temperature = temperature[:, : self.action_dist.action_dim]
- sigma_temperature = temperature[:, self.action_dist.action_dim : self.action_dist.action_dim * 2]
- # Put them from [0,1] into range [min, 1]
- mu_temperature = (1.0 - self.config.min_temperature) * mu_temperature + self.config.min_temperature
- sigma_temperature = (1.0 - self.config.min_temperature) * sigma_temperature + self.config.min_temperature
-
- mu = mu / mu_temperature
- sigma = sigma / sigma_temperature
-
- return (
- self.action_dist.proba_distribution(mu, sigma),
- mu.detach(),
- sigma.detach(),
- )
-
- def lstm_forward(self, features, lstm_state, done):
- # LSTM logic: reset the recurrent state wherever an episode ended.
- batch_size = lstm_state[0].shape[1]
- hidden = features.reshape((-1, batch_size, self.lstm.input_size))
- done = done.reshape((-1, batch_size))
- new_hidden = []
- for h, d in zip(hidden, done):
- h, lstm_state = self.lstm(
- h.unsqueeze(0),
- (
- (1.0 - d).view(1, -1, 1) * lstm_state[0],
- (1.0 - d).view(1, -1, 1) * lstm_state[1],
- ),
- )
- new_hidden += [h]
- new_hidden = torch.flatten(torch.cat(new_hidden), 0, 1)
- return new_hidden, lstm_state
-
- def get_value(self, obs_dict: Dict[str, torch.Tensor], lstm_state=None, done=None):
- features = self.get_features(obs_dict)
-
- if self.config.use_lstm:
- features, _ = self.lstm_forward(features, lstm_state, done)
-
- value_features = torch.cat((features, obs_dict["value_measurements"]), dim=1)
- values = self.value_head(value_features)
- return values
-
- def forward(
- self,
- obs_dict: Dict[str, np.ndarray],
- actions=None,
- deterministic: bool = False,
- exploration_suggests=None,
- lstm_state=None,
- done=None,
- ):
- """
- actions are expected to be unscaled actions! 
- """ - features = self.get_features(obs_dict) - - if self.config.use_lstm: - features, lstm_state = self.lstm_forward(features, lstm_state, done) - - pred_sem = pred_measure = None - # Additional condition turns of world model prediction during data collection where it is not used. - if self.config.use_world_model_loss and (actions is not None or deterministic): - pred_sem, pred_measure = self.feature_decoder(features) - - value_features = torch.cat((features, obs_dict["value_measurements"]), dim=1) - values = self.value_head(value_features) - distribution, mu, sigma = self.get_action_dist_from_features(features, actions) - - if actions is None: - actions = distribution.get_actions(deterministic=deterministic) - else: - actions = self.scale_action(actions) - - log_prob = distribution.log_prob(actions) - - actions = self.unscale_action(actions) - - entropy = distribution.entropy().sum(1) - exp_loss = None - - if exploration_suggests is not None: - exp_loss = distribution.exploration_loss(exploration_suggests) - - return ( - actions, - log_prob, - entropy, - values, - exp_loss, - mu, - sigma, - distribution.distribution, - pred_sem, - pred_measure, - lstm_state, - ) - - def scale_action(self, action: torch.Tensor, eps=1e-7) -> torch.Tensor: - # input action \in [a_low, a_high] - # output action \in [d_low+eps, d_high-eps] - d_low, d_high = self.action_dist.low, self.action_dist.high # scalar - - if d_low is not None and d_high is not None: - a_low, a_high = self.action_space_low, self.action_space_high - action = (action - a_low) / (a_high - a_low) * (d_high - d_low) + d_low - action = torch.clamp(action, d_low + eps, d_high - eps) - return action - - def unscale_action(self, action: torch.Tensor) -> torch.Tensor: - # input action \in [d_low, d_high] - # output action \in [a_low+eps, a_high-eps] - d_low, d_high = self.action_dist.low, self.action_dist.high # scalar - - if d_low is not None and d_high is not None: - a_low, a_high = self.action_space_low, self.action_space_high - action = (action - d_low) / (d_high - d_low) * (a_high - a_low) + a_low - return action - - @staticmethod - def init_weights(module: nn.Module, gain: float = 1) -> None: - """ - Orthogonal initialization (used in PPO and A2C) - """ - if isinstance(module, (nn.Linear, nn.Conv2d)): - nn.init.orthogonal_(module.weight, gain=gain) - if module.bias is not None: - module.bias.data.fill_(0.0) - - def visualize_model( - self, - distribution, - obs_rendered, - measurements, - control, - value, - value_measurements, - pred_sem, - pred_measure, - ): - - if self.config.distribution in ("beta", "beta_uni_mix"): - device = distribution.concentration1.device - granularity = torch.arange(start=0.0, end=1.0, step=0.001).unsqueeze(1) - granularity = torch.ones((granularity.shape[0], self.action_space.shape[0])) * granularity - granularity = granularity.to(device) - granularity_cpu = deepcopy(granularity).cpu() - elif self.config.distribution == "normal": - device = distribution.mean.device - granularity_cpu = torch.arange(start=0.0, end=1.0, step=0.001).unsqueeze(1) - granularity = torch.arange(start=-1.0, end=1.0, step=0.002).unsqueeze(1) - granularity = torch.ones((granularity.shape[0], self.action_space.shape[0])) * granularity - granularity = granularity.to(device) - - if self.config.distribution == "beta_uni_mix": - uniform_pdf = torch.ones_like(granularity, device=device, requires_grad=False) - distribution = ( - self.action_dist.beta_perc * distribution.log_prob(granularity).exp() - + self.action_dist.uniform_perc * uniform_pdf 
- )
- distribution = distribution.cpu().numpy()
- else:
- distribution = distribution.log_prob(granularity)
- distribution = torch.exp(distribution).cpu().numpy()
- # Plot the per-action distributions next to the rendered observation.
- width, height, _ = obs_rendered.shape # NOTE: numpy images are (H, W, C); this assumes a square observation.
-
- action_type = ["acceleration", "steering"]
- action_plots = []
- plot_height = height // (self.action_space.shape[0] + 1)
- actions = [control[0], control[1]]
-
- for i in range(self.action_space.shape[0]):
- action_plot = np.zeros((plot_height, width, 3), dtype=np.uint8)
- cv2.line(
- action_plot,
- (width // 2, 0),
- (width // 2, (plot_height - 1)),
- (0, 255, 0),
- thickness=2,
- )
- cv2.line(action_plot, (0, 0), (0, (plot_height - 1)), (0, 255, 0), thickness=2)
- cv2.line(
- action_plot,
- (width - 1, 0),
- (width - 1, (plot_height - 1)),
- (0, 255, 0),
- thickness=2,
- )
-
- # Plot actions:
- control_pixel = int(((actions[i] + 1.0) / 2.0) * (width - 1))
- cv2.line(
- action_plot,
- (control_pixel, 0),
- (control_pixel, (plot_height - 1)),
- (255, 255, 0),
- thickness=2,
- )
-
- for idx, x_value in enumerate(granularity_cpu.numpy()):
- x = int(x_value[0] * width)
- y_max = 25.0 # Continuous PDFs can be arbitrarily high. We clip at 25.
- y_pixel = int(distribution[idx, i] / y_max * (plot_height - 1))
- clipped_pixel = min(int(plot_height - 1), y_pixel)
- y = (plot_height - 1) - clipped_pixel # Mirror
- action_plot = cv2.circle(
- action_plot,
- (x, y),
- radius=1,
- color=(255, 255, 0),
- lineType=cv2.LINE_AA,
- thickness=-1,
- )
-
- cv2.putText(
- action_plot,
- action_type[i],
- (0, 10),
- cv2.FONT_HERSHEY_SIMPLEX,
- 0.5,
- (255, 255, 255),
- 1,
- cv2.LINE_AA,
- )
- action_plots.append(action_plot)
-
- action_plots = np.concatenate(action_plots, axis=0)
- measurement_plot = np.zeros((plot_height, width, 3), dtype=np.uint8)
-
- cv2.putText(
- measurement_plot,
- f"Last steer: {measurements[0]:.2f}",
- (0, 10),
- cv2.FONT_HERSHEY_SIMPLEX,
- 0.33,
- (255, 255, 255),
- 1,
- cv2.LINE_AA,
- )
- cv2.putText(
- measurement_plot,
- f"Last throt: {measurements[1]:.2f}",
- (0, 25),
- cv2.FONT_HERSHEY_SIMPLEX,
- 0.33,
- (255, 255, 255),
- 1,
- cv2.LINE_AA,
- )
- cv2.putText(
- measurement_plot,
- f"Last brake: {measurements[2]:.2f}",
- (0, 40),
- cv2.FONT_HERSHEY_SIMPLEX,
- 0.33,
- (255, 255, 255),
- 1,
- cv2.LINE_AA,
- )
-
- if self.config.use_target_point:
- cv2.putText(
- measurement_plot,
- f"TP: {measurements[8]:.1f} {measurements[9]:.1f}",
- (0, 55),
- cv2.FONT_HERSHEY_SIMPLEX,
- 0.33,
- (255, 255, 255),
- 1,
- cv2.LINE_AA,
- )
-
- # cv2.putText(measurement_plot, f'Acc: {measurements[8]:.1f} {measurements[9]:.1f}', (0, 55),
- # cv2.FONT_HERSHEY_SIMPLEX, 0.33, (255, 255, 255), 1, cv2.LINE_AA)
-
- cv2.putText(
- measurement_plot,
- f"Gear: {measurements[3]:.2f}",
- (width // 2, 10),
- cv2.FONT_HERSHEY_SIMPLEX,
- 0.33,
- (255, 255, 255),
- 1,
- cv2.LINE_AA,
- )
- cv2.putText(
- measurement_plot,
- f"Speed: {measurements[4]:.1f} {measurements[5]:.1f}",
- (width // 2, 25),
- cv2.FONT_HERSHEY_SIMPLEX,
- 0.33,
- (255, 255, 255),
- 1,
- cv2.LINE_AA,
- )
- cv2.putText(
- measurement_plot,
- f"F. 
speed: {measurements[6]:.2f}", - (width // 2, 40), - cv2.FONT_HERSHEY_SIMPLEX, - 0.33, - (255, 255, 255), - 1, - cv2.LINE_AA, - ) - cv2.putText( - measurement_plot, - f"Speed lim.: {measurements[7]:.2f}", - (width // 2, 55), - cv2.FONT_HERSHEY_SIMPLEX, - 0.33, - (255, 255, 255), - 1, - cv2.LINE_AA, - ) - - action_plots = np.concatenate((measurement_plot, action_plots), axis=0) - - obs_rendered = np.array(obs_rendered).copy() - cv2.putText( - obs_rendered, - f"Steer:{control[1]:.2f}", - (5, 10), - cv2.FONT_HERSHEY_SIMPLEX, - 0.33, - (0, 0, 0), - 1, - cv2.LINE_AA, - ) - cv2.putText( - obs_rendered, - f"Throt:{control[0]:.2f}", - (5, 25), - cv2.FONT_HERSHEY_SIMPLEX, - 0.33, - (0, 0, 0), - 1, - cv2.LINE_AA, - ) - cv2.putText( - obs_rendered, - f"Brake:{control[0]:.2f}", - (5, 40), - cv2.FONT_HERSHEY_SIMPLEX, - 0.33, - (0, 0, 0), - 1, - cv2.LINE_AA, - ) - cv2.putText( - obs_rendered, - f"Value:{value.item():.2f}", - (5, 55), - cv2.FONT_HERSHEY_SIMPLEX, - 0.33, - (0, 0, 0), - 1, - cv2.LINE_AA, - ) - - cv2.putText( - obs_rendered, - f"timeout:{value_measurements[0]:.2f}", - (110, 10), - cv2.FONT_HERSHEY_SIMPLEX, - 0.33, - (0, 0, 0), - 1, - cv2.LINE_AA, - ) - cv2.putText( - obs_rendered, - f"blocked:{value_measurements[1]:.2f}", - (110, 25), - cv2.FONT_HERSHEY_SIMPLEX, - 0.33, - (0, 0, 0), - 1, - cv2.LINE_AA, - ) - cv2.putText( - obs_rendered, - f"route:{value_measurements[2]:.2f}", - (110, 40), - cv2.FONT_HERSHEY_SIMPLEX, - 0.33, - (0, 0, 0), - 1, - cv2.LINE_AA, - ) - - if self.config.use_extra_control_inputs: - cv2.putText( - obs_rendered, - f"wheel: {measurements[8]:.2f}", - (110, 140), - cv2.FONT_HERSHEY_SIMPLEX, - 0.33, - (255, 255, 255), - 1, - cv2.LINE_AA, - ) - cv2.putText( - obs_rendered, - f"error: {measurements[9]:.2f}", - (110, 155), - cv2.FONT_HERSHEY_SIMPLEX, - 0.33, - (255, 255, 255), - 1, - cv2.LINE_AA, - ) - cv2.putText( - obs_rendered, - f"deriv: {measurements[10]:.2f}", - (110, 170), - cv2.FONT_HERSHEY_SIMPLEX, - 0.33, - (255, 255, 255), - 1, - cv2.LINE_AA, - ) - cv2.putText( - obs_rendered, - f"integ: {measurements[11]:.2f}", - (110, 185), - cv2.FONT_HERSHEY_SIMPLEX, - 0.33, - (255, 255, 255), - 1, - cv2.LINE_AA, - ) - - if self.config.use_world_model_loss: - pred_sem = pred_sem.cpu().numpy() - pred_measure = pred_measure.cpu().numpy() - - bev_semantic_indices = np.argmax(pred_sem[0], axis=0) - converter = np.array(self.config.bev_classes_list) - pred_semantic = converter[bev_semantic_indices, ...].astype("uint8") - pred_measure = pred_measure[0] - - cv2.putText( - pred_semantic, - f"Last steer: {pred_measure[0]:.2f}", - (0, 10), - cv2.FONT_HERSHEY_SIMPLEX, - 0.33, - (255, 255, 255), - 1, - cv2.LINE_AA, - ) - cv2.putText( - pred_semantic, - f"Last throt: {pred_measure[1]:.2f}", - (0, 25), - cv2.FONT_HERSHEY_SIMPLEX, - 0.33, - (255, 255, 255), - 1, - cv2.LINE_AA, - ) - cv2.putText( - pred_semantic, - f"Last break: {pred_measure[2]:.2f}", - (0, 40), - cv2.FONT_HERSHEY_SIMPLEX, - 0.33, - (255, 255, 255), - 1, - cv2.LINE_AA, - ) - - cv2.putText( - pred_semantic, - f"Gear: {pred_measure[3]:.2f}", - (width // 2, 10), - cv2.FONT_HERSHEY_SIMPLEX, - 0.33, - (255, 255, 255), - 1, - cv2.LINE_AA, - ) - cv2.putText( - pred_semantic, - f"Speed: {pred_measure[4]:.1f} {pred_measure[5]:.1f}", - (width // 2, 25), - cv2.FONT_HERSHEY_SIMPLEX, - 0.33, - (255, 255, 255), - 1, - cv2.LINE_AA, - ) - cv2.putText( - pred_semantic, - f"F. 
speed: {pred_measure[6]:.2f}", - (width // 2, 40), - cv2.FONT_HERSHEY_SIMPLEX, - 0.33, - (255, 255, 255), - 1, - cv2.LINE_AA, - ) - cv2.putText( - pred_semantic, - f"Speed lim.: {pred_measure[7]:.2f}", - (width // 2, 55), - cv2.FONT_HERSHEY_SIMPLEX, - 0.33, - (255, 255, 255), - 1, - cv2.LINE_AA, - ) - return np.concatenate((action_plots, obs_rendered, pred_semantic), axis=1) - - return np.concatenate((action_plots, obs_rendered), axis=1) diff --git a/d123/simulation/gym/training.py b/d123/simulation/gym/training.py deleted file mode 100644 index 0e035f1c..00000000 --- a/d123/simulation/gym/training.py +++ /dev/null @@ -1,1097 +0,0 @@ -""" -NOTE @DanielDauner: -This file needs refactoring. The training loop is specific to the default environment. -I will leave it for the initial code release but hope to find time to fix it. -""" - -import datetime -import gc -import logging -import math -import os -import random -import re -import time -from collections import deque -from pathlib import Path - -import gymnasium as gym -import jsonpickle -import jsonpickle.ext.numpy as jsonpickle_numpy -import numpy as np -import schedulefree -import torch -import wandb -from carl_nuplan.common.logging import suppress_info_logs -from carl_nuplan.planning.script.builders.observation_building_builder import build_observation_builder -from carl_nuplan.planning.script.builders.reward_builder_builder import build_reward_builder -from carl_nuplan.planning.script.builders.scenario_sampler_builder import build_scenario_sampler -from carl_nuplan.planning.script.builders.simulation_building_builder import build_simulation_builder -from carl_nuplan.planning.script.builders.trajectory_building_builder import build_trajectory_builder -from gymnasium.envs.registration import register -from omegaconf import DictConfig, OmegaConf -from pytictoc import TicToc -from tensorboardX import SummaryWriter -from torch import nn, optim -from tqdm import tqdm - -from d123.simulation.gym.policy.ppo.ppo_config import GlobalConfig -from d123.simulation.gym.policy.ppo.ppo_model import PPOPolicy - -jsonpickle_numpy.register_handlers() -jsonpickle.set_encoder_options("json", sort_keys=True, indent=4) - -logger = logging.getLogger(__name__) - -REWARD_LOGGING: bool = True -COMFORT_LOGGING: bool = True - - -def save(model, optimizer, config, folder, model_file, optimizer_file): - model_file = os.path.join(folder, model_file) - torch.save(model.module.state_dict(), model_file) - - if optimizer is not None: - optimizer_file = os.path.join(folder, optimizer_file) - torch.save(optimizer.state_dict(), optimizer_file) - - json_config = jsonpickle.encode(config) - with open( - os.path.join(folder, "config_pickle.json"), - "wt", - encoding="utf-8", - ) as f2: - f2.write(json_config) - - -def make_env(cfg: DictConfig, config: GlobalConfig): - @suppress_info_logs - def thunk(idx: int = 0): - - scenario_sampler = build_scenario_sampler(cfg) - simulation_builder = build_simulation_builder(cfg) - trajectory_builder = build_trajectory_builder(cfg) - observation_builder = build_observation_builder(cfg) - reward_builder = build_reward_builder(cfg) - - env = gym.make( - "EnvironmentWrapper-v0", - scenario_sampler=scenario_sampler, - simulation_builder=simulation_builder, - trajectory_builder=trajectory_builder, - observation_builder=observation_builder, - reward_builder=reward_builder, - terminate_on_failure=cfg.debug, - ) - env = gym.wrappers.RecordEpisodeStatistics(env) - env = gym.wrappers.ClipAction(env) - - if config.normalize_rewards: - env = 
gym.wrappers.NormalizeReward(env, gamma=config.gamma) - env = gym.wrappers.TransformReward(env, lambda reward: np.clip(reward, -10, 10)) - return env - - return thunk - - -def run_training(cfg: DictConfig) -> None: - - register( - id="EnvironmentWrapper-v0", - entry_point="d123.simulation.gym.environment.environment_wrapper:EnvironmentWrapper", - max_episode_steps=None, - ) - config = GlobalConfig() - - # Torchrun initialization - # Use torchrun for starting because it has proper error handling. Local rank will be set automatically - rank = int(os.environ["RANK"]) # Rank across all processes - local_rank = int(os.environ["LOCAL_RANK"]) # Rank on Node - world_size = int(os.environ["WORLD_SIZE"]) # Number of processes - - logger.info(f"RANK, LOCAL_RANK and WORLD_SIZE in environ: {rank}/{local_rank}/{world_size}") - - local_batch_size = cfg.total_batch_size // world_size - local_bs_per_env = local_batch_size // cfg.num_envs_per_gpu - local_minibatch_size = cfg.total_minibatch_size // world_size - num_minibatches = local_batch_size // local_minibatch_size - - run_name = f"{cfg.experiment_name}__{cfg.seed}" - if rank == 0: - exp_folder = os.path.join(cfg.output_dir, f"{cfg.experiment_name}") - wandb_folder = os.path.join(exp_folder, "wandb") - - Path(exp_folder).mkdir(parents=True, exist_ok=True) - Path(wandb_folder).mkdir(parents=True, exist_ok=True) - - if cfg.track: - - wandb.init( - project=cfg.wandb_project_name, - entity=cfg.wandb_entity, - sync_tensorboard=True, - # config=vars(cfg), # FIXME - name=run_name, - monitor_gym=False, - allow_val_change=True, - save_code=False, - mode="online", - resume="auto", - dir=wandb_folder, - settings=wandb.Settings( - _disable_stats=True, _disable_meta=True - ), # Can get large if we log all the cpu cores. - ) - - writer = SummaryWriter(exp_folder) - writer.add_text( - "hyperparameters", - "|param|value|\n|-|-|\n%s" - % ("\n".join([f"|{key}|{value}|" for key, value in OmegaConf.to_container(cfg, resolve=True).items()])), - ) - - # TRY NOT TO MODIFY: seeding - random.seed(cfg.seed) - np.random.seed(cfg.seed) - torch.manual_seed(cfg.seed) - - logger.info(f"Is cuda available?: {torch.cuda.is_available()}") - if cfg.train_gpu_ids is None: - cfg.train_gpu_ids = list(range(torch.cuda.device_count())) - - # Load the config before overwriting values with current arguments - if cfg.load_file is not None: - load_folder = Path(cfg.load_file).parent.resolve() - with open(os.path.join(load_folder, "config_pickle.json"), "rt", encoding="utf-8") as f: - json_config = f.read() - # 4 ms, might need to move outside the agent. - loaded_config = jsonpickle.decode(json_config) - # Overwrite all properties that were set in the saved config. - config.__dict__.update(loaded_config.__dict__) - - # Configure config. Converts all arguments into config attributes - config.initialize(**OmegaConf.to_container(cfg, resolve=True)) - - if config.use_dd_ppo_preempt: - # Compute unique port within machine based on experiment name and seed. - experiment_id = int(re.findall(r"\d+", cfg.experiment_uid)[0]) - tcp_store_port = ((experiment_id * 1000) % 65534) + int(cfg.seed) + 5000 - # We use gloo, because nccl crashes when using multiple processes per GPU. 
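# Worked example of the port derivation above, with assumed values (not from
# the source): experiment_id=7 and seed=1 give
# tcp_store_port = ((7 * 1000) % 65534) + 1 + 5000 = 12001,
# so every (experiment, seed) pair gets its own TCPStore port on the machine.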
- num_rollouts_done_store = torch.distributed.TCPStore("127.0.0.1", tcp_store_port, world_size, rank == 0)
- torch.distributed.init_process_group(
- backend="gloo" if cfg.cpu_collect else "nccl",
- store=num_rollouts_done_store,
- world_size=world_size,
- rank=rank,
- timeout=datetime.timedelta(minutes=15),
- )
- num_rollouts_done_store.set("num_done", "0")
- logger.info(f"Rank:{rank}, TCP_Store_Port: {tcp_store_port}")
- else:
- torch.distributed.init_process_group(
- backend="gloo" if cfg.cpu_collect else "nccl",
- init_method="env://",
- world_size=world_size,
- rank=rank,
- timeout=datetime.timedelta(minutes=15),
- )
-
- device = (
- torch.device(f"cuda:{cfg.train_gpu_ids[rank]}")
- if torch.cuda.is_available() and cfg.cuda
- else torch.device("cpu")
- )
-
- if torch.cuda.is_available() and cfg.cuda:
- torch.cuda.set_device(device) # was torch.cuda.device(device), which only builds an unused context manager
-
- torch.backends.cudnn.deterministic = cfg.torch_deterministic
- torch.backends.cuda.matmul.allow_tf32 = config.allow_tf32
- torch.backends.cudnn.benchmark = config.benchmark
- torch.backends.cudnn.allow_tf32 = config.allow_tf32
- # torch.set_float32_matmul_precision(config.matmul_precision)
-
- if rank == 0:
- json_config = jsonpickle.encode(config)
- with open(os.path.join(exp_folder, "config_pickle.json"), "w") as f2:
- f2.write(json_config)
-
- # NOTE: need to update the config with the argparse arguments before creating the gym environment, because the gym env reads its settings from the config.
- if cfg.debug:
- env = gym.vector.SyncVectorEnv([make_env(cfg=cfg, config=config) for _ in range(cfg.num_envs_per_gpu)])
- else:
- env = gym.vector.AsyncVectorEnv(
- [make_env(cfg=cfg, config=config) for _ in range(cfg.num_envs_per_gpu)],
- copy=False,
- )
- assert isinstance(env.single_action_space, gym.spaces.Box), "only continuous action space is supported"
-
- agent = PPOPolicy(env.single_observation_space, env.single_action_space, config=config).to(device)
-
- if config.compile_model:
- agent = torch.compile(agent)
-
- start_step = 0
- if cfg.load_file is not None:
- load_file_name = os.path.basename(cfg.load_file)
- algo_step = re.findall(r"\d+", load_file_name)
- if len(algo_step) > 0:
- start_step = int(algo_step[0]) + 1 # That step was already finished.
- logger.info(f"Start training from step: {start_step}")
- agent.load_state_dict(torch.load(cfg.load_file, map_location=device), strict=True)
-
- agent = torch.nn.parallel.DistributedDataParallel(
- agent,
- device_ids=None,
- output_device=None,
- broadcast_buffers=False,
- find_unused_parameters=False,
- )
-
- # If we are resuming training, use the last learning rate from the config.
- # If we start a fresh training, set the current learning rate according to the arguments. 
- if cfg.load_file is None: - config.current_learning_rate = cfg.learning_rate - - # if rank == 0: - # model_parameters = filter(lambda p: p.requires_grad, agent.parameters()) - # num_params = sum(np.prod(p.size()) for p in model_parameters) - # - # logger.info('Total trainable parameters: ', num_params) - if cfg.schedule_free: - optimizer = schedulefree.AdamWScheduleFree( - agent.parameters(), - lr=config.current_learning_rate, - betas=tuple(cfg.adam_betas), - eps=config.adam_eps, - weight_decay=config.weight_decay, - ) - - elif config.weight_decay > 0.0: - optimizer = optim.AdamW( - agent.parameters(), - lr=config.current_learning_rate, - betas=tuple(cfg.adam_betas), - eps=config.adam_eps, - weight_decay=config.weight_decay, - ) - else: - optimizer = optim.Adam( - agent.parameters(), - betas=tuple(cfg.adam_betas), - lr=config.current_learning_rate, - eps=config.adam_eps, - ) - - # Load optimizer - if cfg.load_file is not None: - optimizer.load_state_dict(torch.load(cfg.load_file.replace("model_", "optimizer_"), map_location=device)) - - if rank == 0: - writer.add_scalar("charts/restart", 1, config.global_step) # Log that a restart happened - - if config.cpu_collect: - device = "cpu" - - # ALGO Logic: Storage setup - obs = { - "bev_semantics": torch.zeros( - (local_bs_per_env, cfg.num_envs_per_gpu) + env.single_observation_space.spaces["bev_semantics"].shape, - dtype=torch.uint8, - device=device, - ), - "measurements": torch.zeros( - (local_bs_per_env, cfg.num_envs_per_gpu) + env.single_observation_space.spaces["measurements"].shape, - device=device, - ), - "value_measurements": torch.zeros( - (local_bs_per_env, cfg.num_envs_per_gpu) + env.single_observation_space.spaces["value_measurements"].shape, - device=device, - ), - } - actions = torch.zeros( - (local_bs_per_env, cfg.num_envs_per_gpu) + env.single_action_space.shape, - device=device, - ) - old_mus = torch.zeros( - (local_bs_per_env, cfg.num_envs_per_gpu) + env.single_action_space.shape, - device=device, - ) - old_sigmas = torch.zeros( - (local_bs_per_env, cfg.num_envs_per_gpu) + env.single_action_space.shape, - device=device, - ) - logprobs = torch.zeros((local_bs_per_env, cfg.num_envs_per_gpu), device=device) - rewards = torch.zeros((local_bs_per_env, cfg.num_envs_per_gpu), device=device) - dones = torch.zeros((local_bs_per_env, cfg.num_envs_per_gpu), device=device) - values = torch.zeros((local_bs_per_env, cfg.num_envs_per_gpu), device=device) - exp_n_steps = np.zeros((local_bs_per_env, cfg.num_envs_per_gpu), dtype=np.int32) - exp_suggest = np.zeros((local_bs_per_env, cfg.num_envs_per_gpu), dtype=np.int32) - - # TRY NOT TO MODIFY: start the game - reset_obs = env.reset(seed=[cfg.seed + rank * cfg.num_envs_per_gpu + i for i in range(cfg.num_envs_per_gpu)]) - next_obs = { - "bev_semantics": torch.tensor(reset_obs[0]["bev_semantics"], device=device, dtype=torch.uint8), - "measurements": torch.tensor(reset_obs[0]["measurements"], device=device, dtype=torch.float32), - "value_measurements": torch.tensor(reset_obs[0]["value_measurements"], device=device, dtype=torch.float32), - } - next_done = torch.zeros(cfg.num_envs_per_gpu, device=device) - next_lstm_state = ( - torch.zeros( - config.num_lstm_layers, - cfg.num_envs_per_gpu, - config.features_dim, - device=device, - ), - torch.zeros( - config.num_lstm_layers, - cfg.num_envs_per_gpu, - config.features_dim, - device=device, - ), - ) - num_updates = cfg.total_timesteps // cfg.total_batch_size - local_processed_samples = 0 - start_time = time.time() - agent.train() # TODO change train 
and eval
-
- if rank == 0:
- avg_returns = deque(maxlen=100)
-
- # if config.use_hl_gauss_value_loss:
- # hl_gauss_bins = rl_u.hl_gaus_bins(config.hl_gauss_vmin, config.hl_gauss_vmax, config.hl_gauss_bucket_size, device)
-
- for update in tqdm(range(start_step, num_updates), disable=rank != 0):
- if cfg.debug:
- print("WARNING: DEBUG MODE")
-
- if config.cpu_collect:
- device = "cpu"
- agent.to(device)
- # Free all data from the last iteration.
- gc.collect()
- with torch.no_grad():
- torch.cuda.empty_cache()
- gc.disable()
-
- if config.use_dd_ppo_preempt:
- num_rollouts_done_store.set("num_done", "0")
-
- # Buffers we use to store returns and aggregate them later to rank 0 for logging.
- total_returns = torch.zeros(world_size, device=device, dtype=torch.float32, requires_grad=False)
- total_lengths = torch.zeros(world_size, device=device, dtype=torch.float32, requires_grad=False)
- num_total_returns = torch.zeros(world_size, device=device, dtype=torch.int32, requires_grad=False)
-
- # Reward logging buffers
- if REWARD_LOGGING:
- reward_progress = torch.zeros(world_size, device=device, dtype=torch.float32, requires_grad=False)
- reward_red_light = torch.zeros(world_size, device=device, dtype=torch.float32, requires_grad=False)
- reward_collision = torch.zeros(world_size, device=device, dtype=torch.float32, requires_grad=False)
- reward_off_road = torch.zeros(world_size, device=device, dtype=torch.float32, requires_grad=False)
- reward_lane_distance = torch.zeros(world_size, device=device, dtype=torch.float32, requires_grad=False)
- reward_too_fast = torch.zeros(world_size, device=device, dtype=torch.float32, requires_grad=False)
- reward_off_route = torch.zeros(world_size, device=device, dtype=torch.float32, requires_grad=False)
- reward_comfort = torch.zeros(world_size, device=device, dtype=torch.float32, requires_grad=False)
- reward_ttc = torch.zeros(world_size, device=device, dtype=torch.float32, requires_grad=False)
-
- if COMFORT_LOGGING:
- comfort_lon_acceleration = torch.zeros(world_size, device=device, dtype=torch.float32, requires_grad=False)
- comfort_lat_acceleration = torch.zeros(world_size, device=device, dtype=torch.float32, requires_grad=False)
- comfort_jerk_metric = torch.zeros(world_size, device=device, dtype=torch.float32, requires_grad=False)
- comfort_lon_jerk_metric = torch.zeros(world_size, device=device, dtype=torch.float32, requires_grad=False)
- comfort_yaw_accel = torch.zeros(world_size, device=device, dtype=torch.float32, requires_grad=False)
- comfort_yaw_rate = torch.zeros(world_size, device=device, dtype=torch.float32, requires_grad=False)
-
- initial_lstm_state = (next_lstm_state[0].clone(), next_lstm_state[1].clone())
-
- # Annealing the rate if instructed to do so. 
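# Worked example of the cosine schedule below, with assumed values (not from
# the source): learning_rate=3e-4, update=500, num_updates=1000 give
# frac = 0.5 and lr = 0.5 * 3e-4 * (1 + cos(0.5 * pi)) = 1.5e-4,
# i.e. the rate has decayed to half its initial value at mid-training.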
- if cfg.schedule_free: - optimizer.eval() - else: - if config.lr_schedule == "linear": - frac = 1.0 - (update - 1.0) / num_updates - config.current_learning_rate = frac * config.learning_rate - elif config.lr_schedule == "step": - frac = update / num_updates - lr_multiplier = 1.0 - for change_percentage in config.lr_schedule_step_perc: - if frac > change_percentage: - lr_multiplier *= config.lr_schedule_step_factor - config.current_learning_rate = lr_multiplier * config.learning_rate - elif config.lr_schedule == "cosine": - frac = update / num_updates - config.current_learning_rate = 0.5 * config.learning_rate * (1 + math.cos(frac * math.pi)) - elif config.lr_schedule == "cosine_restart": - frac = update / (num_updates + 1) # + 1 so it doesn't become 100 % - for idx, frac_restart in enumerate(config.lr_schedule_cosine_restarts): - if frac >= frac_restart: - current_idx = idx - base_frac = config.lr_schedule_cosine_restarts[current_idx] - length_current_interval = ( - config.lr_schedule_cosine_restarts[current_idx + 1] - - config.lr_schedule_cosine_restarts[current_idx] - ) - frac_current_iter = (frac - base_frac) / length_current_interval - config.current_learning_rate = 0.5 * config.learning_rate * (1 + math.cos(frac_current_iter * math.pi)) - - for param_group in optimizer.param_groups: - param_group["lr"] = config.current_learning_rate - - t0 = TicToc() # Data collect - t1 = TicToc() # Forward pass - t2 = TicToc() # Env step - t3 = TicToc() # Pre-processing - t4 = TicToc() # Train inter - t5 = TicToc() # Logging - t0.tic() - inference_times = [] - env_times = [] - for step in range(0, local_bs_per_env): - config.global_step += 1 * world_size * cfg.num_envs_per_gpu - local_processed_samples += 1 * world_size * cfg.num_envs_per_gpu - - obs["bev_semantics"][step] = next_obs["bev_semantics"] - obs["measurements"][step] = next_obs["measurements"] - obs["value_measurements"][step] = next_obs["value_measurements"] - dones[step] = next_done - - # ALGO LOGIC: action logic - with torch.no_grad(): - t1.tic() - ( - action, - logprob, - _, - value, - _, - mu, - sigma, - _, - _, - _, - next_lstm_state, - ) = agent.forward(next_obs, lstm_state=next_lstm_state, done=next_done) - if config.use_hl_gauss_value_loss: - # value_pdf = F.softmax(value, dim=1) - # value = torch.sum(value_pdf * hl_gauss_bins.unsqueeze(0), dim=1) - pass - inference_times.append(t1.tocvalue()) - values[step] = value.flatten() - actions[step] = action - logprobs[step] = logprob - old_mus[step] = mu - old_sigmas[step] = sigma - - # TRY NOT TO MODIFY: execute the game and log data. 
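# (Aside, a known caveat rather than part of the original file: gymnasium
# reports `termination` and `truncation` separately; folding them into a
# single `done`, as below, slightly biases the bootstrapped value at
# time-limit truncations compared to treating the two cases separately.)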
- t2.tic() - next_obs, reward, termination, truncation, info = env.step(action.cpu().numpy()) - env_times.append(t2.tocvalue()) - - if rank == 0: - if "timing" in info.keys(): - if info["timing"][0] is not None: - for key_, value_ in info["timing"][0].items(): - tab_ = "time_step" if "step" in key_ else "time_reset" - writer.add_scalar(f"{tab_}/{key_}", np.mean(value_), config.global_step) - - done = np.logical_or(termination, truncation) # Not treated separately in original PPO - rewards[step] = torch.tensor(reward, device=device, dtype=torch.float32) - next_done = torch.tensor(done, device=device, dtype=torch.float32) - next_obs = { - "bev_semantics": torch.tensor(next_obs["bev_semantics"], device=device, dtype=torch.uint8), - "measurements": torch.tensor(next_obs["measurements"], device=device, dtype=torch.float32), - "value_measurements": torch.tensor(next_obs["value_measurements"], device=device, dtype=torch.float32), - } - - if "final_info" in info.keys(): - - for idx, single_info in enumerate(info["final_info"]): - - if config.use_exploration_suggest: - # Exploration loss - exp_n_steps[step, idx] = single_info["n_steps"] - exp_suggest[step, idx] = single_info["suggest"] - - # Sum up total returns and how often the env was reset during this iteration. - if single_info is not None: - if "episode" in single_info.keys(): - print( - f"rank: {rank}, config.global_step={config.global_step}, episodic_return={single_info['episode']['r']}" - ) - total_returns[rank] += single_info["episode"]["r"].item() - total_lengths[rank] += single_info["episode"]["l"].item() - num_total_returns[rank] += 1 - - if REWARD_LOGGING and "reward" in single_info.keys(): - reward_progress[rank] += single_info["reward"]["reward_progress"] - reward_red_light[rank] += single_info["reward"]["reward_red_light"] - reward_collision[rank] += single_info["reward"]["reward_collision"] - reward_off_road[rank] += single_info["reward"]["reward_off_road"] - reward_lane_distance[rank] += single_info["reward"]["reward_lane_distance"] - reward_too_fast[rank] += single_info["reward"]["reward_too_fast"] - reward_off_route[rank] += single_info["reward"]["reward_off_route"] - reward_comfort[rank] += single_info["reward"]["reward_comfort"] - reward_ttc[rank] += single_info["reward"]["reward_ttc"] - - if COMFORT_LOGGING and "comfort" in single_info.keys(): - comfort_lon_acceleration[rank] += single_info["comfort"]["comfort_lon_acceleration"] - comfort_lat_acceleration[rank] += single_info["comfort"]["comfort_lat_acceleration"] - comfort_jerk_metric[rank] += single_info["comfort"]["comfort_jerk_metric"] - comfort_lon_jerk_metric[rank] += single_info["comfort"]["comfort_lon_jerk_metric"] - comfort_yaw_accel[rank] += single_info["comfort"]["comfort_yaw_accel"] - comfort_yaw_rate[rank] += single_info["comfort"]["comfort_yaw_rate"] - - if config.use_dd_ppo_preempt: - num_done = int(num_rollouts_done_store.get("num_done")) - min_steps = int(config.dd_ppo_min_perc * local_bs_per_env) - if (num_done / world_size) > config.dd_ppo_preempt_threshold and step > min_steps: - logger.info(f"Rank:{rank}, Preempt at step: {step}, Num done: {num_done}") - break # End data collection early the other workers are finished. 
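# Worked example of the preemption rule above, with assumed values (not from
# the source): world_size=8, dd_ppo_preempt_threshold=0.6, dd_ppo_min_perc=0.25
# and local_bs_per_env=256 give min_steps = int(0.25 * 256) = 64; once 5 of 8
# workers report done (5 / 8 = 0.625 > 0.6) and step > 64, the remaining
# stragglers stop collecting and proceed to the update.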
-
- t0.toc(msg=f"Rank:{rank}, Data collection.")
- print(f"Rank:{rank}, Total forward time {sum(inference_times)}")
- print(f"Rank:{rank}, Total env time {sum(env_times)}")
- t3.tic()
-
- if config.use_dd_ppo_preempt:
- num_rollouts_done_store.add("num_done", 1)
-
- # In case of a dd-ppo preempt this can be smaller than local batch size
- num_collected_steps = step + 1
-
- # bootstrap value if not done
- with torch.no_grad():
- # if config.use_hl_gauss_value_loss:
- # next_value = agent.module.get_value(next_obs, next_lstm_state, next_done)
- # value_pdf = F.softmax(next_value, dim=1)
- # next_value = torch.sum(value_pdf * hl_gauss_bins.unsqueeze(0), dim=1)
- if config.use_hl_gauss_value_loss:
- pass # TODO: the HL-Gauss path (commented out above) still needs to compute next_value from the class logits.
- else:
- next_value = agent.module.get_value(next_obs, next_lstm_state, next_done).squeeze(1)
- if cfg.gae:
- advantages = torch.zeros_like(rewards, device=device)
- lastgaelam = 0.0
- for t in reversed(range(num_collected_steps)):
- if t == local_bs_per_env - 1:
- nextnonterminal = 1.0 - next_done
- nextvalues = next_value
- else:
- nextnonterminal = 1.0 - dones[t + 1]
- nextvalues = values[t + 1]
- delta = rewards[t] + cfg.gamma * nextvalues * nextnonterminal - values[t]
- advantages[t] = lastgaelam = delta + cfg.gamma * cfg.gae_lambda * nextnonterminal * lastgaelam
- returns = advantages + values
- else:
- returns = torch.zeros_like(rewards, device=device)
- for t in reversed(range(num_collected_steps)):
- if t == local_bs_per_env - 1:
- nextnonterminal = 1.0 - next_done
- next_return = next_value
- else:
- nextnonterminal = 1.0 - dones[t + 1]
- next_return = returns[t + 1]
- returns[t] = rewards[t] + cfg.gamma * nextnonterminal * next_return
- advantages = returns - values
-
- if config.cpu_collect:
- device = (
- torch.device(f"cuda:{cfg.train_gpu_ids}")
- if torch.cuda.is_available() and cfg.cuda
- else torch.device("cpu")
- )
- agent.to(device)
-
- b_exploration_suggests = np.zeros((num_collected_steps, cfg.num_envs_per_gpu), dtype=np.int32)
- if config.use_exploration_suggest:
- for step in range(num_collected_steps):
- n_steps = exp_n_steps[step][0] # TODO
- if n_steps > 0:
- n_start = max(0, step - n_steps)
- b_exploration_suggests[n_start:step] = exp_suggest[step]
-
- if config.use_world_model_loss: # TODO
- b_wm_added_index = np.zeros(num_collected_steps, dtype=np.int32)
- b_world_model_mask = torch.zeros(
- num_collected_steps,
- dtype=torch.float32,
- device=device,
- requires_grad=False,
- )
- invalid_frames = config.num_future_prediction
- for step in reversed(range(num_collected_steps)):
- if invalid_frames <= 0:
- b_wm_added_index[step] = config.num_future_prediction
- b_world_model_mask[step] = 1.0
- else:
- invalid_frames -= 1
-
- if dones[step]:
- invalid_frames = 5
-
- b_obs = {
- "bev_semantics": obs["bev_semantics"][:num_collected_steps].reshape(
- (-1,) + env.single_observation_space.spaces["bev_semantics"].shape
- ),
- "measurements": obs["measurements"][:num_collected_steps].reshape(
- (-1,) + env.single_observation_space.spaces["measurements"].shape
- ),
- "value_measurements": obs["value_measurements"][:num_collected_steps].reshape(
- (-1,) + env.single_observation_space.spaces["value_measurements"].shape
- ),
- }
- b_logprobs = logprobs[:num_collected_steps].reshape(-1)
- b_actions = actions[:num_collected_steps].reshape((-1,) + env.single_action_space.shape)
- b_dones = dones[:num_collected_steps].reshape(-1) # TODO check if pre-emption trick causes problems with LSTM. 
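# (One plausible reading of the TODO above, not confirmed by the source: with
# dd-ppo preemption, b_inds_original is later np.resize'd, which repeats
# samples and breaks the time-major layout that the LSTM path's flatinds
# indexing relies on; the asserts in the LSTM branch below rule out combining
# the two.)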
- b_advantages = advantages[:num_collected_steps].reshape(-1) - b_returns = returns[:num_collected_steps].reshape(-1) - b_values = values[:num_collected_steps].reshape(-1) - b_old_mus = old_mus[:num_collected_steps].reshape((-1,) + env.single_action_space.shape) - b_old_sigmas = old_sigmas[:num_collected_steps].reshape((-1,) + env.single_action_space.shape) - - # When the data was collected on the CPU, move it to GPU before training - if config.cpu_collect: - b_obs["bev_semantics"] = b_obs["bev_semantics"].to(device) - b_obs["measurements"] = b_obs["measurements"].to(device) - b_obs["value_measurements"] = b_obs["value_measurements"].to(device) - b_logprobs = b_logprobs.to(device) - b_actions = b_actions.to(device) - b_dones = b_dones.to(device) - b_advantages = b_advantages.to(device) - b_returns = b_returns.to(device) - b_values = b_values.to(device) - b_old_mus = b_old_mus.to(device) - b_old_sigmas = b_old_sigmas.to(device) - - # Aggregate returns to GPU 0 for logging and storing the best model. - # Gloo doesn't support AVG, so we implement it via sum / num returns - torch.distributed.all_reduce(total_returns, op=torch.distributed.ReduceOp.SUM) - torch.distributed.all_reduce(total_lengths, op=torch.distributed.ReduceOp.SUM) - torch.distributed.all_reduce(num_total_returns, op=torch.distributed.ReduceOp.SUM) - - # reward - if REWARD_LOGGING: - torch.distributed.all_reduce(reward_progress, op=torch.distributed.ReduceOp.SUM) - torch.distributed.all_reduce(reward_red_light, op=torch.distributed.ReduceOp.SUM) - torch.distributed.all_reduce(reward_collision, op=torch.distributed.ReduceOp.SUM) - torch.distributed.all_reduce(reward_off_road, op=torch.distributed.ReduceOp.SUM) - torch.distributed.all_reduce(reward_lane_distance, op=torch.distributed.ReduceOp.SUM) - torch.distributed.all_reduce(reward_too_fast, op=torch.distributed.ReduceOp.SUM) - torch.distributed.all_reduce(reward_off_route, op=torch.distributed.ReduceOp.SUM) - torch.distributed.all_reduce(reward_comfort, op=torch.distributed.ReduceOp.SUM) - torch.distributed.all_reduce(reward_ttc, op=torch.distributed.ReduceOp.SUM) - - if COMFORT_LOGGING: - torch.distributed.all_reduce(comfort_lon_acceleration, op=torch.distributed.ReduceOp.SUM) - torch.distributed.all_reduce(comfort_lat_acceleration, op=torch.distributed.ReduceOp.SUM) - torch.distributed.all_reduce(comfort_jerk_metric, op=torch.distributed.ReduceOp.SUM) - torch.distributed.all_reduce(comfort_lon_jerk_metric, op=torch.distributed.ReduceOp.SUM) - torch.distributed.all_reduce(comfort_yaw_accel, op=torch.distributed.ReduceOp.SUM) - torch.distributed.all_reduce(comfort_yaw_rate, op=torch.distributed.ReduceOp.SUM) - - if rank == 0: - num_total_returns_all_processes = torch.sum(num_total_returns) - # Only can log return if there was any episode that finished - if num_total_returns_all_processes > 0: - total_returns_all_processes = torch.sum(total_returns) - total_lengths_all_processes = torch.sum(total_lengths) - avg_return = total_returns_all_processes / num_total_returns_all_processes - avg_return = avg_return.item() - avg_length = total_lengths_all_processes / num_total_returns_all_processes - avg_length = avg_length.item() - - avg_returns.append(avg_return) - windowed_avg_return = sum(avg_returns) / len(avg_returns) - - writer.add_scalar("charts/episodic_return", avg_return, config.global_step) - writer.add_scalar( - "charts/windowed_avg_return", - windowed_avg_return, - config.global_step, - ) - writer.add_scalar("charts/episodic_length", avg_length, config.global_step) - - if 
REWARD_LOGGING: - reward_progress = torch.sum(reward_progress) / num_total_returns_all_processes - reward_red_light = torch.sum(reward_red_light) / num_total_returns_all_processes - reward_collision = torch.sum(reward_collision) / num_total_returns_all_processes - reward_off_road = torch.sum(reward_off_road) / num_total_returns_all_processes - reward_lane_distance = torch.sum(reward_lane_distance) / num_total_returns_all_processes - reward_too_fast = torch.sum(reward_too_fast) / num_total_returns_all_processes - reward_off_route = torch.sum(reward_off_route) / num_total_returns_all_processes - reward_comfort = torch.sum(reward_comfort) / num_total_returns_all_processes - reward_ttc = torch.sum(reward_ttc) / num_total_returns_all_processes - - writer.add_scalar("reward/progress", reward_progress.item(), config.global_step) - writer.add_scalar("reward/red_light", reward_red_light.item(), config.global_step) - writer.add_scalar("reward/collision", reward_collision.item(), config.global_step) - writer.add_scalar("reward/off_road", reward_off_road.item(), config.global_step) - writer.add_scalar( - "reward/lane_distance", - reward_lane_distance.item(), - config.global_step, - ) - writer.add_scalar("reward/too_fast", reward_too_fast.item(), config.global_step) - writer.add_scalar("reward/off_route", reward_off_route.item(), config.global_step) - writer.add_scalar("reward/comfort", reward_comfort.item(), config.global_step) - writer.add_scalar("reward/ttc", reward_ttc.item(), config.global_step) - - if COMFORT_LOGGING: - comfort_lon_acceleration = torch.sum(comfort_lon_acceleration) / num_total_returns_all_processes - comfort_lat_acceleration = torch.sum(comfort_lat_acceleration) / num_total_returns_all_processes - comfort_jerk_metric = torch.sum(comfort_jerk_metric) / num_total_returns_all_processes - comfort_lon_jerk_metric = torch.sum(comfort_lon_jerk_metric) / num_total_returns_all_processes - comfort_yaw_accel = torch.sum(comfort_yaw_accel) / num_total_returns_all_processes - comfort_yaw_rate = torch.sum(comfort_yaw_rate) / num_total_returns_all_processes - - writer.add_scalar( - "comfort/comfort_lon_acceleration", - comfort_lon_acceleration.item(), - config.global_step, - ) - writer.add_scalar( - "comfort/comfort_lat_acceleration", - comfort_lat_acceleration.item(), - config.global_step, - ) - writer.add_scalar( - "comfort/comfort_jerk_metric", - comfort_jerk_metric.item(), - config.global_step, - ) - writer.add_scalar( - "comfort/comfort_lon_jerk_metric", - comfort_lon_jerk_metric.item(), - config.global_step, - ) - writer.add_scalar( - "comfort/comfort_yaw_accel", - comfort_yaw_accel.item(), - config.global_step, - ) - writer.add_scalar( - "comfort/comfort_yaw_rate", - comfort_yaw_rate.item(), - config.global_step, - ) - - if windowed_avg_return >= config.max_training_score: - config.max_training_score = windowed_avg_return - # Same model could reach multiple high scores - if config.best_iteration != update: - save(agent, None, config, exp_folder, "model_best.pth", None) - config.best_iteration = update - - # Optimizing the policy and value network - if config.use_lstm: - assert cfg.num_envs_per_gpu % num_minibatches == 0 - assert not config.use_dd_ppo_preempt - assert not config.use_world_model_loss - - envsperbatch = cfg.num_envs_per_gpu // num_minibatches - envinds = np.arange(cfg.num_envs_per_gpu) - flatinds = np.arange(local_batch_size).reshape(local_bs_per_env, cfg.num_envs_per_gpu) - - b_inds_original = np.arange(num_collected_steps * cfg.num_envs_per_gpu) - - if 
config.use_dd_ppo_preempt: - b_inds_original = np.resize(b_inds_original, (local_batch_size,)) - - # if config.use_world_model_loss: - # b_inds_world_model_original = b_inds_original + b_wm_added_index[b_inds_original] # TODO - - clipfracs = [] - - t3.toc(msg=f"Rank:{rank}, Data pre-processing.") - t4.tic() - - if cfg.schedule_free: - optimizer.train() - - for latest_epoch in range(cfg.update_epochs): - approx_kl_divs = [] - if config.use_lstm: - np.random.shuffle(envinds) - else: - p = np.random.permutation(len(b_inds_original)) - b_inds = b_inds_original[p] - # if config.use_world_model_loss: - # b_inds_world_model = b_inds_world_model_original[p] - - total_steps = local_batch_size - step_size = local_minibatch_size - if config.use_lstm: - total_steps = cfg.num_envs_per_gpu - step_size = envsperbatch - - for start in range(0, total_steps, step_size): - if config.use_lstm: - end = start + envsperbatch - mbenvinds = envinds[start:end] - lstm_state = ( - initial_lstm_state[0][:, mbenvinds], - initial_lstm_state[1][:, mbenvinds], - ) - mb_inds = flatinds[:, mbenvinds].ravel() # be really careful about the index - else: - end = start + local_minibatch_size - mb_inds = b_inds[start:end] - lstm_state = None - - # if config.use_world_model_loss: - # mb_inds_world_model = b_inds_world_model[start:end] - if config.use_exploration_suggest: - b_exploration_suggests_sampled = b_exploration_suggests[mb_inds] - else: - b_exploration_suggests_sampled = None - b_obs_sampled = { - "bev_semantics": b_obs["bev_semantics"][mb_inds], - "measurements": b_obs["measurements"][mb_inds], - "value_measurements": b_obs["value_measurements"][mb_inds], - } - # Don't need action, so we don't unscale - ( - _, - newlogprob, - entropy, - newvalue, - exploration_loss, - _, - _, - distribution, - pred_sem, - pred_measure, - _, - ) = agent.forward( - b_obs_sampled, - actions=b_actions[mb_inds], - exploration_suggests=b_exploration_suggests_sampled, - lstm_state=lstm_state, - done=b_dones[mb_inds], - ) - logratio = newlogprob - b_logprobs[mb_inds] - ratio = logratio.exp() - - mb_advantages = b_advantages[mb_inds] - if cfg.norm_adv: - mb_advantages = (mb_advantages - mb_advantages.mean()) / (mb_advantages.std() + 1e-8) - - # Policy loss - pg_loss1 = -mb_advantages * ratio - pg_loss2 = -mb_advantages * torch.clamp(ratio, 1 - cfg.clip_coef, 1 + cfg.clip_coef) - pg_loss = torch.max(pg_loss1, pg_loss2).mean() - - # Value loss - if cfg.clip_vloss: - # Value clipping is not implemented with HL_Gauss loss - assert config.use_hl_gauss_value_loss is False - newvalue = newvalue.view(-1) - v_clipped = b_values[mb_inds] + torch.clamp( - newvalue - b_values[mb_inds], - -cfg.clip_coef, - cfg.clip_coef, - ) - v_loss_clipped = (v_clipped - b_returns[mb_inds]) ** 2 - v_loss_unclipped = (newvalue - b_returns[mb_inds]) ** 2 - v_loss_max = torch.max(v_loss_unclipped, v_loss_clipped) - v_loss = 0.5 * v_loss_max.mean() - else: - # if config.use_hl_gauss_value_loss: - # target_pdf = rl_u.hl_gaus_pdf(b_returns[mb_inds], config.hl_gauss_std, config.hl_gauss_vmin, - # config.hl_gauss_vmax, config.hl_gauss_bucket_size) - # v_loss = F.cross_entropy(newvalue, target_pdf) - if config.use_hl_gauss_value_loss: - pass - else: - newvalue = newvalue.view(-1) - v_loss = 0.5 * ((newvalue - b_returns[mb_inds]) ** 2).mean() - - # if config.use_world_model_loss: - # b_mask_sampled = b_world_model_mask[mb_inds_world_model] - # total_valid_items = torch.sum(b_mask_sampled) - # semantic_labels = image_to_class_labels(b_obs['bev_semantics'][mb_inds_world_model]) - # 
semantic_loss = F.cross_entropy(pred_sem, semantic_labels, reduction='none') - # semantic_loss = torch.mean(semantic_loss, dim=(1, 2)) * b_mask_sampled - # semantic_loss = torch.sum(semantic_loss) / total_valid_items - # measure_loss = F.l1_loss(pred_measure, b_obs['measurements'][mb_inds_world_model], reduction='none') - # measure_loss = torch.mean(measure_loss, dim=1) * b_mask_sampled - # measure_loss = torch.sum(measure_loss) / total_valid_items - # world_model_loss = 0.5 * semantic_loss + 0.5 * measure_loss - - entropy_loss = entropy.mean() - loss = pg_loss - config.ent_coef * entropy_loss + v_loss * config.vf_coef - if config.use_exploration_suggest: - loss = loss + cfg.expl_coef * exploration_loss - - # if config.use_world_model_loss: - # loss = loss + config.world_model_loss_weight * world_model_loss - - optimizer.zero_grad() - loss.backward() - nn.utils.clip_grad_norm_(agent.parameters(), cfg.max_grad_norm) - optimizer.step() - - old_mu_sampled = b_old_mus[mb_inds] - old_sigmas_sampled = b_old_sigmas[mb_inds] - with torch.no_grad(): - # calculate approx_kl http://joschu.net/blog/kl-approx.html - old_approx_kl = (-logratio).mean() - # approx_kl = ((ratio - 1) - logratio).mean() - - # We compute approx KL according to roach - old_distribution = agent.module.action_dist.proba_distribution(old_mu_sampled, old_sigmas_sampled) - kl_div = torch.distributions.kl_divergence(old_distribution.distribution, distribution) - approx_kl_divs.append(kl_div.mean()) - - clipfracs += [((ratio - 1.0).abs() > cfg.clip_coef).float().mean()] - - approx_kl = torch.mean(torch.stack(approx_kl_divs)) - # Gloo doesn't support AVG, so we implement it via sum / world size - torch.distributed.all_reduce(approx_kl, op=torch.distributed.ReduceOp.SUM) - approx_kl = approx_kl / world_size - if cfg.target_kl is not None and config.lr_schedule == "kl": - if approx_kl > cfg.target_kl: - if config.lr_schedule_step is not None: - config.kl_early_stop += 1 - if config.kl_early_stop >= config.lr_schedule_step: - config.current_learning_rate *= 0.5 - config.kl_early_stop = 0 - - break - - if cfg.schedule_free: - optimizer.eval() - - del b_obs # Remove large array - t4.toc(msg=f"Rank:{rank}, Training.") - t5.tic() - - config.latest_iteration = update - # Avg value to log over all Environments - # Sync with 3 envs takes 4 ms. 
- # Gloo doesn't support AVG, so we implement it via sum / world size - torch.distributed.all_reduce(v_loss, op=torch.distributed.ReduceOp.SUM) - v_loss = v_loss / world_size - - torch.distributed.all_reduce(pg_loss, op=torch.distributed.ReduceOp.SUM) - pg_loss = pg_loss / world_size - - torch.distributed.all_reduce(entropy_loss, op=torch.distributed.ReduceOp.SUM) - entropy_loss = entropy_loss / world_size - - if config.use_exploration_suggest: - torch.distributed.all_reduce(exploration_loss, op=torch.distributed.ReduceOp.SUM) - exploration_loss = exploration_loss / world_size - - # if config.use_world_model_loss: - # torch.distributed.all_reduce(world_model_loss, op=torch.distributed.ReduceOp.SUM) - # world_model_loss = world_model_loss / world_size - - torch.distributed.all_reduce(old_approx_kl, op=torch.distributed.ReduceOp.SUM) - old_approx_kl = old_approx_kl / world_size - - torch.distributed.all_reduce(approx_kl, op=torch.distributed.ReduceOp.SUM) - approx_kl = approx_kl / world_size - - b_values = b_values[b_inds_original] - torch.distributed.all_reduce(b_values, op=torch.distributed.ReduceOp.SUM) - b_values = b_values / world_size - - b_returns = b_returns[b_inds_original] - torch.distributed.all_reduce(b_returns, op=torch.distributed.ReduceOp.SUM) - b_returns = b_returns / world_size - - b_advantages = b_advantages[b_inds_original] - torch.distributed.all_reduce(b_advantages, op=torch.distributed.ReduceOp.SUM) - b_advantages = b_advantages / world_size - - clipfracs = torch.mean(torch.stack(clipfracs)) - torch.distributed.all_reduce(clipfracs, op=torch.distributed.ReduceOp.SUM) - clipfracs = clipfracs / world_size - - if rank == 0: - save( - agent, - optimizer, - config, - exp_folder, - f"model_latest_{update:09d}.pth", - f"optimizer_latest_{update:09d}.pth", - ) - frac = update / num_updates - if config.current_eval_interval_idx < len(config.eval_intervals): - if frac >= config.eval_intervals[config.current_eval_interval_idx]: - save( - agent, - None, - config, - exp_folder, - f"model_eval_{update:09d}.pth", - None, - ) - config.current_eval_interval_idx += 1 - - # Cleanup file from last epoch - for file in os.listdir(exp_folder): - if file.startswith("model_latest_") and file.endswith(".pth"): - if file != f"model_latest_{update:09d}.pth": - old_model_file = os.path.join(exp_folder, file) - if os.path.isfile(old_model_file): - os.remove(old_model_file) - if file.startswith("optimizer_latest_") and file.endswith(".pth"): - if file != f"optimizer_latest_{update:09d}.pth": - old_model_file = os.path.join(exp_folder, file) - if os.path.isfile(old_model_file): - os.remove(old_model_file) - - y_pred, y_true = b_values.cpu().numpy(), b_returns.cpu().numpy() - var_y = np.var(y_true) - explained_var = np.nan if var_y == 0 else 1 - np.var(y_true - y_pred) / var_y - - # TRY NOT TO MODIFY: record rewards for plotting purposes - writer.add_scalar( - "charts/learning_rate", - optimizer.param_groups[0]["lr"], - config.global_step, - ) - writer.add_scalar("losses/value_loss", v_loss.item(), config.global_step) - writer.add_scalar("losses/policy_loss", pg_loss.item(), config.global_step) - writer.add_scalar("losses/entropy", entropy_loss.item(), config.global_step) - if config.use_exploration_suggest: - writer.add_scalar("losses/exploration", exploration_loss.item(), config.global_step) - # if config.use_world_model_loss: - # writer.add_scalar("losses/world_model", world_model_loss.item(), config.global_step) - writer.add_scalar("losses/old_approx_kl", old_approx_kl.item(), config.global_step) - 
writer.add_scalar("losses/approx_kl", approx_kl.item(), config.global_step) - writer.add_scalar("losses/clipfrac", clipfracs.item(), config.global_step) - writer.add_scalar("losses/explained_variance", explained_var, config.global_step) - writer.add_scalar("losses/latest_epoch", latest_epoch, config.global_step) - writer.add_scalar("charts/discounted_returns", b_returns.mean().item(), config.global_step) - writer.add_scalar("charts/advantages", b_advantages.mean().item(), config.global_step) - # Adjusted so it doesn't count the first epoch which is slower than the rest (converges faster) - writer.add_scalar( - "charts/SPS", - int(local_processed_samples / (time.time() - start_time)), - config.global_step, - ) - writer.add_scalar("charts/restart", 0, config.global_step) - - print(f"SPS: {int(local_processed_samples / (time.time() - start_time))}") - - t5.toc(msg=f"Rank:{rank}, Logging") - - env.close() - if rank == 0: - writer.close() - - save( - agent, - optimizer, - config, - exp_folder, - "model_final.pth", - "optimizer_final.pth", - ) - wandb.finish(exit_code=0, quiet=True) - logger.info("Done training.") diff --git a/d123/simulation/history/simulation_history.py b/d123/simulation/history/simulation_history.py deleted file mode 100644 index 22f779b7..00000000 --- a/d123/simulation/history/simulation_history.py +++ /dev/null @@ -1,95 +0,0 @@ -from __future__ import annotations - -from dataclasses import dataclass -from typing import List, Optional - -from d123.common.datatypes.recording.detection_recording import DetectionRecording -from d123.common.datatypes.vehicle_state.ego_state import EgoStateSE2 -from d123.conversion.scene.abstract_scene import AbstractScene -from d123.simulation.planning.planner_output.abstract_planner_output import AbstractPlannerOutput -from d123.simulation.time_controller.simulation_iteration import SimulationIteration - -# from nuplan.common.actor_state.ego_state import EgoState -# from nuplan.common.actor_state.state_representation import StateSE2 -# from nuplan.common.maps.abstract_map import AbstractMap -# from nuplan.common.maps.maps_datatypes import TrafficLightStatusData -# from nuplan.planning.simulation.observation.observation_type import Observation -# from nuplan.planning.simulation.simulation_time_controller.simulation_iteration import SimulationIteration -# from nuplan.planning.simulation.trajectory.abstract_trajectory import AbstractTrajectory - - -@dataclass(frozen=True) -class Simulation2DHistorySample: - """ - Single SimulationHistory sample point. - """ - - iteration: SimulationIteration - ego_state: EgoStateSE2 - planner_output: AbstractPlannerOutput - detections: DetectionRecording - - -class Simulation2DHistory: - """ - Simulation history including a sequence of simulation states. - """ - - def __init__(self, data: Optional[List[Simulation2DHistorySample]] = None) -> None: - """ - Construct the history - :param map_api: abstract map api for accessing the maps - :param data: A list of simulation data. 
- """ - - self.data: List[Simulation2DHistorySample] = data if data is not None else list() - self.scene: Optional[AbstractScene] = None - - def add_sample(self, sample: Simulation2DHistorySample) -> None: - """ - Add a sample to history - :param sample: one snapshot of a simulation - """ - self.data.append(sample) - - def last(self) -> Simulation2DHistorySample: - """ - :return: last sample from history, or raise if empty - """ - if not self.data: - raise RuntimeError("Data is empty!") - return self.data[-1] - - def reset(self, scene: AbstractScene) -> None: - """ - Clear the stored data - """ - self.data.clear() - self.scene = scene - - def __len__(self) -> int: - """ - Return the number of history samples as len(). - """ - return len(self.data) - - @property - def extract_ego_state(self) -> List[EgoStateSE2]: - """ - Extract ego states in simulation history. - :return An List of ego_states. - """ - return [sample.ego_state for sample in self.data] - - @property - def interval_seconds(self) -> float: - """ - Return the interval between SimulationHistorySamples. - :return The interval in seconds. - """ - if not self.data or len(self.data) < 1: - raise ValueError("Data is empty!") - elif len(self.data) < 2: - raise ValueError("Can't calculate the interval of a single-iteration simulation.") - - return float(self.data[1].iteration.time_s - self.data[0].iteration.time_s) # float cast is for mypy diff --git a/d123/simulation/history/simulation_history_buffer.py b/d123/simulation/history/simulation_history_buffer.py deleted file mode 100644 index f98f3844..00000000 --- a/d123/simulation/history/simulation_history_buffer.py +++ /dev/null @@ -1,194 +0,0 @@ -from __future__ import annotations - -from collections import deque -from typing import Deque, List, Optional, Tuple, Type - -from d123.common.datatypes.recording.abstract_recording import Recording -from d123.common.datatypes.recording.detection_recording import DetectionRecording -from d123.common.datatypes.vehicle_state.ego_state import EgoStateSE2 -from d123.conversion.scene.abstract_scene import AbstractScene - - -class Simulation2DHistoryBuffer: - """ - This class is used to keep a rolling buffer of a given size. The buffer is a first-in first-out queue. Hence, the - oldest samples in the buffer are continuously replaced as new samples are appended. - """ - - def __init__( - self, - ego_state_buffer: Deque[EgoStateSE2], - recording_buffer: Deque[Recording], - sample_interval: Optional[float] = None, - ): - """ - Constructs a SimulationHistoryBuffer - :param ego_state_buffer: Past ego state trajectory including the state. - at the current time step [t_-N, ..., t_-1, t_0] - :param observations_buffer: Past observations including the observation. - at the current time step [t_-N, ..., t_-1, t_0]. - :param sample_interval: [s] the time interval between each sample, if given - """ - if not ego_state_buffer or not recording_buffer: - raise ValueError("Ego and observation buffers cannot be empty!") - - if len(ego_state_buffer) != len(recording_buffer): - raise ValueError( - "Ego and observations buffer is " - f"not the same length {len(ego_state_buffer) != len(recording_buffer)}!" 
- ) - - self._ego_state_buffer = ego_state_buffer - self._recording_buffer = recording_buffer - self._sample_interval = sample_interval - - @property - def ego_state_buffer(self) -> Deque[EgoStateSE2]: - """ - :return: current ego state buffer - """ - return self._ego_state_buffer - - @property - def recording_buffer(self) -> Deque[Recording]: - """ - :return: current observation buffer - """ - return self._recording_buffer - - @property - def size(self) -> int: - """ - :return: Size of the buffer. - """ - return len(self.ego_states) - - @property - def duration(self) -> Optional[float]: - """ - :return: [s] Duration of the buffer. - """ - return self.sample_interval * self.size if self.sample_interval else None - - @property - def current_state(self) -> Tuple[EgoStateSE2, Recording]: - """ - :return: current state of AV vehicle and its observations - """ - return self.ego_states[-1], self.recording_buffer[-1] - - @property - def sample_interval(self) -> Optional[float]: - """ - :return: the sample interval - """ - return self._sample_interval - - @sample_interval.setter - def sample_interval(self, sample_interval: float) -> None: - """ - Sets the sample interval of the buffer, raises if the sample interval was not None - :param sample_interval: The sample interval of the buffer - """ - assert self._sample_interval is None, "Can't overwrite a pre-existing sample-interval!" - self._sample_interval = sample_interval - - @property - def ego_states(self) -> List[EgoStateSE2]: - """ - :return: the ego state buffer in increasing temporal order where the last sample is the more recent sample - [t_-N, ..., t_-1, t_0] - """ - return list(self._ego_state_buffer) - - @property - def recordings(self) -> List[Recording]: - """ - :return: the recording buffer in increasing temporal order where the last sample is the more recent sample - [t_-N, ..., t_-1, t_0] - """ - return list(self._recording_buffer) - - def append(self, ego_state: EgoStateSE2, recording: Recording) -> None: - """ - Adds new samples to the buffers - :param ego_state: an ego state - :param recording: a recording - """ - self._ego_state_buffer.append(ego_state) - self._recording_buffer.append(recording) - - def extend(self, ego_states: List[EgoStateSE2], recordings: List[Recording]) -> None: - """ - Adds new samples to the buffers - :param ego_states: an ego states list - :param recordings: a recordings list - """ - if len(ego_states) != len(recordings): - raise ValueError(f"Ego and recordings are not the same length {len(ego_states) != len(recordings)}!") - self._ego_state_buffer.extend(ego_states) - self._recording_buffer.extend(recordings) - - def __len__(self) -> int: - """ - :return: the length of the buffer - @raise AssertionError if the length of each buffers are not the same - """ - return len(self._ego_state_buffer) - - @classmethod - def initialize_from_list( - cls, - buffer_size: int, - ego_states: List[EgoStateSE2], - recordings: List[Recording], - sample_interval: Optional[float] = None, - ) -> Simulation2DHistoryBuffer: - """ - Create history buffer from lists - :param buffer_size: size of buffer - :param ego_states: list of ego states - :param observations: list of observations - :param sample_interval: [s] the time interval between each sample, if given - :return: SimulationHistoryBuffer - """ - ego_state_buffer: Deque[EgoStateSE2] = deque(ego_states[-buffer_size:], maxlen=buffer_size) - recording_buffer: Deque[Recording] = deque(recordings[-buffer_size:], maxlen=buffer_size) - - return cls( - 
ego_state_buffer=ego_state_buffer, - recording_buffer=recording_buffer, - sample_interval=sample_interval, - ) - - @staticmethod - def initialize_from_scene( - buffer_size: int, scene: AbstractScene, recording_type: Type[Recording] - ) -> Simulation2DHistoryBuffer: - """ - Initializes ego_state_buffer and recording_buffer from scene - :param buffer_size: size of the buffer - :param scene: Simulation scene - :param recording_type: Recording type used for the simulation - """ - - if recording_type == DetectionRecording: - observation_getter = scene.get_detection_recording_at_iteration - # elif recording_type == SensorRecording: - # observation_getter = scenario.get_past_sensors - else: - raise ValueError(f"No matching recording type for {recording_type} for history!") - - history_iterations = [-iteration for iteration in range(1, scene.get_number_of_history_iterations() + 1)] - - past_observation = list(observation_getter(iteration) for iteration in history_iterations) - past_ego_states = list( - scene.get_ego_state_at_iteration(iteration).ego_state_se2 for iteration in history_iterations - ) - - return Simulation2DHistoryBuffer.initialize_from_list( - buffer_size=buffer_size, - ego_states=past_ego_states, - recordings=past_observation, - sample_interval=scene.log_metadata.timestep_seconds, - ) diff --git a/d123/simulation/metrics/__init__.py b/d123/simulation/metrics/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/d123/simulation/metrics/sim_agents/__init__.py b/d123/simulation/metrics/sim_agents/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/d123/simulation/metrics/sim_agents/histogram_metric.py b/d123/simulation/metrics/sim_agents/histogram_metric.py deleted file mode 100644 index cc4aeaef..00000000 --- a/d123/simulation/metrics/sim_agents/histogram_metric.py +++ /dev/null @@ -1,218 +0,0 @@ -from typing import Dict, List, Optional, Tuple - -import matplotlib.pyplot as plt -import numpy as np -import numpy.typing as npt - - -class HistogramIntersectionMetric: - def __init__(self, min_val: float, max_val: float, n_bins: int, name: str, weight: float = 1.0): - self.min_val = min_val - self.max_val = max_val - self._n_bins = n_bins - self._bin_edges = np.linspace(min_val, max_val, n_bins + 1) - self._weight = weight - self._name = name - - self._aggregate_objects: bool = False - self._independent_timesteps: bool = True - - def _create_histogram(self, data: npt.NDArray[np.float64], normalize: bool = True) -> np.ndarray: - hist, _ = np.histogram(data, bins=self._bin_edges) - - if normalize: - # Normalize to create probability distribution - hist = hist.astype(float) - if hist.sum() > 0: - hist = hist / hist.sum() - - return hist - - def _calculate_intersection(self, dist1: npt.NDArray[np.float64], dist2: npt.NDArray[np.float64]) -> float: - hist1 = self._create_histogram(dist1, normalize=True) - hist2 = self._create_histogram(dist2, normalize=True) - intersection = np.sum(np.minimum(hist1, hist2)) - return intersection - - def _calculate_bhattacharyya(self, dist1: npt.NDArray[np.int_], dist2: npt.NDArray[np.int_]) -> float: - hist1 = self._create_histogram(dist1, normalize=True) - hist2 = self._create_histogram(dist2, normalize=True) - bhattacharyya_coeff = np.sum(np.sqrt(hist1 * hist2)) - return bhattacharyya_coeff - - def compute( - self, dist1: npt.NDArray[np.float64], dist2: npt.NDArray[np.float64], log_mask: npt.NDArray[np.bool_] - ) -> Dict[str, float]: - assert dist1.shape[0] == dist2.shape[0], "Distributions must have the same 
number of objects" - assert dist1.ndim == 2 - assert dist2.ndim == 2 - assert log_mask.ndim == 2 - - if len(dist1) == 0: - return { - f"{self._name}_intersection": 1.0, - # f"{self._name}_bhattacharyya": 1.0, - f"{self._name}_score": self._weight * 1.0, - } - - intersection = 0.0 - bhattacharyya = 0.0 - - if self._independent_timesteps: - # (n_objects, n_rollouts * n_steps) - for obj_dist1, obj_dist2, obj_mask in zip(dist1, dist2, log_mask): - intersection += self._calculate_intersection(obj_dist1[obj_mask], obj_dist2[obj_mask]) - bhattacharyya += self._calculate_bhattacharyya(obj_dist1[obj_mask], obj_dist2[obj_mask]) - intersection /= dist1.shape[0] # Average intersection over all objects - bhattacharyya /= dist1.shape[0] # Average Bhattacharyya coefficient over all objects - - else: - raise NotImplementedError - - return { - f"{self._name}_intersection": float(intersection), - # f"{self._name}_bhattacharyya": float(bhattacharyya), - f"{self._name}_score": self._weight * float(intersection), - } - - def plot_histograms( - self, - dist1: npt.NDArray[np.float64], - dist2: npt.NDArray[np.float64], - mask: Optional[npt.NDArray[np.bool_]] = None, - labels: Optional[Tuple[str, str]] = None, - title: str = "Histogram Comparison", - ) -> None: - def _apply_mask( - data: npt.NDArray[np.float64], mask: Optional[npt.NDArray[np.bool_]] - ) -> npt.NDArray[np.float64]: - flat_data = [] - for obj_data, obj_mask in zip(data, mask): - if mask is not None: - flat_data.extend(obj_data[obj_mask].tolist()) - else: - flat_data.extend(obj_data.tolist()) - return np.array(flat_data) - - hist1 = self._create_histogram(_apply_mask(dist1, mask), normalize=True) - hist2 = self._create_histogram(_apply_mask(dist2, mask), normalize=True) - - bin_centers = (self._bin_edges[:-1] + self._bin_edges[1:]) / 2 - width = (self.max_val - self.min_val) / self._n_bins - - plt.figure(figsize=(10, 6)) - - if labels is None: - labels = ("Distribution 1", "Distribution 2") - - plt.bar(bin_centers, hist1, width, alpha=0.5, label=labels[0], color="blue") - plt.bar(bin_centers, hist2, width, alpha=0.5, label=labels[1], color="red") - - plt.xlabel("Value") - plt.ylabel("Probability Density") - plt.title(title) - plt.legend() - plt.grid(True, alpha=0.3) - plt.show() - - -class BinaryHistogramIntersectionMetric: - def __init__(self, name: str, weight: float = 1.0): - self._name = name - self._weight = weight - self.aggregate_objects: bool = False - self.independent_timesteps: bool = True - - def _create_histogram(self, data: List[int], normalize: bool = True) -> np.ndarray: - # Binary histogram: bins are [0, 1] - hist = np.zeros(2, dtype=float) - data = np.asarray(data) - hist[0] = np.sum(data == 0) - hist[1] = np.sum(data == 1) - if normalize and hist.sum() > 0: - hist = hist / hist.sum() - return hist - - def _calculate_intersection(self, dist1: npt.NDArray[np.int_], dist2: npt.NDArray[np.int_]) -> float: - hist1 = self._create_histogram(dist1, normalize=True) - hist2 = self._create_histogram(dist2, normalize=True) - intersection = np.sum(np.minimum(hist1, hist2)) - return intersection - - def _calculate_bhattacharyya(self, dist1: npt.NDArray[np.int_], dist2: npt.NDArray[np.int_]) -> float: - hist1 = self._create_histogram(dist1, normalize=True) - hist2 = self._create_histogram(dist2, normalize=True) - bhattacharyya_coeff = np.sum(np.sqrt(hist1 * hist2)) - return bhattacharyya_coeff - - def compute( - self, dist1: npt.NDArray[np.int_], dist2: npt.NDArray[np.int_], log_mask: npt.NDArray[np.bool_] - ) -> Dict[str, float]: - assert 
dist1.shape[0] == dist2.shape[0], "Distributions must have the same number of objects" - assert dist1.ndim == 2 - assert dist2.ndim == 2 - assert log_mask.ndim == 2 - - if len(dist1) == 0: - return { - f"{self._name}_intersection": 1.0, - # f"{self._name}_bhattacharyya": 1.0, - f"{self._name}_score": self._weight * 1.0, - } - - intersection = 0.0 - bhattacharyya = 0.0 - - if self.independent_timesteps: - for obj_dist1, obj_dist2, obj_mask in zip(dist1, dist2, log_mask): - intersection += self._calculate_intersection(obj_dist1[obj_mask], obj_dist2[obj_mask]) - bhattacharyya += self._calculate_bhattacharyya(obj_dist1[obj_mask], obj_dist2[obj_mask]) - intersection /= dist1.shape[0] - bhattacharyya /= dist1.shape[0] - else: - raise NotImplementedError - - return { - f"{self._name}_intersection": float(intersection), - # f"{self._name}_bhattacharyya": float(bhattacharyya), - f"{self._name}_score": self._weight * float(intersection), - } - - def plot_histograms( - self, - dist1: npt.NDArray[np.int_], - dist2: npt.NDArray[np.int_], - mask: Optional[npt.NDArray[np.bool_]] = None, - labels: Optional[Tuple[str, str]] = None, - title: str = "Binary Histogram Comparison", - ) -> None: - def _apply_mask(data: npt.NDArray[np.int_], mask: Optional[npt.NDArray[np.bool_]]) -> npt.NDArray[np.int_]: - flat_data = [] - for obj_data, obj_mask in zip(data, mask): - if mask is not None: - flat_data.extend(obj_data[obj_mask].tolist()) - else: - flat_data.extend(obj_data.tolist()) - return np.array(flat_data) - - hist1 = self._create_histogram(_apply_mask(dist1, mask), normalize=True) - hist2 = self._create_histogram(_apply_mask(dist2, mask), normalize=True) - - bin_centers = np.array([0, 1]) - width = 0.4 - - plt.figure(figsize=(6, 4)) - - if labels is None: - labels = ("Distribution 1", "Distribution 2") - - plt.bar(bin_centers - width / 2, hist1, width, alpha=0.5, label=labels[0], color="blue") - plt.bar(bin_centers + width / 2, hist2, width, alpha=0.5, label=labels[1], color="red") - - plt.xlabel("Value") - plt.ylabel("Probability") - plt.title(title) - plt.xticks([0, 1]) - plt.legend() - plt.grid(True, alpha=0.3) - plt.show() diff --git a/d123/simulation/metrics/sim_agents/interaction_based.py b/d123/simulation/metrics/sim_agents/interaction_based.py deleted file mode 100644 index 3f9fca21..00000000 --- a/d123/simulation/metrics/sim_agents/interaction_based.py +++ /dev/null @@ -1,75 +0,0 @@ -from typing import Final, List - -import numpy as np -import numpy.typing as npt - -from d123.datatypes.scene.arrow.utils.arrow_getters import BoxDetectionWrapper -from d123.geometry.geometry_index import BoundingBoxSE2Index -from d123.geometry.utils.bounding_box_utils import bbse2_array_to_polygon_array - -MAX_OBJECT_DISTANCE: Final[float] = 50.0 - - -def _get_collision_feature( - agent_array: npt.NDArray[np.float64], box_detections_list: List[BoxDetectionWrapper] -) -> npt.NDArray[np.bool]: - """ - Extracts the collision feature from the agent array. - :param agent_array: The agent array containing bounding box information. - :return: A boolean array indicating collisions. 
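Both metric classes above reduce to the same histogram-intersection score: normalize two histograms and sum the bin-wise minima, so identical distributions score 1.0 and disjoint ones 0.0. A self-contained numeric sketch; the `[0, 25]` range with 10 bins mirrors the speed metric configured later in `sim_agents.py`:

```python
import numpy as np


def histogram_intersection(a: np.ndarray, b: np.ndarray, bin_edges: np.ndarray) -> float:
    """Sum of bin-wise minima of two normalized histograms."""
    ha, _ = np.histogram(a, bins=bin_edges)
    hb, _ = np.histogram(b, bins=bin_edges)
    ha = ha / ha.sum() if ha.sum() > 0 else ha.astype(float)
    hb = hb / hb.sum() if hb.sum() > 0 else hb.astype(float)
    return float(np.minimum(ha, hb).sum())


edges = np.linspace(0.0, 25.0, 11)  # 10 bins over [0, 25], as in the speed metric
rng = np.random.default_rng(0)
same = histogram_intersection(rng.uniform(0, 25, 5000), rng.uniform(0, 25, 5000), edges)
disjoint = histogram_intersection(rng.uniform(0, 5, 5000), rng.uniform(20, 25, 5000), edges)
print(f"{same:.2f} vs {disjoint:.2f}")  # close to 1.0 vs exactly 0.0
```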
- """ - assert agent_array.ndim == 3 - assert agent_array.shape[-1] == len(BoundingBoxSE2Index) - assert agent_array.shape[1] == len(box_detections_list) - n_objects, n_iterations = agent_array.shape[:2] - collision_feature = np.zeros((n_objects, n_iterations), dtype=np.bool_) - - agent_polygon_array = bbse2_array_to_polygon_array(agent_array) - for iteration, box_detections in enumerate(box_detections_list): - occupancy_map = box_detections.occupancy_map - for agent_idx in range(n_objects): - agent_polygon = agent_polygon_array[agent_idx, iteration] - intersecting_tokens = occupancy_map.intersects(agent_polygon) - collision_feature[agent_idx, iteration] = len(intersecting_tokens) > 1 - - return collision_feature - - -def _get_object_distance_feature( - agent_array: npt.NDArray[np.float64], - agents_mask: npt.NDArray[np.bool], - box_detections_list: List[BoxDetectionWrapper], -) -> npt.NDArray[np.float64]: - """ - Extracts the collision feature from the agent array. - :param agent_array: The agent array containing bounding box information. - :return: A boolean array indicating collisions. - """ - assert agent_array.ndim == 3 - assert agent_array.shape[-1] == len(BoundingBoxSE2Index) - assert agent_array.shape[1] == len(box_detections_list) - n_objects, n_iterations = agent_array.shape[:2] - object_distance_feature = np.zeros((n_objects, n_iterations), dtype=np.float64) - - agent_polygon_array = bbse2_array_to_polygon_array(agent_array) - for iteration, box_detections in enumerate(box_detections_list): - if agents_mask[:, iteration].any(): - - occupancy_map = box_detections.occupancy_map - _, distances = occupancy_map.query_nearest( - agent_polygon_array[agents_mask[:, iteration], iteration], - exclusive=True, - return_distance=True, - all_matches=False, - ) - if len(distances) == 0: - distances = np.full((n_objects,), MAX_OBJECT_DISTANCE, dtype=np.float64) - - if len(agent_polygon_array[agents_mask[:, iteration]]) == 1: - distances = min(distances.min(), MAX_OBJECT_DISTANCE) - - object_distance_feature[agents_mask[:, iteration], iteration] = distances - - object_distance_feature = np.clip(object_distance_feature, 0.0, MAX_OBJECT_DISTANCE) - - return object_distance_feature diff --git a/d123/simulation/metrics/sim_agents/kinematics.py b/d123/simulation/metrics/sim_agents/kinematics.py deleted file mode 100644 index 9d032360..00000000 --- a/d123/simulation/metrics/sim_agents/kinematics.py +++ /dev/null @@ -1,165 +0,0 @@ -import numpy as np -import numpy.typing as npt -from scipy.signal import savgol_filter - -from d123.geometry.bounding_box.bounding_box_index import BoundingBoxSE2Index - -SECONDS_PER_ITERATION = 0.1 - - -def _masked_diff(y: npt.NDArray[np.float64], mask: npt.NDArray[np.bool], axis: int = 1) -> npt.NDArray[np.float64]: - """ - Computes the difference between successive elements of y, applying the mask. - :param y: The input array. - :param mask: A boolean mask indicating valid elements. - :return: An array of differences with the same shape as y. - """ - - diff = np.zeros_like(y, dtype=np.float64) - diff[:, 1:] = np.diff(y, axis=axis) - diff[:, 0] = diff[:, 1] - diff[~mask] = 0.0 - - return diff - - -def _get_linear_speed_from_agents_array( - agents_array: npt.NDArray[np.float64], mask: npt.NDArray[np.bool] -) -> npt.NDArray[np.float64]: - """ - Extracts the linear speed from the agents array. - :param agents_array: The agents array containing bounding box data. - :return: An array of linear speeds. 
- """ - assert agents_array.ndim == 3 - assert agents_array.shape[-1] == len(BoundingBoxSE2Index) - - n_agents, n_iterations = agents_array.shape[:2] - linear_speed = np.zeros((n_agents, n_iterations), dtype=np.float64) - linear_speed[:, 1:] = ( - np.linalg.norm(np.diff(agents_array[:, :, BoundingBoxSE2Index.XY], axis=1), axis=-1) / SECONDS_PER_ITERATION - ) - linear_speed[:, 0] = linear_speed[:, 1] - linear_speed[~mask] = 0.0 - - return linear_speed - - -def _get_linear_acceleration_from_agents_array( - agents_array: npt.NDArray[np.float64], mask: npt.NDArray[np.bool] -) -> npt.NDArray[np.float64]: - """ - Extracts the linear acceleration from the agents array. - :param agents_array: The agents array containing bounding box data. - :return: An array of linear accelerations. - """ - assert agents_array.ndim == 3 - assert agents_array.shape[-1] == len(BoundingBoxSE2Index) - - n_agents, n_iterations = agents_array.shape[:2] - linear_acceleration = np.zeros((n_agents, n_iterations), dtype=np.float64) - - linear_speed = _get_linear_speed_from_agents_array(agents_array, mask) - linear_acceleration[:, 1:] = np.diff(linear_speed, axis=1) / SECONDS_PER_ITERATION - linear_acceleration[:, 0] = linear_acceleration[:, 1] - linear_acceleration[~mask] = 0.0 - - return linear_acceleration - - -def _get_yaw_rate_from_agents_array( - agents_array: npt.NDArray[np.float64], mask: npt.NDArray[np.bool] -) -> npt.NDArray[np.float64]: - """ - Extracts the yaw rate from the agents array. - :param agents_array: The agents array containing bounding box data. - :param mask: A boolean mask indicating valid elements. - :return: An array of yaw rates. - """ - assert agents_array.ndim == 3 - assert agents_array.shape[-1] == len(BoundingBoxSE2Index) - - n_agents, n_iterations = agents_array.shape[:2] - headings = agents_array[:, :, BoundingBoxSE2Index.YAW] - heading_rate = _phase_unwrap(_masked_diff(headings, mask, axis=1)) / SECONDS_PER_ITERATION - return heading_rate - - -def _get_yaw_acceleration_from_agents_array( - agents_array: npt.NDArray[np.float64], mask: npt.NDArray[np.bool] -) -> npt.NDArray[np.float64]: - assert agents_array.ndim == 3 - assert agents_array.shape[-1] == len(BoundingBoxSE2Index) - - n_agents, n_iterations = agents_array.shape[:2] - yaw_rate = _get_yaw_rate_from_agents_array(agents_array, mask) - yaw_acceleration = np.zeros((n_agents, n_iterations), dtype=np.float64) - yaw_acceleration[:, 1:] = np.diff(yaw_rate, axis=1) / SECONDS_PER_ITERATION - yaw_acceleration[:, 0] = yaw_acceleration[:, 1] - yaw_acceleration[~mask] = 0.0 - return yaw_acceleration - - -def _phase_unwrap(headings: npt.NDArray[np.float64]) -> npt.NDArray[np.float64]: - """ - Returns an array of heading angles equal mod 2 pi to the input heading angles, - and such that the difference between successive output angles is less than or - equal to pi radians in absolute value - :param headings: An array of headings (radians) - :return The phase-unwrapped equivalent headings. - """ - # There are some jumps in the heading (e.g. from -np.pi to +np.pi) which causes approximation of yaw to be very large. - # We want unwrapped[j] = headings[j] - 2*pi*adjustments[j] for some integer-valued adjustments making the absolute value of - # unwrapped[j+1] - unwrapped[j] at most pi: - # -pi <= headings[j+1] - headings[j] - 2*pi*(adjustments[j+1] - adjustments[j]) <= pi - # -1/2 <= (headings[j+1] - headings[j])/(2*pi) - (adjustments[j+1] - adjustments[j]) <= 1/2 - # So adjustments[j+1] - adjustments[j] = round((headings[j+1] - headings[j]) / (2*pi)). 
- two_pi = 2.0 * np.pi - adjustments = np.zeros_like(headings) - adjustments[..., 1:] = np.cumsum(np.round(np.diff(headings, axis=-1) / two_pi), axis=-1) - unwrapped = headings - two_pi * adjustments - return unwrapped - - -def _approximate_derivatives( - y: npt.NDArray[np.float64], - x: npt.NDArray[np.float64], - window_length: int = 5, - poly_order: int = 2, - deriv_order: int = 1, - axis: int = -1, -) -> npt.NDArray[np.float32]: - """ - Given two equal-length sequences y and x, compute an approximation to the n-th - derivative of some function interpolating the (x, y) data points, and return its - values at the x's. We assume the x's are increasing and equally-spaced. - :param y: The dependent variable (say of length n) - :param x: The independent variable (must have the same length n). Must be strictly - increasing and equally-spaced. - :param window_length: The order (default 5) of the Savitsky-Golay filter used. - (Ignored if the x's are not equally-spaced.) Must be odd and at least 3 - :param poly_order: The degree (default 2) of the filter polynomial used. Must - be less than the window_length - :param deriv_order: The order of derivative to compute (default 1) - :param axis: The axis of the array x along which the filter is to be applied. Default is -1. - :return Derivatives. - """ - window_length = min(window_length, len(x)) - - if not (poly_order < window_length): - raise ValueError(f"{poly_order} < {window_length} does not hold!") - - dx = np.diff(x, axis=-1) - if not (dx > 0).all(): - raise RuntimeError("dx is not monotonically increasing!") - - dx = dx.mean() - derivative: npt.NDArray[np.float32] = savgol_filter( - y, - polyorder=poly_order, - window_length=window_length, - deriv=deriv_order, - delta=dx, - axis=axis, - ) - return derivative diff --git a/d123/simulation/metrics/sim_agents/map_based.py b/d123/simulation/metrics/sim_agents/map_based.py deleted file mode 100644 index 8c5c0673..00000000 --- a/d123/simulation/metrics/sim_agents/map_based.py +++ /dev/null @@ -1,151 +0,0 @@ -from typing import Dict, Final, List - -import numpy as np -import numpy.typing as npt -import shapely - -from d123.conversion.maps.abstract_map import AbstractMap -from d123.conversion.maps.abstract_map_objects import AbstractLane -from d123.conversion.maps.map_datatypes import MapLayer -from d123.geometry.geometry_index import BoundingBoxSE2Index, Corners2DIndex, StateSE2Index -from d123.geometry.se import StateSE2 -from d123.geometry.utils.bounding_box_utils import bbse2_array_to_corners_array -from d123.geometry.utils.rotation_utils import normalize_angle - -MAX_LANE_CENTER_DISTANCE: Final[float] = 10.0 - - -def _get_offroad_feature( - agents_array: npt.NDArray[np.float64], agents_mask: npt.NDArray[np.bool], map_api: AbstractMap -) -> npt.NDArray[np.bool]: - - assert agents_array.shape[-1] == len(BoundingBoxSE2Index) - n_objects, n_iterations = agents_array.shape[:2] - - offroad_feature = np.zeros((n_objects, n_iterations), dtype=np.bool_) - - agent_shapely_corners = shapely.creation.points(bbse2_array_to_corners_array(agents_array)).flatten() - corner_indices = np.arange(n_iterations * n_objects * len(Corners2DIndex)).reshape( - n_objects, n_iterations, len(Corners2DIndex) - ) - - output = map_api.query_object_ids( - agent_shapely_corners, - layers=[ - MapLayer.INTERSECTION, - MapLayer.LANE_GROUP, - MapLayer.CARPARK, - MapLayer.GENERIC_DRIVABLE, - ], - predicate="within", - ) - list_all_corners = [] - for _, object_ids in output.items(): - list_all_corners.extend(list(object_ids)) - 
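A minimal check of the Savitzky-Golay derivative call that `_approximate_derivatives` wraps, under the same assumptions (equally spaced, strictly increasing x); the sine test signal is illustrative:

```python
import numpy as np
from scipy.signal import savgol_filter

t = np.arange(0.0, 2.0, 0.1)  # equally spaced, like simulation iterations
y = np.sin(t)
dy = savgol_filter(y, window_length=5, polyorder=2, deriv=1, delta=t[1] - t[0])
# Away from the boundaries the estimate tracks the true derivative cos(t) closely.
print(np.abs(dy[2:-2] - np.cos(t[2:-2])).max())  # on the order of 1e-2 or below
```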
set_of_all_corners = set(list_all_corners) - - for object_idx in range(n_objects): - for iteration in range(n_iterations): - if agents_mask[object_idx, iteration]: - corner_indices_ = set(corner_indices[object_idx, iteration]) - offroad_feature[object_idx, iteration] = not corner_indices_.issubset(set_of_all_corners) - - return offroad_feature - - -def _get_road_center_distance_feature( - agents_array: npt.NDArray[np.float64], agents_mask: npt.NDArray[np.bool], map_api: AbstractMap -) -> npt.NDArray[np.float64]: - - lane_dict: Dict[str, AbstractLane] = {} - - def get_lane_by_id(lane_id: str, lane_dict: Dict[str, AbstractLane]) -> AbstractLane: - if lane_id not in lane_dict.keys(): - lane_dict[lane_id] = map_api.get_map_object(lane_id, MapLayer.LANE) - return lane_dict[lane_id] - - assert agents_array.shape[-1] == len(BoundingBoxSE2Index) - n_objects, n_iterations = agents_array.shape[:2] - - agent_shapely_centers = shapely.creation.points(agents_array[..., BoundingBoxSE2Index.XY]).flatten() - agent_indices = np.arange(n_iterations * n_objects).reshape(n_objects, n_iterations) - - distances = np.full((n_objects, n_iterations), MAX_LANE_CENTER_DISTANCE, dtype=np.float64) - - nearest_query_output = map_api.query_nearest( - agent_shapely_centers, - layers=[MapLayer.LANE], - max_distance=MAX_LANE_CENTER_DISTANCE, - return_all=True, - return_distance=False, - exclusive=False, - )[MapLayer.LANE] - - for object_idx in range(n_objects): - for iteration in range(n_iterations): - agent_idx = agent_indices[object_idx, iteration] - - if (not agents_mask[object_idx, iteration]) or (agent_idx not in nearest_query_output.keys()): - continue - - lane_ids: List[str] = nearest_query_output[agent_idx] - lanes: List[AbstractLane] = [get_lane_by_id(lane_id, lane_dict) for lane_id in lane_ids] - - if len(lanes) == 1: - select_lane = lanes[0] - centerline = select_lane.centerline.polyline_se2 - projected_se2_array = centerline.interpolate(centerline.project(agent_shapely_centers[agent_idx])).array - - elif len(lanes) > 1: - - projected_se2s_array = np.zeros((len(lanes), len(StateSE2Index)), dtype=np.float64) - for lane_idx, lane in enumerate(lanes): - lane: AbstractLane - centerline = lane.centerline.polyline_se2 - projected_se2s_array[lane_idx] = centerline.interpolate( - centerline.project(agent_shapely_centers[agent_idx]) - ).array - se2_distances = circumference_distance_se2_array( - agents_array[object_idx, iteration, BoundingBoxSE2Index.SE2], - projected_se2s_array, - radius=agents_array[object_idx, iteration, BoundingBoxSE2Index.LENGTH] / 2, - ) - projected_se2_array = projected_se2s_array[np.argmin(se2_distances)] - else: - raise ValueError - - distances[object_idx, iteration] = np.linalg.norm( - agents_array[object_idx, iteration, BoundingBoxSE2Index.XY] - - projected_se2_array[BoundingBoxSE2Index.XY] - ) - - del lane_dict - return distances - - -def circumference_distance_se2(state1: StateSE2, state2: StateSE2, radius: float) -> float: - # TODO: Move this to a more appropriate location for general usage. - # Heuristic for defining distance/similarity between two SE2 states. - # Combines the 2D Euclidean distance with the circumference of the yaw difference. 
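A self-contained sketch of the heuristic described in the comment above, with an inline angle normalization standing in for `normalize_angle`; the poses and radius are illustrative:

```python
import numpy as np


def circumference_distance(xy1, yaw1, xy2, yaw2, radius):
    """Euclidean distance plus the arc length swept by the yaw difference at `radius`."""
    positional = np.linalg.norm(np.asarray(xy1, float) - np.asarray(xy2, float))
    yaw_diff = np.abs((yaw1 - yaw2 + np.pi) % (2.0 * np.pi) - np.pi)  # in [0, pi]
    return positional + yaw_diff * radius


# Two poses 1 m apart with a 90 degree heading difference, half-length 2.5 m:
print(circumference_distance((0, 0), 0.0, (1, 0), np.pi / 2, 2.5))  # 1 + (pi/2)*2.5 ~ 4.93
```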
- positional_distance = np.linalg.norm(state1.point_2d.array - state2.point_2d.array) - abs_yaw_difference = np.abs(normalize_angle(state1.yaw - state2.yaw)) - rotation_distance = abs_yaw_difference * radius - return positional_distance + rotation_distance - - -def circumference_distance_se2_array( - state1_se2: npt.NDArray[np.float64], - state2_se2: npt.NDArray[np.float64], - radius: npt.NDArray[np.float64], -) -> npt.NDArray[np.float64]: - # TODO: Move this to a more appropriate location for general usage. - # Heuristic for defining distance/similarity between two SE2 states. - # Combines the 2D Euclidean distance with the circumference of the yaw difference. - positional_distance = np.linalg.norm(state1_se2[..., StateSE2Index.XY] - state2_se2[..., StateSE2Index.XY], axis=-1) - abs_yaw_difference = np.abs( - normalize_angle( - state1_se2[..., StateSE2Index.YAW] - state2_se2[..., StateSE2Index.YAW], - ) - ) - rotation_distance = abs_yaw_difference * radius - return positional_distance + rotation_distance diff --git a/d123/simulation/metrics/sim_agents/sim_agents.py b/d123/simulation/metrics/sim_agents/sim_agents.py deleted file mode 100644 index a06b7435..00000000 --- a/d123/simulation/metrics/sim_agents/sim_agents.py +++ /dev/null @@ -1,225 +0,0 @@ -from dataclasses import dataclass -from typing import Dict, List - -import numpy as np -import numpy.typing as npt - -from d123.common.datatypes.detection.detection import BoxDetection, BoxDetectionWrapper, DetectionType -from d123.conversion.maps.abstract_map import AbstractMap -from d123.conversion.scene.abstract_scene import AbstractScene -from d123.geometry.geometry_index import BoundingBoxSE2Index -from d123.simulation.metrics.sim_agents.histogram_metric import ( - BinaryHistogramIntersectionMetric, - HistogramIntersectionMetric, -) -from d123.simulation.metrics.sim_agents.interaction_based import _get_collision_feature, _get_object_distance_feature -from d123.simulation.metrics.sim_agents.kinematics import ( - _get_linear_acceleration_from_agents_array, - _get_linear_speed_from_agents_array, - _get_yaw_acceleration_from_agents_array, - _get_yaw_rate_from_agents_array, -) -from d123.simulation.metrics.sim_agents.map_based import _get_offroad_feature, _get_road_center_distance_feature -from d123.simulation.metrics.sim_agents.utils import _get_log_agents_array, _get_rollout_agents_array - - -@dataclass -class SimAgentsData: - - mask: npt.NDArray[np.bool] - - # 1. Kinematics - speed: npt.NDArray[np.float64] - acceleration: npt.NDArray[np.float64] - yaw_rate: npt.NDArray[np.float64] - yaw_acceleration: npt.NDArray[np.float64] - - # 2. Interaction based - collision: npt.NDArray[np.bool] - object_distance: npt.NDArray[np.float64] - - # 3. 
Map based - offroad: npt.NDArray[np.bool] - center_distance: npt.NDArray[np.float64] - - -def get_sim_agents_metrics(scene: AbstractScene, agent_rollouts: List[BoxDetectionWrapper]) -> Dict[str, float]: - def get_agent_tokens(agent_rollout: List[BoxDetection]) -> List[str]: - return [ - box_detection.metadata.track_token - for box_detection in agent_rollout - if box_detection.metadata.detection_type == DetectionType.VEHICLE - ] - - # TODO: Add ego vehicle state to the metrics - log_rollouts: List[BoxDetectionWrapper] = [] - - for iteration in range(scene.number_of_iterations): - background_detections = scene.get_box_detections_at_iteration(iteration).box_detections - ego_detection = scene.get_ego_state_at_iteration(iteration).box_detection - log_rollouts.append(BoxDetectionWrapper(background_detections + [ego_detection])) - - initial_agent_tokens = get_agent_tokens(agent_rollouts[0]) - log_agents_array, log_agents_mask = _get_log_agents_array(scene, initial_agent_tokens) - agents_array, agents_mask = _get_rollout_agents_array(agent_rollouts, initial_agent_tokens) - - log_agents_data = _extract_sim_agent_data(log_agents_array, log_agents_mask, log_rollouts, scene.get_map_api()) - agents_data = _extract_sim_agent_data(agents_array, agents_mask, agent_rollouts, scene.get_map_api()) - - results: Dict[str, float] = {} - - # 0. Other data - results.update(_collision_rate(log_agents_data, agents_data)) - results.update(_offroad_rate(log_agents_data, agents_data)) - - # 1. Kinematics metrics - # 1.1 Speed - speed_metric = HistogramIntersectionMetric(min_val=0.0, max_val=25.0, n_bins=10, name="speed", weight=0.05) - speed_result = speed_metric.compute(log_agents_data.speed, agents_data.speed, log_agents_data.mask) - results.update(speed_result) - - # 1.2 Acceleration - acceleration_metric = HistogramIntersectionMetric( - min_val=-12.0, max_val=12.0, n_bins=11, name="acceleration", weight=0.05 - ) - acceleration_result = acceleration_metric.compute( - log_agents_data.acceleration, - agents_data.acceleration, - log_agents_data.mask, - ) - results.update(acceleration_result) - - # 1.3 Yaw rate - yaw_rate_metric = HistogramIntersectionMetric( - min_val=-0.628, max_val=0.628, n_bins=11, name="yaw_rate", weight=0.05 - ) - yaw_rate_result = yaw_rate_metric.compute( - log_agents_data.yaw_rate, - agents_data.yaw_rate, - log_agents_data.mask, - ) - results.update(yaw_rate_result) - - # 1.4 Yaw acceleration - yaw_acceleration_metric = HistogramIntersectionMetric( - min_val=-3.14, max_val=3.14, n_bins=11, name="yaw_acceleration", weight=0.05 - ) - yaw_acceleration_result = yaw_acceleration_metric.compute( - log_agents_data.yaw_acceleration, - agents_data.yaw_acceleration, - log_agents_data.mask, - ) - results.update(yaw_acceleration_result) - - # 2. 
Interaction based - # 2.1 Collision - collision_metric = BinaryHistogramIntersectionMetric(name="collision", weight=0.25) - collision_results = collision_metric.compute( - log_agents_data.collision, - agents_data.collision, - log_agents_data.mask, - ) - results.update(collision_results) - # collision_metric.plot_histograms(logs_collision, agents_collision, log_agents_mask) - - # 2.2 TTC - # TODO: Implement TTC metric - - # 2.3 Object distance - object_distance_metric = HistogramIntersectionMetric( - min_val=0.0, max_val=40.0, n_bins=10, name="object_distance", weight=0.15 - ) - object_distance_results = object_distance_metric.compute( - log_agents_data.object_distance, - agents_data.object_distance, - log_agents_data.mask, - ) - results.update(object_distance_results) - - # 3. Map based - # 3.1 Offroad - offroad_metric = BinaryHistogramIntersectionMetric(name="offroad", weight=0.25) - offroad_results = offroad_metric.compute( - log_agents_data.offroad, - agents_data.offroad, - log_agents_data.mask, - ) - results.update(offroad_results) - - # 3.2 lane center distance - center_distance_metric = HistogramIntersectionMetric( - min_val=0.0, max_val=10.0, n_bins=10, name="center_distance", weight=0.15 - ) - center_distance_results = center_distance_metric.compute( - log_agents_data.center_distance, - agents_data.center_distance, - log_agents_data.mask, - ) - results.update(center_distance_results) - - # 3.3 Traffic light compliance - # TODO: Implement traffic light compliance metric - - results["meta_score"] = sum([score for name, score in results.items() if name.endswith("_score")]) - - return results - - -def _extract_sim_agent_data( - agents_array: npt.NDArray[np.float64], - agents_mask: npt.NDArray[np.bool], - rollout: List[BoxDetectionWrapper], - map_api: AbstractMap, -) -> SimAgentsData: - - assert agents_array.ndim == 3 - assert agents_array.shape[-1] == len(BoundingBoxSE2Index) - assert agents_array.shape[1] == len(rollout) - - # 1. Kinematics - speed = _get_linear_speed_from_agents_array(agents_array, agents_mask) - acceleration = _get_linear_acceleration_from_agents_array(agents_array, agents_mask) - yaw_rate = _get_yaw_rate_from_agents_array(agents_array, agents_mask) - yaw_acceleration = _get_yaw_acceleration_from_agents_array(agents_array, agents_mask) - - # 2. Interaction based - collision = _get_collision_feature(agents_array, rollout) - object_distance = _get_object_distance_feature(agents_array, agents_mask, rollout) - - # 3. 
Map based - offroad = _get_offroad_feature(agents_array, agents_mask, map_api) - center_distance = _get_road_center_distance_feature(agents_array, agents_mask, map_api) - - return SimAgentsData( - mask=agents_mask, - speed=speed, - acceleration=acceleration, - yaw_rate=yaw_rate, - yaw_acceleration=yaw_acceleration, - collision=collision, - object_distance=object_distance, - offroad=offroad, - center_distance=center_distance, - ) - - -def _collision_rate(log_agents_data: SimAgentsData, agents_data: SimAgentsData) -> Dict[str, float]: - - def _collision_rate(agents_data: SimAgentsData) -> npt.NDArray[np.bool_]: - return np.any(agents_data.collision, where=agents_data.mask, axis=1).mean() - - return { - "log_collision_rate": _collision_rate(log_agents_data), - "agents_collision_rate": _collision_rate(agents_data), - } - - -def _offroad_rate(log_agents_data: SimAgentsData, agents_data: SimAgentsData) -> Dict[str, float]: - - def _offroad_rate(agents_data_: SimAgentsData) -> npt.NDArray[np.bool_]: - return np.any(agents_data_.offroad, where=agents_data_.mask, axis=1).mean() - - return { - "log_offroad_rate": _offroad_rate(log_agents_data), - "agents_offroad_rate": _offroad_rate(agents_data), - } diff --git a/d123/simulation/metrics/sim_agents/utils.py b/d123/simulation/metrics/sim_agents/utils.py deleted file mode 100644 index 2d1c6ded..00000000 --- a/d123/simulation/metrics/sim_agents/utils.py +++ /dev/null @@ -1,54 +0,0 @@ -from typing import List, Tuple - -import numpy as np -import numpy.typing as npt - -from d123.common.datatypes.detection.detection import BoxDetectionWrapper -from d123.conversion.scene.abstract_scene import AbstractScene -from d123.geometry.geometry_index import BoundingBoxSE2Index - - -def _get_log_agents_array( - scene: AbstractScene, agent_tokens: List[str] -) -> Tuple[npt.NDArray[np.float64], npt.NDArray[np.float64]]: - - log_agents_array = np.zeros( - (len(agent_tokens), scene.number_of_iterations, len(BoundingBoxSE2Index)), - dtype=np.float64, - ) - log_agents_mask = np.zeros( - (len(agent_tokens), scene.number_of_iterations), - dtype=bool, - ) - - for iteration in range(scene.number_of_iterations): - box_detections = scene.get_box_detections_at_iteration(iteration) - for agent_idx, agent_token in enumerate(agent_tokens): - box_detection = box_detections.get_detection_by_track_token(agent_token) - if box_detection is not None: - log_agents_mask[agent_idx, iteration] = True - log_agents_array[agent_idx, iteration] = box_detection.bounding_box_se2.array - - return log_agents_array, log_agents_mask - - -def _get_rollout_agents_array( - agent_rollouts: List[BoxDetectionWrapper], agent_tokens: List[str] -) -> Tuple[npt.NDArray[np.float64], npt.NDArray[np.float64]]: - rollout_agents_array = np.zeros( - (len(agent_tokens), len(agent_rollouts), len(BoundingBoxSE2Index)), - dtype=np.float64, - ) - rollout_agents_mask = np.zeros( - (len(agent_tokens), len(agent_rollouts)), - dtype=bool, - ) - - for iteration, agent_rollout in enumerate(agent_rollouts): - for agent_idx, agent_token in enumerate(agent_tokens): - box_detection = agent_rollout.get_detection_by_track_token(agent_token) - if box_detection is not None: - rollout_agents_mask[agent_idx, iteration] = True - rollout_agents_array[agent_idx, iteration] = box_detection.bounding_box_se2.array - - return rollout_agents_array, rollout_agents_mask diff --git a/d123/simulation/observation/__init__.py b/d123/simulation/observation/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git 
a/d123/simulation/observation/abstract_observation.py b/d123/simulation/observation/abstract_observation.py deleted file mode 100644 index 3d2ad387..00000000 --- a/d123/simulation/observation/abstract_observation.py +++ /dev/null @@ -1,33 +0,0 @@ -import abc -from abc import abstractmethod -from typing import Optional, Type - -from d123.common.datatypes.recording.abstract_recording import Recording -from d123.common.datatypes.recording.detection_recording import DetectionRecording -from d123.common.datatypes.vehicle_state.ego_state import EgoStateSE2 -from d123.conversion.scene.abstract_scene import AbstractScene -from d123.simulation.time_controller.simulation_iteration import SimulationIteration - - -class AbstractObservation(abc.ABC): - - # Whether the agent class requires the scenario object to be passed at construction time. - # This can be set to true only for oracle planners and cannot be used for submissions. - requires_scene: bool = True - - @abstractmethod - def recording_type(self) -> Type[Recording]: - pass - - @abstractmethod - def reset(self, scene: Optional[AbstractScene]) -> DetectionRecording: - pass - - @abstractmethod - def step( - self, - current_iteration: SimulationIteration, - next_iteration: SimulationIteration, - current_ego_state: EgoStateSE2, - ) -> DetectionRecording: - pass diff --git a/d123/simulation/observation/agents_observation.py b/d123/simulation/observation/agents_observation.py deleted file mode 100644 index c63fe222..00000000 --- a/d123/simulation/observation/agents_observation.py +++ /dev/null @@ -1,87 +0,0 @@ -from typing import List, Optional, Tuple, Type - -from d123.common.datatypes.detection.detection import BoxDetection -from d123.common.datatypes.detection.detection_types import DetectionType -from d123.common.datatypes.recording.abstract_recording import Recording -from d123.common.datatypes.recording.detection_recording import DetectionRecording -from d123.common.datatypes.vehicle_state.ego_state import EgoStateSE2 -from d123.conversion.scene.abstract_scene import AbstractScene -from d123.datatypes.scene.arrow.utils.arrow_getters import BoxDetectionWrapper -from d123.simulation.agents.abstract_agents import AbstractAgents - -# from d123.simulation.agents.path_following import PathFollowingAgents -from d123.simulation.agents.idm_agents import IDMAgents - -# from d123.simulation.agents.smart_agents import SMARTAgents -from d123.simulation.observation.abstract_observation import AbstractObservation -from d123.simulation.time_controller.simulation_iteration import SimulationIteration - - -class AgentsObservation(AbstractObservation): - - # Whether the agent class requires the scenario object to be passed at construction time. - # This can be set to true only for oracle planners and cannot be used for submissions. - requires_scene: bool = True - - def __init__(self, agents: AbstractAgents) -> None: - super().__init__() - self._scene: Optional[AbstractScene] = None - # self._agents: AbstractAgents = ConstantVelocityAgents() - self._agents: AbstractAgents = IDMAgents() - # self._agents: AbstractAgents = SMARTAgents() - - def recording_type(self) -> Type[Recording]: - return DetectionRecording - - def reset(self, scene: Optional[AbstractScene]) -> DetectionRecording: - assert scene is not None, "Scene must be provided for log replay observation." 
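For reference, a skeletal subclass of the `AbstractObservation` interface defined above. The imports mirror the modules as they existed before this patch, and the class simply freezes the scene's first frame, so this is a sketch rather than a usable observation:

```python
from typing import Optional, Type

from d123.common.datatypes.recording.abstract_recording import Recording
from d123.common.datatypes.recording.detection_recording import DetectionRecording
from d123.common.datatypes.vehicle_state.ego_state import EgoStateSE2
from d123.conversion.scene.abstract_scene import AbstractScene
from d123.simulation.observation.abstract_observation import AbstractObservation
from d123.simulation.time_controller.simulation_iteration import SimulationIteration


class FrozenFrameObservation(AbstractObservation):
    """Illustrative only: returns the detections of iteration 0 at every step."""

    requires_scene: bool = True

    def recording_type(self) -> Type[Recording]:
        return DetectionRecording

    def reset(self, scene: Optional[AbstractScene]) -> DetectionRecording:
        assert scene is not None, "Scene must be provided."
        self._scene = scene
        return self._frame()

    def step(
        self,
        current_iteration: SimulationIteration,
        next_iteration: SimulationIteration,
        current_ego_state: EgoStateSE2,
    ) -> DetectionRecording:
        return self._frame()

    def _frame(self) -> DetectionRecording:
        return DetectionRecording(
            box_detections=self._scene.get_box_detections_at_iteration(0),
            traffic_light_detections=self._scene.get_traffic_light_detections_at_iteration(0),
        )
```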
- self._scene = scene - self._iteration = 0 - - cars, non_cars, _ = _filter_agents_by_type( - self._scene.get_box_detections_at_iteration(self._iteration), - detection_types=[DetectionType.VEHICLE], - ) - cars = self._agents.reset( - map_api=self._scene.get_map_api(), - target_agents=cars, - non_target_agents=non_cars, - scene=self._scene if self._agents.requires_scene else None, - ) - return DetectionRecording( - box_detections=BoxDetectionWrapper(cars + non_cars), - traffic_light_detections=self._scene.get_traffic_light_detections_at_iteration(self._iteration), - ) - - def step( - self, - current_iteration: SimulationIteration, - next_iteration: SimulationIteration, - current_ego_state: EgoStateSE2, - ) -> DetectionRecording: - assert self._scene is not None, "Scene must be provided for log replay observation." - self._iteration += 1 - _, non_cars, _ = _filter_agents_by_type( - self._scene.get_box_detections_at_iteration(self._iteration), - detection_types=[DetectionType.VEHICLE], - ) - cars = self._agents.step(non_target_agents=non_cars) - return DetectionRecording( - box_detections=BoxDetectionWrapper(cars + non_cars), - traffic_light_detections=self._scene.get_traffic_light_detections_at_iteration(self._iteration), - ) - - -def _filter_agents_by_type( - detections: BoxDetectionWrapper, detection_types: List[DetectionType] -) -> Tuple[List[BoxDetection], List[BoxDetection], List[int]]: - - in_types, not_in_types, in_indices = [], [], [] - for detection_idx, detection in enumerate(detections): - if detection.metadata.detection_type in detection_types: - in_types.append(detection) - in_indices.append(detection_idx) - else: - not_in_types.append(detection) - - return in_types, not_in_types, in_indices diff --git a/d123/simulation/observation/log_replay_observation.py b/d123/simulation/observation/log_replay_observation.py deleted file mode 100644 index b9f41fae..00000000 --- a/d123/simulation/observation/log_replay_observation.py +++ /dev/null @@ -1,47 +0,0 @@ -from typing import Optional, Type - -from d123.common.datatypes.recording.abstract_recording import Recording -from d123.common.datatypes.recording.detection_recording import DetectionRecording -from d123.common.datatypes.vehicle_state.ego_state import EgoStateSE2 -from d123.conversion.scene.abstract_scene import AbstractScene -from d123.simulation.observation.abstract_observation import AbstractObservation -from d123.simulation.time_controller.simulation_iteration import SimulationIteration - - -class LogReplayObservation(AbstractObservation): - - # Whether the agent class requires the scenario object to be passed at construction time. - # This can be set to true only for oracle planners and cannot be used for submissions. - requires_scene: bool = True - - def __init__(self) -> None: - """ - Initialize the log replay observation. - """ - super().__init__() - self._scene: Optional[AbstractScene] = None - - def recording_type(self) -> Type[Recording]: - return DetectionRecording - - def reset(self, scene: Optional[AbstractScene]) -> DetectionRecording: - assert scene is not None, "Scene must be provided for log replay observation." 
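The `_filter_agents_by_type` helper above is a single-pass three-way partition. A standalone sketch of the same logic, with plain `(track_token, detection_type)` tuples standing in for `BoxDetection` objects:

```python
from typing import Iterable, List, Tuple


def partition(detections: Iterable[Tuple[str, str]], keep: List[str]):
    """Split detections into (matching, non-matching, indices of matching)."""
    in_types, not_in_types, in_indices = [], [], []
    for idx, det in enumerate(detections):
        if det[1] in keep:
            in_types.append(det)
            in_indices.append(idx)
        else:
            not_in_types.append(det)
    return in_types, not_in_types, in_indices


print(partition([("a", "VEHICLE"), ("b", "PEDESTRIAN"), ("c", "VEHICLE")], ["VEHICLE"]))
# ([('a', 'VEHICLE'), ('c', 'VEHICLE')], [('b', 'PEDESTRIAN')], [0, 2])
```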
- self._scene = scene - self._iteration = 0 - - return DetectionRecording( - box_detections=self._scene.get_box_detections_at_iteration(self._iteration), - traffic_light_detections=self._scene.get_traffic_light_detections_at_iteration(self._iteration), - ) - - def step( - self, - current_iteration: SimulationIteration, - next_iteration: SimulationIteration, - current_ego_state: EgoStateSE2, - ) -> DetectionRecording: - self._iteration += 1 - return DetectionRecording( - box_detections=self._scene.get_box_detections_at_iteration(self._iteration), - traffic_light_detections=self._scene.get_traffic_light_detections_at_iteration(self._iteration), - ) diff --git a/d123/simulation/planning/.gitkeep b/d123/simulation/planning/.gitkeep deleted file mode 100644 index e69de29b..00000000 diff --git a/d123/simulation/planning/__init__.py b/d123/simulation/planning/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/d123/simulation/planning/abstract_planner.py b/d123/simulation/planning/abstract_planner.py deleted file mode 100644 index c7a56569..00000000 --- a/d123/simulation/planning/abstract_planner.py +++ /dev/null @@ -1,40 +0,0 @@ -# TODO: Remove or implement this placeholder - - -from dataclasses import dataclass -from typing import List - -from d123.conversion.maps.abstract_map import AbstractMap -from d123.simulation.history.simulation_history_buffer import Simulation2DHistoryBuffer -from d123.simulation.time_controller.simulation_iteration import SimulationIteration - - -@dataclass(frozen=True) -class PlannerInitialization: - """ - This class represents required data to initialize a planner. - """ - - route_lane_group_ids: List[str] - map_api: AbstractMap - - -@dataclass(frozen=True) -class PlannerInput: - """ - Input to a planner for which a trajectory should be computed. - """ - - iteration: SimulationIteration - history: Simulation2DHistoryBuffer - - -class AbstractPlanner: - def __init__(self): - self._arg = None - - def step(self): - raise NotImplementedError - - def reset(self): - raise NotImplementedError diff --git a/d123/simulation/planning/planner_output/__init__.py b/d123/simulation/planning/planner_output/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/d123/simulation/planning/planner_output/abstract_planner_output.py b/d123/simulation/planning/planner_output/abstract_planner_output.py deleted file mode 100644 index e5d2cf7f..00000000 --- a/d123/simulation/planning/planner_output/abstract_planner_output.py +++ /dev/null @@ -1,8 +0,0 @@ -import abc - - -class AbstractPlannerOutput(abc.ABC): - """Abstract class for planner output.""" - - def __init__(self): - pass diff --git a/d123/simulation/planning/planner_output/action_planner_output.py b/d123/simulation/planning/planner_output/action_planner_output.py deleted file mode 100644 index 6d1ff54a..00000000 --- a/d123/simulation/planning/planner_output/action_planner_output.py +++ /dev/null @@ -1,29 +0,0 @@ -from d123.common.datatypes.vehicle_state.ego_state import DynamicStateSE2, EgoStateSE2 -from d123.geometry.vector import Vector2D -from d123.simulation.planning.planner_output.abstract_planner_output import AbstractPlannerOutput - - -class ActionPlannerOutput(AbstractPlannerOutput): - - def __init__(self, acceleration: float, steering_rate: float, ego_state: EgoStateSE2): - """ - Initializes the ActionTrajectory. - :param acceleration: Longitudinal acceleration [m/s^2]. - :param steering_rate: Steering rate [rad/s]. - :param ego_state: Ego state at the start of the action. 
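For intuition, a hedged sketch of how an `(acceleration, steering_rate)` action like the one stored by `ActionPlannerOutput` could be integrated with a kinematic bicycle model. The state layout, wheel base, and time step here are illustrative assumptions, not the project's controller:

```python
import numpy as np


def bicycle_step(x, y, yaw, v, steering, accel, steering_rate, wheel_base=3.0, dt=0.1):
    """One explicit-Euler step of a kinematic bicycle under an action input."""
    x += v * np.cos(yaw) * dt
    y += v * np.sin(yaw) * dt
    yaw += v * np.tan(steering) / wheel_base * dt
    v += accel * dt
    steering += steering_rate * dt
    return x, y, yaw, v, steering


# 5 m/s straight ahead, accelerating at 1 m/s^2 while turning the wheel slightly:
print(bicycle_step(0.0, 0.0, 0.0, 5.0, 0.0, accel=1.0, steering_rate=0.05))
```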
- """ - - self._acceleration = acceleration - self._steering_rate = steering_rate - self._ego_state = ego_state - - @property - def dynamic_state_se2(self) -> DynamicStateSE2: - - return DynamicStateSE2( - velocity=self._ego_state.dynamic_state_se2.velocity, - acceleration=Vector2D(self._acceleration, 0.0), - angular_velocity=self._ego_state.dynamic_state_se2.angular_velocity, - tire_steering_rate=self._steering_rate, - angular_acceleration=0.0, - ) diff --git a/d123/simulation/planning/planner_output/trajectory_planner_output.py b/d123/simulation/planning/planner_output/trajectory_planner_output.py deleted file mode 100644 index 2a394d68..00000000 --- a/d123/simulation/planning/planner_output/trajectory_planner_output.py +++ /dev/null @@ -1,7 +0,0 @@ -from d123.simulation.planning.planner_output.abstract_planner_output import AbstractPlannerOutput - - -class TrajectoryPlannerOutput(AbstractPlannerOutput): - - def __init__(self): - pass diff --git a/d123/simulation/simulation_2d.py b/d123/simulation/simulation_2d.py deleted file mode 100644 index 2c15dcd0..00000000 --- a/d123/simulation/simulation_2d.py +++ /dev/null @@ -1,171 +0,0 @@ -from __future__ import annotations - -import logging -from typing import Any, Optional, Tuple, Type - -from d123.conversion.scene.abstract_scene import AbstractScene -from d123.simulation.callback.abstract_callback import AbstractCallback -from d123.simulation.callback.multi_callback import MultiCallback -from d123.simulation.history.simulation_history import Simulation2DHistory, Simulation2DHistorySample -from d123.simulation.history.simulation_history_buffer import Simulation2DHistoryBuffer -from d123.simulation.planning.abstract_planner import PlannerInitialization, PlannerInput -from d123.simulation.planning.planner_output.abstract_planner_output import AbstractPlannerOutput -from d123.simulation.simulation_2d_setup import Simulation2DSetup - -logger = logging.getLogger(__name__) - - -class Simulation2D: - - def __init__( - self, - setup: Simulation2DSetup, - callback: Optional[AbstractCallback] = None, - ): - - self._setup: Simulation2DSetup = setup - self._callback = MultiCallback([]) if callback is None else callback - - # History where the steps of a simulation are stored - self._history = Simulation2DHistory() - - # The + 1 here is to account for duration. For example, 20 steps at 0.1s starting at 0s will have a duration - # of 1.9s. At 21 steps the duration will achieve the target 2s duration. - self._history_buffer: Optional[Simulation2DHistoryBuffer] = None - - # Flag that keeps track whether simulation is still running - self._is_simulation_running = True - - # Lazy loaded in `.reset()` method - self._scene: Optional[AbstractScene] = None - - def __reduce__(self) -> Tuple[Type[Simulation2D], Tuple[Any, ...]]: - """ - Hints on how to reconstruct the object when pickling. - :return: Object type and constructor arguments to be used. - """ - return self.__class__, (self._setup, self._callback) - - def is_simulation_running(self) -> bool: - """ - Check whether a simulation reached the end - :return True if simulation hasn't reached the end, otherwise false. - """ - return not self._setup.time_controller.reached_end() and self._is_simulation_running - - def reset(self, scene: AbstractScene) -> Tuple[PlannerInitialization, PlannerInput]: - """ - Reset all internal states of simulation. - """ - - # 1. Reset the scene object - self._scene = scene - - # 2. 
Reset history and setup
-        self._history = Simulation2DHistory()  # TODO: refactor
-        self._history.reset(scene)
-        simulation_iteration = self._setup.time_controller.reset(self._scene)
-        observation = self._setup.observations.reset(self._scene)
-        ego_state = self._setup.ego_controller.reset(self._scene)
-
-        # 3. Reinitialize history buffer
-        self._history_buffer = Simulation2DHistoryBuffer.initialize_from_scene(
-            self._scene.get_number_of_history_iterations(),
-            self._scene,
-            self._setup.observations.recording_type(),
-        )
-        self._history_buffer.append(ego_state, observation)
-
-        # 4. Restart simulation
-        self._is_simulation_running = True
-
-        # 5. Fill planner input and initialization
-        planner_initialization = PlannerInitialization(
-            route_lane_group_ids=self._scene.get_route_lane_group_ids(simulation_iteration.index),
-            map_api=self._scene.get_map_api(),
-        )
-        planner_input = PlannerInput(
-            iteration=simulation_iteration,
-            history=self._history_buffer,
-        )
-
-        return planner_initialization, planner_input
-
-    def step(self, planner_output: AbstractPlannerOutput) -> PlannerInput:
-
-        if self._history_buffer is None:
-            raise RuntimeError("Simulation was not initialized!")
-
-        if not self.is_simulation_running():
-            raise RuntimeError("Simulation is not running, simulation cannot be propagated!")
-
-        # Measurements
-        current_iteration = self._setup.time_controller.get_iteration()
-        current_ego_state, current_observation = self._history_buffer.current_state
-
-        # Add new sample to history
-        logger.debug(f"Adding to history: {current_iteration.index}")
-        self._history.add_sample(
-            Simulation2DHistorySample(
-                current_iteration,
-                current_ego_state,
-                planner_output,
-                current_observation,
-            )
-        )
-
-        # Propagate state to next iteration
-        next_iteration, reached_end = self._setup.time_controller.step()
-
-        # Propagate state
-        next_ego_state = self._setup.ego_controller.step(
-            current_iteration,
-            next_iteration,
-            current_ego_state,
-            planner_output,
-        )
-        next_observation = self._setup.observations.step(
-            current_iteration,
-            next_iteration,
-            current_ego_state,
-        )
-
-        if reached_end:
-            self._is_simulation_running = False
-
-        # Append new state into history buffer
-        self._history_buffer.append(next_ego_state, next_observation)
-        planner_input = PlannerInput(iteration=next_iteration, history=self._history_buffer)
-        return planner_input
-
-    @property
-    def scene(self) -> Optional[AbstractScene]:
-        """
-        :return: Used scene in this simulation.
-        """
-        return self._scene
-
-    @property
-    def callback(self) -> AbstractCallback:
-        """
-        :return: Callback for this simulation.
-        """
-        return self._callback
-
-    @property
-    def history(self) -> Simulation2DHistory:
-        """
-        :return: History from the simulation.
-        """
-        return self._history
-
-    @property
-    def history_buffer(self) -> Simulation2DHistoryBuffer:
-        """
-        :return: Simulation2DHistoryBuffer from the simulation.
-        """
-        if self._history_buffer is None:
-            raise RuntimeError(
-                "_history_buffer is None. Please initialize the buffer by calling Simulation2D.reset()"
-            )
-        return self._history_buffer
diff --git a/d123/simulation/simulation_2d_setup.py b/d123/simulation/simulation_2d_setup.py
deleted file mode 100644
index 737d3e6c..00000000
--- a/d123/simulation/simulation_2d_setup.py
+++ /dev/null
@@ -1,26 +0,0 @@
-from dataclasses import dataclass
-
-from d123.simulation.controller.abstract_controller import AbstractEgoController
-from d123.simulation.observation.abstract_observation import AbstractObservation
-from d123.simulation.time_controller.abstract_time_controller import AbstractTimeController
-
-
-@dataclass
-class Simulation2DSetup:
-    """Setup class for constructing a 2D Simulation."""
-
-    time_controller: AbstractTimeController
-    observations: AbstractObservation
-    ego_controller: AbstractEgoController
-
-    def __post_init__(self) -> None:
-        """Post-initialization sanity checks."""
-        assert isinstance(
-            self.time_controller, AbstractTimeController
-        ), "Error: time_controller must inherit from AbstractTimeController!"
-        assert isinstance(
-            self.observations, AbstractObservation
-        ), "Error: observations must inherit from AbstractObservation!"
-        assert isinstance(
-            self.ego_controller, AbstractEgoController
-        ), "Error: ego_controller must inherit from AbstractEgoController!"
diff --git a/d123/simulation/time_controller/__init__.py b/d123/simulation/time_controller/__init__.py
deleted file mode 100644
index e69de29b..00000000
diff --git a/d123/simulation/time_controller/abstract_time_controller.py b/d123/simulation/time_controller/abstract_time_controller.py
deleted file mode 100644
index 135da686..00000000
--- a/d123/simulation/time_controller/abstract_time_controller.py
+++ /dev/null
@@ -1,47 +0,0 @@
-from __future__ import annotations
-
-import abc
-from typing import Tuple
-
-from d123.conversion.scene.abstract_scene import AbstractScene
-from d123.simulation.time_controller.simulation_iteration import SimulationIteration
-
-
-class AbstractTimeController(abc.ABC):
-    """
-    Generic simulation time manager.
-    """
-
-    @abc.abstractmethod
-    def get_iteration(self) -> SimulationIteration:
-        """
-        Get the current simulation iteration.
-        :return: The current simulation iteration (index and time point).
-        """
-
-    @abc.abstractmethod
-    def reset(self, scene: AbstractScene) -> SimulationIteration:
-        """
-        Reset the time controller (all internal states should be reset, if any).
-        """
-
-    @abc.abstractmethod
-    def step(self) -> Tuple[SimulationIteration, bool]:
-        """
-        Advance to the next simulation iteration.
-        :return: A tuple containing the next SimulationIteration and a boolean indicating if the simulation has reached its end.
-        """
-
-    @abc.abstractmethod
-    def reached_end(self) -> bool:
-        """
-        Check if we have reached the end of the simulation.
-        :return: True if the simulation has reached its end state.
-        """
-
-    @abc.abstractmethod
-    def number_of_iterations(self) -> int:
-        """
-        The number of iterations the simulation should be running for.
-        :return: Number of iterations of simulation.
- """ diff --git a/d123/simulation/time_controller/log_time_controller.py b/d123/simulation/time_controller/log_time_controller.py deleted file mode 100644 index 3be18673..00000000 --- a/d123/simulation/time_controller/log_time_controller.py +++ /dev/null @@ -1,44 +0,0 @@ -from typing import Optional, Tuple - -from d123.conversion.scene.abstract_scene import AbstractScene -from d123.simulation.time_controller.abstract_time_controller import ( - AbstractTimeController, -) -from d123.simulation.time_controller.simulation_iteration import SimulationIteration - - -class LogTimeController(AbstractTimeController): - """ - Class handling simulation time and completion. - """ - - def __init__(self): - """ - Initialize simulation control. - """ - self._current_iteration_index: int = 0 - self._scene: Optional[AbstractScene] = None - - def reset(self, scene: AbstractScene) -> SimulationIteration: - """Inherited, see superclass.""" - self._scene = scene - self._current_iteration_index = 0 - return self.get_iteration() - - def get_iteration(self) -> SimulationIteration: - """Inherited, see superclass.""" - scene_time = self._scene.get_timepoint_at_iteration(self._current_iteration_index) - return SimulationIteration(time_point=scene_time, index=self._current_iteration_index) - - def step(self) -> Tuple[SimulationIteration, bool]: - """Inherited, see superclass.""" - self._current_iteration_index += 1 - return self.get_iteration(), self.reached_end() - - def reached_end(self) -> bool: - """Inherited, see superclass.""" - return self._current_iteration_index >= self.number_of_iterations() - 1 - - def number_of_iterations(self) -> int: - """Inherited, see superclass.""" - return self._scene.number_of_iterations diff --git a/d123/simulation/time_controller/simulation_iteration.py b/d123/simulation/time_controller/simulation_iteration.py deleted file mode 100644 index 7304bf78..00000000 --- a/d123/simulation/time_controller/simulation_iteration.py +++ /dev/null @@ -1,34 +0,0 @@ -from dataclasses import dataclass - -from d123.common.datatypes.time.time_point import TimePoint - - -@dataclass -class SimulationIteration: - """ - Simulation step time and index. - """ - - time_point: TimePoint # A time point along simulation - - # Iteration in the simulation, starting from 0. - # In closed loop this represents the n-th sample of the simulation. - index: int - - def __post_init__(self) -> None: - """Post-init index sanity check.""" - assert self.index >= 0, f"Iteration must be >= 0, but it is {self.index}!" - - @property - def time_us(self) -> int: - """ - :return: time in micro seconds. - """ - return int(self.time_point.time_us) - - @property - def time_s(self) -> float: - """ - :return: Time in seconds. 
- """ - return float(self.time_point.time_s) diff --git a/d123/tests/.gitkeep b/d123/tests/.gitkeep deleted file mode 100644 index e69de29b..00000000 diff --git a/d123/training/__init__.py b/d123/training/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/d123/training/feature_builder/smart_feature_builder.py b/d123/training/feature_builder/smart_feature_builder.py deleted file mode 100644 index 0dc94d03..00000000 --- a/d123/training/feature_builder/smart_feature_builder.py +++ /dev/null @@ -1,344 +0,0 @@ -from enum import IntEnum -from typing import Dict, Final, List, Optional, Tuple - -import numpy as np -import numpy.typing as npt -import shapely - -from d123.common.datatypes.detection.detection import BoxDetection, BoxDetectionWrapper -from d123.common.datatypes.detection.detection_types import DetectionType -from d123.common.visualization.color.default import TrafficLightStatus -from d123.conversion.maps.abstract_map import MapLayer -from d123.conversion.maps.abstract_map_objects import ( - AbstractCarpark, - AbstractCrosswalk, - AbstractGenericDrivable, - AbstractLaneGroup, -) -from d123.conversion.scene.abstract_scene import AbstractScene -from d123.geometry import BoundingBoxSE2, PolylineSE2, StateSE2 -from d123.geometry.geometry_index import StateSE2Index -from d123.geometry.transform.transform_se2 import convert_absolute_to_relative_se2_array - -# TODO: Hind feature builder behind abstraction. - - -class SMARTMapTokenType(IntEnum): - LANE = 0 - LANE_GROUP_BOUNDARY = 1 - CROSSWALK = 2 - CARPARK = 3 - GENERIC_DRIVABLE = 4 - - -class SMARTMapTokenPlType(IntEnum): - BOUNDARY = 0 - CENTERLINE = 1 - POLYGON = 2 - - -START_ITERATION: Final[int] = 0 - - -class SMARTFeatureBuilder: - def __init__(self): - pass - - def build_features(self, scene: AbstractScene): - - feature_dict = {"scenario_id": scene.token} - - # Optionally, you use a different origin instead - origin: StateSE2 = scene.get_ego_state_at_iteration(START_ITERATION).bounding_box.center.state_se2 - - map_features = _build_map_features(scene, origin) - feature_dict.update(map_features) - agent_features = _build_agent_features(scene, origin) - feature_dict.update(agent_features) - - return feature_dict - - -def _build_map_features(scene: AbstractScene, origin: StateSE2) -> Dict[str, np.ndarray]: - - # TODO: Add to config - width, height = 200, 200 - num_points = 3 - segment_length = 5.0 - - # create map extent polygon - map_bounding_box = BoundingBoxSE2(origin, height, width) - - map_api = scene.get_map_api() - map_objects = map_api.query( - map_bounding_box.shapely_polygon, - layers=[ - MapLayer.LANE_GROUP, - MapLayer.CROSSWALK, - MapLayer.CARPARK, - MapLayer.GENERIC_DRIVABLE, - ], - predicate="intersects", - ) - - # Traffic light data - traffic_lights = scene.get_traffic_light_detections_at_iteration(START_ITERATION) - - traj_se2: List[npt.NDArray[np.float64]] = [] - types: List[int] = [] - pl_types: List[int] = [] - pl_light_types: List[int] = [] - - # 1. 
Add lane - for lane_group in map_objects[MapLayer.LANE_GROUP]: - lane_group: AbstractLaneGroup - is_intersection = lane_group.intersection is not None - - for boundary in [lane_group.right_boundary.polyline_se2, lane_group.left_boundary.polyline_se2]: - boundary_traj_se2 = _split_segments( - boundary, - num_points=num_points, - segment_length=segment_length, - map_bounding_box=map_bounding_box, - ) - traj_se2.extend(boundary_traj_se2) - types.extend([int(SMARTMapTokenType.LANE_GROUP_BOUNDARY)] * len(boundary_traj_se2)) - pl_types.extend([int(SMARTMapTokenPlType.BOUNDARY)] * len(boundary_traj_se2)) - pl_light_types.extend([int(TrafficLightStatus.OFF)] * len(boundary_traj_se2)) - - for lane in lane_group.lanes: - lane_traffic_light = traffic_lights.get_detection_by_lane_id(lane.id) - centerline = lane.centerline.polyline_se2 - lane_traj_se2 = _split_segments( - centerline, - num_points=num_points, - segment_length=segment_length, - map_bounding_box=map_bounding_box, - ) - - traj_se2.extend(lane_traj_se2) - types.extend([int(SMARTMapTokenType.LANE)] * len(lane_traj_se2)) - pl_types.extend([int(SMARTMapTokenPlType.CENTERLINE)] * len(lane_traj_se2)) - if lane_traffic_light is None: - if is_intersection: - pl_light_types.extend([int(TrafficLightStatus.UNKNOWN)] * len(lane_traj_se2)) - else: - pl_light_types.extend([int(TrafficLightStatus.OFF)] * len(lane_traj_se2)) - else: - pl_light_types.extend([int(lane_traffic_light.status)] * len(lane_traj_se2)) - - # 2. Crosswalks - for crosswalk in map_objects[MapLayer.CROSSWALK]: - crosswalk: AbstractCrosswalk - crosswalk_traj_se2 = _split_segments( - crosswalk.outline_3d.polyline_se2, - num_points=num_points, - segment_length=segment_length, - map_bounding_box=map_bounding_box, - ) - traj_se2.extend(crosswalk_traj_se2) - types.extend([int(SMARTMapTokenType.CROSSWALK)] * len(crosswalk_traj_se2)) - pl_types.extend([int(SMARTMapTokenPlType.POLYGON)] * len(crosswalk_traj_se2)) - pl_light_types.extend([int(TrafficLightStatus.OFF)] * len(crosswalk_traj_se2)) - - # 3. Parking - for carpark in map_objects[MapLayer.CARPARK]: - carpark: AbstractCarpark - carpark_traj_se2 = _split_segments( - carpark.outline_3d.polyline_se2, - num_points=num_points, - segment_length=segment_length, - map_bounding_box=map_bounding_box, - ) - traj_se2.extend(carpark_traj_se2) - types.extend([int(SMARTMapTokenType.CARPARK)] * len(carpark_traj_se2)) - pl_types.extend([int(SMARTMapTokenPlType.POLYGON)] * len(carpark_traj_se2)) - pl_light_types.extend([int(TrafficLightStatus.OFF)] * len(carpark_traj_se2)) - - # 4. 
Generic drivable
-    for generic_drivable in map_objects[MapLayer.GENERIC_DRIVABLE]:
-        generic_drivable: AbstractGenericDrivable
-        drivable_traj_se2 = _split_segments(
-            generic_drivable.outline_3d.polyline_se2,
-            num_points=num_points,
-            segment_length=segment_length,
-            map_bounding_box=map_bounding_box,
-        )
-        traj_se2.extend(drivable_traj_se2)
-        types.extend([int(SMARTMapTokenType.GENERIC_DRIVABLE)] * len(drivable_traj_se2))
-        pl_types.extend([int(SMARTMapTokenPlType.POLYGON)] * len(drivable_traj_se2))
-        pl_light_types.extend([int(TrafficLightStatus.OFF)] * len(drivable_traj_se2))
-
-    assert len(traj_se2) == len(types) == len(pl_types) == len(pl_light_types)
-
-    traj_se2 = np.array(traj_se2, dtype=np.float64)
-    types = np.array(types, dtype=np.uint8)
-    pl_types = np.array(pl_types, dtype=np.uint8)
-    pl_light_types = np.array(pl_light_types, dtype=np.uint8)
-    traj_se2 = convert_absolute_to_relative_se2_array(origin, traj_se2)
-
-    return {
-        "map_save": {
-            "traj_pos": traj_se2[..., StateSE2Index.XY],
-            "traj_theta": traj_se2[..., 0, StateSE2Index.YAW],
-        },
-        "pt_token": {
-            "type": types,
-            "pl_type": pl_types,
-            "light_type": pl_light_types,
-            "num_nodes": len(traj_se2),
-        },
-    }
-
-
-def _build_agent_features(scene: AbstractScene, origin: StateSE2) -> Dict[str, Dict[str, Any]]:
-    iteration_indices = np.arange(
-        -scene.get_number_of_history_iterations(),
-        scene.number_of_iterations,
-    )
-    # print(iteration_indices[scene.get_number_of_history_iterations()])
-    num_steps = len(iteration_indices)
-
-    target_types = [DetectionType.VEHICLE, DetectionType.PEDESTRIAN, DetectionType.BICYCLE]
-    box_detections_list = [scene.get_box_detections_at_iteration(iteration) for iteration in iteration_indices]
-    target_detections: List[List[BoxDetection]] = []
-    target_indices: List[List[int]] = []
-    for box_detections in box_detections_list:
-        in_types, _, in_indices = _filter_agents_by_type(box_detections, target_types)
-        target_detections.append(in_types)
-        target_indices.append(in_indices)
-
-    # initial_agents = [
-    #     detection.metadata.track_token for detection in target_detections[scene.get_number_of_history_iterations()]
-    # ]
-    other_start_iteration = scene.get_number_of_history_iterations()
-    initial_agents = [detection.metadata.track_token for detection in target_detections[other_start_iteration]]
-    initial_indices = target_indices[other_start_iteration]
-    num_agents = len(initial_agents) + 1  # + 1 for ego vehicle
-
-    def detection_type_to_index(detection_type: DetectionType) -> int:
-        if detection_type == DetectionType.VEHICLE:
-            return 0
-        elif detection_type == DetectionType.PEDESTRIAN:
-            return 1
-        elif detection_type == DetectionType.BICYCLE:
-            return 2
-        else:
-            raise ValueError(f"Unsupported detection type: {detection_type}")
-
-    # Fill role, id, type arrays
-    role = np.zeros((num_agents, 3), dtype=bool)
-    id = np.zeros((num_agents), dtype=np.int64)
-    type = np.zeros((num_agents), dtype=np.uint8)
-    extent = np.zeros((num_agents, 3), dtype=np.float32)
-
-    for agent_idx, agent_token in enumerate(initial_agents):
-        detection = box_detections_list[other_start_iteration].get_detection_by_track_token(agent_token)
-        assert detection is not None, f"Agent {agent_token} not found in initial detections."
-
-        # role columns: 0 = ego, 1 = pedestrian/bicycle, 2 = vehicle
-        role_idx = 2 if detection.metadata.detection_type == DetectionType.VEHICLE else 1
-        role[agent_idx, role_idx] = True
-        id[agent_idx] = initial_indices[agent_idx]
-        type[agent_idx] = detection_type_to_index(detection.metadata.detection_type)
-        extent[agent_idx] = [
-            detection.bounding_box.length,
-            detection.bounding_box.width,
-            1.0,
-        ]  # NOTE: fill height with 1.0 as placeholder (not always available)
-
-    # Fill ego vehicle data
-    role[-1, 0] = True
-    id[-1] = -1  # Use -1 for ego vehicle
-    type[-1] = detection_type_to_index(DetectionType.VEHICLE)
-
-    # Fill per-timestep trajectory arrays
-    valid_mask = np.zeros((num_agents, num_steps), dtype=bool)
-    position = np.zeros((num_agents, num_steps, 3), dtype=np.float64)
-    heading = np.zeros((num_agents, num_steps), dtype=np.float64)
-    velocity = np.zeros((num_agents, num_steps, 2), dtype=np.float64)
-
-    for time_idx, iteration in enumerate(iteration_indices):
-        for agent_idx, agent_token in enumerate(initial_agents):
-            detection = box_detections_list[time_idx].get_detection_by_track_token(agent_token)
-            if detection is None:
-                continue
-
-            valid_mask[agent_idx, time_idx] = True
-
-            state_se2 = detection.bounding_box.center.state_se2
-            local_se2_array = convert_absolute_to_relative_se2_array(origin, state_se2.array)
-            position[agent_idx, time_idx, :2] = local_se2_array[..., StateSE2Index.XY]
-            # position[agent_idx, time_idx, 2] = ...  # Is this the z dimension?
-            heading[agent_idx, time_idx] = local_se2_array[..., StateSE2Index.YAW]
-            velocity[agent_idx, time_idx, :] = detection.velocity.array[:2]  # already in local of agent
-
-        # Fill ego vehicle data
-        ego_vehicle_state = scene.get_ego_state_at_iteration(iteration)
-        valid_mask[-1, time_idx] = True
-        local_se2_array = convert_absolute_to_relative_se2_array(
-            origin, ego_vehicle_state.bounding_box.center.state_se2.array
-        )
-        position[-1, time_idx, :2] = local_se2_array[..., StateSE2Index.XY]
-        # position[-1, time_idx, 2] = ...  # Is this the z dimension?
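-        # Presumably the unfilled position[..., 2] slot is elevation; StateSE2 has no
-        # z component, so it is left at zero here (editorial assumption, not verified).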
- heading[-1, time_idx] = local_se2_array[..., StateSE2Index.YAW] - velocity[-1, time_idx, :] = ego_vehicle_state.dynamic_state_se3.velocity.array[:2] # already in local of agent - - return { - "agent": { - "num_nodes": num_agents, - "valid_mask": valid_mask, - "role": role, - "id": id, - "type": type, - "position": position, - "heading": heading, - "velocity": velocity, - "shape": extent, # Placeholder for shape, if needed - } - } - - -def _split_segments( - polyline: PolylineSE2, - num_points: int, - segment_length: float, - map_bounding_box: Optional[BoundingBoxSE2] = None, -) -> List[npt.NDArray[np.float64]]: - - segments_distances = np.concatenate([np.arange(0.0, polyline.length, step=segment_length), [polyline.length]]) - polygon = map_bounding_box.shapely_polygon if map_bounding_box is not None else None - - segments = [] - for segment_start, segment_end in zip(segments_distances[:-1], segments_distances[1:]): - include_endpoint = True - poses = polyline.interpolate( - np.linspace( - segment_start, - segment_end, - num=num_points, - endpoint=include_endpoint, - ) - ) - if polygon is not None: - points_shapely = shapely.creation.points(poses[(0, -1), :2]) - in_map = any(polygon.contains(points_shapely)) - if not in_map: - continue - segments.append(poses) - - return segments - - -def _filter_agents_by_type( - detections: BoxDetectionWrapper, detection_types: List[DetectionType] -) -> Tuple[List[BoxDetection], List[BoxDetection], List[int]]: - - in_types, not_in_types, in_indices = [], [], [] - for detection_idx, detection in enumerate(detections): - if detection.metadata.detection_type in detection_types: - in_types.append(detection) - in_indices.append(detection_idx) - else: - not_in_types.append(detection) - - return in_types, not_in_types, in_indices diff --git a/d123/training/models/__init__.py b/d123/training/models/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/d123/training/models/motion_forecasting/__init__.py b/d123/training/models/motion_forecasting/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/d123/training/models/sim_agent/__init__.py b/d123/training/models/sim_agent/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/d123/training/models/sim_agent/smart/__init__.py b/d123/training/models/sim_agent/smart/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/d123/training/models/sim_agent/smart/datamodules/__init__.py b/d123/training/models/sim_agent/smart/datamodules/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/d123/training/models/sim_agent/smart/datamodules/scalable_datamodule.py b/d123/training/models/sim_agent/smart/datamodules/scalable_datamodule.py deleted file mode 100644 index 41c318eb..00000000 --- a/d123/training/models/sim_agent/smart/datamodules/scalable_datamodule.py +++ /dev/null @@ -1,95 +0,0 @@ -from typing import Optional - -from lightning import LightningDataModule -from lightning.pytorch.utilities.types import EVAL_DATALOADERS, TRAIN_DATALOADERS -from torch_geometric.loader import DataLoader - -from d123.training.models.sim_agent.smart.datasets.scalable_dataset import MultiDataset - -from .target_builder import WaymoTargetBuilderTrain, WaymoTargetBuilderVal - - -class MultiDataModule(LightningDataModule): - def __init__( - self, - train_batch_size: int, - val_batch_size: int, - test_batch_size: int, - train_raw_dir: str, - val_raw_dir: str, - test_raw_dir: str, - val_tfrecords_splitted: str, - shuffle: bool, - num_workers: 
int, - pin_memory: bool, - persistent_workers: bool, - train_max_num: int, - ) -> None: - super(MultiDataModule, self).__init__() - self.train_batch_size = train_batch_size - self.val_batch_size = val_batch_size - self.test_batch_size = test_batch_size - self.shuffle = shuffle - self.num_workers = num_workers - self.pin_memory = pin_memory - self.persistent_workers = persistent_workers and num_workers > 0 - self.train_raw_dir = train_raw_dir - self.val_raw_dir = val_raw_dir - self.test_raw_dir = test_raw_dir - self.val_tfrecords_splitted = val_tfrecords_splitted - - self.train_transform = WaymoTargetBuilderTrain(train_max_num) - self.val_transform = WaymoTargetBuilderVal() - self.test_transform = WaymoTargetBuilderVal() - - def setup(self, stage: Optional[str] = None) -> None: - if stage == "fit" or stage is None: - self.train_dataset = MultiDataset(self.train_raw_dir, self.train_transform) - self.val_dataset = MultiDataset( - self.val_raw_dir, - self.val_transform, - tfrecord_dir=self.val_tfrecords_splitted, - ) - elif stage == "validate": - self.val_dataset = MultiDataset( - self.val_raw_dir, - self.val_transform, - tfrecord_dir=self.val_tfrecords_splitted, - ) - elif stage == "test": - self.test_dataset = MultiDataset(self.test_raw_dir, self.test_transform) - else: - raise ValueError(f"{stage} should be one of [fit, validate, test]") - - def train_dataloader(self) -> TRAIN_DATALOADERS: - return DataLoader( - self.train_dataset, - batch_size=self.train_batch_size, - shuffle=self.shuffle, - num_workers=self.num_workers, - pin_memory=self.pin_memory, - persistent_workers=self.persistent_workers, - drop_last=False, - ) - - def val_dataloader(self) -> EVAL_DATALOADERS: - return DataLoader( - self.val_dataset, - batch_size=self.val_batch_size, - shuffle=False, - num_workers=self.num_workers, - pin_memory=self.pin_memory, # False - persistent_workers=self.persistent_workers, - drop_last=False, - ) - - def test_dataloader(self) -> EVAL_DATALOADERS: - return DataLoader( - self.test_dataset, - batch_size=self.test_batch_size, - shuffle=False, - num_workers=self.num_workers, # 0 - pin_memory=self.pin_memory, # False - persistent_workers=self.persistent_workers, - drop_last=False, - ) diff --git a/d123/training/models/sim_agent/smart/datamodules/target_builder.py b/d123/training/models/sim_agent/smart/datamodules/target_builder.py deleted file mode 100644 index a62b6786..00000000 --- a/d123/training/models/sim_agent/smart/datamodules/target_builder.py +++ /dev/null @@ -1,62 +0,0 @@ -import numpy as np -import torch -from torch_geometric.data import HeteroData -from torch_geometric.transforms import BaseTransform - - -def _numpy_dict_to_torch(data: dict, device: torch.device = torch.device("cpu")) -> dict: - """ - Convert numpy arrays in a dictionary to torch tensors. - :param data: Dictionary with numpy arrays. - :return: Dictionary with torch tensors. 
- """ - for key, value in data.items(): - if isinstance(value, np.ndarray): - data[key] = torch.tensor(value, device=device) - if data[key].dtype == torch.float64: - data[key] = data[key].to(torch.float32) - elif isinstance(value, dict): - _numpy_dict_to_torch(value) - - -class WaymoTargetBuilderTrain(BaseTransform): - def __init__(self, max_num: int) -> None: - super(WaymoTargetBuilderTrain, self).__init__() - self.step_current = 10 - self.max_num = max_num - - def __call__(self, data) -> HeteroData: - _numpy_dict_to_torch(data) - - pos = data["agent"]["position"] - av_index = torch.where(data["agent"]["role"][:, 0])[0].item() - distance = torch.norm(pos - pos[av_index], dim=-1) - - # we do not believe the perception out of range of 150 meters - data["agent"]["valid_mask"] = data["agent"]["valid_mask"] & (distance < 150) - - # we do not predict vehicle too far away from ego car - role_train_mask = data["agent"]["role"].any(-1) - extra_train_mask = (distance[:, self.step_current] < 100) & ( - data["agent"]["valid_mask"][:, self.step_current + 1 :].sum(-1) >= 5 - ) - - train_mask = extra_train_mask | role_train_mask - if train_mask.sum() > self.max_num: # too many vehicle - _indices = torch.where(extra_train_mask & ~role_train_mask)[0] - selected_indices = _indices[torch.randperm(_indices.size(0))[: self.max_num - role_train_mask.sum()]] - data["agent"]["train_mask"] = role_train_mask - data["agent"]["train_mask"][selected_indices] = True - else: - data["agent"]["train_mask"] = train_mask # [n_agent] - - return HeteroData(data) - - -class WaymoTargetBuilderVal(BaseTransform): - def __init__(self) -> None: - super(WaymoTargetBuilderVal, self).__init__() - - def __call__(self, data) -> HeteroData: - _numpy_dict_to_torch(data) - return HeteroData(data) diff --git a/d123/training/models/sim_agent/smart/datasets/__init__.py b/d123/training/models/sim_agent/smart/datasets/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/d123/training/models/sim_agent/smart/datasets/scalable_dataset.py b/d123/training/models/sim_agent/smart/datasets/scalable_dataset.py deleted file mode 100644 index 0e4b607d..00000000 --- a/d123/training/models/sim_agent/smart/datasets/scalable_dataset.py +++ /dev/null @@ -1,40 +0,0 @@ -import logging -import pickle -from pathlib import Path -from typing import Callable, List, Optional - -from torch_geometric.data import Dataset - -logger = logging.getLogger(__name__) - - -class MultiDataset(Dataset): - def __init__( - self, - raw_dir: str, - transform: Callable, - tfrecord_dir: Optional[str] = None, - ) -> None: - raw_dir = Path(raw_dir) - self._raw_paths = [p.as_posix() for p in sorted(raw_dir.glob("*"))] - self._num_samples = len(self._raw_paths) - - self._tfrecord_dir = Path(tfrecord_dir) if tfrecord_dir is not None else None - - logger.info("Length of {} dataset is ".format(raw_dir) + str(self._num_samples)) - super(MultiDataset, self).__init__(transform=transform, pre_transform=None, pre_filter=None) - - @property - def raw_paths(self) -> List[str]: - return self._raw_paths - - def len(self) -> int: - return self._num_samples - - def get(self, idx: int): - with open(self.raw_paths[idx], "rb") as handle: - data = pickle.load(handle) - - if self._tfrecord_dir is not None: - data["tfrecord_path"] = (self._tfrecord_dir / (data["scenario_id"] + ".tfrecords")).as_posix() - return data diff --git a/d123/training/models/sim_agent/smart/layers/__init__.py b/d123/training/models/sim_agent/smart/layers/__init__.py deleted file mode 100644 index 
e69de29b..00000000 diff --git a/d123/training/models/sim_agent/smart/layers/attention_layer.py b/d123/training/models/sim_agent/smart/layers/attention_layer.py deleted file mode 100644 index 9452662e..00000000 --- a/d123/training/models/sim_agent/smart/layers/attention_layer.py +++ /dev/null @@ -1,113 +0,0 @@ -from typing import Optional, Tuple, Union - -import torch -import torch.nn as nn -from torch_geometric.nn.conv import MessagePassing -from torch_geometric.utils import softmax - -from d123.training.models.sim_agent.smart.utils.weight_init import weight_init - - -class AttentionLayer(MessagePassing): - def __init__( - self, - hidden_dim: int, - num_heads: int, - head_dim: int, - dropout: float, - bipartite: bool, - has_pos_emb: bool, - **kwargs, - ) -> None: - super(AttentionLayer, self).__init__(aggr="add", node_dim=0, **kwargs) - self.num_heads = num_heads - self.head_dim = head_dim - self.has_pos_emb = has_pos_emb - self.scale = head_dim**-0.5 - - self.to_q = nn.Linear(hidden_dim, head_dim * num_heads) - self.to_k = nn.Linear(hidden_dim, head_dim * num_heads, bias=False) - self.to_v = nn.Linear(hidden_dim, head_dim * num_heads) - if has_pos_emb: - self.to_k_r = nn.Linear(hidden_dim, head_dim * num_heads, bias=False) - self.to_v_r = nn.Linear(hidden_dim, head_dim * num_heads) - self.to_s = nn.Linear(hidden_dim, head_dim * num_heads) - self.to_g = nn.Linear(head_dim * num_heads + hidden_dim, head_dim * num_heads) - self.to_out = nn.Linear(head_dim * num_heads, hidden_dim) - self.attn_drop = nn.Dropout(dropout) - self.ff_mlp = nn.Sequential( - nn.Linear(hidden_dim, hidden_dim * 4), - nn.ReLU(inplace=True), - nn.Dropout(dropout), - nn.Linear(hidden_dim * 4, hidden_dim), - ) - if bipartite: - self.attn_prenorm_x_src = nn.LayerNorm(hidden_dim) - self.attn_prenorm_x_dst = nn.LayerNorm(hidden_dim) - else: - self.attn_prenorm_x_src = nn.LayerNorm(hidden_dim) - self.attn_prenorm_x_dst = self.attn_prenorm_x_src - if has_pos_emb: - self.attn_prenorm_r = nn.LayerNorm(hidden_dim) - self.attn_postnorm = nn.LayerNorm(hidden_dim) - self.ff_prenorm = nn.LayerNorm(hidden_dim) - self.ff_postnorm = nn.LayerNorm(hidden_dim) - self.apply(weight_init) - - def forward( - self, - x: Union[torch.Tensor, Tuple[torch.Tensor, torch.Tensor]], - r: Optional[torch.Tensor], - edge_index: torch.Tensor, - ) -> torch.Tensor: - if isinstance(x, torch.Tensor): - x_src = x_dst = self.attn_prenorm_x_src(x) - else: - x_src, x_dst = x - x_src = self.attn_prenorm_x_src(x_src) - x_dst = self.attn_prenorm_x_dst(x_dst) - x = x[1] - if self.has_pos_emb and r is not None: - r = self.attn_prenorm_r(r) - x = x + self.attn_postnorm(self._attn_block(x_src, x_dst, r, edge_index)) - x = x + self.ff_postnorm(self._ff_block(self.ff_prenorm(x))) - return x - - def message( - self, - q_i: torch.Tensor, - k_j: torch.Tensor, - v_j: torch.Tensor, - r: Optional[torch.Tensor], - index: torch.Tensor, - ptr: Optional[torch.Tensor], - ) -> torch.Tensor: - if self.has_pos_emb and r is not None: - k_j = k_j + self.to_k_r(r).view(-1, self.num_heads, self.head_dim) - v_j = v_j + self.to_v_r(r).view(-1, self.num_heads, self.head_dim) - sim = (q_i * k_j).sum(dim=-1) * self.scale - attn = softmax(sim, index, ptr) - self.attention_weight = attn.sum(-1).detach() - attn = self.attn_drop(attn) - return v_j * attn.unsqueeze(-1) - - def update(self, inputs: torch.Tensor, x_dst: torch.Tensor) -> torch.Tensor: - inputs = inputs.view(-1, self.num_heads * self.head_dim) - g = torch.sigmoid(self.to_g(torch.cat([inputs, x_dst], dim=-1))) - return inputs + g * 
(self.to_s(x_dst) - inputs) - - def _attn_block( - self, - x_src: torch.Tensor, - x_dst: torch.Tensor, - r: Optional[torch.Tensor], - edge_index: torch.Tensor, - ) -> torch.Tensor: - q = self.to_q(x_dst).view(-1, self.num_heads, self.head_dim) - k = self.to_k(x_src).view(-1, self.num_heads, self.head_dim) - v = self.to_v(x_src).view(-1, self.num_heads, self.head_dim) - agg = self.propagate(edge_index=edge_index, x_dst=x_dst, q=q, k=k, v=v, r=r) - return self.to_out(agg) - - def _ff_block(self, x: torch.Tensor) -> torch.Tensor: - return self.ff_mlp(x) diff --git a/d123/training/models/sim_agent/smart/layers/fourier_embedding.py b/d123/training/models/sim_agent/smart/layers/fourier_embedding.py deleted file mode 100644 index 7052c42e..00000000 --- a/d123/training/models/sim_agent/smart/layers/fourier_embedding.py +++ /dev/null @@ -1,88 +0,0 @@ -import math -from typing import List, Optional - -import torch -import torch.nn as nn - -from d123.training.models.sim_agent.smart.utils.weight_init import weight_init - - -class FourierEmbedding(nn.Module): - def __init__(self, input_dim: int, hidden_dim: int, num_freq_bands: int) -> None: - super(FourierEmbedding, self).__init__() - self.input_dim = input_dim - self.hidden_dim = hidden_dim - - self.freqs = nn.Embedding(input_dim, num_freq_bands) if input_dim != 0 else None - self.mlps = nn.ModuleList( - [ - nn.Sequential( - nn.Linear(num_freq_bands * 2 + 1, hidden_dim), - nn.LayerNorm(hidden_dim), - nn.ReLU(inplace=True), - nn.Linear(hidden_dim, hidden_dim), - ) - for _ in range(input_dim) - ] - ) - self.to_out = nn.Sequential( - nn.LayerNorm(hidden_dim), - nn.ReLU(inplace=True), - nn.Linear(hidden_dim, hidden_dim), - ) - self.apply(weight_init) - - def forward( - self, - continuous_inputs: Optional[torch.Tensor] = None, - categorical_embs: Optional[List[torch.Tensor]] = None, - ) -> torch.Tensor: - if continuous_inputs is None: - if categorical_embs is not None: - x = torch.stack(categorical_embs).sum(dim=0) - else: - raise ValueError("Both continuous_inputs and categorical_embs are None") - else: - x = continuous_inputs.unsqueeze(-1) * self.freqs.weight * 2 * math.pi - # Warning: if your data are noisy, don't use learnable sinusoidal embedding - x = torch.cat([x.cos(), x.sin(), continuous_inputs.unsqueeze(-1)], dim=-1) - continuous_embs: List[Optional[torch.Tensor]] = [None] * self.input_dim - for i in range(self.input_dim): - continuous_embs[i] = self.mlps[i](x[:, i]) - x = torch.stack(continuous_embs).sum(dim=0) - if categorical_embs is not None: - x = x + torch.stack(categorical_embs).sum(dim=0) - return self.to_out(x) - - -class MLPEmbedding(nn.Module): - def __init__(self, input_dim: int, hidden_dim: int) -> None: - super(MLPEmbedding, self).__init__() - self.input_dim = input_dim - self.hidden_dim = hidden_dim - self.mlp = nn.Sequential( - nn.Linear(input_dim, 128), - nn.LayerNorm(128), - nn.ReLU(inplace=True), - nn.Linear(128, hidden_dim), - nn.LayerNorm(hidden_dim), - nn.ReLU(inplace=True), - nn.Linear(hidden_dim, hidden_dim), - ) - self.apply(weight_init) - - def forward( - self, - continuous_inputs: Optional[torch.Tensor] = None, - categorical_embs: Optional[List[torch.Tensor]] = None, - ) -> torch.Tensor: - if continuous_inputs is None: - if categorical_embs is not None: - x = torch.stack(categorical_embs).sum(dim=0) - else: - raise ValueError("Both continuous_inputs and categorical_embs are None") - else: - x = self.mlp(continuous_inputs) - if categorical_embs is not None: - x = x + torch.stack(categorical_embs).sum(dim=0) - return x 
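A minimal usage sketch of the FourierEmbedding removed above (editorial addition, importable only while the module still exists; the sizes and inputs below are made up, the shapes follow the comments in the class):

import torch

from d123.training.models.sim_agent.smart.layers.fourier_embedding import FourierEmbedding

# Two continuous features per node (e.g. a motion norm and a relative angle),
# each expanded with 64 learnable frequency bands, then fused into a 128-dim feature.
emb = FourierEmbedding(input_dim=2, hidden_dim=128, num_freq_bands=64)
features = torch.rand(10, 2)           # hypothetical [n_node, input_dim] inputs
out = emb(continuous_inputs=features)  # -> [n_node, hidden_dim]
assert out.shape == (10, 128)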
diff --git a/d123/training/models/sim_agent/smart/layers/mlp_layer.py b/d123/training/models/sim_agent/smart/layers/mlp_layer.py deleted file mode 100644 index e28b2d2d..00000000 --- a/d123/training/models/sim_agent/smart/layers/mlp_layer.py +++ /dev/null @@ -1,19 +0,0 @@ -import torch -import torch.nn as nn - -from d123.training.models.sim_agent.smart.utils.weight_init import weight_init - - -class MLPLayer(nn.Module): - def __init__(self, input_dim: int, hidden_dim: int, output_dim: int) -> None: - super(MLPLayer, self).__init__() - self.mlp = nn.Sequential( - nn.Linear(input_dim, hidden_dim), - nn.LayerNorm(hidden_dim), - nn.ReLU(inplace=True), - nn.Linear(hidden_dim, output_dim), - ) - self.apply(weight_init) - - def forward(self, x: torch.Tensor) -> torch.Tensor: - return self.mlp(x) diff --git a/d123/training/models/sim_agent/smart/metrics/__init__.py b/d123/training/models/sim_agent/smart/metrics/__init__.py deleted file mode 100644 index 679bbfc1..00000000 --- a/d123/training/models/sim_agent/smart/metrics/__init__.py +++ /dev/null @@ -1 +0,0 @@ -# from d123.training.models.sim_agent.smart.metrics.wosac_metrics import WOSACMetrics diff --git a/d123/training/models/sim_agent/smart/metrics/cross_entropy.py b/d123/training/models/sim_agent/smart/metrics/cross_entropy.py deleted file mode 100644 index d4c6d55b..00000000 --- a/d123/training/models/sim_agent/smart/metrics/cross_entropy.py +++ /dev/null @@ -1,104 +0,0 @@ -from typing import Optional - -import torch -from torch import Tensor, tensor -from torch.nn.functional import cross_entropy -from torchmetrics.metric import Metric - -from .utils import get_euclidean_targets, get_prob_targets - - -class CrossEntropy(Metric): - - is_differentiable = True - higher_is_better = False - full_state_update = False - - def __init__( - self, - use_gt_raw: bool, - gt_thresh_scale_length: float, # {"veh": 4.8, "cyc": 2.0, "ped": 1.0} - label_smoothing: float, - rollout_as_gt: bool, - ) -> None: - super().__init__() - self.use_gt_raw = use_gt_raw - self.gt_thresh_scale_length = gt_thresh_scale_length - self.label_smoothing = label_smoothing - self.rollout_as_gt = rollout_as_gt - self.add_state("loss_sum", default=tensor(0.0), dist_reduce_fx="sum") - self.add_state("count", default=tensor(0.0), dist_reduce_fx="sum") - - def update( - self, - # ! action that goes from [(10->15), ..., (85->90)] - next_token_logits: Tensor, # [n_agent, 16, n_token] - next_token_valid: Tensor, # [n_agent, 16] - # ! for step {5, 10, ..., 90} and act [(0->5), (5->10), ..., (85->90)] - pred_pos: Tensor, # [n_agent, 18, 2] - pred_head: Tensor, # [n_agent, 18] - pred_valid: Tensor, # [n_agent, 18] - # ! for step {5, 10, ..., 90} - gt_pos_raw: Tensor, # [n_agent, 18, 2] - gt_head_raw: Tensor, # [n_agent, 18] - gt_valid_raw: Tensor, # [n_agent, 18] - # or use the tokenized gt - gt_pos: Tensor, # [n_agent, 18, 2] - gt_head: Tensor, # [n_agent, 18] - gt_valid: Tensor, # [n_agent, 18] - # ! for tokenization - token_agent_shape: Tensor, # [n_agent, 2] - token_traj: Tensor, # [n_agent, n_token, 4, 2] - # ! for filtering intersting agent for training - train_mask: Optional[Tensor] = None, # [n_agent] - # ! for rollout_as_gt - next_token_action: Optional[Tensor] = None, # [n_agent, 16, 3] - **kwargs, - ) -> None: - # ! use raw or tokenized GT - if self.use_gt_raw: - gt_pos = gt_pos_raw - gt_head = gt_head_raw - gt_valid = gt_valid_raw - - # ! GT is valid if it's close to the rollout. 
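-        # Illustration (editorial, assuming token_agent_shape[:, 1] stores the agent
-        # width): with the suggested vehicle scale of 4.8 and a 2.0 m wide vehicle,
-        # GT steps farther than 4.8 * 2.0 = 9.6 m from the rollout are invalidated.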
- if self.gt_thresh_scale_length > 0: - dist = torch.norm(pred_pos - gt_pos, dim=-1) # [n_agent, n_step] - _thresh = token_agent_shape[:, 1] * self.gt_thresh_scale_length # [n_agent] - gt_valid = gt_valid & (dist < _thresh.unsqueeze(1)) # [n_agent, n_step] - - # ! get prob_targets - euclidean_target, euclidean_target_valid = get_euclidean_targets( - pred_pos=pred_pos, - pred_head=pred_head, - pred_valid=pred_valid, - gt_pos=gt_pos, - gt_head=gt_head, - gt_valid=gt_valid, - ) - if self.rollout_as_gt and (next_token_action is not None): - euclidean_target = next_token_action - - prob_target = get_prob_targets( - target=euclidean_target, # [n_agent, n_step, 3] x,y,yaw in local - token_agent_shape=token_agent_shape, # [n_agent, 2] - token_traj=token_traj, # [n_agent, n_token, 4, 2] - ) # [n_agent, n_step, n_token] prob, last dim sum up to 1 - - loss = cross_entropy( - next_token_logits.transpose(1, 2), # [n_agent, n_token, n_step], logits - prob_target.transpose(1, 2), # [n_agent, n_token, n_step], prob - reduction="none", - label_smoothing=self.label_smoothing, - ) # [n_agent, n_step=16] - - # ! weighting final loss [n_agent, n_step] - loss_weighting_mask = next_token_valid & euclidean_target_valid - if self.training: - loss_weighting_mask &= train_mask.unsqueeze(1) # [n_agent, n_step] - - self.loss_sum += (loss * loss_weighting_mask).sum() - self.count += (loss_weighting_mask > 0).sum() - - def compute(self) -> Tensor: - return self.loss_sum / self.count diff --git a/d123/training/models/sim_agent/smart/metrics/ego_nll.py b/d123/training/models/sim_agent/smart/metrics/ego_nll.py deleted file mode 100644 index e4c2ad29..00000000 --- a/d123/training/models/sim_agent/smart/metrics/ego_nll.py +++ /dev/null @@ -1,120 +0,0 @@ -from typing import Optional - -import torch -from torch import Tensor, tensor -from torch.distributions import Categorical, Independent, MixtureSameFamily, Normal -from torchmetrics.metric import Metric - -from .utils import get_euclidean_targets - - -class EgoNLL(Metric): - - is_differentiable = True - higher_is_better = False - full_state_update = False - - def __init__( - self, - use_gt_raw: bool, - gt_thresh_scale_length: float, # {"veh": 4.8, "cyc": 2.0, "ped": 1.0} - hard_assignment: bool, - rollout_as_gt: bool, - ) -> None: - super().__init__() - self.use_gt_raw = use_gt_raw - self.gt_thresh_scale_length = gt_thresh_scale_length - self.hard_assignment = hard_assignment - self.rollout_as_gt = rollout_as_gt - self.add_state("loss_sum", default=tensor(0.0), dist_reduce_fx="sum") - self.add_state("count", default=tensor(0.0), dist_reduce_fx="sum") - - def update( - self, - # ! action that goes from [(10->15), ..., (85->90)] - ego_next_logits: Tensor, # [n_batch, 16, n_k_ego_gmm] - ego_next_poses: Tensor, # [n_batch, 16, n_k_ego_gmm, 3] - ego_next_valid: Tensor, # [n_batch, 16] - ego_next_cov: Tensor, # [2], one for pos, one for heading. - # ! for step {5, 10, ..., 90} and act [(0->5), (5->10), ..., (85->90)] - pred_pos: Tensor, # [n_batch, 18, 2] - pred_head: Tensor, # [n_batch, 18] - pred_valid: Tensor, # [n_batch, 18] - # ! for step {5, 10, ..., 90} - gt_pos_raw: Tensor, # [n_batch, 18, 2] - gt_head_raw: Tensor, # [n_batch, 18] - gt_valid_raw: Tensor, # [n_batch, 18] - # or use the tokenized gt - gt_pos: Tensor, # [n_batch, 18, 2] - gt_head: Tensor, # [n_batch, 18] - gt_valid: Tensor, # [n_batch, 18] - token_agent_shape: Tensor, # [n_agent, 2] - # ! for rollout_as_gt - next_token_action: Optional[Tensor] = None, # [n_batch, 16, 3] - **kwargs, - ) -> None: - # ! 
use raw or tokenized GT - if self.use_gt_raw: - gt_pos = gt_pos_raw - gt_head = gt_head_raw - gt_valid = gt_valid_raw - - # ! GT is valid if it's close to the rollout. - if self.gt_thresh_scale_length > 0: - dist = torch.norm(pred_pos - gt_pos, dim=-1) # [n_agent, n_step] - _thresh = token_agent_shape[:, 1] * self.gt_thresh_scale_length # [n_agent] - gt_valid = gt_valid & (dist < _thresh.unsqueeze(1)) # [n_agent, n_step] - - # ! get prob_targets - target, target_valid = get_euclidean_targets( - pred_pos=pred_pos, - pred_head=pred_head, - pred_valid=pred_valid, - gt_pos=gt_pos, - gt_head=gt_head, - gt_valid=gt_valid, - ) - if self.rollout_as_gt and (next_token_action is not None): - target = next_token_action - - # ! transform yaw angle to unit vector - ego_next_poses = torch.cat( - [ - ego_next_poses[..., :2], - ego_next_poses[..., [-1]].cos(), - ego_next_poses[..., [-1]].sin(), - ], - dim=-1, - ) - ego_next_poses = ego_next_poses.flatten(0, 1) # [n_batch*n_step, K, 4] - cov = ego_next_cov.repeat_interleave(2)[None, None, :].expand(*ego_next_poses.shape) # [n_batch*n_step, K, 4] - - n_batch, n_step = target_valid.shape - target = torch.cat( - [target[..., :2], target[..., [-1]].cos(), target[..., [-1]].sin()], dim=-1 - ) # [n_batch, n_step, 4] - target = target.flatten(0, 1) # [n_batch*n_step, 4] - - ego_next_logits = ego_next_logits.flatten(0, 1) # [n_batch*n_step, K] - if self.hard_assignment: - idx_hard_assign = (ego_next_poses - target.unsqueeze(1))[..., :2].norm(dim=-1).argmin(-1) - n_batch_step = idx_hard_assign.shape[0] - ego_next_poses = ego_next_poses[torch.arange(n_batch_step), idx_hard_assign].unsqueeze(1) - cov = cov[torch.arange(n_batch_step), idx_hard_assign].unsqueeze(1) - ego_next_logits = ego_next_logits[torch.arange(n_batch_step), idx_hard_assign].unsqueeze(1) - - gmm = MixtureSameFamily( - Categorical(logits=ego_next_logits), - Independent(Normal(ego_next_poses, cov), 1), - ) - - loss = -gmm.log_prob(target) # [n_batch*n_step] - loss = loss.view(n_batch, n_step) # [n_batch, n_step] - - loss_weighting_mask = target_valid & ego_next_valid # [n_batch, n_step] - - self.loss_sum += (loss * loss_weighting_mask).sum() - self.count += (loss_weighting_mask > 0).sum() - - def compute(self) -> Tensor: - return self.loss_sum / self.count diff --git a/d123/training/models/sim_agent/smart/metrics/gmm_ade.py b/d123/training/models/sim_agent/smart/metrics/gmm_ade.py deleted file mode 100644 index e31fe477..00000000 --- a/d123/training/models/sim_agent/smart/metrics/gmm_ade.py +++ /dev/null @@ -1,33 +0,0 @@ -import torch -from torch import Tensor, tensor -from torchmetrics import Metric - - -class GMMADE(Metric): - def __init__(self) -> None: - super(GMMADE, self).__init__() - self.add_state("sum", default=tensor(0.0), dist_reduce_fx="sum") - self.add_state("count", default=tensor(0.0), dist_reduce_fx="sum") - - def update( - self, - logits: Tensor, # [n_agent, n_step, n_k] - pred: Tensor, # [n_agent, n_step, n_k, 2] - target: Tensor, # [n_agent, n_step, 2] - valid: Tensor, # [n_agent, n_step] - ) -> None: - n_agent, n_step, _ = logits.shape - idx_max = logits.argmax(-1) # [n_agent, n_step] - pred_max = pred[ - torch.arange(n_agent).unsqueeze(1), - torch.arange(n_step).unsqueeze(0), - idx_max, - ] # [n_agent, n_step, 2] - - dist = torch.norm(pred_max - target, p=2, dim=-1) # [n_agent, n_step] - dist = ((dist * valid).sum(-1)) / (valid.sum(-1) + 1e-6) # [n_agent] - self.sum += dist.sum() - self.count += valid.any(-1).sum() - - def compute(self) -> torch.Tensor: - return self.sum / 
self.count diff --git a/d123/training/models/sim_agent/smart/metrics/min_ade.py b/d123/training/models/sim_agent/smart/metrics/min_ade.py deleted file mode 100644 index 85db59fb..00000000 --- a/d123/training/models/sim_agent/smart/metrics/min_ade.py +++ /dev/null @@ -1,28 +0,0 @@ -import torch -from torch import Tensor, tensor -from torchmetrics import Metric - - -class minADE(Metric): - def __init__(self) -> None: - super(minADE, self).__init__() - self.add_state("sum", default=tensor(0.0), dist_reduce_fx="sum") - self.add_state("count", default=tensor(0.0), dist_reduce_fx="sum") - - def update( - self, - pred: Tensor, # [n_agent, n_rollout, n_step, 2] - target: Tensor, # [n_agent, n_step, 2] - target_valid: Tensor, # [n_agent, n_step] - ) -> None: - - # [n_agent, n_rollout, n_step] - dist = torch.norm(pred - target.unsqueeze(1), p=2, dim=-1) - dist = (dist * target_valid.unsqueeze(1)).sum(-1).min(-1).values # [n_agent] - - dist = dist / (target_valid.sum(-1) + 1e-6) # [n_agent] - self.sum += dist.sum() - self.count += target_valid.any(-1).sum() - - def compute(self) -> torch.Tensor: - return self.sum / self.count diff --git a/d123/training/models/sim_agent/smart/metrics/next_token_cls.py b/d123/training/models/sim_agent/smart/metrics/next_token_cls.py deleted file mode 100644 index ddca37c5..00000000 --- a/d123/training/models/sim_agent/smart/metrics/next_token_cls.py +++ /dev/null @@ -1,27 +0,0 @@ -import torch -from torchmetrics import Metric - - -class TokenCls(Metric): - def __init__(self, max_guesses: int = 6, **kwargs) -> None: - super(TokenCls, self).__init__(**kwargs) - self.add_state("sum", default=torch.tensor(0.0), dist_reduce_fx="sum") - self.add_state("count", default=torch.tensor(0.0), dist_reduce_fx="sum") - self.max_guesses = max_guesses - - def update( - self, - pred: torch.Tensor, # next_token_logits: [n_agent, 16, n_token] - pred_valid: torch.Tensor, # next_token_idx_gt: [n_agent, 16] - target: torch.Tensor, # next_token_idx_gt: [n_agent, 16] - target_valid: torch.Tensor, # [n_agent, 16] - ) -> None: - target = target[..., None] - acc = (torch.topk(pred, k=self.max_guesses, dim=-1)[1] == target).any(dim=-1) - valid_mask = pred_valid & target_valid - acc = acc * valid_mask - self.sum += acc.sum() - self.count += valid_mask.sum() - - def compute(self) -> torch.Tensor: - return self.sum / self.count diff --git a/d123/training/models/sim_agent/smart/metrics/utils.py b/d123/training/models/sim_agent/smart/metrics/utils.py deleted file mode 100644 index 39477b92..00000000 --- a/d123/training/models/sim_agent/smart/metrics/utils.py +++ /dev/null @@ -1,70 +0,0 @@ -from typing import Tuple - -import torch -from torch import Tensor -from torch.nn.functional import one_hot - -from d123.training.models.sim_agent.smart.utils.geometry import wrap_angle -from d123.training.models.sim_agent.smart.utils.rollout import cal_polygon_contour, transform_to_local - - -@torch.no_grad() -def get_prob_targets( - target: Tensor, # [n_agent, n_step, 3] x,y,yaw in local coord - token_agent_shape: Tensor, # [n_agent, 2] - token_traj: Tensor, # [n_agent, n_token, 4, 2] -) -> Tensor: # [n_agent, n_step, n_token] prob, last dim sum up to 1 - # ! 
tokenize to index, then compute prob - contour = cal_polygon_contour( - target[..., :2], # [n_agent, n_step, 2] - target[..., 2], # [n_agent, n_step] - token_agent_shape[:, None, :], # [n_agent, 1, 1, 2] - ) # [n_agent, n_step, 4, 2] in local coord - - # [n_agent, n_step, 1, 4, 2] - [n_agent, 1, n_token, 4, 2] - target_token_index = ( - torch.norm(contour.unsqueeze(2) - token_traj[:, None, :, :, :], dim=-1).sum(-1).argmin(-1) - ) # [n_agent, n_step] - - # [n_agent, n_step, n_token] bool - prob_target = one_hot(target_token_index, num_classes=token_traj.shape[1]) - prob_target = prob_target.to(target.dtype) - return prob_target - - -@torch.no_grad() -def get_euclidean_targets( - pred_pos: Tensor, # [n_agent, 18, 2] - pred_head: Tensor, # [n_agent, 18] - pred_valid: Tensor, # [n_agent, 18] - gt_pos: Tensor, # [n_agent, 18, 2] - gt_head: Tensor, # [n_agent, 18] - gt_valid: Tensor, # [n_agent, 18] -) -> Tuple[Tensor, Tensor]: - """ - Return: action that goes from [(10->15), ..., (85->90)] - target: [n_agent, 16, 3], x,y,yaw - target_valid: [n_agent, 16] - """ - gt_last_pos = gt_pos.roll(shifts=-1, dims=1).flatten(0, 1) - gt_last_head = gt_head.roll(shifts=-1, dims=1).flatten(0, 1) - gt_last_valid = gt_valid.roll(shifts=-1, dims=1) # [n_agent, 18] - gt_last_valid[:, -1:] = False # [n_agent, 18] - - target_pos, target_head = transform_to_local( - pos_global=gt_last_pos.unsqueeze(1), # [n_agent*18, 1, 2] - head_global=gt_last_head.unsqueeze(1), # [n_agent*18, 1] - pos_now=pred_pos.flatten(0, 1), # [n_agent*18, 2] - head_now=pred_head.flatten(0, 1), # [n_agent*18] - ) - target_valid = pred_valid & gt_last_valid # [n_agent, 18] - - target_pos = target_pos.squeeze(1).view(gt_pos.shape) # n_agent, 18, 2] - target_head = wrap_angle(target_head) # [n_agent, 18] - target_head = target_head.squeeze(1).view(gt_head.shape) - target = torch.cat((target_pos, target_head.unsqueeze(-1)), dim=-1) - - # truncate [(5->10), ..., (90->5)] to [(10->15), ..., (85->90)] - target = target[:, 1:-1] # [n_agent, 16, 3], x,y,yaw - target_valid = target_valid[:, 1:-1] # [n_agent, 16] - return target, target_valid diff --git a/d123/training/models/sim_agent/smart/modules/__init__.py b/d123/training/models/sim_agent/smart/modules/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/d123/training/models/sim_agent/smart/modules/smart_decoder.py b/d123/training/models/sim_agent/smart/modules/smart_decoder.py deleted file mode 100644 index da680810..00000000 --- a/d123/training/models/sim_agent/smart/modules/smart_decoder.py +++ /dev/null @@ -1,745 +0,0 @@ -from typing import Dict - -import torch -import torch.nn as nn -from torch import Tensor -from torch_cluster import radius, radius_graph -from torch_geometric.utils import dense_to_sparse, subgraph - -from d123.training.models.sim_agent.smart.layers.attention_layer import AttentionLayer -from d123.training.models.sim_agent.smart.layers.fourier_embedding import FourierEmbedding, MLPEmbedding -from d123.training.models.sim_agent.smart.layers.mlp_layer import MLPLayer -from d123.training.models.sim_agent.smart.smart_config import SMARTConfig, SMARTRolloutSampling -from d123.training.models.sim_agent.smart.utils.geometry import angle_between_2d_vectors, wrap_angle -from d123.training.models.sim_agent.smart.utils.rollout import sample_next_token_traj, transform_to_global -from d123.training.models.sim_agent.smart.utils.weight_init import weight_init - - -class SMARTDecoder(nn.Module): - def __init__(self, model_config: SMARTConfig, n_token_agent: int) -> None: 
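-        # Composes the map encoder with the agent decoder defined below;
-        # n_token_agent sets the output size of the agent token prediction head.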
- super(SMARTDecoder, self).__init__() - self.map_encoder: SMARTMapDecoder = SMARTMapDecoder(model_config) - self.agent_encoder: SMARTAgentDecoder = SMARTAgentDecoder(model_config, n_token_agent=n_token_agent) - - def forward(self, tokenized_map: Dict[str, Tensor], tokenized_agent: Dict[str, Tensor]) -> Dict[str, Tensor]: - map_feature = self.map_encoder(tokenized_map) - pred_dict = self.agent_encoder(tokenized_agent, map_feature) - return pred_dict - - def inference( - self, - tokenized_map: Dict[str, Tensor], - tokenized_agent: Dict[str, Tensor], - sampling_scheme: SMARTRolloutSampling, - ) -> Dict[str, Tensor]: - map_feature = self.map_encoder(tokenized_map) - pred_dict = self.agent_encoder.inference(tokenized_agent, map_feature, sampling_scheme) - return pred_dict - - -class SMARTMapDecoder(nn.Module): - def __init__(self, model_config: SMARTConfig) -> None: - super(SMARTMapDecoder, self).__init__() - - self.model_config = model_config - self.pl2pl_radius = model_config.pl2pl_radius - self.num_layers = model_config.num_map_layers - - self.type_pt_emb = nn.Embedding(10, model_config.hidden_dim) - self.polygon_type_emb = nn.Embedding(4, model_config.hidden_dim) - self.light_pl_emb = nn.Embedding(5, model_config.hidden_dim) - - input_dim_r_pt2pt = 3 - self.r_pt2pt_emb = FourierEmbedding( - input_dim=input_dim_r_pt2pt, - hidden_dim=model_config.hidden_dim, - num_freq_bands=model_config.num_freq_bands, - ) - self.pt2pt_layers = nn.ModuleList( - [ - AttentionLayer( - hidden_dim=model_config.hidden_dim, - num_heads=model_config.num_heads, - head_dim=model_config.head_dim, - dropout=model_config.dropout, - bipartite=False, - has_pos_emb=True, - ) - for _ in range(model_config.num_map_layers) - ] - ) - - # map_token_traj_src: [n_token, 11, 2].flatten(0,1) - self.token_emb = MLPEmbedding(input_dim=22, hidden_dim=model_config.hidden_dim) - self.apply(weight_init) - - def forward(self, tokenized_map: Dict) -> Dict[str, torch.Tensor]: - pos_pt = tokenized_map["position"] - orient_pt = tokenized_map["orientation"] - orient_vector_pt = torch.stack([orient_pt.cos(), orient_pt.sin()], dim=-1) - pt_token_emb_src = self.token_emb(tokenized_map["token_traj_src"]) - x_pt = pt_token_emb_src[tokenized_map["token_idx"]] - - x_pt_categorical_embs = [ - self.type_pt_emb(tokenized_map["type"]), - self.polygon_type_emb(tokenized_map["pl_type"]), - self.light_pl_emb(tokenized_map["light_type"]), - ] - x_pt = x_pt + torch.stack(x_pt_categorical_embs).sum(dim=0) - edge_index_pt2pt = radius_graph( - x=pos_pt, - r=self.pl2pl_radius, - batch=tokenized_map["batch"], - loop=False, - max_num_neighbors=100, - ) - rel_pos_pt2pt = pos_pt[edge_index_pt2pt[0]] - pos_pt[edge_index_pt2pt[1]] - rel_orient_pt2pt = wrap_angle(orient_pt[edge_index_pt2pt[0]] - orient_pt[edge_index_pt2pt[1]]) - r_pt2pt = torch.stack( - [ - torch.norm(rel_pos_pt2pt[:, :2], p=2, dim=-1), - angle_between_2d_vectors( - ctr_vector=orient_vector_pt[edge_index_pt2pt[1]], - nbr_vector=rel_pos_pt2pt[:, :2], - ), - rel_orient_pt2pt, - ], - dim=-1, - ) - r_pt2pt = self.r_pt2pt_emb(continuous_inputs=r_pt2pt, categorical_embs=None) - for i in range(self.num_layers): - x_pt = self.pt2pt_layers[i](x_pt, r_pt2pt, edge_index_pt2pt) - - return { - "pt_token": x_pt, - "position": pos_pt, - "orientation": orient_pt, - "batch": tokenized_map["batch"], - } - - -class SMARTAgentDecoder(nn.Module): - def __init__(self, model_config: SMARTConfig, n_token_agent: int) -> None: - super(SMARTAgentDecoder, self).__init__() - self.model_config = model_config - self.hidden_dim = 
model_config.hidden_dim - self.num_historical_steps = model_config.num_historical_steps - self.num_future_steps = model_config.num_future_steps - self.time_span = ( - model_config.time_span if model_config.time_span is not None else model_config.num_historical_steps - ) - self.pl2a_radius = model_config.pl2a_radius - self.a2a_radius = model_config.a2a_radius - self.num_layers = model_config.num_agent_layers - self.shift = 5 - self.hist_drop_prob = model_config.hist_drop_prob - - input_dim_x_a = 2 - input_dim_r_t = 4 - input_dim_r_pt2a = 3 - input_dim_r_a2a = 3 - input_dim_token = 8 - - self.type_a_emb = nn.Embedding(3, model_config.hidden_dim) - self.shape_emb = MLPLayer(3, model_config.hidden_dim, model_config.hidden_dim) - - self.x_a_emb = FourierEmbedding( - input_dim=input_dim_x_a, - hidden_dim=model_config.hidden_dim, - num_freq_bands=model_config.num_freq_bands, - ) - self.r_t_emb = FourierEmbedding( - input_dim=input_dim_r_t, - hidden_dim=model_config.hidden_dim, - num_freq_bands=model_config.num_freq_bands, - ) - self.r_pt2a_emb = FourierEmbedding( - input_dim=input_dim_r_pt2a, - hidden_dim=model_config.hidden_dim, - num_freq_bands=model_config.num_freq_bands, - ) - self.r_a2a_emb = FourierEmbedding( - input_dim=input_dim_r_a2a, - hidden_dim=model_config.hidden_dim, - num_freq_bands=model_config.num_freq_bands, - ) - self.token_emb_veh = MLPEmbedding(input_dim=input_dim_token, hidden_dim=model_config.hidden_dim) - self.token_emb_ped = MLPEmbedding(input_dim=input_dim_token, hidden_dim=model_config.hidden_dim) - self.token_emb_cyc = MLPEmbedding(input_dim=input_dim_token, hidden_dim=model_config.hidden_dim) - self.fusion_emb = MLPEmbedding(input_dim=self.hidden_dim * 2, hidden_dim=self.hidden_dim) - - self.t_attn_layers = nn.ModuleList( - [ - AttentionLayer( - hidden_dim=model_config.hidden_dim, - num_heads=model_config.num_heads, - head_dim=model_config.head_dim, - dropout=model_config.dropout, - bipartite=False, - has_pos_emb=True, - ) - for _ in range(model_config.num_agent_layers) - ] - ) - self.pt2a_attn_layers = nn.ModuleList( - [ - AttentionLayer( - hidden_dim=model_config.hidden_dim, - num_heads=model_config.num_heads, - head_dim=model_config.head_dim, - dropout=model_config.dropout, - bipartite=True, - has_pos_emb=True, - ) - for _ in range(model_config.num_agent_layers) - ] - ) - self.a2a_attn_layers = nn.ModuleList( - [ - AttentionLayer( - hidden_dim=model_config.hidden_dim, - num_heads=model_config.num_heads, - head_dim=model_config.head_dim, - dropout=model_config.dropout, - bipartite=False, - has_pos_emb=True, - ) - for _ in range(model_config.num_agent_layers) - ] - ) - self.token_predict_head = MLPLayer( - input_dim=model_config.hidden_dim, hidden_dim=model_config.hidden_dim, output_dim=n_token_agent - ) - self.apply(weight_init) - - def agent_token_embedding( - self, - agent_token_index, # [n_agent, n_step] - trajectory_token_veh, # [n_token, 8] - trajectory_token_ped, # [n_token, 8] - trajectory_token_cyc, # [n_token, 8] - pos_a, # [n_agent, n_step, 2] - head_vector_a, # [n_agent, n_step, 2] - agent_type, # [n_agent] - agent_shape, # [n_agent, 3] - inference=False, - ): - n_agent, n_step, traj_dim = pos_a.shape - _device = pos_a.device - - veh_mask = agent_type == 0 - ped_mask = agent_type == 1 - cyc_mask = agent_type == 2 - # [n_token, hidden_dim] - agent_token_emb_veh = self.token_emb_veh(trajectory_token_veh) - agent_token_emb_ped = self.token_emb_ped(trajectory_token_ped) - agent_token_emb_cyc = self.token_emb_cyc(trajectory_token_cyc) - agent_token_emb = 
torch.zeros((n_agent, n_step, self.hidden_dim), device=_device, dtype=pos_a.dtype) - agent_token_emb[veh_mask] = agent_token_emb_veh[agent_token_index[veh_mask]] - agent_token_emb[ped_mask] = agent_token_emb_ped[agent_token_index[ped_mask]] - agent_token_emb[cyc_mask] = agent_token_emb_cyc[agent_token_index[cyc_mask]] - - motion_vector_a = torch.cat( - [ - pos_a.new_zeros(agent_token_index.shape[0], 1, traj_dim), - pos_a[:, 1:] - pos_a[:, :-1], - ], - dim=1, - ) # [n_agent, n_step, 2] - feature_a = torch.stack( - [ - torch.norm(motion_vector_a[:, :, :2], p=2, dim=-1), - angle_between_2d_vectors(ctr_vector=head_vector_a, nbr_vector=motion_vector_a[:, :, :2]), - ], - dim=-1, - ) # [n_agent, n_step, 2] - categorical_embs = [ - self.type_a_emb(agent_type.long()), - self.shape_emb(agent_shape), - ] # List of len=2, shape [n_agent, hidden_dim] - - x_a = self.x_a_emb( - continuous_inputs=feature_a.view(-1, feature_a.size(-1)), - categorical_embs=[v.repeat_interleave(repeats=n_step, dim=0) for v in categorical_embs], - ) # [n_agent*n_step, hidden_dim] - x_a = x_a.view(-1, n_step, self.hidden_dim) # [n_agent, n_step, hidden_dim] - - feat_a = torch.cat((agent_token_emb, x_a), dim=-1) - feat_a = self.fusion_emb(feat_a) - - if inference: - return ( - feat_a, # [n_agent, n_step, hidden_dim] - agent_token_emb, # [n_agent, n_step, hidden_dim] - agent_token_emb_veh, # [n_agent, hidden_dim] - agent_token_emb_ped, # [n_agent, hidden_dim] - agent_token_emb_cyc, # [n_agent, hidden_dim] - veh_mask, # [n_agent] - ped_mask, # [n_agent] - cyc_mask, # [n_agent] - categorical_embs, # List of len=2, shape [n_agent, hidden_dim] - ) - else: - return feat_a # [n_agent, n_step, hidden_dim] - - def build_temporal_edge( - self, - pos_a, # [n_agent, n_step, 2] - head_a, # [n_agent, n_step] - head_vector_a, # [n_agent, n_step, 2], - mask, # [n_agent, n_step] - inference_mask=None, # [n_agent, n_step] - ): - pos_t = pos_a.flatten(0, 1) - head_t = head_a.flatten(0, 1) - head_vector_t = head_vector_a.flatten(0, 1) - - if self.hist_drop_prob > 0 and self.training: - _mask_keep = torch.bernoulli(torch.ones_like(mask) * (1 - self.hist_drop_prob)).bool() - mask = mask & _mask_keep - - if inference_mask is not None: - mask_t = mask.unsqueeze(2) & inference_mask.unsqueeze(1) - else: - mask_t = mask.unsqueeze(2) & mask.unsqueeze(1) - - edge_index_t = dense_to_sparse(mask_t)[0] - edge_index_t = edge_index_t[:, edge_index_t[1] > edge_index_t[0]] - edge_index_t = edge_index_t[:, edge_index_t[1] - edge_index_t[0] <= self.time_span / self.shift] - rel_pos_t = pos_t[edge_index_t[0]] - pos_t[edge_index_t[1]] - rel_pos_t = rel_pos_t[:, :2] - rel_head_t = wrap_angle(head_t[edge_index_t[0]] - head_t[edge_index_t[1]]) - r_t = torch.stack( - [ - torch.norm(rel_pos_t, p=2, dim=-1), - angle_between_2d_vectors(ctr_vector=head_vector_t[edge_index_t[1]], nbr_vector=rel_pos_t), - rel_head_t, - edge_index_t[0] - edge_index_t[1], - ], - dim=-1, - ) - r_t = self.r_t_emb(continuous_inputs=r_t, categorical_embs=None) - return edge_index_t, r_t - - def build_interaction_edge( - self, - pos_a, # [n_agent, n_step, 2] - head_a, # [n_agent, n_step] - head_vector_a, # [n_agent, n_step, 2] - batch_s, # [n_agent*n_step] - mask, # [n_agent, n_step] - ): - mask = mask.transpose(0, 1).reshape(-1) - pos_s = pos_a.transpose(0, 1).flatten(0, 1) - head_s = head_a.transpose(0, 1).reshape(-1) - head_vector_s = head_vector_a.transpose(0, 1).reshape(-1, 2) - edge_index_a2a = radius_graph( - x=pos_s[:, :2], - r=self.a2a_radius, - batch=batch_s, - loop=False, - 
max_num_neighbors=300, - ) - edge_index_a2a = subgraph(subset=mask, edge_index=edge_index_a2a)[0] - rel_pos_a2a = pos_s[edge_index_a2a[0]] - pos_s[edge_index_a2a[1]] - rel_head_a2a = wrap_angle(head_s[edge_index_a2a[0]] - head_s[edge_index_a2a[1]]) - r_a2a = torch.stack( - [ - torch.norm(rel_pos_a2a[:, :2], p=2, dim=-1), - angle_between_2d_vectors( - ctr_vector=head_vector_s[edge_index_a2a[1]], - nbr_vector=rel_pos_a2a[:, :2], - ), - rel_head_a2a, - ], - dim=-1, - ) - r_a2a = self.r_a2a_emb(continuous_inputs=r_a2a, categorical_embs=None) - return edge_index_a2a, r_a2a - - def build_map2agent_edge( - self, - pos_pl, # [n_pl, 2] - orient_pl, # [n_pl] - pos_a, # [n_agent, n_step, 2] - head_a, # [n_agent, n_step] - head_vector_a, # [n_agent, n_step, 2] - mask, # [n_agent, n_step] - batch_s, # [n_agent*n_step] - batch_pl, # [n_pl*n_step] - ): - n_step = pos_a.shape[1] - mask_pl2a = mask.transpose(0, 1).reshape(-1) - pos_s = pos_a.transpose(0, 1).flatten(0, 1) - head_s = head_a.transpose(0, 1).reshape(-1) - head_vector_s = head_vector_a.transpose(0, 1).reshape(-1, 2) - pos_pl = pos_pl.repeat(n_step, 1) - orient_pl = orient_pl.repeat(n_step) - edge_index_pl2a = radius( - x=pos_s[:, :2], - y=pos_pl[:, :2], - r=self.pl2a_radius, - batch_x=batch_s, - batch_y=batch_pl, - max_num_neighbors=300, - ) - edge_index_pl2a = edge_index_pl2a[:, mask_pl2a[edge_index_pl2a[1]]] - rel_pos_pl2a = pos_pl[edge_index_pl2a[0]] - pos_s[edge_index_pl2a[1]] - rel_orient_pl2a = wrap_angle(orient_pl[edge_index_pl2a[0]] - head_s[edge_index_pl2a[1]]) - r_pl2a = torch.stack( - [ - torch.norm(rel_pos_pl2a[:, :2], p=2, dim=-1), - angle_between_2d_vectors( - ctr_vector=head_vector_s[edge_index_pl2a[1]], - nbr_vector=rel_pos_pl2a[:, :2], - ), - rel_orient_pl2a, - ], - dim=-1, - ) - r_pl2a = self.r_pt2a_emb(continuous_inputs=r_pl2a, categorical_embs=None) - return edge_index_pl2a, r_pl2a - - def forward( - self, - tokenized_agent: Dict[str, torch.Tensor], - map_feature: Dict[str, torch.Tensor], - ) -> Dict[str, torch.Tensor]: - mask = tokenized_agent["valid_mask"] - pos_a = tokenized_agent["sampled_pos"] - head_a = tokenized_agent["sampled_heading"] - head_vector_a = torch.stack([head_a.cos(), head_a.sin()], dim=-1) - n_agent, n_step = head_a.shape - - # ! get agent token embeddings - feat_a = self.agent_token_embedding( - agent_token_index=tokenized_agent["sampled_idx"], # [n_ag, n_step] - trajectory_token_veh=tokenized_agent["trajectory_token_veh"], - trajectory_token_ped=tokenized_agent["trajectory_token_ped"], - trajectory_token_cyc=tokenized_agent["trajectory_token_cyc"], - pos_a=pos_a, # [n_agent, n_step, 2] - head_vector_a=head_vector_a, # [n_agent, n_step, 2] - agent_type=tokenized_agent["type"], # [n_agent] - agent_shape=tokenized_agent["shape"], # [n_agent, 3] - ) # feat_a: [n_agent, n_step, hidden_dim] - - # ! 
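All three edge builders emit the same three relative features before the Fourier embedding: distance, bearing of the offset in the target's heading frame, and wrapped relative heading. A self-contained sketch under that reading (helper name illustrative):

import math
import torch

def rel_edge_features(pos, head, edge_index):
    # pos: [n, 2], head: [n]; edge_index: [2, n_edge], row 0 = source, row 1 = target.
    head_vec = torch.stack([head.cos(), head.sin()], dim=-1)
    rel_pos = pos[edge_index[0]] - pos[edge_index[1]]
    ctr = head_vec[edge_index[1]]
    bearing = torch.atan2(
        ctr[:, 0] * rel_pos[:, 1] - ctr[:, 1] * rel_pos[:, 0], (ctr * rel_pos).sum(-1)
    )  # signed angle of rel_pos in the target's heading frame
    rel_head = -math.pi + (head[edge_index[0]] - head[edge_index[1]] + math.pi) % (2 * math.pi)
    return torch.stack([rel_pos.norm(dim=-1), bearing, rel_head], dim=-1)  # [n_edge, 3]

r = rel_edge_features(torch.randn(5, 2), torch.randn(5), torch.tensor([[0, 1], [2, 3]]))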
build temporal, interaction and map2agent edges - edge_index_t, r_t = self.build_temporal_edge( - pos_a=pos_a, # [n_agent, n_step, 2] - head_a=head_a, # [n_agent, n_step] - head_vector_a=head_vector_a, # [n_agent, n_step, 2] - mask=mask, # [n_agent, n_step] - ) # edge_index_t: [2, n_edge_t], r_t: [n_edge_t, hidden_dim] - - batch_s = torch.cat( - [tokenized_agent["batch"] + tokenized_agent["num_graphs"] * t for t in range(n_step)], - dim=0, - ) # [n_agent*n_step] - batch_pl = torch.cat( - [map_feature["batch"] + tokenized_agent["num_graphs"] * t for t in range(n_step)], - dim=0, - ) # [n_pl*n_step] - - edge_index_a2a, r_a2a = self.build_interaction_edge( - pos_a=pos_a, # [n_agent, n_step, 2] - head_a=head_a, # [n_agent, n_step] - head_vector_a=head_vector_a, # [n_agent, n_step, 2] - batch_s=batch_s, # [n_agent*n_step] - mask=mask, # [n_agent, n_step] - ) # edge_index_a2a: [2, n_edge_a2a], r_a2a: [n_edge_a2a, hidden_dim] - - edge_index_pl2a, r_pl2a = self.build_map2agent_edge( - pos_pl=map_feature["position"], # [n_pl, 2] - orient_pl=map_feature["orientation"], # [n_pl] - pos_a=pos_a, # [n_agent, n_step, 2] - head_a=head_a, # [n_agent, n_step] - head_vector_a=head_vector_a, # [n_agent, n_step, 2] - mask=mask, # [n_agent, n_step] - batch_s=batch_s, # [n_agent*n_step] - batch_pl=batch_pl, # [n_pl*n_step] - ) - - # ! attention layers - # [n_step*n_pl, hidden_dim] - feat_map = map_feature["pt_token"].unsqueeze(0).expand(n_step, -1, -1).flatten(0, 1) - - for i in range(self.num_layers): - feat_a = feat_a.flatten(0, 1) # [n_agent*n_step, hidden_dim] - feat_a = self.t_attn_layers[i](feat_a, r_t, edge_index_t) - # [n_step*n_agent, hidden_dim] - feat_a = feat_a.view(n_agent, n_step, -1).transpose(0, 1).flatten(0, 1) - feat_a = self.pt2a_attn_layers[i]((feat_map, feat_a), r_pl2a, edge_index_pl2a) - feat_a = self.a2a_attn_layers[i](feat_a, r_a2a, edge_index_a2a) - feat_a = feat_a.view(n_step, n_agent, -1).transpose(0, 1) - - # ! 
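The layer loop above mainly shuttles features between agent-major and time-major layouts so that each attention type sees the flattening its edge indices were built with. The invariant, as a tiny shape check with illustrative sizes:

import torch

n_agent, n_step, d = 4, 18, 8
feat = torch.randn(n_agent, n_step, d)

x = feat.flatten(0, 1)                 # [n_agent*n_step, d]: layout for temporal attention
x = x.view(n_agent, n_step, d).transpose(0, 1).flatten(0, 1)  # [n_step*n_agent, d]: pt2a/a2a
x = x.view(n_step, n_agent, d).transpose(0, 1)                # back to [n_agent, n_step, d]
assert torch.equal(x, feat)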
final mlp to get outputs - next_token_logits = self.token_predict_head(feat_a) - - return { - # action that goes from [(10->15), ..., (85->90)] - "next_token_logits": next_token_logits[:, 1:-1], # [n_agent, 16, n_token] - "next_token_valid": tokenized_agent["valid_mask"][:, 1:-1], # [n_agent, 16] - # for step {5, 10, ..., 90} and act [(0->5), (5->10), ..., (85->90)] - "pred_pos": tokenized_agent["sampled_pos"], # [n_agent, 18, 2] - "pred_head": tokenized_agent["sampled_heading"], # [n_agent, 18] - "pred_valid": tokenized_agent["valid_mask"], # [n_agent, 18] - # for step {5, 10, ..., 90} - "gt_pos_raw": tokenized_agent["gt_pos_raw"], # [n_agent, 18, 2] - "gt_head_raw": tokenized_agent["gt_head_raw"], # [n_agent, 18] - "gt_valid_raw": tokenized_agent["gt_valid_raw"], # [n_agent, 18] - # or use the tokenized gt - "gt_pos": tokenized_agent["gt_pos"], # [n_agent, 18, 2] - "gt_head": tokenized_agent["gt_heading"], # [n_agent, 18] - "gt_valid": tokenized_agent["valid_mask"], # [n_agent, 18] - } - - def inference( - self, - tokenized_agent: Dict[str, torch.Tensor], - map_feature: Dict[str, torch.Tensor], - sampling_scheme: SMARTRolloutSampling, - ) -> Dict[str, torch.Tensor]: - n_agent = tokenized_agent["valid_mask"].shape[0] - n_step_future_10hz = self.num_future_steps # 80 - n_step_future_2hz = n_step_future_10hz // self.shift # 16 - step_current_10hz = self.num_historical_steps - 1 # 10 - step_current_2hz = step_current_10hz // self.shift # 2 - - pos_a = tokenized_agent["gt_pos"][:, :step_current_2hz].clone() - head_a = tokenized_agent["gt_heading"][:, :step_current_2hz].clone() - head_vector_a = torch.stack([head_a.cos(), head_a.sin()], dim=-1) - pred_idx = tokenized_agent["gt_idx"].clone() - ( - feat_a, # [n_agent, step_current_2hz, hidden_dim] - agent_token_emb, # [n_agent, step_current_2hz, hidden_dim] - agent_token_emb_veh, # [n_agent, hidden_dim] - agent_token_emb_ped, # [n_agent, hidden_dim] - agent_token_emb_cyc, # [n_agent, hidden_dim] - veh_mask, # [n_agent] - ped_mask, # [n_agent] - cyc_mask, # [n_agent] - categorical_embs, # List of len=2, shape [n_agent, hidden_dim] - ) = self.agent_token_embedding( - agent_token_index=tokenized_agent["gt_idx"][:, :step_current_2hz], - trajectory_token_veh=tokenized_agent["trajectory_token_veh"], - trajectory_token_ped=tokenized_agent["trajectory_token_ped"], - trajectory_token_cyc=tokenized_agent["trajectory_token_cyc"], - pos_a=pos_a, - head_vector_a=head_vector_a, - agent_type=tokenized_agent["type"], - agent_shape=tokenized_agent["shape"], - inference=True, - ) - - if not self.training: - pred_traj_10hz = torch.zeros([n_agent, n_step_future_10hz, 2], dtype=pos_a.dtype, device=pos_a.device) - pred_head_10hz = torch.zeros([n_agent, n_step_future_10hz], dtype=pos_a.dtype, device=pos_a.device) - - pred_valid = tokenized_agent["valid_mask"].clone() - next_token_logits_list = [] - next_token_action_list = [] - feat_a_t_dict = {} - for t in range(n_step_future_2hz): # 0 -> 15 - t_now = step_current_2hz - 1 + t # 1 -> 16 - n_step = t_now + 1 # 2 -> 17 - - if t == 0: # init - hist_step = step_current_2hz - batch_s = torch.cat( - [tokenized_agent["batch"] + tokenized_agent["num_graphs"] * t for t in range(hist_step)], - dim=0, - ) - batch_pl = torch.cat( - [map_feature["batch"] + tokenized_agent["num_graphs"] * t for t in range(hist_step)], - dim=0, - ) - inference_mask = pred_valid[:, :n_step] - edge_index_t, r_t = self.build_temporal_edge( - pos_a=pos_a, - head_a=head_a, - head_vector_a=head_vector_a, - mask=pred_valid[:, :n_step], - ) - else: - 
hist_step = 1 - batch_s = tokenized_agent["batch"] - batch_pl = map_feature["batch"] - inference_mask = pred_valid[:, :n_step].clone() - inference_mask[:, :-1] = False - edge_index_t, r_t = self.build_temporal_edge( - pos_a=pos_a, - head_a=head_a, - head_vector_a=head_vector_a, - mask=pred_valid[:, :n_step], - inference_mask=inference_mask, - ) - edge_index_t[1] = (edge_index_t[1] + 1) // n_step - 1 - - # In the inference stage, we only infer the current step for the recurrent rollout - edge_index_pl2a, r_pl2a = self.build_map2agent_edge( - pos_pl=map_feature["position"], # [n_pl, 2] - orient_pl=map_feature["orientation"], # [n_pl] - pos_a=pos_a[:, -hist_step:], # [n_agent, hist_step, 2] - head_a=head_a[:, -hist_step:], # [n_agent, hist_step] - head_vector_a=head_vector_a[:, -hist_step:], # [n_agent, hist_step, 2] - mask=inference_mask[:, -hist_step:], # [n_agent, hist_step] - batch_s=batch_s, # [n_agent*hist_step] - batch_pl=batch_pl, # [n_pl*hist_step] - ) - edge_index_a2a, r_a2a = self.build_interaction_edge( - pos_a=pos_a[:, -hist_step:], # [n_agent, hist_step, 2] - head_a=head_a[:, -hist_step:], # [n_agent, hist_step] - head_vector_a=head_vector_a[:, -hist_step:], # [n_agent, hist_step, 2] - batch_s=batch_s, # [n_agent*hist_step] - mask=inference_mask[:, -hist_step:], # [n_agent, hist_step] - ) - - # ! attention layers - for i in range(self.num_layers): - # [n_agent, n_step, hidden_dim] - _feat_temporal = feat_a if i == 0 else feat_a_t_dict[i] - - if t == 0: # init, process hist_step together - _feat_temporal = self.t_attn_layers[i](_feat_temporal.flatten(0, 1), r_t, edge_index_t).view( - n_agent, n_step, -1 - ) - _feat_temporal = _feat_temporal.transpose(0, 1).flatten(0, 1) - - # [hist_step*n_pl, hidden_dim] - _feat_map = map_feature["pt_token"].unsqueeze(0).expand(hist_step, -1, -1).flatten(0, 1) - - _feat_temporal = self.pt2a_attn_layers[i]((_feat_map, _feat_temporal), r_pl2a, edge_index_pl2a) - _feat_temporal = self.a2a_attn_layers[i](_feat_temporal, r_a2a, edge_index_a2a) - _feat_temporal = _feat_temporal.view(n_step, n_agent, -1).transpose(0, 1) - feat_a_now = _feat_temporal[:, -1] # [n_agent, hidden_dim] - - if i + 1 < self.num_layers: - feat_a_t_dict[i + 1] = _feat_temporal - - else: # process one step - feat_a_now = self.t_attn_layers[i]( - (_feat_temporal.flatten(0, 1), _feat_temporal[:, -1]), - r_t, - edge_index_t, - ) - # * give same results as below, but more efficient - # feat_a_now = self.t_attn_layers[i]( - # _feat_temporal.flatten(0, 1), r_t, edge_index_t - # ).view(n_agent, n_step, -1)[:, -1] - - feat_a_now = self.pt2a_attn_layers[i]( - (map_feature["pt_token"], feat_a_now), r_pl2a, edge_index_pl2a - ) - feat_a_now = self.a2a_attn_layers[i](feat_a_now, r_a2a, edge_index_a2a) - - # [n_agent, n_step, hidden_dim] - if i + 1 < self.num_layers: - feat_a_t_dict[i + 1] = torch.cat((feat_a_t_dict[i + 1], feat_a_now.unsqueeze(1)), dim=1) - - # ! get outputs - next_token_logits = self.token_predict_head(feat_a_now) - next_token_logits_list.append(next_token_logits) # [n_agent, n_token]
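feat_a_t_dict acts as a per-layer rollout cache: layer i+1 consumes the full history of layer i's outputs, so each step appends one [n_agent, hidden_dim] slice instead of recomputing attention over the past. A loose standalone analogue, not the module's exact bookkeeping:

import torch

n_agent, d, n_layers = 3, 8, 3
cache = {i: torch.zeros(n_agent, 0, d) for i in range(1, n_layers)}

for step in range(4):
    feat_now = torch.randn(n_agent, d)  # stand-in for the last attention output
    for i in range(1, n_layers):
        cache[i] = torch.cat([cache[i], feat_now.unsqueeze(1)], dim=1)

assert cache[1].shape == (n_agent, 4, d)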
next_token_idx, next_token_traj_all = sample_next_token_traj( - token_traj=tokenized_agent["token_traj"], - token_traj_all=tokenized_agent["token_traj_all"], - sampling_scheme=sampling_scheme, - # ! for most-likely sampling - next_token_logits=next_token_logits, - # ! for nearest-pos sampling - pos_now=pos_a[:, t_now], # [n_agent, 2] - head_now=head_a[:, t_now], # [n_agent] - pos_next_gt=tokenized_agent["gt_pos_raw"][:, n_step], # [n_agent, 2] - head_next_gt=tokenized_agent["gt_head_raw"][:, n_step], # [n_agent] - valid_next_gt=tokenized_agent["gt_valid_raw"][:, n_step], # [n_agent] - token_agent_shape=tokenized_agent["token_agent_shape"], # [n_token, 2] - ) # next_token_idx: [n_agent], next_token_traj_all: [n_agent, 6, 4, 2] - - diff_xy = next_token_traj_all[:, -1, 0] - next_token_traj_all[:, -1, 3] - next_token_action_list.append( - torch.cat( - [ - next_token_traj_all[:, -1].mean(1), # [n_agent, 2] - torch.arctan2(diff_xy[:, [1]], diff_xy[:, [0]]), # [n_agent, 1] - ], - dim=-1, - ) # [n_agent, 3] - ) - - token_traj_global = transform_to_global( - pos_local=next_token_traj_all.flatten(1, 2), # [n_agent, 6*4, 2] - head_local=None, - pos_now=pos_a[:, t_now], # [n_agent, 2] - head_now=head_a[:, t_now], # [n_agent] - )[0].view(*next_token_traj_all.shape) - - if not self.training: - pred_traj_10hz[:, t * 5 : (t + 1) * 5] = token_traj_global[:, 1:].mean(2) - diff_xy = token_traj_global[:, 1:, 0] - token_traj_global[:, 1:, 3] - pred_head_10hz[:, t * 5 : (t + 1) * 5] = torch.arctan2(diff_xy[:, :, 1], diff_xy[:, :, 0]) - - # ! get pos_a_next and head_a_next, spawn unseen agents - pos_a_next = token_traj_global[:, -1].mean(dim=1) - diff_xy_next = token_traj_global[:, -1, 0] - token_traj_global[:, -1, 3] - head_a_next = torch.arctan2(diff_xy_next[:, 1], diff_xy_next[:, 0]) - pred_idx[:, n_step] = next_token_idx - - # ! update tensors for next step - pred_valid[:, n_step] = pred_valid[:, t_now] - # pred_valid[:, n_step] = pred_valid[:, t_now] | mask_spawn - pos_a = torch.cat([pos_a, pos_a_next.unsqueeze(1)], dim=1) - head_a = torch.cat([head_a, head_a_next.unsqueeze(1)], dim=1) - head_vector_a_next = torch.stack([head_a_next.cos(), head_a_next.sin()], dim=-1) - head_vector_a = torch.cat([head_vector_a, head_vector_a_next.unsqueeze(1)], dim=1) - - # ! get agent_token_emb_next - agent_token_emb_next = torch.zeros_like(agent_token_emb[:, 0]) - agent_token_emb_next[veh_mask] = agent_token_emb_veh[next_token_idx[veh_mask]] - agent_token_emb_next[ped_mask] = agent_token_emb_ped[next_token_idx[ped_mask]] - agent_token_emb_next[cyc_mask] = agent_token_emb_cyc[next_token_idx[cyc_mask]] - agent_token_emb = torch.cat([agent_token_emb, agent_token_emb_next.unsqueeze(1)], dim=1) - - # !
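The corner arithmetic repeated above (mean of the four corners for position, corner 0 minus corner 3 for heading) inverts the (left_front, right_front, right_back, left_back) ordering produced by cal_polygon_contour. As one helper, name illustrative:

import torch

def contour_to_pose(contour):
    # contour: [..., 4, 2] ordered (left_front, right_front, right_back, left_back).
    center = contour.mean(-2)
    d = contour[..., 0, :] - contour[..., 3, :]  # left_front - left_back points forward
    return center, torch.atan2(d[..., 1], d[..., 0])

center, heading = contour_to_pose(torch.randn(7, 4, 2))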
get feat_a_next - motion_vector_a = pos_a[:, -1] - pos_a[:, -2] # [n_agent, 2] - x_a = torch.stack( - [ - torch.norm(motion_vector_a, p=2, dim=-1), - angle_between_2d_vectors(ctr_vector=head_vector_a[:, -1], nbr_vector=motion_vector_a), - ], - dim=-1, - ) - # [n_agent, hidden_dim] - x_a = self.x_a_emb(continuous_inputs=x_a, categorical_embs=categorical_embs) - # [n_agent, 1, 2*hidden_dim] - feat_a_next = torch.cat((agent_token_emb_next, x_a), dim=-1).unsqueeze(1) - feat_a_next = self.fusion_emb(feat_a_next) - feat_a = torch.cat([feat_a, feat_a_next], dim=1) - - out_dict = { - # action that goes from [(10->15), ..., (85->90)] - "next_token_logits": torch.stack(next_token_logits_list, dim=1), - "next_token_valid": pred_valid[:, 1:-1], # [n_agent, 16] - # for step {5, 10, ..., 90} and act [(0->5), (5->10), ..., (85->90)] - "pred_pos": pos_a, # [n_agent, 18, 2] - "pred_head": head_a, # [n_agent, 18] - "pred_valid": pred_valid, # [n_agent, 18] - "pred_idx": pred_idx, # [n_agent, 18] - # for step {5, 10, ..., 90} - "gt_pos_raw": tokenized_agent["gt_pos_raw"], # [n_agent, 18, 2] - "gt_head_raw": tokenized_agent["gt_head_raw"], # [n_agent, 18] - "gt_valid_raw": tokenized_agent["gt_valid_raw"], # [n_agent, 18] - # or use the tokenized gt - "gt_pos": tokenized_agent["gt_pos"], # [n_agent, 18, 2] - "gt_head": tokenized_agent["gt_heading"], # [n_agent, 18] - "gt_valid": tokenized_agent["valid_mask"], # [n_agent, 18] - # for shifting proxy targets by lr - "next_token_action": torch.stack(next_token_action_list, dim=1), - } - - if not self.training: # 10hz predictions for wosac evaluation and submission - out_dict["pred_traj_10hz"] = pred_traj_10hz - out_dict["pred_head_10hz"] = pred_head_10hz - pred_z = tokenized_agent["gt_z_raw"].unsqueeze(1) # [n_agent, 1] - out_dict["pred_z_10hz"] = pred_z.expand(-1, pred_traj_10hz.shape[1]) - - return out_dict diff --git a/d123/training/models/sim_agent/smart/smart.py b/d123/training/models/sim_agent/smart/smart.py deleted file mode 100644 index 159630cd..00000000 --- a/d123/training/models/sim_agent/smart/smart.py +++ /dev/null @@ -1,176 +0,0 @@ -import math - -import torch -from lightning import LightningModule -from torch.optim.lr_scheduler import LambdaLR - -from d123.training.models.sim_agent.smart.metrics.cross_entropy import CrossEntropy -from d123.training.models.sim_agent.smart.metrics.min_ade import minADE -from d123.training.models.sim_agent.smart.metrics.next_token_cls import TokenCls -from d123.training.models.sim_agent.smart.modules.smart_decoder import SMARTDecoder -from d123.training.models.sim_agent.smart.smart_config import SMARTConfig -from d123.training.models.sim_agent.smart.tokens.token_processor import TokenProcessor - -# from d123.training.models.sim_agent.smart.utils.finetune import set_model_for_finetuning - -# from src.utils.vis_waymo import VisWaymo -# from src.utils.wosac_utils import get_scenario_id_int_tensor, get_scenario_rollouts - - -class SMART(LightningModule): - def __init__(self, model_config: SMARTConfig) -> None: - super(SMART, self).__init__() - self.save_hyperparameters() - - self.config = model_config - self.lr = model_config.lr - self.lr_warmup_steps = model_config.lr_warmup_steps - self.lr_total_steps = model_config.lr_total_steps - self.lr_min_ratio = model_config.lr_min_ratio - self.num_historical_steps = model_config.num_historical_steps - self.log_epoch = -1 - self.val_open_loop = model_config.val_open_loop - self.val_closed_loop = model_config.val_closed_loop - self.token_processor = TokenProcessor( - 
map_token_file=model_config.map_token_file, - agent_token_file=model_config.agent_token_file, - map_token_sampling=model_config.map_token_sampling, - agent_token_sampling=model_config.agent_token_sampling, - ) - - self.encoder = SMARTDecoder(model_config=model_config, n_token_agent=self.token_processor.n_token_agent) - # set_model_for_finetuning(self.encoder, model_config.finetune) - - self.minADE = minADE() - self.TokenCls = TokenCls(max_guesses=5) - # self.wosac_metrics = WOSACMetrics("val_closed") - # self.wosac_submission = WOSACSubmission(**model_config.wosac_submission) - self.training_loss = CrossEntropy( - use_gt_raw=model_config.use_gt_raw, - gt_thresh_scale_length=model_config.gt_thresh_scale_length, - label_smoothing=model_config.label_smoothing, - rollout_as_gt=model_config.rollout_as_gt, - ) - - self.n_rollout_closed_val = model_config.n_rollout_closed_val - self.n_vis_batch = model_config.n_vis_batch - self.n_vis_scenario = model_config.n_vis_scenario - self.n_vis_rollout = model_config.n_vis_rollout - # self.n_batch_wosac_metric = model_config.n_batch_wosac_metric - - self.training_rollout_sampling = model_config.training_rollout_sampling - self.validation_rollout_sampling = model_config.validation_rollout_sampling - - def training_step(self, data, batch_idx): - tokenized_map, tokenized_agent = self.token_processor(data) - if self.training_rollout_sampling.num_k <= 0: - pred = self.encoder(tokenized_map, tokenized_agent) - else: - pred = self.encoder.inference( - tokenized_map, - tokenized_agent, - sampling_scheme=self.training_rollout_sampling, - ) - - loss = self.training_loss( - **pred, - token_agent_shape=tokenized_agent["token_agent_shape"], # [n_agent, 2] - token_traj=tokenized_agent["token_traj"], # [n_agent, n_token, 4, 2] - train_mask=data["agent"]["train_mask"], # [n_agent] - current_epoch=self.current_epoch, - ) - self.log("train/loss", loss, on_step=True, batch_size=1) - - return loss - - def validation_step(self, data, batch_idx): - tokenized_map, tokenized_agent = self.token_processor(data) - - # ! open-loop validation - if self.val_open_loop: - pred = self.encoder(tokenized_map, tokenized_agent) - loss = self.training_loss( - **pred, - token_agent_shape=tokenized_agent["token_agent_shape"], # [n_agent, 2] - token_traj=tokenized_agent["token_traj"], # [n_agent, n_token, 4, 2] - ) - - self.TokenCls.update( - # action that goes from [(10->15), ..., (85->90)] - pred=pred["next_token_logits"], # [n_agent, 16, n_token] - pred_valid=pred["next_token_valid"], # [n_agent, 16] - target=tokenized_agent["gt_idx"][:, 2:], - target_valid=tokenized_agent["valid_mask"][:, 2:], - ) - self.log( - "val_open/acc", - self.TokenCls, - on_epoch=True, - sync_dist=True, - batch_size=1, - ) - self.log("val_open/loss", loss, on_epoch=True, sync_dist=True, batch_size=1)
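The CrossEntropy training loss scores next_token_logits against the matched GT token indices under the validity mask; its core, with the gt_thresh_scale_length and rollout_as_gt options stripped out, is roughly the following sketch (assuming a standard label-smoothed CE):

import torch
import torch.nn.functional as F

def masked_next_token_ce(logits, target_idx, valid, label_smoothing=0.1):
    # logits: [n_agent, n_pred, n_token]; target_idx: [n_agent, n_pred]; valid: bool mask.
    loss = F.cross_entropy(
        logits.flatten(0, 1), target_idx.flatten(0, 1),
        reduction="none", label_smoothing=label_smoothing,
    )
    valid = valid.flatten(0, 1).float()
    return (loss * valid).sum() / valid.sum().clamp(min=1.0)

l = masked_next_token_ce(
    torch.randn(2, 16, 512), torch.randint(0, 512, (2, 16)), torch.rand(2, 16) > 0.2
)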
# ! closed-loop validation - if self.val_closed_loop: - pred_traj, pred_z, pred_head = [], [], [] - for _ in range(self.n_rollout_closed_val): - pred = self.encoder.inference(tokenized_map, tokenized_agent, self.validation_rollout_sampling) - pred_traj.append(pred["pred_traj_10hz"]) - pred_z.append(pred["pred_z_10hz"]) - pred_head.append(pred["pred_head_10hz"]) - - pred_traj = torch.stack(pred_traj, dim=1) # [n_ag, n_rollout, n_step, 2] - pred_z = torch.stack(pred_z, dim=1) # [n_ag, n_rollout, n_step] - pred_head = torch.stack(pred_head, dim=1) # [n_ag, n_rollout, n_step] - self.minADE.update( - pred=pred_traj, - target=data["agent"]["position"][:, self.num_historical_steps :, : pred_traj.shape[-1]], - target_valid=data["agent"]["valid_mask"][:, self.num_historical_steps :], - ) - return pred_traj - - def on_validation_epoch_end(self): - pass - - def configure_optimizers(self): - - # TODO: add to hydra config - optimizer = torch.optim.AdamW(self.parameters(), lr=self.lr) - - def lr_lambda(current_step): - current_step = self.current_epoch + 1 - if current_step < self.lr_warmup_steps: - return self.lr_min_ratio + (1 - self.lr_min_ratio) * current_step / self.lr_warmup_steps - return self.lr_min_ratio + 0.5 * (1 - self.lr_min_ratio) * ( - 1.0 - + math.cos( - math.pi - * min( - 1.0, - (current_step - self.lr_warmup_steps) / (self.lr_total_steps - self.lr_warmup_steps), - ) - ) - ) - - lr_scheduler = LambdaLR(optimizer, lr_lambda=lr_lambda) - return [optimizer], [lr_scheduler] - - def test_step(self, data, batch_idx): - tokenized_map, tokenized_agent = self.token_processor(data) - - # ! only closed-loop validation - pred_traj, pred_z, pred_head = [], [], [] - for _ in range(self.n_rollout_closed_val): - pred = self.encoder.inference(tokenized_map, tokenized_agent, self.validation_rollout_sampling) - pred_traj.append(pred["pred_traj_10hz"]) - pred_z.append(pred["pred_z_10hz"]) - pred_head.append(pred["pred_head_10hz"]) - - pred_traj = torch.stack(pred_traj, dim=1) # [n_ag, n_rollout, n_step, 2] - pred_z = torch.stack(pred_z, dim=1) # [n_ag, n_rollout, n_step] - pred_head = torch.stack(pred_head, dim=1) # [n_ag, n_rollout, n_step] - return pred_traj, pred_z, pred_head - - def on_test_epoch_end(self): - pass diff --git a/d123/training/models/sim_agent/smart/smart_config.py b/d123/training/models/sim_agent/smart/smart_config.py deleted file mode 100644 index 47d122bc..00000000 --- a/d123/training/models/sim_agent/smart/smart_config.py +++ /dev/null @@ -1,68 +0,0 @@ -from dataclasses import dataclass, field -from typing import Optional - - -@dataclass -class SMARTRolloutSampling: - num_k: int = 1 - temp: float = 1.0 - criteria: Optional[str] = "topk_prob" # {topk_dist_sampled_with_prob, topk_prob, topk_prob_sampled_with_dist} - - -@dataclass -class SMARTConfig: - - lr: float = 0.0005 - lr_warmup_steps: int = 0 - lr_total_steps: int = 100000 - lr_min_ratio: float = 0.05 - - val_open_loop: bool = True - val_closed_loop: bool = True - - # Tokenizer - map_token_file: str = "map_traj_token5.pkl" - agent_token_file: str = "agent_vocab_555_s2.pkl" - - map_token_sampling: SMARTRolloutSampling = field( - default_factory=lambda: SMARTRolloutSampling(num_k=1, temp=1.0, criteria=None) - ) - agent_token_sampling: SMARTRolloutSampling = field( - default_factory=lambda: SMARTRolloutSampling(num_k=1, temp=1.0, criteria=None) - ) - - # Rollout Sampling - validation_rollout_sampling: SMARTRolloutSampling = field( - default_factory=lambda: SMARTRolloutSampling(num_k=5, temp=1.0, criteria="topk_prob") - ) -
training_rollout_sampling: SMARTRolloutSampling = field( - default_factory=lambda: SMARTRolloutSampling(num_k=-1, temp=1.0, criteria="topk_prob") - ) - - # Decoder - hidden_dim: int = 128 - num_freq_bands: int = 64 - num_heads: int = 8 - head_dim: int = 16 - dropout: float = 0.1 - hist_drop_prob: float = 0.1 - num_map_layers: int = 3 - num_agent_layers: int = 6 - pl2pl_radius: float = 10 - pl2a_radius: float = 30 - a2a_radius: float = 60 - time_span: Optional[int] = 30 - num_historical_steps: int = 11 - num_future_steps: int = 90 - - # train loss - use_gt_raw: bool = True - gt_thresh_scale_length: float = -1.0 # {"veh": 4.8, "cyc": 2.0, "ped": 1.0} - label_smoothing: float = 0.1 - rollout_as_gt: bool = False - - # else: - n_rollout_closed_val: int = 10 - n_vis_batch: int = 2 - n_vis_scenario: int = 2 - n_vis_rollout: int = 5 diff --git a/d123/training/models/sim_agent/smart/tokens/__init__.py b/d123/training/models/sim_agent/smart/tokens/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/d123/training/models/sim_agent/smart/tokens/token_processor.py b/d123/training/models/sim_agent/smart/tokens/token_processor.py deleted file mode 100644 index 93fa3a4d..00000000 --- a/d123/training/models/sim_agent/smart/tokens/token_processor.py +++ /dev/null @@ -1,339 +0,0 @@ -import os -import pickle -from typing import Dict, Tuple - -import torch -from torch import Tensor -from torch.distributions import Categorical -from torch_geometric.data import HeteroData - -from d123.training.models.sim_agent.smart.smart_config import SMARTRolloutSampling -from d123.training.models.sim_agent.smart.utils.geometry import wrap_angle -from d123.training.models.sim_agent.smart.utils.rollout import ( - cal_polygon_contour, - transform_to_global, - transform_to_local, -) - - -class TokenProcessor(torch.nn.Module): - def __init__( - self, - map_token_file: str, - agent_token_file: str, - map_token_sampling: SMARTRolloutSampling, - agent_token_sampling: SMARTRolloutSampling, - ) -> None: - super(TokenProcessor, self).__init__() - self.map_token_sampling = map_token_sampling - self.agent_token_sampling = agent_token_sampling - self.shift = 5 - - module_dir = os.path.dirname(__file__) - self.init_agent_token(os.path.join(module_dir, agent_token_file)) - self.init_map_token(os.path.join(module_dir, map_token_file)) - self.n_token_agent = self.agent_token_all_veh.shape[0] - - @torch.no_grad() - def forward(self, data: HeteroData) -> Tuple[Dict[str, Tensor], Dict[str, Tensor]]: - tokenized_map = self.tokenize_map(data) - tokenized_agent = self.tokenize_agent(data) - return tokenized_map, tokenized_agent - - def init_map_token(self, map_token_traj_path, argmin_sample_len=3) -> None: - map_token_traj = pickle.load(open(map_token_traj_path, "rb"))["traj_src"] - indices = torch.linspace(0, map_token_traj.shape[1] - 1, steps=argmin_sample_len).long() - - self.register_buffer( - "map_token_traj_src", - torch.tensor(map_token_traj, dtype=torch.float32).flatten(1, 2), - persistent=False, - ) # [n_token, 11*2] - - self.register_buffer( - "map_token_sample_pt", - torch.tensor(map_token_traj[:, indices], dtype=torch.float32).unsqueeze(0), - persistent=False, - ) # [1, n_token, 3, 2] - - def init_agent_token(self, agent_token_path) -> None: - agent_token_data = pickle.load(open(agent_token_path, "rb")) - for k, v in agent_token_data["token_all"].items(): - v = torch.tensor(v, dtype=torch.float32) - # [n_token, 6, 4, 2], contour, 10 Hz - self.register_buffer(f"agent_token_all_{k}", v, persistent=False) - def
tokenize_map(self, data: HeteroData) -> Dict[str, Tensor]: - traj_pos = data["map_save"]["traj_pos"] # [n_pl, 3, 2] - traj_theta = data["map_save"]["traj_theta"] # [n_pl] - - traj_pos_local, _ = transform_to_local( - pos_global=traj_pos, # [n_pl, 3, 2] - head_global=None, # [n_pl, 1] - pos_now=traj_pos[:, 0], # [n_pl, 2] - head_now=traj_theta, # [n_pl] - ) - # [1, n_token, 3, 2] - [n_pl, 1, 3, 2] - dist = torch.sum( - (self.map_token_sample_pt - traj_pos_local.unsqueeze(1)) ** 2, - dim=(-2, -1), - ) # [n_pl, n_token] - - if self.training and (self.map_token_sampling.num_k > 1): - topk_dists, topk_indices = torch.topk( - dist, - self.map_token_sampling.num_k, - dim=-1, - largest=False, - sorted=False, - ) # [n_pl, K] - - topk_logits = (-1e-6 - topk_dists) / self.map_token_sampling.temp - _samples = Categorical(logits=topk_logits).sample() # [n_pl] in K - token_idx = topk_indices[torch.arange(len(_samples)), _samples].contiguous() - else: - token_idx = torch.argmin(dist, dim=-1) - - tokenized_map = { - "position": traj_pos[:, 0].contiguous(), # [n_pl, 2] - "orientation": traj_theta, # [n_pl] - "token_idx": token_idx, # [n_pl] - "token_traj_src": self.map_token_traj_src, # [n_token, 11*2] - "type": data["pt_token"]["type"].long(), # [n_pl] - "pl_type": data["pt_token"]["pl_type"].long(), # [n_pl] - "light_type": data["pt_token"]["light_type"].long(), # [n_pl] - "batch": data["pt_token"]["batch"], # [n_pl] - } - return tokenized_map - - def tokenize_agent(self, data: HeteroData) -> Dict[str, Tensor]: - """ - Args: data["agent"]: Dict - "valid_mask": [n_agent, n_step], bool - "role": [n_agent, 3], bool - "id": [n_agent], int64 - "type": [n_agent], uint8 - "position": [n_agent, n_step, 3], float32 - "heading": [n_agent, n_step], float32 - "velocity": [n_agent, n_step, 2], float32 - "shape": [n_agent, 3], float32 - """ - # ! collate width/length, traj tokens for current batch - agent_shape, token_traj_all, token_traj = self._get_agent_shape_and_token_traj(data["agent"]["type"]) - - # ! get raw trajectory data - valid = data["agent"]["valid_mask"] # [n_agent, n_step] - heading = data["agent"]["heading"] # [n_agent, n_step] - pos = data["agent"]["position"][..., :2].contiguous() # [n_agent, n_step, 2] - vel = data["agent"]["velocity"] # [n_agent, n_step, 2] - - # ! agent, specifically vehicle's heading can be 180 degree off. We fix it here. - heading = self._clean_heading(valid, heading) - # ! extrapolate to previous 5th step. - valid, pos, heading, vel = self._extrapolate_agent_to_prev_token_step(valid, pos, heading, vel) - - # ! prepare output dict - tokenized_agent = { - "num_graphs": data.num_graphs, - "type": data["agent"]["type"], - "shape": data["agent"]["shape"], - "ego_mask": data["agent"]["role"][:, 0], # [n_agent] - "token_agent_shape": agent_shape, # [n_agent, 2] - "batch": data["agent"]["batch"], - "token_traj_all": token_traj_all, # [n_agent, n_token, 6, 4, 2] - "token_traj": token_traj, # [n_agent, n_token, 4, 2] - # for step {5, 10, ..., 90} - "gt_pos_raw": pos[:, self.shift :: self.shift], # [n_agent, n_step=18, 2] - "gt_head_raw": heading[:, self.shift :: self.shift], # [n_agent, n_step=18] - "gt_valid_raw": valid[:, self.shift :: self.shift], # [n_agent, n_step=18] - } - # [n_token, 8] - for k in ["veh", "ped", "cyc"]: - tokenized_agent[f"trajectory_token_{k}"] = getattr(self, f"agent_token_all_{k}")[:, -1].flatten(1, 2) - - # ! 
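The noisy matching in tokenize_map is top-k-nearest sampling with a temperature: negated distances become logits of a Categorical over the K closest vocabulary entries. Isolated, with an illustrative function name:

import torch
from torch.distributions import Categorical

def sample_topk_by_distance(dist, num_k, temp=1.0):
    # dist: [n, n_token]; smaller is better.
    topk_dist, topk_idx = torch.topk(dist, num_k, dim=-1, largest=False, sorted=False)
    choice = Categorical(logits=-topk_dist / temp).sample()  # [n], values in [0, num_k)
    return topk_idx[torch.arange(dist.shape[0]), choice]

token_idx = sample_topk_by_distance(torch.rand(6, 100), num_k=5)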
match token for each agent - if not self.training: - # [n_agent] - tokenized_agent["gt_z_raw"] = data["agent"]["position"][:, 10, 2] - - token_dict = self._match_agent_token( - valid=valid, - pos=pos, - heading=heading, - agent_shape=agent_shape, - token_traj=token_traj, - ) - tokenized_agent.update(token_dict) - return tokenized_agent - - def _match_agent_token( - self, - valid: Tensor, # [n_agent, n_step] - pos: Tensor, # [n_agent, n_step, 2] - heading: Tensor, # [n_agent, n_step] - agent_shape: Tensor, # [n_agent, 2] - token_traj: Tensor, # [n_agent, n_token, 4, 2] - ) -> Dict[str, Tensor]: - """n_step_token=n_step//5 - n_step_token=18 for train with BC. - n_step_token=2 for val/test and train with closed-loop rollout. - Returns: Dict - # ! action that goes from [(0->5), (5->10), ..., (85->90)] - "valid_mask": [n_agent, n_step_token] - "gt_idx": [n_agent, n_step_token] - # ! at step [5, 10, 15, ..., 90] - "gt_pos": [n_agent, n_step_token, 2] - "gt_heading": [n_agent, n_step_token] - # ! noisy sampling for training data augmentation - "sampled_idx": [n_agent, n_step_token] - "sampled_pos": [n_agent, n_step_token, 2] - "sampled_heading": [n_agent, n_step_token] - """ - num_k = self.agent_token_sampling.num_k if self.training else 1 - n_agent, n_step = valid.shape - range_a = torch.arange(n_agent) - - prev_pos, prev_head = pos[:, 0], heading[:, 0] # [n_agent, 2], [n_agent] - prev_pos_sample, prev_head_sample = pos[:, 0], heading[:, 0] - - out_dict = { - "valid_mask": [], - "gt_idx": [], - "gt_pos": [], - "gt_heading": [], - "sampled_idx": [], - "sampled_pos": [], - "sampled_heading": [], - } - - for i in range(self.shift, n_step, self.shift): # [5, 10, 15, ..., 90] - _valid_mask = valid[:, i - self.shift] & valid[:, i] # [n_agent] - _invalid_mask = ~_valid_mask - out_dict["valid_mask"].append(_valid_mask) - - # ! gt_contour: [n_agent, 4, 2] in global coord - gt_contour = cal_polygon_contour(pos[:, i], heading[:, i], agent_shape) - gt_contour = gt_contour.unsqueeze(1) # [n_agent, 1, 4, 2] - - # ! tokenize without sampling - token_world_gt = transform_to_global( - pos_local=token_traj.flatten(1, 2), # [n_agent, n_token*4, 2] - head_local=None, - pos_now=prev_pos, # [n_agent, 2] - head_now=prev_head, # [n_agent] - )[0].view(*token_traj.shape) - token_idx_gt = torch.argmin(torch.norm(token_world_gt - gt_contour, dim=-1).sum(-1), dim=-1) # [n_agent] - # [n_agent, 4, 2] - token_contour_gt = token_world_gt[range_a, token_idx_gt] - - # update prev_pos, prev_head - prev_head = heading[:, i].clone() - dxy = token_contour_gt[:, 0] - token_contour_gt[:, 3] - prev_head[_valid_mask] = torch.arctan2(dxy[:, 1], dxy[:, 0])[_valid_mask] - prev_pos = pos[:, i].clone() - prev_pos[_valid_mask] = token_contour_gt.mean(1)[_valid_mask] - # add to output dict - out_dict["gt_idx"].append(token_idx_gt) - out_dict["gt_pos"].append(prev_pos.masked_fill(_invalid_mask.unsqueeze(1), 0)) - out_dict["gt_heading"].append(prev_head.masked_fill(_invalid_mask, 0))
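Note the recursion above: prev_pos and prev_head are updated from the matched token's contour rather than from raw GT, so train-time conditioning matches what rollout produces. One matching step as a standalone sketch, with the rotation convention copied from transform_to_global in utils/rollout.py:

import torch

def match_step(token_traj_local, prev_pos, prev_head, gt_contour):
    # token_traj_local: [n_agent, n_token, 4, 2]; gt_contour: [n_agent, 4, 2], global frame.
    cos, sin = prev_head.cos(), prev_head.sin()
    rot = torch.stack(
        [torch.stack([cos, sin], -1), torch.stack([-sin, cos], -1)], dim=-2
    )  # [n_agent, 2, 2]
    world = token_traj_local.flatten(1, 2) @ rot + prev_pos[:, None]
    world = world.view(token_traj_local.shape)  # [n_agent, n_token, 4, 2]
    idx = torch.norm(world - gt_contour[:, None], dim=-1).sum(-1).argmin(-1)
    return idx, world[torch.arange(idx.shape[0]), idx]  # index + matched global contour

idx, contour = match_step(
    torch.randn(3, 32, 4, 2), torch.randn(3, 2), torch.randn(3), torch.randn(3, 4, 2)
)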
# ! tokenize from sampled rollout state - if num_k == 1: # K=1 means no sampling - out_dict["sampled_idx"].append(out_dict["gt_idx"][-1]) - out_dict["sampled_pos"].append(out_dict["gt_pos"][-1]) - out_dict["sampled_heading"].append(out_dict["gt_heading"][-1]) - else: - # contour: [n_agent, n_token, 4, 2], 2 Hz, global coord - token_world_sample = transform_to_global( - pos_local=token_traj.flatten(1, 2), # [n_agent, n_token*4, 2] - head_local=None, - pos_now=prev_pos_sample, # [n_agent, 2] - head_now=prev_head_sample, # [n_agent] - )[0].view(*token_traj.shape) - - # dist: [n_agent, n_token] - dist = torch.norm(token_world_sample - gt_contour, dim=-1).mean(-1) - topk_dists, topk_indices = torch.topk(dist, num_k, dim=-1, largest=False, sorted=False) # [n_agent, K] - - topk_logits = (-1.0 * topk_dists) / self.agent_token_sampling.temp - _samples = Categorical(logits=topk_logits).sample() # [n_agent] in K - token_idx_sample = topk_indices[range_a, _samples] - token_contour_sample = token_world_sample[range_a, token_idx_sample] - - # update prev_pos_sample, prev_head_sample - prev_head_sample = heading[:, i].clone() - dxy = token_contour_sample[:, 0] - token_contour_sample[:, 3] - prev_head_sample[_valid_mask] = torch.arctan2(dxy[:, 1], dxy[:, 0])[_valid_mask] - prev_pos_sample = pos[:, i].clone() - prev_pos_sample[_valid_mask] = token_contour_sample.mean(1)[_valid_mask] - # add to output dict - out_dict["sampled_idx"].append(token_idx_sample) - out_dict["sampled_pos"].append(prev_pos_sample.masked_fill(_invalid_mask.unsqueeze(1), 0.0)) - out_dict["sampled_heading"].append(prev_head_sample.masked_fill(_invalid_mask, 0.0)) - out_dict = {k: torch.stack(v, dim=1) for k, v in out_dict.items()} - return out_dict - - @staticmethod - def _clean_heading(valid: Tensor, heading: Tensor) -> Tensor: - valid_pairs = valid[:, :-1] & valid[:, 1:] - for i in range(heading.shape[1] - 1): - heading_diff = torch.abs(wrap_angle(heading[:, i] - heading[:, i + 1])) - change_needed = (heading_diff > 1.5) & valid_pairs[:, i] - heading[:, i + 1][change_needed] = heading[:, i][change_needed] - return heading
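The method below pads short histories backwards at constant velocity so that at least one token step is valid before prediction starts. A single-agent strip-down, with dt = 0.1 for the 10 Hz data:

import torch

def extrapolate_backward(pos, vel, t, n_steps, dt=0.1):
    # pos: [n_step, 2], vel: [n_step, 2]; fills pos[t - n_steps : t] in place.
    for j in range(n_steps):
        pos[t - j - 1] = pos[t - j] - vel[t] * dt
    return pos

pos = torch.zeros(11, 2)
pos[10] = torch.tensor([1.0, 0.0])
extrapolate_backward(pos, torch.ones(11, 2), t=10, n_steps=5)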
def _extrapolate_agent_to_prev_token_step( - self, - valid: Tensor, # [n_agent, n_step] - pos: Tensor, # [n_agent, n_step, 2] - heading: Tensor, # [n_agent, n_step] - vel: Tensor, # [n_agent, n_step, 2] - ) -> Tuple[Tensor, Tensor, Tensor, Tensor]: - # [n_agent], max will give the first True step - first_valid_step = torch.max(valid, dim=1).indices - - for i, t in enumerate(first_valid_step): # extrapolate to previous 5th step. - n_step_to_extrapolate = t % self.shift - if (t == 10) and (not valid[i, 10 - self.shift]): - # such that at least one token is valid in the history. - n_step_to_extrapolate = self.shift - - if n_step_to_extrapolate > 0: - vel[i, t - n_step_to_extrapolate : t] = vel[i, t] - valid[i, t - n_step_to_extrapolate : t] = True - heading[i, t - n_step_to_extrapolate : t] = heading[i, t] - - for j in range(n_step_to_extrapolate): - pos[i, t - j - 1] = pos[i, t - j] - vel[i, t] * 0.1 - - return valid, pos, heading, vel - - def _get_agent_shape_and_token_traj(self, agent_type: Tensor) -> Tuple[Tensor, Tensor, Tensor]: - """ - agent_shape: [n_agent, 2] - token_traj_all: [n_agent, n_token, 6, 4, 2] - token_traj: [n_agent, n_token, 4, 2] - """ - agent_type_masks = { - "veh": agent_type == 0, - "ped": agent_type == 1, - "cyc": agent_type == 2, - } - agent_shape = 0.0 - token_traj_all = 0.0 - for k, mask in agent_type_masks.items(): - if k == "veh": - width = 2.0 - length = 4.8 - elif k == "cyc": - width = 1.0 - length = 2.0 - else: - width = 1.0 - length = 1.0 - agent_shape += torch.stack([width * mask, length * mask], dim=-1) - - token_traj_all += mask[:, None, None, None, None] * (getattr(self, f"agent_token_all_{k}").unsqueeze(0)) - - token_traj = token_traj_all[:, :, -1, :, :].contiguous() - return agent_shape, token_traj_all, token_traj diff --git a/d123/training/models/sim_agent/smart/tokens/traj_clustering.py b/d123/training/models/sim_agent/smart/tokens/traj_clustering.py deleted file mode 100644 index c93f4238..00000000 --- a/d123/training/models/sim_agent/smart/tokens/traj_clustering.py +++ /dev/null @@ -1,155 +0,0 @@ -import pickle -from pathlib import Path - -import lightning as L -import torch -from torch_geometric.data import HeteroData -from torch_geometric.loader import DataLoader -from tqdm import tqdm - -from d123.training.models.sim_agent.smart.datasets.scalable_dataset import MultiDataset -from d123.training.models.sim_agent.smart.tokens.token_processor import TokenProcessor -from d123.training.models.sim_agent.smart.utils.geometry import wrap_angle -from d123.training.models.sim_agent.smart.utils.rollout import cal_polygon_contour, transform_to_local - - -def Kdisk_cluster( - X, # [n_trajs, 4, 2], bbox of the last point of the segment - N, # int - tol, # float - a_pos, # [n_trajs, 6, 3], the complete segment - cal_mean_heading=True, -): - n_total = X.shape[0] - ret_traj_list = [] - - for i in range(N): - if i == 0: - choice_index = 0 # always include [0, 0, 0] - else: - choice_index = torch.randint(0, X.shape[0], (1,)).item() - x0 = X[choice_index] - # res_mask = torch.sum((X - x0) ** 2, dim=[1, 2]) / 4.0 > (tol**2) - res_mask = torch.norm(X - x0, dim=-1).mean(-1) > tol - if cal_mean_heading: - ret_traj = a_pos[~res_mask].mean(0, keepdim=True) - else: - ret_traj = a_pos[[choice_index]] - X = X[res_mask] - a_pos = a_pos[res_mask] - ret_traj_list.append(ret_traj) - - remain = X.shape[0] * 100.0 / n_total - n_inside = (~res_mask).sum().item() - print(f"{i=}, {remain=:.2f}%, {n_inside=}") - - return torch.cat(ret_traj_list, dim=0) # [N, 6, 3] - - -if __name__ == "__main__": - L.seed_everything(seed=2, workers=True) - n_trajs = 2048 * 100 # 2e5 - load_data_from_file = True - data_cache_path = Path("/root/.cache/SMART") - out_file_name = "agent_vocab_555_s2.pkl" - tol_dist = [0.05, 0.05, 0.05] # veh, ped, cyc - - # !
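Kdisk_cluster above is a greedy disk cover over bounding-box contours: pick a random survivor, average everything within tol into one vocabulary entry, discard the covered points, repeat. The covering rule on generic points, as an illustrative helper:

import torch

def kdisk_centers(points, n_centers, tol):
    # points: [n, d]; returns up to n_centers cluster representatives.
    centers = []
    for _ in range(n_centers):
        if points.shape[0] == 0:
            break
        x0 = points[torch.randint(0, points.shape[0], (1,)).item()]
        keep = torch.norm(points - x0, dim=-1) > tol
        centers.append(points[~keep].mean(0))  # average the covered points, as above
        points = points[keep]
    return torch.stack(centers)

centers = kdisk_centers(torch.rand(1000, 2), n_centers=32, tol=0.05)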
don't change these params - shift = 5 # motion token time dimension - num_cluster = 2048 # vocabulary size - n_step = 91 - data_file_path = data_cache_path / "kdisk_trajs.pkl" - if load_data_from_file: - with open(data_file_path, "rb") as f: - data = pickle.load(f) - else: - trajs = [ - torch.zeros([1, 6, 3], dtype=torch.float32), # veh - torch.zeros([1, 6, 3], dtype=torch.float32), # ped - torch.zeros([1, 6, 3], dtype=torch.float32), # cyc - ] - dataloader = DataLoader( - dataset=MultiDataset(raw_dir=data_cache_path / "training", transform=lambda x: HeteroData(x)), - batch_size=8, - shuffle=False, - num_workers=8, - drop_last=False, - ) - - with tqdm( - total=len(dataloader), - desc=f"n_trajs={n_trajs}", - postfix={"n_veh": 0, "n_ped": 0, "n_cyc": 0}, - ) as pbar: - - for data in dataloader: - valid_mask = data["agent"]["valid_mask"] - data["agent"]["heading"] = TokenProcessor._clean_heading(valid_mask, data["agent"]["heading"]) - - for i_ag in range(valid_mask.shape[0]): - if valid_mask[i_ag, :].sum() < 30: - continue - for t in range(0, n_step - shift, shift): - if valid_mask[i_ag, t] and valid_mask[i_ag, t + shift]: - _type = data["agent"]["type"][i_ag] - if trajs[_type].shape[0] < n_trajs: - pos = data["agent"]["position"][i_ag, t : t + shift + 1, :2] - head = data["agent"]["heading"][i_ag, t : t + shift + 1] - pos, head = transform_to_local( - pos_global=pos.unsqueeze(0), # [1, 6, 2] - head_global=head.unsqueeze(0), # [1, 6] - pos_now=pos[[0]], # [1, 2] - head_now=head[[0]], # [1] - ) - head = wrap_angle(head) - to_add = torch.cat([pos, head.unsqueeze(-1)], dim=-1) - - if not (((trajs[_type] - to_add).abs().sum([1, 2]) < 1e-2).any()): - trajs[_type] = torch.cat([trajs[_type], to_add], dim=0) - pbar.update(1) - pbar.set_postfix( - n_veh=trajs[0].shape[0], - n_ped=trajs[1].shape[0], - n_cyc=trajs[2].shape[0], - ) - if trajs[0].shape[0] == n_trajs and trajs[1].shape[0] == n_trajs and trajs[2].shape[0] == n_trajs: - break - - # [n_trajs, shift+1, [relative_x, relative_y, relative_theta]] - data = {"veh": trajs[0], "ped": trajs[1], "cyc": trajs[2]} - - with open(data_file_path, "wb") as f: - pickle.dump(data, f) - - res = {"token_all": {}} - - for k, v in data.items(): - if k == "veh": - width_length = torch.tensor([2.0, 4.8]) - elif k == "ped": - width_length = torch.tensor([1.0, 1.0]) - elif k == "cyc": - width_length = torch.tensor([1.0, 2.0]) - width_length = width_length.unsqueeze(0) # [1, 2] - - contour = cal_polygon_contour(pos=v[:, -1, :2], head=v[:, -1, 2], width_length=width_length) # [n_trajs, 4, 2] - - if k == "veh": - tol = tol_dist[0] - elif k == "ped": - tol = tol_dist[1] - elif k == "cyc": - tol = tol_dist[2] - print(k, tol) - ret_traj = Kdisk_cluster(X=contour, N=num_cluster, tol=tol, a_pos=v) - ret_traj[:, :, -1] = wrap_angle(ret_traj[:, :, -1]) - - contour = cal_polygon_contour( - pos=ret_traj[:, :, :2], # [N, 6, 2] - head=ret_traj[:, :, 2], # [N, 6] - width_length=width_length.unsqueeze(0), - ) - res["token_all"][k] = contour.numpy() - - with open(Path(__file__).resolve().parent / out_file_name, "wb") as f: - pickle.dump(res, f) diff --git a/d123/training/models/sim_agent/smart/utils/__init__.py b/d123/training/models/sim_agent/smart/utils/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/d123/training/models/sim_agent/smart/utils/finetune.py b/d123/training/models/sim_agent/smart/utils/finetune.py deleted file mode 100644 index 9db8e6f4..00000000 --- a/d123/training/models/sim_agent/smart/utils/finetune.py +++ /dev/null @@ -1,33 +0,0 @@ 
-import logging - -import torch - -logger = logging.getLogger(__name__) - - -def set_model_for_finetuning(model: torch.nn.Module, finetune: bool) -> None: - def _unfreeze(module: torch.nn.Module) -> None: - for p in module.parameters(): - p.requires_grad = True - - if finetune: - for p in model.parameters(): - p.requires_grad = False - - try: - _unfreeze(model.agent_encoder.token_predict_head) - logger.info("Unfreezing token_predict_head") - except: # noqa: E722 - logger.info("No token_predict_head in model.agent_encoder") - - try: - _unfreeze(model.agent_encoder.gmm_logits_head) - _unfreeze(model.agent_encoder.gmm_pose_head) - # _unfreeze(model.agent_encoder.gmm_gmm_covpose_head) - logger.info("Unfreezing gmm heads") - except: # noqa: E722 - logger.info("No gmm_logits_head in model.agent_encoder") - - _unfreeze(model.agent_encoder.t_attn_layers) - _unfreeze(model.agent_encoder.pt2a_attn_layers) - _unfreeze(model.agent_encoder.a2a_attn_layers) diff --git a/d123/training/models/sim_agent/smart/utils/geometry.py b/d123/training/models/sim_agent/smart/utils/geometry.py deleted file mode 100644 index a351046c..00000000 --- a/d123/training/models/sim_agent/smart/utils/geometry.py +++ /dev/null @@ -1,14 +0,0 @@ -import math - -import torch - - -def angle_between_2d_vectors(ctr_vector: torch.Tensor, nbr_vector: torch.Tensor) -> torch.Tensor: - return torch.atan2( - ctr_vector[..., 0] * nbr_vector[..., 1] - ctr_vector[..., 1] * nbr_vector[..., 0], - (ctr_vector[..., :2] * nbr_vector[..., :2]).sum(dim=-1), - ) - - -def wrap_angle(angle: torch.Tensor, min_val: float = -math.pi, max_val: float = math.pi) -> torch.Tensor: - return min_val + (angle + max_val) % (max_val - min_val) diff --git a/d123/training/models/sim_agent/smart/utils/preprocess.py b/d123/training/models/sim_agent/smart/utils/preprocess.py deleted file mode 100644 index f15b68c9..00000000 --- a/d123/training/models/sim_agent/smart/utils/preprocess.py +++ /dev/null @@ -1,150 +0,0 @@ -from typing import Any, Dict - -import numpy as np -import torch -from scipy.interpolate import interp1d - - -def get_polylines_from_polygon(polygon: np.ndarray) -> np.ndarray: - # polygon: [4, 3] - l1 = np.linalg.norm(polygon[1, :2] - polygon[0, :2]) - l2 = np.linalg.norm(polygon[2, :2] - polygon[1, :2]) - - def _pl_interp_start_end(start: np.ndarray, end: np.ndarray) -> np.ndarray: - length = np.linalg.norm(start - end) - unit_vec = (end - start) / length - pl = [] - for i in range(int(length) + 1): # 4.5 -> 5 [0,1,2,3,4] - x, y, z = start + unit_vec * i - pl.append([x, y, z]) - pl.append([end[0], end[1], end[2]]) - return np.array(pl) - - if l1 > l2: - pl1 = _pl_interp_start_end(polygon[0], polygon[1]) - pl2 = _pl_interp_start_end(polygon[2], polygon[3]) - else: - pl1 = _pl_interp_start_end(polygon[0], polygon[3]) - pl2 = _pl_interp_start_end(polygon[2], polygon[1]) - return np.concatenate([pl1, pl1[::-1], pl2, pl2[::-1]], axis=0) - - -def _interplating_polyline(polylines, distance=0.5, split_distace=5): - # Calculate the cumulative distance along the path, up-sample the polyline to 0.5 meter - dist_along_path_list = [] - polylines_list = [] - euclidean_dists = np.linalg.norm(polylines[1:, :2] - polylines[:-1, :2], axis=-1) - euclidean_dists = np.concatenate([[0], euclidean_dists]) - breakpoints = np.where(euclidean_dists > 3)[0] - breakpoints = np.concatenate([[0], breakpoints, [polylines.shape[0]]]) - for i in range(1, breakpoints.shape[0]): - start = breakpoints[i - 1] - end = breakpoints[i] - 
dist_along_path_list.append(np.cumsum(euclidean_dists[start:end]) - euclidean_dists[start]) - polylines_list.append(polylines[start:end]) - - multi_polylines_list = [] - for idx in range(len(dist_along_path_list)): - if len(dist_along_path_list[idx]) < 2: - continue - dist_along_path = dist_along_path_list[idx] - polylines_cur = polylines_list[idx] - # Create interpolation functions for x and y coordinates - fxy = interp1d(dist_along_path, polylines_cur, axis=0) - - # Create an array of distances at which to interpolate - new_dist_along_path = np.arange(0, dist_along_path[-1], distance) - new_dist_along_path = np.concatenate([new_dist_along_path, dist_along_path[[-1]]]) - - # Combine the new x and y coordinates into a single array - new_polylines = fxy(new_dist_along_path) - polyline_size = int(split_distace / distance) - if new_polylines.shape[0] >= (polyline_size + 1): - padding_size = (new_polylines.shape[0] - (polyline_size + 1)) % polyline_size - final_index = (new_polylines.shape[0] - (polyline_size + 1)) // polyline_size + 1 - else: - padding_size = new_polylines.shape[0] - final_index = 0 - multi_polylines = None - new_polylines = torch.from_numpy(new_polylines) - new_heading = torch.atan2( - new_polylines[1:, 1] - new_polylines[:-1, 1], - new_polylines[1:, 0] - new_polylines[:-1, 0], - ) - new_heading = torch.cat([new_heading, new_heading[-1:]], -1)[..., None] - new_polylines = torch.cat([new_polylines, new_heading], -1) - if new_polylines.shape[0] >= (polyline_size + 1): - multi_polylines = new_polylines.unfold(dimension=0, size=polyline_size + 1, step=polyline_size) - multi_polylines = multi_polylines.transpose(1, 2) - multi_polylines = multi_polylines[:, ::5, :] - if padding_size >= 3: - last_polyline = new_polylines[final_index * polyline_size :] - last_polyline = last_polyline[torch.linspace(0, last_polyline.shape[0] - 1, steps=3).long()] - if multi_polylines is not None: - multi_polylines = torch.cat([multi_polylines, last_polyline.unsqueeze(0)], dim=0) - else: - multi_polylines = last_polyline.unsqueeze(0) - if multi_polylines is None: - continue - multi_polylines_list.append(multi_polylines) - if len(multi_polylines_list) > 0: - multi_polylines_list = torch.cat(multi_polylines_list, dim=0).to(torch.float32) - else: - multi_polylines_list = None - return multi_polylines_list - - -def preprocess_map(map_data: Dict[str, Any]) -> Dict[str, Any]: - pt2pl = map_data[("map_point", "to", "map_polygon")]["edge_index"] - split_polyline_type = [] - split_polyline_pos = [] - split_polyline_theta = [] - split_polygon_type = [] - split_light_type = [] - - for i in sorted(torch.unique(pt2pl[1])): - index = pt2pl[0, pt2pl[1] == i] - if len(index) <= 2: - continue - - polygon_type = map_data["map_polygon"]["type"][i] - light_type = map_data["map_polygon"]["light_type"][i] - cur_type = map_data["map_point"]["type"][index] - cur_pos = map_data["map_point"]["position"][index, :2] - - # assert len(np.unique(cur_type)) == 1 - - split_polyline = _interplating_polyline(cur_pos.numpy()) - if split_polyline is None: - continue - split_polyline_pos.append(split_polyline[..., :2]) - split_polyline_theta.append(split_polyline[..., 2]) - split_polyline_type.append(cur_type[0].repeat(split_polyline.shape[0])) - split_polygon_type.append(polygon_type.repeat(split_polyline.shape[0])) - split_light_type.append(light_type.repeat(split_polyline.shape[0])) - - data = {} - if len(split_polyline_pos) == 0: # add dummy empty map - data["map_save"] = { - # 6e4 such that it's within the range of float16. 
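The resampling above boils down to linear interpolation over cumulative arclength; its core, assuming strictly increasing arclength (no duplicate points):

import numpy as np
from scipy.interpolate import interp1d

def resample_polyline(xy, step=0.5):
    # xy: [n, 2]; re-sample at fixed spacing along arclength, keeping the endpoint.
    seg = np.linalg.norm(np.diff(xy, axis=0), axis=-1)
    s = np.concatenate([[0.0], np.cumsum(seg)])
    s_new = np.concatenate([np.arange(0.0, s[-1], step), s[[-1]]])
    return interp1d(s, xy, axis=0)(s_new)

pl = resample_polyline(np.array([[0.0, 0.0], [3.0, 0.0], [3.0, 4.0]]))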
- "traj_pos": torch.zeros([1, 3, 2], dtype=torch.float32) + 6e4, - "traj_theta": torch.zeros([1], dtype=torch.float32), - } - data["pt_token"] = { - "type": torch.tensor([0], dtype=torch.uint8), - "pl_type": torch.tensor([0], dtype=torch.uint8), - "light_type": torch.tensor([0], dtype=torch.uint8), - "num_nodes": 1, - } - else: - data["map_save"] = { - "traj_pos": torch.cat(split_polyline_pos, dim=0), # [num_nodes, 3, 2] - "traj_theta": torch.cat(split_polyline_theta, dim=0)[:, 0], # [num_nodes] - } - data["pt_token"] = { - "type": torch.cat(split_polyline_type, dim=0), # [num_nodes], uint8 - "pl_type": torch.cat(split_polygon_type, dim=0), # [num_nodes], uint8 - "light_type": torch.cat(split_light_type, dim=0), # [num_nodes], uint8 - "num_nodes": data["map_save"]["traj_pos"].shape[0], - } - return data diff --git a/d123/training/models/sim_agent/smart/utils/rollout.py b/d123/training/models/sim_agent/smart/utils/rollout.py deleted file mode 100644 index 041b5acb..00000000 --- a/d123/training/models/sim_agent/smart/utils/rollout.py +++ /dev/null @@ -1,260 +0,0 @@ -from typing import Optional, Tuple - -import torch -from omegaconf import DictConfig -from torch import Tensor -from torch.distributions import Categorical, Independent, MixtureSameFamily, Normal - -from d123.training.models.sim_agent.smart.smart_config import SMARTRolloutSampling - - -@torch.no_grad() -def cal_polygon_contour( - pos: Tensor, # [n_agent, n_step, n_target, 2] - head: Tensor, # [n_agent, n_step, n_target] - width_length: Tensor, # [n_agent, 1, 1, 2] -) -> Tensor: # [n_agent, n_step, n_target, 4, 2] - x, y = pos[..., 0], pos[..., 1] # [n_agent, n_step, n_target] - width, length = width_length[..., 0], width_length[..., 1] # [n_agent, 1 ,1] - - half_cos = 0.5 * head.cos() # [n_agent, n_step, n_target] - half_sin = 0.5 * head.sin() # [n_agent, n_step, n_target] - length_cos = length * half_cos # [n_agent, n_step, n_target] - length_sin = length * half_sin # [n_agent, n_step, n_target] - width_cos = width * half_cos # [n_agent, n_step, n_target] - width_sin = width * half_sin # [n_agent, n_step, n_target] - - left_front_x = x + length_cos - width_sin - left_front_y = y + length_sin + width_cos - left_front = torch.stack((left_front_x, left_front_y), dim=-1) - - right_front_x = x + length_cos + width_sin - right_front_y = y + length_sin - width_cos - right_front = torch.stack((right_front_x, right_front_y), dim=-1) - - right_back_x = x - length_cos + width_sin - right_back_y = y - length_sin - width_cos - right_back = torch.stack((right_back_x, right_back_y), dim=-1) - - left_back_x = x - length_cos - width_sin - left_back_y = y - length_sin + width_cos - left_back = torch.stack((left_back_x, left_back_y), dim=-1) - - polygon_contour = torch.stack((left_front, right_front, right_back, left_back), dim=-2) - - return polygon_contour - - -def transform_to_global( - pos_local: Tensor, # [n_agent, n_step, 2] - head_local: Optional[Tensor], # [n_agent, n_step] - pos_now: Tensor, # [n_agent, 2] - head_now: Tensor, # [n_agent] -) -> Tuple[Tensor, Optional[Tensor]]: - cos, sin = head_now.cos(), head_now.sin() - rot_mat = torch.zeros((head_now.shape[0], 2, 2), device=head_now.device) - rot_mat[:, 0, 0] = cos - rot_mat[:, 0, 1] = sin - rot_mat[:, 1, 0] = -sin - rot_mat[:, 1, 1] = cos - - pos_global = torch.bmm(pos_local, rot_mat) # [n_agent, n_step, 2]*[n_agent, 2, 2] - pos_global = pos_global + pos_now.unsqueeze(1) - if head_local is None: - head_global = None - else: - head_global = head_local + head_now.unsqueeze(1) - return 
pos_global, head_global - - -def transform_to_local( - pos_global: Tensor, # [n_agent, n_step, 2] - head_global: Optional[Tensor], # [n_agent, n_step] - pos_now: Tensor, # [n_agent, 2] - head_now: Tensor, # [n_agent] -) -> Tuple[Tensor, Optional[Tensor]]: - cos, sin = head_now.cos(), head_now.sin() - rot_mat = torch.zeros((head_now.shape[0], 2, 2), device=head_now.device) - rot_mat[:, 0, 0] = cos - rot_mat[:, 0, 1] = -sin - rot_mat[:, 1, 0] = sin - rot_mat[:, 1, 1] = cos - - pos_local = pos_global - pos_now.unsqueeze(1) - pos_local = torch.bmm(pos_local, rot_mat) # [n_agent, n_step, 2]*[n_agent, 2, 2] - if head_global is None: - head_local = None - else: - head_local = head_global - head_now.unsqueeze(1) - return pos_local, head_local - - -def sample_next_token_traj( - token_traj: Tensor, # [n_agent, n_token, 4, 2] - token_traj_all: Tensor, # [n_agent, n_token, 6, 4, 2] - sampling_scheme: SMARTRolloutSampling, - # ! for most-likely sampling - next_token_logits: Tensor, # [n_agent, n_token], with grad - # ! for nearest-pos sampling, sampling near to GT - pos_now: Tensor, # [n_agent, 2] - head_now: Tensor, # [n_agent] - pos_next_gt: Tensor, # [n_agent, 2] - head_next_gt: Tensor, # [n_agent] - valid_next_gt: Tensor, # [n_agent] - token_agent_shape: Tensor, # [n_agent, 2] -) -> Tuple[Tensor, Tensor]: - """ - Returns: - next_token_traj_all: [n_agent, 6, 4, 2], local coord - next_token_idx: [n_agent], without grad - """ - range_a = torch.arange(next_token_logits.shape[0]) - next_token_logits = next_token_logits.detach() - - if sampling_scheme.criteria == "topk_prob" or sampling_scheme.criteria == "topk_prob_sampled_with_dist": - topk_logits, topk_indices = torch.topk(next_token_logits, sampling_scheme.num_k, dim=-1, sorted=False) - if sampling_scheme.criteria == "topk_prob_sampled_with_dist": - # ! gt_contour: [n_agent, 4, 2] in global coord - gt_contour = cal_polygon_contour(pos_next_gt, head_next_gt, token_agent_shape) - gt_contour = gt_contour.unsqueeze(1) # [n_agent, 1, 4, 2] - token_world_sample = token_traj[range_a.unsqueeze(1), topk_indices] - token_world_sample = transform_to_global( - pos_local=token_world_sample.flatten(1, 2), - head_local=None, - pos_now=pos_now, # [n_agent, 2] - head_now=head_now, # [n_agent] - )[0].view(*token_world_sample.shape) - - # dist: [n_agent, n_token] - dist = torch.norm(token_world_sample - gt_contour, dim=-1).mean(-1) - topk_logits = topk_logits.masked_fill(valid_next_gt.unsqueeze(1), 0.0) - 1.0 * dist.masked_fill( - ~valid_next_gt.unsqueeze(1), 0.0 - ) - elif sampling_scheme.criteria == "topk_dist_sampled_with_prob": - # ! 
gt_contour: [n_agent, 4, 2] in global coord - gt_contour = cal_polygon_contour(pos_next_gt, head_next_gt, token_agent_shape) - gt_contour = gt_contour.unsqueeze(1) # [n_agent, 1, 4, 2] - token_world_sample = transform_to_global( - pos_local=token_traj.flatten(1, 2), # [n_agent, n_token*4, 2] - head_local=None, - pos_now=pos_now, # [n_agent, 2] - head_now=head_now, # [n_agent] - )[0].view(*token_traj.shape) - - _invalid = ~valid_next_gt - # dist: [n_agent, n_token] - dist = torch.norm(token_world_sample - gt_contour, dim=-1).mean(-1) - _logits = -1.0 * dist.masked_fill(_invalid.unsqueeze(1), 0.0) - - if _invalid.any(): - _logits[_invalid] = next_token_logits[_invalid] - _, topk_indices = torch.topk(_logits, sampling_scheme.num_k, dim=-1, sorted=False) # [n_agent, K] - topk_logits = next_token_logits[range_a.unsqueeze(1), topk_indices] - - else: - raise ValueError(f"Invalid criteria: {sampling_scheme.criteria}") - - # topk_logits, topk_indices: [n_agent, K] - topk_logits = topk_logits / sampling_scheme.temp - samples = Categorical(logits=topk_logits).sample() # [n_agent] in K - next_token_idx = topk_indices[range_a, samples] - next_token_traj_all = token_traj_all[range_a, next_token_idx] - - return next_token_idx, next_token_traj_all - - -def sample_next_gmm_traj( - token_traj: Tensor, # [n_agent, n_token, 4, 2] - token_traj_all: Tensor, # [n_agent, n_token, 6, 4, 2] - sampling_scheme: DictConfig, - # ! for most-likely sampling - ego_mask: Tensor, # [n_agent], bool, ego_mask.sum()==n_batch - ego_next_logits: Tensor, # [n_batch, n_k_ego_gmm] - ego_next_poses: Tensor, # [n_batch, n_k_ego_gmm, 3] - ego_next_cov: Tensor, # [2], one for pos, one for heading. - # ! for nearest-pos sampling, sampling near to GT - pos_now: Tensor, # [n_agent, 2] - head_now: Tensor, # [n_agent] - pos_next_gt: Tensor, # [n_agent, 2] - head_next_gt: Tensor, # [n_agent] - valid_next_gt: Tensor, # [n_agent] - token_agent_shape: Tensor, # [n_agent, 2] - next_token_idx: Tensor, # [n_agent] -) -> Tuple[Tensor, Tensor]: - """ - Returns: - next_token_traj_all: [n_agent, 6, 4, 2], local coord - next_token_idx: [n_agent], without grad - """ - n_agent = token_traj.shape[0] - n_batch = ego_next_logits.shape[0] - next_token_traj_all = token_traj_all[torch.arange(n_agent), next_token_idx] - - # ! 
sample only the ego-vehicle
-    assert sampling_scheme.criteria == "topk_prob" or sampling_scheme.criteria == "topk_prob_sampled_with_dist"
-    topk_logits, topk_indices = torch.topk(
-        ego_next_logits, sampling_scheme.num_k, dim=-1, sorted=False
-    )  # [n_agent, k], [n_agent, k]
-    ego_pose_topk = ego_next_poses[torch.arange(n_batch).unsqueeze(1), topk_indices]  # [n_batch, k, 3]
-
-    if sampling_scheme.criteria == "topk_prob_sampled_with_dist":
-        # update topk_logits
-        gt_contour = cal_polygon_contour(
-            pos_next_gt[ego_mask],
-            head_next_gt[ego_mask],
-            token_agent_shape[ego_mask],
-        )  # [n_batch, 4, 2] in global coord
-        gt_contour = gt_contour.unsqueeze(1)  # [n_batch, 1, 4, 2]
-
-        ego_pos_global, ego_head_global = transform_to_global(
-            pos_local=ego_pose_topk[:, :, :2],  # [n_batch, k, 2]
-            head_local=ego_pose_topk[:, :, -1],  # [n_batch, k]
-            pos_now=pos_now[ego_mask],  # [n_batch, 2]
-            head_now=head_now[ego_mask],  # [n_batch]
-        )
-        ego_contour = cal_polygon_contour(
-            ego_pos_global,  # [n_batch, k, 2]
-            ego_head_global,  # [n_batch, k]
-            token_agent_shape[ego_mask].unsqueeze(1),
-        )  # [n_batch, k, 4, 2] in global coord
-
-        dist = torch.norm(ego_contour - gt_contour, dim=-1).mean(-1)  # [n_batch, k]
-        topk_logits = topk_logits.masked_fill(valid_next_gt[ego_mask].unsqueeze(1), 0.0) - 1.0 * dist.masked_fill(
-            ~valid_next_gt[ego_mask].unsqueeze(1), 0.0
-        )
-
-    topk_logits = topk_logits / sampling_scheme.temp_mode  # [n_batch, k]
-    ego_pose_topk = torch.cat(
-        [
-            ego_pose_topk[..., :2],
-            ego_pose_topk[..., [-1]].cos(),
-            ego_pose_topk[..., [-1]].sin(),
-        ],
-        dim=-1,
-    )
-    cov = (
-        (ego_next_cov * sampling_scheme.temp_cov).repeat_interleave(2)[None, None, :].expand(*ego_pose_topk.shape)
-    )  # [n_batch, k, 4]
-    gmm = MixtureSameFamily(Categorical(logits=topk_logits), Independent(Normal(ego_pose_topk, cov), 1))
-    ego_sample = gmm.sample()  # [n_batch, 4]
-
-    ego_contour_local = cal_polygon_contour(
-        ego_sample[:, :2],  # [n_batch, 2]
-        torch.arctan2(ego_sample[:, -1], ego_sample[:, -2]),  # [n_batch]
-        token_agent_shape[ego_mask],  # [n_batch, 2]
-    )  # [n_batch, 4, 2] in local coord
-
-    ego_token_local = token_traj[ego_mask]  # [n_batch, n_token, 4, 2]
-
-    dist = torch.norm(ego_contour_local.unsqueeze(1) - ego_token_local, dim=-1).mean(-1)  # [n_batch, n_token]
-    next_token_idx[ego_mask] = dist.argmin(-1)
-
-    # ego_contour_local: [n_batch, 4, 2] in local coord
-    ego_contour_start = next_token_traj_all[ego_mask][:, 0]  # [n_batch, 4, 2]
-    n_step = next_token_traj_all.shape[1]
-    diff = (ego_contour_local - ego_contour_start) / (n_step - 1)
-    ego_token_interp = [ego_contour_start + diff * i for i in range(n_step)]
-    # [n_batch, 6, 4, 2]
-    next_token_traj_all[ego_mask] = torch.stack(ego_token_interp, dim=1)
-
-    return next_token_idx, next_token_traj_all
diff --git a/d123/training/models/sim_agent/smart/utils/weight_init.py b/d123/training/models/sim_agent/smart/utils/weight_init.py
deleted file mode 100644
index a507cf30..00000000
--- a/d123/training/models/sim_agent/smart/utils/weight_init.py
+++ /dev/null
@@ -1,69 +0,0 @@
-import torch.nn as nn
-
-
-def weight_init(m: nn.Module) -> None:
-    if isinstance(m, nn.Linear):
-        nn.init.xavier_uniform_(m.weight)
-        if m.bias is not None:
-            nn.init.zeros_(m.bias)
-    elif isinstance(m, (nn.Conv1d, nn.Conv2d, nn.Conv3d)):
-        fan_in = m.in_channels / m.groups
-        fan_out = m.out_channels / m.groups
-        bound = (6.0 / (fan_in + fan_out)) ** 0.5
-        nn.init.uniform_(m.weight, -bound, bound)
-        if m.bias is not None:
-            nn.init.zeros_(m.bias)
-    elif isinstance(m, nn.Embedding):
-        nn.init.normal_(m.weight, mean=0.0, std=0.02)
-    elif isinstance(m, (nn.BatchNorm1d, nn.BatchNorm2d, nn.BatchNorm3d)):
-        nn.init.ones_(m.weight)
-        nn.init.zeros_(m.bias)
-    elif isinstance(m, nn.LayerNorm):
-        nn.init.ones_(m.weight)
-        nn.init.zeros_(m.bias)
-    elif isinstance(m, nn.MultiheadAttention):
-        if m.in_proj_weight is not None:
-            fan_in = m.embed_dim
-            fan_out = m.embed_dim
-            bound = (6.0 / (fan_in + fan_out)) ** 0.5
-            nn.init.uniform_(m.in_proj_weight, -bound, bound)
-        else:
-            nn.init.xavier_uniform_(m.q_proj_weight)
-            nn.init.xavier_uniform_(m.k_proj_weight)
-            nn.init.xavier_uniform_(m.v_proj_weight)
-        if m.in_proj_bias is not None:
-            nn.init.zeros_(m.in_proj_bias)
-        nn.init.xavier_uniform_(m.out_proj.weight)
-        if m.out_proj.bias is not None:
-            nn.init.zeros_(m.out_proj.bias)
-        if m.bias_k is not None:
-            nn.init.normal_(m.bias_k, mean=0.0, std=0.02)
-        if m.bias_v is not None:
-            nn.init.normal_(m.bias_v, mean=0.0, std=0.02)
-    elif isinstance(m, (nn.LSTM, nn.LSTMCell)):
-        for name, param in m.named_parameters():
-            if "weight_ih" in name:
-                for ih in param.chunk(4, 0):
-                    nn.init.xavier_uniform_(ih)
-            elif "weight_hh" in name:
-                for hh in param.chunk(4, 0):
-                    nn.init.orthogonal_(hh)
-            elif "weight_hr" in name:
-                nn.init.xavier_uniform_(param)
-            elif "bias_ih" in name:
-                nn.init.zeros_(param)
-            elif "bias_hh" in name:
-                nn.init.zeros_(param)
-                nn.init.ones_(param.chunk(4, 0)[1])
-    elif isinstance(m, (nn.GRU, nn.GRUCell)):
-        for name, param in m.named_parameters():
-            if "weight_ih" in name:
-                for ih in param.chunk(3, 0):
-                    nn.init.xavier_uniform_(ih)
-            elif "weight_hh" in name:
-                for hh in param.chunk(3, 0):
-                    nn.init.orthogonal_(hh)
-            elif "bias_ih" in name:
-                nn.init.zeros_(param)
-            elif "bias_hh" in name:
-                nn.init.zeros_(param)
diff --git a/scripts/install/install_smart.sh b/scripts/install/install_smart.sh
deleted file mode 100644
index 0cc90c0f..00000000
--- a/scripts/install/install_smart.sh
+++ /dev/null
@@ -1,17 +0,0 @@
-wget https://data.pyg.org/whl/torch-2.6.0%2Bcu124/torch_cluster-1.6.3+pt26cu124-cp312-cp312-linux_x86_64.whl
-python3 -m pip install torch_cluster-1.6.3+pt26cu124-cp312-cp312-linux_x86_64.whl
-rm torch_cluster-1.6.3+pt26cu124-cp312-cp312-linux_x86_64.whl
-
-wget https://data.pyg.org/whl/torch-2.6.0%2Bcu124/torch_scatter-2.1.2%2Bpt26cu124-cp312-cp312-linux_x86_64.whl
-python3 -m pip install torch_scatter-2.1.2+pt26cu124-cp312-cp312-linux_x86_64.whl
-rm torch_scatter-2.1.2+pt26cu124-cp312-cp312-linux_x86_64.whl
-
-wget https://data.pyg.org/whl/torch-2.6.0%2Bcu124/torch_sparse-0.6.18+pt26cu124-cp312-cp312-linux_x86_64.whl
-python3 -m pip install torch_sparse-0.6.18+pt26cu124-cp312-cp312-linux_x86_64.whl
-rm torch_sparse-0.6.18+pt26cu124-cp312-cp312-linux_x86_64.whl
-
-wget https://data.pyg.org/whl/torch-2.6.0%2Bcu124/torch_spline_conv-1.2.2%2Bpt26cu124-cp312-cp312-linux_x86_64.whl
-python3 -m pip install torch_spline_conv-1.2.2+pt26cu124-cp312-cp312-linux_x86_64.whl
-rm torch_spline_conv-1.2.2+pt26cu124-cp312-cp312-linux_x86_64.whl
-
-python3 -m pip install torch_geometric
diff --git a/scripts/preprocessing/preprocess_smart.sh b/scripts/preprocessing/preprocess_smart.sh
deleted file mode 100644
index c4eb615f..00000000
--- a/scripts/preprocessing/preprocess_smart.sh
+++ /dev/null
@@ -1,17 +0,0 @@
-
-
-CACHE_PATH=/home/daniel/cache_test
-
-
-python $D123_DEVKIT_ROOT/d123/script/run_preprocessing.py \
-experiment_name="smart_preprocessing" \
-scene_filter="nuplan_mini_train" \
-scene_filter.max_num_scenes=1000 \
-cache_path="${CACHE_PATH}/training"
-
-
-python $D123_DEVKIT_ROOT/d123/script/run_preprocessing.py \
-experiment_name="smart_preprocessing" \
-scene_filter="nuplan_mini_val" \
-scene_filter.max_num_scenes=1000 \
-cache_path="${CACHE_PATH}/validation"
diff --git a/scripts/simulation/run_sim_agents.sh b/scripts/simulation/run_sim_agents.sh
deleted file mode 100644
index 8f245e04..00000000
--- a/scripts/simulation/run_sim_agents.sh
+++ /dev/null
@@ -1,7 +0,0 @@
-# nuplan_sim_agent_mini
-
-
-python $D123_DEVKIT_ROOT/d123/script/run_simulation.py \
-scene_filter="nuplan_sim_agent" \
-experiment_name="sim_agent" \
-# worker=single_machine_thread_pool
diff --git a/scripts/training/train_smart.sh b/scripts/training/train_smart.sh
deleted file mode 100644
index 3b9fbedb..00000000
--- a/scripts/training/train_smart.sh
+++ /dev/null
@@ -1,6 +0,0 @@
-
-
-
-python $D123_DEVKIT_ROOT/d123/script/run_training.py \
-experiment_name="smart_forecasting" \
-# worker=single_machine_thread_pool

From d48cd8c5ce12ee559e0b67ad3c4066458dd89483 Mon Sep 17 00:00:00 2001
From: Daniel Dauner
Date: Sat, 11 Oct 2025 17:57:45 +0200
Subject: [PATCH 070/145] Remove some smart notebooks (#54)

---
 notebooks/smarty/smart_feature_testing.ipynb | 149 ---------
 notebooks/smarty/smart_rollout.ipynb         | 184 -----------
 notebooks/smarty/smart_testing.ipynb         | 307 -------------------
 notebooks/smarty/smart_tokens.ipynb          | 172 -----------
 4 files changed, 812 deletions(-)
 delete mode 100644 notebooks/smarty/smart_feature_testing.ipynb
 delete mode 100644 notebooks/smarty/smart_rollout.ipynb
 delete mode 100644 notebooks/smarty/smart_testing.ipynb
 delete mode 100644 notebooks/smarty/smart_tokens.ipynb

diff --git a/notebooks/smarty/smart_feature_testing.ipynb b/notebooks/smarty/smart_feature_testing.ipynb
deleted file mode 100644
index c134baf9..00000000
--- a/notebooks/smarty/smart_feature_testing.ipynb
+++ /dev/null
@@ -1,149 +0,0 @@
-{
- "cells": [
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "id": "0",
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "from d123.training.models.sim_agent.smart.smart import SMART\n",
-    "from d123.training.models.sim_agent.smart.smart_config import SMARTConfig"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "id": "1",
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "config = SMARTConfig()\n",
-    "smart_model = SMART(config)"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "id": "2",
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "from pathlib import Path\n",
-    "from d123.dataset.scene.arrow_scene import ArrowScene\n",
-    "from d123.common.visualization.matplotlib.plots import plot_scene_at_iteration\n",
-    "\n",
-    "from d123.dataset.scene.scene_builder import ArrowSceneBuilder\n",
-    "from d123.dataset.scene.scene_filter import SceneFilter\n",
-    "\n",
-    "from d123.common.multithreading.worker_sequential import Sequential\n",
-    "\n",
-    "\n",
-    "log_names = None\n",
-    "split = \"nuplan_mini_val\"\n",
-    "scene_filter = SceneFilter(\n",
-    "    split_names=[split], log_names=log_names, timestamp_threshold_s=8.0, duration_s=8.0, history_s=1.0\n",
-    ")\n",
-    "scene_builder = ArrowSceneBuilder(\"/home/daniel/d123_workspace/data\")\n",
-    "worker = Sequential()\n",
-    "# worker = RayDistributed()\n",
-    "scenes = scene_builder.get_scenes(scene_filter, worker)\n",
-    "scene = scenes[23]\n",
-    "plot_scene_at_iteration(scene, iteration=0)"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "id": "3",
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "from d123.training.feature_builder.smart_feature_builder
import SMARTFeatureBuilder\n", - "\n", - "feature_builder= SMARTFeatureBuilder()\n", - "\n", - "feature_dict = feature_builder.build_features(scene)\n", - "feature_dict[\"pt_token\"].keys()" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "4", - "metadata": {}, - "outputs": [], - "source": [ - "from matplotlib import pyplot as plt\n", - "\n", - "\n", - "fig, ax = plt.subplots(figsize=(10, 10))\n", - "\n", - "traj_pos = feature_dict[\"map_save\"][\"traj_pos\"]\n", - "pl_type = feature_dict[\"pt_token\"][\"pl_type\"]\n", - "print(traj_pos.shape)\n", - "for i in range(\n", - " traj_pos.shape[0],\n", - "):\n", - " if pl_type[i] == 1:\n", - " ax.plot(traj_pos[i, :, 0], traj_pos[i, :, 1])\n", - "\n", - "ax.set_title(\"map_save/traj_pos\")" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "5", - "metadata": {}, - "outputs": [], - "source": [ - "import matplotlib.pyplot as plt\n", - "\n", - "fig, ax = plt.subplots(figsize=(10, 10))\n", - "\n", - "# for boundary in boundaries:\n", - "# ax.plot(boundary[:, 0], boundary[:, 1], color=\"blue\", linewidth=1.0, alpha=1.0)\n", - "for map_segment in map_segments:\n", - " ax.plot(map_segment[:, 0], map_segment[:, 1], linewidth=1.0, alpha=1.0)\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "6", - "metadata": {}, - "outputs": [], - "source": [] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "7", - "metadata": {}, - "outputs": [], - "source": [] - } - ], - "metadata": { - "kernelspec": { - "display_name": "asim", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.9.21" - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} diff --git a/notebooks/smarty/smart_rollout.ipynb b/notebooks/smarty/smart_rollout.ipynb deleted file mode 100644 index d437fcfb..00000000 --- a/notebooks/smarty/smart_rollout.ipynb +++ /dev/null @@ -1,184 +0,0 @@ -{ - "cells": [ - { - "cell_type": "code", - "execution_count": null, - "id": "0", - "metadata": {}, - "outputs": [], - "source": [ - "from d123.training.models.sim_agent.smart.smart import SMART\n", - "from d123.training.models.sim_agent.smart.smart_config import SMARTConfig\n", - "\n", - "from pathlib import Path\n", - "from d123.dataset.scene.arrow_scene import ArrowScene\n", - "from d123.common.visualization.matplotlib.plots import plot_scene_at_iteration\n", - "\n", - "from d123.dataset.scene.scene_builder import ArrowSceneBuilder\n", - "from d123.dataset.scene.scene_filter import SceneFilter\n", - "\n", - "from d123.common.multithreading.worker_sequential import Sequential" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "1", - "metadata": {}, - "outputs": [], - "source": [ - "log_names = None\n", - "split = \"nuplan_mini_val\"\n", - "scene_filter = SceneFilter(\n", - " split_names=[split],\n", - " log_names=log_names,\n", - " timestamp_threshold_s=8.0,\n", - " duration_s=8.1,\n", - " history_s=1.0,\n", - ")\n", - "scene_builder = ArrowSceneBuilder(\"/home/daniel/d123_workspace/data\")\n", - "worker = Sequential()\n", - "# worker = RayDistributed()\n", - "scenes = scene_builder.get_scenes(scene_filter, worker)\n", - "scene: ArrowScene = scenes[100]\n", - "plot_scene_at_iteration(scene, iteration=0)\n", - "print(scene.number_of_iterations, 
scene.get_number_of_history_iterations())" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "2", - "metadata": {}, - "outputs": [], - "source": [ - "from d123.training.models.sim_agent.smart.smart import SMART\n", - "from d123.training.models.sim_agent.smart.smart_config import SMARTConfig\n", - "\n", - "\n", - "checkpoint_path = Path(\"/home/daniel/epoch_027.ckpt\")\n", - "config = SMARTConfig(\n", - " hidden_dim=64,\n", - " num_freq_bands=64,\n", - " num_heads=4,\n", - " head_dim=8,\n", - " dropout=0.1,\n", - " hist_drop_prob=0.1,\n", - " num_map_layers=2,\n", - " num_agent_layers=4,\n", - " pl2pl_radius=10,\n", - " pl2a_radius=20,\n", - " a2a_radius=20,\n", - " time_span=20,\n", - " num_historical_steps=11,\n", - " num_future_steps=80,\n", - ")\n", - "\n", - "smart_model = SMART.load_from_checkpoint(checkpoint_path, config=config, map_location=\"cpu\")\n", - "smart_model.eval()\n", - "# print(smart_model.training)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "3", - "metadata": {}, - "outputs": [], - "source": [ - "\n", - "import torch\n", - "from torch_geometric.data import HeteroData\n", - "from d123.training.feature_builder.smart_feature_builder import SMARTFeatureBuilder\n", - "from d123.training.models.sim_agent.smart.datamodules.target_builder import _numpy_dict_to_torch\n", - "\n", - "feature_builder = SMARTFeatureBuilder()\n", - "features = feature_builder.build_features(scene)\n", - "# features[\"agent\"][\"position\"][:, :40] = 0.0\n", - "_numpy_dict_to_torch(features)\n", - "\n", - "\n", - "torch_features = HeteroData(features)\n", - "\n", - "from torch_geometric.loader import DataLoader\n", - "\n", - "# If you have a dataset\n", - "dataset = [torch_features] # List with single sample\n", - "loader = DataLoader(dataset, batch_size=1, shuffle=False)\n", - "with torch.no_grad():\n", - " for batch in loader:\n", - " pred_traj, pred_z, pred_head = smart_model.test_step(batch, 0)\n", - " break\n", - "\n", - "\n", - "# features[\"agent\"][\"valid_mask\"].sum(-1)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "4", - "metadata": {}, - "outputs": [], - "source": [ - "array = pred_traj.numpy()\n", - "\n", - "array.shape" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "5", - "metadata": {}, - "outputs": [], - "source": [ - "from matplotlib import pyplot as plt\n", - "\n", - "from d123.geometry.transform.se2_array import convert_relative_to_absolute_point_2d_array\n", - "\n", - "\n", - "origin = scene.get_ego_state_at_iteration(0).bounding_box.center.state_se2\n", - "abs_array = convert_relative_to_absolute_point_2d_array(origin, array)\n", - "\n", - "\n", - "for roll_out in range(abs_array.shape[1]):\n", - " # fig, ax = plt.subplots(figsize=(10, 10))\n", - " fig, ax = plot_scene_at_iteration(scene, iteration=0)\n", - " for i in range(abs_array.shape[0]):\n", - " ax.plot(abs_array[i, roll_out, :, 0], abs_array[i, roll_out, :, 1], label=f\"Agent {i}\", zorder=15, linewidth=3, alpha=0.5)\n", - " # ax.set_aspect('equal', adjustable='box')\n", - " plt.show()\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "6", - "metadata": {}, - "outputs": [], - "source": [] - } - ], - "metadata": { - "kernelspec": { - "display_name": "d123", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - 
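The rollout driven by this notebook ultimately draws the next motion token through `sample_next_token_traj` (removed above). Its core is ordinary top-k sampling with a temperature rescale; a reduced sketch of just that step, with made-up shapes and values for illustration:

import torch
from torch.distributions import Categorical

n_agent, n_token, k, temp = 4, 16, 5, 1.2
logits = torch.randn(n_agent, n_token)  # stand-in for next_token_logits

# Keep the K most likely tokens per agent, then sample among them.
topk_logits, topk_indices = torch.topk(logits, k, dim=-1, sorted=False)  # [n_agent, k]
samples = Categorical(logits=topk_logits / temp).sample()                # [n_agent], values in [0, k)
next_token_idx = topk_indices[torch.arange(n_agent), samples]            # [n_agent], values in [0, n_token)
print(next_token_idx)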
"pygments_lexer": "ipython3", - "version": "3.12.11" - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} diff --git a/notebooks/smarty/smart_testing.ipynb b/notebooks/smarty/smart_testing.ipynb deleted file mode 100644 index 765325f9..00000000 --- a/notebooks/smarty/smart_testing.ipynb +++ /dev/null @@ -1,307 +0,0 @@ -{ - "cells": [ - { - "cell_type": "code", - "execution_count": null, - "id": "0", - "metadata": {}, - "outputs": [], - "source": [ - "from d123.training.models.sim_agent.smart.smart import SMART\n", - "from d123.training.models.sim_agent.smart.smart_config import SMARTConfig\n", - "\n", - "from d123.common.visualization.color.color import TAB_10\n", - "\n", - "import torch\n", - "\n", - "import matplotlib.pyplot as plt\n", - "import numpy as np" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "1", - "metadata": {}, - "outputs": [], - "source": [ - "\n", - "\n", - "config = SMARTConfig()\n", - "smart_model = SMART(config)\n", - "smart_model" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "2", - "metadata": {}, - "outputs": [], - "source": [ - "from pathlib import Path\n", - "import pickle\n", - "\n", - "training_path = Path(\"/home/daniel/nuplan_cache/training\")\n", - "pickle_paths = list(training_path.iterdir())\n", - "\n", - "idx = 1\n", - "\n", - "with open(pickle_paths[idx], \"rb\") as f:\n", - " data = pickle.load(f)\n", - "data\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "3", - "metadata": {}, - "outputs": [], - "source": [ - "for key in data.keys():\n", - " print(f\"{key}:\")\n", - " try:\n", - " for part_key, part_data in data[key].items():\n", - " if isinstance(part_data, (torch.Tensor, np.ndarray)):\n", - " print(f\" {part_key}:\")\n", - " print(f\" Tensor: shape: {list(part_data.shape)}, dtype: {part_data.dtype}\")\n", - " else:\n", - " print(f\" {part_key}: {type(part_data)} - {part_data}\")\n", - " print(f\" {type(part_data)} - {part_data}\")\n", - "\n", - " except:\n", - " print(f\" {data[key]}\")" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "4", - "metadata": {}, - "outputs": [], - "source": [ - "\"\"\"\n", - "map_save:\n", - " traj_pos:\n", - " Tensor: shape: [3013, 3, 2], dtype: torch.float32\n", - " traj_theta:\n", - " Tensor: shape: [3013], dtype: torch.float32\n", - "pt_token:\n", - " type:\n", - " Tensor: shape: [3013], dtype: torch.uint8\n", - " pl_type:\n", - " Tensor: shape: [3013], dtype: torch.uint8\n", - " light_type:\n", - " Tensor: shape: [3013], dtype: torch.uint8\n", - " num_nodes: - 3013\n", - " - 3013\n", - "agent:\n", - " num_nodes: - 48\n", - " - 48\n", - " valid_mask:\n", - " Tensor: shape: [48, 91], dtype: torch.bool\n", - " role:\n", - " Tensor: shape: [48, 3], dtype: torch.bool\n", - " id:\n", - " Tensor: shape: [48], dtype: torch.int64\n", - " type:\n", - " Tensor: shape: [48], dtype: torch.uint8\n", - " position:\n", - " Tensor: shape: [48, 91, 3], dtype: torch.float32\n", - " heading:\n", - " Tensor: shape: [48, 91], dtype: torch.float32\n", - " velocity:\n", - " Tensor: shape: [48, 91, 2], dtype: torch.float32\n", - " shape:\n", - " Tensor: shape: [48, 3], dtype: torch.float32\n", - "scenario_id:\n", - " 5e1ba6c841ae6ccd\n", - "\"\"\"" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "5", - "metadata": {}, - "outputs": [], - "source": [ - "\n", - "# 1. 
map_save:\n", - "# traj_pos:\n", - "# Tensor: shape: [3013, 3, 2], dtype: torch.float32\n", - "# traj_theta:\n", - "# Tensor: shape: [3013], dtype: torch.float32\n", - "\n", - "\n", - "\n", - "fig, ax = plt.subplots(figsize=(10, 10))\n", - "\n", - "traj_pos = data['map_save']['traj_pos']\n", - "for i in range(traj_pos.shape[0], ):\n", - " ax.plot(traj_pos[i, :, 0], traj_pos[i, :, 1])\n", - "\n", - "ax.set_title(\"map_save/traj_pos\")\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "6", - "metadata": {}, - "outputs": [], - "source": [ - "distance = np.linalg.norm(traj_pos[:, :-1] - traj_pos[:, 1:], axis=-1)\n", - "\n", - "# min_x, min_y = np.min(traj_pos[:, :, 0]), np.min(traj_pos[:, :, 1])\n", - "# max_x, max_y = np.max(traj_pos[:, :, 0]), np.max(traj_pos[:, :, 1])\n", - "# print(np.abs(min_y-max_y), np.abs(min_x-max_x))\n", - "\n", - "\n", - "plt.hist(distance)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "7", - "metadata": {}, - "outputs": [], - "source": [ - "# pt_token:\n", - "# type:\n", - "# Tensor: shape: [3013], dtype: torch.uint8\n", - "# pl_type:\n", - "# Tensor: shape: [3013], dtype: torch.uint8\n", - "# light_type:\n", - "# Tensor: shape: [3013], dtype: torch.uint8\n", - "# num_nodes: - 3013\n", - "# - 3013\n", - "\n", - "from d123.common.visualization.matplotlib.utils import add_non_repeating_legend_to_ax\n", - "\n", - "\n", - "fig, ax = plt.subplots(figsize=(10, 10))\n", - "traj_pos = data[\"map_save\"][\"traj_pos\"]\n", - "type = data[\"pt_token\"][\"type\"]\n", - "for i in range(traj_pos.shape[0]):\n", - " ax.plot(traj_pos[i, :, 0], traj_pos[i, :, 1], color=TAB_10[type[i] % len(TAB_10)].hex, label=f\"type: {type[i]}\")\n", - "ax.set_title(f\"map_save/traj_pos with type {set(type.tolist())}\")\n", - "add_non_repeating_legend_to_ax(ax)\n", - "plt.show()\n", - "\n", - "\n", - "fig, ax = plt.subplots(figsize=(10, 10))\n", - "traj_pos = data[\"map_save\"][\"traj_pos\"]\n", - "pl_type = data[\"pt_token\"][\"pl_type\"]\n", - "for i in range(traj_pos.shape[0]):\n", - " ax.plot(traj_pos[i, :, 0], traj_pos[i, :, 1], color=TAB_10[pl_type[i] % len(TAB_10)].hex, label=f\"pl_type: {pl_type[i]}\")\n", - "ax.set_title(f\"map_save/traj_pos with pl_type {set(pl_type.tolist())}\")\n", - "add_non_repeating_legend_to_ax(ax)\n", - "plt.show()\n", - "\n", - "fig, ax = plt.subplots(figsize=(10, 10))\n", - "traj_pos = data[\"map_save\"][\"traj_pos\"]\n", - "light_type = data[\"pt_token\"][\"light_type\"]\n", - "for i in range(traj_pos.shape[0]):\n", - " ax.plot(traj_pos[i, :, 0], traj_pos[i, :, 1], color=TAB_10[light_type[i] % len(TAB_10)].hex, label=f\"light_type: {light_type[i]}\")\n", - "ax.set_title(f\"map_save/traj_pos with light_type {set(light_type.tolist())}\")\n", - "add_non_repeating_legend_to_ax(ax)\n", - "plt.show()" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "8", - "metadata": {}, - "outputs": [], - "source": [ - "# agent:\n", - "# num_nodes: - 48\n", - "# - 48\n", - "# valid_mask:\n", - "# Tensor: shape: [48, 91], dtype: torch.bool\n", - "# role:\n", - "# Tensor: shape: [48, 3], dtype: torch.bool\n", - "# id:\n", - "# Tensor: shape: [48], dtype: torch.int64\n", - "# type:\n", - "# Tensor: shape: [48], dtype: torch.uint8\n", - "# position:\n", - "# Tensor: shape: [48, 91, 3], dtype: torch.float32\n", - "# heading:\n", - "# Tensor: shape: [48, 91], dtype: torch.float32\n", - "# velocity:\n", - "# Tensor: shape: [48, 91, 2], dtype: torch.float32\n", - "# shape:\n", - "# Tensor: shape: [48, 3], dtype: 
torch.float32\n", - "\n", - "num_nodes = data[\"agent\"][\"num_nodes\"]\n", - "valid_mask = data[\"agent\"][\"valid_mask\"]\n", - "role = data[\"agent\"][\"role\"].argmax(axis=-1)\n", - "id = data[\"agent\"][\"id\"]\n", - "type = data[\"agent\"][\"type\"]\n", - "position = data[\"agent\"][\"position\"]\n", - "heading = data[\"agent\"][\"heading\"]\n", - "velocity = data[\"agent\"][\"velocity\"]\n", - "shape = data[\"agent\"][\"shape\"]\n", - "\n", - "\n", - "fig, ax = plt.subplots(figsize=(10, 10))\n", - "for i in range(num_nodes):\n", - " if type[i] == 1:\n", - " continue\n", - "\n", - " position_mask = valid_mask[i]\n", - " ax.plot(\n", - " position[i, position_mask, 0],\n", - " position[i, position_mask, 1],\n", - " # color=TAB_10[type[i] % len(TAB_10)].hex,\n", - " label=f\"type: {type[i]}, id: {id[i]}, role: {role[i]}\",\n", - " )\n", - "\n", - "# ax.legend()\n", - "ax.set_aspect('equal', adjustable='box')" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "9", - "metadata": {}, - "outputs": [], - "source": [] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "10", - "metadata": {}, - "outputs": [], - "source": [] - } - ], - "metadata": { - "kernelspec": { - "display_name": "asim", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.9.21" - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} diff --git a/notebooks/smarty/smart_tokens.ipynb b/notebooks/smarty/smart_tokens.ipynb deleted file mode 100644 index c2ff3060..00000000 --- a/notebooks/smarty/smart_tokens.ipynb +++ /dev/null @@ -1,172 +0,0 @@ -{ - "cells": [ - { - "cell_type": "code", - "execution_count": null, - "id": "0", - "metadata": {}, - "outputs": [], - "source": [ - "from d123.training.models.sim_agent.smart.smart import SMART\n", - "from d123.training.models.sim_agent.smart.smart_config import SMARTConfig" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "1", - "metadata": {}, - "outputs": [], - "source": [ - "\n", - "config = SMARTConfig()\n", - "SMART(config)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "2", - "metadata": {}, - "outputs": [], - "source": [ - "import pickle\n", - "\n", - "path = \"/home/daniel/d123_workspace/d123/d123/training/models/sim_agent/smart/tokens/cluster_frame_5_2048.pkl\"\n", - "\n", - "with open(path, \"rb\") as f:\n", - " data = pickle.load(f)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "3", - "metadata": {}, - "outputs": [], - "source": [ - "# dict_keys(['token', 'traj', 'token_all'])\n", - "data.keys()\n", - "\n", - "# dict_keys(['veh', 'ped', 'cyc'])\n", - "data[\"token_all\"][\"veh\"].shape\n", - "\n", - "vehicle_tokens = data[\"token_all\"][\"veh\"]\n", - "vehicle_tokens.shape # (2048, 6, 4, 2)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "4", - "metadata": {}, - "outputs": [], - "source": [ - "import matplotlib.pyplot as plt\n", - "\n", - "from d123.common.visualization.color.color import TAB_10\n", - "fix, ax = plt.subplots(1,1,figsize=(10, 5))\n", - "\n", - "\n", - "\n", - "for token_idx, token in enumerate(vehicle_tokens[:3]):\n", - " for timestep in token:\n", - " ax.plot(timestep[:, 1], timestep[:, 0], marker='o', markersize=2, linestyle='-', color=TAB_10[token_idx % len(TAB_10)].hex)\n", - "\n", - " \n", 
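The `(2048, 6, 4, 2)` token shape explored above stores four box corners per timestep; the removed `cal_polygon_contour` in rollout.py builds exactly such corners from a pose and a box size. A scalar sketch of that corner math (the original is batched over agents, steps, and targets):

import torch

x, y, head = torch.tensor(1.0), torch.tensor(2.0), torch.tensor(0.3)
width, length = torch.tensor(2.0), torch.tensor(4.5)

# Half-rotations shared by all corners.
half_cos, half_sin = 0.5 * head.cos(), 0.5 * head.sin()
lc, ls = length * half_cos, length * half_sin
wc, ws = width * half_cos, width * half_sin

corners = torch.stack([
    torch.stack([x + lc - ws, y + ls + wc]),  # left front
    torch.stack([x + lc + ws, y + ls - wc]),  # right front
    torch.stack([x - lc + ws, y - ls - wc]),  # right back
    torch.stack([x - lc - ws, y - ls + wc]),  # left back
])
print(corners)  # [4, 2]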
- "\n", - "\n", - "ax.set_aspect('equal', adjustable='box')\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "5", - "metadata": {}, - "outputs": [], - "source": [ - "import pickle\n", - "\n", - "path = \"/home/daniel/d123_workspace/d123/d123/training/models/sim_agent/smart/tokens/map_traj_token5.pkl\"\n", - "\n", - "with open(path, \"rb\") as f:\n", - " data = pickle.load(f)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "6", - "metadata": {}, - "outputs": [], - "source": [ - "data.keys() # 'traj_src', 'sample_pt'\n", - "\n", - "import numpy as np \n", - "\n", - "data[\"traj_src\"].shape\n", - "\n", - "\n", - "fix, ax = plt.subplots(1, 1, figsize=(10, 5))\n", - "\n", - "\n", - "traj_tokens = data[\"traj_src\"][np.random.choice(len(data[\"traj_src\"]), size=50, replace=False)]\n", - "\n", - "for token_idx, token in enumerate(traj_tokens):\n", - " # for timestep in token:\n", - " ax.plot(\n", - " token[:, 1], token[:, 0], marker=\"o\", markersize=2, linestyle=\"-\", color=TAB_10[token_idx % len(TAB_10)].hex\n", - " )\n", - "ax.set_aspect(\"equal\", adjustable=\"box\")" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "7", - "metadata": {}, - "outputs": [], - "source": [ - "data[\"traj_src\"].shape" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "8", - "metadata": {}, - "outputs": [], - "source": [] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "9", - "metadata": {}, - "outputs": [], - "source": [] - } - ], - "metadata": { - "kernelspec": { - "display_name": "asim", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.9.21" - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} From 81065d721b7c8ff7495790c92bea32c753a162d8 Mon Sep 17 00:00:00 2001 From: Daniel Dauner Date: Sat, 11 Oct 2025 17:59:05 +0200 Subject: [PATCH 071/145] Delete more notebooks from simulation and gym code. 
(#54) --- .../deprecated/collect_sim_metrics_gym.ipynb | 154 ------- notebooks/gym/test_gym.ipynb | 300 ------------- notebooks/gym/test_simulation_2d.ipynb | 421 ------------------ 3 files changed, 875 deletions(-) delete mode 100644 notebooks/deprecated/collect_sim_metrics_gym.ipynb delete mode 100644 notebooks/gym/test_gym.ipynb delete mode 100644 notebooks/gym/test_simulation_2d.ipynb diff --git a/notebooks/deprecated/collect_sim_metrics_gym.ipynb b/notebooks/deprecated/collect_sim_metrics_gym.ipynb deleted file mode 100644 index 40100cd6..00000000 --- a/notebooks/deprecated/collect_sim_metrics_gym.ipynb +++ /dev/null @@ -1,154 +0,0 @@ -{ - "cells": [ - { - "cell_type": "code", - "execution_count": null, - "id": "0", - "metadata": {}, - "outputs": [], - "source": [ - "from d123.dataset.scene.scene_builder import ArrowSceneBuilder\n", - "from d123.dataset.scene.scene_filter import SceneFilter\n", - "from d123.common.multithreading.worker_sequential import Sequential\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "1", - "metadata": {}, - "outputs": [], - "source": [ - "import os, psutil\n", - "import numpy as np\n", - "\n", - "def print_memory_usage():\n", - " process = psutil.Process(os.getpid())\n", - " memory_info = process.memory_info()\n", - " print(f\"Memory usage: {memory_info.rss / 1024 ** 2:.2f} MB\")\n", - "\n", - "\n", - "num_scenes = 100\n", - "split = \"nuplan_mini_test\"\n", - "\n", - "# log_names = [\"2021.06.07.12.54.00_veh-35_01843_02314\"]\n", - "scene_tokens = None\n", - "# scene_tokens = [\"1b48bd8c60b3790a\"]\n", - "log_names = None\n", - "\n", - "scene_filter = SceneFilter(split_names=[split], log_names=log_names, scene_tokens=scene_tokens, duration_s=8.1, history_s=1.0)\n", - "scene_builder = ArrowSceneBuilder(\"/home/daniel/d123_workspace/data\")\n", - "worker = Sequential()\n", - "# worker = RayDistributed()\n", - "scenes = scene_builder.get_scenes(scene_filter, worker)\n", - "\n", - "np.random.seed(42)\n", - "scenes = list(np.random.choice(scenes, size=num_scenes, replace=False))\n", - "\n", - "# scenes = [scene for scene in scenes if scene.token ==\"be74699d84ec5662\"]\n", - "\n", - "experiment_name = None\n", - "print([scene.token for scene in scenes])\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "2", - "metadata": {}, - "outputs": [], - "source": [ - "import trace\n", - "import traceback\n", - "from tqdm import tqdm\n", - "from d123.dataset.arrow.conversion import BoxDetectionWrapper, DetectionType\n", - "from d123.simulation.gym.demo_gym_env import DemoGymEnv\n", - "from d123.simulation.observation.agents_observation import _filter_agents_by_type\n", - "\n", - "from typing import List\n", - "\n", - "from d123.simulation.metrics.sim_agents.sim_agents import get_sim_agents_metrics\n", - "\n", - "from d123.dataset.arrow.conversion import BoxDetection, DetectionType\n", - "from d123.dataset.recording.detection.detection import BoxDetectionWrapper\n", - "\n", - "import time\n", - "\n", - "import pandas as pd\n", - "\n", - "action = [1.0, 0.1] # Placeholder action, replace with actual action logic\n", - "env = DemoGymEnv(scenes)\n", - "\n", - "results = []\n", - "\n", - "for scene in tqdm(scenes):\n", - " try:\n", - "\n", - " agent_rollouts = []\n", - "\n", - " map_api, ego_state, detection_observation, current_scene = env.reset(scene)\n", - " initial_ego_state = ego_state\n", - " agent_rollouts.append(detection_observation.box_detections)\n", - "\n", - " result = {}\n", - " result[\"token\"] = 
scene.token\n", - " for i in range(150):\n", - " ego_state, detection_observation, end = env.step(action)\n", - " agent_rollouts.append(detection_observation.box_detections)\n", - " if end:\n", - " break\n", - " result.update(get_sim_agents_metrics(current_scene, agent_rollouts))\n", - " results.append(result)\n", - " except Exception as e:\n", - " print(current_scene.token)\n", - " traceback.print_exc()\n", - " continue\n", - "\n", - "\n", - "\n", - "df = pd.DataFrame(results)\n", - "avg_row = df.drop(columns=['token']).mean(numeric_only=True)\n", - "avg_row['token'] = 'average'\n", - "df = pd.concat([df, pd.DataFrame([avg_row])], ignore_index=True)\n", - "df.to_csv(\"test.csv\")\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "3", - "metadata": {}, - "outputs": [], - "source": [] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "4", - "metadata": {}, - "outputs": [], - "source": [] - } - ], - "metadata": { - "kernelspec": { - "display_name": "d123", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.12.11" - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} diff --git a/notebooks/gym/test_gym.ipynb b/notebooks/gym/test_gym.ipynb deleted file mode 100644 index a9e89681..00000000 --- a/notebooks/gym/test_gym.ipynb +++ /dev/null @@ -1,300 +0,0 @@ -{ - "cells": [ - { - "cell_type": "code", - "execution_count": null, - "id": "0", - "metadata": {}, - "outputs": [], - "source": [ - "from d123.dataset.scene.scene_builder import ArrowSceneBuilder\n", - "from d123.dataset.scene.scene_filter import SceneFilter\n", - "\n", - "from d123.common.multithreading.worker_sequential import Sequential\n", - "# from d123.common.multithreading.worker_ray import RayDistributed" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "1", - "metadata": {}, - "outputs": [], - "source": [ - "\n", - "\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "2", - "metadata": {}, - "outputs": [], - "source": [ - "import os, psutil\n", - "\n", - "\n", - "def print_memory_usage():\n", - " process = psutil.Process(os.getpid())\n", - " memory_info = process.memory_info()\n", - " print(f\"Memory usage: {memory_info.rss / 1024 ** 2:.2f} MB\")\n", - "\n", - "\n", - "split = \"nuplan_mini_val\"\n", - "\n", - "# log_names = [\"2021.06.07.12.54.00_veh-35_01843_02314\"]\n", - "scene_tokens = None\n", - "# scene_tokens = [\"2283aea39bc1505e\"]\n", - "log_names = None\n", - "\n", - "scene_filter = SceneFilter(\n", - " split_names=[split], log_names=log_names, scene_tokens=scene_tokens, duration_s=15.1, history_s=1.0\n", - ")\n", - "scene_builder = ArrowSceneBuilder(\"/home/daniel/d123_workspace/data\")\n", - "worker = Sequential()\n", - "# worker = RayDistributed()\n", - "scenes = scene_builder.get_scenes(scene_filter, worker)\n", - "\n", - "print(len(scenes))\n", - "\n", - "for scene in scenes[:10]:\n", - " print(scene.log_name, scene.token)\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "3", - "metadata": {}, - "outputs": [], - "source": [ - "from pathlib import Path\n", - "from typing import Optional, Tuple\n", - "\n", - "import matplotlib.animation as animation\n", - "import matplotlib.pyplot as plt\n", - "from tqdm import tqdm\n", - "\n", - "from 
d123.common.datatypes.vehicle_state.ego_state import EgoStateSE2\n",
    "from d123.geometry.base import Point2D, StateSE2\n",
    "from d123.geometry.bounding_box import BoundingBoxSE2\n",
    "from d123.common.visualization.color.default import EGO_VEHICLE_CONFIG\n",
    "from d123.common.visualization.matplotlib.observation import (\n",
    "    add_bounding_box_to_ax,\n",
    "    add_box_detections_to_ax,\n",
    "    add_default_map_on_ax,\n",
    "    add_traffic_lights_to_ax,\n",
    "    add_ego_vehicle_to_ax,\n",
    ")\n",
    "from d123.dataset.arrow.conversion import TrafficLightDetectionWrapper\n",
    "from d123.dataset.maps.abstract_map import AbstractMap\n",
    "from d123.datatypes.detections.detection import BoxDetectionWrapper\n",
    "from d123.dataset.scene.abstract_scene import AbstractScene\n",
    "import io\n",
    "from PIL import Image\n",
    "\n",
    "\n",
    "\n",
    "def _plot_scene_on_ax(\n",
    "    ax: plt.Axes,\n",
    "    map_api: AbstractMap,\n",
    "    ego_state: EgoStateSE2,\n",
    "    initial_ego_state: Optional[EgoStateSE2],\n",
    "    box_detections: BoxDetectionWrapper,\n",
    "    traffic_light_detections: TrafficLightDetectionWrapper,\n",
    "    radius: float = 120,\n",
    ") -> plt.Axes:\n",
    "\n",
    "    if initial_ego_state is not None:\n",
    "        point_2d = initial_ego_state.center.point_2d\n",
    "    else:\n",
    "        point_2d = ego_state.center.point_2d\n",
    "    add_default_map_on_ax(ax, map_api, point_2d, radius=radius)\n",
    "    add_traffic_lights_to_ax(ax, traffic_light_detections, map_api)\n",
    "\n",
    "    add_box_detections_to_ax(ax, box_detections)\n",
    "    add_ego_vehicle_to_ax(ax, ego_state)\n",
    "\n",
    "    ax.set_xlim(point_2d.x - radius, point_2d.x + radius)\n",
    "    ax.set_ylim(point_2d.y - radius, point_2d.y + radius)\n",
    "\n",
    "    ax.set_aspect(\"equal\", adjustable=\"box\")\n",
    "    return ax\n",
    "\n",
    "\n",
    "def plot_scene_to_image(\n",
    "    map_api: AbstractMap,\n",
    "    ego_state: EgoStateSE2,\n",
    "    initial_ego_state: Optional[EgoStateSE2],\n",
    "    box_detections: BoxDetectionWrapper,\n",
    "    traffic_light_detections: TrafficLightDetectionWrapper,\n",
    "    radius: float = 120,\n",
    "    figsize: Tuple[int, int] = (8, 8),\n",
    ") -> Image.Image:\n",
    "\n",
    "    fig, ax = plt.subplots(figsize=figsize)\n",
    "    _plot_scene_on_ax(ax, map_api, ego_state, initial_ego_state, box_detections, traffic_light_detections, radius)\n",
    "    ax.set_aspect(\"equal\", adjustable=\"box\")\n",
    "    plt.subplots_adjust(left=0.05, right=0.95, top=0.95, bottom=0.05)\n",
    "    # plt.tight_layout()\n",
    "\n",
    "    buf = io.BytesIO()\n",
    "    fig.savefig(buf, format=\"png\", bbox_inches=\"tight\")\n",
    "    plt.close(fig)\n",
    "    buf.seek(0)\n",
    "    img = Image.open(buf)\n",
    "    return img"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "4",
   "metadata": {},
   "outputs": [],
   "source": [
    "from d123.dataset.arrow.conversion import DetectionType\n",
    "from d123.simulation.gym.gym_env import GymEnvironment\n",
    "from d123.simulation.observation.agents_observation import _filter_agents_by_type\n",
    "\n",
    "import time\n",
    "\n",
    "images = []\n",
    "agent_rollouts = []\n",
    "plot: bool = True\n",
    "action = [1.0, -0.0]  # Placeholder action, replace with actual action logic\n",
    "env = GymEnvironment(scenes)\n",
    "\n",
    "start = time.time()\n",
    "\n",
    "map_api, ego_state, detection_observation, current_scene = env.reset(None)\n",
    "initial_ego_state = ego_state\n",
    "cars, _, _ = _filter_agents_by_type(detection_observation.box_detections, detection_types=[DetectionType.VEHICLE])\n",
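`plot_scene_to_image` above renders each frame through an in-memory PNG buffer, and the later cells collect those frames into a GIF. The same pattern in isolation, with a dummy plot and an illustrative output path:

import io

import matplotlib.pyplot as plt
from PIL import Image

frames = []
for i in range(5):
    fig, ax = plt.subplots(figsize=(4, 4))
    ax.plot([0, i], [0, i])
    # Render the figure into an in-memory PNG, then load it as a PIL image.
    buf = io.BytesIO()
    fig.savefig(buf, format="png", bbox_inches="tight")
    plt.close(fig)
    buf.seek(0)
    frames.append(Image.open(buf).convert("P", palette=Image.ADAPTIVE))

# Save the frames as an animated GIF (100 ms per frame, looping forever).
frames[0].save("demo.gif", save_all=True, append_images=frames[1:], duration=100, loop=0)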
"agent_rollouts.append(BoxDetectionWrapper(cars))\n", - "if plot:\n", - " images.append(\n", - " plot_scene_to_image(\n", - " map_api,\n", - " ego_state,\n", - " initial_ego_state,\n", - " detection_observation.box_detections,\n", - " detection_observation.traffic_light_detections,\n", - " )\n", - " )\n", - "\n", - "\n", - "for i in range(160):\n", - " ego_state, detection_observation, end = env.step(action)\n", - " cars, _, _ = _filter_agents_by_type(detection_observation.box_detections, detection_types=[DetectionType.VEHICLE])\n", - " agent_rollouts.append(BoxDetectionWrapper(cars))\n", - " if plot:\n", - " images.append(\n", - " plot_scene_to_image(\n", - " map_api,\n", - " ego_state,\n", - " initial_ego_state,\n", - " detection_observation.box_detections,\n", - " detection_observation.traffic_light_detections,\n", - " )\n", - " )\n", - " if end:\n", - " print(\"End of scene reached.\")\n", - " break\n", - "\n", - "time_s = time.time() - start\n", - "print(time_s)\n", - "print(151/ time_s)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "5", - "metadata": {}, - "outputs": [], - "source": [ - "151/46.239747524261475" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "6", - "metadata": {}, - "outputs": [], - "source": [ - "import numpy as np\n", - "\n", - "\n", - "def create_gif(images, output_path, duration=100):\n", - " \"\"\"\n", - " Create a GIF from a list of PIL images.\n", - "\n", - " Args:\n", - " images (list): List of PIL.Image objects.\n", - " output_path (str): Path to save the GIF.\n", - " duration (int): Duration between frames in milliseconds.\n", - " \"\"\"\n", - " if images:\n", - " print(len(images))\n", - " images_p = [img.convert(\"P\", palette=Image.ADAPTIVE) for img in images]\n", - " images_p[0].save(output_path, save_all=True, append_images=images_p[1:], duration=duration, loop=0)\n", - "\n", - "\n", - "if plot:\n", - " create_gif(images, f\"{split}_{current_scene.token}.gif\", duration=20)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "7", - "metadata": {}, - "outputs": [], - "source": [ - "from typing import List\n", - "\n", - "from d123.simulation.metrics.sim_agents.sim_agents import get_sim_agents_metrics\n", - "\n", - "\n", - "\n", - "\n", - "result = get_sim_agents_metrics(current_scene, agent_rollouts)\n", - "result\n", - "\n", - "\n", - "\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "8", - "metadata": {}, - "outputs": [], - "source": [] - } - ], - "metadata": { - "kernelspec": { - "display_name": "d123", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.12.11" - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} diff --git a/notebooks/gym/test_simulation_2d.ipynb b/notebooks/gym/test_simulation_2d.ipynb deleted file mode 100644 index d6eadc86..00000000 --- a/notebooks/gym/test_simulation_2d.ipynb +++ /dev/null @@ -1,421 +0,0 @@ -{ - "cells": [ - { - "cell_type": "code", - "execution_count": null, - "id": "0", - "metadata": {}, - "outputs": [], - "source": [ - "from d123.dataset.scene.scene_builder import ArrowSceneBuilder\n", - "from d123.dataset.scene.scene_filter import SceneFilter\n", - "\n", - "from d123.common.multithreading.worker_sequential import Sequential\n", - "# from d123.common.multithreading.worker_ray 
import RayDistributed" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "1", - "metadata": {}, - "outputs": [], - "source": [ - "split = \"nuplan_mini_val\"\n", - "\n", - "log_names = None\n", - "scene_tokens = None\n", - "\n", - "# log_names = [\"2021.06.07.12.54.00_veh-35_01843_02314\"]\n", - "# scene_tokens = [\"2283aea39bc1505e\"]\n", - "\n", - "scene_filter = SceneFilter(\n", - " split_names=[split],\n", - " log_names=log_names,\n", - " scene_tokens=scene_tokens,\n", - " duration_s=30.1,\n", - " history_s=1.0,\n", - " timestamp_threshold_s=10,\n", - ")\n", - "scene_builder = ArrowSceneBuilder(\"/home/daniel/d123_workspace/data\")\n", - "worker = Sequential()\n", - "# worker = RayDistributed()\n", - "scenes = scene_builder.get_scenes(scene_filter, worker)\n", - "\n", - "print(f\"Number of scenes: {len(scenes)}\")\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "2", - "metadata": {}, - "outputs": [], - "source": [ - "from d123.simulation.controller.action_controller import ActionController\n", - "from d123.simulation.controller.motion_model.kinematic_bicycle_model import KinematicBicycleModel\n", - "from d123.simulation.observation.log_replay_observation import LogReplayObservation\n", - "from d123.simulation.simulation_2d_setup import Simulation2DSetup\n", - "from d123.simulation.simulation_2d import Simulation2D\n", - "\n", - "\n", - "from d123.simulation.time_controller.log_time_controller import LogTimeController\n", - "\n", - "\n", - "def get_simulation_2d_setup():\n", - " return Simulation2DSetup(\n", - " time_controller=LogTimeController(),\n", - " observations=LogReplayObservation(),\n", - " ego_controller=ActionController(KinematicBicycleModel()),\n", - " )\n", - "\n", - "\n", - "simulation_2d_setup = get_simulation_2d_setup()\n", - "simulation2d = Simulation2D(simulation_2d_setup)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "3", - "metadata": {}, - "outputs": [], - "source": [ - "# reset\n", - "import torch\n", - "from d123.simulation.gym.environment.output_converter.action_output_converter import ActionOutputConverter\n", - "from d123.simulation.gym.policy.ppo.ppo_config import GlobalConfig\n", - "from d123.simulation.gym.policy.ppo.ppo_model import PPOPolicy\n", - "\n", - "\n", - "import numpy as np\n", - "import numpy.typing as npt\n", - "\n", - "\n", - "from d123.simulation.gym.environment.gym_observation.raster.raster_gym_observation import RasterGymObservation\n", - "from d123.simulation.gym.environment.gym_observation.raster.raster_renderer import RasterRenderer\n", - "from d123.simulation.gym.environment.helper.environment_area import RectangleEnvironmentArea\n", - "from d123.simulation.planning.abstract_planner import PlannerInitialization, PlannerInput\n", - "from d123.simulation.planning.planner_output.action_planner_output import ActionPlannerOutput\n", - "\n", - "\n", - "environment_area = RectangleEnvironmentArea()\n", - "gym_observation = RasterGymObservation(environment_area, RasterRenderer(environment_area), inference=True)\n", - "gym_observation.reset()\n", - "\n", - "checkpoint_path = \"/home/daniel/carl_workspace/carl_nuplan/checkpoints/nuplan_51892_1B/model_best.pth\"\n", - "config = GlobalConfig()\n", - "output_converter = ActionOutputConverter()\n", - "\n", - "device = \"cpu\"\n", - "\n", - "agent = PPOPolicy(\n", - " gym_observation.get_observation_space(),\n", - " output_converter.get_action_space(),\n", - " config=config,\n", - ").to(device)\n", - "\n", - "state_dict = 
torch.load(checkpoint_path, map_location=device)\n", - "agent.load_state_dict(state_dict, strict=True)\n", - "agent.to(device)\n", - "agent.eval()\n", - "\n", - "\n", - "def forward_agent(\n", - " planner_initialization: PlannerInitialization,\n", - " planner_input: PlannerInput,\n", - " last_action: npt.NDArray[np.float32],\n", - ") -> npt.NDArray[np.float32]:\n", - " info = {\"last_action\": last_action}\n", - " obs = gym_observation.get_gym_observation(planner_input, planner_initialization, info)\n", - "\n", - " obs_tensor = {\n", - " \"bev_semantics\": torch.Tensor(obs[\"bev_semantics\"][None, ...]).to(device, dtype=torch.float32),\n", - " \"measurements\": torch.Tensor(obs[\"measurements\"][None, ...]).to(device, dtype=torch.float32),\n", - " \"value_measurements\": torch.Tensor(obs[\"value_measurements\"][None, ...]).to(device, dtype=torch.float32),\n", - " }\n", - "\n", - " with torch.no_grad():\n", - " (action, _, _, _, _, _, _, _, _, _, _) = agent.forward(\n", - " obs_tensor, deterministic=True, lstm_state=None, done=None\n", - " )\n", - " action = action.squeeze().detach().cpu().numpy()\n", - " print(action)\n", - " return action\n", - "\n", - "\n", - "last_action = [0.0, 0.0] # placeholder\n", - "rasters = []\n", - "\n", - "idx = 1\n", - "planner_initialization, current_planner_input = simulation2d.reset(scenes[idx])\n", - "\n", - "\n", - "\n", - "def _get_action(planner_input: PlannerInput) -> ActionPlannerOutput:\n", - " ego_state, _ = planner_input.history.current_state\n", - " return ActionPlannerOutput(0.5, 0.0, ego_state)\n", - "\n", - "\n", - "while simulation2d.is_simulation_running():\n", - "\n", - " # 1. trigger planner\n", - " # planner_output = _get_action(current_planner_input)\n", - "\n", - " last_action = forward_agent(planner_initialization, current_planner_input, last_action)\n", - " planner_output = output_converter.get_planner_output(\n", - " action=last_action,\n", - " ego_state=current_planner_input.history.current_state[0],\n", - " info={},\n", - " )\n", - " print(planner_output._acceleration, planner_output._steering_rate)\n", - "\n", - " # 2. 
step simulation\n", - " current_planner_input = simulation2d.step(planner_output)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "4", - "metadata": {}, - "outputs": [], - "source": [ - "initial_ego_state = simulation2d.history.data[0]\n", - "# iteration\n", - "# ego_state\n", - "# planner_output\n", - "# detections\n", - "\n", - "len(simulation2d.history.data)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "5", - "metadata": {}, - "outputs": [], - "source": [ - "from pathlib import Path\n", - "from typing import List, Union\n", - "\n", - "import numpy as np\n", - "import numpy.typing as npt\n", - "from PIL import Image\n", - "\n", - "\n", - "from d123.common.visualization.color.color import (\n", - " BLACK,\n", - " DARK_GREY,\n", - " DARKER_GREY,\n", - " ELLIS_5,\n", - " LIGHT_GREY,\n", - " NEW_TAB_10,\n", - ")\n", - "\n", - "\n", - "def numpy_images_to_gif(\n", - " numpy_images: List[npt.NDArray[np.uint8]], gif_path: Union[str, Path], duration: int = 50\n", - ") -> None:\n", - " \"\"\"\n", - " Helper function to convert images into a GIF file.\n", - " :param numpy_images: list of images as uint8 numpy arrays.\n", - " :param gif_path: outout path for the GIF file.\n", - " :param duration: duration between frames (TODO: check), defaults to 50\n", - " \"\"\"\n", - " pil_images = [Image.fromarray(img) for img in numpy_images]\n", - " pil_images[0].save(gif_path, save_all=True, append_images=pil_images[1:], duration=duration, loop=0)\n", - "\n", - "\n", - "def image_to_rbg(image: npt.NDArray[np.uint8]) -> npt.NDArray[np.uint8]:\n", - " \"\"\"\n", - " Helper function to convert an observation image to RGB format.\n", - " :param image: _description_\n", - " :return: _description_\n", - " \"\"\"\n", - " _, width, height = image.shape\n", - " rgb_image = np.zeros((width, height, 3), dtype=np.uint8)\n", - " rgb_image.fill(255)\n", - " # drivable area\n", - " rgb_image[image[0] > 0] = LIGHT_GREY.rgb\n", - " rgb_image[image[1] > 0] = DARK_GREY.rgb\n", - " rgb_image[image[2] > 0] = BLACK.rgb\n", - " rgb_image[image[5] > 0] = DARKER_GREY.rgb\n", - "\n", - " rgb_image[image[3] == 80] = NEW_TAB_10[4].rgb\n", - " rgb_image[image[3] == 255] = NEW_TAB_10[2].rgb\n", - " # rgb_image[image[4] > 0] = ELLIS_5[1].rgb\n", - " rgb_image[image[6] > 0] = ELLIS_5[4].rgb\n", - " rgb_image[image[7] > 0] = NEW_TAB_10[6].rgb\n", - " rgb_image[image[8] > 0] = ELLIS_5[0].rgb\n", - "\n", - " rgb_image = np.rot90(rgb_image[::-1])\n", - " return rgb_image\n", - "\n", - "\n", - "# numpy_images_to_gif(\n", - "# [image_to_rbg(raster[\"bev_semantics\"]) for raster in rasters],\n", - "# gif_path=\"/home/daniel/d123_workspace/d123/notebooks/simulation_2d.gif\",\n", - "# duration=100,\n", - "# )" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "6", - "metadata": {}, - "outputs": [], - "source": [ - "from pathlib import Path\n", - "from typing import Optional, Tuple, Union\n", - "\n", - "import matplotlib.animation as animation\n", - "import matplotlib.pyplot as plt\n", - "from tqdm import tqdm\n", - "\n", - "from d123.common.visualization.matplotlib.observation import (\n", - " add_box_detections_to_ax,\n", - " add_default_map_on_ax,\n", - " add_ego_vehicle_to_ax,\n", - " add_traffic_lights_to_ax,\n", - ")\n", - "from d123.dataset.scene.abstract_scene import AbstractScene\n", - "from d123.simulation.history.simulation_history import Simulation2DHistory, Simulation2DHistorySample\n", - "from d123.simulation.planning.abstract_planner import 
PlannerInitialization\n", - "\n", - "\n", - "def _plot_simulation_history_sample_on_ax(\n", - " ax: plt.Axes,\n", - " simulation_history: Simulation2DHistory,\n", - " iteration: int = 0,\n", - " radius: float = 80,\n", - ") -> plt.Axes:\n", - "\n", - " sample = simulation_history.data[iteration]\n", - " map_api = simulation_history.scene.get_map_api()\n", - "\n", - " ego_state = sample.ego_state\n", - " # planner_output = sample.planner_output\n", - " detections = sample.detections\n", - "\n", - " point_2d = ego_state.center.point_2d\n", - " add_default_map_on_ax(ax, map_api, point_2d, radius=radius, route_lane_group_ids=None)\n", - " add_traffic_lights_to_ax(ax, detections.traffic_light_detections, map_api)\n", - "\n", - " add_box_detections_to_ax(ax, detections.box_detections)\n", - " add_ego_vehicle_to_ax(ax, ego_state)\n", - "\n", - " ax.set_xlim(point_2d.x - radius, point_2d.x + radius)\n", - " ax.set_ylim(point_2d.y - radius, point_2d.y + radius)\n", - "\n", - " ax.set_aspect(\"equal\", adjustable=\"box\")\n", - " ax.set_title(f\"Iteration {iteration}\")\n", - " return ax\n", - "\n", - "\n", - "def plot_simulation_history_at_iteration(\n", - " simulation_history: Simulation2DHistory,\n", - " iteration: int = 0,\n", - " radius: float = 80,\n", - ") -> Tuple[plt.Figure, plt.Axes]:\n", - "\n", - " fig, ax = plt.subplots(figsize=(10, 10))\n", - " _plot_simulation_history_sample_on_ax(\n", - " ax,\n", - " simulation_history,\n", - " iteration,\n", - " radius,\n", - " )\n", - " return fig, ax\n", - "\n", - "\n", - "def render_simulation_history_animation(\n", - " simulation_history: Simulation2DHistory,\n", - " output_path: Union[str, Path],\n", - " start_idx: int = 0,\n", - " end_idx: Optional[int] = None,\n", - " step: int = 10,\n", - " fps: float = 20.0,\n", - " dpi: int = 300,\n", - " format: str = \"mp4\",\n", - " radius: float = 100,\n", - ") -> None:\n", - " assert format in [\"mp4\", \"gif\"], \"Format must be either 'mp4' or 'gif'.\"\n", - " output_path = Path(output_path)\n", - " output_path.mkdir(parents=True, exist_ok=True)\n", - "\n", - " simulation_history.scene.open()\n", - " end_idx = len(simulation_history)\n", - "\n", - " fig, ax = plt.subplots(figsize=(10, 10))\n", - "\n", - " def update(i):\n", - " ax.clear()\n", - " _plot_simulation_history_sample_on_ax(ax, simulation_history, i, radius)\n", - " plt.subplots_adjust(left=0.05, right=0.95, top=0.95, bottom=0.05)\n", - " pbar.update(1)\n", - "\n", - " frames = list(range(start_idx, end_idx, step))\n", - " pbar = tqdm(total=len(frames), desc=f\"Rendering {simulation_history.scene.log_name} as {format}\")\n", - " ani = animation.FuncAnimation(fig, update, frames=frames, repeat=False)\n", - "\n", - " ani.save(\n", - " output_path / f\"{simulation_history.scene.log_name}_{simulation_history.scene.token}.{format}\",\n", - " writer=\"ffmpeg\",\n", - " fps=fps,\n", - " dpi=dpi,\n", - " )\n", - " plt.close(fig)\n", - " simulation_history.scene.close()\n", - "\n", - "\n", - "render_simulation_history_animation(\n", - " simulation_history=simulation2d.history,\n", - " output_path=Path(\"/home/daniel/d123_workspace/d123/notebooks/animations\"),\n", - " format=\"mp4\",\n", - " step=1,\n", - " fps=20.0,\n", - ")" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "7", - "metadata": {}, - "outputs": [], - "source": [] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "8", - "metadata": {}, - "outputs": [], - "source": [] - } - ], - "metadata": { - "kernelspec": { - "display_name": "d123", - 
"language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.12.11" - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} From 8e5b97d03815449cfd278151e044a9c2e2332f7b Mon Sep 17 00:00:00 2001 From: Daniel Dauner Date: Sat, 11 Oct 2025 21:01:13 +0200 Subject: [PATCH 072/145] Replace token in table by uuid (#55). Refactor data conversion for waymo open perception (#51). --- d123/common/utils/uuid.py | 25 + d123/conversion/dataset_converter_config.py | 2 +- .../datasets/av2/av2_sensor_converter.py | 22 +- .../datasets/nuplan/nuplan_converter.py | 5 +- .../datasets/nuplan/nuplan_map_conversion.py | 2 +- .../nuplan/{ => utils}/nuplan_constants.py | 0 .../nuplan/utils/nuplan_sql_helper.py | 2 +- d123/conversion/datasets/wopd/__init__.py | 0 .../datasets/wopd/utils/wopd_constants.py | 39 ++ .../wopd/waymo_map_utils/wopd_map_utils.py | 2 +- .../datasets/wopd/wopd_converter.py | 408 +++++++++++++++ .../datasets/wopd/wopd_data_converter.py | 475 ------------------ .../log_writer/abstract_log_writer.py | 1 - .../conversion/log_writer/arrow_log_writer.py | 12 +- .../utils/sensor_utils/camera_conventions.py | 2 + d123/datatypes/scene/abstract_scene.py | 2 +- .../scene/arrow/arrow_scene_builder.py | 12 +- .../scene/arrow/utils/arrow_getters.py | 7 +- d123/datatypes/scene/scene_filter.py | 2 +- d123/datatypes/scene/scene_metadata.py | 2 +- d123/geometry/se.py | 2 +- d123/script/builders/scene_filter_builder.py | 4 +- .../common/scene_filter/all_scenes.yaml | 2 +- .../common/scene_filter/log_scenes.yaml | 2 +- .../scene_filter/nuplan_mini_train.yaml | 2 +- .../common/scene_filter/nuplan_mini_val.yaml | 2 +- .../common/scene_filter/nuplan_sim_agent.yaml | 2 +- .../common/scene_filter/viser_scenes.yaml | 2 +- .../conversion/datasets/wopd_dataset.yaml | 8 +- .../config/conversion/default_conversion.yaml | 4 +- d123/script/run_conversion.py | 15 +- notebooks/deprecated/extraction_testing.ipynb | 14 +- notebooks/deprecated/test_scene_builder.ipynb | 4 +- notebooks/scene_rendering.ipynb | 6 +- notebooks/scene_sensor_loading.ipynb | 4 +- notebooks/viz/bev_matplotlib.ipynb | 4 +- notebooks/viz/bev_matplotlib_prediction.ipynb | 4 +- notebooks/viz/camera_matplotlib.ipynb | 4 +- notebooks/viz/video_example.ipynb | 4 +- notebooks/viz/viser_testing_v2_scene.ipynb | 4 +- notebooks/waymo_perception/map_testing.ipynb | 2 +- notebooks/waymo_perception/testing.ipynb | 3 +- test_viser.py | 10 +- 43 files changed, 559 insertions(+), 571 deletions(-) create mode 100644 d123/common/utils/uuid.py rename d123/conversion/datasets/nuplan/{ => utils}/nuplan_constants.py (100%) create mode 100644 d123/conversion/datasets/wopd/__init__.py create mode 100644 d123/conversion/datasets/wopd/utils/wopd_constants.py create mode 100644 d123/conversion/datasets/wopd/wopd_converter.py delete mode 100644 d123/conversion/datasets/wopd/wopd_data_converter.py diff --git a/d123/common/utils/uuid.py b/d123/common/utils/uuid.py new file mode 100644 index 00000000..d4d2678a --- /dev/null +++ b/d123/common/utils/uuid.py @@ -0,0 +1,25 @@ +import uuid +from typing import Final + +# Fixed namespace UUID for all UUIDs generated by 123D, do not change! 
+UUID_NAMESPACE_123D: Final[uuid.UUID] = uuid.UUID("123D123D-123D-123D-123D-123D123D123D") + + +def create_deterministic_uuid(split: str, log_name: str, timestamp_us: int, misc: str = None) -> uuid.UUID: + """Create a universally unique identifier (UUID) based on identifying fields. + + :param split: The data split (in the format {dataset_name}_{train, val, test}) + :param log_name: The name of the log without file extension + :param timestamp_us: The timestamp in microseconds + :param misc: Any additional information to include in the UUID, defaults to None + :return: The generated deterministic UUID + """ + # https://en.wikipedia.org/wiki/Universally_unique_identifier#Versions_3_and_5_(namespace_name-based) + + # Create a unique string from all identifying fields + unique_string = f"{split}:{log_name}:{timestamp_us}" + if misc: + unique_string += f":{misc}" + + # Generate UUIDv5 (SHA-1 based, deterministic) + return uuid.uuid5(UUID_NAMESPACE_123D, unique_string) diff --git a/d123/conversion/dataset_converter_config.py b/d123/conversion/dataset_converter_config.py index 018a9fd1..a55c65cf 100644 --- a/d123/conversion/dataset_converter_config.py +++ b/d123/conversion/dataset_converter_config.py @@ -8,7 +8,7 @@ @dataclass class DatasetConverterConfig: - output_path: Union[str, Path] + output_path: Union[str, Path] # TODO: Remove. The directory of writing should be handled by the log/map writer. force_log_conversion: bool = False force_map_conversion: bool = False diff --git a/d123/conversion/datasets/av2/av2_sensor_converter.py b/d123/conversion/datasets/av2/av2_sensor_converter.py index f3ed06af..0944fb6a 100644 --- a/d123/conversion/datasets/av2/av2_sensor_converter.py +++ b/d123/conversion/datasets/av2/av2_sensor_converter.py @@ -1,15 +1,11 @@ -import hashlib from pathlib import Path from typing import Dict, List, Optional, Tuple, Union import numpy as np import pandas as pd -from d123.conversion.abstract_dataset_converter import ( - AbstractDatasetConverter, - AbstractLogWriter, - DatasetConverterConfig, -) +from d123.conversion.abstract_dataset_converter import AbstractDatasetConverter +from d123.conversion.dataset_converter_config import DatasetConverterConfig from d123.conversion.datasets.av2.av2_constants import ( AV2_CAMERA_TYPE_MAPPING, AV2_SENSOR_SPLITS, @@ -23,6 +19,7 @@ get_slice_with_timestamp_ns, ) from d123.conversion.datasets.av2.av2_map_conversion import convert_av2_map +from d123.conversion.log_writer.abstract_log_writer import AbstractLogWriter from d123.datatypes.detections.detection import BoxDetectionMetadata, BoxDetectionSE3, BoxDetectionWrapper from d123.datatypes.detections.detection_types import DetectionType from d123.datatypes.scene.scene_metadata import LogMetadata @@ -44,16 +41,6 @@ from d123.geometry.transform.transform_se3 import convert_relative_to_absolute_se3_array -def create_token(input_data: str) -> str: - # TODO: Refactor this function. - # TODO: Add a general function to create tokens from arbitrary data. - if isinstance(input_data, str): - input_data = input_data.encode("utf-8") - - hash_obj = hashlib.sha256(input_data) - return hash_obj.hexdigest()[:16] - - class AV2SensorConverter(AbstractDatasetConverter): def __init__( self, @@ -133,9 +120,9 @@ def convert_log(self, log_index: int, log_writer: AbstractLogWriter) -> None: # 2. Prepare log writer overwrite_log = log_writer.reset(self.dataset_converter_config, log_metadata) + # 3. Process source log data if overwrite_log: - # 3. 
Process source log data sensor_df = build_sensor_dataframe(source_log_path) synchronization_df = build_synchronization_dataframe(sensor_df) @@ -157,7 +144,6 @@ def convert_log(self, log_index: int, log_writer: AbstractLogWriter) -> None: for lidar_timestamp_ns in lidar_timestamps_ns: ego_state = _extract_av2_sensor_ego_state(city_se3_egovehicle_df, lidar_timestamp_ns) log_writer.write( - token=create_token(str(lidar_timestamp_ns)), timestamp=TimePoint.from_ns(int(lidar_timestamp_ns)), ego_state=ego_state, box_detections=_extract_av2_sensor_box_detections(annotations_df, lidar_timestamp_ns, ego_state), diff --git a/d123/conversion/datasets/nuplan/nuplan_converter.py b/d123/conversion/datasets/nuplan/nuplan_converter.py index 4201dede..9be024d2 100644 --- a/d123/conversion/datasets/nuplan/nuplan_converter.py +++ b/d123/conversion/datasets/nuplan/nuplan_converter.py @@ -11,14 +11,14 @@ from d123.common.utils.dependencies import check_dependencies from d123.conversion.abstract_dataset_converter import AbstractDatasetConverter from d123.conversion.dataset_converter_config import DatasetConverterConfig -from d123.conversion.datasets.nuplan.nuplan_constants import ( +from d123.conversion.datasets.nuplan.nuplan_map_conversion import NuPlanMapConverter +from d123.conversion.datasets.nuplan.utils.nuplan_constants import ( NUPLAN_DATA_SPLITS, NUPLAN_DEFAULT_DT, NUPLAN_MAP_LOCATIONS, NUPLAN_ROLLING_SHUTTER_S, NUPLAN_TRAFFIC_STATUS_DICT, ) -from d123.conversion.datasets.nuplan.nuplan_map_conversion import NuPlanMapConverter from d123.conversion.datasets.nuplan.utils.nuplan_sql_helper import ( get_box_detections_for_lidarpc_token_from_db, get_nearest_ego_pose_for_timestamp_from_db, @@ -193,7 +193,6 @@ def convert_log(self, log_index: int, log_writer: AbstractLogWriter) -> None: lidar_pc_token: str = nuplan_lidar_pc.token log_writer.write( - token=lidar_pc_token, timestamp=TimePoint.from_us(nuplan_lidar_pc.timestamp), ego_state=_extract_nuplan_ego_state(nuplan_lidar_pc), box_detections=_extract_nuplan_box_detections(nuplan_lidar_pc, source_log_path), diff --git a/d123/conversion/datasets/nuplan/nuplan_map_conversion.py b/d123/conversion/datasets/nuplan/nuplan_map_conversion.py index 7152f7da..a87ad7b1 100644 --- a/d123/conversion/datasets/nuplan/nuplan_map_conversion.py +++ b/d123/conversion/datasets/nuplan/nuplan_map_conversion.py @@ -13,7 +13,7 @@ # Suppress numpy runtime warnings for casting operations np.seterr(invalid="ignore") -from d123.conversion.datasets.nuplan.nuplan_constants import NUPLAN_MAP_GPKG_LAYERS, NUPLAN_MAP_LOCATION_FILES +from d123.conversion.datasets.nuplan.utils.nuplan_constants import NUPLAN_MAP_GPKG_LAYERS, NUPLAN_MAP_LOCATION_FILES from d123.conversion.utils.map_utils.road_edge.road_edge_2d_utils import ( get_road_edge_linear_rings, split_line_geometry_by_max_length, diff --git a/d123/conversion/datasets/nuplan/nuplan_constants.py b/d123/conversion/datasets/nuplan/utils/nuplan_constants.py similarity index 100% rename from d123/conversion/datasets/nuplan/nuplan_constants.py rename to d123/conversion/datasets/nuplan/utils/nuplan_constants.py diff --git a/d123/conversion/datasets/nuplan/utils/nuplan_sql_helper.py b/d123/conversion/datasets/nuplan/utils/nuplan_sql_helper.py index 903fbeb2..773c6123 100644 --- a/d123/conversion/datasets/nuplan/utils/nuplan_sql_helper.py +++ b/d123/conversion/datasets/nuplan/utils/nuplan_sql_helper.py @@ -1,7 +1,7 @@ from typing import List from d123.common.utils.dependencies import check_dependencies -from 
d123.conversion.datasets.nuplan.nuplan_constants import NUPLAN_DETECTION_NAME_DICT +from d123.conversion.datasets.nuplan.utils.nuplan_constants import NUPLAN_DETECTION_NAME_DICT from d123.datatypes.detections.detection import BoxDetectionMetadata, BoxDetectionSE3 from d123.geometry import BoundingBoxSE3, EulerAngles, StateSE3, Vector3D from d123.geometry.utils.constants import DEFAULT_PITCH, DEFAULT_ROLL diff --git a/d123/conversion/datasets/wopd/__init__.py b/d123/conversion/datasets/wopd/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/d123/conversion/datasets/wopd/utils/wopd_constants.py b/d123/conversion/datasets/wopd/utils/wopd_constants.py new file mode 100644 index 00000000..bad841d9 --- /dev/null +++ b/d123/conversion/datasets/wopd/utils/wopd_constants.py @@ -0,0 +1,39 @@ +from typing import Dict, List + +from d123.datatypes.detections.detection_types import DetectionType +from d123.datatypes.sensors.camera.pinhole_camera import PinholeCameraType +from d123.datatypes.sensors.lidar.lidar import LiDARType + +WOPD_AVAILABLE_SPLITS: List[str] = [ + "wopd_train", + "wopd_val", + "wopd_test", +] + +# https://github.com/waymo-research/waymo-open-dataset/blob/master/src/waymo_open_dataset/label.proto#L63 +WOPD_DETECTION_NAME_DICT: Dict[int, DetectionType] = { + 0: DetectionType.GENERIC_OBJECT, # TYPE_UNKNOWN + 1: DetectionType.VEHICLE, # TYPE_VEHICLE + 2: DetectionType.PEDESTRIAN, # TYPE_PEDESTRIAN + 3: DetectionType.SIGN, # TYPE_SIGN + 4: DetectionType.BICYCLE, # TYPE_CYCLIST +} + +# https://github.com/waymo-research/waymo-open-dataset/blob/master/src/waymo_open_dataset/dataset.proto#L50 +WOPD_CAMERA_TYPES: Dict[int, PinholeCameraType] = { + 1: PinholeCameraType.CAM_F0, # front_camera + 2: PinholeCameraType.CAM_L0, # front_left_camera + 3: PinholeCameraType.CAM_R0, # front_right_camera + 4: PinholeCameraType.CAM_L1, # left_camera + 5: PinholeCameraType.CAM_R1, # right_camera +} + +# https://github.com/waymo-research/waymo-open-dataset/blob/master/src/waymo_open_dataset/dataset.proto#L66 +WOPD_LIDAR_TYPES: Dict[int, LiDARType] = { + 0: LiDARType.LIDAR_UNKNOWN, # UNKNOWN + 1: LiDARType.LIDAR_TOP, # TOP + 2: LiDARType.LIDAR_FRONT, # FRONT + 3: LiDARType.LIDAR_SIDE_LEFT, # SIDE_LEFT + 4: LiDARType.LIDAR_SIDE_RIGHT, # SIDE_RIGHT + 5: LiDARType.LIDAR_BACK, # REAR +} diff --git a/d123/conversion/datasets/wopd/waymo_map_utils/wopd_map_utils.py b/d123/conversion/datasets/wopd/waymo_map_utils/wopd_map_utils.py index d98a0691..6baf8cf7 100644 --- a/d123/conversion/datasets/wopd/waymo_map_utils/wopd_map_utils.py +++ b/d123/conversion/datasets/wopd/waymo_map_utils/wopd_map_utils.py @@ -9,7 +9,7 @@ import shapely.geometry as geom from d123.common.utils.dependencies import check_dependencies -from d123.conversion.wopd.waymo_map_utils.womp_boundary_utils import extract_lane_boundaries +from d123.conversion.datasets.wopd.waymo_map_utils.womp_boundary_utils import extract_lane_boundaries from d123.datatypes.maps.map_datatypes import MapLayer, RoadEdgeType, RoadLineType from d123.geometry import Point3DIndex, Polyline3D from d123.geometry.utils.units import mph_to_mps diff --git a/d123/conversion/datasets/wopd/wopd_converter.py b/d123/conversion/datasets/wopd/wopd_converter.py new file mode 100644 index 00000000..0572b72b --- /dev/null +++ b/d123/conversion/datasets/wopd/wopd_converter.py @@ -0,0 +1,408 @@ +import logging +import os +from pathlib import Path +from typing import Dict, List, Optional, Tuple, Union + +import numpy as np +import numpy.typing as npt + +from 
d123.common.utils.dependencies import check_dependencies
+from d123.conversion.abstract_dataset_converter import AbstractDatasetConverter
+from d123.conversion.dataset_converter_config import DatasetConverterConfig
+from d123.conversion.datasets.wopd.utils.wopd_constants import (
+    WOPD_AVAILABLE_SPLITS,
+    WOPD_CAMERA_TYPES,
+    WOPD_DETECTION_NAME_DICT,
+    WOPD_LIDAR_TYPES,
+)
+from d123.conversion.datasets.wopd.waymo_map_utils.wopd_map_utils import convert_wopd_map
+from d123.conversion.datasets.wopd.wopd_utils import parse_range_image_and_camera_projection
+from d123.conversion.log_writer.abstract_log_writer import AbstractLogWriter
+from d123.conversion.utils.sensor_utils.camera_conventions import CameraConvention, convert_camera_convention
+from d123.conversion.utils.sensor_utils.lidar_index_registry import DefaultLidarIndex, WOPDLidarIndex
+from d123.datatypes.detections.detection import BoxDetectionMetadata, BoxDetectionSE3, BoxDetectionWrapper
+from d123.datatypes.scene.scene_metadata import LogMetadata
+from d123.datatypes.sensors.camera.pinhole_camera import (
+    PinholeCameraMetadata,
+    PinholeCameraType,
+    PinholeDistortion,
+    PinholeIntrinsics,
+)
+from d123.datatypes.sensors.lidar.lidar import LiDARMetadata, LiDARType
+from d123.datatypes.time.time_point import TimePoint
+from d123.datatypes.vehicle_state.ego_state import DynamicStateSE3, EgoStateSE3
+from d123.datatypes.vehicle_state.vehicle_parameters import get_wopd_chrysler_pacifica_parameters
+from d123.geometry import BoundingBoxSE3Index, EulerAngles, StateSE3, Vector3D, Vector3DIndex
+from d123.geometry.bounding_box import BoundingBoxSE3
+from d123.geometry.geometry_index import EulerAnglesIndex, StateSE3Index
+from d123.geometry.transform.transform_se3 import convert_relative_to_absolute_se3_array
+from d123.geometry.utils.constants import DEFAULT_PITCH, DEFAULT_ROLL
+from d123.geometry.utils.rotation_utils import (
+    get_euler_array_from_quaternion_array,
+    get_quaternion_array_from_euler_array,
+)
+
+check_dependencies(modules=["tensorflow", "waymo_open_dataset"], optional_name="waymo")
+import tensorflow as tf
+from waymo_open_dataset import dataset_pb2
+from waymo_open_dataset.utils import frame_utils
+
+logger = logging.getLogger(__name__)
+os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
+D123_MAPS_ROOT: Path = Path(os.path.expandvars(os.getenv("D123_MAPS_ROOT", "$HOME/maps")))  # TODO: remove
+
+
+class WOPDConverter(AbstractDatasetConverter):
+    def __init__(
+        self,
+        splits: List[str],
+        wopd_data_root: Union[Path, str],
+        zero_roll_pitch: bool,
+        keep_polar_features: bool,
+        add_map_pose_offset: bool,
+        dataset_converter_config: DatasetConverterConfig,
+    ) -> None:
+        super().__init__(dataset_converter_config)
+        for split in splits:
+            assert (
+                split in WOPD_AVAILABLE_SPLITS
+            ), f"Split {split} is not available. Available splits: {WOPD_AVAILABLE_SPLITS}"
+
+        self._splits: List[str] = splits
+        self._wopd_data_root: Path = Path(wopd_data_root)
+        self._zero_roll_pitch: bool = zero_roll_pitch
+        self._keep_polar_features: bool = keep_polar_features
+        self._add_map_pose_offset: bool = add_map_pose_offset  # TODO: Implement this feature
+
+        self._split_tf_record_pairs: List[Tuple[str, Path]] = self._collect_split_tf_record_pairs()
+
+    def _collect_split_tf_record_pairs(self) -> List[Tuple[str, Path]]:
+        """Helper to collect the pairs of split names and their corresponding TF record files."""
+
+        split_tf_record_pairs: List[Tuple[str, Path]] = []
+        split_name_mapping: Dict[str, str] = {
+            "wopd_train": "training",
+            "wopd_val": "validation",
+            "wopd_test": "testing",
+        }
+
+        for split in self._splits:
+            assert split in split_name_mapping.keys()
+            split_folder = self._wopd_data_root / split_name_mapping[split]
+            source_log_paths = [log_file for log_file in split_folder.glob("*.tfrecord")]
+            for source_log_path in source_log_paths:
+                split_tf_record_pairs.append((split, source_log_path))
+
+        return split_tf_record_pairs
+
+    def get_number_of_maps(self) -> int:
+        """Inherited, see superclass."""
+        return len(self._split_tf_record_pairs)
+
+    def get_number_of_logs(self) -> int:
+        """Inherited, see superclass."""
+        return len(self._split_tf_record_pairs)
+
+    def convert_map(self, map_index: int) -> None:
+        """Inherited, see superclass."""
+        split, source_tf_record_path = self._split_tf_record_pairs[map_index]
+        initial_frame = _get_initial_frame_from_tfrecord(source_tf_record_path)
+        log_name = str(initial_frame.context.name)
+        map_file_path = D123_MAPS_ROOT / split / f"{log_name}.gpkg"
+        if self.dataset_converter_config.force_map_conversion or not map_file_path.exists():
+            map_file_path.unlink(missing_ok=True)
+            convert_wopd_map(initial_frame, map_file_path)
+
+    def convert_log(self, log_index: int, log_writer: AbstractLogWriter) -> None:
+        """Inherited, see superclass."""
+
+        split, source_tf_record_path = self._split_tf_record_pairs[log_index]
+
+        initial_frame = _get_initial_frame_from_tfrecord(source_tf_record_path, keep_dataset=False)
+        log_name = str(initial_frame.context.name)
+        dataset = tf.data.TFRecordDataset(source_tf_record_path, compression_type="")
+
+        # 1. Initialize Metadata
+        log_metadata = LogMetadata(
+            dataset="wopd",
+            split=split,
+            log_name=log_name,
+            location=None,  # TODO: Add location information.
+            timestep_seconds=0.1,
+            vehicle_parameters=get_wopd_chrysler_pacifica_parameters(),
+            camera_metadata=_get_wopd_camera_metadata(
+                initial_frame,
+                self.dataset_converter_config,
+            ),
+            lidar_metadata=_get_wopd_lidar_metadata(
+                initial_frame,
+                self._keep_polar_features,
+                self.dataset_converter_config,
+            ),
+            map_has_z=True,
+            map_is_local=True,
+        )
+
+        # 2. Prepare log writer
+        overwrite_log = log_writer.reset(self.dataset_converter_config, log_metadata)
+
+        # 3. Process source log data
+        if overwrite_log:
+            try:
+                for frame_idx, data in enumerate(dataset):
+                    frame = dataset_pb2.Frame()
+                    frame.ParseFromString(data.numpy())
+
+                    map_pose_offset: Vector3D = Vector3D(0.0, 0.0, 0.0)
+                    if self._add_map_pose_offset:
+                        map_pose_offset = Vector3D(
+                            x=frame.map_pose_offset.x,
+                            y=frame.map_pose_offset.y,
+                            z=frame.map_pose_offset.z,
+                        )
+
+                    log_writer.write(
+                        timestamp=TimePoint.from_us(frame.timestamp_micros),
+                        ego_state=_extract_wopd_ego_state(frame, map_pose_offset),
+                        box_detections=_extract_wopd_box_detections(frame, map_pose_offset, self._zero_roll_pitch),
+                        traffic_lights=None,  # TODO: Check if WOPD has traffic light information
+                        cameras=_extract_wopd_cameras(frame, self.dataset_converter_config),
+                        lidars=_extract_wopd_lidars(frame, self._keep_polar_features, self.dataset_converter_config),
+                    )
+            except Exception as e:
+                logger.error(f"Error processing log {log_name}: {e}")
+
+        log_writer.close()
+
+
+def _get_initial_frame_from_tfrecord(
+    tf_record_path: Path,
+    keep_dataset: bool = False,
+) -> Union[dataset_pb2.Frame, Tuple[dataset_pb2.Frame, tf.data.TFRecordDataset]]:
+    dataset = tf.data.TFRecordDataset(tf_record_path, compression_type="")
+    for data in dataset:
+        initial_frame = dataset_pb2.Frame()
+        initial_frame.ParseFromString(data.numpy())
+        break
+
+    if keep_dataset:
+        return initial_frame, dataset
+
+    del dataset
+    return initial_frame
+
+
+def _get_wopd_camera_metadata(
+    initial_frame: dataset_pb2.Frame, dataset_converter_config: DatasetConverterConfig
+) -> Dict[PinholeCameraType, PinholeCameraMetadata]:
+
+    camera_metadata_dict: Dict[PinholeCameraType, PinholeCameraMetadata] = {}
+
+    if dataset_converter_config.camera_store_option is not None:
+        for calibration in initial_frame.context.camera_calibrations:
+            camera_type = WOPD_CAMERA_TYPES[calibration.name]
+            # https://github.com/waymo-research/waymo-open-dataset/blob/master/src/waymo_open_dataset/dataset.proto#L96
+            # https://github.com/waymo-research/waymo-open-dataset/issues/834#issuecomment-2134995440
+            fx, fy, cx, cy, k1, k2, p1, p2, k3 = calibration.intrinsic
+            intrinsics = PinholeIntrinsics(fx=fx, fy=fy, cx=cx, cy=cy)
+            distortion = PinholeDistortion(k1=k1, k2=k2, p1=p1, p2=p2, k3=k3)
+            if camera_type in WOPD_CAMERA_TYPES.values():
+                camera_metadata_dict[camera_type] = PinholeCameraMetadata(
+                    camera_type=camera_type,
+                    width=calibration.width,
+                    height=calibration.height,
+                    intrinsics=intrinsics,
+                    distortion=distortion,
+                )
+
+    return camera_metadata_dict
+
+
+def _get_wopd_lidar_metadata(
+    initial_frame: dataset_pb2.Frame,
+    keep_polar_features: bool,
+    dataset_converter_config: DatasetConverterConfig,
+) -> Dict[LiDARType, LiDARMetadata]:
+
+    laser_metadatas: Dict[LiDARType, LiDARMetadata] = {}
+
+    # NOTE: Using the WOPD-specific lidar index when polar features are kept, otherwise the default index.
+    lidar_index = WOPDLidarIndex if keep_polar_features else DefaultLidarIndex
+    if dataset_converter_config.lidar_store_option is not None:
+        for laser_calibration in initial_frame.context.laser_calibrations:
+
+            lidar_type = WOPD_LIDAR_TYPES[laser_calibration.name]
+
+            extrinsic: Optional[StateSE3] = None
+            if laser_calibration.extrinsic:
+                extrinsic_transform = np.array(laser_calibration.extrinsic.transform, dtype=np.float64).reshape(4, 4)
+                extrinsic = StateSE3.from_transformation_matrix(extrinsic_transform)
+
+            laser_metadatas[lidar_type] = LiDARMetadata(
+                lidar_type=lidar_type,
+                lidar_index=lidar_index,
+                extrinsic=extrinsic,
+            )
+
+    return laser_metadatas
+
+
+def _get_ego_pose_se3(frame: dataset_pb2.Frame, map_pose_offset: Vector3D) -> StateSE3:
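+    # Editor's sketch of intent: frame.pose.transform is the flattened row-major
+    # 4x4 vehicle-to-global transform of the Waymo frame proto; it is reshaped
+    # into an SE3 state below, and the translation is shifted by the (optional)
+    # map pose offset so ego poses and the converted map share one frame, e.g.:
+    #   pose = _get_ego_pose_se3(frame, Vector3D(0.0, 0.0, 0.0))  # no offset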
+    ego_pose_matrix = np.array(frame.pose.transform, dtype=np.float64).reshape(4, 4)
+    ego_pose_se3 = StateSE3.from_transformation_matrix(ego_pose_matrix)
+    ego_pose_se3.array[StateSE3Index.XYZ] += map_pose_offset.array[Vector3DIndex.XYZ]
+    return ego_pose_se3
+
+
+def _extract_wopd_ego_state(frame: dataset_pb2.Frame, map_pose_offset: Vector3D) -> EgoStateSE3:
+    rear_axle_pose = _get_ego_pose_se3(frame, map_pose_offset)
+
+    vehicle_parameters = get_wopd_chrysler_pacifica_parameters()
+    # FIXME: Find dynamic state in waymo open perception dataset
+    # https://github.com/waymo-research/waymo-open-dataset/issues/55#issuecomment-546152290
+    dynamic_state = DynamicStateSE3(
+        velocity=Vector3D(*np.zeros(3)),
+        acceleration=Vector3D(*np.zeros(3)),
+        angular_velocity=Vector3D(*np.zeros(3)),
+    )
+
+    return EgoStateSE3.from_rear_axle(
+        rear_axle_se3=rear_axle_pose,
+        dynamic_state_se3=dynamic_state,
+        vehicle_parameters=vehicle_parameters,
+        time_point=None,
+    )
+
+
+def _extract_wopd_box_detections(
+    frame: dataset_pb2.Frame, map_pose_offset: Vector3D, zero_roll_pitch: bool = True
+) -> BoxDetectionWrapper:
+
+    ego_pose_se3 = _get_ego_pose_se3(frame, map_pose_offset)
+
+    num_detections = len(frame.laser_labels)
+    detections_state = np.zeros((num_detections, len(BoundingBoxSE3Index)), dtype=np.float64)
+    detections_velocity = np.zeros((num_detections, len(Vector3DIndex)), dtype=np.float64)
+    detections_types: List[int] = []
+    detections_token: List[str] = []
+
+    for detection_idx, detection in enumerate(frame.laser_labels):
+
+        detection_quaternion = EulerAngles(
+            roll=DEFAULT_ROLL,
+            pitch=DEFAULT_PITCH,
+            yaw=detection.box.heading,
+        ).quaternion
+
+        # 1. Fill SE3 bounding box
+        detections_state[detection_idx, BoundingBoxSE3Index.X] = detection.box.center_x
+        detections_state[detection_idx, BoundingBoxSE3Index.Y] = detection.box.center_y
+        detections_state[detection_idx, BoundingBoxSE3Index.Z] = detection.box.center_z
+        detections_state[detection_idx, BoundingBoxSE3Index.QUATERNION] = detection_quaternion
+        detections_state[detection_idx, BoundingBoxSE3Index.LENGTH] = detection.box.length
+        detections_state[detection_idx, BoundingBoxSE3Index.WIDTH] = detection.box.width
+        detections_state[detection_idx, BoundingBoxSE3Index.HEIGHT] = detection.box.height
+
+        # 2. Velocity (TODO: check if velocity needs to be rotated)
+        detections_velocity[detection_idx] = Vector3D(
+            x=detection.metadata.speed_x,
+            y=detection.metadata.speed_y,
+            z=detection.metadata.speed_z,
+        ).array
+
+        # 3. 
Type and track token + detections_types.append(WOPD_DETECTION_NAME_DICT[detection.type]) + detections_token.append(str(detection.id)) + + detections_state[:, BoundingBoxSE3Index.STATE_SE3] = convert_relative_to_absolute_se3_array( + origin=ego_pose_se3, se3_array=detections_state[:, BoundingBoxSE3Index.STATE_SE3] + ) + if zero_roll_pitch: + euler_array = get_euler_array_from_quaternion_array(detections_state[:, BoundingBoxSE3Index.QUATERNION]) + euler_array[..., EulerAnglesIndex.ROLL] = DEFAULT_ROLL + euler_array[..., EulerAnglesIndex.PITCH] = DEFAULT_PITCH + detections_state[..., BoundingBoxSE3Index.QUATERNION] = get_quaternion_array_from_euler_array(euler_array) + + box_detections: List[BoxDetectionSE3] = [] + for detection_idx in range(num_detections): + box_detections.append( + BoxDetectionSE3( + metadata=BoxDetectionMetadata( + detection_type=detections_types[detection_idx], + timepoint=None, + track_token=detections_token[detection_idx], + confidence=None, + ), + bounding_box_se3=BoundingBoxSE3.from_array(detections_state[detection_idx]), + velocity=Vector3D.from_array(detections_velocity[detection_idx]), + ) + ) + + return BoxDetectionWrapper(box_detections=box_detections) + + +def _extract_wopd_cameras( + frame: dataset_pb2.Frame, dataset_converter_config: DatasetConverterConfig +) -> Dict[PinholeCameraType, Tuple[Union[str, bytes], StateSE3]]: + + camera_dict: Dict[PinholeCameraType, Tuple[Union[str, bytes], StateSE3]] = {} + + if dataset_converter_config.include_cameras: + + # TODO: Implement option to store images as paths + assert ( + dataset_converter_config.camera_store_option == "binary" + ), "Camera store option must be 'binary' for WOPD." + + # NOTE: The extrinsic matrix in frame.context.camera_calibration is fixed to model the ego to camera transformation. + # The poses in frame.images[idx] are the motion compensated ego poses when the camera triggers. + + camera_extrinsic: Dict[str, StateSE3] = {} + for calibration in frame.context.camera_calibrations: + camera_type = WOPD_CAMERA_TYPES[calibration.name] + camera_transform = np.array(calibration.extrinsic.transform, dtype=np.float64).reshape(4, 4) + camera_pose = StateSE3.from_transformation_matrix(camera_transform) + # NOTE: WOPD uses a different camera convention than d123 + # https://arxiv.org/pdf/1912.04838 (Figure 1.) + camera_pose = convert_camera_convention( + camera_pose, + from_convention=CameraConvention.pXpZmY, + to_convention=CameraConvention.pZmYpX, + ) + camera_extrinsic[camera_type] = camera_pose + + for image_proto in frame.images: + camera_type = WOPD_CAMERA_TYPES[image_proto.name] + camera_bytes: bytes = image_proto.image + camera_dict[camera_type] = camera_bytes, camera_extrinsic[camera_type] + + return camera_dict + + +def _extract_wopd_lidars( + frame: dataset_pb2.Frame, + keep_polar_features: bool, + dataset_converter_config: DatasetConverterConfig, +) -> Dict[LiDARType, npt.NDArray[np.float32]]: + + lidar_data: Dict[LiDARType, npt.NDArray[np.float32]] = {} + + if dataset_converter_config.include_lidars: + + # TODO: Implement option to store point clouds as paths + assert dataset_converter_config.lidar_store_option == "binary", "Lidar store option must be 'binary' for WOPD." 
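+    # Editor's note: the decoding below mirrors the waymo_open_dataset tutorial
+    # pattern, i.e. parse the per-laser range images once, then convert them to
+    # point clouds. Indexing `points` by the order of `frame.lasers` assumes that
+    # frame_utils returns the per-laser point lists in that same order; this is
+    # an assumption worth verifying against the installed package version.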
+ (range_images, camera_projections, _, range_image_top_pose) = parse_range_image_and_camera_projection(frame) + + points, cp_points = frame_utils.convert_range_image_to_point_cloud( + frame=frame, + range_images=range_images, + camera_projections=camera_projections, + range_image_top_pose=range_image_top_pose, + keep_polar_features=keep_polar_features, + ) + + for lidar_idx, frame_lidar in enumerate(frame.lasers): + lidar_type = WOPD_LIDAR_TYPES[frame_lidar.name] + lidar_data[lidar_type] = np.array(points[lidar_idx], dtype=np.float32).flatten() + + return lidar_data diff --git a/d123/conversion/datasets/wopd/wopd_data_converter.py b/d123/conversion/datasets/wopd/wopd_data_converter.py deleted file mode 100644 index 80f32d27..00000000 --- a/d123/conversion/datasets/wopd/wopd_data_converter.py +++ /dev/null @@ -1,475 +0,0 @@ -import gc -import hashlib -import os -from functools import partial -from pathlib import Path -from typing import Any, Dict, Final, List, Optional, Tuple, Union - -import numpy as np -import numpy.typing as npt - -from d123.common.multithreading.worker_utils import WorkerPool, worker_map -from d123.common.utils.dependencies import check_dependencies -from d123.conversion.abstract_dataset_converter import AbstractDataConverter, DatasetConverterConfig -from d123.conversion.utils.log_writer.arrow_ipc_writer import ArrowLogWriter -from d123.conversion.utils.sensor.camera_conventions import CameraConvention, convert_camera_convention -from d123.conversion.utils.sensor.lidar_index_registry import WopdLidarIndex -from d123.conversion.wopd.waymo_map_utils.wopd_map_utils import convert_wopd_map -from d123.conversion.wopd.wopd_utils import parse_range_image_and_camera_projection -from d123.datatypes.detections.detection import BoxDetectionMetadata, BoxDetectionSE3, BoxDetectionWrapper -from d123.datatypes.detections.detection_types import DetectionType -from d123.datatypes.scene.scene_metadata import LogMetadata -from d123.datatypes.sensors.camera.pinhole_camera import ( - PinholeCameraMetadata, - PinholeCameraType, - PinholeDistortion, - PinholeIntrinsics, -) -from d123.datatypes.sensors.lidar.lidar import LiDARMetadata, LiDARType -from d123.datatypes.time.time_point import TimePoint -from d123.datatypes.vehicle_state.ego_state import DynamicStateSE3, EgoStateSE3 -from d123.datatypes.vehicle_state.vehicle_parameters import get_wopd_chrysler_pacifica_parameters -from d123.geometry import BoundingBoxSE3Index, EulerAngles, StateSE3, Vector3D, Vector3DIndex -from d123.geometry.bounding_box import BoundingBoxSE3 -from d123.geometry.geometry_index import EulerAnglesIndex -from d123.geometry.transform.transform_se3 import convert_relative_to_absolute_se3_array -from d123.geometry.utils.constants import DEFAULT_PITCH, DEFAULT_ROLL -from d123.geometry.utils.rotation_utils import ( - get_euler_array_from_quaternion_array, - get_quaternion_array_from_euler_array, -) - -check_dependencies(modules=["tensorflow", "waymo_open_dataset"], optional_name="waymo") -import tensorflow as tf -from waymo_open_dataset import dataset_pb2 -from waymo_open_dataset.utils import frame_utils - -# TODO: Make keep_polar_features an optional argument. -# With polar features, the lidar loading time is SIGNIFICANTLY higher. 
- -os.environ["CUDA_VISIBLE_DEVICES"] = "-1" -D123_MAPS_ROOT = Path(os.environ.get("D123_MAPS_ROOT")) - -TARGET_DT: Final[float] = 0.1 -SORT_BY_TIMESTAMP: Final[bool] = False - - -# https://github.com/waymo-research/waymo-open-dataset/blob/master/src/waymo_open_dataset/label.proto#L63 -WOPD_DETECTION_NAME_DICT: Dict[int, DetectionType] = { - 0: DetectionType.GENERIC_OBJECT, # TYPE_UNKNOWN - 1: DetectionType.VEHICLE, # TYPE_VEHICLE - 2: DetectionType.PEDESTRIAN, # TYPE_PEDESTRIAN - 3: DetectionType.SIGN, # TYPE_SIGN - 4: DetectionType.BICYCLE, # TYPE_CYCLIST -} - -# https://github.com/waymo-research/waymo-open-dataset/blob/master/src/waymo_open_dataset/dataset.proto#L50 -WOPD_CAMERA_TYPES: Dict[int, PinholeCameraType] = { - 1: PinholeCameraType.CAM_F0, # front_camera - 2: PinholeCameraType.CAM_L0, # front_left_camera - 3: PinholeCameraType.CAM_R0, # front_right_camera - 4: PinholeCameraType.CAM_L1, # left_camera - 5: PinholeCameraType.CAM_R1, # right_camera -} - -# https://github.com/waymo-research/waymo-open-dataset/blob/master/src/waymo_open_dataset/dataset.proto#L66 -WOPD_LIDAR_TYPES: Dict[int, LiDARType] = { - 0: LiDARType.LIDAR_UNKNOWN, # UNKNOWN - 1: LiDARType.LIDAR_TOP, # TOP - 2: LiDARType.LIDAR_FRONT, # FRONT - 3: LiDARType.LIDAR_SIDE_LEFT, # SIDE_LEFT - 4: LiDARType.LIDAR_SIDE_RIGHT, # SIDE_RIGHT - 5: LiDARType.LIDAR_BACK, # REAR -} - -WOPD_DATA_ROOT = Path("/media/nvme1/waymo_perception") # TODO: set as environment variable !!!! - -# Whether to use ego or zero roll and pitch values for bounding box detections (after global conversion) -ZERO_ROLL_PITCH: Final[bool] = True - - -def create_token(input_data: str) -> str: - # TODO: Refactor this function. - # TODO: Add a general function to create tokens from arbitrary data. - if isinstance(input_data, str): - input_data = input_data.encode("utf-8") - - hash_obj = hashlib.sha256(input_data) - return hash_obj.hexdigest()[:16] - - -class WOPDDataConverter(AbstractDataConverter): - def __init__( - self, - splits: List[str], - log_path: Union[Path, str], - dataset_converter_config: DatasetConverterConfig, - ) -> None: - super().__init__(dataset_converter_config) - for split in splits: - assert ( - split in self.get_available_splits() - ), f"Split {split} is not available. 
Available splits: {self.available_splits}" - - self._splits: List[str] = splits - self._tf_records_per_split: Dict[str, List[Path]] = self._collect_tf_records() - self._target_dt: float = 0.1 - - def _collect_tf_records(self) -> Dict[str, List[Path]]: - tf_records_per_split: Dict[str, List[Path]] = {} - - for split in self._splits: - if split in ["wopd_train"]: - log_path = WOPD_DATA_ROOT / "training" - else: - raise ValueError(f"Split {split} is not supported.") - - log_paths = [log_file for log_file in log_path.glob("*.tfrecord")] - tf_records_per_split[split] = log_paths - - return tf_records_per_split - - def get_available_splits(self) -> List[str]: - # TODO: Add more splits if available - return [ - "wopd_train", - ] - - def convert_maps(self, worker: WorkerPool) -> None: - log_args = [ - { - "tf_record": tf_record, - "split": split, - } - for split, tf_record_paths in self._tf_records_per_split.items() - for tf_record in tf_record_paths - ] - - worker_map( - worker, - partial(convert_wopd_tfrecord_map_to_gpkg, dataset_converter_config=self.dataset_converter_config), - log_args, - ) - - def convert_logs(self, worker: WorkerPool) -> None: - log_args = [ - { - "tf_record": tf_record, - "split": split, - } - for split, tf_record_paths in self._tf_records_per_split.items() - for tf_record in tf_record_paths - ] - - worker_map( - worker, - partial(convert_wopd_tfrecord_log_to_arrow, dataset_converter_config=self.dataset_converter_config), - log_args, - ) - - -def convert_wopd_tfrecord_map_to_gpkg( - args: List[Dict[str, Union[List[str], List[Path]]]], dataset_converter_config: DatasetConverterConfig -) -> List[Any]: - - for log_info in args: - tf_record_path: Path = log_info["tf_record"] - split: str = log_info["split"] - - if not tf_record_path.exists(): - raise FileNotFoundError(f"TFRecord path {tf_record_path} does not exist.") - - dataset = tf.data.TFRecordDataset(tf_record_path, compression_type="") - for data in dataset: - initial_frame = dataset_pb2.Frame() - initial_frame.ParseFromString(data.numpy()) - break - log_name = str(initial_frame.context.name) - map_file_path = D123_MAPS_ROOT / split / f"{log_name}.gpkg" - - if dataset_converter_config.force_map_conversion or not map_file_path.exists(): - map_file_path.unlink(missing_ok=True) - convert_wopd_map(initial_frame, map_file_path) - return [] - - -def convert_wopd_tfrecord_log_to_arrow( - args: List[Dict[str, Union[List[str], List[Path]]]], dataset_converter_config: DatasetConverterConfig -) -> List[Any]: - for log_info in args: - try: - - tf_record_path: Path = log_info["tf_record"] - split: str = log_info["split"] - - if not tf_record_path.exists(): - raise FileNotFoundError(f"TFRecord path {tf_record_path} does not exist.") - - dataset = tf.data.TFRecordDataset(tf_record_path, compression_type="") - for data in dataset: - initial_frame = dataset_pb2.Frame() - initial_frame.ParseFromString(data.numpy()) - break - - log_name = str(initial_frame.context.name) - log_file_path = dataset_converter_config.output_path / split / f"{log_name}.arrow" - - if dataset_converter_config.force_log_conversion or not log_file_path.exists(): - log_file_path.unlink(missing_ok=True) - if not log_file_path.parent.exists(): - log_file_path.parent.mkdir(parents=True, exist_ok=True) - - log_metadata = LogMetadata( - dataset="wopd", - split=split, - log_name=log_name, - location=None, # TODO: Add location information. 
- timestep_seconds=TARGET_DT, - vehicle_parameters=get_wopd_chrysler_pacifica_parameters(), - camera_metadata=get_wopd_camera_metadata(initial_frame, dataset_converter_config), - lidar_metadata=get_wopd_lidar_metadata(initial_frame, dataset_converter_config), - map_has_z=True, - map_is_local=True, - ) - - log_writer = ArrowLogWriter( - log_path=log_file_path, - dataset_converter_config=dataset_converter_config, - log_metadata=log_metadata, - ) - - _write_recording_table(dataset, log_writer, tf_record_path, dataset_converter_config) - - del dataset - except Exception as e: - import traceback - - print(f"Error processing log {str(tf_record_path)}: {e}") - traceback.print_exc() - gc.collect() - return [] - - -def get_wopd_camera_metadata( - initial_frame: dataset_pb2.Frame, dataset_converter_config: DatasetConverterConfig -) -> Dict[PinholeCameraType, PinholeCameraMetadata]: - - cam_metadatas: Dict[PinholeCameraType, PinholeCameraMetadata] = {} - if dataset_converter_config.camera_store_option is not None: - for calibration in initial_frame.context.camera_calibrations: - camera_type = WOPD_CAMERA_TYPES[calibration.name] - # https://github.com/waymo-research/waymo-open-dataset/blob/master/src/waymo_open_dataset/dataset.proto#L96 - # https://github.com/waymo-research/waymo-open-dataset/issues/834#issuecomment-2134995440 - fx, fy, cx, cy, k1, k2, p1, p2, k3 = calibration.intrinsic - intrinsics = PinholeIntrinsics(fx=fx, fy=fy, cx=cx, cy=cy) - distortion = PinholeDistortion(k1=k1, k2=k2, p1=p1, p2=p2, k3=k3) - if camera_type in WOPD_CAMERA_TYPES.values(): - cam_metadatas[camera_type] = PinholeCameraMetadata( - camera_type=camera_type, - width=calibration.width, - height=calibration.height, - intrinsics=intrinsics, - distortion=distortion, - ) - - return cam_metadatas - - -def get_wopd_lidar_metadata( - initial_frame: dataset_pb2.Frame, dataset_converter_config: DatasetConverterConfig -) -> Dict[LiDARType, LiDARMetadata]: - - laser_metadatas: Dict[LiDARType, LiDARMetadata] = {} - if dataset_converter_config.lidar_store_option is not None: - for laser_calibration in initial_frame.context.laser_calibrations: - - lidar_type = WOPD_LIDAR_TYPES[laser_calibration.name] - - extrinsic: Optional[StateSE3] = None - if laser_calibration.extrinsic: - extrinsic_transform = np.array(laser_calibration.extrinsic.transform, dtype=np.float64).reshape(4, 4) - extrinsic = StateSE3.from_transformation_matrix(extrinsic_transform) - - laser_metadatas[lidar_type] = LiDARMetadata( - lidar_type=lidar_type, - lidar_index=WopdLidarIndex, - extrinsic=extrinsic, - ) - - return laser_metadatas - - -def _write_recording_table( - dataset: tf.data.TFRecordDataset, - log_writer: ArrowLogWriter, - tf_record_path: Path, - dataset_converter_config: DatasetConverterConfig, -) -> None: - - dataset = tf.data.TFRecordDataset(tf_record_path, compression_type="") - for frame_idx, data in enumerate(dataset): - frame = dataset_pb2.Frame() - frame.ParseFromString(data.numpy()) - - log_writer.add_row( - token=create_token(f"{frame.context.name}_{int(frame.timestamp_micros)}"), - timestamp=TimePoint.from_us(frame.timestamp_micros), - ego_state=_extract_wopd_ego_state(frame), - box_detections=_extract_wopd_box_detections(frame), - traffic_lights=None, # NOTE: WOPD does not have traffic light information - cameras=_extract_wopd_cameras(frame, dataset_converter_config), - lidars=_extract_wopd_lidars(frame, dataset_converter_config), - scenario_tags=None, # NOTE: WOPD does not have scenario tags - route_lane_group_ids=None, # NOTE: WOPD does not 
have route information - ) - - log_writer.close() - - -def _get_ego_pose_se3(frame: dataset_pb2.Frame) -> StateSE3: - ego_pose_matrix = np.array(frame.pose.transform, dtype=np.float64).reshape(4, 4) - return StateSE3.from_transformation_matrix(ego_pose_matrix) - - -def _extract_wopd_ego_state(frame: dataset_pb2.Frame) -> List[float]: - rear_axle_pose = _get_ego_pose_se3(frame) - - vehicle_parameters = get_wopd_chrysler_pacifica_parameters() - # FIXME: Find dynamic state in waymo open perception dataset - # https://github.com/waymo-research/waymo-open-dataset/issues/55#issuecomment-546152290 - dynamic_state = DynamicStateSE3( - velocity=Vector3D(*np.zeros(3)), - acceleration=Vector3D(*np.zeros(3)), - angular_velocity=Vector3D(*np.zeros(3)), - ) - - return EgoStateSE3.from_rear_axle( - rear_axle_se3=rear_axle_pose, - dynamic_state_se3=dynamic_state, - vehicle_parameters=vehicle_parameters, - time_point=None, - ) - - -def _extract_wopd_box_detections(frame: dataset_pb2.Frame) -> BoxDetectionWrapper: - - ego_rear_axle = _get_ego_pose_se3(frame) - - num_detections = len(frame.laser_labels) - detections_state = np.zeros((num_detections, len(BoundingBoxSE3Index)), dtype=np.float64) - detections_velocity = np.zeros((num_detections, len(Vector3DIndex)), dtype=np.float64) - detections_types: List[int] = [] - detections_token: List[str] = [] - - for detection_idx, detection in enumerate(frame.laser_labels): - if detection.type not in WOPD_DETECTION_NAME_DICT: - continue - detection_quaternion = EulerAngles( - roll=DEFAULT_ROLL, - pitch=DEFAULT_PITCH, - yaw=detection.box.heading, - ).quaternion - - # 2. Fill SE3 Bounding Box - detections_state[detection_idx, BoundingBoxSE3Index.X] = detection.box.center_x - detections_state[detection_idx, BoundingBoxSE3Index.Y] = detection.box.center_y - detections_state[detection_idx, BoundingBoxSE3Index.Z] = detection.box.center_z - detections_state[detection_idx, BoundingBoxSE3Index.QUATERNION] = detection_quaternion - detections_state[detection_idx, BoundingBoxSE3Index.LENGTH] = detection.box.length - detections_state[detection_idx, BoundingBoxSE3Index.WIDTH] = detection.box.width - detections_state[detection_idx, BoundingBoxSE3Index.HEIGHT] = detection.box.height - - # 2. Velocity TODO: check if velocity needs to be rotated - detections_velocity[detection_idx] = Vector3D( - x=detection.metadata.speed_x, - y=detection.metadata.speed_y, - z=detection.metadata.speed_z, - ).array - - # 3. 
Type and track token - detections_types.append(WOPD_DETECTION_NAME_DICT[detection.type]) - detections_token.append(str(detection.id)) - - detections_state[:, BoundingBoxSE3Index.STATE_SE3] = convert_relative_to_absolute_se3_array( - origin=ego_rear_axle, se3_array=detections_state[:, BoundingBoxSE3Index.STATE_SE3] - ) - if ZERO_ROLL_PITCH: - euler_array = get_euler_array_from_quaternion_array(detections_state[:, BoundingBoxSE3Index.QUATERNION]) - euler_array[..., EulerAnglesIndex.ROLL] = DEFAULT_ROLL - euler_array[..., EulerAnglesIndex.PITCH] = DEFAULT_PITCH - detections_state[..., BoundingBoxSE3Index.QUATERNION] = get_quaternion_array_from_euler_array(euler_array) - - box_detections: List[BoxDetectionSE3] = [] - for detection_idx in range(num_detections): - box_detections.append( - BoxDetectionSE3( - metadata=BoxDetectionMetadata( - detection_type=detections_types[detection_idx], - timepoint=None, - track_token=detections_token[detection_idx], - confidence=None, - ), - bounding_box_se3=BoundingBoxSE3.from_array(detections_state[detection_idx]), - velocity=Vector3D.from_array(detections_velocity[detection_idx]), - ) - ) - - return BoxDetectionWrapper(box_detections=box_detections) - - -def _extract_wopd_cameras( - frame: dataset_pb2.Frame, dataset_converter_config: DatasetConverterConfig -) -> Dict[PinholeCameraType, Tuple[Union[str, bytes], StateSE3]]: - - # TODO: Implement option to store images as paths - assert dataset_converter_config.camera_store_option == "binary", "Camera store option must be 'binary' for WOPD." - - camera_dict: Dict[PinholeCameraType, Tuple[Union[str, bytes], StateSE3]] = {} - - # NOTE: The extrinsic matrix in frame.context.camera_calibration is fixed to model the ego to camera transformation. - # The poses in frame.images[idx] are the motion compensated ego poses when the camera triggers. - - camera_extrinsic: Dict[str, StateSE3] = {} - for calibration in frame.context.camera_calibrations: - camera_type = WOPD_CAMERA_TYPES[calibration.name] - camera_transform = np.array(calibration.extrinsic.transform, dtype=np.float64).reshape(4, 4) - camera_pose = StateSE3.from_transformation_matrix(camera_transform) - # NOTE: WOPD uses a different camera convention than d123 - # https://arxiv.org/pdf/1912.04838 (Figure 1.) - camera_pose = convert_camera_convention( - camera_pose, - from_convention=CameraConvention.pXpZmY, - to_convention=CameraConvention.pZmYpX, - ) - camera_extrinsic[camera_type] = camera_pose - - for image_proto in frame.images: - camera_type = WOPD_CAMERA_TYPES[image_proto.name] - camera_bytes: bytes = image_proto.image - camera_dict[camera_type] = camera_bytes, camera_extrinsic[camera_type] - - return camera_dict - - -def _extract_wopd_lidars( - frame: dataset_pb2.Frame, dataset_converter_config: DatasetConverterConfig -) -> Dict[LiDARType, npt.NDArray[np.float32]]: - - # TODO: Implement option to store point clouds as paths - assert dataset_converter_config.lidar_store_option == "binary", "Lidar store option must be 'binary' for WOPD." 
- (range_images, camera_projections, _, range_image_top_pose) = parse_range_image_and_camera_projection(frame) - - points, cp_points = frame_utils.convert_range_image_to_point_cloud( - frame=frame, - range_images=range_images, - camera_projections=camera_projections, - range_image_top_pose=range_image_top_pose, - keep_polar_features=False, - ) - - lidar_data: Dict[LiDARType, npt.NDArray[np.float32]] = {} - for lidar_idx, frame_lidar in enumerate(frame.lasers): - lidar_type = WOPD_LIDAR_TYPES[frame_lidar.name] - lidar_data[lidar_type] = np.array(points[lidar_idx], dtype=np.float32).flatten() - - return lidar_data diff --git a/d123/conversion/log_writer/abstract_log_writer.py b/d123/conversion/log_writer/abstract_log_writer.py index 4b606ec9..cc7fdf83 100644 --- a/d123/conversion/log_writer/abstract_log_writer.py +++ b/d123/conversion/log_writer/abstract_log_writer.py @@ -30,7 +30,6 @@ def reset( @abc.abstractmethod def write( self, - token: str, timestamp: TimePoint, ego_state: Optional[EgoStateSE3] = None, box_detections: Optional[BoxDetectionWrapper] = None, diff --git a/d123/conversion/log_writer/arrow_log_writer.py b/d123/conversion/log_writer/arrow_log_writer.py index 5fdac367..732028b6 100644 --- a/d123/conversion/log_writer/arrow_log_writer.py +++ b/d123/conversion/log_writer/arrow_log_writer.py @@ -3,6 +3,7 @@ import pyarrow as pa +from d123.common.utils.uuid import create_deterministic_uuid from d123.conversion.abstract_dataset_converter import AbstractLogWriter, DatasetConverterConfig from d123.datatypes.detections.detection import BoxDetectionWrapper, TrafficLightDetectionWrapper from d123.datatypes.scene.arrow.utils.arrow_metadata_utils import add_log_metadata_to_arrow_schema @@ -69,7 +70,6 @@ def reset(self, dataset_converter_config: DatasetConverterConfig, log_metadata: def write( self, - token: str, timestamp: TimePoint, ego_state: Optional[EgoStateSE3] = None, box_detections: Optional[BoxDetectionWrapper] = None, @@ -87,7 +87,13 @@ def write( assert self._source is not None, "Log writer is not initialized." record_batch_data = { - "token": [token], + "uuid": [ + create_deterministic_uuid( + split=self._log_metadata.split, + log_name=self._log_metadata.log_name, + timestamp_us=timestamp.time_us, + ).bytes + ], "timestamp": [timestamp.time_us], } @@ -210,7 +216,7 @@ def close(self) -> None: def _build_schema(dataset_converter_config: DatasetConverterConfig, log_metadata: LogMetadata) -> pa.Schema: schema_list: List[Tuple[str, pa.DataType]] = [ - ("token", pa.string()), + ("uuid", pa.uuid()), ("timestamp", pa.int64()), ] diff --git a/d123/conversion/utils/sensor_utils/camera_conventions.py b/d123/conversion/utils/sensor_utils/camera_conventions.py index 184eb9ef..c734f00a 100644 --- a/d123/conversion/utils/sensor_utils/camera_conventions.py +++ b/d123/conversion/utils/sensor_utils/camera_conventions.py @@ -38,6 +38,8 @@ class CameraConvention(Enum): order: forward, up, right Example: pZmYpX means +Z forward, -Y up, +X right + + TODO: Figure out a more intuitive naming scheme. 
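+
+    For example, WOPD cameras are modeled as pXpZmY (+X forward, +Z up, -Y right)
+    and are converted to the 123D default pZmYpX in _extract_wopd_cameras of the
+    WOPD converter.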
""" pZmYpX = "pZmYpX" # Default in 123D (OpenCV/COLMAP) diff --git a/d123/datatypes/scene/abstract_scene.py b/d123/datatypes/scene/abstract_scene.py index c9fbc1af..619ef273 100644 --- a/d123/datatypes/scene/abstract_scene.py +++ b/d123/datatypes/scene/abstract_scene.py @@ -95,7 +95,7 @@ def scene_extraction_metadata(self) -> SceneExtractionMetadata: @property def token(self) -> str: - return self.scene_extraction_metadata.initial_token + return self.scene_extraction_metadata.initial_uuid @property def number_of_iterations(self) -> int: diff --git a/d123/datatypes/scene/arrow/arrow_scene_builder.py b/d123/datatypes/scene/arrow/arrow_scene_builder.py index 4acb53d6..2bd58888 100644 --- a/d123/datatypes/scene/arrow/arrow_scene_builder.py +++ b/d123/datatypes/scene/arrow/arrow_scene_builder.py @@ -104,7 +104,7 @@ def _get_scene_extraction_metadatas(log_path: Union[str, Path], filter: SceneFil if filter.duration_s is None: return [ SceneExtractionMetadata( - initial_token=str(recording_table["token"][start_idx].as_py()), + initial_uuid=str(recording_table["uuid"][start_idx].as_py()), initial_idx=start_idx, duration_s=(end_idx - start_idx) * log_metadata.timestep_seconds, history_s=filter.history_s if filter.history_s is not None else 0.0, @@ -112,22 +112,22 @@ def _get_scene_extraction_metadatas(log_path: Union[str, Path], filter: SceneFil ) ] - scene_token_set = set(filter.scene_tokens) if filter.scene_tokens is not None else None + scene_uuid_set = set(filter.scene_uuids) if filter.scene_uuids is not None else None for idx in range(start_idx, end_idx): scene_extraction_metadata: Optional[SceneExtractionMetadata] = None - if scene_token_set is None: + if scene_uuid_set is None: scene_extraction_metadata = SceneExtractionMetadata( - initial_token=str(recording_table["token"][idx].as_py()), + initial_uuid=str(recording_table["uuid"][idx].as_py()), initial_idx=idx, duration_s=filter.duration_s, history_s=filter.history_s, iteration_duration_s=log_metadata.timestep_seconds, ) - elif str(recording_table["token"][idx]) in scene_token_set: + elif str(recording_table["uuid"][idx]) in scene_uuid_set: scene_extraction_metadata = SceneExtractionMetadata( - initial_token=str(recording_table["token"][idx].as_py()), + initial_uuid=str(recording_table["uuid"][idx].as_py()), initial_idx=idx, duration_s=filter.duration_s, history_s=filter.history_s, diff --git a/d123/datatypes/scene/arrow/utils/arrow_getters.py b/d123/datatypes/scene/arrow/utils/arrow_getters.py index 600c1444..19161321 100644 --- a/d123/datatypes/scene/arrow/utils/arrow_getters.py +++ b/d123/datatypes/scene/arrow/utils/arrow_getters.py @@ -78,7 +78,8 @@ def get_traffic_light_detections_from_arrow_table(arrow_table: pa.Table, index: traffic_light_detections: List[TrafficLightDetection] = [] for lane_id, status in zip( - arrow_table["traffic_light_ids"][index].as_py(), arrow_table["traffic_light_types"][index].as_py() + arrow_table["traffic_light_ids"][index].as_py(), + arrow_table["traffic_light_types"][index].as_py(), ): traffic_light_detection = TrafficLightDetection( timepoint=timepoint, @@ -145,11 +146,11 @@ def get_lidar_from_arrow_table( # NOTE: We move data specific import into if-else block, to avoid data specific import errors if log_metadata.dataset == "nuplan": - from d123.conversion.nuplan.nuplan_load_sensor import load_nuplan_lidar_from_path + from d123.conversion.datasets.nuplan.nuplan_load_sensor import load_nuplan_lidar_from_path lidar = load_nuplan_lidar_from_path(full_lidar_path, lidar_metadata) elif log_metadata.dataset == 
"carla": - from d123.conversion.carla.load_sensor import load_carla_lidar_from_path + from d123.conversion.datasets.carla.carla_load_sensor import load_carla_lidar_from_path lidar = load_carla_lidar_from_path(full_lidar_path, lidar_metadata) elif log_metadata.dataset == "wopd": diff --git a/d123/datatypes/scene/scene_filter.py b/d123/datatypes/scene/scene_filter.py index c05073db..0bc35e39 100644 --- a/d123/datatypes/scene/scene_filter.py +++ b/d123/datatypes/scene/scene_filter.py @@ -15,7 +15,7 @@ class SceneFilter: log_names: Optional[List[str]] = None map_names: Optional[List[str]] = None # TODO: - scene_tokens: Optional[List[str]] = None # TODO: + scene_uuids: Optional[List[str]] = None # TODO: timestamp_threshold_s: Optional[float] = None # TODO: ego_displacement_minimum_m: Optional[float] = None # TODO: diff --git a/d123/datatypes/scene/scene_metadata.py b/d123/datatypes/scene/scene_metadata.py index 59389d13..3db87645 100644 --- a/d123/datatypes/scene/scene_metadata.py +++ b/d123/datatypes/scene/scene_metadata.py @@ -51,7 +51,7 @@ def to_dict(self) -> Dict: @dataclass(frozen=True) class SceneExtractionMetadata: - initial_token: str + initial_uuid: str initial_idx: int duration_s: float history_s: float diff --git a/d123/geometry/se.py b/d123/geometry/se.py index 3d328f4b..a67d973d 100644 --- a/d123/geometry/se.py +++ b/d123/geometry/se.py @@ -148,7 +148,7 @@ def from_transformation_matrix(cls, transformation_matrix: npt.NDArray[np.float6 array = np.zeros(len(StateSE3Index), dtype=np.float64) array[StateSE3Index.XYZ] = transformation_matrix[:3, 3] array[StateSE3Index.QUATERNION] = Quaternion.from_rotation_matrix(transformation_matrix[:3, :3]) - return StateSE3.from_array(array) + return StateSE3.from_array(array, copy=False) @property def x(self) -> float: diff --git a/d123/script/builders/scene_filter_builder.py b/d123/script/builders/scene_filter_builder.py index 512f59ca..f6ad657b 100644 --- a/d123/script/builders/scene_filter_builder.py +++ b/d123/script/builders/scene_filter_builder.py @@ -31,11 +31,11 @@ def build_scene_filter(cfg: DictConfig) -> SceneFilter: :return: Instance of SceneFilter. """ logger.info("Building SceneFilter...") - if cfg.scene_tokens and not all(map(is_valid_token, cfg.scene_tokens)): + if cfg.scene_uuids and not all(map(is_valid_token, cfg.scene_uuids)): raise RuntimeError( "Expected all scene tokens to be 16-character strings. 
Your shell may strip quotes " "causing hydra to parse a token as a float, so consider passing them like " - "scene_filter.scene_tokens='[\"595322e649225137\", ...]'" + "scene_filter.scene_uuids='[\"595322e649225137\", ...]'" ) scene_filter: SceneFilter = instantiate(cfg) assert isinstance(scene_filter, SceneFilter) diff --git a/d123/script/config/common/scene_filter/all_scenes.yaml b/d123/script/config/common/scene_filter/all_scenes.yaml index 35e157a0..1026b737 100644 --- a/d123/script/config/common/scene_filter/all_scenes.yaml +++ b/d123/script/config/common/scene_filter/all_scenes.yaml @@ -7,7 +7,7 @@ log_names: null map_names: null -scene_tokens: null +scene_uuids: null timestamp_threshold_s: null ego_displacement_minimum_m: null diff --git a/d123/script/config/common/scene_filter/log_scenes.yaml b/d123/script/config/common/scene_filter/log_scenes.yaml index f83322ac..b5d98361 100644 --- a/d123/script/config/common/scene_filter/log_scenes.yaml +++ b/d123/script/config/common/scene_filter/log_scenes.yaml @@ -7,7 +7,7 @@ log_names: null map_names: null -scene_tokens: null +scene_uuids: null timestamp_threshold_s: null ego_displacement_minimum_m: null diff --git a/d123/script/config/common/scene_filter/nuplan_mini_train.yaml b/d123/script/config/common/scene_filter/nuplan_mini_train.yaml index 231e882b..4f69838b 100644 --- a/d123/script/config/common/scene_filter/nuplan_mini_train.yaml +++ b/d123/script/config/common/scene_filter/nuplan_mini_train.yaml @@ -8,7 +8,7 @@ log_names: null map_names: null -scene_tokens: null +scene_uuids: null timestamp_threshold_s: 1.0 ego_displacement_minimum_m: null max_num_scenes: null diff --git a/d123/script/config/common/scene_filter/nuplan_mini_val.yaml b/d123/script/config/common/scene_filter/nuplan_mini_val.yaml index 43f5c9bc..62f29257 100644 --- a/d123/script/config/common/scene_filter/nuplan_mini_val.yaml +++ b/d123/script/config/common/scene_filter/nuplan_mini_val.yaml @@ -8,7 +8,7 @@ log_names: null map_names: null -scene_tokens: null +scene_uuids: null timestamp_threshold_s: 1.0 ego_displacement_minimum_m: null max_num_scenes: null diff --git a/d123/script/config/common/scene_filter/nuplan_sim_agent.yaml b/d123/script/config/common/scene_filter/nuplan_sim_agent.yaml index 055ac331..2656ac68 100644 --- a/d123/script/config/common/scene_filter/nuplan_sim_agent.yaml +++ b/d123/script/config/common/scene_filter/nuplan_sim_agent.yaml @@ -8,7 +8,7 @@ log_names: null map_names: null -scene_tokens: +scene_uuids: - "796266a84fd65c71" - "1ef8b2f08cd65f9a" - "8922138735b45195" diff --git a/d123/script/config/common/scene_filter/viser_scenes.yaml b/d123/script/config/common/scene_filter/viser_scenes.yaml index fe012d84..a2dd51be 100644 --- a/d123/script/config/common/scene_filter/viser_scenes.yaml +++ b/d123/script/config/common/scene_filter/viser_scenes.yaml @@ -7,7 +7,7 @@ log_names: null map_names: null -scene_tokens: null +scene_uuids: null timestamp_threshold_s: 10.0 ego_displacement_minimum_m: null diff --git a/d123/script/config/conversion/datasets/wopd_dataset.yaml b/d123/script/config/conversion/datasets/wopd_dataset.yaml index a5069b8b..4380a80d 100644 --- a/d123/script/config/conversion/datasets/wopd_dataset.yaml +++ b/d123/script/config/conversion/datasets/wopd_dataset.yaml @@ -2,8 +2,12 @@ wopd_dataset: _target_: d123.conversion.datasets.wopd.wopd_converter.WOPDConverter _convert_: 'all' - splits: ["wopd_train"] - log_path: null # TODO: implement + splits: ["wopd_val"] + wopd_data_root: "/media/nvme1/waymo_perception" # ${wopd_data_root} + 
zero_roll_pitch: true # Whether to zero the roll and pitch of the box detections. + keep_polar_features: false # Add lidar polar features (range, azimuth, elevation) in addition to XYZ. (slow if true) + add_map_pose_offset: true # Whether to add the map pose offset to the ego state and box detections. + dataset_converter_config: _target_: d123.conversion.dataset_converter_config.DatasetConverterConfig diff --git a/d123/script/config/conversion/default_conversion.yaml b/d123/script/config/conversion/default_conversion.yaml index 9834174a..179401e3 100644 --- a/d123/script/config/conversion/default_conversion.yaml +++ b/d123/script/config/conversion/default_conversion.yaml @@ -13,10 +13,10 @@ defaults: - default_dataset_paths - log_writer: arrow_ipc_log_writer - datasets: - - nuplan_mini_dataset + # - nuplan_mini_dataset # - nuplan_private_dataset # - carla_dataset - # - wopd_dataset + - wopd_dataset # - av2_sensor_dataset - _self_ diff --git a/d123/script/run_conversion.py b/d123/script/run_conversion.py index dc33653a..eb630597 100644 --- a/d123/script/run_conversion.py +++ b/d123/script/run_conversion.py @@ -36,23 +36,16 @@ def main(cfg: DictConfig) -> None: for dataset_converter in dataset_converters: worker = build_worker(cfg) - logger.info(f"Processing dataset: {dataset_converter.__class__.__name__}") - # map_args = [{"map_index": i} for i in range(dataset_converter.get_number_of_maps())] - # worker_map(worker, partial(_convert_maps, cfg=cfg, dataset_converter=dataset_converter), map_args) - # logger.info(f"Finished maps: {dataset_converter.__class__.__name__}") - - # worker.shutdown() - - # del worker - - # worker = build_worker(cfg) + map_args = [{"map_index": i} for i in range(dataset_converter.get_number_of_maps())] + worker_map(worker, partial(_convert_maps, cfg=cfg, dataset_converter=dataset_converter), map_args) + logger.info(f"Finished maps: {dataset_converter.__class__.__name__}") log_args = [{"log_index": i} for i in range(dataset_converter.get_number_of_logs())] worker_map(worker, partial(_convert_logs, cfg=cfg, dataset_converter=dataset_converter), log_args) - logger.info(f"Finished logs: {dataset_converter.__class__.__name__}") + logger.info(f"Finished processing dataset: {dataset_converter.__class__.__name__}") diff --git a/notebooks/deprecated/extraction_testing.ipynb b/notebooks/deprecated/extraction_testing.ipynb index 6b08c67f..1f617694 100644 --- a/notebooks/deprecated/extraction_testing.ipynb +++ b/notebooks/deprecated/extraction_testing.ipynb @@ -45,11 +45,11 @@ "log_metadata = LogMetadata.from_arrow_table(recording_table)\n", "\n", "\n", - "# scene_tokens = [str(token) for token in np.random.choice(recording_table.column(\"token\").to_pylist(), size=10)]\n", - "scene_tokens = [\"\"]\n", + "# scene_uuids = [str(token) for token in np.random.choice(recording_table.column(\"token\").to_pylist(), size=10)]\n", + "scene_uuids = [\"\"]\n", "timestamp_threshold_s: float = 10.0\n", "# timestamp_threshold_s = None\n", - "filter = SceneFilter(scene_tokens=scene_tokens, timestamp_threshold_s=timestamp_threshold_s)" + "filter = SceneFilter(scene_uuids=scene_uuids, timestamp_threshold_s=timestamp_threshold_s)" ] }, { @@ -59,7 +59,7 @@ "metadata": {}, "outputs": [], "source": [ - "scene_tokens" + "scene_uuids" ] }, { @@ -82,12 +82,12 @@ " start_idx = int(filter.history_s / log_metadata.timestep_seconds)\n", " end_idx = len(recording_table) - int(filter.duration_s / log_metadata.timestep_seconds)\n", "\n", - " scene_token_set = set(filter.scene_tokens) if filter.scene_tokens 
else None\n", + " scene_uuid_set = set(filter.scene_uuids) if filter.scene_uuids else None\n", "\n", " for idx in range(start_idx, end_idx):\n", " scene_extraction_info: Optional[SceneExtractionInfo] = None\n", "\n", - " if scene_token_set is None:\n", + " if scene_uuid_set is None:\n", " scene_extraction_info = SceneExtractionInfo(\n", " initial_token=str(recording_table[\"token\"][idx]),\n", " initial_idx=idx,\n", @@ -95,7 +95,7 @@ " history_s=filter.history_s,\n", " iteration_duration_s=ITERATION_DURATION_SECONDS,\n", " )\n", - " elif str(recording_table[\"token\"][idx]) in scene_token_set:\n", + " elif str(recording_table[\"token\"][idx]) in scene_uuid_set:\n", " scene_extraction_info = SceneExtractionInfo(\n", " initial_token=str(recording_table[\"token\"][idx]),\n", " initial_idx=idx,\n", diff --git a/notebooks/deprecated/test_scene_builder.ipynb b/notebooks/deprecated/test_scene_builder.ipynb index e41ba4dd..105e60a5 100644 --- a/notebooks/deprecated/test_scene_builder.ipynb +++ b/notebooks/deprecated/test_scene_builder.ipynb @@ -33,8 +33,8 @@ "# log_names = [\"_Rep0_bench2drive220_route2_06_12_20_50_31\"]\n", "log_names = None\n", "split = \"nuplan_mini_val\"\n", - "scene_tokens = [\"8445a99210185a81\"]\n", - "scene_filter = SceneFilter(split_names=[split], log_names=log_names, duration_s=8.1, scene_tokens=scene_tokens)\n", + "scene_uuids = [\"8445a99210185a81\"]\n", + "scene_filter = SceneFilter(split_names=[split], log_names=log_names, duration_s=8.1, scene_uuids=scene_uuids)\n", "scene_builder = ArrowSceneBuilder(\"/home/daniel/d123_workspace/data\")\n", "worker = Sequential() \n", "# worker = RayDistributed()\n", diff --git a/notebooks/scene_rendering.ipynb b/notebooks/scene_rendering.ipynb index 77735e19..780e9bab 100644 --- a/notebooks/scene_rendering.ipynb +++ b/notebooks/scene_rendering.ipynb @@ -34,14 +34,14 @@ "# split = \"carla\"\n", "\n", "# log_names = [\"2021.06.07.12.54.00_veh-35_01843_02314\"]\n", - "scene_tokens = None\n", - "# scene_tokens = [\"2283aea39bc1505e\"]\n", + "scene_uuids = None\n", + "# scene_uuids = [\"2283aea39bc1505e\"]\n", "log_names = None\n", "\n", "scene_filter = SceneFilter(\n", " split_names=[split],\n", " log_names=log_names,\n", - " scene_tokens=scene_tokens,\n", + " scene_uuids=scene_uuids,\n", " duration_s=15.1,\n", " history_s=1.0,\n", ")\n", diff --git a/notebooks/scene_sensor_loading.ipynb b/notebooks/scene_sensor_loading.ipynb index ed143de1..56fdf4f4 100644 --- a/notebooks/scene_sensor_loading.ipynb +++ b/notebooks/scene_sensor_loading.ipynb @@ -37,13 +37,13 @@ "split = \"nuplan_private_test\"\n", "\n", "log_names = [\"2021.07.01.21.22.09_veh-14_00016_00656\"]\n", - "scene_tokens = None\n", + "scene_uuids = None\n", "log_names = None\n", "\n", "scene_filter = SceneFilter(\n", " split_names=[split],\n", " log_names=log_names,\n", - " scene_tokens=scene_tokens,\n", + " scene_uuids=scene_uuids,\n", " duration_s=15.1,\n", " history_s=1.0,\n", " timestamp_threshold_s=15.0,\n", diff --git a/notebooks/viz/bev_matplotlib.ipynb b/notebooks/viz/bev_matplotlib.ipynb index e4a8d52b..0c0c8e42 100644 --- a/notebooks/viz/bev_matplotlib.ipynb +++ b/notebooks/viz/bev_matplotlib.ipynb @@ -48,12 +48,12 @@ "\n", "\n", "log_names = None\n", - "scene_tokens = None\n", + "scene_uuids = None\n", "\n", "scene_filter = SceneFilter(\n", " split_names=splits,\n", " log_names=log_names,\n", - " scene_tokens=scene_tokens,\n", + " scene_uuids=scene_uuids,\n", " duration_s=10,\n", " history_s=0.0,\n", " timestamp_threshold_s=20,\n", diff --git 
a/notebooks/viz/bev_matplotlib_prediction.ipynb b/notebooks/viz/bev_matplotlib_prediction.ipynb index 68c11e2a..3363e5ef 100644 --- a/notebooks/viz/bev_matplotlib_prediction.ipynb +++ b/notebooks/viz/bev_matplotlib_prediction.ipynb @@ -33,12 +33,12 @@ "\n", "\n", "log_names = None\n", - "scene_tokens = None\n", + "scene_uuids = None\n", "\n", "scene_filter = SceneFilter(\n", " split_names=splits,\n", " log_names=log_names,\n", - " scene_tokens=scene_tokens,\n", + " scene_uuids=scene_uuids,\n", " duration_s=8.0,\n", " history_s=0.0,\n", " timestamp_threshold_s=4.0,\n", diff --git a/notebooks/viz/camera_matplotlib.ipynb b/notebooks/viz/camera_matplotlib.ipynb index 2dbc9c77..77380f62 100644 --- a/notebooks/viz/camera_matplotlib.ipynb +++ b/notebooks/viz/camera_matplotlib.ipynb @@ -47,12 +47,12 @@ "# splits = [\"nuplan_private_test\"]\n", "log_names = None\n", "\n", - "scene_tokens = None\n", + "scene_uuids = None\n", "\n", "scene_filter = SceneFilter(\n", " split_names=splits,\n", " log_names=log_names,\n", - " scene_tokens=scene_tokens,\n", + " scene_uuids=scene_uuids,\n", " duration_s=15,\n", " history_s=0.0,\n", " timestamp_threshold_s=15,\n", diff --git a/notebooks/viz/video_example.ipynb b/notebooks/viz/video_example.ipynb index a6beb679..3b836715 100644 --- a/notebooks/viz/video_example.ipynb +++ b/notebooks/viz/video_example.ipynb @@ -48,12 +48,12 @@ "\n", "\n", "log_names = None\n", - "scene_tokens = None\n", + "scene_uuids = None\n", "\n", "scene_filter = SceneFilter(\n", " split_names=splits,\n", " log_names=log_names,\n", - " scene_tokens=scene_tokens,\n", + " scene_uuids=scene_uuids,\n", " duration_s=20,\n", " history_s=0.0,\n", " timestamp_threshold_s=20,\n", diff --git a/notebooks/viz/viser_testing_v2_scene.ipynb b/notebooks/viz/viser_testing_v2_scene.ipynb index e3dc780f..7c6d4a22 100644 --- a/notebooks/viz/viser_testing_v2_scene.ipynb +++ b/notebooks/viz/viser_testing_v2_scene.ipynb @@ -28,12 +28,12 @@ "# splits = [\"av2-sensor-mini_train\"]\n", "log_names = None\n", "\n", - "scene_tokens = None\n", + "scene_uuids = None\n", "\n", "scene_filter = SceneFilter(\n", " split_names=splits,\n", " log_names=log_names,\n", - " scene_tokens=scene_tokens,\n", + " scene_uuids=scene_uuids,\n", " duration_s=10,\n", " history_s=0.0,\n", " timestamp_threshold_s=10,\n", diff --git a/notebooks/waymo_perception/map_testing.ipynb b/notebooks/waymo_perception/map_testing.ipynb index 6bb42684..b172331b 100644 --- a/notebooks/waymo_perception/map_testing.ipynb +++ b/notebooks/waymo_perception/map_testing.ipynb @@ -241,7 +241,7 @@ "metadata": {}, "outputs": [], "source": [ - "from d123.conversion.wopd.wopd_map_utils import extract_lane_boundaries\n", + "from d123.conversion.datasets.wopd.wopd_map_utils import extract_lane_boundaries\n", "\n", "\n", "left_boundaries, right_boundaries = extract_lane_boundaries(\n", diff --git a/notebooks/waymo_perception/testing.ipynb b/notebooks/waymo_perception/testing.ipynb index d03d7d93..ae984f72 100644 --- a/notebooks/waymo_perception/testing.ipynb +++ b/notebooks/waymo_perception/testing.ipynb @@ -35,7 +35,7 @@ "from pathlib import Path\n", "\n", "\n", - "WOPD_DATA_ROOT = Path(\"/media/nvme1/waymo_perception/training\")\n", + "WOPD_DATA_ROOT = Path(\"/media/nvme1/waymo_perception/validation\")\n", "\n", "\n", "tfrecords_file_list = list(WOPD_DATA_ROOT.glob(\"*.tfrecord\"))" @@ -123,6 +123,7 @@ " frame = dataset_pb2.Frame()\n", " frame.ParseFromString(data.numpy())\n", " if frame_idx == 2:\n", + " print(frame)\n", " break\n", "\n", "print(\"Ego\")\n", diff 
--git a/test_viser.py b/test_viser.py
index 9661edde..ab527a98 100644
--- a/test_viser.py
+++ b/test_viser.py
@@ -11,18 +11,18 @@
     splits = ["nuplan_mini_test", "nuplan_mini_train", "nuplan_mini_val"]
     # splits = ["nuplan_private_test"]
     # splits = ["carla"]
-    # splits = ["wopd_train"]
+    splits = ["wopd_val"]
     # splits = ["av2-sensor-mini_train"]

     log_names = None
-    scene_tokens = None
+    scene_uuids = None

     scene_filter = SceneFilter(
         split_names=splits,
         log_names=log_names,
-        scene_tokens=scene_tokens,
-        duration_s=10.0,
+        scene_uuids=scene_uuids,
+        duration_s=None,
         history_s=0.0,
-        timestamp_threshold_s=10.0,
+        timestamp_threshold_s=None,
         shuffle=True,
         camera_types=[PinholeCameraType.CAM_F0],
     )

From c38aa484ee19d783f54544573e9a4a7c7dba3a48 Mon Sep 17 00:00:00 2001
From: Daniel Dauner
Date: Sat, 11 Oct 2025 21:07:31 +0200
Subject: [PATCH 073/145] Add Waymo-specific environment variables to avoid
 logger spam.

---
 d123/conversion/datasets/wopd/wopd_converter.py | 9 +++++++--
 1 file changed, 7 insertions(+), 2 deletions(-)

diff --git a/d123/conversion/datasets/wopd/wopd_converter.py b/d123/conversion/datasets/wopd/wopd_converter.py
index 0572b72b..0c9ef209 100644
--- a/d123/conversion/datasets/wopd/wopd_converter.py
+++ b/d123/conversion/datasets/wopd/wopd_converter.py
@@ -47,9 +47,14 @@
 from waymo_open_dataset import dataset_pb2
 from waymo_open_dataset.utils import frame_utils

-logger = logging.getLogger(__name__)
-os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
 D123_MAPS_ROOT: Path = Path(os.getenv("D123_MAPS_ROOT", "$HOME/maps"))  # TODO: remove
+os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
+os.environ["OMP_NUM_THREADS"] = "1"
+os.environ["TF_CPP_MIN_LOG_LEVEL"] = "2"
+os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
+tf.config.set_visible_devices(tf.config.list_physical_devices("CPU"))
+
+logger = logging.getLogger(__name__)

 class WOPDConverter(AbstractDatasetConverter):

From ff356a53e9ee44f21aca289750b614c71084d85b Mon Sep 17 00:00:00 2001
From: Daniel Dauner
Date: Sun, 12 Oct 2025 19:12:50 +0200
Subject: [PATCH 074/145] Add map writer during conversion for nuPlan (#51).
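For orientation, the writer contract introduced below mirrors the log writer's
life cycle: `reset(...)` reports whether the map still needs writing, `write_*`
emits one map object at a time, and `close()` finalizes the output. A minimal
stand-in illustrating the calling pattern (a sketch for this commit message
only, not the real GPKG writer in d123/conversion/map_writer/gpkg_map_writer.py):

    class CountingMapWriter:
        """Toy AbstractMapWriter stand-in that counts lanes instead of writing."""

        def reset(self, dataset_converter_config, log_metadata) -> bool:
            self.num_lanes = 0
            return True  # True: the map is missing or force_map_conversion is set.

        def write_lane(self, lane) -> None:
            self.num_lanes += 1  # A real writer would serialize the CacheLane here.

        def close(self) -> None:
            print(f"Converted {self.num_lanes} lanes.")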
---
 .../visualization/matplotlib/observation.py   |   4 +-
 d123/conversion/abstract_dataset_converter.py |   4 +-
 d123/conversion/dataset_converter_config.py   |   3 +
 .../datasets/av2/av2_sensor_converter.py      |   4 +-
 .../datasets/carla/carla_data_converter.py    |  10 +-
 .../datasets/nuplan/nuplan_converter.py       |  55 +-
 .../nuplan/nuplan_map_conversion copy.py      | 463 +++++++++++
 .../datasets/nuplan/nuplan_map_conversion.py  | 772 ++++++++----------
 .../datasets/nuplan/utils/nuplan_constants.py |  15 +-
 .../datasets/wopd/wopd_converter.py           |   4 +-
 .../conversion/log_writer/arrow_log_writer.py |  14 +-
 .../map_writer/abstract_map_writer.py         |  69 ++
 d123/conversion/map_writer/gpkg_map_writer.py | 177 ++++
 d123/datatypes/maps/abstract_map_objects.py   | 151 +++-
 d123/datatypes/maps/cache/__init__.py         |   0
 .../datatypes/maps/cache/cache_map_objects.py | 311 +++++++
 d123/datatypes/maps/gpkg/gpkg_map.py          |   2 +-
 d123/datatypes/maps/gpkg/gpkg_map_objects.py  | 119 ++-
 .../maps/gpkg/{utils.py => gpkg_utils.py}     |   0
 d123/datatypes/maps/map_datatypes.py          |   3 +-
 d123/datatypes/scene/scene_metadata.py        |   2 +-
 d123/geometry/polyline.py                     |   2 +-
 ...og_writer_builder.py => writer_builder.py} |   9 +
 .../config/common/default_dataset_paths.yaml  |   5 +-
 .../conversion/datasets/nuplan_dataset.yaml   |   5 +-
 .../datasets/nuplan_mini_dataset.yaml         |   7 +-
 .../datasets/nuplan_private_dataset.yaml      |   7 +-
 .../conversion/datasets/wopd_dataset.yaml     |   7 +-
 .../config/conversion/default_conversion.yaml |   9 +-
 ..._log_writer.yaml => arrow_log_writer.yaml} |   2 +
 .../config/conversion/map_writer/__init__.py  |   0
 .../map_writer/gpkg_map_writer.yaml           |   4 +
 d123/script/run_conversion.py                 |   6 +-
 33 files changed, 1713 insertions(+), 532 deletions(-)
 create mode 100644 d123/conversion/datasets/nuplan/nuplan_map_conversion copy.py
 create mode 100644 d123/datatypes/maps/cache/__init__.py
 create mode 100644 d123/datatypes/maps/cache/cache_map_objects.py
 rename d123/datatypes/maps/gpkg/{utils.py => gpkg_utils.py} (100%)
 rename d123/script/builders/{log_writer_builder.py => writer_builder.py} (60%)
 rename d123/script/config/conversion/log_writer/{arrow_ipc_log_writer.yaml => arrow_log_writer.yaml} (90%)
 create mode 100644 d123/script/config/conversion/map_writer/__init__.py
 create mode 100644 d123/script/config/conversion/map_writer/gpkg_map_writer.yaml

diff --git a/d123/common/visualization/matplotlib/observation.py b/d123/common/visualization/matplotlib/observation.py
index 7b809f41..a2f47ebc 100644
--- a/d123/common/visualization/matplotlib/observation.py
+++ b/d123/common/visualization/matplotlib/observation.py
@@ -55,7 +55,7 @@ def add_default_map_on_ax(
     for map_object in map_objects:
         try:
             if layer in [MapLayer.LANE_GROUP]:
-                if route_lane_group_ids is not None and int(map_object.id) in route_lane_group_ids:
+                if route_lane_group_ids is not None and int(map_object.object_id) in route_lane_group_ids:
                     add_shapely_polygon_to_ax(ax, map_object.shapely_polygon, ROUTE_CONFIG)
                 else:
                     add_shapely_polygon_to_ax(ax, map_object.shapely_polygon, MAP_SURFACE_CONFIG[layer])
@@ -73,7 +73,7 @@ def add_default_map_on_ax(
         except Exception:
             import traceback

-            print(f"Error adding map object of type {layer.name} and id {map_object.id}")
+            print(f"Error adding map object of type {layer.name} and id {map_object.object_id}")
             traceback.print_exc()

     ax.set_title(f"Map: {map_api.map_name}")
diff --git a/d123/conversion/abstract_dataset_converter.py b/d123/conversion/abstract_dataset_converter.py
index f0631a37..db9c3284 100644
--- a/d123/conversion/abstract_dataset_converter.py
+++
b/d123/conversion/abstract_dataset_converter.py @@ -2,6 +2,7 @@ from d123.conversion.dataset_converter_config import DatasetConverterConfig from d123.conversion.log_writer.abstract_log_writer import AbstractLogWriter +from d123.conversion.map_writer.abstract_map_writer import AbstractMapWriter class AbstractDatasetConverter(abc.ABC): @@ -23,10 +24,11 @@ def get_number_of_logs(self) -> int: """Returns the number of available raw data logs for conversion.""" @abc.abstractmethod - def convert_map(self, map_index: int) -> None: + def convert_map(self, map_index: int, map_writer: AbstractMapWriter) -> None: """ Convert a single map in raw data format to the uniform 123D format. :param map_index: The index of the map to convert. + :param map_writer: The map writer to use for writing the converted map. """ @abc.abstractmethod diff --git a/d123/conversion/dataset_converter_config.py b/d123/conversion/dataset_converter_config.py index a55c65cf..1c6cee27 100644 --- a/d123/conversion/dataset_converter_config.py +++ b/d123/conversion/dataset_converter_config.py @@ -12,6 +12,9 @@ class DatasetConverterConfig: force_log_conversion: bool = False force_map_conversion: bool = False + # Map + include_map: bool = False + # Ego include_ego: bool = False diff --git a/d123/conversion/datasets/av2/av2_sensor_converter.py b/d123/conversion/datasets/av2/av2_sensor_converter.py index 0944fb6a..8dab4fb8 100644 --- a/d123/conversion/datasets/av2/av2_sensor_converter.py +++ b/d123/conversion/datasets/av2/av2_sensor_converter.py @@ -118,10 +118,10 @@ def convert_log(self, log_index: int, log_writer: AbstractLogWriter) -> None: ) # 2. Prepare log writer - overwrite_log = log_writer.reset(self.dataset_converter_config, log_metadata) + log_needs_writing = log_writer.reset(self.dataset_converter_config, log_metadata) # 3. Process source log data - if overwrite_log: + if log_needs_writing: sensor_df = build_sensor_dataframe(source_log_path) synchronization_df = build_synchronization_dataframe(sensor_df) diff --git a/d123/conversion/datasets/carla/carla_data_converter.py b/d123/conversion/datasets/carla/carla_data_converter.py index 11820c0f..20c6aecb 100644 --- a/d123/conversion/datasets/carla/carla_data_converter.py +++ b/d123/conversion/datasets/carla/carla_data_converter.py @@ -382,7 +382,7 @@ def _extract_traffic_light_data( lane_start_point = lane.centerline.array[0] distance_to_lane_start = np.linalg.norm(lane_start_point - point_3d.array) if distance_to_lane_start < TRAFFIC_LIGHT_ASSIGNMENT_DISTANCE: - traffic_light_ids.append(int(lane.id)) + traffic_light_ids.append(int(lane.object_id)) traffic_light_types.append(traffic_light_state) return traffic_light_ids, traffic_light_types @@ -404,19 +404,19 @@ def _extract_route_lane_group_ids(route: List[List[float]], map_api: AbstractMap if len(nearby_lane_groups) == 0: continue elif len(nearby_lane_groups) > 1: - possible_lane_group_ids = [lane_group.id for lane_group in nearby_lane_groups] + possible_lane_group_ids = [lane_group.object_id for lane_group in nearby_lane_groups] if len(route_lane_group_ids) > 0: prev_lane_group_id = route_lane_group_ids[-1] if prev_lane_group_id in possible_lane_group_ids: continue else: # TODO: Choose with least heading difference? - route_lane_group_ids.append(int(nearby_lane_groups[0].id)) + route_lane_group_ids.append(int(nearby_lane_groups[0].object_id)) else: # TODO: Choose with least heading difference? 
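# (Illustrative aside, not part of the patch: one way to resolve the TODO
# above is to rank the candidate lane groups by heading agreement with the
# route point. The accessors below are hypothetical except `centerline.array`,
# which is used on lanes earlier in this file.)
import numpy as np

def pick_lane_group_by_heading(nearby_lane_groups, route_heading: float):
    def heading_difference(lane_group) -> float:
        pts = lane_group.lanes[0].centerline.array  # hypothetical accessor
        direction = np.arctan2(pts[-1, 1] - pts[0, 1], pts[-1, 0] - pts[0, 0])
        delta = direction - route_heading
        return abs(np.arctan2(np.sin(delta), np.cos(delta)))  # wrap to [-pi, pi]

    return min(nearby_lane_groups, key=heading_difference)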
- route_lane_group_ids.append(int(nearby_lane_groups[0].id)) + route_lane_group_ids.append(int(nearby_lane_groups[0].object_id)) elif len(nearby_lane_groups) == 1: - route_lane_group_ids.append(int(nearby_lane_groups[0].id)) + route_lane_group_ids.append(int(nearby_lane_groups[0].object_id)) return list(dict.fromkeys(route_lane_group_ids)) # Remove duplicates while preserving order diff --git a/d123/conversion/datasets/nuplan/nuplan_converter.py b/d123/conversion/datasets/nuplan/nuplan_converter.py index 9be024d2..de356ed9 100644 --- a/d123/conversion/datasets/nuplan/nuplan_converter.py +++ b/d123/conversion/datasets/nuplan/nuplan_converter.py @@ -1,6 +1,4 @@ -import os import pickle -import uuid from pathlib import Path from typing import Dict, Final, List, Optional, Tuple, Union @@ -11,7 +9,7 @@ from d123.common.utils.dependencies import check_dependencies from d123.conversion.abstract_dataset_converter import AbstractDatasetConverter from d123.conversion.dataset_converter_config import DatasetConverterConfig -from d123.conversion.datasets.nuplan.nuplan_map_conversion import NuPlanMapConverter +from d123.conversion.datasets.nuplan.nuplan_map_conversion import write_nuplan_map from d123.conversion.datasets.nuplan.utils.nuplan_constants import ( NUPLAN_DATA_SPLITS, NUPLAN_DEFAULT_DT, @@ -24,6 +22,7 @@ get_nearest_ego_pose_for_timestamp_from_db, ) from d123.conversion.log_writer.abstract_log_writer import AbstractLogWriter +from d123.conversion.map_writer.abstract_map_writer import AbstractMapWriter from d123.conversion.utils.sensor_utils.lidar_index_registry import NuPlanLidarIndex from d123.datatypes.detections.detection import ( BoxDetectionSE3, @@ -82,7 +81,7 @@ def __init__( self, splits: List[str], nuplan_data_root: Union[Path, str], - nuplan_map_root: Union[Path, str], + nuplan_maps_root: Union[Path, str], nuplan_sensor_root: Union[Path, str], dataset_converter_config: DatasetConverterConfig, ) -> None: @@ -95,7 +94,7 @@ def __init__( self._splits: List[str] = splits self._nuplan_data_root: Path = Path(nuplan_data_root) - self._nuplan_map_root: Path = Path(nuplan_map_root) + self._nuplan_maps_root: Path = Path(nuplan_maps_root) self._nuplan_sensor_root: Path = Path(nuplan_sensor_root) self._split_log_path_pairs: List[Tuple[str, List[Path]]] = self._collect_split_log_path_pairs() @@ -115,16 +114,16 @@ def _collect_split_log_path_pairs(self) -> List[Tuple[str, List[Path]]]: nuplan_split_folder = self._nuplan_data_root / "nuplan-v1.1" / "splits" / "trainval" elif split in ["nuplan_test"]: nuplan_split_folder = self._nuplan_data_root / "nuplan-v1.1" / "splits" / "test" - elif split in ["nuplan_mini_train", "nuplan_mini_val", "nuplan_mini_test"]: + elif split in ["nuplan-mini_train", "nuplan-mini_val", "nuplan-mini_test"]: nuplan_split_folder = self._nuplan_data_root / "nuplan-v1.1" / "splits" / "mini" - elif split == "nuplan_private_test": + elif split == "nuplan-private_test": # TODO: Remove private split nuplan_split_folder = self._nuplan_data_root / "nuplan-v1.1" / "splits" / "private_test" all_log_files_in_path = [log_file for log_file in nuplan_split_folder.glob("*.db")] - if split == "nuplan_private_test": - # TODO: Remove private split + # TODO: Remove private split + if split == "nuplan-private_test": valid_log_names = [str(log_file.stem) for log_file in all_log_files_in_path] else: all_log_files_in_path = [log_file for log_file in nuplan_split_folder.glob("*.db")] @@ -146,21 +145,35 @@ def get_number_of_logs(self) -> int: """Inherited, see superclass.""" return 
len(self._split_log_path_pairs) - def convert_map(self, map_index: int) -> None: + def convert_map(self, map_index: int, map_writer: AbstractMapWriter) -> None: """Inherited, see superclass.""" map_name = NUPLAN_MAP_LOCATIONS[map_index] - map_path = self.dataset_converter_config.output_path / "maps" / f"nuplan_{map_name}.gpkg" - if self.dataset_converter_config.force_map_conversion or not map_path.exists(): - map_path.unlink(missing_ok=True) - NuPlanMapConverter( - nuplan_map_root=self._nuplan_map_root, - map_path=self.dataset_converter_config.output_path / "maps", - ).convert(map_name=map_name) + + # Dummy log metadata for map writing, TODO: Consider using MapMetadata instead? + log_metadata = LogMetadata( + dataset="nuplan", + split=None, + log_name=None, + location=map_name, + timestep_seconds=TARGET_DT, + vehicle_parameters=get_nuplan_chrysler_pacifica_parameters(), + camera_metadata={}, + lidar_metadata={}, + map_has_z=False, + map_is_local=False, + ) + map_needs_writing = map_writer.reset(self.dataset_converter_config, log_metadata) + if map_needs_writing: + write_nuplan_map( + map_name=map_name, + nuplan_maps_root=self._nuplan_maps_root, + map_writer=map_writer, + ) + + map_writer.close() def convert_log(self, log_index: int, log_writer: AbstractLogWriter) -> None: """Inherited, see superclass.""" - int(os.environ.get("NODE_RANK", 0)) - str(uuid.uuid4()) split, source_log_path = self._split_log_path_pairs[log_index] @@ -185,9 +198,9 @@ def convert_log(self, log_index: int, log_writer: AbstractLogWriter) -> None: ) # 2. Prepare log writer - overwrite_log = log_writer.reset(self.dataset_converter_config, log_metadata) + log_needs_writing = log_writer.reset(self.dataset_converter_config, log_metadata) - if overwrite_log: + if log_needs_writing: step_interval: float = int(TARGET_DT / NUPLAN_DEFAULT_DT) for nuplan_lidar_pc in nuplan_log_db.lidar_pc[::step_interval]: diff --git a/d123/conversion/datasets/nuplan/nuplan_map_conversion copy.py b/d123/conversion/datasets/nuplan/nuplan_map_conversion copy.py new file mode 100644 index 00000000..7ce449fb --- /dev/null +++ b/d123/conversion/datasets/nuplan/nuplan_map_conversion copy.py @@ -0,0 +1,463 @@ +# # TODO: Refactor this mess. + +# import warnings +# from pathlib import Path +# from typing import Dict, Optional, Union + +# import geopandas as gpd +# import numpy as np +# import pandas as pd +# import pyogrio +# from shapely.geometry import LineString + +# # Suppress numpy runtime warnings for casting operations +# np.seterr(invalid="ignore") + +# from d123.conversion.datasets.nuplan.utils.nuplan_constants import NUPLAN_MAP_GPKG_LAYERS, NUPLAN_MAP_LOCATION_FILES +# from d123.conversion.utils.map_utils.road_edge.road_edge_2d_utils import ( +# get_road_edge_linear_rings, +# split_line_geometry_by_max_length, +# ) +# from d123.datatypes.maps.gpkg.gpkg_utils import get_all_rows_with_value, get_row_with_value +# from d123.datatypes.maps.map_datatypes import MapLayer, RoadEdgeType, RoadLineType + +# # 0: generic lane I guess. +# # 1: ending? +# # 3: bike lanes. 
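# (Illustrative aside, not part of the patch: the boundary_type_fid semantics
# are uncertain, as the notes above suggest, so a defensive lookup with an
# UNKNOWN fallback avoids KeyErrors on fids outside the mapping defined just
# below. Sketch only:
#
#     from d123.datatypes.maps.map_datatypes import RoadLineType
#     road_line_type = NUPLAN_ROAD_LINE_CONVERSION.get(boundary_type_fid, RoadLineType.UNKNOWN)
# )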
+ +# MAX_ROAD_EDGE_LENGTH = 100.0 # meters, used to filter out very long road edges + +# NUPLAN_ROAD_LINE_CONVERSION = { +# 0: RoadLineType.DASHED_WHITE, +# 2: RoadLineType.SOLID_WHITE, +# 3: RoadLineType.UNKNOWN, +# } + + +# class NuPlanMapConverter: +# def __init__(self, nuplan_maps_root: Union[str, Path], map_path: Path) -> None: + +# self._map_path: Path = map_path +# self._nuplan_maps_root: Path = Path(nuplan_maps_root) +# self._gdf: Optional[Dict[str, gpd.GeoDataFrame]] = None + +# def convert(self, map_name: str = "us-pa-pittsburgh-hazelwood") -> None: +# assert map_name in NUPLAN_MAP_LOCATION_FILES.keys(), f"Map name {map_name} is not supported." + +# map_file_path = self._nuplan_maps_root / NUPLAN_MAP_LOCATION_FILES[map_name] +# self._load_dataframes(map_file_path) + +# dataframes: Dict[MapLayer, gpd.GeoDataFrame] = {} +# dataframes[MapLayer.LANE] = self._extract_lane_dataframe() +# dataframes[MapLayer.LANE_GROUP] = self._extract_lane_group_dataframe() +# dataframes[MapLayer.INTERSECTION] = self._extract_intersection_dataframe() +# dataframes[MapLayer.CROSSWALK] = self._extract_crosswalk_dataframe() +# dataframes[MapLayer.WALKWAY] = self._extract_walkway_dataframe() +# dataframes[MapLayer.CARPARK] = self._extract_carpark_dataframe() +# dataframes[MapLayer.GENERIC_DRIVABLE] = self._extract_generic_drivable_dataframe() +# dataframes[MapLayer.ROAD_EDGE] = self._extract_road_edge_dataframe() +# dataframes[MapLayer.ROAD_LINE] = self._extract_road_line_dataframe() + +# if not self._map_path.exists(): +# self._map_path.mkdir(parents=True, exist_ok=True) + +# try: +# map_file_name = self._map_path / f"nuplan_{map_name}.gpkg" +# with warnings.catch_warnings(): +# warnings.filterwarnings("ignore", message="'crs' was not provided") +# for layer, gdf in dataframes.items(): +# gdf.to_file(map_file_name, layer=layer.serialize(), driver="GPKG", mode="a") +# except Exception as e: +# print(f"Error occurred while converting map {map_name}: {e}") +# print(map_file_name, map_file_path) + +# def _load_dataframes(self, map_file_path: Path) -> None: + +# # The projected coordinate system depends on which UTM zone the mapped location is in. +# map_meta = gpd.read_file(map_file_path, layer="meta", engine="pyogrio") +# projection_system = map_meta[map_meta["key"] == "projectedCoordSystem"]["value"].iloc[0] + +# self._gdf = {} +# for layer_name in NUPLAN_MAP_GPKG_LAYERS: +# with warnings.catch_warnings(): +# # Suppress the warnings from the GPKG operations below so that they don't spam the training logs. +# warnings.filterwarnings("ignore") + +# gdf_in_pixel_coords = pyogrio.read_dataframe(map_file_path, layer=layer_name, fid_as_index=True) +# gdf_in_utm_coords = gdf_in_pixel_coords.to_crs(projection_system) +# # gdf_in_utm_coords = gdf_in_pixel_coords + +# # For backwards compatibility, cast the index to string datatype. +# # and mirror it to the "fid" column. +# gdf_in_utm_coords.index = gdf_in_utm_coords.index.map(str) +# gdf_in_utm_coords["fid"] = gdf_in_utm_coords.index + +# self._gdf[layer_name] = gdf_in_utm_coords + +# def _extract_lane_dataframe(self) -> gpd.GeoDataFrame: +# assert self._gdf is not None, "Call `.initialize()` before retrieving data!" 
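# (Illustrative aside, not part of the patch: the reprojection pattern in
# `_load_dataframes` above, reduced to a standalone sketch. The "meta" layer
# and "projectedCoordSystem" key follow the nuPlan GPKG schema used here; the
# helper name is made up.)
import geopandas as gpd
import pyogrio

def load_layer_in_utm(map_file_path, layer_name: str) -> gpd.GeoDataFrame:
    # The meta layer records which UTM zone the map was digitized in.
    meta = gpd.read_file(map_file_path, layer="meta", engine="pyogrio")
    crs = meta.loc[meta["key"] == "projectedCoordSystem", "value"].iloc[0]
    gdf = pyogrio.read_dataframe(map_file_path, layer=layer_name, fid_as_index=True)
    return gdf.to_crs(crs)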
+# lane_df = self._extract_nuplan_lane_dataframe() +# lane_connector_df = self._extract_nuplan_lane_connector_dataframe() +# combined_df = pd.concat([lane_df, lane_connector_df], ignore_index=True) +# return combined_df + +# def _extract_nuplan_lane_dataframe(self) -> gpd.GeoDataFrame: +# # NOTE: drops: lane_index (?), creator_id, name (?), road_type_fid (?), lane_type_fid (?), width (?), left_offset (?), right_offset (?), +# # min_speed (?), max_speed (?), stops, left_has_reflectors (?), right_has_reflectors (?), from_edge_fid, to_edge_fid + +# ids = self._gdf["lanes_polygons"].lane_fid.to_list() +# lane_group_ids = self._gdf["lanes_polygons"].lane_group_fid.to_list() +# speed_limits_mps = self._gdf["lanes_polygons"].speed_limit_mps.to_list() +# predecessor_ids = [] +# successor_ids = [] +# left_boundaries = [] +# right_boundaries = [] +# left_lane_ids = [] +# right_lane_ids = [] +# baseline_paths = [] +# geometries = self._gdf["lanes_polygons"].geometry.to_list() + +# for lane_id in ids: + +# # 1. predecessor_ids, successor_ids +# _predecessor_ids = get_all_rows_with_value( +# self._gdf["lane_connectors"], +# "entry_lane_fid", +# lane_id, +# )["fid"].tolist() +# _successor_ids = get_all_rows_with_value( +# self._gdf["lane_connectors"], +# "exit_lane_fid", +# lane_id, +# )["fid"].tolist() +# predecessor_ids.append(_predecessor_ids) +# successor_ids.append(_successor_ids) + +# # 2. left_boundaries, right_boundaries +# lane_series = get_row_with_value(self._gdf["lanes_polygons"], "fid", str(lane_id)) +# left_boundary_fid = lane_series["left_boundary_fid"] +# left_boundary = get_row_with_value(self._gdf["boundaries"], "fid", str(left_boundary_fid))["geometry"] + +# right_boundary_fid = lane_series["right_boundary_fid"] +# right_boundary = get_row_with_value(self._gdf["boundaries"], "fid", str(right_boundary_fid))["geometry"] + +# # 3. left_lane_ids, right_lane_ids +# lane_index = lane_series["lane_index"] +# all_group_lanes = get_all_rows_with_value( +# self._gdf["lanes_polygons"], "lane_group_fid", lane_series["lane_group_fid"] +# ) +# left_lane_id = all_group_lanes[all_group_lanes["lane_index"] == int(lane_index) - 1]["fid"] +# right_lane_id = all_group_lanes[all_group_lanes["lane_index"] == int(lane_index) + 1]["fid"] +# left_lane_ids.append(left_lane_id.item() if not left_lane_id.empty else None) +# right_lane_ids.append(right_lane_id.item() if not right_lane_id.empty else None) + +# # 3. 
baseline_paths +# baseline_path = get_row_with_value(self._gdf["baseline_paths"], "lane_fid", float(lane_id))["geometry"] + +# left_boundary = align_boundary_direction(baseline_path, left_boundary) +# right_boundary = align_boundary_direction(baseline_path, right_boundary) + +# left_boundaries.append(left_boundary) +# right_boundaries.append(right_boundary) +# baseline_paths.append(baseline_path) + +# data = pd.DataFrame( +# { +# "id": ids, +# "lane_group_id": lane_group_ids, +# "left_boundary": left_boundaries, +# "right_boundary": right_boundaries, +# "baseline_path": baseline_paths, +# "left_lane_id": left_lane_ids, +# "right_lane_id": right_lane_ids, +# "predecessor_ids": predecessor_ids, +# "successor_ids": successor_ids, +# "speed_limit_mps": speed_limits_mps, +# } +# ) + +# gdf = gpd.GeoDataFrame(data, geometry=geometries) +# return gdf + +# def _extract_nuplan_lane_connector_dataframe(self) -> None: +# # NOTE: drops: exit_lane_group_fid, entry_lane_group_fid, to_edge_fid, +# # turn_type_fid (?), bulb_fids (?), traffic_light_stop_line_fids (?), overlap (?), creator_id +# # left_has_reflectors (?), right_has_reflectors (?) +# ids = self._gdf["lane_connectors"].fid.to_list() +# lane_group_ids = self._gdf["lane_connectors"].lane_group_connector_fid.to_list() +# speed_limits_mps = self._gdf["lane_connectors"].speed_limit_mps.to_list() +# predecessor_ids = [] +# successor_ids = [] +# left_boundaries = [] +# right_boundaries = [] +# baseline_paths = [] +# geometries = [] + +# for lane_id in ids: +# # 1. predecessor_ids, successor_ids +# lane_connector_row = get_row_with_value(self._gdf["lane_connectors"], "fid", str(lane_id)) +# predecessor_ids.append([lane_connector_row["entry_lane_fid"]]) +# successor_ids.append([lane_connector_row["exit_lane_fid"]]) + +# # 2. left_boundaries, right_boundaries +# lane_connector_polygons_row = get_row_with_value( +# self._gdf["gen_lane_connectors_scaled_width_polygons"], "lane_connector_fid", str(lane_id) +# ) +# left_boundary_fid = lane_connector_polygons_row["left_boundary_fid"] +# left_boundary = get_row_with_value(self._gdf["boundaries"], "fid", str(left_boundary_fid))["geometry"] + +# right_boundary_fid = lane_connector_polygons_row["right_boundary_fid"] +# right_boundary = get_row_with_value(self._gdf["boundaries"], "fid", str(right_boundary_fid))["geometry"] + +# # 3. baseline_paths +# baseline_path = get_row_with_value(self._gdf["baseline_paths"], "lane_connector_fid", float(lane_id))[ +# "geometry" +# ] + +# left_boundary = align_boundary_direction(baseline_path, left_boundary) +# right_boundary = align_boundary_direction(baseline_path, right_boundary) + +# left_boundaries.append(left_boundary) +# right_boundaries.append(right_boundary) +# baseline_paths.append(baseline_path) + +# # 4. 
geometries +# geometries.append(lane_connector_polygons_row.geometry) + +# data = pd.DataFrame( +# { +# "id": ids, +# "lane_group_id": lane_group_ids, +# "speed_limit_mps": speed_limits_mps, +# "predecessor_ids": predecessor_ids, +# "successor_ids": successor_ids, +# "left_boundary": left_boundaries, +# "right_boundary": right_boundaries, +# "left_lane_id": [None] * len(ids), +# "right_lane_id": [None] * len(ids), +# "baseline_path": baseline_paths, +# } +# ) + +# gdf = gpd.GeoDataFrame(data, geometry=geometries) +# return gdf + +# def _extract_lane_group_dataframe(self) -> gpd.GeoDataFrame: +# lane_group_df = self._extract_nuplan_lane_group_dataframe() +# lane_connector_group_df = self._extract_nuplan_lane_connector_group_dataframe() +# combined_df = pd.concat([lane_group_df, lane_connector_group_df], ignore_index=True) +# return combined_df + +# def _extract_nuplan_lane_group_dataframe(self) -> gpd.GeoDataFrame: +# # NOTE: drops: creator_id, from_edge_fid, to_edge_fid +# ids = self._gdf["lane_groups_polygons"].fid.to_list() +# lane_ids = [] +# intersection_ids = [None] * len(ids) +# predecessor_lane_group_ids = [] +# successor_lane_group_ids = [] +# left_boundaries = [] +# right_boundaries = [] +# geometries = self._gdf["lane_groups_polygons"].geometry.to_list() + +# for lane_group_id in ids: +# # 1. lane_ids +# lane_ids_ = get_all_rows_with_value( +# self._gdf["lanes_polygons"], +# "lane_group_fid", +# lane_group_id, +# )["fid"].tolist() +# lane_ids.append(lane_ids_) + +# # 2. predecessor_lane_group_ids, successor_lane_group_ids +# predecessor_lane_group_ids_ = get_all_rows_with_value( +# self._gdf["lane_group_connectors"], +# "to_lane_group_fid", +# lane_group_id, +# )["fid"].tolist() +# successor_lane_group_ids_ = get_all_rows_with_value( +# self._gdf["lane_group_connectors"], +# "from_lane_group_fid", +# lane_group_id, +# )["fid"].tolist() +# predecessor_lane_group_ids.append(predecessor_lane_group_ids_) +# successor_lane_group_ids.append(successor_lane_group_ids_) + +# # 3. 
left_boundaries, right_boundaries +# lane_group_row = get_row_with_value(self._gdf["lane_groups_polygons"], "fid", str(lane_group_id)) +# left_boundary_fid = lane_group_row["left_boundary_fid"] +# left_boundary = get_row_with_value(self._gdf["boundaries"], "fid", str(left_boundary_fid))["geometry"] + +# right_boundary_fid = lane_group_row["right_boundary_fid"] +# right_boundary = get_row_with_value(self._gdf["boundaries"], "fid", str(right_boundary_fid))["geometry"] + +# repr_baseline_path = get_row_with_value(self._gdf["baseline_paths"], "lane_fid", float(lane_ids_[0]))[ +# "geometry" +# ] + +# left_boundary = align_boundary_direction(repr_baseline_path, left_boundary) +# right_boundary = align_boundary_direction(repr_baseline_path, right_boundary) + +# left_boundaries.append(left_boundary) +# right_boundaries.append(right_boundary) + +# data = pd.DataFrame( +# { +# "id": ids, +# "lane_ids": lane_ids, +# "intersection_id": intersection_ids, +# "predecessor_lane_group_ids": predecessor_lane_group_ids, +# "successor_lane_group_ids": successor_lane_group_ids, +# "left_boundary": left_boundaries, +# "right_boundary": right_boundaries, +# } +# ) +# gdf = gpd.GeoDataFrame(data, geometry=geometries) +# return gdf + +# def _extract_nuplan_lane_connector_group_dataframe(self) -> gpd.GeoDataFrame: +# # NOTE: drops: creator_id, from_edge_fid, to_edge_fid, intersection_fid +# ids = self._gdf["lane_group_connectors"].fid.to_list() +# lane_ids = [] +# intersection_ids = self._gdf["lane_group_connectors"].intersection_fid.to_list() +# predecessor_lane_group_ids = [] +# successor_lane_group_ids = [] +# left_boundaries = [] +# right_boundaries = [] +# geometries = self._gdf["lane_group_connectors"].geometry.to_list() + +# for lane_group_connector_id in ids: +# # 1. lane_ids +# lane_ids_ = get_all_rows_with_value( +# self._gdf["lane_connectors"], "lane_group_connector_fid", lane_group_connector_id +# )["fid"].tolist() +# lane_ids.append(lane_ids_) + +# # 2. predecessor_lane_group_ids, successor_lane_group_ids +# lane_group_connector_row = get_row_with_value( +# self._gdf["lane_group_connectors"], "fid", lane_group_connector_id +# ) +# predecessor_lane_group_ids.append([str(lane_group_connector_row["from_lane_group_fid"])]) +# successor_lane_group_ids.append([str(lane_group_connector_row["to_lane_group_fid"])]) + +# # 3. left_boundaries, right_boundaries +# left_boundary_fid = lane_group_connector_row["left_boundary_fid"] +# left_boundary = get_row_with_value(self._gdf["boundaries"], "fid", str(left_boundary_fid))["geometry"] +# right_boundary_fid = lane_group_connector_row["right_boundary_fid"] +# right_boundary = get_row_with_value(self._gdf["boundaries"], "fid", str(right_boundary_fid))["geometry"] + +# left_boundaries.append(left_boundary) +# right_boundaries.append(right_boundary) + +# data = pd.DataFrame( +# { +# "id": ids, +# "lane_ids": lane_ids, +# "intersection_id": intersection_ids, +# "predecessor_lane_group_ids": predecessor_lane_group_ids, +# "successor_lane_group_ids": successor_lane_group_ids, +# "left_boundary": left_boundaries, +# "right_boundary": right_boundaries, +# } +# ) +# gdf = gpd.GeoDataFrame(data, geometry=geometries) +# return gdf + +# def _extract_intersection_dataframe(self) -> gpd.GeoDataFrame: +# # NOTE: drops: creator_id, intersection_type_fid (?), is_mini (?) 
+# ids = self._gdf["intersections"].fid.to_list() +# lane_group_ids = [] +# for intersection_id in ids: +# lane_group_connector_ids = get_all_rows_with_value( +# self._gdf["lane_group_connectors"], "intersection_fid", str(intersection_id) +# )["fid"].tolist() +# lane_group_ids.append(lane_group_connector_ids) +# data = pd.DataFrame({"id": ids, "lane_group_ids": lane_group_ids}) +# return gpd.GeoDataFrame(data, geometry=self._gdf["intersections"].geometry.to_list()) + +# def _extract_crosswalk_dataframe(self) -> gpd.GeoDataFrame: +# # NOTE: drops: creator_id, intersection_fids, lane_fids, is_marked (?) +# data = pd.DataFrame({"id": self._gdf["crosswalks"].fid.to_list()}) +# return gpd.GeoDataFrame(data, geometry=self._gdf["crosswalks"].geometry.to_list()) + +# def _extract_walkway_dataframe(self) -> gpd.GeoDataFrame: +# # NOTE: drops: creator_id +# data = pd.DataFrame({"id": self._gdf["walkways"].fid.to_list()}) +# return gpd.GeoDataFrame(data, geometry=self._gdf["walkways"].geometry.to_list()) + +# def _extract_carpark_dataframe(self) -> gpd.GeoDataFrame: +# # NOTE: drops: heading, creator_id +# data = pd.DataFrame({"id": self._gdf["carpark_areas"].fid.to_list()}) +# return gpd.GeoDataFrame(data, geometry=self._gdf["carpark_areas"].geometry.to_list()) + +# def _extract_generic_drivable_dataframe(self) -> gpd.GeoDataFrame: +# # NOTE: drops: creator_id +# data = pd.DataFrame({"id": self._gdf["generic_drivable_areas"].fid.to_list()}) +# return gpd.GeoDataFrame(data, geometry=self._gdf["generic_drivable_areas"].geometry.to_list()) + +# def _extract_road_edge_dataframe(self) -> gpd.GeoDataFrame: +# drivable_polygons = ( +# self._gdf["intersections"].geometry.to_list() +# + self._gdf["lane_groups_polygons"].geometry.to_list() +# + self._gdf["carpark_areas"].geometry.to_list() +# + self._gdf["generic_drivable_areas"].geometry.to_list() +# ) +# road_edge_linear_rings = get_road_edge_linear_rings(drivable_polygons) +# road_edges = split_line_geometry_by_max_length(road_edge_linear_rings, MAX_ROAD_EDGE_LENGTH) + +# ids = [] +# road_edge_types = [] +# for idx in range(len(road_edges)): +# ids.append(idx) +# # TODO @DanielDauner: Figure out if other types should/could be assigned here. +# road_edge_types.append(int(RoadEdgeType.ROAD_EDGE_BOUNDARY)) + +# data = pd.DataFrame({"id": ids, "road_edge_type": road_edge_types}) +# return gpd.GeoDataFrame(data, geometry=road_edges) + +# def _extract_road_line_dataframe(self) -> gpd.GeoDataFrame: +# boundaries = self._gdf["boundaries"].geometry.to_list() +# fids = self._gdf["boundaries"].fid.to_list() +# boundary_types = self._gdf["boundaries"].boundary_type_fid.to_list() + +# ids = [] +# road_line_types = [] +# geometries = [] + +# for idx in range(len(boundary_types)): +# ids.append(fids[idx]) +# road_line_types.append(int(NUPLAN_ROAD_LINE_CONVERSION[boundary_types[idx]])) +# geometries.append(boundaries[idx]) + +# data = pd.DataFrame( +# { +# "id": ids, +# "road_line_type": road_line_types, +# } +# ) +# return gpd.GeoDataFrame(data, geometry=geometries) + + +# def flip_linestring(linestring: LineString) -> LineString: +# # TODO: move somewhere more appropriate or implement in Polyline2D, PolylineSE2, etc. +# return LineString(linestring.coords[::-1]) + + +# def lines_same_direction(centerline: LineString, boundary: LineString) -> bool: +# # TODO: refactor helper function. 
+# center_start = np.array(centerline.coords[0]) +# center_end = np.array(centerline.coords[-1]) +# boundary_start = np.array(boundary.coords[0]) +# boundary_end = np.array(boundary.coords[-1]) + +# # Distance from centerline start to boundary start + centerline end to boundary end +# same_dir_dist = np.linalg.norm(center_start - boundary_start) + np.linalg.norm(center_end - boundary_end) +# opposite_dir_dist = np.linalg.norm(center_start - boundary_end) + np.linalg.norm(center_end - boundary_start) + +# return same_dir_dist <= opposite_dir_dist + + +# def align_boundary_direction(centerline: LineString, boundary: LineString) -> LineString: +# # TODO: refactor helper function. +# if not lines_same_direction(centerline, boundary): +# return flip_linestring(boundary) +# return boundary diff --git a/d123/conversion/datasets/nuplan/nuplan_map_conversion.py b/d123/conversion/datasets/nuplan/nuplan_map_conversion.py index a87ad7b1..4b1dc5f7 100644 --- a/d123/conversion/datasets/nuplan/nuplan_map_conversion.py +++ b/d123/conversion/datasets/nuplan/nuplan_map_conversion.py @@ -1,443 +1,381 @@ -# TODO: Refactor this mess. - import warnings from pathlib import Path -from typing import Dict, Optional, Union +from typing import Dict, Final import geopandas as gpd import numpy as np -import pandas as pd import pyogrio -from shapely.geometry import LineString - -# Suppress numpy runtime warnings for casting operations -np.seterr(invalid="ignore") +from shapely import LineString -from d123.conversion.datasets.nuplan.utils.nuplan_constants import NUPLAN_MAP_GPKG_LAYERS, NUPLAN_MAP_LOCATION_FILES +from d123.conversion.datasets.nuplan.utils.nuplan_constants import ( + NUPLAN_MAP_GPKG_LAYERS, + NUPLAN_MAP_LOCATION_FILES, + NUPLAN_ROAD_LINE_CONVERSION, +) +from d123.conversion.map_writer.abstract_map_writer import AbstractMapWriter from d123.conversion.utils.map_utils.road_edge.road_edge_2d_utils import ( get_road_edge_linear_rings, split_line_geometry_by_max_length, ) -from d123.datatypes.maps.gpkg.utils import get_all_rows_with_value, get_row_with_value -from d123.datatypes.maps.map_datatypes import MapLayer, RoadEdgeType, RoadLineType - -# 0: generic lane I guess. -# 1: ending? -# 3: bike lanes. - -MAX_ROAD_EDGE_LENGTH = 100.0 # meters, used to filter out very long road edges - -NUPLAN_ROAD_LINE_CONVERSION = { - 0: RoadLineType.DASHED_WHITE, - 2: RoadLineType.SOLID_WHITE, - 3: RoadLineType.UNKNOWN, -} - - -class NuPlanMapConverter: - def __init__(self, nuplan_map_root: Union[str, Path], map_path: Path) -> None: - - self._map_path: Path = map_path - self._nuplan_maps_root: Path = Path(nuplan_map_root) - self._gdf: Optional[Dict[str, gpd.GeoDataFrame]] = None - - def convert(self, map_name: str = "us-pa-pittsburgh-hazelwood") -> None: - assert map_name in NUPLAN_MAP_LOCATION_FILES.keys(), f"Map name {map_name} is not supported." 
- - map_file_path = self._nuplan_maps_root / NUPLAN_MAP_LOCATION_FILES[map_name] - self._load_dataframes(map_file_path) - - dataframes: Dict[MapLayer, gpd.GeoDataFrame] = {} - dataframes[MapLayer.LANE] = self._extract_lane_dataframe() - dataframes[MapLayer.LANE_GROUP] = self._extract_lane_group_dataframe() - dataframes[MapLayer.INTERSECTION] = self._extract_intersection_dataframe() - dataframes[MapLayer.CROSSWALK] = self._extract_crosswalk_dataframe() - dataframes[MapLayer.WALKWAY] = self._extract_walkway_dataframe() - dataframes[MapLayer.CARPARK] = self._extract_carpark_dataframe() - dataframes[MapLayer.GENERIC_DRIVABLE] = self._extract_generic_drivable_dataframe() - dataframes[MapLayer.ROAD_EDGE] = self._extract_road_edge_dataframe() - dataframes[MapLayer.ROAD_LINE] = self._extract_road_line_dataframe() - - if not self._map_path.exists(): - self._map_path.mkdir(parents=True, exist_ok=True) - - try: - map_file_name = self._map_path / f"nuplan_{map_name}.gpkg" - with warnings.catch_warnings(): - warnings.filterwarnings("ignore", message="'crs' was not provided") - for layer, gdf in dataframes.items(): - gdf.to_file(map_file_name, layer=layer.serialize(), driver="GPKG", mode="a") - except Exception as e: - print(f"Error occurred while converting map {map_name}: {e}") - print(map_file_name, map_file_path) - - def _load_dataframes(self, map_file_path: Path) -> None: - - # The projected coordinate system depends on which UTM zone the mapped location is in. - map_meta = gpd.read_file(map_file_path, layer="meta", engine="pyogrio") - projection_system = map_meta[map_meta["key"] == "projectedCoordSystem"]["value"].iloc[0] - - self._gdf = {} - for layer_name in NUPLAN_MAP_GPKG_LAYERS: - with warnings.catch_warnings(): - # Suppress the warnings from the GPKG operations below so that they don't spam the training logs. - warnings.filterwarnings("ignore") - - gdf_in_pixel_coords = pyogrio.read_dataframe(map_file_path, layer=layer_name, fid_as_index=True) - gdf_in_utm_coords = gdf_in_pixel_coords.to_crs(projection_system) - # gdf_in_utm_coords = gdf_in_pixel_coords - - # For backwards compatibility, cast the index to string datatype. - # and mirror it to the "fid" column. - gdf_in_utm_coords.index = gdf_in_utm_coords.index.map(str) - gdf_in_utm_coords["fid"] = gdf_in_utm_coords.index - - self._gdf[layer_name] = gdf_in_utm_coords - - def _extract_lane_dataframe(self) -> gpd.GeoDataFrame: - assert self._gdf is not None, "Call `.initialize()` before retrieving data!" - lane_df = self._extract_nuplan_lane_dataframe() - lane_connector_df = self._extract_nuplan_lane_connector_dataframe() - combined_df = pd.concat([lane_df, lane_connector_df], ignore_index=True) - return combined_df - - def _extract_nuplan_lane_dataframe(self) -> gpd.GeoDataFrame: - # NOTE: drops: lane_index (?), creator_id, name (?), road_type_fid (?), lane_type_fid (?), width (?), left_offset (?), right_offset (?), - # min_speed (?), max_speed (?), stops, left_has_reflectors (?), right_has_reflectors (?), from_edge_fid, to_edge_fid - - ids = self._gdf["lanes_polygons"].lane_fid.to_list() - lane_group_ids = self._gdf["lanes_polygons"].lane_group_fid.to_list() - speed_limits_mps = self._gdf["lanes_polygons"].speed_limit_mps.to_list() - predecessor_ids = [] - successor_ids = [] - left_boundaries = [] - right_boundaries = [] - left_lane_ids = [] - right_lane_ids = [] - baseline_paths = [] - geometries = self._gdf["lanes_polygons"].geometry.to_list() - - for lane_id in ids: - - # 1. 
predecessor_ids, successor_ids
-            _predecessor_ids = get_all_rows_with_value(
-                self._gdf["lane_connectors"],
-                "entry_lane_fid",
-                lane_id,
-            )["fid"].tolist()
-            _successor_ids = get_all_rows_with_value(
-                self._gdf["lane_connectors"],
-                "exit_lane_fid",
-                lane_id,
-            )["fid"].tolist()
-            predecessor_ids.append(_predecessor_ids)
-            successor_ids.append(_successor_ids)
-
-            # 2. left_boundaries, right_boundaries
-            lane_series = get_row_with_value(self._gdf["lanes_polygons"], "fid", str(lane_id))
-            left_boundary_fid = lane_series["left_boundary_fid"]
-            left_boundary = get_row_with_value(self._gdf["boundaries"], "fid", str(left_boundary_fid))["geometry"]
-
-            right_boundary_fid = lane_series["right_boundary_fid"]
-            right_boundary = get_row_with_value(self._gdf["boundaries"], "fid", str(right_boundary_fid))["geometry"]
-
-            # 3. left_lane_ids, right_lane_ids
-            lane_index = lane_series["lane_index"]
-            all_group_lanes = get_all_rows_with_value(
-                self._gdf["lanes_polygons"], "lane_group_fid", lane_series["lane_group_fid"]
+from d123.datatypes.maps.cache.cache_map_objects import (
+    CacheCarpark,
+    CacheCrosswalk,
+    CacheGenericDrivable,
+    CacheIntersection,
+    CacheLane,
+    CacheLaneGroup,
+    CacheRoadEdge,
+    CacheRoadLine,
+    CacheWalkway,
+)
+from d123.datatypes.maps.gpkg.gpkg_utils import get_all_rows_with_value, get_row_with_value
+from d123.datatypes.maps.map_datatypes import RoadEdgeType
+from d123.geometry.polyline import Polyline2D, Polyline3D
+
+MAX_ROAD_EDGE_LENGTH: Final[float] = 100.0  # meters, used to filter out very long road edges. TODO @add to config?
+
+
+def write_nuplan_map(nuplan_maps_root: Path, map_name: str, map_writer: AbstractMapWriter) -> None:
+    assert map_name in NUPLAN_MAP_LOCATION_FILES.keys(), f"Map name {map_name} is not supported."
+    source_map_path = nuplan_maps_root / NUPLAN_MAP_LOCATION_FILES[map_name]
+    assert source_map_path.exists(), f"Map file {source_map_path} does not exist."
+    nuplan_gdf = _load_nuplan_gdf(source_map_path)
+    _write_nuplan_lanes(nuplan_gdf, map_writer)
+    _write_nuplan_lane_connectors(nuplan_gdf, map_writer)
+    _write_nuplan_lane_groups(nuplan_gdf, map_writer)
+    _write_nuplan_lane_connector_groups(nuplan_gdf, map_writer)
+    _write_nuplan_intersections(nuplan_gdf, map_writer)
+    _write_nuplan_crosswalks(nuplan_gdf, map_writer)
+    _write_nuplan_walkways(nuplan_gdf, map_writer)
+    _write_nuplan_carparks(nuplan_gdf, map_writer)
+    _write_nuplan_generic_drivables(nuplan_gdf, map_writer)
+    _write_nuplan_road_edges(nuplan_gdf, map_writer)
+    _write_nuplan_road_lines(nuplan_gdf, map_writer)
+    del nuplan_gdf
+
+
+def _write_nuplan_lanes(nuplan_gdf: Dict[str, gpd.GeoDataFrame], map_writer: AbstractMapWriter) -> None:
+
+    # NOTE: drops: lane_index (?), creator_id, name (?), road_type_fid (?), lane_type_fid (?), width (?),
+    # left_offset (?), right_offset (?), min_speed (?), max_speed (?), stops, left_has_reflectors (?),
+    # right_has_reflectors (?), from_edge_fid, to_edge_fid
+
+    all_ids = nuplan_gdf["lanes_polygons"].lane_fid.to_list()
+    all_lane_group_ids = nuplan_gdf["lanes_polygons"].lane_group_fid.to_list()
+    all_speed_limits_mps = nuplan_gdf["lanes_polygons"].speed_limit_mps.to_list()
+    all_geometries = nuplan_gdf["lanes_polygons"].geometry.to_list()
+
+    for idx, lane_id in enumerate(all_ids):
+
+        # 1. 
predecessor_ids, successor_ids
+        predecessor_ids = get_all_rows_with_value(
+            nuplan_gdf["lane_connectors"],
+            "entry_lane_fid",
+            lane_id,
+        )["fid"].tolist()
+        successor_ids = get_all_rows_with_value(
+            nuplan_gdf["lane_connectors"],
+            "exit_lane_fid",
+            lane_id,
+        )["fid"].tolist()
+
+        # 2. left_boundary, right_boundary
+        lane_series = get_row_with_value(nuplan_gdf["lanes_polygons"], "fid", str(lane_id))
+        left_boundary_fid = lane_series["left_boundary_fid"]
+        left_boundary = get_row_with_value(nuplan_gdf["boundaries"], "fid", str(left_boundary_fid))["geometry"]
+
+        right_boundary_fid = lane_series["right_boundary_fid"]
+        right_boundary = get_row_with_value(nuplan_gdf["boundaries"], "fid", str(right_boundary_fid))["geometry"]
+
+        # 3. left_lane_id, right_lane_id
+        lane_index = lane_series["lane_index"]
+        all_group_lanes = get_all_rows_with_value(
+            nuplan_gdf["lanes_polygons"], "lane_group_fid", lane_series["lane_group_fid"]
+        )
+        left_lane_id = all_group_lanes[all_group_lanes["lane_index"] == int(lane_index) - 1]["fid"]
+        right_lane_id = all_group_lanes[all_group_lanes["lane_index"] == int(lane_index) + 1]["fid"]
+        left_lane_id = left_lane_id.item() if not left_lane_id.empty else None
+        right_lane_id = right_lane_id.item() if not right_lane_id.empty else None
+
+        # 4. centerline (aka. baseline_path)
+        centerline = get_row_with_value(nuplan_gdf["baseline_paths"], "lane_fid", float(lane_id))["geometry"]
+
+        # Ensure the left/right boundaries are aligned with the baseline path direction.
+        left_boundary = align_boundary_direction(centerline, left_boundary)
+        right_boundary = align_boundary_direction(centerline, right_boundary)
+
+        map_writer.write_lane(
+            CacheLane(
+                object_id=lane_id,
+                lane_group_id=all_lane_group_ids[idx],
+                left_boundary=Polyline3D.from_linestring(left_boundary),
+                right_boundary=Polyline3D.from_linestring(right_boundary),
+                centerline=Polyline3D.from_linestring(centerline),
+                left_lane_id=left_lane_id,
+                right_lane_id=right_lane_id,
+                predecessor_ids=predecessor_ids,
+                successor_ids=successor_ids,
+                speed_limit_mps=all_speed_limits_mps[idx],
+                outline=None,
+                geometry=all_geometries[idx],
+            )
+        )
+
+
+def _write_nuplan_lane_connectors(nuplan_gdf: Dict[str, gpd.GeoDataFrame], map_writer: AbstractMapWriter) -> None:
+
+    # NOTE: drops: exit_lane_group_fid, entry_lane_group_fid, to_edge_fid,
+    # turn_type_fid (?), bulb_fids (?), traffic_light_stop_line_fids (?), overlap (?), creator_id
+    # left_has_reflectors (?), right_has_reflectors (?)
+    all_ids = nuplan_gdf["lane_connectors"].fid.to_list()
+    all_lane_group_ids = nuplan_gdf["lane_connectors"].lane_group_connector_fid.to_list()
+    all_speed_limits_mps = nuplan_gdf["lane_connectors"].speed_limit_mps.to_list()
+
+    for idx, lane_id in enumerate(all_ids):
+
+        # 1. predecessor_ids, successor_ids
+        lane_connector_row = get_row_with_value(nuplan_gdf["lane_connectors"], "fid", str(lane_id))
+        predecessor_ids = [lane_connector_row["entry_lane_fid"]]
+        successor_ids = [lane_connector_row["exit_lane_fid"]]
+
+        # 2. 
left_boundaries, right_boundaries + lane_connector_polygons_row = get_row_with_value( + nuplan_gdf["gen_lane_connectors_scaled_width_polygons"], "lane_connector_fid", str(lane_id) + ) + left_boundary_fid = lane_connector_polygons_row["left_boundary_fid"] + left_boundary = get_row_with_value(nuplan_gdf["boundaries"], "fid", str(left_boundary_fid))["geometry"] + + right_boundary_fid = lane_connector_polygons_row["right_boundary_fid"] + right_boundary = get_row_with_value(nuplan_gdf["boundaries"], "fid", str(right_boundary_fid))["geometry"] + + # 3. baseline_paths + centerline = get_row_with_value(nuplan_gdf["baseline_paths"], "lane_connector_fid", float(lane_id))["geometry"] + + left_boundary = align_boundary_direction(centerline, left_boundary) + right_boundary = align_boundary_direction(centerline, right_boundary) + + # # 4. geometries + # geometries.append(lane_connector_polygons_row.geometry) + + map_writer.write_lane( + CacheLane( + object_id=lane_id, + lane_group_id=all_lane_group_ids[idx], + left_boundary=Polyline3D.from_linestring(left_boundary), + right_boundary=Polyline3D.from_linestring(right_boundary), + centerline=Polyline3D.from_linestring(centerline), + left_lane_id=None, + right_lane_id=None, + predecessor_ids=predecessor_ids, + successor_ids=successor_ids, + speed_limit_mps=all_speed_limits_mps[idx], + outline=None, + geometry=lane_connector_polygons_row.geometry, ) - left_lane_id = all_group_lanes[all_group_lanes["lane_index"] == int(lane_index) - 1]["fid"] - right_lane_id = all_group_lanes[all_group_lanes["lane_index"] == int(lane_index) + 1]["fid"] - left_lane_ids.append(left_lane_id.item() if not left_lane_id.empty else None) - right_lane_ids.append(right_lane_id.item() if not right_lane_id.empty else None) - - # 3. baseline_paths - baseline_path = get_row_with_value(self._gdf["baseline_paths"], "lane_fid", float(lane_id))["geometry"] - - left_boundary = align_boundary_direction(baseline_path, left_boundary) - right_boundary = align_boundary_direction(baseline_path, right_boundary) - - left_boundaries.append(left_boundary) - right_boundaries.append(right_boundary) - baseline_paths.append(baseline_path) - - data = pd.DataFrame( - { - "id": ids, - "lane_group_id": lane_group_ids, - "speed_limit_mps": speed_limits_mps, - "predecessor_ids": predecessor_ids, - "successor_ids": successor_ids, - "left_boundary": left_boundaries, - "right_boundary": right_boundaries, - "left_lane_id": left_lane_ids, - "right_lane_id": right_lane_ids, - "baseline_path": baseline_paths, - } ) - gdf = gpd.GeoDataFrame(data, geometry=geometries) - return gdf - - def _extract_nuplan_lane_connector_dataframe(self) -> None: - # NOTE: drops: exit_lane_group_fid, entry_lane_group_fid, to_edge_fid, - # turn_type_fid (?), bulb_fids (?), traffic_light_stop_line_fids (?), overlap (?), creator_id - # left_has_reflectors (?), right_has_reflectors (?) - ids = self._gdf["lane_connectors"].fid.to_list() - lane_group_ids = self._gdf["lane_connectors"].lane_group_connector_fid.to_list() - speed_limits_mps = self._gdf["lane_connectors"].speed_limit_mps.to_list() - predecessor_ids = [] - successor_ids = [] - left_boundaries = [] - right_boundaries = [] - baseline_paths = [] - geometries = [] - - for lane_id in ids: - # 1. predecessor_ids, successor_ids - lane_connector_row = get_row_with_value(self._gdf["lane_connectors"], "fid", str(lane_id)) - predecessor_ids.append([lane_connector_row["entry_lane_fid"]]) - successor_ids.append([lane_connector_row["exit_lane_fid"]]) - - # 2. 
left_boundaries, right_boundaries - lane_connector_polygons_row = get_row_with_value( - self._gdf["gen_lane_connectors_scaled_width_polygons"], "lane_connector_fid", str(lane_id) + +def _write_nuplan_lane_groups(nuplan_gdf: Dict[str, gpd.GeoDataFrame], map_writer: AbstractMapWriter) -> None: + # NOTE: drops: creator_id, from_edge_fid, to_edge_fid + ids = nuplan_gdf["lane_groups_polygons"].fid.to_list() + # all_geometries = nuplan_gdf["lane_groups_polygons"].geometry.to_list() + + for lane_group_id in ids: + + # 1. lane_ids + lane_ids = get_all_rows_with_value( + nuplan_gdf["lanes_polygons"], + "lane_group_fid", + lane_group_id, + )["fid"].tolist() + + # 2. predecessor_lane_group_ids, successor_lane_group_ids + predecessor_lane_group_ids = get_all_rows_with_value( + nuplan_gdf["lane_group_connectors"], + "to_lane_group_fid", + lane_group_id, + )["fid"].tolist() + successor_lane_group_ids = get_all_rows_with_value( + nuplan_gdf["lane_group_connectors"], + "from_lane_group_fid", + lane_group_id, + )["fid"].tolist() + + # 3. left_boundaries, right_boundaries + lane_group_row = get_row_with_value(nuplan_gdf["lane_groups_polygons"], "fid", str(lane_group_id)) + left_boundary_fid = lane_group_row["left_boundary_fid"] + left_boundary = get_row_with_value(nuplan_gdf["boundaries"], "fid", str(left_boundary_fid))["geometry"] + + right_boundary_fid = lane_group_row["right_boundary_fid"] + right_boundary = get_row_with_value(nuplan_gdf["boundaries"], "fid", str(right_boundary_fid))["geometry"] + + # Flip the boundaries to align with the first lane's baseline path direction. + repr_centerline = get_row_with_value(nuplan_gdf["baseline_paths"], "lane_fid", float(lane_ids[0]))["geometry"] + + left_boundary = align_boundary_direction(repr_centerline, left_boundary) + right_boundary = align_boundary_direction(repr_centerline, right_boundary) + + map_writer.write_lane_group( + CacheLaneGroup( + object_id=lane_group_id, + lane_ids=lane_ids, + left_boundary=Polyline3D.from_linestring(left_boundary), + right_boundary=Polyline3D.from_linestring(right_boundary), + intersection_id=None, + predecessor_ids=predecessor_lane_group_ids, + successor_ids=successor_lane_group_ids, + outline=None, + geometry=lane_group_row.geometry, ) - left_boundary_fid = lane_connector_polygons_row["left_boundary_fid"] - left_boundary = get_row_with_value(self._gdf["boundaries"], "fid", str(left_boundary_fid))["geometry"] - - right_boundary_fid = lane_connector_polygons_row["right_boundary_fid"] - right_boundary = get_row_with_value(self._gdf["boundaries"], "fid", str(right_boundary_fid))["geometry"] - - # 3. baseline_paths - baseline_path = get_row_with_value(self._gdf["baseline_paths"], "lane_connector_fid", float(lane_id))[ - "geometry" - ] - - left_boundary = align_boundary_direction(baseline_path, left_boundary) - right_boundary = align_boundary_direction(baseline_path, right_boundary) - - left_boundaries.append(left_boundary) - right_boundaries.append(right_boundary) - baseline_paths.append(baseline_path) - - # 4. 
geometries - geometries.append(lane_connector_polygons_row.geometry) - - data = pd.DataFrame( - { - "id": ids, - "lane_group_id": lane_group_ids, - "speed_limit_mps": speed_limits_mps, - "predecessor_ids": predecessor_ids, - "successor_ids": successor_ids, - "left_boundary": left_boundaries, - "right_boundary": right_boundaries, - "left_lane_id": [None] * len(ids), - "right_lane_id": [None] * len(ids), - "baseline_path": baseline_paths, - } ) - gdf = gpd.GeoDataFrame(data, geometry=geometries) - return gdf - - def _extract_lane_group_dataframe(self) -> gpd.GeoDataFrame: - lane_group_df = self._extract_nuplan_lane_group_dataframe() - lane_connector_group_df = self._extract_nuplan_lane_connector_group_dataframe() - combined_df = pd.concat([lane_group_df, lane_connector_group_df], ignore_index=True) - return combined_df - - def _extract_nuplan_lane_group_dataframe(self) -> gpd.GeoDataFrame: - # NOTE: drops: creator_id, from_edge_fid, to_edge_fid - ids = self._gdf["lane_groups_polygons"].fid.to_list() - lane_ids = [] - intersection_ids = [None] * len(ids) - predecessor_lane_group_ids = [] - successor_lane_group_ids = [] - left_boundaries = [] - right_boundaries = [] - geometries = self._gdf["lane_groups_polygons"].geometry.to_list() - - for lane_group_id in ids: - # 1. lane_ids - lane_ids_ = get_all_rows_with_value( - self._gdf["lanes_polygons"], - "lane_group_fid", - lane_group_id, - )["fid"].tolist() - lane_ids.append(lane_ids_) - - # 2. predecessor_lane_group_ids, successor_lane_group_ids - predecessor_lane_group_ids_ = get_all_rows_with_value( - self._gdf["lane_group_connectors"], - "to_lane_group_fid", - lane_group_id, - )["fid"].tolist() - successor_lane_group_ids_ = get_all_rows_with_value( - self._gdf["lane_group_connectors"], - "from_lane_group_fid", - lane_group_id, - )["fid"].tolist() - predecessor_lane_group_ids.append(predecessor_lane_group_ids_) - successor_lane_group_ids.append(successor_lane_group_ids_) - - # 3. 
left_boundaries, right_boundaries - lane_group_row = get_row_with_value(self._gdf["lane_groups_polygons"], "fid", str(lane_group_id)) - left_boundary_fid = lane_group_row["left_boundary_fid"] - left_boundary = get_row_with_value(self._gdf["boundaries"], "fid", str(left_boundary_fid))["geometry"] - - right_boundary_fid = lane_group_row["right_boundary_fid"] - right_boundary = get_row_with_value(self._gdf["boundaries"], "fid", str(right_boundary_fid))["geometry"] - - repr_baseline_path = get_row_with_value(self._gdf["baseline_paths"], "lane_fid", float(lane_ids_[0]))[ - "geometry" - ] - - left_boundary = align_boundary_direction(repr_baseline_path, left_boundary) - right_boundary = align_boundary_direction(repr_baseline_path, right_boundary) - - left_boundaries.append(left_boundary) - right_boundaries.append(right_boundary) - - data = pd.DataFrame( - { - "id": ids, - "lane_ids": lane_ids, - "intersection_id": intersection_ids, - "predecessor_lane_group_ids": predecessor_lane_group_ids, - "successor_lane_group_ids": successor_lane_group_ids, - "left_boundary": left_boundaries, - "right_boundary": right_boundaries, - } + +def _write_nuplan_lane_connector_groups(nuplan_gdf: Dict[str, gpd.GeoDataFrame], map_writer: AbstractMapWriter) -> None: + # NOTE: drops: creator_id, from_edge_fid, to_edge_fid, intersection_fid + ids = nuplan_gdf["lane_group_connectors"].fid.to_list() + all_intersection_ids = nuplan_gdf["lane_group_connectors"].intersection_fid.to_list() + # all_geometries = nuplan_gdf["lane_group_connectors"].geometry.to_list() + + for idx, lane_group_connector_id in enumerate(ids): + + # 1. lane_ids + lane_ids = get_all_rows_with_value( + nuplan_gdf["lane_connectors"], "lane_group_connector_fid", lane_group_connector_id + )["fid"].tolist() + + # 2. predecessor_lane_group_ids, successor_lane_group_ids + lane_group_connector_row = get_row_with_value( + nuplan_gdf["lane_group_connectors"], "fid", lane_group_connector_id ) - gdf = gpd.GeoDataFrame(data, geometry=geometries) - return gdf - - def _extract_nuplan_lane_connector_group_dataframe(self) -> gpd.GeoDataFrame: - # NOTE: drops: creator_id, from_edge_fid, to_edge_fid, intersection_fid - ids = self._gdf["lane_group_connectors"].fid.to_list() - lane_ids = [] - intersection_ids = self._gdf["lane_group_connectors"].intersection_fid.to_list() - predecessor_lane_group_ids = [] - successor_lane_group_ids = [] - left_boundaries = [] - right_boundaries = [] - geometries = self._gdf["lane_group_connectors"].geometry.to_list() - - for lane_group_connector_id in ids: - # 1. lane_ids - lane_ids_ = get_all_rows_with_value( - self._gdf["lane_connectors"], "lane_group_connector_fid", lane_group_connector_id - )["fid"].tolist() - lane_ids.append(lane_ids_) - - # 2. predecessor_lane_group_ids, successor_lane_group_ids - lane_group_connector_row = get_row_with_value( - self._gdf["lane_group_connectors"], "fid", lane_group_connector_id + predecessor_lane_group_ids = [str(lane_group_connector_row["from_lane_group_fid"])] + successor_lane_group_ids = [str(lane_group_connector_row["to_lane_group_fid"])] + + # 3. 
left_boundaries, right_boundaries + left_boundary_fid = lane_group_connector_row["left_boundary_fid"] + left_boundary = get_row_with_value(nuplan_gdf["boundaries"], "fid", str(left_boundary_fid))["geometry"] + right_boundary_fid = lane_group_connector_row["right_boundary_fid"] + right_boundary = get_row_with_value(nuplan_gdf["boundaries"], "fid", str(right_boundary_fid))["geometry"] + + map_writer.write_lane_group( + CacheLaneGroup( + object_id=lane_group_connector_id, + lane_ids=lane_ids, + left_boundary=Polyline3D.from_linestring(left_boundary), + right_boundary=Polyline3D.from_linestring(right_boundary), + intersection_id=all_intersection_ids[idx], + predecessor_ids=predecessor_lane_group_ids, + successor_ids=successor_lane_group_ids, + outline=None, + geometry=lane_group_connector_row.geometry, ) - predecessor_lane_group_ids.append([str(lane_group_connector_row["from_lane_group_fid"])]) - successor_lane_group_ids.append([str(lane_group_connector_row["to_lane_group_fid"])]) - - # 3. left_boundaries, right_boundaries - left_boundary_fid = lane_group_connector_row["left_boundary_fid"] - left_boundary = get_row_with_value(self._gdf["boundaries"], "fid", str(left_boundary_fid))["geometry"] - right_boundary_fid = lane_group_connector_row["right_boundary_fid"] - right_boundary = get_row_with_value(self._gdf["boundaries"], "fid", str(right_boundary_fid))["geometry"] - - left_boundaries.append(left_boundary) - right_boundaries.append(right_boundary) - - data = pd.DataFrame( - { - "id": ids, - "lane_ids": lane_ids, - "intersection_id": intersection_ids, - "predecessor_lane_group_ids": predecessor_lane_group_ids, - "successor_lane_group_ids": successor_lane_group_ids, - "left_boundary": left_boundaries, - "right_boundary": right_boundaries, - } ) - gdf = gpd.GeoDataFrame(data, geometry=geometries) - return gdf - - def _extract_intersection_dataframe(self) -> gpd.GeoDataFrame: - # NOTE: drops: creator_id, intersection_type_fid (?), is_mini (?) - ids = self._gdf["intersections"].fid.to_list() - lane_group_ids = [] - for intersection_id in ids: - lane_group_connector_ids = get_all_rows_with_value( - self._gdf["lane_group_connectors"], "intersection_fid", str(intersection_id) - )["fid"].tolist() - lane_group_ids.append(lane_group_connector_ids) - data = pd.DataFrame({"id": ids, "lane_group_ids": lane_group_ids}) - return gpd.GeoDataFrame(data, geometry=self._gdf["intersections"].geometry.to_list()) - - def _extract_crosswalk_dataframe(self) -> gpd.GeoDataFrame: - # NOTE: drops: creator_id, intersection_fids, lane_fids, is_marked (?) 
- data = pd.DataFrame({"id": self._gdf["crosswalks"].fid.to_list()}) - return gpd.GeoDataFrame(data, geometry=self._gdf["crosswalks"].geometry.to_list()) - - def _extract_walkway_dataframe(self) -> gpd.GeoDataFrame: - # NOTE: drops: creator_id - data = pd.DataFrame({"id": self._gdf["walkways"].fid.to_list()}) - return gpd.GeoDataFrame(data, geometry=self._gdf["walkways"].geometry.to_list()) - - def _extract_carpark_dataframe(self) -> gpd.GeoDataFrame: - # NOTE: drops: heading, creator_id - data = pd.DataFrame({"id": self._gdf["carpark_areas"].fid.to_list()}) - return gpd.GeoDataFrame(data, geometry=self._gdf["carpark_areas"].geometry.to_list()) - - def _extract_generic_drivable_dataframe(self) -> gpd.GeoDataFrame: - # NOTE: drops: creator_id - data = pd.DataFrame({"id": self._gdf["generic_drivable_areas"].fid.to_list()}) - return gpd.GeoDataFrame(data, geometry=self._gdf["generic_drivable_areas"].geometry.to_list()) - - def _extract_road_edge_dataframe(self) -> gpd.GeoDataFrame: - drivable_polygons = ( - self._gdf["intersections"].geometry.to_list() - + self._gdf["lane_groups_polygons"].geometry.to_list() - + self._gdf["carpark_areas"].geometry.to_list() - + self._gdf["generic_drivable_areas"].geometry.to_list() + + +def _write_nuplan_intersections(nuplan_gdf: Dict[str, gpd.GeoDataFrame], map_writer: AbstractMapWriter) -> None: + # NOTE: drops: creator_id, intersection_type_fid (?), is_mini (?) + all_ids = nuplan_gdf["intersections"].fid.to_list() + all_geometries = nuplan_gdf["intersections"].geometry.to_list() + for idx, intersection_id in enumerate(all_ids): + lane_group_connector_ids = get_all_rows_with_value( + nuplan_gdf["lane_group_connectors"], "intersection_fid", str(intersection_id) + )["fid"].tolist() + + map_writer.write_intersection( + CacheIntersection( + object_id=intersection_id, + lane_group_ids=lane_group_connector_ids, + geometry=all_geometries[idx], + ) ) - road_edge_linear_rings = get_road_edge_linear_rings(drivable_polygons) - road_edges = split_line_geometry_by_max_length(road_edge_linear_rings, MAX_ROAD_EDGE_LENGTH) - - ids = [] - road_edge_types = [] - for idx in range(len(road_edges)): - ids.append(idx) - # TODO @DanielDauner: Figure out if other types should/could be assigned here. - road_edge_types.append(int(RoadEdgeType.ROAD_EDGE_BOUNDARY)) - - data = pd.DataFrame({"id": ids, "road_edge_type": road_edge_types}) - return gpd.GeoDataFrame(data, geometry=road_edges) - - def _extract_road_line_dataframe(self) -> gpd.GeoDataFrame: - boundaries = self._gdf["boundaries"].geometry.to_list() - fids = self._gdf["boundaries"].fid.to_list() - boundary_types = self._gdf["boundaries"].boundary_type_fid.to_list() - - ids = [] - road_line_types = [] - geometries = [] - - for idx in range(len(boundary_types)): - ids.append(fids[idx]) - road_line_types.append(int(NUPLAN_ROAD_LINE_CONVERSION[boundary_types[idx]])) - geometries.append(boundaries[idx]) - - data = pd.DataFrame( - { - "id": ids, - "road_line_type": road_line_types, - } + + +def _write_nuplan_crosswalks(nuplan_gdf: Dict[str, gpd.GeoDataFrame], map_writer: AbstractMapWriter) -> None: + # NOTE: drops: creator_id, intersection_fids, lane_fids, is_marked (?) 
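The crosswalk loop below and the walkway, carpark and generic-drivable writers after it all reduce to the same fid/geometry iteration. A hypothetical generic helper makes the shared shape explicit; this is a sketch of the pattern only, since the patch deliberately keeps one explicit function per layer:

from typing import Callable, Type

import geopandas as gpd


def write_plain_surface_layer(gdf: gpd.GeoDataFrame, write_fn: Callable, cache_cls: Type) -> None:
    # Plain surface layers carry only an object id and a polygon geometry.
    for object_id, geometry in zip(gdf.fid.to_list(), gdf.geometry.to_list()):
        write_fn(cache_cls(object_id=object_id, geometry=geometry))


# e.g. write_plain_surface_layer(nuplan_gdf["walkways"], map_writer.write_walkway, CacheWalkway)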
+    for id, geometry in zip(nuplan_gdf["crosswalks"].fid.to_list(), nuplan_gdf["crosswalks"].geometry.to_list()):
+        map_writer.write_crosswalk(CacheCrosswalk(object_id=id, geometry=geometry))
+
+
+def _write_nuplan_walkways(nuplan_gdf: Dict[str, gpd.GeoDataFrame], map_writer: AbstractMapWriter) -> None:
+    # NOTE: drops: creator_id
+    for id, geometry in zip(nuplan_gdf["walkways"].fid.to_list(), nuplan_gdf["walkways"].geometry.to_list()):
+        map_writer.write_walkway(CacheWalkway(object_id=id, geometry=geometry))
+
+
+def _write_nuplan_carparks(nuplan_gdf: Dict[str, gpd.GeoDataFrame], map_writer: AbstractMapWriter) -> None:
+    # NOTE: drops: creator_id
+    for id, geometry in zip(nuplan_gdf["carpark_areas"].fid.to_list(), nuplan_gdf["carpark_areas"].geometry.to_list()):
+        map_writer.write_carpark(CacheCarpark(object_id=id, geometry=geometry))
+
+
+def _write_nuplan_generic_drivables(nuplan_gdf: Dict[str, gpd.GeoDataFrame], map_writer: AbstractMapWriter) -> None:
+    # NOTE: drops: creator_id
+    for id, geometry in zip(
+        nuplan_gdf["generic_drivable_areas"].fid.to_list(), nuplan_gdf["generic_drivable_areas"].geometry.to_list()
+    ):
+        map_writer.write_generic_drivable(CacheGenericDrivable(object_id=id, geometry=geometry))
+
+
+def _write_nuplan_road_edges(nuplan_gdf: Dict[str, gpd.GeoDataFrame], map_writer: AbstractMapWriter) -> None:
+    drivable_polygons = (
+        nuplan_gdf["intersections"].geometry.to_list()
+        + nuplan_gdf["lane_groups_polygons"].geometry.to_list()
+        + nuplan_gdf["carpark_areas"].geometry.to_list()
+        + nuplan_gdf["generic_drivable_areas"].geometry.to_list()
+    )
+    road_edge_linear_rings = get_road_edge_linear_rings(drivable_polygons)
+    road_edges = split_line_geometry_by_max_length(road_edge_linear_rings, MAX_ROAD_EDGE_LENGTH)
+
+    for idx in range(len(road_edges)):
+        map_writer.write_road_edge(
+            CacheRoadEdge(
+                object_id=idx,
+                road_edge_type=RoadEdgeType.ROAD_EDGE_BOUNDARY,
+                polyline=Polyline2D.from_linestring(road_edges[idx]),
+            )
         )
-    return gpd.GeoDataFrame(data, geometry=geometries)


-def flip_linestring(linestring: LineString) -> LineString:
+def _write_nuplan_road_lines(nuplan_gdf: Dict[str, gpd.GeoDataFrame], map_writer: AbstractMapWriter) -> None:
+    boundaries = nuplan_gdf["boundaries"].geometry.to_list()
+    fids = nuplan_gdf["boundaries"].fid.to_list()
+    boundary_types = nuplan_gdf["boundaries"].boundary_type_fid.to_list()
+
+    for idx in range(len(boundary_types)):
+        map_writer.write_road_line(
+            CacheRoadLine(
+                object_id=fids[idx],
+                road_line_type=NUPLAN_ROAD_LINE_CONVERSION[boundary_types[idx]],
+                polyline=Polyline2D.from_linestring(boundaries[idx]),
+            )
+        )
+
+
+def _load_nuplan_gdf(map_file_path: Path) -> Dict[str, gpd.GeoDataFrame]:
+
+    # The projected coordinate system depends on which UTM zone the mapped location is in.
+    map_meta = gpd.read_file(map_file_path, layer="meta", engine="pyogrio")
+    projection_system = map_meta[map_meta["key"] == "projectedCoordSystem"]["value"].iloc[0]
+
+    nuplan_gdf: Dict[str, gpd.GeoDataFrame] = {}
+    for layer_name in NUPLAN_MAP_GPKG_LAYERS:
+        with warnings.catch_warnings():
+            # Suppress the warnings from the GPKG operations below so that they don't spam the training logs.
+            warnings.filterwarnings("ignore")
+
+            gdf_in_pixel_coords = pyogrio.read_dataframe(map_file_path, layer=layer_name, fid_as_index=True)
+            gdf_in_utm_coords = gdf_in_pixel_coords.to_crs(projection_system)
+            # gdf_in_utm_coords = gdf_in_pixel_coords
+
+            # For backwards compatibility, cast the index to string datatype
+            # and mirror it to the "fid" column. 
+ gdf_in_utm_coords.index = gdf_in_utm_coords.index.map(str) + gdf_in_utm_coords["fid"] = gdf_in_utm_coords.index + + nuplan_gdf[layer_name] = gdf_in_utm_coords + + return nuplan_gdf + + +def _flip_linestring(linestring: LineString) -> LineString: # TODO: move somewhere more appropriate or implement in Polyline2D, PolylineSE2, etc. return LineString(linestring.coords[::-1]) @@ -459,5 +397,5 @@ def lines_same_direction(centerline: LineString, boundary: LineString) -> bool: def align_boundary_direction(centerline: LineString, boundary: LineString) -> LineString: # TODO: refactor helper function. if not lines_same_direction(centerline, boundary): - return flip_linestring(boundary) + return _flip_linestring(boundary) return boundary diff --git a/d123/conversion/datasets/nuplan/utils/nuplan_constants.py b/d123/conversion/datasets/nuplan/utils/nuplan_constants.py index d68bf672..b2a5876c 100644 --- a/d123/conversion/datasets/nuplan/utils/nuplan_constants.py +++ b/d123/conversion/datasets/nuplan/utils/nuplan_constants.py @@ -3,6 +3,7 @@ from d123.datatypes.detections.detection import TrafficLightStatus from d123.datatypes.detections.detection_types import DetectionType +from d123.datatypes.maps.map_datatypes import RoadLineType from d123.datatypes.time.time_point import TimePoint @@ -41,10 +42,10 @@ class NuPlanBoxDetectionType(IntEnum): "nuplan_train", "nuplan_val", "nuplan_test", - "nuplan_mini_train", - "nuplan_mini_val", - "nuplan_mini_test", - "nuplan_private_test", # TODO: remove, not publicly available + "nuplan-mini_train", + "nuplan-mini_val", + "nuplan-mini_test", + "nuplan-private_test", # TODO: remove, not publicly available } NUPLAN_MAP_LOCATIONS: List[str] = [ @@ -81,5 +82,11 @@ class NuPlanBoxDetectionType(IntEnum): "gen_lane_connectors_scaled_width_polygons", } +NUPLAN_ROAD_LINE_CONVERSION = { + 0: RoadLineType.DASHED_WHITE, + 2: RoadLineType.SOLID_WHITE, + 3: RoadLineType.UNKNOWN, +} + NUPLAN_ROLLING_SHUTTER_S: Final[TimePoint] = TimePoint.from_s(1 / 60) diff --git a/d123/conversion/datasets/wopd/wopd_converter.py b/d123/conversion/datasets/wopd/wopd_converter.py index 0c9ef209..1f1a2cbd 100644 --- a/d123/conversion/datasets/wopd/wopd_converter.py +++ b/d123/conversion/datasets/wopd/wopd_converter.py @@ -149,10 +149,10 @@ def convert_log(self, log_index: int, log_writer: AbstractLogWriter) -> None: ) # 2. Prepare log writer - overwrite_log = log_writer.reset(self.dataset_converter_config, log_metadata) + log_needs_writing = log_writer.reset(self.dataset_converter_config, log_metadata) # 3. 
Process source log data - if overwrite_log: + if log_needs_writing: try: for frame_idx, data in enumerate(dataset): frame = dataset_pb2.Frame() diff --git a/d123/conversion/log_writer/arrow_log_writer.py b/d123/conversion/log_writer/arrow_log_writer.py index 732028b6..4d27492d 100644 --- a/d123/conversion/log_writer/arrow_log_writer.py +++ b/d123/conversion/log_writer/arrow_log_writer.py @@ -1,5 +1,5 @@ from pathlib import Path -from typing import Any, Dict, List, Literal, Optional, Tuple +from typing import Any, Dict, List, Literal, Optional, Tuple, Union import pyarrow as pa @@ -19,10 +19,12 @@ class ArrowLogWriter(AbstractLogWriter): def __init__( self, + logs_root: Union[str, Path], compression: Optional[Literal["lz4", "zstd"]] = None, compression_level: Optional[int] = None, ) -> None: + self._logs_root = Path(logs_root) self._compression = compression self._compression_level = compression_level @@ -35,14 +37,12 @@ def __init__( def reset(self, dataset_converter_config: DatasetConverterConfig, log_metadata: LogMetadata) -> bool: - overwrite_log: bool = False - sink_log_path: Path = ( - dataset_converter_config.output_path / log_metadata.split / f"{log_metadata.log_name}.arrow" - ) + log_needs_writing: bool = False + sink_log_path: Path = self._logs_root / log_metadata.split / f"{log_metadata.log_name}.arrow" # Check if the log file already exists or needs to be overwritten if not sink_log_path.exists() or dataset_converter_config.force_log_conversion: - overwrite_log = True + log_needs_writing = True # Delete the file if it exists (no error if it doesn't) sink_log_path.unlink(missing_ok=True) @@ -66,7 +66,7 @@ def reset(self, dataset_converter_config: DatasetConverterConfig, log_metadata: self._source = pa.OSFile(str(sink_log_path), "wb") self._record_batch_writer = pa.ipc.new_file(self._source, schema=self._schema, options=options) - return overwrite_log + return log_needs_writing def write( self, diff --git a/d123/conversion/map_writer/abstract_map_writer.py b/d123/conversion/map_writer/abstract_map_writer.py index e69de29b..c49a1181 100644 --- a/d123/conversion/map_writer/abstract_map_writer.py +++ b/d123/conversion/map_writer/abstract_map_writer.py @@ -0,0 +1,69 @@ +import abc +from abc import abstractmethod + +from d123.conversion.dataset_converter_config import DatasetConverterConfig +from d123.datatypes.maps.abstract_map_objects import ( + AbstractCarpark, + AbstractCrosswalk, + AbstractGenericDrivable, + AbstractIntersection, + AbstractLane, + AbstractLaneGroup, + AbstractRoadEdge, + AbstractRoadLine, + AbstractStopLine, + AbstractWalkway, +) +from d123.datatypes.scene.scene_metadata import LogMetadata + + +class AbstractMapWriter(abc.ABC): + """Abstract base class for map writers.""" + + @abstractmethod + def reset(self, dataset_converter_config: DatasetConverterConfig, log_metadata: LogMetadata) -> bool: + """Reset the writer to its initial state.""" + + @abstractmethod + def write_lane(self, lane: AbstractLane) -> None: + """Write a lane to the map.""" + + @abstractmethod + def write_lane_group(self, lane: AbstractLaneGroup) -> None: + """Write a group of lanes to the map.""" + + @abstractmethod + def write_intersection(self, intersection: AbstractIntersection) -> None: + """Write an intersection to the map.""" + + @abstractmethod + def write_crosswalk(self, crosswalk: AbstractCrosswalk) -> None: + """Write a crosswalk to the map.""" + + @abstractmethod + def write_carpark(self, carpark: AbstractCarpark) -> None: + """Write a car park to the map.""" + + @abstractmethod + 
def write_walkway(self, walkway: AbstractWalkway) -> None: + """Write a walkway to the map.""" + + @abstractmethod + def write_generic_drivable(self, obj: AbstractGenericDrivable) -> None: + """Write a generic drivable area to the map.""" + + @abstractmethod + def write_stop_line(self, stop_line: AbstractStopLine) -> None: + """Write a stop lines to the map.""" + + @abstractmethod + def write_road_edge(self, road_edge: AbstractRoadEdge) -> None: + """Write a road edge to the map.""" + + @abstractmethod + def write_road_line(self, road_line: AbstractRoadLine) -> None: + """Write a road line to the map.""" + + @abstractmethod + def close(self) -> None: + """Close the writer and finalize any resources.""" diff --git a/d123/conversion/map_writer/gpkg_map_writer.py b/d123/conversion/map_writer/gpkg_map_writer.py index e69de29b..b7696868 100644 --- a/d123/conversion/map_writer/gpkg_map_writer.py +++ b/d123/conversion/map_writer/gpkg_map_writer.py @@ -0,0 +1,177 @@ +from collections import defaultdict +from pathlib import Path +from typing import Dict, List, Optional, Union + +import geopandas as gpd +import pandas as pd +import shapely.geometry as geom + +from d123.conversion.dataset_converter_config import DatasetConverterConfig +from d123.conversion.map_writer.abstract_map_writer import AbstractMapWriter +from d123.datatypes.maps.abstract_map_objects import ( + AbstractCarpark, + AbstractCrosswalk, + AbstractGenericDrivable, + AbstractIntersection, + AbstractLane, + AbstractLaneGroup, + AbstractLineMapObject, + AbstractRoadEdge, + AbstractRoadLine, + AbstractStopLine, + AbstractSurfaceMapObject, + AbstractWalkway, +) +from d123.datatypes.maps.map_datatypes import MapLayer +from d123.datatypes.scene.scene_metadata import LogMetadata +from d123.geometry.polyline import Polyline3D + +MAP_OBJECT_DATA = Dict[str, List[Union[str, int, float, bool, geom.base.BaseGeometry]]] + + +class GPKGMapWriter(AbstractMapWriter): + """Abstract base class for map writers.""" + + def __init__(self, maps_root: Union[str, Path]) -> None: + self._maps_root = Path(maps_root) + self._crs: str = "EPSG:4326" # WGS84 + + # Data to be written to the map for each object type + self._map_data: Optional[Dict[MapLayer, MAP_OBJECT_DATA]] = None + self._map_file: Optional[Path] = None + + def reset(self, dataset_converter_config: DatasetConverterConfig, log_metadata: LogMetadata) -> bool: + """Inherited, see superclass.""" + + map_needs_writing: bool = False + + if dataset_converter_config.include_map: + if log_metadata.map_is_local: + split, log_name = log_metadata.split, log_metadata.log_name + map_file = self._maps_root / split / f"{log_name}.gpkg" + else: + dataset, location = log_metadata.dataset, log_metadata.location + map_file = self._maps_root / dataset / f"{dataset}_{location}.gpkg" + + map_needs_writing = dataset_converter_config.force_map_conversion or not map_file.exists() + if map_needs_writing: + # Reset all map layers + self._map_file = map_file + self._map_data = {map_layer: defaultdict(list) for map_layer in MapLayer} + + return map_needs_writing + + def write_lane(self, lane: AbstractLane) -> None: + """Inherited, see superclass.""" + self._write_surface_layer(MapLayer.LANE, lane) + self._map_data[MapLayer.LANE]["lane_group_id"].append(lane.lane_group_id) + self._map_data[MapLayer.LANE]["left_boundary"].append(lane.left_boundary.linestring) + self._map_data[MapLayer.LANE]["right_boundary"].append(lane.right_boundary.linestring) + self._map_data[MapLayer.LANE]["centerline"].append(lane.centerline.linestring) + 
self._map_data[MapLayer.LANE]["left_lane_id"].append(lane.left_lane_id) + self._map_data[MapLayer.LANE]["right_lane_id"].append(lane.right_lane_id) + self._map_data[MapLayer.LANE]["predecessor_ids"].append(lane.predecessor_ids) + self._map_data[MapLayer.LANE]["successor_ids"].append(lane.successor_ids) + self._map_data[MapLayer.LANE]["speed_limit_mps"].append(lane.speed_limit_mps) + + def write_lane_group(self, lane_group: AbstractLaneGroup) -> None: + """Inherited, see superclass.""" + self._write_surface_layer(MapLayer.LANE_GROUP, lane_group) + self._map_data[MapLayer.LANE_GROUP]["lane_ids"].append(lane_group.lane_ids) + self._map_data[MapLayer.LANE_GROUP]["intersection_id"].append(lane_group.intersection_id) + self._map_data[MapLayer.LANE_GROUP]["predecessor_ids"].append(lane_group.predecessor_ids) + self._map_data[MapLayer.LANE_GROUP]["successor_ids"].append(lane_group.successor_ids) + self._map_data[MapLayer.LANE_GROUP]["left_boundary"].append(lane_group.left_boundary.linestring) + self._map_data[MapLayer.LANE_GROUP]["right_boundary"].append(lane_group.right_boundary.linestring) + + def write_intersection(self, intersection: AbstractIntersection) -> None: + """Inherited, see superclass.""" + self._write_surface_layer(MapLayer.INTERSECTION, intersection) + self._map_data[MapLayer.INTERSECTION]["lane_group_ids"].append(intersection.lane_group_ids) + + def write_crosswalk(self, crosswalk: AbstractCrosswalk) -> None: + """Inherited, see superclass.""" + self._write_surface_layer(MapLayer.CROSSWALK, crosswalk) + + def write_carpark(self, carpark: AbstractCarpark) -> None: + """Inherited, see superclass.""" + self._write_surface_layer(MapLayer.CARPARK, carpark) + + def write_walkway(self, walkway: AbstractWalkway) -> None: + """Inherited, see superclass.""" + self._write_surface_layer(MapLayer.WALKWAY, walkway) + + def write_generic_drivable(self, obj: AbstractGenericDrivable) -> None: + """Inherited, see superclass.""" + self._write_surface_layer(MapLayer.GENERIC_DRIVABLE, obj) + + def write_stop_line(self, stop_line: AbstractStopLine) -> None: + """Inherited, see superclass.""" + # self._write_line_layer(MapLayer.STOP_LINE, stop_line) + raise NotImplementedError("Stop lines are not yet supported in GPKG maps.") + + def write_road_edge(self, road_edge: AbstractRoadEdge) -> None: + """Inherited, see superclass.""" + self._write_line_layer(MapLayer.ROAD_EDGE, road_edge) + self._map_data[MapLayer.ROAD_EDGE]["road_edge_type"].append(road_edge.road_edge_type) + + def write_road_line(self, road_line: AbstractRoadLine) -> None: + """Inherited, see superclass.""" + self._write_line_layer(MapLayer.ROAD_LINE, road_line) + self._map_data[MapLayer.ROAD_LINE]["road_line_type"].append(road_line.road_line_type) + + def close(self) -> None: + """Inherited, see superclass.""" + + if self._map_file is not None or self._map_data is not None: + + if not self._map_file.parent.exists(): + self._map_file.parent.mkdir(parents=True, exist_ok=True) + + for map_layer, layer_data in self._map_data.items(): + if len(layer_data["id"]) > 0: + df = pd.DataFrame(layer_data) + gdf = gpd.GeoDataFrame(df, geometry="geometry", crs=self._crs) + gdf.to_file(self._map_file, driver="GPKG", layer=map_layer.serialize()) + + del self._map_file, self._map_data + self._map_file = None + self._map_data = None + + def _assert_initialized(self) -> None: + assert self._map_data is not None, "Call reset() before writing data." 
+
+    def _write_surface_layer(self, layer: MapLayer, surface_object: AbstractSurfaceMapObject) -> None:
+        """Helper to write surface map objects.
+
+        :param layer: map layer of surface object
+        :param surface_object: surface map object to write
+        """
+        self._assert_initialized()
+        self._map_data[layer]["id"].append(surface_object.object_id)
+        # NOTE: if the outline has a z-coordinate, we store it, and otherwise infer it from the geometry
+        if isinstance(surface_object.outline, Polyline3D):
+            self._map_data[layer]["outline"].append(surface_object.outline.linestring)
+        self._map_data[layer]["geometry"].append(surface_object.shapely_polygon)
+
+    def _write_line_layer(self, layer: MapLayer, line_object: AbstractLineMapObject) -> None:
+        """Helper to write line map objects.
+
+        :param layer: map layer of line object
+        :param line_object: line map object to write
+        """
+        self._assert_initialized()
+        self._map_data[layer]["id"].append(line_object.object_id)
+        self._map_data[layer]["geometry"].append(line_object.shapely_linestring)
+
+    def _get_gpd_dataframe(self, layer: MapLayer, layer_data: MAP_OBJECT_DATA) -> gpd.GeoDataFrame:
+        """Helper to convert map data to a GeoPandas DataFrame.
+
+        :param layer: map layer of data
+        :param layer_data: map data to convert
+        :return: GeoPandas DataFrame of map data
+        """
+
+        df = pd.DataFrame(layer_data)
+        gdf = gpd.GeoDataFrame(df, geometry="geometry", crs=self._crs)
+        return gdf
diff --git a/d123/datatypes/maps/abstract_map_objects.py b/d123/datatypes/maps/abstract_map_objects.py
index 67f63e5c..4b08cb05 100644
--- a/d123/datatypes/maps/abstract_map_objects.py
+++ b/d123/datatypes/maps/abstract_map_objects.py
@@ -1,7 +1,7 @@
 from __future__ import annotations
 
 import abc
-from typing import List, Optional, Tuple
+from typing import List, Optional, Tuple, TypeAlias, Union
 
 import shapely.geometry as geom
 import trimesh
@@ -9,18 +9,22 @@
 from d123.datatypes.maps.map_datatypes import MapLayer, RoadEdgeType, RoadLineType
 from d123.geometry import Polyline2D, Polyline3D, PolylineSE2
 
+# TODO: Refactor and just use int
+# type MapObjectIDType = Union[str, int] for Python >= 3.12
+MapObjectIDType: TypeAlias = Union[str, int]
+
 
 class AbstractMapObject(abc.ABC):
     """
     Base interface representation of all map objects.
     """
 
-    def __init__(self, object_id: str):
+    def __init__(self, object_id: MapObjectIDType):
         """
         Constructor of the base map object type.
         :param object_id: unique identifier of the map object.
         """
-        self.id = str(object_id)
+        self.object_id: MapObjectIDType = object_id
 
     @property
     @abc.abstractmethod
@@ -46,10 +50,10 @@ def shapely_polygon(self) -> geom.Polygon:
 
     @property
     @abc.abstractmethod
-    def outline_3d(self) -> Polyline3D:
+    def outline(self) -> Union[Polyline2D, Polyline3D]:
         """
-        Returns the 3D outline of the map object.
-        :return: 3D polyline
+        Returns the 2D or 3D outline of the map surface, if available.
+        :return: 2D or 3D polyline
         """
 
     @property
@@ -60,19 +64,49 @@ def trimesh_mesh(self) -> trimesh.Trimesh:
         :return: Trimesh
         """
 
+    @property
+    def outline_3d(self) -> Polyline3D:
+        """Returns the 3D outline of the map surface, or converts 2D to 3D if necessary.
+
+        :return: 3D polyline
+        """
+        if isinstance(self.outline, Polyline3D):
+            return self.outline
+        # Converts 2D polyline to 3D by adding a default (zero) z-coordinate
+        return Polyline3D.from_linestring(self.outline.linestring)
+
+    @property
     def outline_2d(self) -> Polyline2D:
-        return self.outline_3d.polyline_2d
+        """Returns the 2D outline of the map surface, or converts 3D to 2D if necessary. 
+ + :return: 2D polyline + """ + if isinstance(self.outline, Polyline2D): + return self.outline + # Converts 3D polyline to 2D by dropping the z-coordinate + return self.outline.polyline_2d class AbstractLineMapObject(AbstractMapObject): @property @abc.abstractmethod + def polyline(self) -> Union[Polyline2D, Polyline3D]: + """ + Returns the polyline of the road edge, either 2D or 3D. + :return: polyline + """ + + @property def polyline_3d(self) -> Polyline3D: """ Returns the 3D polyline of the road edge. :return: 3D polyline """ + if isinstance(self.polyline, Polyline3D): + return self.polyline + # Converts 2D polyline to 3D by adding a default (zero) z-coordinate + return Polyline3D.from_linestring(self.polyline.linestring) @property def polyline_2d(self) -> Polyline2D: @@ -80,7 +114,10 @@ def polyline_2d(self) -> Polyline2D: Returns the 2D polyline of the road line. :return: 2D polyline """ - return self.polyline_3d.polyline_2d + if isinstance(self.polyline, Polyline2D): + return self.polyline + # Converts 3D polyline to 2D by dropping the z-coordinate + return self.polyline.polyline_2d @property def polyline_se2(self) -> PolylineSE2: @@ -88,7 +125,15 @@ def polyline_se2(self) -> PolylineSE2: Returns the 2D polyline of the road line in SE(2) coordinates. :return: 2D polyline in SE(2) """ - return self.polyline_3d.polyline_se2 + return self.polyline_2d.polyline_se2 + + @property + def shapely_linestring(self) -> geom.LineString: + """ + Returns the shapely linestring of the line, either 2D or 3D. + :return: shapely linestring + """ + return self.polyline.linestring class AbstractLane(AbstractSurfaceMapObject): @@ -106,6 +151,14 @@ def speed_limit_mps(self) -> Optional[float]: :return: float or none """ + @property + @abc.abstractmethod + def successor_ids(self) -> List[MapObjectIDType]: + """ + Property of succeeding lane object ids (front). + :return: list of lane ids + """ + @property @abc.abstractmethod def successors(self) -> List[AbstractLane]: @@ -114,6 +167,14 @@ def successors(self) -> List[AbstractLane]: :return: list of lane class """ + @property + @abc.abstractmethod + def predecessor_ids(self) -> List[MapObjectIDType]: + """ + Property of preceding lane object ids (behind). + :return: list of lane ids + """ + @property @abc.abstractmethod def predecessors(self) -> List[AbstractLane]: @@ -138,6 +199,14 @@ def right_boundary(self) -> Polyline3D: :return: returns 3D polyline """ + @property + @abc.abstractmethod + def left_lane_id(self) -> Optional[MapObjectIDType]: + """ + Property of left lane id of lane. + :return: returns left lane id or none, if no left lane + """ + @property @abc.abstractmethod def left_lane(self) -> Optional[AbstractLane]: @@ -146,6 +215,14 @@ def left_lane(self) -> Optional[AbstractLane]: :return: returns left lane or none, if no left lane """ + @property + @abc.abstractmethod + def right_lane_id(self) -> Optional[MapObjectIDType]: + """ + Property of right lane id of lane. + :return: returns right lane id or none, if no right lane + """ + @property @abc.abstractmethod def right_lane(self) -> Optional[AbstractLane]: @@ -162,6 +239,14 @@ def centerline(self) -> Polyline3D: :return: returns 3D polyline """ + @property + @abc.abstractmethod + def lane_group_id(self) -> AbstractLaneGroup: + """ + Property of lane group id of lane. 
+ :return: returns lane group id + """ + @property @abc.abstractmethod def lane_group(self) -> AbstractLaneGroup: @@ -186,20 +271,36 @@ class AbstractLaneGroup(AbstractSurfaceMapObject): def layer(self) -> MapLayer: return MapLayer.LANE_GROUP + @property + @abc.abstractmethod + def successor_ids(self) -> List[MapObjectIDType]: + """ + Property of succeeding lane object ids (front). + :return: list of lane group ids + """ + @property @abc.abstractmethod def successors(self) -> List[AbstractLaneGroup]: """ - Property of succeeding lane objects (front). - :return: list of lane class + Property of succeeding lane group objects (front). + :return: list of lane group class + """ + + @property + @abc.abstractmethod + def predecessor_ids(self) -> List[MapObjectIDType]: + """ + Property of preceding lane object ids (behind). + :return: list of lane group ids """ @property @abc.abstractmethod def predecessors(self) -> List[AbstractLaneGroup]: """ - Property of preceding lane objects (behind). - :return: list of lane class + Property of preceding lane group objects (behind). + :return: list of lane group class """ @property @@ -218,6 +319,14 @@ def right_boundary(self) -> Polyline3D: :return: returns 3D polyline """ + @property + @abc.abstractmethod + def lane_ids(self) -> List[MapObjectIDType]: + """ + Property of interior lane ids of a lane group. + :return: returns list of lane ids + """ + @property @abc.abstractmethod def lanes(self) -> List[AbstractLane]: @@ -226,6 +335,14 @@ def lanes(self) -> List[AbstractLane]: :return: returns list of lanes """ + @property + @abc.abstractmethod + def intersection_id(self) -> Optional[MapObjectIDType]: + """ + Property of intersection id of a lane group. + :return: returns intersection id or none, if lane group not in intersection + """ + @property @abc.abstractmethod def intersection(self) -> Optional[AbstractIntersection]: @@ -242,6 +359,14 @@ class AbstractIntersection(AbstractSurfaceMapObject): def layer(self) -> MapLayer: return MapLayer.INTERSECTION + @property + @abc.abstractmethod + def lane_group_ids(self) -> List[MapObjectIDType]: + """ + Property of lane group ids of intersection. + :return: returns list of lane group ids + """ + @property @abc.abstractmethod def lane_groups(self) -> List[AbstractLaneGroup]: diff --git a/d123/datatypes/maps/cache/__init__.py b/d123/datatypes/maps/cache/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/d123/datatypes/maps/cache/cache_map_objects.py b/d123/datatypes/maps/cache/cache_map_objects.py new file mode 100644 index 00000000..85a89673 --- /dev/null +++ b/d123/datatypes/maps/cache/cache_map_objects.py @@ -0,0 +1,311 @@ +from __future__ import annotations + +from typing import List, Optional, Union + +import numpy as np +import shapely.geometry as geom +import trimesh + +from d123.datatypes.maps.abstract_map_objects import ( + AbstractCarpark, + AbstractCrosswalk, + AbstractGenericDrivable, + AbstractIntersection, + AbstractLane, + AbstractLaneGroup, + AbstractLineMapObject, + AbstractRoadEdge, + AbstractRoadLine, + AbstractSurfaceMapObject, + AbstractWalkway, + MapObjectIDType, +) +from d123.datatypes.maps.map_datatypes import MapLayer, RoadEdgeType, RoadLineType +from d123.geometry import Polyline3D +from d123.geometry.polyline import Polyline2D + + +class CacheSurfaceObject(AbstractSurfaceMapObject): + """ + Base interface representation of all map objects. 
+ """ + + def __init__( + self, + object_id: MapObjectIDType, + outline: Optional[Union[Polyline2D, Polyline3D]] = None, + geometry: Optional[geom.Polygon] = None, + ) -> None: + super().__init__(object_id) + + assert outline is not None or geometry is not None, "Either outline or geometry must be provided." + + if outline is None: + outline = Polyline3D.from_linestring(geometry.exterior) + + if geometry is None: + geometry = geom.Polygon(outline.array[:, :2]) + + self._outline = outline + self._geometry = geometry + + outline = property(lambda self: self._outline) + + @property + def shapely_polygon(self) -> geom.Polygon: + """Inherited, see superclass.""" + return self._geometry + + @property + def outline_3d(self) -> Polyline3D: + """Inherited, see superclass.""" + if isinstance(self.outline, Polyline3D): + return self.outline + # Converts 2D polyline to 3D by adding a default (zero) z-coordinate + return Polyline3D.from_linestring(self.outline.linestring) + + @property + def trimesh_mesh(self) -> trimesh.Trimesh: + """Inherited, see superclass.""" + raise NotImplementedError + + +class CacheLineObject(AbstractLineMapObject): + + def __init__(self, object_id: MapObjectIDType, polyline: Union[Polyline2D, Polyline3D]) -> None: + """ + Constructor of the base line map object type. + :param object_id: unique identifier of a line map object. + """ + super().__init__(object_id) + self._polyline = polyline + + polyline = property(lambda self: self._polyline) + + +class CacheLane(CacheSurfaceObject, AbstractLane): + + def __init__( + self, + object_id: MapObjectIDType, + lane_group_id: MapObjectIDType, + left_boundary: Polyline3D, + right_boundary: Polyline3D, + centerline: Polyline3D, + left_lane_id: Optional[MapObjectIDType] = None, + right_lane_id: Optional[MapObjectIDType] = None, + predecessor_ids: List[MapObjectIDType] = [], + successor_ids: List[MapObjectIDType] = [], + speed_limit_mps: Optional[float] = None, + outline: Optional[Polyline3D] = None, + geometry: Optional[geom.Polygon] = None, + ) -> None: + + if outline is None: + outline_array = np.vstack( + ( + left_boundary.array, + right_boundary.array[::-1], + left_boundary.array[0], + ) + ) + outline = Polyline3D.from_linestring(geom.LineString(outline_array)) + + super().__init__(object_id, outline, geometry) + + self._lane_group_id = lane_group_id + self._left_boundary = left_boundary + self._right_boundary = right_boundary + self._centerline = centerline + self._left_lane_id = left_lane_id + self._right_lane_id = right_lane_id + self._predecessor_ids = predecessor_ids + self._successor_ids = successor_ids + self._speed_limit_mps = speed_limit_mps + + lane_group_id = property(lambda self: self._lane_group_id) + left_boundary = property(lambda self: self._left_boundary) + right_boundary = property(lambda self: self._right_boundary) + centerline = property(lambda self: self._centerline) + left_lane_id = property(lambda self: self._left_lane_id) + right_lane_id = property(lambda self: self._right_lane_id) + predecessor_ids = property(lambda self: self._predecessor_ids) + successor_ids = property(lambda self: self._successor_ids) + speed_limit_mps = property(lambda self: self._speed_limit_mps) + + @property + def layer(self) -> MapLayer: + """Inherited, see superclass.""" + return MapLayer.LANE + + @property + def successors(self) -> List[AbstractLane]: + """Inherited, see superclass.""" + raise NotImplementedError + + @property + def predecessors(self) -> List[AbstractLane]: + """Inherited, see superclass.""" + raise 
NotImplementedError + + @property + def left_lane(self) -> Optional[AbstractLane]: + """Inherited, see superclass.""" + raise NotImplementedError + + @property + def right_lane(self) -> Optional[AbstractLane]: + """Inherited, see superclass.""" + raise NotImplementedError + + @property + def lane_group(self) -> AbstractLaneGroup: + """Inherited, see superclass.""" + raise NotImplementedError + + +class CacheLaneGroup(CacheSurfaceObject, AbstractLaneGroup): + def __init__( + self, + object_id: MapObjectIDType, + lane_ids: List[MapObjectIDType], + left_boundary: Polyline3D, + right_boundary: Polyline3D, + intersection_id: Optional[MapObjectIDType] = None, + predecessor_ids: List[MapObjectIDType] = [], + successor_ids: List[MapObjectIDType] = [], + outline: Optional[Polyline3D] = None, + geometry: Optional[geom.Polygon] = None, + ): + if outline is None: + outline_array = np.vstack( + ( + left_boundary.array, + right_boundary.array[::-1], + left_boundary.array[0], + ) + ) + outline = Polyline3D.from_linestring(geom.LineString(outline_array)) + super().__init__(object_id, outline, geometry) + + self._lane_ids = lane_ids + self._left_boundary = left_boundary + self._right_boundary = right_boundary + self._intersection_id = intersection_id + self._predecessor_ids = predecessor_ids + self._successor_ids = successor_ids + + layer = property(lambda self: MapLayer.LANE_GROUP) + lane_ids = property(lambda self: self._lane_ids) + intersection_id = property(lambda self: self._intersection_id) + predecessor_ids = property(lambda self: self._predecessor_ids) + successor_ids = property(lambda self: self._successor_ids) + left_boundary = property(lambda self: self._left_boundary) + right_boundary = property(lambda self: self._right_boundary) + + @property + def successors(self) -> List[AbstractLaneGroup]: + """Inherited, see superclass.""" + raise NotImplementedError + + @property + def predecessors(self) -> List[AbstractLaneGroup]: + """Inherited, see superclass.""" + raise NotImplementedError + + @property + def lanes(self) -> List[AbstractLane]: + """Inherited, see superclass.""" + raise NotImplementedError + + @property + def intersection(self) -> Optional[AbstractIntersection]: + """Inherited, see superclass.""" + raise NotImplementedError + + +class CacheIntersection(CacheSurfaceObject, AbstractIntersection): + def __init__( + self, + object_id: MapObjectIDType, + lane_group_ids: List[MapObjectIDType], + outline: Optional[Union[Polyline2D, Polyline3D]] = None, + geometry: Optional[geom.Polygon] = None, + ): + + super().__init__(object_id, outline, geometry) + self._lane_group_ids = lane_group_ids + + layer = property(lambda self: MapLayer.INTERSECTION) + lane_group_ids = property(lambda self: self._lane_group_ids) + + @property + def lane_groups(self) -> List[CacheLaneGroup]: + """Inherited, see superclass.""" + raise NotImplementedError + + +class CacheCrosswalk(CacheSurfaceObject, AbstractCrosswalk): + def __init__( + self, + object_id: MapObjectIDType, + outline: Optional[Union[Polyline2D, Polyline3D]] = None, + geometry: Optional[geom.Polygon] = None, + ): + super().__init__(object_id, outline, geometry) + + +class CacheCarpark(CacheSurfaceObject, AbstractCarpark): + def __init__( + self, + object_id: MapObjectIDType, + outline: Optional[Union[Polyline2D, Polyline3D]] = None, + geometry: Optional[geom.Polygon] = None, + ): + super().__init__(object_id, outline, geometry) + + +class CacheWalkway(CacheSurfaceObject, AbstractWalkway): + def __init__( + self, + object_id: MapObjectIDType, + outline: 
Optional[Union[Polyline2D, Polyline3D]] = None, + geometry: Optional[geom.Polygon] = None, + ): + super().__init__(object_id, outline, geometry) + + +class CacheGenericDrivable(CacheSurfaceObject, AbstractGenericDrivable): + def __init__( + self, + object_id: MapObjectIDType, + outline: Optional[Union[Polyline2D, Polyline3D]] = None, + geometry: Optional[geom.Polygon] = None, + ): + super().__init__(object_id, outline, geometry) + + +class CacheRoadEdge(CacheLineObject, AbstractRoadEdge): + def __init__( + self, + object_id: MapObjectIDType, + road_edge_type: RoadEdgeType, + polyline: Union[Polyline2D, Polyline3D], + ): + super().__init__(object_id, polyline) + self._road_edge_type = road_edge_type + + road_edge_type = property(lambda self: self._road_edge_type) + + +class CacheRoadLine(CacheLineObject, AbstractRoadLine): + def __init__( + self, + object_id: MapObjectIDType, + road_line_type: RoadLineType, + polyline: Union[Polyline2D, Polyline3D], + ): + super().__init__(object_id, polyline) + self._road_line_type = road_line_type + + road_line_type = property(lambda self: self._road_line_type) diff --git a/d123/datatypes/maps/gpkg/gpkg_map.py b/d123/datatypes/maps/gpkg/gpkg_map.py index be15d482..74380773 100644 --- a/d123/datatypes/maps/gpkg/gpkg_map.py +++ b/d123/datatypes/maps/gpkg/gpkg_map.py @@ -24,7 +24,7 @@ GPKGRoadLine, GPKGWalkway, ) -from d123.datatypes.maps.gpkg.utils import load_gdf_with_geometry_columns +from d123.datatypes.maps.gpkg.gpkg_utils import load_gdf_with_geometry_columns from d123.datatypes.maps.map_datatypes import MapLayer from d123.geometry import Point2D diff --git a/d123/datatypes/maps/gpkg/gpkg_map_objects.py b/d123/datatypes/maps/gpkg/gpkg_map_objects.py index ff44b0d6..db299918 100644 --- a/d123/datatypes/maps/gpkg/gpkg_map_objects.py +++ b/d123/datatypes/maps/gpkg/gpkg_map_objects.py @@ -22,8 +22,9 @@ AbstractRoadLine, AbstractSurfaceMapObject, AbstractWalkway, + MapObjectIDType, ) -from d123.datatypes.maps.gpkg.utils import get_row_with_value, get_trimesh_from_boundaries +from d123.datatypes.maps.gpkg.gpkg_utils import get_row_with_value, get_trimesh_from_boundaries from d123.datatypes.maps.map_datatypes import RoadEdgeType, RoadLineType from d123.geometry import Point3DIndex, Polyline3D @@ -33,7 +34,7 @@ class GPKGSurfaceObject(AbstractSurfaceMapObject): Base interface representation of all map objects. """ - def __init__(self, object_id: str, surface_df: gpd.GeoDataFrame) -> None: + def __init__(self, object_id: MapObjectIDType, surface_df: gpd.GeoDataFrame) -> None: """ Constructor of the base surface map object type. :param object_id: unique identifier of a surface map object. @@ -49,7 +50,7 @@ def shapely_polygon(self) -> geom.Polygon: @cached_property def _object_row(self) -> gpd.GeoSeries: - return get_row_with_value(self._object_df, "id", self.id) + return get_row_with_value(self._object_df, "id", self.object_id) @cached_property def outline_3d(self) -> Polyline3D: @@ -90,7 +91,7 @@ def trimesh_mesh(self) -> trimesh.Trimesh: class GPKGLineObject(AbstractLineMapObject): - def __init__(self, object_id: str, line_df: gpd.GeoDataFrame) -> None: + def __init__(self, object_id: MapObjectIDType, line_df: gpd.GeoDataFrame) -> None: """ Constructor of the base line map object type. :param object_id: unique identifier of a line map object. 
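# A minimal sketch (not part of the patch; names here are hypothetical stand-ins,
# not the library's API). The hunks below replace `self.id` with `self.object_id`
# and move `ast.literal_eval` parsing into dedicated `*_ids` properties. The
# underlying pattern, assuming GPKG layers store ID-list columns as strings such
# as "[8, 9]" and rows are fetched by their "id" column:
import ast

import pandas as pd


def get_row_by_id(df: pd.DataFrame, value) -> pd.Series:
    # Return the first row whose "id" column equals `value`.
    return df.loc[df["id"] == value].iloc[0]


df = pd.DataFrame({"id": [7], "successor_ids": ["[8, 9]"]})
row = get_row_by_id(df, 7)
assert ast.literal_eval(row.successor_ids) == [8, 9]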
@@ -101,7 +102,7 @@ def __init__(self, object_id: str, line_df: gpd.GeoDataFrame) -> None:
 
     @cached_property
     def _object_row(self) -> gpd.GeoSeries:
-        return get_row_with_value(self._object_df, "id", self.id)
+        return get_row_with_value(self._object_df, "id", self.object_id)
 
     @property
     def polyline_3d(self) -> Polyline3D:
@@ -112,7 +113,7 @@ def polyline_3d(self) -> Polyline3D:
 class GPKGLane(GPKGSurfaceObject, AbstractLane):
     def __init__(
         self,
-        object_id: str,
+        object_id: MapObjectIDType,
         object_df: gpd.GeoDataFrame,
         lane_group_df: gpd.GeoDataFrame,
         intersection_df: gpd.GeoDataFrame,
@@ -126,17 +127,25 @@ def speed_limit_mps(self) -> Optional[float]:
         """Inherited, see superclass."""
         return self._object_row.speed_limit_mps
 
+    @property
+    def successor_ids(self) -> List[MapObjectIDType]:
+        """Inherited, see superclass."""
+        return ast.literal_eval(self._object_row.successor_ids)
+
     @property
     def successors(self) -> List[GPKGLane]:
         """Inherited, see superclass."""
-        successor_ids = ast.literal_eval(self._object_row.successor_ids)
-        return [GPKGLane(lane_id, self._object_df) for lane_id in successor_ids]
+        return [GPKGLane(lane_id, self._object_df) for lane_id in self.successor_ids]
+
+    @property
+    def predecessor_ids(self) -> List[MapObjectIDType]:
+        """Inherited, see superclass."""
+        return ast.literal_eval(self._object_row.predecessor_ids)
 
     @property
     def predecessors(self) -> List[GPKGLane]:
         """Inherited, see superclass."""
-        predecessor_ids = ast.literal_eval(self._object_row.predecessor_ids)
-        return [GPKGLane(lane_id, self._object_df) for lane_id in predecessor_ids]
+        return [GPKGLane(lane_id, self._object_df) for lane_id in self.predecessor_ids]
 
     @property
     def left_boundary(self) -> Polyline3D:
@@ -148,23 +157,31 @@ def right_boundary(self) -> Polyline3D:
         """Inherited, see superclass."""
         return Polyline3D.from_linestring(self._object_row.right_boundary)
 
+    @property
+    def left_lane_id(self) -> Optional[MapObjectIDType]:
+        """Inherited, see superclass."""
+        return self._object_row.left_lane_id
+
     @property
     def left_lane(self) -> Optional[GPKGLane]:
         """Inherited, see superclass."""
-        left_lane_id = self._object_row.left_lane_id
         return (
-            GPKGLane(left_lane_id, self._object_df, self._lane_group_df, self._intersection_df)
-            if left_lane_id is not None and not pd.isna(left_lane_id)
+            GPKGLane(self.left_lane_id, self._object_df, self._lane_group_df, self._intersection_df)
+            if self.left_lane_id is not None and not pd.isna(self.left_lane_id)
             else None
         )
 
+    @property
+    def right_lane_id(self) -> Optional[MapObjectIDType]:
+        """Inherited, see superclass."""
+        return self._object_row.right_lane_id
+
     @property
     def right_lane(self) -> Optional[GPKGLane]:
         """Inherited, see superclass."""
-        right_lane_id = self._object_row.right_lane_id
         return (
-            GPKGLane(right_lane_id, self._object_df, self._lane_group_df, self._intersection_df)
-            if right_lane_id is not None and not pd.isna(right_lane_id)
+            GPKGLane(self.right_lane_id, self._object_df, self._lane_group_df, self._intersection_df)
+            if self.right_lane_id is not None and not pd.isna(self.right_lane_id)
             else None
         )
 
@@ -180,12 +197,16 @@ def outline_3d(self) -> Polyline3D:
         outline_array = np.vstack((outline_array, outline_array[0]))
         return Polyline3D.from_linestring(geom.LineString(outline_array))
 
+    @property
+    def lane_group_id(self) -> MapObjectIDType:
+        """Inherited, see superclass."""
+        return self._object_row.lane_group_id
+
     @property
     def lane_group(self) -> GPKGLaneGroup:
         """Inherited, see superclass."""
-        lane_group_id = 
self._object_row.lane_group_id return GPKGLaneGroup( - lane_group_id, + self.lane_group_id, self._lane_group_df, self._object_df, self._intersection_df, @@ -195,7 +216,7 @@ def lane_group(self) -> GPKGLaneGroup: class GPKGLaneGroup(GPKGSurfaceObject, AbstractLaneGroup): def __init__( self, - object_id: str, + object_id: MapObjectIDType, object_df: gpd.GeoDataFrame, lane_df: gpd.GeoDataFrame, intersection_df: gpd.GeoDataFrame, @@ -204,22 +225,30 @@ def __init__( self._lane_df = lane_df self._intersection_df = intersection_df + @property + def successor_ids(self) -> List[MapObjectIDType]: + """Inherited, see superclass.""" + return ast.literal_eval(self._object_row.successor_ids) + @property def successors(self) -> List[GPKGLaneGroup]: """Inherited, see superclass.""" - successor_ids = ast.literal_eval(self._object_row.successor_ids) return [ GPKGLaneGroup(lane_group_id, self._object_df, self._lane_df, self._intersection_df) - for lane_group_id in successor_ids + for lane_group_id in self.successor_ids ] + @property + def predecessor_ids(self) -> List[MapObjectIDType]: + """Inherited, see superclass.""" + return ast.literal_eval(self._object_row.predecessor_ids) + @property def predecessors(self) -> List[GPKGLaneGroup]: """Inherited, see superclass.""" - predecessor_ids = ast.literal_eval(self._object_row.predecessor_ids) return [ GPKGLaneGroup(lane_group_id, self._object_df, self._lane_df, self._intersection_df) - for lane_group_id in predecessor_ids + for lane_group_id in self.predecessor_ids ] @property @@ -238,10 +267,14 @@ def outline_3d(self) -> Polyline3D: outline_array = np.vstack((self.left_boundary.array, self.right_boundary.array[::-1])) return Polyline3D.from_linestring(geom.LineString(outline_array)) + @property + def lane_ids(self) -> List[MapObjectIDType]: + """Inherited, see superclass.""" + return ast.literal_eval(self._object_row.lane_ids) + @property def lanes(self) -> List[GPKGLane]: """Inherited, see superclass.""" - lane_ids = ast.literal_eval(self._object_row.lane_ids) return [ GPKGLane( lane_id, @@ -249,21 +282,25 @@ def lanes(self) -> List[GPKGLane]: self._object_df, self._intersection_df, ) - for lane_id in lane_ids + for lane_id in self.lane_ids ] + @property + def intersection_id(self) -> Optional[MapObjectIDType]: + """Inherited, see superclass.""" + return self._object_row.intersection_id + @property def intersection(self) -> Optional[GPKGIntersection]: """Inherited, see superclass.""" - intersection_id = self._object_row.intersection_id return ( GPKGIntersection( - intersection_id, + self.intersection_id, self._intersection_df, self._lane_df, self._object_df, ) - if intersection_id is not None and not pd.isna(intersection_id) + if self.intersection_id is not None and not pd.isna(self.intersection_id) else None ) @@ -271,7 +308,7 @@ def intersection(self) -> Optional[GPKGIntersection]: class GPKGIntersection(GPKGSurfaceObject, AbstractIntersection): def __init__( self, - object_id: str, + object_id: MapObjectIDType, object_df: gpd.GeoDataFrame, lane_df: gpd.GeoDataFrame, lane_group_df: gpd.GeoDataFrame, @@ -280,10 +317,14 @@ def __init__( self._lane_df = lane_df self._lane_group_df = lane_group_df + @property + def lane_group_ids(self) -> List[MapObjectIDType]: + """Inherited, see superclass.""" + return ast.literal_eval(self._object_row.lane_group_ids) + @property def lane_groups(self) -> List[GPKGLaneGroup]: """Inherited, see superclass.""" - lane_group_ids = ast.literal_eval(self._object_row.lane_group_ids) return [ GPKGLaneGroup( lane_group_id, @@ -291,37 
+332,37 @@ def lane_groups(self) -> List[GPKGLaneGroup]: self._lane_df, self._object_df, ) - for lane_group_id in lane_group_ids + for lane_group_id in self.lane_group_ids ] class GPKGCrosswalk(GPKGSurfaceObject, AbstractCrosswalk): - def __init__(self, object_id: str, object_df: gpd.GeoDataFrame): + def __init__(self, object_id: MapObjectIDType, object_df: gpd.GeoDataFrame): super().__init__(object_id, object_df) class GPKGCarpark(GPKGSurfaceObject, AbstractCarpark): - def __init__(self, object_id: str, object_df: gpd.GeoDataFrame): + def __init__(self, object_id: MapObjectIDType, object_df: gpd.GeoDataFrame): super().__init__(object_id, object_df) class GPKGWalkway(GPKGSurfaceObject, AbstractWalkway): - def __init__(self, object_id: str, object_df: gpd.GeoDataFrame): + def __init__(self, object_id: MapObjectIDType, object_df: gpd.GeoDataFrame): super().__init__(object_id, object_df) class GPKGGenericDrivable(GPKGSurfaceObject, AbstractGenericDrivable): - def __init__(self, object_id: str, object_df: gpd.GeoDataFrame): + def __init__(self, object_id: MapObjectIDType, object_df: gpd.GeoDataFrame): super().__init__(object_id, object_df) class GPKGRoadEdge(GPKGLineObject, AbstractRoadEdge): - def __init__(self, object_id: str, object_df: gpd.GeoDataFrame): + def __init__(self, object_id: MapObjectIDType, object_df: gpd.GeoDataFrame): super().__init__(object_id, object_df) @cached_property def _object_row(self) -> gpd.GeoSeries: - return get_row_with_value(self._object_df, "id", self.id) + return get_row_with_value(self._object_df, "id", self.object_id) @property def road_edge_type(self) -> RoadEdgeType: @@ -330,12 +371,12 @@ def road_edge_type(self) -> RoadEdgeType: class GPKGRoadLine(GPKGLineObject, AbstractRoadLine): - def __init__(self, object_id: str, object_df: gpd.GeoDataFrame): + def __init__(self, object_id: MapObjectIDType, object_df: gpd.GeoDataFrame): super().__init__(object_id, object_df) @cached_property def _object_row(self) -> gpd.GeoSeries: - return get_row_with_value(self._object_df, "id", self.id) + return get_row_with_value(self._object_df, "id", self.object_id) @property def road_line_type(self) -> RoadLineType: diff --git a/d123/datatypes/maps/gpkg/utils.py b/d123/datatypes/maps/gpkg/gpkg_utils.py similarity index 100% rename from d123/datatypes/maps/gpkg/utils.py rename to d123/datatypes/maps/gpkg/gpkg_utils.py diff --git a/d123/datatypes/maps/map_datatypes.py b/d123/datatypes/maps/map_datatypes.py index f948422f..14a815b5 100644 --- a/d123/datatypes/maps/map_datatypes.py +++ b/d123/datatypes/maps/map_datatypes.py @@ -49,8 +49,7 @@ class RoadEdgeType(SerialIntEnum): class RoadLineType(SerialIntEnum): """ Enum for RoadLineType. - NOTE: We use the road line types from Waymo. - TODO: Use the Argoverse 2 road line types instead. + TODO: Use the Argoverse 2 road line types. 
https://github.com/waymo-research/waymo-open-dataset/blob/master/src/waymo_open_dataset/protos/map.proto#L208
     """
 
diff --git a/d123/datatypes/scene/scene_metadata.py b/d123/datatypes/scene/scene_metadata.py
index 3db87645..3a12c9c0 100644
--- a/d123/datatypes/scene/scene_metadata.py
+++ b/d123/datatypes/scene/scene_metadata.py
@@ -9,7 +9,7 @@
 from d123.datatypes.vehicle_state.vehicle_parameters import VehicleParameters
 
 
-@dataclass(frozen=True)
+@dataclass
 class LogMetadata:
 
     dataset: str
diff --git a/d123/geometry/polyline.py b/d123/geometry/polyline.py
index 51df68b5..19004273 100644
--- a/d123/geometry/polyline.py
+++ b/d123/geometry/polyline.py
@@ -296,7 +296,7 @@ def polyline_se2(self) -> PolylineSE2:
         return PolylineSE2.from_linestring(self.linestring)
 
     @property
-    def array(self) -> Polyline2D:
+    def array(self) -> npt.NDArray[np.float64]:
         """Converts the 3D polyline to the discrete 3D points.
 
         :return: A numpy array of shape (N, 3), indexed by :class:`~d123.geometry.Point3DIndex`.
diff --git a/d123/script/builders/log_writer_builder.py b/d123/script/builders/writer_builder.py
similarity index 60%
rename from d123/script/builders/log_writer_builder.py
rename to d123/script/builders/writer_builder.py
index d0c3a394..165e9b7b 100644
--- a/d123/script/builders/log_writer_builder.py
+++ b/d123/script/builders/writer_builder.py
@@ -4,11 +4,20 @@
 from omegaconf import DictConfig
 
 from d123.conversion.abstract_dataset_converter import AbstractLogWriter
+from d123.conversion.map_writer.abstract_map_writer import AbstractMapWriter
 from d123.script.builders.utils.utils_type import validate_type
 
 logger = logging.getLogger(__name__)
 
 
+def build_map_writer(cfg: DictConfig) -> AbstractMapWriter:
+    logger.info("Building AbstractMapWriter...")
+    map_writer: AbstractMapWriter = instantiate(cfg)
+    validate_type(map_writer, AbstractMapWriter)
+    logger.info("Building AbstractMapWriter...DONE!")
+    return map_writer
+
+
 def build_log_writer(cfg: DictConfig) -> AbstractLogWriter:
     logger.info("Building AbstractLogWriter...")
     log_writer: AbstractLogWriter = instantiate(cfg)
diff --git a/d123/script/config/common/default_dataset_paths.yaml b/d123/script/config/common/default_dataset_paths.yaml
index a18ecb77..53e06caa 100644
--- a/d123/script/config/common/default_dataset_paths.yaml
+++ b/d123/script/config/common/default_dataset_paths.yaml
@@ -1,8 +1,9 @@
 # 123D Defaults
-d123_devkit_root: ${oc.env:D123_DEVKIT_ROOT}
 d123_data_root: ${oc.env:D123_DATA_ROOT}
-d123_maps_root: ${oc.env:D123_MAPS_ROOT}
+d123_logs_root: ${oc.env:D123_DATA_ROOT}/logs
+d123_maps_root: ${oc.env:D123_DATA_ROOT}/maps
+d123_sensors_root: ${oc.env:D123_DATA_ROOT}/sensors
 
 
 # nuPlan defaults
diff --git a/d123/script/config/conversion/datasets/nuplan_dataset.yaml b/d123/script/config/conversion/datasets/nuplan_dataset.yaml
index e744a1ad..b5fa60d7 100644
--- a/d123/script/config/conversion/datasets/nuplan_dataset.yaml
+++ b/d123/script/config/conversion/datasets/nuplan_dataset.yaml
@@ -4,7 +4,7 @@ nuplan_dataset:
 
   splits: ["nuplan_train", "nuplan_val", "nuplan_test"]
   nuplan_data_root: ${nuplan_data_root}
-  nuplan_map_root: ${nuplan_maps_root}
+  nuplan_maps_root: ${nuplan_maps_root}
   nuplan_sensor_root: ${nuplan_sensor_root}
 
   dataset_converter_config:
@@ -15,6 +15,9 @@ nuplan_dataset:
     force_log_conversion: ${force_log_conversion}
     force_map_conversion: ${force_map_conversion}
 
+    # Map
+    include_map: true
+
     # Ego
     include_ego: true
 
diff --git a/d123/script/config/conversion/datasets/nuplan_mini_dataset.yaml 
b/d123/script/config/conversion/datasets/nuplan_mini_dataset.yaml index 92b86322..146e8d2d 100644 --- a/d123/script/config/conversion/datasets/nuplan_mini_dataset.yaml +++ b/d123/script/config/conversion/datasets/nuplan_mini_dataset.yaml @@ -2,9 +2,9 @@ nuplan_mini_dataset: _target_: d123.conversion.datasets.nuplan.nuplan_converter.NuPlanConverter _convert_: 'all' - splits: ["nuplan_mini_train", "nuplan_mini_val", "nuplan_mini_test"] + splits: ["nuplan-mini_train", "nuplan-mini_val", "nuplan-mini_test"] nuplan_data_root: ${nuplan_data_root} - nuplan_map_root: ${nuplan_maps_root} + nuplan_maps_root: ${nuplan_maps_root} nuplan_sensor_root: ${nuplan_sensor_root} dataset_converter_config: @@ -15,6 +15,9 @@ nuplan_mini_dataset: force_log_conversion: ${force_log_conversion} force_map_conversion: ${force_map_conversion} + # Map + include_map: true + # Ego include_ego: true diff --git a/d123/script/config/conversion/datasets/nuplan_private_dataset.yaml b/d123/script/config/conversion/datasets/nuplan_private_dataset.yaml index bd5caaec..62f42ce9 100644 --- a/d123/script/config/conversion/datasets/nuplan_private_dataset.yaml +++ b/d123/script/config/conversion/datasets/nuplan_private_dataset.yaml @@ -2,9 +2,9 @@ nuplan_private_dataset: _target_: d123.conversion.datasets.nuplan.nuplan_converter.NuPlanConverter _convert_: 'all' - splits: ["nuplan_private_test"] + splits: ["nuplan-private_test"] nuplan_data_root: ${nuplan_data_root} - nuplan_map_root: ${nuplan_maps_root} + nuplan_maps_root: ${nuplan_maps_root} nuplan_sensor_root: ${nuplan_sensor_root} dataset_converter_config: @@ -15,6 +15,9 @@ nuplan_private_dataset: force_log_conversion: ${force_log_conversion} force_map_conversion: ${force_map_conversion} + # Map + include_map: true + # Ego include_ego: true diff --git a/d123/script/config/conversion/datasets/wopd_dataset.yaml b/d123/script/config/conversion/datasets/wopd_dataset.yaml index 4380a80d..f936270d 100644 --- a/d123/script/config/conversion/datasets/wopd_dataset.yaml +++ b/d123/script/config/conversion/datasets/wopd_dataset.yaml @@ -2,9 +2,9 @@ wopd_dataset: _target_: d123.conversion.datasets.wopd.wopd_converter.WOPDConverter _convert_: 'all' - splits: ["wopd_val"] + splits: ["wopd_train", "wopd_val", "wopd_test"] # Which splits to convert. Options: ["wopd_train", "wopd_val", "wopd_test"] wopd_data_root: "/media/nvme1/waymo_perception" # ${wopd_data_root} - zero_roll_pitch: true # Whether to zero the roll and pitch of the box detections. + zero_roll_pitch: true # Whether to zero the roll and pitch of the box detections in global frame. keep_polar_features: false # Add lidar polar features (range, azimuth, elevation) in addition to XYZ. (slow if true) add_map_pose_offset: true # Whether to add the map pose offset to the ego state and box detections. 
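# A minimal sketch (not part of the patch; `DummyConverter` is a hypothetical
# stand-in) of why the `nuplan_map_root` -> `nuplan_maps_root` renames above
# matter: Hydra's `instantiate` passes every non-underscore key of the config as
# a keyword argument to `_target_`, so the key must match the converter's
# parameter name. Assumed to run as a script so `__main__` resolves:
from hydra.utils import instantiate
from omegaconf import OmegaConf


class DummyConverter:
    def __init__(self, nuplan_maps_root: str):
        self.nuplan_maps_root = nuplan_maps_root


cfg = OmegaConf.create(
    {
        "_target_": "__main__.DummyConverter",
        "nuplan_maps_root": "/data/nuplan/maps",
    }
)
converter = instantiate(cfg)
assert converter.nuplan_maps_root == "/data/nuplan/maps"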
@@ -17,6 +17,9 @@ wopd_dataset: force_log_conversion: ${force_log_conversion} force_map_conversion: ${force_map_conversion} + # Map + include_map: true + # Ego include_ego: true diff --git a/d123/script/config/conversion/default_conversion.yaml b/d123/script/config/conversion/default_conversion.yaml index 179401e3..1a1bd125 100644 --- a/d123/script/config/conversion/default_conversion.yaml +++ b/d123/script/config/conversion/default_conversion.yaml @@ -6,17 +6,20 @@ hydra: - pkg://d123.script.config - pkg://d123.script.config.common job: + env_set: + HYDRA_FULL_ERROR: 1 chdir: False # defaults: - default_common - default_dataset_paths - - log_writer: arrow_ipc_log_writer + - log_writer: arrow_log_writer + - map_writer: gpkg_map_writer - datasets: - # - nuplan_mini_dataset + - nuplan_mini_dataset # - nuplan_private_dataset # - carla_dataset - - wopd_dataset + # - wopd_dataset # - av2_sensor_dataset - _self_ diff --git a/d123/script/config/conversion/log_writer/arrow_ipc_log_writer.yaml b/d123/script/config/conversion/log_writer/arrow_log_writer.yaml similarity index 90% rename from d123/script/config/conversion/log_writer/arrow_ipc_log_writer.yaml rename to d123/script/config/conversion/log_writer/arrow_log_writer.yaml index d74a121a..5b7e7766 100644 --- a/d123/script/config/conversion/log_writer/arrow_ipc_log_writer.yaml +++ b/d123/script/config/conversion/log_writer/arrow_log_writer.yaml @@ -1,5 +1,7 @@ _target_: d123.conversion.log_writer.arrow_log_writer.ArrowLogWriter _convert_: 'all' + +logs_root: ${d123_logs_root} compression: null # Compression method for ipc files. Options: None, 'lz4', 'zstd' compression_level: null # Compression level for ipc files. Options: None, or depending on compression method diff --git a/d123/script/config/conversion/map_writer/__init__.py b/d123/script/config/conversion/map_writer/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/d123/script/config/conversion/map_writer/gpkg_map_writer.yaml b/d123/script/config/conversion/map_writer/gpkg_map_writer.yaml new file mode 100644 index 00000000..2e2e42b5 --- /dev/null +++ b/d123/script/config/conversion/map_writer/gpkg_map_writer.yaml @@ -0,0 +1,4 @@ +_target_: d123.conversion.map_writer.gpkg_map_writer.GPKGMapWriter +_convert_: 'all' + +maps_root: ${d123_maps_root} diff --git a/d123/script/run_conversion.py b/d123/script/run_conversion.py index eb630597..b25a5ba4 100644 --- a/d123/script/run_conversion.py +++ b/d123/script/run_conversion.py @@ -9,8 +9,8 @@ from d123 import ascii_banner from d123.common.multithreading.worker_utils import worker_map from d123.script.builders.dataset_converter_builder import AbstractDatasetConverter, build_dataset_converters -from d123.script.builders.log_writer_builder import build_log_writer from d123.script.builders.worker_pool_builder import build_worker +from d123.script.builders.writer_builder import build_log_writer, build_map_writer logging.basicConfig(level=logging.INFO) logger = logging.getLogger(__name__) @@ -50,8 +50,10 @@ def main(cfg: DictConfig) -> None: def _convert_maps(args: List[Dict[str, int]], cfg: DictConfig, dataset_converter: AbstractDatasetConverter) -> List: + + map_writer = build_map_writer(cfg.map_writer) for arg in args: - dataset_converter.convert_map(arg["map_index"]) + dataset_converter.convert_map(arg["map_index"], map_writer) return [] From b277c59597c0a55c44f1e3fbf1b9afbf93d9e4a9 Mon Sep 17 00:00:00 2001 From: Daniel Dauner Date: Sun, 12 Oct 2025 19:36:36 +0200 Subject: [PATCH 075/145] Fix a few map writer related issues 
(#51) --- d123/conversion/dataset_converter_config.py | 2 +- .../datasets/nuplan/nuplan_map_conversion.py | 3 ++- d123/conversion/map_writer/gpkg_map_writer.py | 17 +++-------------- d123/datatypes/maps/gpkg/gpkg_map.py | 4 ++-- d123/datatypes/maps/gpkg/gpkg_map_objects.py | 6 +++--- .../scene/arrow/arrow_scene_builder.py | 2 +- .../scene/arrow/utils/arrow_getters.py | 2 +- .../config/conversion/default_conversion.yaml | 2 +- notebooks/viz/bev_matplotlib.ipynb | 4 ++-- test_viser.py | 8 +++++--- 10 files changed, 21 insertions(+), 29 deletions(-) diff --git a/d123/conversion/dataset_converter_config.py b/d123/conversion/dataset_converter_config.py index 1c6cee27..43c06199 100644 --- a/d123/conversion/dataset_converter_config.py +++ b/d123/conversion/dataset_converter_config.py @@ -41,7 +41,7 @@ def __post_init__(self): if isinstance(self.output_path, str): self.output_path = Path(self.output_path) - assert self.camera_store_option != "mp4", "MP4 format is not yet supported." + assert self.camera_store_option != "mp4", "MP4 format is not yet supported, but planned for future releases." assert self.camera_store_option in [ "path", "binary", diff --git a/d123/conversion/datasets/nuplan/nuplan_map_conversion.py b/d123/conversion/datasets/nuplan/nuplan_map_conversion.py index 4b1dc5f7..fb2fa763 100644 --- a/d123/conversion/datasets/nuplan/nuplan_map_conversion.py +++ b/d123/conversion/datasets/nuplan/nuplan_map_conversion.py @@ -26,6 +26,7 @@ CacheLaneGroup, CacheRoadEdge, CacheRoadLine, + CacheWalkway, ) from d123.datatypes.maps.gpkg.gpkg_utils import get_all_rows_with_value, get_row_with_value from d123.datatypes.maps.map_datatypes import RoadEdgeType @@ -297,7 +298,7 @@ def _write_nuplan_crosswalks(nuplan_gdf: Dict[str, gpd.GeoDataFrame], map_writer def _write_nuplan_walkways(nuplan_gdf: Dict[str, gpd.GeoDataFrame], map_writer: AbstractMapWriter) -> None: # NOTE: drops: creator_id for id, geometry in zip(nuplan_gdf["walkways"].fid.to_list(), nuplan_gdf["walkways"].geometry.to_list()): - map_writer.write_crosswalk(CacheCrosswalk(object_id=id, geometry=geometry)) + map_writer.write_walkway(CacheWalkway(object_id=id, geometry=geometry)) def _write_nuplan_carparks(nuplan_gdf: Dict[str, gpd.GeoDataFrame], map_writer: AbstractMapWriter) -> None: diff --git a/d123/conversion/map_writer/gpkg_map_writer.py b/d123/conversion/map_writer/gpkg_map_writer.py index b7696868..7bdad06e 100644 --- a/d123/conversion/map_writer/gpkg_map_writer.py +++ b/d123/conversion/map_writer/gpkg_map_writer.py @@ -124,7 +124,6 @@ def close(self) -> None: """Inherited, see superclass.""" if self._map_file is not None or self._map_data is not None: - if not self._map_file.parent.exists(): self._map_file.parent.mkdir(parents=True, exist_ok=True) @@ -132,7 +131,9 @@ def close(self) -> None: if len(layer_data["id"]) > 0: df = pd.DataFrame(layer_data) gdf = gpd.GeoDataFrame(df, geometry="geometry", crs=self._crs) - gdf.to_file(self._map_file, driver="GPKG", layer=map_layer.serialize()) + else: + gdf = gpd.GeoDataFrame({"id": [], "geometry": []}, geometry="geometry", crs=self._crs) + gdf.to_file(self._map_file, driver="GPKG", layer=map_layer.serialize()) del self._map_file, self._map_data self._map_file = None @@ -163,15 +164,3 @@ def _write_line_layer(self, layer: MapLayer, line_object: AbstractLineMapObject) self._assert_initialized() self._map_data[layer]["id"].append(line_object.object_id) self._map_data[layer]["geometry"].append(line_object.shapely_linestring) - - def _get_gpd_dataframe(self, layer: MapLayer, layer_data: 
MAP_OBJECT_DATA): - """Helper to convert map data to a GeoPandas DataFrame. - - :param layer: map layer of data - :param layer_data: map data to convert - :return: GeoPandas DataFrame of map data - """ - - df = pd.DataFrame(layer_data) - gdf = gpd.GeoDataFrame(df, geometry="geometry", crs=self._crs) - return gdf diff --git a/d123/datatypes/maps/gpkg/gpkg_map.py b/d123/datatypes/maps/gpkg/gpkg_map.py index 74380773..d207e637 100644 --- a/d123/datatypes/maps/gpkg/gpkg_map.py +++ b/d123/datatypes/maps/gpkg/gpkg_map.py @@ -68,7 +68,7 @@ def initialize(self) -> None: ) load_gdf_with_geometry_columns( self._gpd_dataframes[map_layer], - geometry_column_names=["baseline_path", "right_boundary", "left_boundary", "outline"], + geometry_column_names=["centerline", "right_boundary", "left_boundary", "outline"], ) # TODO: remove the temporary fix and enforce consistent id types in the GPKG files if "id" in self._gpd_dataframes[map_layer].columns: @@ -374,7 +374,7 @@ def _get_road_line(self, id: str) -> Optional[GPKGRoadLine]: @lru_cache(maxsize=MAX_LRU_CACHED_TABLES) def get_global_map_api(dataset: str, location: str) -> GPKGMap: D123_MAPS_ROOT = Path(os.environ.get("D123_MAPS_ROOT")) - gpkg_path = D123_MAPS_ROOT / f"{dataset}_{location}.gpkg" + gpkg_path = D123_MAPS_ROOT / dataset / f"{dataset}_{location}.gpkg" assert gpkg_path.is_file(), f"{dataset}_{location}.gpkg not found in {str(D123_MAPS_ROOT)}." map_api = GPKGMap(gpkg_path) map_api.initialize() diff --git a/d123/datatypes/maps/gpkg/gpkg_map_objects.py b/d123/datatypes/maps/gpkg/gpkg_map_objects.py index db299918..64e78838 100644 --- a/d123/datatypes/maps/gpkg/gpkg_map_objects.py +++ b/d123/datatypes/maps/gpkg/gpkg_map_objects.py @@ -52,8 +52,8 @@ def shapely_polygon(self) -> geom.Polygon: def _object_row(self) -> gpd.GeoSeries: return get_row_with_value(self._object_df, "id", self.object_id) - @cached_property - def outline_3d(self) -> Polyline3D: + @property + def outline(self) -> Polyline3D: """Inherited, see superclass.""" outline_3d: Optional[Polyline3D] = None if "outline" in self._object_df.columns: @@ -188,7 +188,7 @@ def right_lane(self) -> Optional[GPKGLane]: @property def centerline(self) -> Polyline3D: """Inherited, see superclass.""" - return Polyline3D.from_linestring(self._object_row.baseline_path) + return Polyline3D.from_linestring(self._object_row.centerline) @property def outline_3d(self) -> Polyline3D: diff --git a/d123/datatypes/scene/arrow/arrow_scene_builder.py b/d123/datatypes/scene/arrow/arrow_scene_builder.py index 2bd58888..b45b7a0f 100644 --- a/d123/datatypes/scene/arrow/arrow_scene_builder.py +++ b/d123/datatypes/scene/arrow/arrow_scene_builder.py @@ -60,7 +60,7 @@ def _discover_split_names(dataset_path: Path, split_types: Set[str]) -> Set[str] def _discover_log_paths(dataset_path: Path, split_names: Set[str], log_names: Optional[List[str]]) -> List[Path]: log_paths: List[Path] = [] for split_name in split_names: - for log_path in (dataset_path / split_name).iterdir(): + for log_path in (dataset_path / "logs" / split_name).iterdir(): if log_path.is_file() and log_path.name.endswith(".arrow"): if log_names is None or log_path.stem in log_names: log_paths.append(log_path) diff --git a/d123/datatypes/scene/arrow/utils/arrow_getters.py b/d123/datatypes/scene/arrow/utils/arrow_getters.py index 19161321..7ba3e96d 100644 --- a/d123/datatypes/scene/arrow/utils/arrow_getters.py +++ b/d123/datatypes/scene/arrow/utils/arrow_getters.py @@ -30,7 +30,7 @@ DATASET_SENSOR_ROOT: Dict[str, Path] = { "nuplan": 
Path(os.environ["NUPLAN_DATA_ROOT"]) / "nuplan-v1.1" / "sensor_blobs", "carla": Path(os.environ["CARLA_DATA_ROOT"]) / "sensor_blobs", - "av2-sensor": Path(os.environ["AV2_SENSOR_DATA_ROOT"]) / "sensor_mini", + # "av2-sensor": Path(os.environ["AV2_SENSOR_DATA_ROOT"]) / "sensor_mini", } diff --git a/d123/script/config/conversion/default_conversion.yaml b/d123/script/config/conversion/default_conversion.yaml index 1a1bd125..71321d1e 100644 --- a/d123/script/config/conversion/default_conversion.yaml +++ b/d123/script/config/conversion/default_conversion.yaml @@ -24,4 +24,4 @@ defaults: - _self_ force_map_conversion: True -force_log_conversion: True +force_log_conversion: False diff --git a/notebooks/viz/bev_matplotlib.ipynb b/notebooks/viz/bev_matplotlib.ipynb index 0c0c8e42..bd9cb6e7 100644 --- a/notebooks/viz/bev_matplotlib.ipynb +++ b/notebooks/viz/bev_matplotlib.ipynb @@ -42,7 +42,7 @@ "\n", "# splits = [\"wopd_train\"]\n", "# splits = [\"carla\"]\n", - "splits = [\"nuplan_private_test\"]\n", + "splits = [\"nuplan-mini_test\"]\n", "# splits = [\"av2-sensor-mini_train\"]\n", "# log_names = None\n", "\n", @@ -252,7 +252,7 @@ " return fig, ax\n", "\n", "\n", - "scene_index = 19\n", + "scene_index = 17\n", "iteration = 99\n", "fig, ax = plot_scene_at_iteration(scenes[scene_index], iteration=iteration, radius=60)\n", "plt.show()\n", diff --git a/test_viser.py b/test_viser.py index ab527a98..f8675f2e 100644 --- a/test_viser.py +++ b/test_viser.py @@ -1,5 +1,7 @@ import os +from anyio import Path + from d123.common.multithreading.worker_sequential import Sequential from d123.common.visualization.viser.viser_viewer import ViserViewer from d123.datatypes.scene.arrow.arrow_scene_builder import ArrowSceneBuilder @@ -8,10 +10,10 @@ if __name__ == "__main__": - splits = ["nuplan_mini_test", "nuplan_mini_train", "nuplan_mini_val"] + splits = ["nuplan-mini_test", "nuplan-mini_train", "nuplan-mini_val"] # splits = ["nuplan_private_test"] # splits = ["carla"] - splits = ["wopd_val"] + # splits = ["wopd_val"] # splits = ["av2-sensor-mini_train"] log_names = None scene_uuids = None @@ -26,7 +28,7 @@ shuffle=True, camera_types=[PinholeCameraType.CAM_F0], ) - scene_builder = ArrowSceneBuilder(os.environ["D123_DATA_ROOT"]) + scene_builder = ArrowSceneBuilder(Path(os.environ["D123_DATA_ROOT"])) worker = Sequential() scenes = scene_builder.get_scenes(scene_filter, worker) print(f"Found {len(scenes)} scenes") From 993ec9e27858712b60580f7a57d01ae3d6ade142 Mon Sep 17 00:00:00 2001 From: Daniel Dauner Date: Sun, 12 Oct 2025 21:24:32 +0200 Subject: [PATCH 076/145] Expand map writer support to av2 (#51) --- .../datasets/av2/av2_map_conversion copy.py | 525 ++++++++++++++++++ .../datasets/av2/av2_map_conversion.py | 325 ++++------- .../datasets/av2/av2_sensor_converter.py | 51 +- .../map_utils/road_edge/road_edge_3d_utils.py | 12 - .../datasets/av2_sensor_dataset.yaml | 5 +- .../config/conversion/default_conversion.yaml | 4 +- notebooks/viz/bev_matplotlib.ipynb | 6 +- test_viser.py | 4 +- 8 files changed, 674 insertions(+), 258 deletions(-) create mode 100644 d123/conversion/datasets/av2/av2_map_conversion copy.py diff --git a/d123/conversion/datasets/av2/av2_map_conversion copy.py b/d123/conversion/datasets/av2/av2_map_conversion copy.py new file mode 100644 index 00000000..f45e6d0a --- /dev/null +++ b/d123/conversion/datasets/av2/av2_map_conversion copy.py @@ -0,0 +1,525 @@ +# import json +# import warnings +# from pathlib import Path +# from typing import Any, Dict, Final, List + +# import geopandas as gpd +# import 
numpy as np +# import numpy.typing as npt +# import pandas as pd +# import shapely +# import shapely.geometry as geom + +# from d123.conversion.datasets.av2.av2_constants import AV2_ROAD_LINE_TYPE_MAPPING +# from d123.conversion.utils.map_utils.road_edge.road_edge_2d_utils import split_line_geometry_by_max_length +# from d123.conversion.utils.map_utils.road_edge.road_edge_3d_utils import ( +# get_road_edges_3d_from_generic_drivable_area_df, +# ) +# from d123.datatypes.maps.map_datatypes import MapLayer, RoadEdgeType +# from d123.geometry import OccupancyMap2D, Point3DIndex, Polyline2D, Polyline3D + +# LANE_GROUP_MARK_TYPES: List[str] = [ +# "DASHED_WHITE", +# "DOUBLE_DASH_WHITE", +# "DASH_SOLID_WHITE", +# "SOLID_DASH_WHITE", +# "SOLID_WHITE", +# ] +# MAX_ROAD_EDGE_LENGTH: Final[float] = 100.0 # TODO: Add to config + + +# def convert_av2_map(source_log_path: Path, map_file_path: Path) -> None: + +# def _extract_polyline(data: List[Dict[str, float]], close: bool = False) -> Polyline3D: +# polyline = np.array([[p["x"], p["y"], p["z"]] for p in data], dtype=np.float64) +# if close: +# polyline = np.vstack([polyline, polyline[0]]) + +# return Polyline3D.from_array(polyline) + +# map_folder = source_log_path / "map" +# log_map_archive_path = next(map_folder.glob("log_map_archive_*.json")) + +# with open(log_map_archive_path, "r") as f: +# log_map_archive = json.load(f) + +# drivable_areas: Dict[int, Polyline3D] = {} + +# for drivable_area_id, drivable_area_dict in log_map_archive["drivable_areas"].items(): +# # keys: ["area_boundary", "id"] +# drivable_areas[int(drivable_area_id)] = _extract_polyline(drivable_area_dict["area_boundary"], close=True) + +# for lane_segment_id, lane_segment_dict in log_map_archive["lane_segments"].items(): +# # keys = [ +# # "id", +# # "is_intersection", +# # "lane_type", +# # "left_lane_boundary", +# # "left_lane_mark_type", +# # "right_lane_boundary", +# # "right_lane_mark_type", +# # "successors", +# # "predecessors", +# # "right_neighbor_id", +# # "left_neighbor_id", +# # ] +# lane_segment_dict["left_lane_boundary"] = _extract_polyline(lane_segment_dict["left_lane_boundary"]) +# lane_segment_dict["right_lane_boundary"] = _extract_polyline(lane_segment_dict["right_lane_boundary"]) + +# for crosswalk_id, crosswalk_dict in log_map_archive["pedestrian_crossings"].items(): +# # keys = ["id", "outline"] +# # https://github.com/argoverse/av2-api/blob/6b22766247eda941cb1953d6a58e8d5631c561da/src/av2/map/pedestrian_crossing.py + +# p1, p2 = np.array([[p["x"], p["y"], p["z"]] for p in crosswalk_dict["edge1"]], dtype=np.float64) +# p3, p4 = np.array([[p["x"], p["y"], p["z"]] for p in crosswalk_dict["edge2"]], dtype=np.float64) +# crosswalk_dict["outline"] = Polyline3D.from_array(np.array([p1, p2, p4, p3, p1], dtype=np.float64)) + +# lane_group_dict = _extract_lane_group_dict(log_map_archive["lane_segments"]) +# intersection_dict = _extract_intersection_dict(log_map_archive["lane_segments"], lane_group_dict) + +# dataframes: Dict[MapLayer, gpd.GeoDataFrame] = {} + +# dataframes[MapLayer.LANE] = get_lane_df(log_map_archive["lane_segments"]) +# dataframes[MapLayer.LANE_GROUP] = get_lane_group_df(lane_group_dict) +# dataframes[MapLayer.INTERSECTION] = get_intersections_df(intersection_dict) +# dataframes[MapLayer.CROSSWALK] = get_crosswalk_df(log_map_archive["pedestrian_crossings"]) +# dataframes[MapLayer.GENERIC_DRIVABLE] = get_generic_drivable_df(drivable_areas) +# dataframes[MapLayer.ROAD_EDGE] = get_road_edge_df(dataframes[MapLayer.GENERIC_DRIVABLE]) +# 
dataframes[MapLayer.ROAD_LINE] = get_road_line_df(log_map_archive["lane_segments"]) +# # NOTE: AV2 does not provide walkways or carparks, so we create an empty DataFrame. +# dataframes[MapLayer.WALKWAY] = get_empty_gdf() +# dataframes[MapLayer.CARPARK] = get_empty_gdf() + +# map_file_path.unlink(missing_ok=True) +# if not map_file_path.parent.exists(): +# map_file_path.parent.mkdir(parents=True, exist_ok=True) + +# with warnings.catch_warnings(): +# warnings.filterwarnings("ignore", message="'crs' was not provided") +# for layer, gdf in dataframes.items(): +# gdf.to_file(map_file_path, layer=layer.serialize(), driver="GPKG", mode="a") + + +# def get_empty_gdf() -> gpd.GeoDataFrame: +# ids = [] +# outlines = [] +# geometries = [] +# data = pd.DataFrame({"id": ids, "outline": outlines}) +# gdf = gpd.GeoDataFrame(data, geometry=geometries) +# return gdf + + +# def get_lane_df(lanes: Dict[int, Any]) -> gpd.GeoDataFrame: + +# ids = [int(lane_id) for lane_id in lanes.keys()] +# lane_types = [0] * len(ids) # TODO: Add lane types +# lane_group_ids = [] +# speed_limits_mps = [] +# predecessor_ids = [] +# successor_ids = [] +# left_boundaries = [] +# right_boundaries = [] +# left_lane_ids = [] +# right_lane_ids = [] +# baseline_paths = [] +# geometries = [] + +# def _get_centerline_from_boundaries( +# left_boundary: Polyline3D, right_boundary: Polyline3D, resolution: float = 0.1 +# ) -> Polyline3D: + +# points_per_meter = 1 / resolution +# num_points = int(np.ceil(max([right_boundary.length, left_boundary.length]) * points_per_meter)) +# right_array = right_boundary.interpolate(np.linspace(0, right_boundary.length, num_points, endpoint=True)) +# left_array = left_boundary.interpolate(np.linspace(0, left_boundary.length, num_points, endpoint=True)) + +# return Polyline3D.from_array(np.mean([right_array, left_array], axis=0)) + +# for lane_id, lane_dict in lanes.items(): +# # keys = [ +# # "id", +# # "is_intersection", +# # "lane_type", +# # "left_lane_boundary", +# # "left_lane_mark_type", +# # "right_lane_boundary", +# # "right_lane_mark_type", +# # "successors", +# # "predecessors", +# # "right_neighbor_id", +# # "left_neighbor_id", +# # ] +# lane_centerline = _get_centerline_from_boundaries( +# left_boundary=lane_dict["left_lane_boundary"], +# right_boundary=lane_dict["right_lane_boundary"], +# ) +# lane_speed_limit_mps = None # TODO: Consider using geo reference to retrieve speed limits. 
+# lane_group_ids.append(lane_id) +# speed_limits_mps.append(lane_speed_limit_mps) +# predecessor_ids.append(lane_dict["predecessors"]) +# successor_ids.append(lane_dict["successors"]) +# left_boundaries.append(lane_dict["left_lane_boundary"].linestring) +# right_boundaries.append(lane_dict["right_lane_boundary"].linestring) +# left_lane_ids.append(lane_dict["left_neighbor_id"]) +# right_lane_ids.append(lane_dict["right_neighbor_id"]) +# baseline_paths.append(lane_centerline.linestring) + +# geometry = geom.Polygon( +# np.vstack( +# [ +# lane_dict["left_lane_boundary"].array[:, :2], +# lane_dict["right_lane_boundary"].array[:, :2][::-1], +# ] +# ) +# ) +# geometries.append(geometry) + +# data = pd.DataFrame( +# { +# "id": ids, +# "lane_type": lane_types, +# "lane_group_id": lane_group_ids, +# "speed_limit_mps": speed_limits_mps, +# "predecessor_ids": predecessor_ids, +# "successor_ids": successor_ids, +# "left_boundary": left_boundaries, +# "right_boundary": right_boundaries, +# "left_lane_id": left_lane_ids, +# "right_lane_id": right_lane_ids, +# "baseline_path": baseline_paths, +# } +# ) + +# gdf = gpd.GeoDataFrame(data, geometry=geometries) +# return gdf + + +# def get_lane_group_df(lane_group_dict: Dict[int, Any]) -> gpd.GeoDataFrame: + +# ids = list(lane_group_dict.keys()) +# lane_ids = [] +# intersection_ids = [] +# predecessor_lane_group_ids = [] +# successor_lane_group_ids = [] +# left_boundaries = [] +# right_boundaries = [] +# geometries = [] + +# for lane_group_id, lane_group_values in lane_group_dict.items(): + +# lane_ids.append(lane_group_values["lane_ids"]) +# intersection_ids.append(lane_group_values["intersection_id"]) + +# predecessor_lane_group_ids.append(lane_group_values["predecessor_ids"]) +# successor_lane_group_ids.append(lane_group_values["successor_ids"]) +# left_boundaries.append(lane_group_values["left_boundary"].linestring) +# right_boundaries.append(lane_group_values["right_boundary"].linestring) +# geometry = geom.Polygon( +# np.vstack( +# [ +# lane_group_values["left_boundary"].array[:, :2], +# lane_group_values["right_boundary"].array[:, :2][::-1], +# lane_group_values["left_boundary"].array[0, :2][None, ...], +# ] +# ) +# ) +# geometries.append(geometry) + +# data = pd.DataFrame( +# { +# "id": ids, +# "lane_ids": lane_ids, +# "intersection_id": intersection_ids, +# "predecessor_ids": predecessor_lane_group_ids, +# "successor_ids": successor_lane_group_ids, +# "left_boundary": left_boundaries, +# "right_boundary": right_boundaries, +# } +# ) +# gdf = gpd.GeoDataFrame(data, geometry=geometries) +# return gdf + + +# def get_intersections_df(intersection_dict: Dict[int, Any]) -> gpd.GeoDataFrame: +# ids = [] +# lane_group_ids = [] +# outlines = [] +# geometries = [] + +# for intersection_id, intersection_values in intersection_dict.items(): +# ids.append(intersection_id) +# lane_group_ids.append(intersection_values["lane_group_ids"]) +# outlines.append(intersection_values["outline_3d"].linestring) +# geometries.append(geom.Polygon(intersection_values["outline_3d"].array[:, Point3DIndex.XY])) + +# data = pd.DataFrame({"id": ids, "lane_group_ids": lane_group_ids, "outline": outlines}) +# gdf = gpd.GeoDataFrame(data, geometry=geometries) +# return gdf + + +# def get_carpark_df(carparks) -> gpd.GeoDataFrame: +# ids = list(carparks.keys()) +# outlines = [geom.LineString(outline) for outline in carparks.values()] +# geometries = [geom.Polygon(outline[..., Point3DIndex.XY]) for outline in carparks.values()] + +# data = pd.DataFrame({"id": ids, "outline": outlines}) 
+# gdf = gpd.GeoDataFrame(data, geometry=geometries) +# return gdf + + +# def get_walkway_df() -> gpd.GeoDataFrame: +# ids = [] +# geometries = [] + +# # NOTE: WOPD does not provide walkways, so we create an empty DataFrame. +# data = pd.DataFrame({"id": ids}) +# gdf = gpd.GeoDataFrame(data, geometry=geometries) +# return gdf + + +# def get_crosswalk_df(crosswalks: Dict[int, npt.NDArray[np.float64]]) -> gpd.GeoDataFrame: +# ids = list(crosswalks.keys()) +# outlines = [] +# geometries = [] +# for crosswalk_dict in crosswalks.values(): +# outline = crosswalk_dict["outline"] +# outlines.append(outline.linestring) +# geometries.append(geom.Polygon(outline.array[:, Point3DIndex.XY])) + +# data = pd.DataFrame({"id": ids, "outline": outlines}) +# gdf = gpd.GeoDataFrame(data, geometry=geometries) +# return gdf + + +# def get_generic_drivable_df(drivable_areas: Dict[int, Polyline3D]) -> gpd.GeoDataFrame: +# ids = list(drivable_areas.keys()) +# outlines = [drivable_area.linestring for drivable_area in drivable_areas.values()] +# geometries = [geom.Polygon(drivable_area.array[:, Point3DIndex.XY]) for drivable_area in drivable_areas.values()] + +# data = pd.DataFrame({"id": ids, "outline": outlines}) +# gdf = gpd.GeoDataFrame(data, geometry=geometries) +# return gdf + + +# def get_road_edge_df(generic_drivable_area_df: gpd.GeoDataFrame) -> gpd.GeoDataFrame: +# road_edges = get_road_edges_3d_from_generic_drivable_area_df(generic_drivable_area_df) +# road_edges = split_line_geometry_by_max_length(road_edges, MAX_ROAD_EDGE_LENGTH) + +# ids = np.arange(len(road_edges), dtype=np.int64).tolist() +# # TODO @DanielDauner: Figure out if other types should/could be assigned here. +# road_edge_types = [int(RoadEdgeType.ROAD_EDGE_BOUNDARY)] * len(road_edges) +# geometries = road_edges +# return gpd.GeoDataFrame(pd.DataFrame({"id": ids, "road_edge_type": road_edge_types}), geometry=geometries) + + +# def get_road_line_df(lanes: Dict[int, Any]) -> gpd.GeoDataFrame: + +# # TODO @DanielDauner: Allow lanes to reference road line dataframe. + +# ids = [] +# road_lines_type = [] +# geometries = [] + +# running_id = 0 +# for lane in lanes.values(): +# for side in ["left", "right"]: +# # NOTE: We currently ignore lane markings that are NONE in the AV2 dataset. +# # TODO: Review if the road line system should be changed in the future. 
+# if lane[f"{side}_lane_mark_type"] == "NONE": +# continue + +# ids.append(running_id) +# road_lines_type.append(AV2_ROAD_LINE_TYPE_MAPPING[lane[f"{side}_lane_mark_type"]]) +# geometries.append(lane[f"{side}_lane_boundary"].linestring) +# running_id += 1 + +# data = pd.DataFrame({"id": ids, "road_line_type": road_lines_type}) +# gdf = gpd.GeoDataFrame(data, geometry=geometries) +# return gdf + + +# def _extract_lane_group_dict(lanes: Dict[int, Any]) -> gpd.GeoDataFrame: + +# lane_group_sets = _extract_lane_group(lanes) +# lane_group_set_dict = {i: lane_group for i, lane_group in enumerate(lane_group_sets)} + +# lane_group_dict: Dict[int, Dict[str, Any]] = {} + +# def _get_lane_group_ids_of_lanes_ids(lane_ids: List[str]) -> List[int]: +# """Helper to find lane group ids that contain any of the given lane ids.""" +# lane_group_ids_ = [] +# for lane_group_id_, lane_group_set_ in lane_group_set_dict.items(): +# if any(str(lane_id) in lane_group_set_ for lane_id in lane_ids): +# lane_group_ids_.append(lane_group_id_) +# return list(set(lane_group_ids_)) + +# for lane_group_id, lane_group_set in lane_group_set_dict.items(): + +# lane_group_dict[lane_group_id] = {} +# lane_group_dict[lane_group_id]["id"] = lane_group_id +# lane_group_dict[lane_group_id]["lane_ids"] = [int(lane_id) for lane_id in lane_group_set] + +# successor_lanes = [] +# predecessor_lanes = [] +# for lane_id in lane_group_set: +# lane_dict = lanes[str(lane_id)] +# successor_lanes.extend(lane_dict["successors"]) +# predecessor_lanes.extend(lane_dict["predecessors"]) + +# left_boundary = lanes[lane_group_set[0]]["left_lane_boundary"] +# right_boundary = lanes[lane_group_set[-1]]["right_lane_boundary"] + +# lane_group_dict[lane_group_id]["intersection_id"] = None +# lane_group_dict[lane_group_id]["predecessor_ids"] = _get_lane_group_ids_of_lanes_ids(predecessor_lanes) +# lane_group_dict[lane_group_id]["successor_ids"] = _get_lane_group_ids_of_lanes_ids(successor_lanes) +# lane_group_dict[lane_group_id]["left_boundary"] = left_boundary +# lane_group_dict[lane_group_id]["right_boundary"] = right_boundary +# outline_array = np.vstack( +# [ +# left_boundary.array[:, :3], +# right_boundary.array[:, :3][::-1], +# left_boundary.array[0, :3][None, ...], +# ] +# ) + +# lane_group_dict[lane_group_id]["outline"] = Polyline3D.from_array(outline_array) + +# return lane_group_dict + + +# def _extract_lane_group(lanes) -> List[List[str]]: + +# visited = set() +# lane_groups = [] + +# def _get_valid_neighbor_id(lane_data, direction): +# """Helper function to safely get neighbor ID""" +# neighbor_key = f"{direction}_neighbor_id" +# neighbor_id = str(lane_data.get(neighbor_key)) +# mark_type = lane_data.get(f"{direction}_lane_mark_type", None) + +# if (neighbor_id is not None) and (neighbor_id in lanes) and (mark_type in LANE_GROUP_MARK_TYPES): +# return neighbor_id +# return None + +# def _traverse_group(start_lane_id): +# """ +# Traverse left and right from a starting lane to find all connected parallel lanes +# """ +# group = [start_lane_id] +# queue = [start_lane_id] + +# while queue: +# current_id = queue.pop(0) +# if current_id in visited: +# continue + +# visited.add(current_id) + +# # Check left neighbor +# left_neighbor = _get_valid_neighbor_id(lanes[current_id], "left") +# if left_neighbor is not None and left_neighbor not in visited: +# queue.append(left_neighbor) +# group = [left_neighbor] + group + +# # Check right neighbor +# right_neighbor = _get_valid_neighbor_id(lanes[current_id], "right") +# if right_neighbor is not None and 
right_neighbor not in visited: +# queue.append(right_neighbor) +# group = group + [right_neighbor] + +# return group + +# # Find all lane groups +# for lane_id in lanes: +# if lane_id not in visited: +# group = _traverse_group(lane_id) +# lane_groups.append(group) + +# return lane_groups + + +# def _extract_intersection_dict( +# lanes: Dict[int, Any], lane_group_dict: Dict[int, Any], max_distance: float = 0.01 +# ) -> Dict[str, Any]: + +# def _interpolate_z_on_segment(point: shapely.Point, segment_coords: npt.NDArray[np.float64]) -> float: +# """Interpolate Z coordinate along a 3D line segment.""" +# p1, p2 = segment_coords[0], segment_coords[1] + +# # Project point onto segment +# segment_vec = p2[:2] - p1[:2] +# point_vec = np.array([point.x, point.y]) - p1[:2] + +# # Handle degenerate case +# segment_length_sq = np.dot(segment_vec, segment_vec) +# if segment_length_sq == 0: +# return p1[2] + +# # Calculate projection parameter +# t = np.dot(point_vec, segment_vec) / segment_length_sq +# t = np.clip(t, 0, 1) # Clamp to segment bounds + +# # Interpolate Z +# return p1[2] + t * (p2[2] - p1[2]) + +# # 1. Collect all lane groups where at least one lane is marked as an intersection. +# lane_group_intersection_dict = {} +# for lane_group_id, lane_group in lane_group_dict.items(): +# is_intersection_lanes = [lanes[str(lane_id)]["is_intersection"] for lane_id in lane_group["lane_ids"]] +# if any(is_intersection_lanes): +# lane_group_intersection_dict[lane_group_id] = lane_group + +# # 2. Merge polygons of lane groups that are marked as intersections. +# lane_group_intersection_geometry = { +# lane_group_id: shapely.Polygon(lane_group["outline"].array[:, Point3DIndex.XY]) +# for lane_group_id, lane_group in lane_group_intersection_dict.items() +# } +# intersection_polygons = gpd.GeoSeries(lane_group_intersection_geometry).union_all() + +# # 3. Collect all intersection polygons and their lane group IDs. +# intersection_dict = {} +# for intersection_idx, intersection_polygon in enumerate(intersection_polygons.geoms): +# if intersection_polygon.is_empty: +# continue +# lane_group_ids = [ +# lane_group_id +# for lane_group_id, lane_group_polygon in lane_group_intersection_geometry.items() +# if intersection_polygon.intersects(lane_group_polygon) +# ] +# for lane_group_id in lane_group_ids: +# lane_group_dict[lane_group_id]["intersection_id"] = intersection_idx + +# intersection_dict[intersection_idx] = { +# "id": intersection_idx, +# "outline_2d": Polyline2D.from_array(np.array(list(intersection_polygon.exterior.coords), dtype=np.float64)), +# "lane_group_ids": lane_group_ids, +# } + +# # 4. Lift intersection outlines to 3D. 
+# boundary_segments = [] +# for lane_group in lane_group_intersection_dict.values(): +# coords = np.array(lane_group["outline"].linestring.coords, dtype=np.float64).reshape(-1, 1, 3) +# segment_coords_boundary = np.concatenate([coords[:-1], coords[1:]], axis=1) +# boundary_segments.append(segment_coords_boundary) + +# boundary_segments = np.concatenate(boundary_segments, axis=0) +# boundary_segment_linestrings = shapely.creation.linestrings(boundary_segments) +# occupancy_map = OccupancyMap2D(boundary_segment_linestrings) + +# for intersection_id, intersection_data in intersection_dict.items(): +# points_2d = intersection_data["outline_2d"].array +# points_3d = np.zeros((len(points_2d), 3), dtype=np.float64) +# points_3d[:, :2] = points_2d + +# query_points = shapely.creation.points(points_2d) +# results = occupancy_map.query_nearest(query_points, max_distance=max_distance, exclusive=True) +# for query_idx, geometry_idx in zip(*results): +# query_point = query_points[query_idx] +# segment_coords = boundary_segments[geometry_idx] +# best_z = _interpolate_z_on_segment(query_point, segment_coords) +# points_3d[query_idx, 2] = best_z + +# intersection_dict[intersection_id]["outline_3d"] = Polyline3D.from_array(points_3d) + +# return intersection_dict diff --git a/d123/conversion/datasets/av2/av2_map_conversion.py b/d123/conversion/datasets/av2/av2_map_conversion.py index 9aae5385..ab3bdd66 100644 --- a/d123/conversion/datasets/av2/av2_map_conversion.py +++ b/d123/conversion/datasets/av2/av2_map_conversion.py @@ -1,21 +1,30 @@ import json -import warnings from pathlib import Path from typing import Any, Dict, Final, List import geopandas as gpd import numpy as np import numpy.typing as npt -import pandas as pd import shapely import shapely.geometry as geom from d123.conversion.datasets.av2.av2_constants import AV2_ROAD_LINE_TYPE_MAPPING -from d123.conversion.utils.map_utils.road_edge.road_edge_2d_utils import split_line_geometry_by_max_length -from d123.conversion.utils.map_utils.road_edge.road_edge_3d_utils import ( - get_road_edges_3d_from_generic_drivable_area_df, +from d123.conversion.map_writer.abstract_map_writer import AbstractMapWriter +from d123.conversion.utils.map_utils.road_edge.road_edge_2d_utils import ( + get_road_edge_linear_rings, + split_line_geometry_by_max_length, ) -from d123.datatypes.maps.map_datatypes import MapLayer, RoadEdgeType +from d123.conversion.utils.map_utils.road_edge.road_edge_3d_utils import lift_road_edges_to_3d +from d123.datatypes.maps.cache.cache_map_objects import ( + CacheCrosswalk, + CacheGenericDrivable, + CacheIntersection, + CacheLane, + CacheLaneGroup, + CacheRoadEdge, + CacheRoadLine, +) +from d123.datatypes.maps.map_datatypes import RoadEdgeType from d123.geometry import OccupancyMap2D, Point3DIndex, Polyline2D, Polyline3D LANE_GROUP_MARK_TYPES: List[str] = [ @@ -28,7 +37,7 @@ MAX_ROAD_EDGE_LENGTH: Final[float] = 100.0 # TODO: Add to config -def convert_av2_map(source_log_path: Path, map_file_path: Path) -> None: +def convert_av2_map(source_log_path: Path, map_writer: AbstractMapWriter) -> None: def _extract_polyline(data: List[Dict[str, float]], close: bool = False) -> Polyline3D: polyline = np.array([[p["x"], p["y"], p["z"]] for p in data], dtype=np.float64) @@ -44,7 +53,6 @@ def _extract_polyline(data: List[Dict[str, float]], close: bool = False) -> Poly log_map_archive = json.load(f) drivable_areas: Dict[int, Polyline3D] = {} - for drivable_area_id, drivable_area_dict in log_map_archive["drivable_areas"].items(): # keys: ["area_boundary", 
"id"] drivable_areas[int(drivable_area_id)] = _extract_polyline(drivable_area_dict["area_boundary"], close=True) @@ -77,52 +85,16 @@ def _extract_polyline(data: List[Dict[str, float]], close: bool = False) -> Poly lane_group_dict = _extract_lane_group_dict(log_map_archive["lane_segments"]) intersection_dict = _extract_intersection_dict(log_map_archive["lane_segments"], lane_group_dict) - dataframes: Dict[MapLayer, gpd.GeoDataFrame] = {} - - dataframes[MapLayer.LANE] = get_lane_df(log_map_archive["lane_segments"]) - dataframes[MapLayer.LANE_GROUP] = get_lane_group_df(lane_group_dict) - dataframes[MapLayer.INTERSECTION] = get_intersections_df(intersection_dict) - dataframes[MapLayer.CROSSWALK] = get_crosswalk_df(log_map_archive["pedestrian_crossings"]) - dataframes[MapLayer.GENERIC_DRIVABLE] = get_generic_drivable_df(drivable_areas) - dataframes[MapLayer.ROAD_EDGE] = get_road_edge_df(dataframes[MapLayer.GENERIC_DRIVABLE]) - dataframes[MapLayer.ROAD_LINE] = get_road_line_df(log_map_archive["lane_segments"]) - # NOTE: AV2 does not provide walkways or carparks, so we create an empty DataFrame. - dataframes[MapLayer.WALKWAY] = get_empty_gdf() - dataframes[MapLayer.CARPARK] = get_empty_gdf() - - map_file_path.unlink(missing_ok=True) - if not map_file_path.parent.exists(): - map_file_path.parent.mkdir(parents=True, exist_ok=True) - - with warnings.catch_warnings(): - warnings.filterwarnings("ignore", message="'crs' was not provided") - for layer, gdf in dataframes.items(): - gdf.to_file(map_file_path, layer=layer.serialize(), driver="GPKG", mode="a") - - -def get_empty_gdf() -> gpd.GeoDataFrame: - ids = [] - outlines = [] - geometries = [] - data = pd.DataFrame({"id": ids, "outline": outlines}) - gdf = gpd.GeoDataFrame(data, geometry=geometries) - return gdf - - -def get_lane_df(lanes: Dict[int, Any]) -> gpd.GeoDataFrame: - - ids = [int(lane_id) for lane_id in lanes.keys()] - lane_types = [0] * len(ids) # TODO: Add lane types - lane_group_ids = [] - speed_limits_mps = [] - predecessor_ids = [] - successor_ids = [] - left_boundaries = [] - right_boundaries = [] - left_lane_ids = [] - right_lane_ids = [] - baseline_paths = [] - geometries = [] + _write_av2_lanes(log_map_archive["lane_segments"], map_writer) + _write_av2_lane_group(lane_group_dict, map_writer) + _write_av2_intersections(intersection_dict, map_writer) + _write_av2_crosswalks(log_map_archive["pedestrian_crossings"], map_writer) + _write_av2_generic_drivable(drivable_areas, map_writer) + _write_av2_road_edge(drivable_areas, map_writer) + _write_av2_road_lines(log_map_archive["lane_segments"], map_writer) + + +def _write_av2_lanes(lanes: Dict[int, Any], map_writer: AbstractMapWriter) -> None: def _get_centerline_from_boundaries( left_boundary: Polyline3D, right_boundary: Polyline3D, resolution: float = 0.1 @@ -136,206 +108,120 @@ def _get_centerline_from_boundaries( return Polyline3D.from_array(np.mean([right_array, left_array], axis=0)) for lane_id, lane_dict in lanes.items(): - # keys = [ - # "id", - # "is_intersection", - # "lane_type", - # "left_lane_boundary", - # "left_lane_mark_type", - # "right_lane_boundary", - # "right_lane_mark_type", - # "successors", - # "predecessors", - # "right_neighbor_id", - # "left_neighbor_id", - # ] lane_centerline = _get_centerline_from_boundaries( left_boundary=lane_dict["left_lane_boundary"], right_boundary=lane_dict["right_lane_boundary"], ) - lane_speed_limit_mps = None # TODO: Consider using geo reference to retrieve speed limits. 
- lane_group_ids.append(lane_id) - speed_limits_mps.append(lane_speed_limit_mps) - predecessor_ids.append(lane_dict["predecessors"]) - successor_ids.append(lane_dict["successors"]) - left_boundaries.append(lane_dict["left_lane_boundary"].linestring) - right_boundaries.append(lane_dict["right_lane_boundary"].linestring) - left_lane_ids.append(lane_dict["left_neighbor_id"]) - right_lane_ids.append(lane_dict["right_neighbor_id"]) - baseline_paths.append(lane_centerline.linestring) - - geometry = geom.Polygon( - np.vstack( - [ - lane_dict["left_lane_boundary"].array[:, :2], - lane_dict["right_lane_boundary"].array[:, :2][::-1], - ] + + map_writer.write_lane( + CacheLane( + object_id=lane_id, + lane_group_id=lane_dict["lane_group_id"], + left_boundary=lane_dict["left_lane_boundary"], + right_boundary=lane_dict["right_lane_boundary"], + centerline=lane_centerline, + left_lane_id=lane_dict["left_neighbor_id"], + right_lane_id=lane_dict["right_neighbor_id"], + predecessor_ids=lane_dict["predecessors"], + successor_ids=lane_dict["successors"], + speed_limit_mps=None, + outline=None, # Inferred from boundaries + geometry=None, ) ) - geometries.append(geometry) - - data = pd.DataFrame( - { - "id": ids, - "lane_type": lane_types, - "lane_group_id": lane_group_ids, - "speed_limit_mps": speed_limits_mps, - "predecessor_ids": predecessor_ids, - "successor_ids": successor_ids, - "left_boundary": left_boundaries, - "right_boundary": right_boundaries, - "left_lane_id": left_lane_ids, - "right_lane_id": right_lane_ids, - "baseline_path": baseline_paths, - } - ) - - gdf = gpd.GeoDataFrame(data, geometry=geometries) - return gdf -def get_lane_group_df(lane_group_dict: Dict[int, Any]) -> gpd.GeoDataFrame: - - ids = list(lane_group_dict.keys()) - lane_ids = [] - intersection_ids = [] - predecessor_lane_group_ids = [] - successor_lane_group_ids = [] - left_boundaries = [] - right_boundaries = [] - geometries = [] +def _write_av2_lane_group(lane_group_dict: Dict[int, Any], map_writer: AbstractMapWriter) -> None: for lane_group_id, lane_group_values in lane_group_dict.items(): - lane_ids.append(lane_group_values["lane_ids"]) - intersection_ids.append(lane_group_values["intersection_id"]) - - predecessor_lane_group_ids.append(lane_group_values["predecessor_ids"]) - successor_lane_group_ids.append(lane_group_values["successor_ids"]) - left_boundaries.append(lane_group_values["left_boundary"].linestring) - right_boundaries.append(lane_group_values["right_boundary"].linestring) - geometry = geom.Polygon( - np.vstack( - [ - lane_group_values["left_boundary"].array[:, :2], - lane_group_values["right_boundary"].array[:, :2][::-1], - lane_group_values["left_boundary"].array[0, :2][None, ...], - ] + map_writer.write_lane_group( + CacheLaneGroup( + object_id=lane_group_id, + lane_ids=lane_group_values["lane_ids"], + left_boundary=lane_group_values["left_boundary"], + right_boundary=lane_group_values["right_boundary"], + intersection_id=lane_group_values["intersection_id"], + predecessor_ids=lane_group_values["predecessor_ids"], + successor_ids=lane_group_values["successor_ids"], + outline=None, + geometry=None, ) ) - geometries.append(geometry) - - data = pd.DataFrame( - { - "id": ids, - "lane_ids": lane_ids, - "intersection_id": intersection_ids, - "predecessor_ids": predecessor_lane_group_ids, - "successor_ids": successor_lane_group_ids, - "left_boundary": left_boundaries, - "right_boundary": right_boundaries, - } - ) - gdf = gpd.GeoDataFrame(data, geometry=geometries) - return gdf - -def 
get_intersections_df(intersection_dict: Dict[int, Any]) -> gpd.GeoDataFrame: - ids = [] - lane_group_ids = [] - outlines = [] - geometries = [] +def _write_av2_intersections(intersection_dict: Dict[int, Any], map_writer: AbstractMapWriter) -> None: for intersection_id, intersection_values in intersection_dict.items(): - ids.append(intersection_id) - lane_group_ids.append(intersection_values["lane_group_ids"]) - outlines.append(intersection_values["outline_3d"].linestring) - geometries.append(geom.Polygon(intersection_values["outline_3d"].array[:, Point3DIndex.XY])) - - data = pd.DataFrame({"id": ids, "lane_group_ids": lane_group_ids, "outline": outlines}) - gdf = gpd.GeoDataFrame(data, geometry=geometries) - return gdf - - -def get_carpark_df(carparks) -> gpd.GeoDataFrame: - ids = list(carparks.keys()) - outlines = [geom.LineString(outline) for outline in carparks.values()] - geometries = [geom.Polygon(outline[..., Point3DIndex.XY]) for outline in carparks.values()] - - data = pd.DataFrame({"id": ids, "outline": outlines}) - gdf = gpd.GeoDataFrame(data, geometry=geometries) - return gdf - - -def get_walkway_df() -> gpd.GeoDataFrame: - ids = [] - geometries = [] - - # NOTE: WOPD does not provide walkways, so we create an empty DataFrame. - data = pd.DataFrame({"id": ids}) - gdf = gpd.GeoDataFrame(data, geometry=geometries) - return gdf - + map_writer.write_intersection( + CacheIntersection( + object_id=intersection_id, + lane_group_ids=intersection_values["lane_group_ids"], + outline=intersection_values["outline_3d"], + ) + ) -def get_crosswalk_df(crosswalks: Dict[int, npt.NDArray[np.float64]]) -> gpd.GeoDataFrame: - ids = list(crosswalks.keys()) - outlines = [] - geometries = [] - for crosswalk_dict in crosswalks.values(): - outline = crosswalk_dict["outline"] - outlines.append(outline.linestring) - geometries.append(geom.Polygon(outline.array[:, Point3DIndex.XY])) - data = pd.DataFrame({"id": ids, "outline": outlines}) - gdf = gpd.GeoDataFrame(data, geometry=geometries) - return gdf +def _write_av2_crosswalks(crosswalks: Dict[int, npt.NDArray[np.float64]], map_writer: AbstractMapWriter) -> None: + for cross_walk_id, crosswalk_dict in crosswalks.items(): + map_writer.write_crosswalk( + CacheCrosswalk( + object_id=cross_walk_id, + outline=crosswalk_dict["outline"], + ) + ) -def get_generic_drivable_df(drivable_areas: Dict[int, Polyline3D]) -> gpd.GeoDataFrame: - ids = list(drivable_areas.keys()) - outlines = [drivable_area.linestring for drivable_area in drivable_areas.values()] - geometries = [geom.Polygon(drivable_area.array[:, Point3DIndex.XY]) for drivable_area in drivable_areas.values()] +def _write_av2_generic_drivable(drivable_areas: Dict[int, Polyline3D], map_writer: AbstractMapWriter) -> None: + for drivable_area_id, drivable_area_outline in drivable_areas.items(): + map_writer.write_generic_drivable( + CacheGenericDrivable( + object_id=drivable_area_id, + outline=drivable_area_outline, + ) + ) - data = pd.DataFrame({"id": ids, "outline": outlines}) - gdf = gpd.GeoDataFrame(data, geometry=geometries) - return gdf +def _write_av2_road_edge(drivable_areas: Dict[int, Polyline3D], map_writer: AbstractMapWriter) -> None: -def get_road_edge_df(generic_drivable_area_df: gpd.GeoDataFrame) -> gpd.GeoDataFrame: - road_edges = get_road_edges_3d_from_generic_drivable_area_df(generic_drivable_area_df) - road_edges = split_line_geometry_by_max_length(road_edges, MAX_ROAD_EDGE_LENGTH) + # NOTE @DanielDauner: We merge all drivable areas in 2D and lift the outlines to 3D. 
+ # Currently the method assumes that the drivable areas do not overlap and all road surfaces are included. - ids = np.arange(len(road_edges), dtype=np.int64).tolist() - # TODO @DanielDauner: Figure out if other types should/could be assigned here. - road_edge_types = [int(RoadEdgeType.ROAD_EDGE_BOUNDARY)] * len(road_edges) - geometries = road_edges - return gpd.GeoDataFrame(pd.DataFrame({"id": ids, "road_edge_type": road_edge_types}), geometry=geometries) + drivable_polygons = [geom.Polygon(drivable_area.array[:, :2]) for drivable_area in drivable_areas.values()] + road_edges_2d = get_road_edge_linear_rings(drivable_polygons) + outlines_linestrings = [drivable_area.linestring for drivable_area in drivable_areas.values()] + non_conflicting_road_edges = lift_road_edges_to_3d(road_edges_2d, outlines_linestrings) + road_edges = split_line_geometry_by_max_length(non_conflicting_road_edges, MAX_ROAD_EDGE_LENGTH) + for idx, road_edge in enumerate(road_edges): -def get_road_line_df(lanes: Dict[int, Any]) -> gpd.GeoDataFrame: + # TODO @DanielDauner: Figure out if other road edge types should/could be assigned here. + map_writer.write_road_edge( + CacheRoadEdge( + object_id=idx, + road_edge_type=RoadEdgeType.ROAD_EDGE_BOUNDARY, + polyline=Polyline3D.from_linestring(road_edge), + ) + ) - # TODO @DanielDauner: Allow lanes to reference road line dataframe. - ids = [] - road_lines_type = [] - geometries = [] +def _write_av2_road_lines(lanes: Dict[int, Any], map_writer: AbstractMapWriter) -> None: - running_id = 0 + running_road_line_id = 0 for lane in lanes.values(): for side in ["left", "right"]: - # NOTE: We currently ignore lane markings that are NONE in the AV2 dataset. - # TODO: Review if the road line system should be changed in the future. + # NOTE @DanielDauner: We currently ignore lane markings that are NONE in the AV2 dataset. if lane[f"{side}_lane_mark_type"] == "NONE": continue - ids.append(running_id) - road_lines_type.append(AV2_ROAD_LINE_TYPE_MAPPING[lane[f"{side}_lane_mark_type"]]) - geometries.append(lane[f"{side}_lane_boundary"].linestring) - running_id += 1 + map_writer.write_road_line( + CacheRoadLine( + object_id=running_road_line_id, + road_line_type=AV2_ROAD_LINE_TYPE_MAPPING[lane[f"{side}_lane_mark_type"]], + polyline=lane[f"{side}_lane_boundary"], + ) + ) - data = pd.DataFrame({"id": ids, "road_line_type": road_lines_type}) - gdf = gpd.GeoDataFrame(data, geometry=geometries) - return gdf + running_road_line_id += 1 def _extract_lane_group_dict(lanes: Dict[int, Any]) -> gpd.GeoDataFrame: @@ -363,6 +249,7 @@ def _get_lane_group_ids_of_lanes_ids(lane_ids: List[str]) -> List[int]: predecessor_lanes = [] for lane_id in lane_group_set: lane_dict = lanes[str(lane_id)] + lane_dict["lane_group_id"] = lane_group_id # Assign lane to lane group. 
successor_lanes.extend(lane_dict["successors"]) predecessor_lanes.extend(lane_dict["predecessors"]) diff --git a/d123/conversion/datasets/av2/av2_sensor_converter.py b/d123/conversion/datasets/av2/av2_sensor_converter.py index 8dab4fb8..46325efc 100644 --- a/d123/conversion/datasets/av2/av2_sensor_converter.py +++ b/d123/conversion/datasets/av2/av2_sensor_converter.py @@ -20,6 +20,7 @@ ) from d123.conversion.datasets.av2.av2_map_conversion import convert_av2_map from d123.conversion.log_writer.abstract_log_writer import AbstractLogWriter +from d123.conversion.map_writer.abstract_map_writer import AbstractMapWriter from d123.datatypes.detections.detection import BoxDetectionMetadata, BoxDetectionSE3, BoxDetectionWrapper from d123.datatypes.detections.detection_types import DetectionType from d123.datatypes.scene.scene_metadata import LogMetadata @@ -62,17 +63,16 @@ def _collect_log_paths(self) -> Dict[str, List[Path]]: log_paths_and_split: List[Tuple[Path, str]] = [] for split in self._splits: - subsplit = split.split("_")[-1] - assert subsplit in ["train", "val", "test"] - - if "av2_sensor" in split: - log_folder = self._data_root / "sensor" / subsplit - elif "av2_lidar" in split: - log_folder = self._data_root / "lidar" / subsplit - elif "av2_motion" in split: - log_folder = self._data_root / "motion-forecasting" / subsplit - elif "av2-sensor-mini" in split: - log_folder = self._data_root / "sensor_mini" / subsplit + dataset_name = split.split("_")[0] + split_type = split.split("_")[-1] + assert split_type in ["train", "val", "test"] + + if "av2-sensor" == dataset_name: + log_folder = self._data_root / dataset_name / split_type + elif "av2-sensor-mini" == dataset_name: + log_folder = self._data_root / "sensor-mini" / split_type + else: + raise ValueError(f"Unknown dataset name {dataset_name} in split {split}.") log_paths_and_split.extend([(log_path, split) for log_path in log_folder.iterdir()]) @@ -86,17 +86,30 @@ def get_number_of_logs(self) -> int: """Inherited, see superclass.""" return len(self._log_paths_and_split) - def convert_map(self, map_index: int) -> None: + def convert_map(self, map_index: int, map_writer: AbstractMapWriter) -> None: """Inherited, see superclass.""" source_log_path, split = self._log_paths_and_split[map_index] - log_name = source_log_path.name - map_path = self.dataset_converter_config.output_path / "maps" / split / f"{log_name}.gpkg" - if self.dataset_converter_config.force_map_conversion or not map_path.exists(): - map_path.unlink(missing_ok=True) - if not map_path.parent.exists(): - map_path.parent.mkdir(parents=True, exist_ok=True) - convert_av2_map(source_log_path, map_path) + + # 1. Initialize Metadata, TODO: Use a MapMetadata class if needed in the future. + log_metadata = LogMetadata( + dataset="av2-sensor", + split=split, + log_name=source_log_path.name, + location=None, # TODO: Add location information. 
+ timestep_seconds=None, + vehicle_parameters=None, + camera_metadata=None, + lidar_metadata=None, + map_has_z=True, + map_is_local=True, + ) + + map_needs_writing = map_writer.reset(self.dataset_converter_config, log_metadata) + if map_needs_writing: + convert_av2_map(source_log_path, map_writer) + + map_writer.close() def convert_log(self, log_index: int, log_writer: AbstractLogWriter) -> None: """Inherited, see superclass.""" diff --git a/d123/conversion/utils/map_utils/road_edge/road_edge_3d_utils.py b/d123/conversion/utils/map_utils/road_edge/road_edge_3d_utils.py index 2cf843a3..0d4aea2b 100644 --- a/d123/conversion/utils/map_utils/road_edge/road_edge_3d_utils.py +++ b/d123/conversion/utils/map_utils/road_edge/road_edge_3d_utils.py @@ -16,18 +16,6 @@ logger = logging.getLogger(__name__) -def get_road_edges_3d_from_generic_drivable_area_df(generic_drivable_area_df: gpd.GeoDataFrame) -> List[LineString]: - """ - Extracts 3D road edges from the generic drivable area GeoDataFrame. - """ - # NOTE: this is a simplified version that assumes the generic drivable area covers all areas. - # This is the case for argoverse 2. - road_edges_2d = get_road_edge_linear_rings(generic_drivable_area_df.geometry.tolist()) - outlines = generic_drivable_area_df.outline.tolist() - non_conflicting_road_edges = lift_road_edges_to_3d(road_edges_2d, outlines) - return non_conflicting_road_edges - - def get_road_edges_3d_from_gdf( lane_df: gpd.GeoDataFrame, carpark_df: gpd.GeoDataFrame, diff --git a/d123/script/config/conversion/datasets/av2_sensor_dataset.yaml b/d123/script/config/conversion/datasets/av2_sensor_dataset.yaml index 0f2d4d9f..06893f45 100644 --- a/d123/script/config/conversion/datasets/av2_sensor_dataset.yaml +++ b/d123/script/config/conversion/datasets/av2_sensor_dataset.yaml @@ -1,5 +1,5 @@ av2_sensor_dataset: - _target_: d123.conversion.datasets.av2.av2_sensor_converter.AV2SensorDataConverter + _target_: d123.conversion.datasets.av2.av2_sensor_converter.AV2SensorConverter _convert_: 'all' splits: ["av2-sensor-mini_train"] @@ -13,6 +13,9 @@ av2_sensor_dataset: force_log_conversion: ${force_log_conversion} force_map_conversion: ${force_map_conversion} + # Map + include_map: true + # Ego include_ego: true diff --git a/d123/script/config/conversion/default_conversion.yaml b/d123/script/config/conversion/default_conversion.yaml index 71321d1e..21630d38 100644 --- a/d123/script/config/conversion/default_conversion.yaml +++ b/d123/script/config/conversion/default_conversion.yaml @@ -16,11 +16,11 @@ defaults: - log_writer: arrow_log_writer - map_writer: gpkg_map_writer - datasets: - - nuplan_mini_dataset + # - nuplan_mini_dataset # - nuplan_private_dataset # - carla_dataset # - wopd_dataset - # - av2_sensor_dataset + - av2_sensor_dataset - _self_ force_map_conversion: True diff --git a/notebooks/viz/bev_matplotlib.ipynb b/notebooks/viz/bev_matplotlib.ipynb index bd9cb6e7..61af12ea 100644 --- a/notebooks/viz/bev_matplotlib.ipynb +++ b/notebooks/viz/bev_matplotlib.ipynb @@ -42,8 +42,8 @@ "\n", "# splits = [\"wopd_train\"]\n", "# splits = [\"carla\"]\n", - "splits = [\"nuplan-mini_test\"]\n", - "# splits = [\"av2-sensor-mini_train\"]\n", + "# splits = [\"nuplan-mini_test\"]\n", + "splits = [\"av2-sensor-mini_train\"]\n", "# log_names = None\n", "\n", "\n", @@ -252,7 +252,7 @@ " return fig, ax\n", "\n", "\n", - "scene_index = 17\n", + "scene_index = 19\n", "iteration = 99\n", "fig, ax = plot_scene_at_iteration(scenes[scene_index], iteration=iteration, radius=60)\n", "plt.show()\n", diff --git 
a/test_viser.py b/test_viser.py index f8675f2e..88981b92 100644 --- a/test_viser.py +++ b/test_viser.py @@ -10,11 +10,11 @@ if __name__ == "__main__": - splits = ["nuplan-mini_test", "nuplan-mini_train", "nuplan-mini_val"] + # splits = ["nuplan-mini_test", "nuplan-mini_train", "nuplan-mini_val"] # splits = ["nuplan_private_test"] # splits = ["carla"] # splits = ["wopd_val"] - # splits = ["av2-sensor-mini_train"] + splits = ["av2-sensor-mini_train"] log_names = None scene_uuids = None From 42d6d22a3c9abcd498b5e2a8800a8a19ad781eba Mon Sep 17 00:00:00 2001 From: Daniel Dauner Date: Mon, 13 Oct 2025 22:52:03 +0200 Subject: [PATCH 077/145] Update Waymo map conversion to use map writer instead. Introduce map metadata. (#51) --- .../visualization/matplotlib/observation.py | 2 +- .../viser/elements/map_elements.py | 2 +- .../datasets/wopd/utils/wopd_constants.py | 29 + .../waymo_map_utils/womp_boundary_utils.py | 90 ++- .../waymo_map_utils/wopd_map_utils copy.py | 390 +++++++++++++ .../wopd/waymo_map_utils/wopd_map_utils.py | 526 ++++++------------ .../datasets/wopd/wopd_converter.py | 47 +- .../map_writer/abstract_map_writer.py | 4 +- d123/conversion/map_writer/gpkg_map_writer.py | 24 +- d123/datatypes/maps/abstract_map.py | 8 +- d123/datatypes/maps/gpkg/gpkg_map.py | 14 +- d123/datatypes/maps/map_datatypes.py | 5 + d123/datatypes/maps/map_metadata.py | 27 + d123/datatypes/scene/arrow/arrow_scene.py | 2 +- d123/datatypes/scene/scene_metadata.py | 24 +- .../conversion/datasets/wopd_dataset.yaml | 2 +- .../config/conversion/default_conversion.yaml | 4 +- notebooks/viz/bev_matplotlib.ipynb | 18 +- test_viser.py | 4 +- 19 files changed, 804 insertions(+), 418 deletions(-) create mode 100644 d123/conversion/datasets/wopd/waymo_map_utils/wopd_map_utils copy.py create mode 100644 d123/datatypes/maps/map_metadata.py diff --git a/d123/common/visualization/matplotlib/observation.py b/d123/common/visualization/matplotlib/observation.py index a2f47ebc..0a5d7301 100644 --- a/d123/common/visualization/matplotlib/observation.py +++ b/d123/common/visualization/matplotlib/observation.py @@ -76,7 +76,7 @@ def add_default_map_on_ax( print(f"Error adding map object of type {layer.name} and id {map_object.object_id}") traceback.print_exc() - ax.set_title(f"Map: {map_api.map_name}") + # ax.set_title(f"Map: {map_api.map_name}") def add_box_detections_to_ax(ax: plt.Axes, box_detections: BoxDetectionWrapper) -> None: diff --git a/d123/common/visualization/viser/elements/map_elements.py b/d123/common/visualization/viser/elements/map_elements.py index d25fe73f..edafef5c 100644 --- a/d123/common/visualization/viser/elements/map_elements.py +++ b/d123/common/visualization/viser/elements/map_elements.py @@ -68,7 +68,7 @@ def _get_map_trimesh_dict( trimesh_mesh.vertices[..., Point3DIndex.Z] += viser_config.map_non_road_z_offset # If the map does not have z-values, we place the surfaces on the ground level of the ego vehicle. 
- if not scene.log_metadata.map_has_z: + if not scene.log_metadata.map_metadata.map_has_z: trimesh_mesh.vertices[..., Point3DIndex.Z] += ( scene_center.z - initial_ego_state.vehicle_parameters.height / 2 ) diff --git a/d123/conversion/datasets/wopd/utils/wopd_constants.py b/d123/conversion/datasets/wopd/utils/wopd_constants.py index bad841d9..61d0a150 100644 --- a/d123/conversion/datasets/wopd/utils/wopd_constants.py +++ b/d123/conversion/datasets/wopd/utils/wopd_constants.py @@ -1,6 +1,7 @@ from typing import Dict, List from d123.datatypes.detections.detection_types import DetectionType +from d123.datatypes.maps.map_datatypes import LaneType, RoadEdgeType, RoadLineType from d123.datatypes.sensors.camera.pinhole_camera import PinholeCameraType from d123.datatypes.sensors.lidar.lidar import LiDARType @@ -37,3 +38,31 @@ 4: LiDARType.LIDAR_SIDE_RIGHT, # SIDE_RIGHT 5: LiDARType.LIDAR_BACK, # REAR } + +# https://github.com/waymo-research/waymo-open-dataset/blob/master/src/waymo_open_dataset/protos/map.proto#L206 +WAYMO_ROAD_LINE_TYPE_CONVERSION: Dict[int, RoadLineType] = { + 0: RoadLineType.UNKNOWN, # UNKNOWN + 1: RoadLineType.DASHED_WHITE, # BROKEN_SINGLE_WHITE + 2: RoadLineType.SOLID_WHITE, # SOLID_SINGLE_WHITE + 3: RoadLineType.DOUBLE_SOLID_WHITE, # SOLID_DOUBLE_WHITE + 4: RoadLineType.DASHED_YELLOW, # BROKEN_SINGLE_YELLOW + 5: RoadLineType.DOUBLE_DASH_YELLOW, # BROKEN_DOUBLE_YELLOW + 6: RoadLineType.SOLID_YELLOW, # SOLID_SINGLE_YELLOW + 7: RoadLineType.DOUBLE_SOLID_YELLOW, # SOLID_DOUBLE_YELLOW + 8: RoadLineType.DOUBLE_DASH_YELLOW, # PASSING_DOUBLE_YELLOW +} + +# https://github.com/waymo-research/waymo-open-dataset/blob/master/src/waymo_open_dataset/protos/map.proto#L186 +WAYMO_ROAD_EDGE_TYPE_CONVERSION: Dict[int, RoadEdgeType] = { + 0: RoadEdgeType.UNKNOWN, + 1: RoadEdgeType.ROAD_EDGE_BOUNDARY, + 2: RoadEdgeType.ROAD_EDGE_MEDIAN, +} + +# https://github.com/waymo-research/waymo-open-dataset/blob/master/src/waymo_open_dataset/protos/map.proto#L147 +WAYMO_LANE_TYPE_CONVERSION: Dict[int, LaneType] = { + 0: LaneType.UNDEFINED, + 1: LaneType.FREEWAY, + 2: LaneType.SURFACE_STREET, + 3: LaneType.BIKE_LANE, +} diff --git a/d123/conversion/datasets/wopd/waymo_map_utils/womp_boundary_utils.py b/d123/conversion/datasets/wopd/waymo_map_utils/womp_boundary_utils.py index 84fe34ed..a1a3e237 100644 --- a/d123/conversion/datasets/wopd/waymo_map_utils/womp_boundary_utils.py +++ b/d123/conversion/datasets/wopd/waymo_map_utils/womp_boundary_utils.py @@ -2,9 +2,10 @@ from typing import Dict, List, Optional, Tuple import numpy as np -import numpy.typing as npt import shapely.geometry as geom +from d123.datatypes.maps.abstract_map_objects import AbstractRoadEdge, AbstractRoadLine +from d123.datatypes.maps.map_datatypes import LaneType from d123.geometry import OccupancyMap2D, Point3D, Polyline3D, PolylineSE2, StateSE2, Vector2D from d123.geometry.transform.transform_se2 import translate_se2_along_body_frame from d123.geometry.utils.rotation_utils import normalize_angle @@ -34,8 +35,33 @@ def get_polyline_from_token(polyline_dict: Dict[str, Dict[int, Polyline3D]], tok return polyline_dict[line_type][line_id] +@dataclass +class WaymoLaneData: + """Helper class to store lane data.""" + + # Regular lane features + # https://github.com/waymo-research/waymo-open-dataset/blob/master/src/waymo_open_dataset/protos/map.proto#L142 + object_id: int + centerline: Polyline3D + predecessor_ids: List[int] + successor_ids: List[int] + speed_limit_mps: Optional[float] + lane_type: LaneType + + # Waymo allows multiple left/right 
neighbors
+    # https://github.com/waymo-research/waymo-open-dataset/blob/master/src/waymo_open_dataset/protos/map.proto#L111
+    left_neighbors: List[Dict[str, int]]
+    right_neighbors: List[Dict[str, int]]
+
+    # To be filled
+    left_boundary: Optional[Polyline3D] = None
+    right_boundary: Optional[Polyline3D] = None
+
+
 @dataclass
 class PerpendicularHit:
+    """Helper class to store perpendicular hit data."""
+
     distance_along_perp_2d: float
     hit_point_3d: Point3D
     hit_polyline_token: str
@@ -131,7 +157,7 @@ def _filter_perpendicular_hits(
             continue
         # 2. filter hits that are too close and not with the road edge (e.g. close lane lines)
-        if hit.distance_along_perp_2d < MIN_HIT_DISTANCE and hit.hit_polyline_type != "roadedge":
+        if hit.distance_along_perp_2d < MIN_HIT_DISTANCE and hit.hit_polyline_type != "road-edge":
             continue
         filtered_hits.append(hit)
@@ -142,28 +168,31 @@ def _filter_perpendicular_hits(
     return filtered_hits
-def extract_lane_boundaries(
-    lanes: Dict[int, npt.NDArray[np.float64]],
-    lanes_successors: Dict[int, List[int]],
-    lanes_predecessors: Dict[int, List[int]],
-    road_lines: Dict[int, npt.NDArray[np.float64]],
-    road_edges: Dict[int, npt.NDArray[np.float64]],
+def fill_lane_boundaries(
+    lane_data_dict: Dict[int, WaymoLaneData],
+    road_lines: List[AbstractRoadLine],
+    road_edges: List[AbstractRoadEdge],
 ) -> Tuple[Dict[str, Polyline3D], Dict[str, Polyline3D]]:
-    polyline_dict: Dict[str, Dict[int, Polyline3D]] = {"lane": {}, "roadline": {}, "roadedge": {}}
+    """Welcome to insanity: infers each lane's left/right boundary by probing
+    perpendicular offsets from the lane centerline and keeping the nearest
+    plausible hit on surrounding lanes, road lines, and road edges.
+
+    :param lane_data_dict: Dict mapping lane id to its WaymoLaneData helper object
+    :param road_lines: List of AbstractRoadLine objects
+    :param road_edges: List of AbstractRoadEdge objects
+    :return: Tuple of left and right lane boundaries as 3D polylines
+    """
+
+    polyline_dict: Dict[str, Dict[int, Polyline3D]] = {"lane": {}, "road-line": {}, "road-edge": {}}
     lane_polyline_se2_dict: Dict[int, PolylineSE2] = {}
-    for lane_id, lane_polyline in lanes.items():
-        if lane_polyline.ndim == 2 and lane_polyline.shape[1] == 3 and len(lane_polyline) > 0:
-            polyline_dict["lane"][lane_id] = Polyline3D.from_array(lane_polyline)
-            lane_polyline_se2_dict[f"lane_{lane_id}"] = polyline_dict["lane"][lane_id].polyline_se2
+    for lane_id, lane in lane_data_dict.items():
+        polyline_dict["lane"][lane_id] = lane.centerline
+        lane_polyline_se2_dict[f"lane_{lane_id}"] = lane.centerline.polyline_se2
-    # for road_line_id, road_line_polyline in road_lines.items():
-    #     if road_line_polyline.ndim == 2 and road_line_polyline.shape[1] == 3 and len(road_line_polyline) > 0:
-    #         polyline_dict["roadline"][road_line_id] = Polyline3D.from_array(road_line_polyline)
+    # for road_line in road_lines:
+    #     polyline_dict["road-line"][road_line.object_id] = road_line.polyline_3d
-    for road_edge_id, road_edge_polyline in road_edges.items():
-        if road_edge_polyline.ndim == 2 and road_edge_polyline.shape[1] == 3 and len(road_edge_polyline) > 0:
-            polyline_dict["roadedge"][road_edge_id] = Polyline3D.from_array(road_edge_polyline)
+    for road_edge in road_edges:
+        polyline_dict["road-edge"][road_edge.object_id] = road_edge.polyline_3d
     geometries = []
     tokens = []
@@ -218,23 +247,32 @@
         first_hit = perpendicular_hits[0]
         # 1.1. If the first hit is a road edge, use it as the boundary point
-        if first_hit.hit_polyline_type == "roadedge":
+        if first_hit.hit_polyline_type == "road-edge":
             boundary_point_3d = first_hit.hit_point_3d
-        elif first_hit.hit_polyline_type == "roadline":
+        elif first_hit.hit_polyline_type == "road-line":
             boundary_point_3d = first_hit.hit_point_3d
         elif first_hit.hit_polyline_type == "lane":
             for hit in perpendicular_hits:
-                if hit.hit_polyline_type == "roadedge":
+                if hit.hit_polyline_type == "road-edge":
                     continue
                 if hit.hit_polyline_type == "lane":
                     has_same_predecessor = (
-                        len(set(lanes_predecessors[hit.hit_polyline_id]) & set(lanes_predecessors[lane_id]))
+                        len(
+                            set(lane_data_dict[hit.hit_polyline_id].predecessor_ids)
+                            & set(lane_data_dict[lane_id].predecessor_ids)
+                        )
                         > 0
                     )
                     has_same_successor = (
-                        len(set(lanes_successors[hit.hit_polyline_id]) & set(lanes_successors[lane_id])) > 0
+                        len(
+                            set(lane_data_dict[hit.hit_polyline_id].successor_ids)
+                            & set(lane_data_dict[lane_id].successor_ids)
+                        )
+                        > 0
                     )
                     heading_min = np.pi / 8.0
                     invalid_heading_error = heading_min < abs(hit.heading_error) < (np.pi - heading_min)
@@ -284,11 +322,11 @@ def _get_default_boundary_point_3d(
     if len(final_boundary_points_3d) > 1:
         if sign == 1.0:
-            left_boundaries[lane_id] = Polyline3D.from_array(
+            lane_data_dict[lane_id].left_boundary = Polyline3D.from_array(
                 np.array(final_boundary_points_3d, dtype=np.float64)
             )
         else:
-            right_boundaries[lane_id] = Polyline3D.from_array(
+            lane_data_dict[lane_id].right_boundary = Polyline3D.from_array(
                 np.array(final_boundary_points_3d, dtype=np.float64)
             )
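NOTE: After this refactor, WOPD maps are no longer written to a GPKG path directly; the converter hands convert_wopd_map an AbstractMapWriter, mirroring the AV2 flow above. A minimal sketch of the intended call pattern follows (the TFRecord loading and the config/metadata arguments are illustrative assumptions; only convert_wopd_map and the writer's reset/close calls appear in this patch):

    import tensorflow as tf
    from waymo_open_dataset import dataset_pb2

    from d123.conversion.datasets.wopd.waymo_map_utils.wopd_map_utils import convert_wopd_map


    def write_wopd_map(tfrecord_path, map_writer, converter_config, log_metadata):
        # WOPD ships its HD map on the frame protos; the first frame of a
        # segment is typically sufficient to reconstruct the full map.
        dataset = tf.data.TFRecordDataset(str(tfrecord_path), compression_type="")
        frame = dataset_pb2.Frame()
        frame.ParseFromString(next(iter(dataset)).numpy())

        # reset() reports whether the map still needs writing (cf. the AV2
        # converter above); close() finalizes the output.
        if map_writer.reset(converter_config, log_metadata):
            convert_wopd_map(frame, map_writer)
        map_writer.close()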
diff --git a/d123/conversion/datasets/wopd/waymo_map_utils/wopd_map_utils.py b/d123/conversion/datasets/wopd/waymo_map_utils/wopd_map_utils.py
index 6baf8cf7..4f8782af 100644
--- a/d123/conversion/datasets/wopd/waymo_map_utils/wopd_map_utils.py
+++ b/d123/conversion/datasets/wopd/waymo_map_utils/wopd_map_utils.py
@@ -1,17 +1,26 @@
-from collections import defaultdict
-from pathlib import Path
 from typing import Dict, List, Optional
-import geopandas as gpd
 import numpy as np
-import numpy.typing as npt
-import pandas as pd
-import shapely.geometry as geom
 from d123.common.utils.dependencies import check_dependencies
-from d123.conversion.datasets.wopd.waymo_map_utils.womp_boundary_utils import extract_lane_boundaries
-from d123.datatypes.maps.map_datatypes import MapLayer, RoadEdgeType, RoadLineType
-from d123.geometry import Point3DIndex, Polyline3D
+from d123.conversion.datasets.wopd.utils.wopd_constants import (
+    WAYMO_LANE_TYPE_CONVERSION,
+    WAYMO_ROAD_EDGE_TYPE_CONVERSION,
+    WAYMO_ROAD_LINE_TYPE_CONVERSION,
+)
+from d123.conversion.datasets.wopd.waymo_map_utils.womp_boundary_utils import WaymoLaneData, fill_lane_boundaries
+from d123.conversion.map_writer.abstract_map_writer import AbstractMapWriter
+from d123.datatypes.maps.abstract_map_objects import AbstractLane, AbstractRoadEdge, AbstractRoadLine
+from d123.datatypes.maps.cache.cache_map_objects import (
+    CacheCarpark,
+    CacheCrosswalk,
+    CacheLane,
+    CacheLaneGroup,
+    CacheRoadEdge,
+    CacheRoadLine,
+)
+from d123.datatypes.maps.map_datatypes import LaneType, RoadEdgeType, RoadLineType
+from d123.geometry import Polyline3D
 from d123.geometry.utils.units import mph_to_mps
 check_dependencies(modules=["waymo_open_dataset"], optional_name="waymo")
@@ -23,180 +32,101 @@
 # - Implement driveways with a different semantic type if needed
 # - Implement intersections and lane group logic
-WAYMO_ROAD_LINE_CONVERSION = {
-    0: RoadLineType.UNKNOWN,  # aka. UNKNOWN
-    1: RoadLineType.DASHED_WHITE,  # aka. BROKEN_SINGLE_WHITE
-    2: RoadLineType.SOLID_WHITE,  # aka. SOLID_SINGLE_WHITE
-    3: RoadLineType.DOUBLE_SOLID_WHITE,  # aka. SOLID_DOUBLE_WHITE
-    4: RoadLineType.DASHED_YELLOW,  # aka. BROKEN_SINGLE_YELLOW
-    5: RoadLineType.DOUBLE_DASH_YELLOW,  # aka. BROKEN_DOUBLE_YELLOW
-    6: RoadLineType.SOLID_YELLOW,  # aka. SOLID_SINGLE_YELLOW
-    7: RoadLineType.DOUBLE_SOLID_YELLOW,  # aka. SOLID_DOUBLE_YELLOW
-    8: RoadLineType.DOUBLE_DASH_YELLOW,  # aka. PASSING_DOUBLE_YELLOW
-}
-
-WAYMO_ROAD_EDGE_CONVERSION = {
-    0: RoadEdgeType.UNKNOWN,
-    1: RoadEdgeType.ROAD_EDGE_BOUNDARY,
-    2: RoadEdgeType.ROAD_EDGE_MEDIAN,
-}
-
-
-def convert_wopd_map(frame: dataset_pb2.Frame, map_file_path: Path) -> None:
-
-    def _extract_polyline(data) -> npt.NDArray[np.float64]:
-        polyline = np.array([[p.x, p.y, p.z] for p in data.polyline], dtype=np.float64)
-        return polyline
-
-    def _extract_polygon(data) -> npt.NDArray[np.float64]:
-        polygon = np.array([[p.x, p.y, p.z] for p in data.polygon], dtype=np.float64)
-        assert polygon.shape[0] >= 3, "Polygon must have at least 3 points"
-        assert polygon.shape[1] == 3, "Polygon must have 3 coordinates (x, y, z)"
-        return polygon
-
-    def _extract_neighbors(data) -> List[Dict[str, int]]:
-        neighbors = []
-        for neighbor in data:
-            neighbors.append(
-                {
-                    "lane_id": neighbor.feature_id,
-                    "self_start_index": neighbor.self_start_index,
-                    "self_end_index": neighbor.self_end_index,
-                    "neighbor_start_index": neighbor.neighbor_start_index,
-                    "neighbor_end_index": neighbor.neighbor_end_index,
-                }
-            )
-        return neighbors
-    lanes: Dict[int, npt.NDArray[np.float64]] = {}
-    lanes_successors = defaultdict(list)
-    lanes_predecessors = defaultdict(list)
-    lanes_speed_limit_mps: Dict[int, float] = {}
-    lanes_type: Dict[int, int] = {}
-    lanes_left_neighbors: Dict[int, List[Dict[str, int]]] = {}
-    lanes_right_neighbors: Dict[int, List[Dict[str, int]]] = {}
+def convert_wopd_map(frame: dataset_pb2.Frame, map_writer: AbstractMapWriter) -> None:
+
+    # We first extract all road lines, road edges, and lanes, and write them to the map writer.
+    # NOTE: road lines and edges are needed to extract lane boundaries.
+    road_lines = _write_and_get_waymo_road_lines(frame, map_writer)
+    road_edges = _write_and_get_waymo_road_edges(frame, map_writer)
+    lanes = _write_and_get_waymo_lanes(frame, road_lines, road_edges, map_writer)
+
+    # Write lane groups based on the extracted lanes
+    _write_waymo_lane_groups(lanes, map_writer)
+
+    # Write miscellaneous surfaces (carparks, crosswalks, stop zones, etc.)
directly from the Waymo frame proto + _write_waymo_misc_surfaces(frame, map_writer) + + +def _write_and_get_waymo_road_lines(frame: dataset_pb2.Frame, map_writer: AbstractMapWriter) -> List[AbstractRoadLine]: + """Helper function to extract road lines from a Waymo frame proto.""" - road_lines: Dict[int, npt.NDArray[np.float64]] = {} - road_lines_type: Dict[int, RoadLineType] = {} + road_lines: List[AbstractRoadLine] = [] + for map_feature in frame.map_features: + if map_feature.HasField("road_line"): + polyline = _extract_polyline_waymo_proto(map_feature.road_line) + if polyline is not None: + road_line_type = WAYMO_ROAD_LINE_TYPE_CONVERSION.get(map_feature.road_line.type, RoadLineType.UNKNOWN) + road_lines.append( + CacheRoadLine( + object_id=map_feature.id, + road_line_type=road_line_type, + polyline=polyline, + ) + ) + + for road_line in road_lines: + map_writer.write_road_line(road_line) + + return road_lines - road_edges: Dict[int, npt.NDArray[np.float64]] = {} - road_edges_type: Dict[int, int] = {} - crosswalks: Dict[int, npt.NDArray[np.float64]] = {} - carparks: Dict[int, npt.NDArray[np.float64]] = {} +def _write_and_get_waymo_road_edges(frame: dataset_pb2.Frame, map_writer: AbstractMapWriter) -> List[AbstractRoadEdge]: + """Helper function to extract road edges from a Waymo frame proto.""" + road_edges: List[AbstractRoadEdge] = [] + for map_feature in frame.map_features: + if map_feature.HasField("road_edge"): + polyline = _extract_polyline_waymo_proto(map_feature.road_edge) + if polyline is not None: + road_edge_type = WAYMO_ROAD_EDGE_TYPE_CONVERSION.get(map_feature.road_edge.type, RoadEdgeType.UNKNOWN) + road_edges.append( + CacheRoadEdge( + object_id=map_feature.id, + road_edge_type=road_edge_type, + polyline=polyline, + ) + ) + + for road_edge in road_edges: + map_writer.write_road_edge(road_edge) + + return road_edges + + +def _write_and_get_waymo_lanes( + frame: dataset_pb2.Frame, + road_lines: List[AbstractRoadLine], + road_edges: List[AbstractRoadEdge], + map_writer: AbstractMapWriter, +) -> List[AbstractLane]: + + # 1. 
Load lane data from Waymo frame proto
+    lane_data_dict: Dict[int, WaymoLaneData] = {}
     for map_feature in frame.map_features:
         if map_feature.HasField("lane"):
-            polyline = _extract_polyline(map_feature.lane)
-            # Ignore lanes with less than 2 points or not 2D
-            if polyline.ndim != 2 or polyline.shape[0] < 2:
-                continue
-            lanes[map_feature.id] = polyline
-            for lane_id_ in map_feature.lane.exit_lanes:
-                lanes_successors[map_feature.id].append(lane_id_)
-            for lane_id_ in map_feature.lane.exit_lanes:
-                lanes_predecessors[map_feature.id].append(lane_id_)
-            lanes_speed_limit_mps[map_feature.id] = mph_to_mps(map_feature.lane.speed_limit_mph)
-            lanes_type[map_feature.id] = map_feature.lane.type
-            lanes_left_neighbors[map_feature.id] = _extract_neighbors(map_feature.lane.left_neighbors)
-            lanes_right_neighbors[map_feature.id] = _extract_neighbors(map_feature.lane.right_neighbors)
-        elif map_feature.HasField("road_line"):
-            polyline = _extract_polyline(map_feature.road_line)
-            if polyline.ndim != 2 or polyline.shape[0] < 2:
-                continue
-            road_lines[map_feature.id] = polyline
-            road_lines_type[map_feature.id] = WAYMO_ROAD_LINE_CONVERSION.get(
-                map_feature.road_line.type, RoadLineType.UNKNOWN
-            )
-        elif map_feature.HasField("road_edge"):
-            polyline = _extract_polyline(map_feature.road_edge)
-            if polyline.ndim != 2 or polyline.shape[0] < 2:
+            centerline = _extract_polyline_waymo_proto(map_feature.lane)
+
+            # In case of an invalid lane, skip it
+            if centerline is None:
                 continue
-            road_edges[map_feature.id] = polyline
-            road_edges_type[map_feature.id] = WAYMO_ROAD_EDGE_CONVERSION.get(
-                map_feature.road_edge.type, RoadEdgeType.UNKNOWN
+
+            speed_limit_mps = mph_to_mps(map_feature.lane.speed_limit_mph)
+            speed_limit_mps = speed_limit_mps if speed_limit_mps > 0.0 else None
+
+            lane_data_dict[map_feature.id] = WaymoLaneData(
+                object_id=map_feature.id,
+                centerline=centerline,
+                predecessor_ids=[int(lane_id_) for lane_id_ in map_feature.lane.entry_lanes],
+                successor_ids=[int(lane_id_) for lane_id_ in map_feature.lane.exit_lanes],
+                speed_limit_mps=speed_limit_mps,
+                lane_type=WAYMO_LANE_TYPE_CONVERSION.get(map_feature.lane.type, LaneType.UNDEFINED),
+                left_neighbors=_extract_lane_neighbors(map_feature.lane.left_neighbors),
+                right_neighbors=_extract_lane_neighbors(map_feature.lane.right_neighbors),
             )
-        elif map_feature.HasField("stop_sign"):
-            # TODO: implement stop signs
-            pass
-        elif map_feature.HasField("crosswalk"):
-            crosswalks[map_feature.id] = _extract_polygon(map_feature.crosswalk)
-        elif map_feature.HasField("speed_bump"):
-            # TODO: implement speed bumps
-            pass
-        elif map_feature.HasField("driveway"):
-            # NOTE: Determine whether to use a different semantic type for driveways.
- carparks[map_feature.id] = _extract_polygon(map_feature.driveway) - - lane_left_boundaries_3d, lane_right_boundaries_3d = extract_lane_boundaries( - lanes, lanes_successors, lanes_predecessors, road_lines, road_edges - ) - - lane_df = get_lane_df( - lanes, - lanes_successors, - lanes_predecessors, - lanes_speed_limit_mps, - lane_left_boundaries_3d, - lane_right_boundaries_3d, - lanes_type, - lanes_left_neighbors, - lanes_right_neighbors, - ) - lane_group_df = get_lane_group_df( - lanes, - lanes_successors, - lanes_predecessors, - lane_left_boundaries_3d, - lane_right_boundaries_3d, - ) - intersection_df = get_intersections_df() - crosswalk_df = get_crosswalk_df(crosswalks) - walkway_df = get_walkway_df() - carpark_df = get_carpark_df(carparks) - generic_drivable_df = get_generic_drivable_df() - road_edge_df = get_road_edge_df(road_edges, road_edges_type) - road_line_df = get_road_line_df(road_lines, road_lines_type) - - map_file_path.unlink(missing_ok=True) - if not map_file_path.parent.exists(): - map_file_path.parent.mkdir(parents=True, exist_ok=True) - - lane_df.to_file(map_file_path, layer=MapLayer.LANE.serialize(), driver="GPKG") - lane_group_df.to_file(map_file_path, layer=MapLayer.LANE_GROUP.serialize(), driver="GPKG", mode="a") - intersection_df.to_file(map_file_path, layer=MapLayer.INTERSECTION.serialize(), driver="GPKG", mode="a") - crosswalk_df.to_file(map_file_path, layer=MapLayer.CROSSWALK.serialize(), driver="GPKG", mode="a") - walkway_df.to_file(map_file_path, layer=MapLayer.WALKWAY.serialize(), driver="GPKG", mode="a") - carpark_df.to_file(map_file_path, layer=MapLayer.CARPARK.serialize(), driver="GPKG", mode="a") - generic_drivable_df.to_file(map_file_path, layer=MapLayer.GENERIC_DRIVABLE.serialize(), driver="GPKG", mode="a") - road_edge_df.to_file(map_file_path, layer=MapLayer.ROAD_EDGE.serialize(), driver="GPKG", mode="a") - road_line_df.to_file(map_file_path, layer=MapLayer.ROAD_LINE.serialize(), driver="GPKG", mode="a") - - -def get_lane_df( - lanes: Dict[int, npt.NDArray[np.float64]], - lanes_successors: Dict[int, List[int]], - lanes_predecessors: Dict[int, List[int]], - lanes_speed_limit_mps: Dict[int, float], - lanes_left_boundaries_3d: Dict[int, Polyline3D], - lanes_right_boundaries_3d: Dict[int, Polyline3D], - lanes_type: Dict[int, int], - lanes_left_neighbors: Dict[int, List[Dict[str, int]]], - lanes_right_neighbors: Dict[int, List[Dict[str, int]]], -) -> gpd.GeoDataFrame: - - ids = [] - lane_types = [] - lane_group_ids = [] - speed_limits_mps = [] - predecessor_ids = [] - successor_ids = [] - left_boundaries = [] - right_boundaries = [] - left_lane_ids = [] - right_lane_ids = [] - baseline_paths = [] - geometries = [] + + # 2. 
Process lane data to fill in left/right boundaries + fill_lane_boundaries(lane_data_dict, road_lines, road_edges) def _get_majority_neighbor(neighbors: List[Dict[str, int]]) -> Optional[int]: if len(neighbors) == 0: @@ -206,185 +136,99 @@ def _get_majority_neighbor(neighbors: List[Dict[str, int]]) -> Optional[int]: } return str(max(length, key=length.get)) - for lane_id, lane_centerline_array in lanes.items(): - if lane_id not in lanes_left_boundaries_3d or lane_id not in lanes_right_boundaries_3d: + lanes: List[AbstractLane] = [] + for lane_data in lane_data_dict.values(): + + # Skip lanes without boundaries + if lane_data.left_boundary is None or lane_data.right_boundary is None: continue - lane_centerline = Polyline3D.from_array(lane_centerline_array) - lane_speed_limit_mps = lanes_speed_limit_mps[lane_id] if lanes_speed_limit_mps[lane_id] > 0.0 else None - - ids.append(lane_id) - lane_types.append(lanes_type[lane_id]) - lane_group_ids.append([lane_id]) - speed_limits_mps.append(lane_speed_limit_mps) - predecessor_ids.append(lanes_predecessors[lane_id]) - successor_ids.append(lanes_successors[lane_id]) - left_boundaries.append(lanes_left_boundaries_3d[lane_id].linestring) - right_boundaries.append(lanes_right_boundaries_3d[lane_id].linestring) - left_lane_ids.append(_get_majority_neighbor(lanes_left_neighbors[lane_id])) - right_lane_ids.append(_get_majority_neighbor(lanes_right_neighbors[lane_id])) - baseline_paths.append(lane_centerline.linestring) - - geometry = geom.Polygon( - np.vstack( - [ - lanes_left_boundaries_3d[lane_id].array[:, :2], - lanes_right_boundaries_3d[lane_id].array[:, :2][::-1], - ] + + lanes.append( + CacheLane( + object_id=lane_data.object_id, + lane_group_id=lane_data.object_id, + left_boundary=lane_data.left_boundary, + right_boundary=lane_data.right_boundary, + centerline=lane_data.centerline, + left_lane_id=_get_majority_neighbor(lane_data.left_neighbors), + right_lane_id=_get_majority_neighbor(lane_data.right_neighbors), + predecessor_ids=lane_data.predecessor_ids, + successor_ids=lane_data.successor_ids, + speed_limit_mps=lane_data.speed_limit_mps, ) ) - geometries.append(geometry) - - data = pd.DataFrame( - { - "id": ids, - "lane_type": lane_types, - "lane_group_id": lane_group_ids, - "speed_limit_mps": speed_limits_mps, - "predecessor_ids": predecessor_ids, - "successor_ids": successor_ids, - "left_boundary": left_boundaries, - "right_boundary": right_boundaries, - "left_lane_id": left_lane_ids, - "right_lane_id": right_lane_ids, - "baseline_path": baseline_paths, - } - ) - gdf = gpd.GeoDataFrame(data, geometry=geometries) - return gdf + for lane in lanes: + map_writer.write_lane(lane) + return lanes -def get_lane_group_df( - lanes: Dict[int, npt.NDArray[np.float64]], - lanes_successors: Dict[int, List[int]], - lanes_predecessors: Dict[int, List[int]], - lanes_left_boundaries_3d: Dict[int, Polyline3D], - lanes_right_boundaries_3d: Dict[int, Polyline3D], -) -> gpd.GeoDataFrame: - ids = [] - lane_ids = [] - intersection_ids = [] - predecessor_lane_group_ids = [] - successor_lane_group_ids = [] - left_boundaries = [] - right_boundaries = [] - geometries = [] +def _write_waymo_lane_groups(lanes: List[AbstractLane], map_writer: AbstractMapWriter) -> None: # NOTE: WOPD does not provide lane groups, so we create a lane group for each lane. 
- for lane_id in lanes.keys(): - if lane_id not in lanes_left_boundaries_3d or lane_id not in lanes_right_boundaries_3d: - continue - ids.append(lane_id) - lane_ids.append([lane_id]) - intersection_ids.append(None) # WOPD does not provide intersections - predecessor_lane_group_ids.append(lanes_predecessors[lane_id]) - successor_lane_group_ids.append(lanes_successors[lane_id]) - left_boundaries.append(lanes_left_boundaries_3d[lane_id].linestring) - right_boundaries.append(lanes_right_boundaries_3d[lane_id].linestring) - geometry = geom.Polygon( - np.vstack( - [ - lanes_left_boundaries_3d[lane_id].array[:, :2], - lanes_right_boundaries_3d[lane_id].array[:, :2][::-1], - ] + for lane in lanes: + map_writer.write_lane_group( + CacheLaneGroup( + object_id=lane.object_id, + lane_ids=[lane.object_id], + left_boundary=lane.left_boundary, + right_boundary=lane.right_boundary, + intersection_id=None, + predecessor_ids=lane.predecessor_ids, + successor_ids=lane.successor_ids, + outline=lane.outline_3d, ) ) - geometries.append(geometry) - - data = pd.DataFrame( - { - "id": ids, - "lane_ids": lane_ids, - "intersection_id": intersection_ids, - "predecessor_lane_group_ids": predecessor_lane_group_ids, - "successor_lane_group_ids": successor_lane_group_ids, - "left_boundary": left_boundaries, - "right_boundary": right_boundaries, - } - ) - gdf = gpd.GeoDataFrame(data, geometry=geometries) - return gdf -def get_intersections_df() -> gpd.GeoDataFrame: - ids = [] - lane_group_ids = [] - geometries = [] +def _write_waymo_misc_surfaces(frame: dataset_pb2.Frame, map_writer: AbstractMapWriter) -> None: - # NOTE: WOPD does not provide intersections, so we create an empty DataFrame. - data = pd.DataFrame({"id": ids, "lane_group_ids": lane_group_ids}) - gdf = gpd.GeoDataFrame(data, geometry=geometries) - return gdf - - -def get_carpark_df(carparks) -> gpd.GeoDataFrame: - ids = list(carparks.keys()) - outlines = [geom.LineString(outline) for outline in carparks.values()] - geometries = [geom.Polygon(outline[..., Point3DIndex.XY]) for outline in carparks.values()] - - data = pd.DataFrame({"id": ids, "outline": outlines}) - gdf = gpd.GeoDataFrame(data, geometry=geometries) - return gdf - - -def get_walkway_df() -> gpd.GeoDataFrame: - ids = [] - geometries = [] - - # NOTE: WOPD does not provide walkways, so we create an empty DataFrame. - data = pd.DataFrame({"id": ids}) - gdf = gpd.GeoDataFrame(data, geometry=geometries) - return gdf - - -def get_crosswalk_df(crosswalks: Dict[int, npt.NDArray[np.float64]]) -> gpd.GeoDataFrame: - ids = list(crosswalks.keys()) - outlines = [geom.LineString(outline) for outline in crosswalks.values()] - geometries = [geom.Polygon(outline[..., Point3DIndex.XY]) for outline in crosswalks.values()] - - data = pd.DataFrame({"id": ids, "outline": outlines}) - gdf = gpd.GeoDataFrame(data, geometry=geometries) - return gdf - - -def get_generic_drivable_df() -> gpd.GeoDataFrame: - ids = [] - geometries = [] - - # NOTE: WOPD does not provide generic drivable areas, so we create an empty DataFrame. 
- data = pd.DataFrame({"id": ids}) - gdf = gpd.GeoDataFrame(data, geometry=geometries) - return gdf - - -def get_road_edge_df( - road_edges: Dict[int, npt.NDArray[np.float64]], road_edges_type: Dict[int, RoadEdgeType] -) -> gpd.GeoDataFrame: - ids = list(road_edges.keys()) - geometries = [Polyline3D.from_array(road_edge).linestring for road_edge in road_edges.values()] - - data = pd.DataFrame( - { - "id": ids, - "road_edge_type": [int(road_edge_type) for road_edge_type in road_edges_type.values()], - } - ) - gdf = gpd.GeoDataFrame(data, geometry=geometries) - return gdf - - -def get_road_line_df( - road_lines: Dict[int, npt.NDArray[np.float64]], road_lines_type: Dict[int, RoadLineType] -) -> gpd.GeoDataFrame: - ids = list(road_lines.keys()) - geometries = [Polyline3D.from_array(road_edge).linestring for road_edge in road_lines.values()] + for map_feature in frame.map_features: + if map_feature.HasField("driveway"): + # NOTE: We currently only handle classify driveways as carparks. + outline = _extract_outline_from_waymo_proto(map_feature.driveway) + if outline is not None: + map_writer.write_carpark(CacheCarpark(object_id=map_feature.id, outline=outline)) + elif map_feature.HasField("crosswalk"): + outline = _extract_outline_from_waymo_proto(map_feature.crosswalk) + if outline is not None: + map_writer.write_crosswalk(CacheCrosswalk(object_id=map_feature.id, outline=outline)) - data = pd.DataFrame( - { - "id": ids, - "road_line_type": [int(road_line_type) for road_line_type in road_lines_type.values()], - } - ) - gdf = gpd.GeoDataFrame(data, geometry=geometries) - return gdf + elif map_feature.HasField("stop_sign"): + pass # TODO: Implement stop signs + elif map_feature.HasField("speed_bump"): + pass # TODO: Implement speed bumps + + +def _extract_polyline_waymo_proto(data) -> Optional[Polyline3D]: + polyline: Optional[Polyline3D] = None + polyline_array = np.array([[p.x, p.y, p.z] for p in data.polyline], dtype=np.float64) + if polyline_array.ndim == 2 and polyline_array.shape[1] == 3 and len(polyline_array) >= 2: + # NOTE: A valid polyline must have at least 2 points, be 3D, and be non-empty + polyline = Polyline3D.from_array(polyline_array) + return polyline + + +def _extract_outline_from_waymo_proto(data) -> Optional[Polyline3D]: + outline: Optional[Polyline3D] = None + outline_array = np.array([[p.x, p.y, p.z] for p in data.polygon], dtype=np.float64) + if outline_array.ndim == 2 and outline_array.shape[0] >= 3 and outline_array.shape[1] == 3: + # NOTE: A valid polygon outline must have at least 3 points, be 3D, and be non-empty + outline = Polyline3D.from_array(outline_array) + return outline + + +def _extract_lane_neighbors(data) -> List[Dict[str, int]]: + neighbors = [] + for neighbor in data: + neighbors.append( + { + "lane_id": neighbor.feature_id, + "self_start_index": neighbor.self_start_index, + "self_end_index": neighbor.self_end_index, + "neighbor_start_index": neighbor.neighbor_start_index, + "neighbor_end_index": neighbor.neighbor_end_index, + } + ) + return neighbors diff --git a/d123/conversion/datasets/wopd/wopd_converter.py b/d123/conversion/datasets/wopd/wopd_converter.py index 1f1a2cbd..4cf33ef9 100644 --- a/d123/conversion/datasets/wopd/wopd_converter.py +++ b/d123/conversion/datasets/wopd/wopd_converter.py @@ -18,9 +18,11 @@ from d123.conversion.datasets.wopd.waymo_map_utils.wopd_map_utils import convert_wopd_map from d123.conversion.datasets.wopd.wopd_utils import parse_range_image_and_camera_projection from d123.conversion.log_writer.abstract_log_writer import 
AbstractLogWriter +from d123.conversion.map_writer.abstract_map_writer import AbstractMapWriter from d123.conversion.utils.sensor_utils.camera_conventions import CameraConvention, convert_camera_convention from d123.conversion.utils.sensor_utils.lidar_index_registry import DefaultLidarIndex, WOPDLidarIndex from d123.datatypes.detections.detection import BoxDetectionMetadata, BoxDetectionSE3, BoxDetectionWrapper +from d123.datatypes.maps.map_metadata import MapMetadata from d123.datatypes.scene.scene_metadata import LogMetadata from d123.datatypes.sensors.camera.pinhole_camera import ( PinholeCameraMetadata, @@ -32,9 +34,16 @@ from d123.datatypes.time.time_point import TimePoint from d123.datatypes.vehicle_state.ego_state import DynamicStateSE3, EgoStateSE3 from d123.datatypes.vehicle_state.vehicle_parameters import get_wopd_chrysler_pacifica_parameters -from d123.geometry import BoundingBoxSE3Index, EulerAngles, StateSE3, Vector3D, Vector3DIndex -from d123.geometry.bounding_box import BoundingBoxSE3 -from d123.geometry.geometry_index import EulerAnglesIndex, StateSE3Index +from d123.geometry import ( + BoundingBoxSE3, + BoundingBoxSE3Index, + EulerAngles, + EulerAnglesIndex, + StateSE3, + StateSE3Index, + Vector3D, + Vector3DIndex, +) from d123.geometry.transform.transform_se3 import convert_relative_to_absolute_se3_array from d123.geometry.utils.constants import DEFAULT_PITCH, DEFAULT_ROLL from d123.geometry.utils.rotation_utils import ( @@ -47,7 +56,6 @@ from waymo_open_dataset import dataset_pb2 from waymo_open_dataset.utils import frame_utils -D123_MAPS_ROOT: Path = Path(os.getenv("D123_MAPS_ROOT", "$HOME/maps")) # TODO: remove os.environ["CUDA_VISIBLE_DEVICES"] = "-1" os.environ["OMP_NUM_THREADS"] = "1" os.environ["TF_CPP_MIN_LOG_LEVEL"] = "2" @@ -108,15 +116,17 @@ def get_number_of_logs(self) -> int: """Inherited, see superclass.""" return len(self._split_tf_record_pairs) - def convert_map(self, map_index: int) -> None: + def convert_map(self, map_index: int, map_writer: AbstractMapWriter) -> None: """Inherited, see superclass.""" split, source_tf_record_path = self._split_tf_record_pairs[map_index] initial_frame = _get_initial_frame_from_tfrecord(source_tf_record_path) - log_name = str(initial_frame.context.name) - map_file_path = D123_MAPS_ROOT / split / f"{log_name}.gpkg" - if self.dataset_converter_config.force_map_conversion or not map_file_path.exists(): - map_file_path.unlink(missing_ok=True) - convert_wopd_map(initial_frame, map_file_path) + + map_metadata = _get_wopd_map_metadata(initial_frame, split) + map_needs_writing = map_writer.reset(self.dataset_converter_config, map_metadata) + if map_needs_writing: + convert_wopd_map(initial_frame, map_writer) + + map_writer.close() def convert_log(self, log_index: int, log_writer: AbstractLogWriter) -> None: """Inherited, see superclass.""" @@ -144,8 +154,7 @@ def convert_log(self, log_index: int, log_writer: AbstractLogWriter) -> None: self._keep_polar_features, self.dataset_converter_config, ), - map_has_z=True, - map_is_local=True, + map_metadata=_get_wopd_map_metadata(initial_frame, split), ) # 2. Prepare log writer @@ -197,6 +206,20 @@ def _get_initial_frame_from_tfrecord( return initial_frame +def _get_wopd_map_metadata(initial_frame: dataset_pb2.Frame, split: str) -> MapMetadata: + + map_metadata = MapMetadata( + dataset="wopd", + split=split, + log_name=str(initial_frame.context.name), + location=None, # TODO: Add location information. 
+ map_has_z=True, + map_is_local=True, # True, if map is per log + ) + + return map_metadata + + def _get_wopd_camera_metadata( initial_frame: dataset_pb2.Frame, dataset_converter_config: DatasetConverterConfig ) -> Dict[PinholeCameraType, PinholeCameraMetadata]: diff --git a/d123/conversion/map_writer/abstract_map_writer.py b/d123/conversion/map_writer/abstract_map_writer.py index c49a1181..25158a76 100644 --- a/d123/conversion/map_writer/abstract_map_writer.py +++ b/d123/conversion/map_writer/abstract_map_writer.py @@ -14,14 +14,14 @@ AbstractStopLine, AbstractWalkway, ) -from d123.datatypes.scene.scene_metadata import LogMetadata +from d123.datatypes.maps.map_metadata import MapMetadata class AbstractMapWriter(abc.ABC): """Abstract base class for map writers.""" @abstractmethod - def reset(self, dataset_converter_config: DatasetConverterConfig, log_metadata: LogMetadata) -> bool: + def reset(self, dataset_converter_config: DatasetConverterConfig, map_metadata: MapMetadata) -> bool: """Reset the writer to its initial state.""" @abstractmethod diff --git a/d123/conversion/map_writer/gpkg_map_writer.py b/d123/conversion/map_writer/gpkg_map_writer.py index 7bdad06e..3ae6e9e8 100644 --- a/d123/conversion/map_writer/gpkg_map_writer.py +++ b/d123/conversion/map_writer/gpkg_map_writer.py @@ -23,7 +23,7 @@ AbstractWalkway, ) from d123.datatypes.maps.map_datatypes import MapLayer -from d123.datatypes.scene.scene_metadata import LogMetadata +from d123.datatypes.maps.map_metadata import MapMetadata from d123.geometry.polyline import Polyline3D MAP_OBJECT_DATA = Dict[str, List[Union[str, int, float, bool, geom.base.BaseGeometry]]] @@ -39,25 +39,27 @@ def __init__(self, maps_root: Union[str, Path]) -> None: # Data to be written to the map for each object type self._map_data: Optional[Dict[MapLayer, MAP_OBJECT_DATA]] = None self._map_file: Optional[Path] = None + self._map_metadata: Optional[MapMetadata] = None - def reset(self, dataset_converter_config: DatasetConverterConfig, log_metadata: LogMetadata) -> bool: + def reset(self, dataset_converter_config: DatasetConverterConfig, map_metadata: MapMetadata) -> bool: """Inherited, see superclass.""" map_needs_writing: bool = False if dataset_converter_config.include_map: - if log_metadata.map_is_local: - split, log_name = log_metadata.split, log_metadata.log_name + if map_metadata.map_is_local: + split, log_name = map_metadata.split, map_metadata.log_name map_file = self._maps_root / split / f"{log_name}.gpkg" else: - dataset, location = log_metadata.dataset, log_metadata.location + dataset, location = map_metadata.dataset, map_metadata.location map_file = self._maps_root / dataset / f"{dataset}_{location}.gpkg" map_needs_writing = dataset_converter_config.force_map_conversion or not map_file.exists() if map_needs_writing: - # Reset all map layers - self._map_file = map_file + # Reset all map layers and update map file / metadata self._map_data = {map_layer: defaultdict(list) for map_layer in MapLayer} + self._map_file = map_file + self._map_metadata = map_metadata return map_needs_writing @@ -135,12 +137,18 @@ def close(self) -> None: gdf = gpd.GeoDataFrame({"id": [], "geometry": []}, geometry="geometry", crs=self._crs) gdf.to_file(self._map_file, driver="GPKG", layer=map_layer.serialize()) - del self._map_file, self._map_data + metadata_df = gpd.GeoDataFrame(pd.DataFrame([self._map_metadata.to_dict()])) + metadata_df.to_file(self._map_file, driver="GPKG", layer="map_metadata") + + del self._map_file, self._map_data, self._map_metadata self._map_file = 
None self._map_data = None + self._map_metadata = None def _assert_initialized(self) -> None: assert self._map_data is not None, "Call reset() before writing data." + assert self._map_file is not None, "Call reset() before writing data." + assert self._map_metadata is not None, "Call reset() before writing data." def _write_surface_layer(self, layer: MapLayer, surface_object: AbstractSurfaceMapObject) -> None: """Helper to write surface map objects. diff --git a/d123/datatypes/maps/abstract_map.py b/d123/datatypes/maps/abstract_map.py index edfc16e5..6d279c29 100644 --- a/d123/datatypes/maps/abstract_map.py +++ b/d123/datatypes/maps/abstract_map.py @@ -7,6 +7,7 @@ from d123.datatypes.maps.abstract_map_objects import AbstractMapObject from d123.datatypes.maps.map_datatypes import MapLayer +from d123.datatypes.maps.map_metadata import MapMetadata from d123.geometry import Point2D # TODO: @@ -18,9 +19,8 @@ class AbstractMap(abc.ABC): - @property @abc.abstractmethod - def map_name(self) -> str: + def get_map_metadata(self) -> MapMetadata: pass @abc.abstractmethod @@ -82,3 +82,7 @@ def query_nearest( exclusive: bool = False, ) -> Dict[MapLayer, Union[List[AbstractMapObject], Dict[int, List[AbstractMapObject]]]]: pass + + @property + def map_name(self) -> str: + raise NotImplementedError diff --git a/d123/datatypes/maps/gpkg/gpkg_map.py b/d123/datatypes/maps/gpkg/gpkg_map.py index d207e637..d77f90a9 100644 --- a/d123/datatypes/maps/gpkg/gpkg_map.py +++ b/d123/datatypes/maps/gpkg/gpkg_map.py @@ -26,6 +26,7 @@ ) from d123.datatypes.maps.gpkg.gpkg_utils import load_gdf_with_geometry_columns from d123.datatypes.maps.map_datatypes import MapLayer +from d123.datatypes.maps.map_metadata import MapMetadata from d123.geometry import Point2D USE_ARROW: bool = True @@ -50,11 +51,7 @@ def __init__(self, file_path: Path) -> None: # loaded during `.initialize()` self._gpd_dataframes: Dict[MapLayer, gpd.GeoDataFrame] = {} - - @property - def map_name(self) -> str: - """Inherited, see superclass.""" - return self._file_path.with_suffix("").name + self._map_metadata: Optional[MapMetadata] = None def initialize(self) -> None: """Inherited, see superclass.""" @@ -77,6 +74,10 @@ def initialize(self) -> None: warnings.warn(f"GPKGMap: {map_layer_name} not available in {str(self._file_path)}") self._gpd_dataframes[map_layer] = None + assert "map_metadata" in list(gpd.list_layers(self._file_path).name) + metadata_gdf = gpd.read_file(self._file_path, layer="map_metadata", use_arrow=USE_ARROW) + self._map_metadata = MapMetadata.from_dict(metadata_gdf.iloc[0].to_dict()) + def _assert_initialize(self) -> None: "Checks if `.initialize()` was called, before retrieving data." assert len(self._gpd_dataframes) > 0, "GPKGMap: Call `.initialize()` before retrieving data!" @@ -85,6 +86,9 @@ def _assert_layer_available(self, layer: MapLayer) -> None: "Checks if layer is available." assert layer in self.get_available_map_objects(), f"GPKGMap: MapLayer {layer.name} is unavailable." + def get_map_metadata(self): + return self._map_metadata + def get_available_map_objects(self) -> List[MapLayer]: """Inherited, see superclass.""" self._assert_initialize() diff --git a/d123/datatypes/maps/map_datatypes.py b/d123/datatypes/maps/map_datatypes.py index 14a815b5..8a96d1d0 100644 --- a/d123/datatypes/maps/map_datatypes.py +++ b/d123/datatypes/maps/map_datatypes.py @@ -3,6 +3,10 @@ from d123.common.utils.enums import SerialIntEnum # TODO: Add stop pads or stop lines. +# - Add type for stop zones. +# - Add type for carparks, e.g. 
outline, driveway (Waymo), or other types.
+# - Check if intersections should have types.
+# - Use consistent naming conventions (unknown, undefined, none, etc.).
 
 
 class MapLayer(SerialIntEnum):
@@ -25,6 +29,7 @@
 class LaneType(SerialIntEnum):
     """
     Enum for LaneType.
+    NOTE: We use the lane types from Waymo.
     https://github.com/waymo-research/waymo-open-dataset/blob/99a4cb3ff07e2fe06c2ce73da001f850f628e45a/src/waymo_open_dataset/protos/map.proto#L147
     """
 
diff --git a/d123/datatypes/maps/map_metadata.py b/d123/datatypes/maps/map_metadata.py
new file mode 100644
index 00000000..c643f43b
--- /dev/null
+++ b/d123/datatypes/maps/map_metadata.py
@@ -0,0 +1,28 @@
+from __future__ import annotations
+
+from dataclasses import asdict, dataclass
+from typing import Any, Dict, Optional
+
+import d123
+
+# TODO: Refactor the usage of the map metadata in this repo.
+
+
+@dataclass
+class MapMetadata:
+    """Class to hold metadata information about a map."""
+
+    dataset: str
+    split: Optional[str]  # None, if map is not per log
+    log_name: Optional[str]  # None, if map is not per log
+    location: Optional[str]  # None, if map is per log
+    map_has_z: bool
+    map_is_local: bool  # True, if map is per log
+    version: str = str(d123.__version__)
+
+    def to_dict(self) -> dict:
+        return asdict(self)
+
+    @staticmethod
+    def from_dict(data_dict: Dict[str, Any]) -> MapMetadata:
+        return MapMetadata(**data_dict)
diff --git a/d123/datatypes/scene/arrow/arrow_scene.py b/d123/datatypes/scene/arrow/arrow_scene.py
index 8aa595db..928f1205 100644
--- a/d123/datatypes/scene/arrow/arrow_scene.py
+++ b/d123/datatypes/scene/arrow/arrow_scene.py
@@ -83,7 +83,7 @@ def get_scene_extraction_metadata(self) -> SceneExtractionMetadata:
 
     def get_map_api(self) -> Optional[AbstractMap]:
         map_api: Optional[AbstractMap] = None
-        if self.log_metadata.map_is_local:
+        if self.log_metadata.map_metadata.map_is_local:
             if self._local_map_api is None:
                 map_api = get_local_map_api(self.log_metadata.split, self.log_name)
                 self._local_map_api = map_api
diff --git a/d123/datatypes/scene/scene_metadata.py b/d123/datatypes/scene/scene_metadata.py
index 3a12c9c0..27b56e85 100644
--- a/d123/datatypes/scene/scene_metadata.py
+++ b/d123/datatypes/scene/scene_metadata.py
@@ -1,9 +1,10 @@
 from __future__ import annotations
 
-from dataclasses import asdict, dataclass
-from typing import Dict
+from dataclasses import asdict, dataclass, field
+from typing import Dict, Optional
 
 import d123
+from d123.datatypes.maps.map_metadata import MapMetadata
 from d123.datatypes.sensors.camera.pinhole_camera import PinholeCameraMetadata, PinholeCameraType
 from d123.datatypes.sensors.lidar.lidar import LiDARMetadata, LiDARType
 from d123.datatypes.vehicle_state.vehicle_parameters import VehicleParameters
@@ -18,18 +19,19 @@ class LogMetadata:
     location: str
     timestep_seconds: float
 
-    vehicle_parameters: VehicleParameters
-    camera_metadata: Dict[PinholeCameraType, PinholeCameraMetadata]
-    lidar_metadata: Dict[LiDARType, LiDARMetadata]
+    vehicle_parameters: Optional[VehicleParameters] = None
+    camera_metadata: Dict[PinholeCameraType, PinholeCameraMetadata] = field(default_factory=dict)
+    lidar_metadata: Dict[LiDARType, LiDARMetadata] = field(default_factory=dict)
 
-    map_has_z: bool
-    map_is_local: bool
+    map_metadata: Optional[MapMetadata] = None
 
     version: str = str(d123.__version__)
 
     @classmethod
     def from_dict(cls, data_dict: Dict) -> LogMetadata:
-        data_dict["vehicle_parameters"] = VehicleParameters.from_dict(data_dict["vehicle_parameters"])
+        if data_dict["vehicle_parameters"] is not None:
+
data_dict["vehicle_parameters"] = VehicleParameters.from_dict(data_dict["vehicle_parameters"]) + data_dict["camera_metadata"] = { PinholeCameraType.deserialize(key): PinholeCameraMetadata.from_dict(value) for key, value in data_dict.get("camera_metadata", {}).items() @@ -38,13 +40,17 @@ def from_dict(cls, data_dict: Dict) -> LogMetadata: LiDARType.deserialize(key): LiDARMetadata.from_dict(value) for key, value in data_dict.get("lidar_metadata", {}).items() } + if data_dict["map_metadata"] is not None: + data_dict["map_metadata"] = MapMetadata.from_dict(data_dict["map_metadata"]) + return LogMetadata(**data_dict) def to_dict(self) -> Dict: data_dict = asdict(self) - data_dict["vehicle_parameters"] = self.vehicle_parameters.to_dict() + data_dict["vehicle_parameters"] = self.vehicle_parameters.to_dict() if self.vehicle_parameters else None data_dict["camera_metadata"] = {key.serialize(): value.to_dict() for key, value in self.camera_metadata.items()} data_dict["lidar_metadata"] = {key.serialize(): value.to_dict() for key, value in self.lidar_metadata.items()} + data_dict["map_metadata"] = self.map_metadata.to_dict() if self.map_metadata else None return data_dict diff --git a/d123/script/config/conversion/datasets/wopd_dataset.yaml b/d123/script/config/conversion/datasets/wopd_dataset.yaml index f936270d..da977222 100644 --- a/d123/script/config/conversion/datasets/wopd_dataset.yaml +++ b/d123/script/config/conversion/datasets/wopd_dataset.yaml @@ -2,7 +2,7 @@ wopd_dataset: _target_: d123.conversion.datasets.wopd.wopd_converter.WOPDConverter _convert_: 'all' - splits: ["wopd_train", "wopd_val", "wopd_test"] # Which splits to convert. Options: ["wopd_train", "wopd_val", "wopd_test"] + splits: ["wopd_val"] # Which splits to convert. Options: ["wopd_train", "wopd_val", "wopd_test"] wopd_data_root: "/media/nvme1/waymo_perception" # ${wopd_data_root} zero_roll_pitch: true # Whether to zero the roll and pitch of the box detections in global frame. keep_polar_features: false # Add lidar polar features (range, azimuth, elevation) in addition to XYZ. 
(slow if true) diff --git a/d123/script/config/conversion/default_conversion.yaml b/d123/script/config/conversion/default_conversion.yaml index 21630d38..8f2b7da3 100644 --- a/d123/script/config/conversion/default_conversion.yaml +++ b/d123/script/config/conversion/default_conversion.yaml @@ -19,8 +19,8 @@ defaults: # - nuplan_mini_dataset # - nuplan_private_dataset # - carla_dataset - # - wopd_dataset - - av2_sensor_dataset + - wopd_dataset + # - av2_sensor_dataset - _self_ force_map_conversion: True diff --git a/notebooks/viz/bev_matplotlib.ipynb b/notebooks/viz/bev_matplotlib.ipynb index 61af12ea..f1a18ba8 100644 --- a/notebooks/viz/bev_matplotlib.ipynb +++ b/notebooks/viz/bev_matplotlib.ipynb @@ -40,10 +40,10 @@ "# log_names = [\"2021.09.29.17.35.58_veh-44_00066_00432\"]\n", "\n", "\n", - "# splits = [\"wopd_train\"]\n", + "splits = [\"wopd_val\"]\n", "# splits = [\"carla\"]\n", "# splits = [\"nuplan-mini_test\"]\n", - "splits = [\"av2-sensor-mini_train\"]\n", + "# splits = [\"av2-sensor-mini_train\"]\n", "# log_names = None\n", "\n", "\n", @@ -252,11 +252,12 @@ " return fig, ax\n", "\n", "\n", - "scene_index = 19\n", + "scene_index = 17\n", "iteration = 99\n", - "fig, ax = plot_scene_at_iteration(scenes[scene_index], iteration=iteration, radius=60)\n", + "fig, ax = plot_scene_at_iteration(scenes[scene_index], iteration=iteration, radius=500)\n", "plt.show()\n", "\n", + "\n", "# camera = scenes[scene_index].get_camera_at_iteration(\n", "# iteration=iteration, camera_type=CameraType.CAM_F0\n", "# )\n", @@ -274,9 +275,16 @@ "metadata": {}, "outputs": [], "source": [ + "scene_index = 17\n", + "iteration = 99\n", "\n", + "fig, ax = plt.subplots(1, 3, figsize=(15, 5))\n", + "scene = scenes[scene_index]\n", + "_plot_scene_on_ax(ax[0], scene, iteration, radius=20)\n", + "_plot_scene_on_ax(ax[1], scene, iteration, radius=50)\n", + "_plot_scene_on_ax(ax[2], scene, iteration, radius=100)\n", "\n", - "\n" + "plt.show()" ] }, { diff --git a/test_viser.py b/test_viser.py index 88981b92..ed04109b 100644 --- a/test_viser.py +++ b/test_viser.py @@ -13,8 +13,8 @@ # splits = ["nuplan-mini_test", "nuplan-mini_train", "nuplan-mini_val"] # splits = ["nuplan_private_test"] # splits = ["carla"] - # splits = ["wopd_val"] - splits = ["av2-sensor-mini_train"] + splits = ["wopd_val"] + # splits = ["av2-sensor-mini_train"] log_names = None scene_uuids = None From 35962da9a09aad23f7df21d66c08e2b49971499b Mon Sep 17 00:00:00 2001 From: jbwang <1159270049@qq.com> Date: Tue, 14 Oct 2025 19:34:35 +0800 Subject: [PATCH 078/145] refactor kitti360 log_writer and incorporate fisheye camera --- d123/common/visualization/viser/server.py | 308 ------------------ .../visualization/viser/viser_viewer.py | 2 +- d123/datasets/av2/av2_data_converter.py | 13 - .../kitti_360/kitti_360_data_converter.py | 253 +++++++------- d123/datasets/kitti_360/load_sensor.py | 4 +- d123/datasets/nuplan/nuplan_data_converter.py | 17 +- d123/datasets/utils/arrow_ipc_writer.py | 3 +- d123/datatypes/scene/abstract_scene.py | 7 +- d123/datatypes/scene/arrow/arrow_scene.py | 5 +- .../scene/arrow/utils/arrow_getters.py | 25 +- d123/datatypes/scene/scene_filter.py | 12 +- d123/datatypes/scene/scene_metadata.py | 18 +- d123/datatypes/sensors/camera.py | 189 ----------- .../sensors/camera/fisheye_mei_camera.py | 189 +++++++++++ d123/datatypes/sensors/camera/utils.py | 36 ++ .../default_dataset_conversion.yaml | 5 +- .../config/datasets/kitti360_dataset.yaml | 19 ++ d123/script/run_viser.py | 5 - 18 files changed, 436 insertions(+), 674 deletions(-) delete 
mode 100644 d123/common/visualization/viser/server.py delete mode 100644 d123/datatypes/sensors/camera.py create mode 100644 d123/datatypes/sensors/camera/fisheye_mei_camera.py create mode 100644 d123/datatypes/sensors/camera/utils.py diff --git a/d123/common/visualization/viser/server.py b/d123/common/visualization/viser/server.py deleted file mode 100644 index afda1375..00000000 --- a/d123/common/visualization/viser/server.py +++ /dev/null @@ -1,308 +0,0 @@ -import time -from typing import Dict, List, Literal - -import numpy as np -import trimesh -import viser - -from d123.common.visualization.viser.utils import ( - get_bounding_box_meshes, - get_bounding_box_outlines, - get_camera_if_available, - get_camera_values, - get_lidar_points, - get_map_meshes, -) -from d123.datatypes.scene.abstract_scene import AbstractScene -from d123.datatypes.sensors.camera import CameraType -from d123.datatypes.sensors.lidar import LiDARType - -# TODO: Try to fix performance issues. -# TODO: Refactor this file. - -all_camera_types: List[CameraType] = [ - CameraType.CAM_F0, - CameraType.CAM_B0, - CameraType.CAM_L0, - CameraType.CAM_L1, - CameraType.CAM_L2, - CameraType.CAM_R0, - CameraType.CAM_R1, - CameraType.CAM_R2, -] - -# MISC config: -LINE_WIDTH: float = 4.0 - -# Bounding box config: -BOUNDING_BOX_TYPE: Literal["mesh", "lines"] = "mesh" - -# Map config: -MAP_AVAILABLE: bool = True - - -# Cameras config: - -# VISUALIZE_CAMERA_FRUSTUM: List[CameraType] = [CameraType.CAM_F0, CameraType.CAM_L0, CameraType.CAM_R0] -# VISUALIZE_CAMERA_FRUSTUM: List[CameraType] = all_camera_types -VISUALIZE_CAMERA_FRUSTUM: List[CameraType] = [CameraType.CAM_STEREO_L] -# VISUALIZE_CAMERA_FRUSTUM: List[CameraType] = [] -VISUALIZE_CAMERA_GUI: List[CameraType] = [CameraType.CAM_STEREO_L] -CAMERA_SCALE: float = 1.0 - -# Lidar config: -LIDAR_AVAILABLE: bool = True - -LIDAR_TYPES: List[LiDARType] = [ - LiDARType.LIDAR_MERGED, - LiDARType.LIDAR_TOP, - LiDARType.LIDAR_FRONT, - LiDARType.LIDAR_SIDE_LEFT, - LiDARType.LIDAR_SIDE_RIGHT, - LiDARType.LIDAR_BACK, -] -# LIDAR_TYPES: List[LiDARType] = [ -# LiDARType.LIDAR_TOP, -# ] -LIDAR_POINT_SIZE: float = 0.05 - - -class ViserVisualizationServer: - def __init__( - self, - scenes: List[AbstractScene], - scene_index: int = 0, - host: str = "localhost", - port: int = 8080, - label: str = "D123 Viser Server", - ): - assert len(scenes) > 0, "At least one scene must be provided." - self.scenes = scenes - self.scene_index = scene_index - - self.host = host - self.port = port - self.label = label - - self.server = viser.ViserServer(host=self.host, port=self.port, label=self.label) - self.set_scene(self.scenes[self.scene_index % len(self.scenes)]) - - def next(self) -> None: - self.server.flush() - self.server.gui.reset() - self.server.scene.reset() - self.scene_index = (self.scene_index + 1) % len(self.scenes) - print(f"Viser server started at {self.host}:{self.port}") - self.set_scene(self.scenes[self.scene_index]) - - def set_scene(self, scene: AbstractScene) -> None: - num_frames = scene.get_number_of_iterations() - # print(scene.available_camera_types) - - self.server.gui.configure_theme(dark_mode=False, control_width="large") - - # TODO: Fix lighting. Environment map can help, but cannot be freely configured. 
- # self.server.scene.configure_environment_map( - # hdri="warehouse", - # background=False, - # background_intensity=0.25, - # environment_intensity=0.5, - # ) - - with self.server.gui.add_folder("Playback"): - server_playing = True - - gui_timestep = self.server.gui.add_slider( - "Timestep", - min=0, - max=num_frames - 1, - step=1, - initial_value=0, - disabled=True, - ) - gui_next_frame = self.server.gui.add_button("Next Frame", disabled=True) - gui_prev_frame = self.server.gui.add_button("Prev Frame", disabled=True) - gui_next_scene = self.server.gui.add_button("Next Scene", disabled=False) - gui_playing = self.server.gui.add_checkbox("Playing", True) - gui_framerate = self.server.gui.add_slider("FPS", min=1, max=60, step=0.1, initial_value=10) - gui_framerate_options = self.server.gui.add_button_group("FPS options", ("10", "20", "30", "60")) - - # Frame step buttons. - @gui_next_frame.on_click - def _(_) -> None: - gui_timestep.value = (gui_timestep.value + 1) % num_frames - - @gui_prev_frame.on_click - def _(_) -> None: - gui_timestep.value = (gui_timestep.value - 1) % num_frames - - @gui_next_scene.on_click - def _(_) -> None: - nonlocal server_playing - server_playing = False - - # Disable frame controls when we're playing. - @gui_playing.on_update - def _(_) -> None: - gui_timestep.disabled = gui_playing.value - gui_next_frame.disabled = gui_playing.value - gui_prev_frame.disabled = gui_playing.value - - # Set the framerate when we click one of the options. - @gui_framerate_options.on_click - def _(_) -> None: - gui_framerate.value = int(gui_framerate_options.value) - - prev_timestep = gui_timestep.value - - # Toggle frame visibility when the timestep slider changes. - @gui_timestep.on_update - def _(_) -> None: - nonlocal current_frame_handle, current_frame_handle, prev_timestep - current_timestep = gui_timestep.value - - start = time.time() - # with self.server.atomic(): - mew_frame_handle = self.server.scene.add_frame(f"/frame{gui_timestep.value}", show_axes=False) - if BOUNDING_BOX_TYPE == "mesh": - meshes = [] - for _, mesh in get_bounding_box_meshes(scene, gui_timestep.value).items(): - meshes.append(mesh) - self.server.scene.add_mesh_trimesh( - f"/frame{gui_timestep.value}/detections", - trimesh.util.concatenate(meshes), - visible=True, - ) - elif BOUNDING_BOX_TYPE == "lines": - lines, colors = get_bounding_box_outlines(scene, gui_timestep.value) - self.server.scene.add_line_segments( - f"/frame{gui_timestep.value}/detections", - points=lines, - colors=colors, - line_width=LINE_WIDTH, - ) - else: - raise ValueError(f"Unknown bounding box type: {BOUNDING_BOX_TYPE}") - - current_frame_handle.remove() - current_frame_handle = mew_frame_handle - - for camera_type in VISUALIZE_CAMERA_GUI: - camera = get_camera_if_available(scene, camera_type, gui_timestep.value) - if camera is not None: - camera_gui_handles[camera_type].image = camera.image - - for camera_type in VISUALIZE_CAMERA_FRUSTUM: - camera = get_camera_if_available(scene, camera_type, gui_timestep.value) - if camera is not None: - camera_position, camera_quaternion = get_camera_values(scene, camera, gui_timestep.value) - camera_frustum_handles[camera_type].position = camera_position.array - camera_frustum_handles[camera_type].wxyz = camera_quaternion.q - camera_frustum_handles[camera_type].image = camera.image - - if LIDAR_AVAILABLE: - try: - points, colors = get_lidar_points(scene, gui_timestep.value, LIDAR_TYPES) - except Exception as e: - print(f"Error getting lidar points: {e}") - points = np.zeros((0, 3)) - colors = 
np.zeros((0, 3)) - - gui_lidar.points = points - gui_lidar.colors = colors - - prev_timestep = current_timestep - - rendering_time = time.time() - start - sleep_time = 1.0 / gui_framerate.value - rendering_time - time.sleep(max(sleep_time, 0.0)) - self.server.flush() # Optional! - - # Load in frames. - current_frame_handle = self.server.scene.add_frame(f"/frame{gui_timestep.value}", show_axes=False) - self.server.scene.add_frame("/map", show_axes=False) - - camera_gui_handles: Dict[CameraType, viser.GuiImageHandle] = {} - camera_frustum_handles: Dict[CameraType, viser.CameraFrustumHandle] = {} - - for camera_type in VISUALIZE_CAMERA_GUI: - camera = get_camera_if_available(scene, camera_type, gui_timestep.value) - if camera is not None: - with self.server.gui.add_folder(f"Camera {camera_type.serialize()}"): - camera_gui_handles[camera_type] = self.server.gui.add_image( - image=camera.image, - label=camera_type.serialize(), - format="jpeg", - ) - - for camera_type in VISUALIZE_CAMERA_FRUSTUM: - camera = get_camera_if_available(scene, camera_type, gui_timestep.value) - if camera is not None: - camera_position, camera_quaternion = get_camera_values(scene, camera, gui_timestep.value) - camera_frustum_handles[camera_type] = self.server.scene.add_camera_frustum( - f"camera_frustum_{camera_type.serialize()}", - fov=camera.metadata.fov_y, - aspect=camera.metadata.aspect_ratio, - scale=CAMERA_SCALE, - image=camera.image, - position=camera_position.array, - wxyz=camera_quaternion.q, - ) - - if LIDAR_AVAILABLE: - try: - points, colors = get_lidar_points(scene, gui_timestep.value, LIDAR_TYPES) - except Exception as e: - print(f"Error getting lidar points: {e}") - points = np.zeros((0, 3)) - colors = np.zeros((0, 3)) - - gui_lidar = self.server.scene.add_point_cloud( - name="LiDAR", - points=points, - colors=colors, - point_size=LIDAR_POINT_SIZE, - point_shape="circle", - ) - - if MAP_AVAILABLE: - for name, mesh in get_map_meshes(scene).items(): - self.server.scene.add_mesh_trimesh(f"/map/{name}", mesh, visible=True) - - # centerlines, __, __, road_edges = get_map_lines(scene) - # for i, centerline in enumerate(centerlines): - # self.server.scene.add_line_segments( - # "/map/centerlines", - # centerlines, - # colors=[[BLACK.rgb]], - # line_width=LINE_WIDTH, - # ) - # self.server.scene.add_line_segments( - # "/map/left_boundary", - # left_boundaries, - # colors=[[TAB_10[2].rgb]], - # line_width=LINE_WIDTH, - # ) - # self.server.scene.add_line_segments( - # "/map/right_boundary",clear - # right_boundaries, - # colors=[[TAB_10[3].rgb]], - # line_width=LINE_WIDTH, - # ) - # print(centerlines.shape, road_edges.shape) - # self.server.scene.add_line_segments( - # "/map/road_edges", - # road_edges, - # colors=[[BLACK.rgb]], - # line_width=LINE_WIDTH, - # ) - - # Playback update loop. - prev_timestep = gui_timestep.value - while server_playing: - # Update the timestep if we're playing. - if gui_playing.value: - gui_timestep.value = (gui_timestep.value + 1) % num_frames - - self.server.flush() - self.next() diff --git a/d123/common/visualization/viser/viser_viewer.py b/d123/common/visualization/viser/viser_viewer.py index 8cf1ec80..c981ea89 100644 --- a/d123/common/visualization/viser/viser_viewer.py +++ b/d123/common/visualization/viser/viser_viewer.py @@ -94,7 +94,7 @@ def __init__( self, scenes: List[AbstractScene], viser_config: ViserConfig = ViserConfig(), - scene_index: int = 0.0, + scene_index: int = 0, ) -> None: assert len(scenes) > 0, "At least one scene must be provided." 
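Background for the fisheye changes in this patch: KITTI-360 calibrates image_02/image_03 with the MEI omnidirectional model, which the converter stores as a mirror parameter (xi), FisheyeMEIDistortion (k1, k2, p1, p2), and FisheyeMEIProjection (gamma1, gamma2, u0, v0). The following is a minimal sketch of how these parameters project a camera-frame point to pixels, assuming the standard MEI model as used by the KITTI-360 devkit; project_mei and its signature are illustrative and not part of this patch:

    import numpy as np

    def project_mei(point, xi, k1, k2, p1, p2, gamma1, gamma2, u0, v0):
        # 1. Project the camera-frame 3D point onto the unit sphere.
        x_s, y_s, z_s = point / np.linalg.norm(point)
        # 2. Shift the projection center by the mirror parameter xi,
        #    then apply the perspective division.
        x, y = x_s / (z_s + xi), y_s / (z_s + xi)
        # 3. Radial-tangential distortion (k1, k2, p1, p2).
        r2 = x * x + y * y
        radial = 1.0 + k1 * r2 + k2 * r2 * r2
        x_d = radial * x + 2.0 * p1 * x * y + p2 * (r2 + 2.0 * x * x)
        y_d = radial * y + p1 * (r2 + 2.0 * y * y) + 2.0 * p2 * x * y
        # 4. Generalized camera matrix: gamma1/gamma2 focal terms, (u0, v0) principal point.
        return np.array([gamma1 * x_d + u0, gamma2 * y_d + v0])

For xi = 0 this reduces to a plain pinhole projection with OpenCV-style radial-tangential distortion, which is why the pinhole parameters above follow the same (k1, k2, p1, p2) convention.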
diff --git a/d123/datasets/av2/av2_data_converter.py b/d123/datasets/av2/av2_data_converter.py index 433b731c..48fc8cb9 100644 --- a/d123/datasets/av2/av2_data_converter.py +++ b/d123/datasets/av2/av2_data_converter.py @@ -23,10 +23,6 @@ from d123.datatypes.detections.detection import BoxDetectionMetadata, BoxDetectionSE3, BoxDetectionWrapper from d123.datatypes.detections.detection_types import DetectionType from d123.datatypes.scene.scene_metadata import LogMetadata -<<<<<<< HEAD -from d123.datatypes.sensors.camera import PinholeCameraMetadata, CameraType, camera_metadata_dict_to_json -from d123.datatypes.sensors.lidar import LiDARMetadata, LiDARType, lidar_metadata_dict_to_json -======= from d123.datatypes.sensors.camera.pinhole_camera import ( PinholeCameraMetadata, PinholeCameraType, @@ -34,7 +30,6 @@ PinholeIntrinsics, ) from d123.datatypes.sensors.lidar.lidar import LiDARMetadata, LiDARType ->>>>>>> dev_v0.0.7 from d123.datatypes.time.time_point import TimePoint from d123.datatypes.vehicle_state.ego_state import DynamicStateSE3, EgoStateSE3 from d123.datatypes.vehicle_state.vehicle_parameters import ( @@ -216,20 +211,12 @@ def convert_av2_log_to_arrow( return [] -<<<<<<< HEAD -def get_av2_camera_metadata(log_path: Path) -> Dict[CameraType, PinholeCameraMetadata]: -======= def get_av2_camera_metadata(log_path: Path) -> Dict[PinholeCameraType, PinholeCameraMetadata]: ->>>>>>> dev_v0.0.7 intrinsics_file = log_path / "calibration" / "intrinsics.feather" intrinsics_df = pd.read_feather(intrinsics_file) -<<<<<<< HEAD - camera_metadata: Dict[CameraType, PinholeCameraMetadata] = {} -======= camera_metadata: Dict[PinholeCameraType, PinholeCameraMetadata] = {} ->>>>>>> dev_v0.0.7 for _, row in intrinsics_df.iterrows(): row = row.to_dict() camera_type = AV2_CAMERA_TYPE_MAPPING[row["sensor_name"]] diff --git a/d123/datasets/kitti_360/kitti_360_data_converter.py b/d123/datasets/kitti_360/kitti_360_data_converter.py index 0616dcfa..69ed6f8b 100644 --- a/d123/datasets/kitti_360/kitti_360_data_converter.py +++ b/d123/datasets/kitti_360/kitti_360_data_converter.py @@ -20,17 +20,33 @@ import logging from pyquaternion import Quaternion -from nuplan.planning.utils.multithreading.worker_utils import WorkerPool, worker_map - -from d123.datatypes.detections.detection_types import DetectionType -from d123.datatypes.sensors.camera import PinholeCameraMetadata, FisheyeMEICameraMetadata, CameraType, camera_metadata_dict_to_json -from d123.datatypes.sensors.lidar import LiDARMetadata, LiDARType, lidar_metadata_dict_to_json -from d123.datatypes.sensors.lidar_index import Kitti360LidarIndex +from d123.common.multithreading.worker_utils import WorkerPool, worker_map + +from d123.datatypes.detections.detection import ( + BoxDetectionMetadata, + BoxDetectionSE3, + BoxDetectionWrapper, +) +from d123.datatypes.sensors.camera.pinhole_camera import ( + PinholeCameraMetadata, + PinholeCameraType, + PinholeDistortion, + PinholeIntrinsics, +) +from d123.datatypes.sensors.camera.fisheye_mei_camera import ( + FisheyeMEICameraMetadata, + FisheyeMEICameraType, + FisheyeMEIDistortion, + FisheyeMEIProjection, +) +from d123.datatypes.sensors.lidar.lidar import LiDARMetadata, LiDARType +from d123.datasets.utils.sensor.lidar_index_registry import Kitti360LidarIndex from d123.datatypes.time.time_point import TimePoint from d123.datatypes.vehicle_state.ego_state import DynamicStateSE3, EgoStateSE3, EgoStateSE3Index from d123.datatypes.vehicle_state.vehicle_parameters import 
get_kitti360_station_wagon_parameters,rear_axle_se3_to_center_se3 from d123.common.utils.arrow_helper import open_arrow_table, write_arrow_table from d123.datasets.raw_data_converter import DataConverterConfig, RawDataConverter +from d123.datasets.utils.arrow_ipc_writer import ArrowLogWriter from d123.datatypes.scene.scene_metadata import LogMetadata from d123.datasets.kitti_360.kitti_360_helper import KITTI360Bbox3D, KITTI3602NUPLAN_IMU_CALIBRATION, get_lidar_extrinsic from d123.datasets.kitti_360.labels import KIITI360_DETECTION_NAME_DICT,kittiId2label,BBOX_LABLES_TO_DETECTION_NAME_DICT @@ -44,10 +60,10 @@ KITTI360_DATA_ROOT = Path(os.environ["KITTI360_DATA_ROOT"]) KITTI360_CAMERA_TYPES = { - CameraType.CAM_STEREO_L: "image_00", - CameraType.CAM_STEREO_R: "image_01", - CameraType.CAM_L1: "image_02", - CameraType.CAM_R1: "image_03", + PinholeCameraType.CAM_STEREO_L: "image_00", + PinholeCameraType.CAM_STEREO_R: "image_01", + FisheyeMEICameraType.CAM_L: "image_02", + FisheyeMEICameraType.CAM_R: "image_03", } DIR_2D_RAW = "data_2d_raw" @@ -192,7 +208,10 @@ def convert_kitti360_map_to_gpkg( split: str = log_info["split"] log_name = log_path.stem - map_path = data_converter_config.output_path / "maps" / split / f"kitti360_{log_name}.gpkg" + D123_MAPS_ROOT = Path(os.environ.get("D123_MAPS_ROOT")) + map_path = D123_MAPS_ROOT / split / f"{log_name}.gpkg" + #map_path = data_converter_config.output_path / "maps" / split / f"{log_name}.gpkg" + map_path.parent.mkdir(parents=True, exist_ok=True) if data_converter_config.force_map_conversion or not map_path.exists(): map_path.unlink(missing_ok=True) convert_kitti360_map(log_name, map_path) @@ -216,65 +235,32 @@ def convert_kitti360_log_to_arrow( if not log_file_path.parent.exists(): log_file_path.parent.mkdir(parents=True, exist_ok=True) - metadata = LogMetadata( + log_metadata = LogMetadata( dataset="kitti360", + split=split, log_name=log_name, location=log_name, timestep_seconds=KITTI360_DT, + vehicle_parameters=get_kitti360_station_wagon_parameters(), + camera_metadata=get_kitti360_camera_metadata(), + lidar_metadata=get_kitti360_lidar_metadata(), map_has_z=True, + map_is_local=True, ) - vehicle_parameters = get_kitti360_station_wagon_parameters() - camera_metadata = get_kitti360_camera_metadata() - lidar_metadata = get_kitti360_lidar_metadata() - - schema_column_list = [ - ("token", pa.string()), - ("timestamp", pa.int64()), - ("detections_state", pa.list_(pa.list_(pa.float64(), len(BoundingBoxSE3Index)))), - ("detections_velocity", pa.list_(pa.list_(pa.float64(), len(Vector3DIndex)))), - ("detections_token", pa.list_(pa.string())), - ("detections_type", pa.list_(pa.int16())), - ("ego_states", pa.list_(pa.float64(), len(EgoStateSE3Index))), - ("traffic_light_ids", pa.list_(pa.int64())), - ("traffic_light_types", pa.list_(pa.int16())), - ("scenario_tag", pa.list_(pa.string())), - ("route_lane_group_ids", pa.list_(pa.int64())), - ] - if data_converter_config.lidar_store_option is not None: - for lidar_type in lidar_metadata.keys(): - if data_converter_config.lidar_store_option == "path": - schema_column_list.append((lidar_type.serialize(), pa.string())) - elif data_converter_config.lidar_store_option == "binary": - raise NotImplementedError("Binary lidar storage is not implemented.") - - if data_converter_config.camera_store_option is not None: - for camera_type in camera_metadata.keys(): - if data_converter_config.camera_store_option == "path": - schema_column_list.append((camera_type.serialize(), pa.string())) - schema_column_list.append( - 
(f"{camera_type.serialize()}_extrinsic", pa.list_(pa.float64(), 4 * 4)) - ) - elif data_converter_config.camera_store_option == "binary": - raise NotImplementedError("Binary camera storage is not implemented.") - - recording_schema = pa.schema(schema_column_list) - recording_schema = recording_schema.with_metadata( - { - "log_metadata": json.dumps(asdict(metadata)), - "vehicle_parameters": json.dumps(asdict(vehicle_parameters)), - "camera_metadata": camera_metadata_dict_to_json(camera_metadata), - "lidar_metadata": lidar_metadata_dict_to_json(lidar_metadata), - } + log_writer = ArrowLogWriter( + log_path=log_file_path, + data_converter_config=data_converter_config, + log_metadata=log_metadata, ) - _write_recording_table(log_name, recording_schema, log_file_path, data_converter_config) + _write_recording_table(log_name, log_writer, log_file_path, data_converter_config) gc.collect() return [] -def get_kitti360_camera_metadata() -> Dict[CameraType, Union[PinholeCameraMetadata, FisheyeMEICameraMetadata]]: +def get_kitti360_camera_metadata() -> Dict[Union[PinholeCameraType, FisheyeMEICameraType], Union[PinholeCameraMetadata, FisheyeMEICameraMetadata]]: persp = PATH_CALIB_ROOT / "perspective.txt" @@ -300,24 +286,40 @@ def get_kitti360_camera_metadata() -> Dict[CameraType, Union[PinholeCameraMetada fisheye03 = _readYAMLFile(fisheye_camera03_path) fisheye_result = {"image_02": fisheye02, "image_03": fisheye03} - log_cam_infos: Dict[str, Union[PinholeCameraMetadata, FisheyeMEICameraMetadata]] = {} + log_cam_infos: Dict[Union[PinholeCameraType, FisheyeMEICameraType], Union[PinholeCameraMetadata, FisheyeMEICameraMetadata]] = {} for cam_type, cam_name in KITTI360_CAMERA_TYPES.items(): if cam_name in ["image_00", "image_01"]: log_cam_infos[cam_type] = PinholeCameraMetadata( camera_type=cam_type, width=persp_result[cam_name]["wh"][0], height=persp_result[cam_name]["wh"][1], - intrinsic=np.array(persp_result[cam_name]["intrinsic"]), - distortion=np.array(persp_result[cam_name]["distortion"]), + intrinsics=PinholeIntrinsics.from_camera_matrix(np.array(persp_result[cam_name]["intrinsic"])), + distortion=PinholeDistortion.from_array(np.array(persp_result[cam_name]["distortion"])), ) elif cam_name in ["image_02","image_03"]: + distortion_params = fisheye_result[cam_name]["distortion_parameters"] + distortion = FisheyeMEIDistortion( + k1=distortion_params['k1'], + k2=distortion_params['k2'], + p1=distortion_params['p1'], + p2=distortion_params['p2'], + ) + + projection_params = fisheye_result[cam_name]["projection_parameters"] + projection = FisheyeMEIProjection( + gamma1=projection_params['gamma1'], + gamma2=projection_params['gamma2'], + u0=projection_params['u0'], + v0=projection_params['v0'], + ) + log_cam_infos[cam_type] = FisheyeMEICameraMetadata( camera_type=cam_type, width=fisheye_result[cam_name]["image_width"], height=fisheye_result[cam_name]["image_height"], - mirror_parameters=fisheye_result[cam_name]["mirror_parameters"], - distortion=np.array(fisheye_result[cam_name]["distortion_parameters"]), - projection_parameters= np.array(fisheye_result[cam_name]["projection_parameters"]), + mirror_parameter=fisheye_result[cam_name]["mirror_parameters"], + distortion=distortion, + projection=projection, ) return log_cam_infos @@ -347,65 +349,45 @@ def _readYAMLFile(fileName:Path) -> Dict[str, Any]: def get_kitti360_lidar_metadata() -> Dict[LiDARType, LiDARMetadata]: metadata: Dict[LiDARType, LiDARMetadata] = {} extrinsic = get_lidar_extrinsic() + extrinsic_state_se3 = 
StateSE3.from_transformation_matrix(extrinsic) metadata[LiDARType.LIDAR_TOP] = LiDARMetadata( lidar_type=LiDARType.LIDAR_TOP, lidar_index=Kitti360LidarIndex, - extrinsic=extrinsic, + extrinsic=extrinsic_state_se3, ) return metadata def _write_recording_table( log_name: str, - recording_schema: pa.Schema, + log_writer: ArrowLogWriter, log_file_path: Path, data_converter_config: DataConverterConfig ) -> None: ts_list: List[TimePoint] = _read_timestamps(log_name) ego_state_all, valid_timestamp = _extract_ego_state_all(log_name) - ego_states_xyz = np.array([ego_state[:3] for ego_state in ego_state_all],dtype=np.float64) - detections_states,detections_velocity,detections_tokens,detections_types = _extract_detections(log_name,len(ts_list),ego_states_xyz,valid_timestamp) - - with pa.OSFile(str(log_file_path), "wb") as sink: - with pa.ipc.new_file(sink, recording_schema) as writer: - for idx in range(len(valid_timestamp)): - valid_idx = valid_timestamp[idx] - row_data = { - "token": [create_token(f"{log_name}_{idx}")], - "timestamp": [ts_list[valid_idx].time_us], - "detections_state": [detections_states[valid_idx]], - "detections_velocity": [detections_velocity[valid_idx]], - "detections_token": [detections_tokens[valid_idx]], - "detections_type": [detections_types[valid_idx]], - "ego_states": [ego_state_all[idx]], - "traffic_light_ids": [[]], - "traffic_light_types": [[]], - "scenario_tag": [['unknown']], - "route_lane_group_ids": [[]], - } - - if data_converter_config.lidar_store_option is not None: - lidar_data_dict = _extract_lidar(log_name, valid_idx, data_converter_config) - for lidar_type, lidar_data in lidar_data_dict.items(): - if lidar_data is not None: - row_data[lidar_type.serialize()] = [lidar_data] - else: - row_data[lidar_type.serialize()] = [None] - - if data_converter_config.camera_store_option is not None: - camera_data_dict = _extract_cameras(log_name, valid_idx, data_converter_config) - for camera_type, camera_data in camera_data_dict.items(): - if camera_data is not None: - row_data[camera_type.serialize()] = [camera_data[0]] - row_data[f"{camera_type.serialize()}_extrinsic"] = [camera_data[1]] - else: - row_data[camera_type.serialize()] = [None] - row_data[f"{camera_type.serialize()}_extrinsic"] = [None] - - batch = pa.record_batch(row_data, schema=recording_schema) - writer.write_batch(batch) - - del batch + ego_states_xyz = np.array([ego_state.center.array[:3] for ego_state in ego_state_all],dtype=np.float64) + box_detection_wrapper_all = _extract_detections(log_name,len(ts_list),ego_states_xyz,valid_timestamp) + logging.info(f"Number of valid timestamps with ego states: {len(valid_timestamp)}") + for idx in range(len(valid_timestamp)): + valid_idx = valid_timestamp[idx] + + cameras = _extract_cameras(log_name, valid_idx, data_converter_config) + lidars = _extract_lidar(log_name, valid_idx, data_converter_config) + + log_writer.add_row( + token=create_token(f"{log_name}_{idx}"), + timestamp=ts_list[valid_idx], + ego_state=ego_state_all[idx], + box_detections=box_detection_wrapper_all[valid_idx], + traffic_lights=None, + cameras=cameras, + lidars=lidars, + scenario_tags=None, + route_lane_group_ids=None, + ) + + log_writer.close() if SORT_BY_TIMESTAMP: recording_table = open_arrow_table(log_file_path) @@ -449,7 +431,7 @@ def _read_timestamps(log_name: str) -> Optional[List[TimePoint]]: return tps return None -def _extract_ego_state_all(log_name: str) -> Tuple[List[List[float]], List[int]]: +def _extract_ego_state_all(log_name: str) -> Tuple[List[EgoStateSE3], List[int]]: 
ego_state_all: List[List[float]] = [] @@ -518,7 +500,7 @@ def _extract_ego_state_all(log_name: str) -> Tuple[List[List[float]], List[int]] dynamic_state_se3=dynamic_state, vehicle_parameters=vehicle_parameters, timepoint=None, - ).array.tolist() + ) ) return ego_state_all, valid_timestamp @@ -527,7 +509,7 @@ def _extract_detections( ts_len: int, ego_states_xyz: np.ndarray, valid_timestamp: List[int], -) -> Tuple[List[List[float]], List[List[float]], List[str], List[int]]: +) -> List[BoxDetectionWrapper]: detections_states: List[List[List[float]]] = [[] for _ in range(ts_len)] detections_velocity: List[List[List[float]]] = [[] for _ in range(ts_len)] @@ -549,6 +531,7 @@ def _extract_detections( with open(detection_preprocess_path, "rb") as f: detection_preprocess_result = pickle.load(f) static_records_dict = {record_item["global_id"]: record_item for record_item in detection_preprocess_result["static"]} + logging.info(f"Loaded detection preprocess data from {detection_preprocess_path}") else: detection_preprocess_result = None @@ -575,9 +558,9 @@ def _extract_detections( for record in obj.valid_frames["records"]: frame = record["timestamp"] detections_states[frame].append(obj.get_state_array()) - detections_velocity[frame].append([0.0, 0.0, 0.0]) + detections_velocity[frame].append(np.array([0.0, 0.0, 0.0])) detections_tokens[frame].append(str(obj.globalID)) - detections_types[frame].append(int(KIITI360_DETECTION_NAME_DICT[obj.name])) + detections_types[frame].append(KIITI360_DETECTION_NAME_DICT[obj.name]) else: global_ID = obj.globalID dynamic_objs[global_ID].append(obj) @@ -614,9 +597,35 @@ def _extract_detections( detections_states[frame].append(obj.get_state_array()) detections_velocity[frame].append(vel) detections_tokens[frame].append(str(obj.globalID)) - detections_types[frame].append(int(KIITI360_DETECTION_NAME_DICT[obj.name])) - - return detections_states, detections_velocity, detections_tokens, detections_types + detections_types[frame].append(KIITI360_DETECTION_NAME_DICT[obj.name]) + + box_detection_wrapper_all: List[BoxDetectionWrapper] = [] + for frame in range(ts_len): + box_detections: List[BoxDetectionSE3] = [] + for state, velocity, token, detection_type in zip( + detections_states[frame], + detections_velocity[frame], + detections_tokens[frame], + detections_types[frame], + ): + if state is None: + break + detection_metadata = BoxDetectionMetadata( + detection_type=detection_type, + timepoint=None, + track_token=token, + confidence=None, + ) + bounding_box_se3 = BoundingBoxSE3.from_array(state) + velocity_vector = Vector3D.from_array(velocity) + box_detection = BoxDetectionSE3( + metadata=detection_metadata, + bounding_box_se3=bounding_box_se3, + velocity=velocity_vector, + ) + box_detections.append(box_detection) + box_detection_wrapper_all.append(BoxDetectionWrapper(box_detections=box_detections)) + return box_detection_wrapper_all def _extract_lidar(log_name: str, idx: int, data_converter_config: DataConverterConfig) -> Dict[LiDARType, Optional[str]]: @@ -637,9 +646,9 @@ def _extract_lidar(log_name: str, idx: int, data_converter_config: DataConverter def _extract_cameras( log_name: str, idx: int, data_converter_config: DataConverterConfig -) -> Dict[CameraType, Optional[str]]: +) -> Dict[Union[PinholeCameraType, FisheyeMEICameraType], Optional[Tuple[Union[str, bytes], StateSE3]]]: - camera_dict: Dict[str, Union[str, bytes]] = {} + camera_dict: Dict[Union[PinholeCameraType, FisheyeMEICameraType], Optional[Tuple[Union[str, bytes], StateSE3]]] = {} for camera_type, 
cam_dir_name in KITTI360_CAMERA_TYPES.items(): if cam_dir_name in ["image_00", "image_01"]: img_path_png = PATH_2D_RAW_ROOT / log_name / cam_dir_name / "data_rect" / f"{idx:010d}.png" @@ -663,11 +672,13 @@ def _extract_cameras( if img_path_png.exists(): if data_converter_config.camera_store_option == "path": - camera_data = str(img_path_png), cam2pose.flatten().tolist() + camera_data = str(img_path_png) elif data_converter_config.camera_store_option == "binary": with open(img_path_png, "rb") as f: - camera_data = f.read(), cam2pose + camera_data = f.read() else: - camera_data = None, cam2pose.flatten().tolist() - camera_dict[camera_type] = camera_data + camera_data = None + + camera_extrinsic = StateSE3.from_transformation_matrix(cam2pose) + camera_dict[camera_type] = camera_data, camera_extrinsic return camera_dict diff --git a/d123/datasets/kitti_360/load_sensor.py b/d123/datasets/kitti_360/load_sensor.py index 7ca4489a..fa206526 100644 --- a/d123/datasets/kitti_360/load_sensor.py +++ b/d123/datasets/kitti_360/load_sensor.py @@ -3,7 +3,7 @@ import numpy as np import logging -from d123.datatypes.sensors.lidar import LiDAR, LiDARMetadata +from d123.datatypes.sensors.lidar.lidar import LiDAR, LiDARMetadata def load_kitti360_lidar_from_path(filepath: Path, lidar_metadata: LiDARMetadata) -> LiDAR: @@ -20,7 +20,7 @@ def load_kitti360_lidar_from_path(filepath: Path, lidar_metadata: LiDARMetadata) ones = np.ones((xyz.shape[0], 1), dtype=pcd.dtype) points_h = np.concatenate([xyz, ones], axis=1) #[N,4] - transformed_h = lidar_metadata.extrinsic @ points_h.T #[4,N] + transformed_h = lidar_metadata.extrinsic.transformation_matrix @ points_h.T #[4,N] transformed_xyz = transformed_h[:3, :] # (3,N) diff --git a/d123/datasets/nuplan/nuplan_data_converter.py b/d123/datasets/nuplan/nuplan_data_converter.py index 93da2a8e..7beb3f73 100644 --- a/d123/datasets/nuplan/nuplan_data_converter.py +++ b/d123/datasets/nuplan/nuplan_data_converter.py @@ -234,15 +234,18 @@ def convert_nuplan_log_to_arrow( return [] -def get_nuplan_camera_metadata(log_path: Path) -> Dict[CameraType, CameraMetadata]: +def get_nuplan_camera_metadata(log_path: Path) -> Dict[PinholeCameraType, PinholeCameraMetadata]: - def _get_camera_metadata(camera_type: CameraType) -> CameraMetadata: + def _get_camera_metadata(camera_type: PinholeCameraType) -> PinholeCameraMetadata: cam = list(get_cameras(log_path, [str(NUPLAN_CAMERA_TYPES[camera_type].value)]))[0] - intrinsic = np.array(pickle.loads(cam.intrinsic)) - rotation = np.array(pickle.loads(cam.rotation)) - rotation = Quaternion(rotation).rotation_matrix - distortion = np.array(pickle.loads(cam.distortion)) - return CameraMetadata( + + intrinsics_camera_matrix = np.array(pickle.loads(cam.intrinsic), dtype=np.float64) # array of shape (3, 3) + intrinsic = PinholeIntrinsics.from_camera_matrix(intrinsics_camera_matrix) + + distortion_array = np.array(pickle.loads(cam.distortion), dtype=np.float64) # array of shape (5,) + distortion = PinholeDistortion.from_array(distortion_array, copy=False) + + return PinholeCameraMetadata( camera_type=camera_type, width=cam.width, height=cam.height, diff --git a/d123/datasets/utils/arrow_ipc_writer.py b/d123/datasets/utils/arrow_ipc_writer.py index fa0ed439..4e2f491f 100644 --- a/d123/datasets/utils/arrow_ipc_writer.py +++ b/d123/datasets/utils/arrow_ipc_writer.py @@ -8,6 +8,7 @@ from d123.datatypes.scene.arrow.utils.arrow_metadata_utils import add_log_metadata_to_arrow_schema from d123.datatypes.scene.scene_metadata import LogMetadata from 
d123.datatypes.sensors.camera.pinhole_camera import PinholeCameraType +from d123.datatypes.sensors.camera.fisheye_mei_camera import FisheyeMEICameraType from d123.datatypes.sensors.lidar.lidar import LiDARType from d123.datatypes.time.time_point import TimePoint from d123.datatypes.vehicle_state.ego_state import EgoStateSE3, EgoStateSE3Index @@ -119,7 +120,7 @@ def add_row( ego_state: Optional[EgoStateSE3] = None, box_detections: Optional[BoxDetectionWrapper] = None, traffic_lights: Optional[TrafficLightDetectionWrapper] = None, - cameras: Optional[Dict[PinholeCameraType, Tuple[Any, ...]]] = None, + cameras: Optional[Dict[Union[PinholeCameraType, FisheyeMEICameraType], Tuple[Any, ...]]] = None, lidars: Optional[Dict[LiDARType, Any]] = None, scenario_tags: Optional[List[str]] = None, route_lane_group_ids: Optional[List[int]] = None, diff --git a/d123/datatypes/scene/abstract_scene.py b/d123/datatypes/scene/abstract_scene.py index c9fbc1af..8e835afb 100644 --- a/d123/datatypes/scene/abstract_scene.py +++ b/d123/datatypes/scene/abstract_scene.py @@ -1,12 +1,13 @@ from __future__ import annotations import abc -from typing import List, Optional +from typing import List, Optional, Union from d123.datatypes.detections.detection import BoxDetectionWrapper, DetectionRecording, TrafficLightDetectionWrapper from d123.datatypes.maps.abstract_map import AbstractMap from d123.datatypes.scene.scene_metadata import LogMetadata, SceneExtractionMetadata from d123.datatypes.sensors.camera.pinhole_camera import PinholeCamera, PinholeCameraType +from d123.datatypes.sensors.camera.fisheye_mei_camera import FisheyeMEICamera, FisheyeMEICameraType from d123.datatypes.sensors.lidar.lidar import LiDAR, LiDARType from d123.datatypes.time.time_point import TimePoint from d123.datatypes.vehicle_state.ego_state import EgoStateSE3 @@ -56,7 +57,7 @@ def get_route_lane_group_ids(self, iteration: int) -> Optional[List[int]]: raise NotImplementedError @abc.abstractmethod - def get_camera_at_iteration(self, iteration: int, camera_type: PinholeCameraType) -> Optional[PinholeCamera]: + def get_camera_at_iteration(self, iteration: int, camera_type: Union[PinholeCameraType, FisheyeMEICameraType]) -> Optional[Union[PinholeCamera, FisheyeMEICamera]]: raise NotImplementedError @abc.abstractmethod @@ -81,7 +82,7 @@ def vehicle_parameters(self) -> VehicleParameters: return self.log_metadata.vehicle_parameters @property - def available_camera_types(self) -> List[PinholeCameraType]: + def available_camera_types(self) -> List[Union[PinholeCameraType, FisheyeMEICameraType]]: return list(self.log_metadata.camera_metadata.keys()) @property diff --git a/d123/datatypes/scene/arrow/arrow_scene.py b/d123/datatypes/scene/arrow/arrow_scene.py index 8aa595db..86904b6c 100644 --- a/d123/datatypes/scene/arrow/arrow_scene.py +++ b/d123/datatypes/scene/arrow/arrow_scene.py @@ -19,6 +19,7 @@ from d123.datatypes.scene.arrow.utils.arrow_metadata_utils import get_log_metadata_from_arrow from d123.datatypes.scene.scene_metadata import LogMetadata, SceneExtractionMetadata from d123.datatypes.sensors.camera.pinhole_camera import PinholeCamera, PinholeCameraType +from d123.datatypes.sensors.camera.fisheye_mei_camera import FisheyeMEICamera, FisheyeMEICameraType from d123.datatypes.sensors.lidar.lidar import LiDAR, LiDARType from d123.datatypes.time.time_point import TimePoint from d123.datatypes.vehicle_state.ego_state import EgoStateSE3 @@ -124,8 +125,8 @@ def get_route_lane_group_ids(self, iteration: int) -> Optional[List[int]]: route_lane_group_ids = 
table["route_lane_group_ids"][self._get_table_index(iteration)].as_py() return route_lane_group_ids - def get_camera_at_iteration(self, iteration: int, camera_type: PinholeCameraType) -> Optional[PinholeCamera]: - camera: Optional[PinholeCamera] = None + def get_camera_at_iteration(self, iteration: int, camera_type: Union[PinholeCameraType, FisheyeMEICameraType]) -> Optional[Union[PinholeCamera, FisheyeMEICamera]]: + camera: Optional[Union[PinholeCamera, FisheyeMEICamera]] = None if camera_type in self.available_camera_types: camera = get_camera_from_arrow_table( self._get_recording_table(), diff --git a/d123/datatypes/scene/arrow/utils/arrow_getters.py b/d123/datatypes/scene/arrow/utils/arrow_getters.py index 578560b9..92a68f51 100644 --- a/d123/datatypes/scene/arrow/utils/arrow_getters.py +++ b/d123/datatypes/scene/arrow/utils/arrow_getters.py @@ -2,7 +2,7 @@ import os from pathlib import Path -from typing import Dict, List, Optional +from typing import Dict, List, Optional, Union import cv2 import numpy as np @@ -21,6 +21,7 @@ from d123.datatypes.detections.detection_types import DetectionType from d123.datatypes.scene.scene_metadata import LogMetadata from d123.datatypes.sensors.camera.pinhole_camera import PinholeCamera, PinholeCameraType +from d123.datatypes.sensors.camera.fisheye_mei_camera import FisheyeMEICamera, FisheyeMEICameraType from d123.datatypes.sensors.lidar.lidar import LiDAR, LiDARType from d123.datatypes.time.time_point import TimePoint from d123.datatypes.vehicle_state.ego_state import EgoStateSE3 @@ -95,9 +96,9 @@ def get_traffic_light_detections_from_arrow_table(arrow_table: pa.Table, index: def get_camera_from_arrow_table( arrow_table: pa.Table, index: int, - camera_type: PinholeCameraType, + camera_type: Union[PinholeCameraType, FisheyeMEICameraType], log_metadata: LogMetadata, -) -> PinholeCamera: +) -> Union[PinholeCamera, FisheyeMEICamera]: camera_name = camera_type.serialize() table_data = arrow_table[f"{camera_name}_data"][index].as_py() @@ -121,11 +122,19 @@ def get_camera_from_arrow_table( else: raise NotImplementedError("Only string file paths for camera data are supported.") - return PinholeCamera( - metadata=log_metadata.camera_metadata[camera_type], - image=image, - extrinsic=extrinsic, - ) + camera_metadata = log_metadata.camera_metadata[camera_type] + if hasattr(camera_metadata, 'mirror_parameter') and camera_metadata.mirror_parameter is not None: + return FisheyeMEICamera( + metadata=camera_metadata, + image=image, + extrinsic=extrinsic, + ) + else: + return PinholeCamera( + metadata=camera_metadata, + image=image, + extrinsic=extrinsic, + ) def get_lidar_from_arrow_table( diff --git a/d123/datatypes/scene/scene_filter.py b/d123/datatypes/scene/scene_filter.py index c05073db..3bbe340f 100644 --- a/d123/datatypes/scene/scene_filter.py +++ b/d123/datatypes/scene/scene_filter.py @@ -1,7 +1,9 @@ from dataclasses import dataclass -from typing import List, Optional +from typing import List, Optional, Union from d123.datatypes.sensors.camera.pinhole_camera import PinholeCameraType +from d123.datatypes.sensors.camera.fisheye_mei_camera import FisheyeMEICameraType +from d123.datatypes.sensors.camera.utils import get_camera_type_by_value, deserialize_camera_type # TODO: Add more filter options (e.g. 
scene tags, ego movement, or whatever appropriate) @@ -23,7 +25,7 @@ class SceneFilter: duration_s: Optional[float] = 10.0 history_s: Optional[float] = 3.0 - camera_types: Optional[List[PinholeCameraType]] = None + camera_types: Optional[List[Union[PinholeCameraType, FisheyeMEICameraType]]] = None max_num_scenes: Optional[int] = None shuffle: bool = False @@ -34,10 +36,12 @@ def __post_init__(self): camera_types = [] for camera_type in self.camera_types: if isinstance(camera_type, str): - camera_type = PinholeCameraType.deserialize[camera_type] + camera_type = deserialize_camera_type(camera_type) camera_types.append(camera_type) elif isinstance(camera_type, int): - camera_type = PinholeCameraType(camera_type) + camera_type = get_camera_type_by_value(camera_type) + camera_types.append(camera_type) + elif isinstance(camera_type, (PinholeCameraType, FisheyeMEICameraType)): camera_types.append(camera_type) else: raise ValueError(f"Invalid camera type: {camera_type}") diff --git a/d123/datatypes/scene/scene_metadata.py b/d123/datatypes/scene/scene_metadata.py index ae40aa4d..0c243de4 100644 --- a/d123/datatypes/scene/scene_metadata.py +++ b/d123/datatypes/scene/scene_metadata.py @@ -1,10 +1,11 @@ from __future__ import annotations from dataclasses import asdict, dataclass -from typing import Dict +from typing import Dict, Union import d123 from d123.datatypes.sensors.camera.pinhole_camera import PinholeCameraMetadata, PinholeCameraType +from d123.datatypes.sensors.camera.fisheye_mei_camera import FisheyeMEICameraMetadata, FisheyeMEICameraType from d123.datatypes.sensors.lidar.lidar import LiDARMetadata, LiDARType from d123.datatypes.vehicle_state.vehicle_parameters import VehicleParameters @@ -19,7 +20,7 @@ class LogMetadata: timestep_seconds: float vehicle_parameters: VehicleParameters - camera_metadata: Dict[PinholeCameraType, PinholeCameraMetadata] + camera_metadata: Union[Dict[PinholeCameraType, PinholeCameraMetadata], Dict[FisheyeMEICameraType, FisheyeMEICameraMetadata]] lidar_metadata: Dict[LiDARType, LiDARMetadata] map_has_z: bool @@ -30,10 +31,15 @@ class LogMetadata: def from_dict(cls, data_dict: Dict) -> LogMetadata: data_dict["vehicle_parameters"] = VehicleParameters(**data_dict["vehicle_parameters"]) - data_dict["camera_metadata"] = { - PinholeCameraType.deserialize(key): PinholeCameraMetadata.from_dict(value) - for key, value in data_dict.get("camera_metadata", {}).items() - } + camera_metadata = {} + for key, value in data_dict.get("camera_metadata", {}).items(): + if value.get("mirror_parameter") is not None: + camera_type = FisheyeMEICameraType.deserialize(key) + camera_metadata[camera_type] = FisheyeMEICameraMetadata.from_dict(value) + else: + camera_type = PinholeCameraType.deserialize(key) + camera_metadata[camera_type] = PinholeCameraMetadata.from_dict(value) + data_dict["camera_metadata"] = camera_metadata data_dict["lidar_metadata"] = { LiDARType.deserialize(key): LiDARMetadata.from_dict(value) for key, value in data_dict.get("lidar_metadata", {}).items() diff --git a/d123/datatypes/sensors/camera.py b/d123/datatypes/sensors/camera.py deleted file mode 100644 index a9cc209e..00000000 --- a/d123/datatypes/sensors/camera.py +++ /dev/null @@ -1,189 +0,0 @@ -from __future__ import annotations - -import json -from dataclasses import dataclass -from typing import Any, Dict, Union -from abc import ABC, abstractmethod - -import numpy as np -import numpy.typing as npt - -from d123.common.utils.enums import SerialIntEnum - - -class CameraType(SerialIntEnum): - """ - Enum for cameras 
in d123. - """ - - CAM_F0 = 0 - CAM_B0 = 1 - CAM_L0 = 2 - CAM_L1 = 3 - CAM_L2 = 4 - CAM_R0 = 5 - CAM_R1 = 6 - CAM_R2 = 7 - CAM_STEREO_L = 8 - CAM_STEREO_R = 9 - -@dataclass -class CameraMetadata(ABC): - camera_type: CameraType - width: int - height: int - - @abstractmethod - def to_dict(self) -> Dict[str, Any]: - ... - - @classmethod - @abstractmethod - def from_dict(cls, json_dict: Dict[str, Any]) -> CameraMetadata: - ... - -@dataclass -class PinholeCameraMetadata(CameraMetadata): - - intrinsic: npt.NDArray[np.float64] # 3x3 matrix # TODO: don't store matrix but values. - distortion: npt.NDArray[np.float64] # 5x1 vector # TODO: don't store matrix but values. - - def to_dict(self) -> Dict[str, Any]: - # TODO: remove None types. Only a placeholder for now. - return { - "camera_type": int(self.camera_type), - "width": self.width, - "height": self.height, - "intrinsic": self.intrinsic.tolist() if self.intrinsic is not None else None, - "distortion": self.distortion.tolist() if self.distortion is not None else None, - } - - @classmethod - def from_dict(cls, json_dict: Dict[str, Any]) -> PinholeCameraMetadata: - # TODO: remove None types. Only a placeholder for now. - return cls( - camera_type=CameraType(json_dict["camera_type"]), - width=json_dict["width"], - height=json_dict["height"], - intrinsic=np.array(json_dict["intrinsic"]) if json_dict["intrinsic"] is not None else None, - distortion=np.array(json_dict["distortion"]) if json_dict["distortion"] is not None else None, - ) - - @property - def aspect_ratio(self) -> float: - return self.width / self.height - - @property - def fov_x(self) -> float: - """ - Calculates the horizontal field of view (FOV) in radian. - """ - fx = self.intrinsic[0, 0] - fov_x_rad = 2 * np.arctan(self.width / (2 * fx)) - return fov_x_rad - - @property - def fov_y(self) -> float: - """ - Calculates the vertical field of view (FOV) in radian. - """ - fy = self.intrinsic[1, 1] - fov_y_rad = 2 * np.arctan(self.height / (2 * fy)) - return fov_y_rad - - -@dataclass -class FisheyeMEICameraMetadata(CameraMetadata): - - mirror_parameters: float - distortion: npt.NDArray[np.float64] # k1,k2,p1,p2 - projection_parameters: npt.NDArray[np.float64] #gamma1,gamma2,u0,v0 - - def to_dict(self) -> Dict[str, Any]: - # TODO: remove None types. Only a placeholder for now. - return { - "camera_type": int(self.camera_type), - "width": self.width, - "height": self.height, - "mirror_parameters": self.mirror_parameters, - "distortion": self.distortion.tolist() if self.distortion is not None else None, - "projection_parameters": self.projection_parameters.tolist() if self.projection_parameters is not None else None, - } - - @classmethod - def from_dict(cls, json_dict: Dict[str, Any]) -> FisheyeMEICameraMetadata: - # TODO: remove None types. Only a placeholder for now. 
- return cls( - camera_type=CameraType(json_dict["camera_type"]), - width=json_dict["width"], - height=json_dict["height"], - mirror_parameters=json_dict["mirror_parameters"], - distortion=np.array(json_dict["distortion"]) if json_dict["distortion"] is not None else None, - projection_parameters=np.array(json_dict["projection_parameters"]) if json_dict["projection_parameters"] is not None else None, - ) - - def cam2image(self, points_3d: npt.NDArray[np.float64]) -> npt.NDArray[np.float64]: - ''' camera coordinate to image plane ''' - norm = np.linalg.norm(points_3d, axis=1) - - x = points_3d[:,0] / norm - y = points_3d[:,1] / norm - z = points_3d[:,2] / norm - - x /= z+self.mirror_parameters - y /= z+self.mirror_parameters - - k1 = self.distortion[0] - k2 = self.distortion[1] - gamma1 = self.projection_parameters[0] - gamma2 = self.projection_parameters[1] - u0 = self.projection_parameters[2] - v0 = self.projection_parameters[3] - - ro2 = x*x + y*y - x *= 1 + k1*ro2 + k2*ro2*ro2 - y *= 1 + k1*ro2 + k2*ro2*ro2 - - x = gamma1*x + u0 - y = gamma2*y + v0 - - return x, y, norm * points_3d[:,2] / np.abs(points_3d[:,2]) - -def camera_metadata_dict_to_json(camera_metadata: Dict[CameraType, CameraMetadata]) -> Dict[str, Dict[str, Any]]: - """ - Converts a dictionary of CameraMetadata to a JSON-serializable format. - :param camera_metadata: Dictionary of CameraMetadata. - :return: JSON-serializable dictionary. - """ - camera_metadata_dict = { - camera_type.serialize(): metadata.to_dict() for camera_type, metadata in camera_metadata.items() - } - return json.dumps(camera_metadata_dict) - - -def camera_metadata_dict_from_json(json_dict: Dict[str, Dict[str, Any]]) -> Dict[CameraType, Union[PinholeCameraMetadata, FisheyeMEICameraMetadata]]: - """ - Converts a JSON-serializable dictionary back to a dictionary of CameraMetadata. - :param json_dict: JSON-serializable dictionary. - :return: Dictionary of CameraMetadata. - """ - camera_metadata_dict = json.loads(json_dict) - out: Dict[CameraType, Union[PinholeCameraMetadata, FisheyeMEICameraMetadata]] = {} - for camera_type, metadata in camera_metadata_dict.items(): - cam_type = CameraType.deserialize(camera_type) - if isinstance(metadata, dict) and "mirror_parameters" in metadata: - out[cam_type] = FisheyeMEICameraMetadata.from_dict(metadata) - else: - out[cam_type] = PinholeCameraMetadata.from_dict(metadata) - return out - -@dataclass -class Camera: - - metadata: PinholeCameraMetadata - image: npt.NDArray[np.uint8] - extrinsic: npt.NDArray[np.float64] # 4x4 matrix - - def get_view_matrix(self) -> np.ndarray: - # Compute the view matrix based on the camera's position and orientation - pass diff --git a/d123/datatypes/sensors/camera/fisheye_mei_camera.py b/d123/datatypes/sensors/camera/fisheye_mei_camera.py new file mode 100644 index 00000000..061ef6b1 --- /dev/null +++ b/d123/datatypes/sensors/camera/fisheye_mei_camera.py @@ -0,0 +1,189 @@ +from __future__ import annotations + +from dataclasses import asdict, dataclass +from typing import Any, Dict, Optional, Tuple + +import numpy as np +import numpy.typing as npt +from enum import IntEnum + +from d123.common.utils.enums import SerialIntEnum +from d123.common.utils.mixin import ArrayMixin +from d123.geometry.se import StateSE3 + +# TODO: Consider merging FisheyeMEICameraType and PinholeCameraType. +class FisheyeMEICameraType(SerialIntEnum): + """ + Enum for fisheye cameras in d123. 
+ """ + #NOTE Use higher values to avoid conflicts with PinholeCameraType + CAM_L = 10 + CAM_R = 11 + + +@dataclass +class FisheyeMEICamera: + + metadata: FisheyeMEICameraMetadata + image: npt.NDArray[np.uint8] + extrinsic: StateSE3 + + +class FisheyeMEIDistortionIndex(IntEnum): + + K1 = 0 + K2 = 1 + P1 = 2 + P2 = 3 + + +class FisheyeMEIDistortion(ArrayMixin): + _array: npt.NDArray[np.float64] + + def __init__(self, k1: float, k2: float, p1: float, p2: float) -> None: + array = np.zeros(len(FisheyeMEIDistortionIndex), dtype=np.float64) + array[FisheyeMEIDistortionIndex.K1] = k1 + array[FisheyeMEIDistortionIndex.K2] = k2 + array[FisheyeMEIDistortionIndex.P1] = p1 + array[FisheyeMEIDistortionIndex.P2] = p2 + object.__setattr__(self, "_array", array) + + @classmethod + def from_array(cls, array: npt.NDArray[np.float64], copy: bool = True) -> FisheyeMEIDistortion: + assert array.ndim == 1 + assert array.shape[-1] == len(FisheyeMEIDistortionIndex) + instance = object.__new__(cls) + object.__setattr__(instance, "_array", array.copy() if copy else array) + return instance + + @property + def array(self) -> npt.NDArray[np.float64]: + return self._array + + @property + def k1(self) -> float: + return self._array[FisheyeMEIDistortionIndex.K1] + + @property + def k2(self) -> float: + return self._array[FisheyeMEIDistortionIndex.K2] + + @property + def p1(self) -> float: + return self._array[FisheyeMEIDistortionIndex.P1] + + @property + def p2(self) -> float: + return self._array[FisheyeMEIDistortionIndex.P2] + + +class FisheyeMEIProjectionIndex(IntEnum): + + GAMMA1 = 0 + GAMMA2 = 1 + U0 = 2 + V0 = 3 + + +class FisheyeMEIProjection(ArrayMixin): + _array: npt.NDArray[np.float64] + + def __init__(self, gamma1: float, gamma2: float, u0: float, v0: float) -> None: + array = np.zeros(len(FisheyeMEIProjectionIndex), dtype=np.float64) + array[FisheyeMEIProjectionIndex.GAMMA1] = gamma1 + array[FisheyeMEIProjectionIndex.GAMMA2] = gamma2 + array[FisheyeMEIProjectionIndex.U0] = u0 + array[FisheyeMEIProjectionIndex.V0] = v0 + object.__setattr__(self, "_array", array) + + @classmethod + def from_array(cls, array: npt.NDArray[np.float64], copy: bool = True) -> FisheyeMEIProjection: + assert array.ndim == 1 + assert array.shape[-1] == len(FisheyeMEIProjectionIndex) + instance = object.__new__(cls) + object.__setattr__(instance, "_array", array.copy() if copy else array) + return instance + + @property + def array(self) -> npt.NDArray[np.float64]: + return self._array + + @property + def gamma1(self) -> float: + return self._array[FisheyeMEIProjectionIndex.GAMMA1] + + @property + def gamma2(self) -> float: + return self._array[FisheyeMEIProjectionIndex.GAMMA2] + + @property + def u0(self) -> float: + return self._array[FisheyeMEIProjectionIndex.U0] + + @property + def v0(self) -> float: + return self._array[FisheyeMEIProjectionIndex.V0] + + +@dataclass +class FisheyeMEICameraMetadata: + + camera_type: FisheyeMEICameraType + mirror_parameter: Optional[float] + distortion: Optional[FisheyeMEIDistortion] + projection: Optional[FisheyeMEIProjection] + width: int + height: int + + @classmethod + def from_dict(cls, data_dict: Dict[str, Any]) -> FisheyeMEICameraMetadata: + data_dict["camera_type"] = FisheyeMEICameraType(data_dict["camera_type"]) + data_dict["distortion"] = ( + FisheyeMEIDistortion.from_array(np.array(data_dict["distortion"])) if data_dict["distortion"] is not None else None + ) + data_dict["projection"] = ( + FisheyeMEIProjection.from_array(np.array(data_dict["projection"])) if data_dict["projection"] is 
not None else None + ) + return FisheyeMEICameraMetadata(**data_dict) + + def to_dict(self) -> Dict[str, Any]: + data_dict = asdict(self) + data_dict["camera_type"] = int(self.camera_type) + data_dict["distortion"] = self.distortion.array.tolist() if self.distortion is not None else None + data_dict["projection"] = self.projection.array.tolist() if self.projection is not None else None + return data_dict + + def cam2image(self, points_3d: npt.NDArray[np.float64]) -> Tuple[npt.NDArray[np.float64], npt.NDArray[np.float64], npt.NDArray[np.float64]]: + """Project points from the camera frame onto the image plane (MEI model).""" + norm = np.linalg.norm(points_3d, axis=1) + + x = points_3d[:,0] / norm + y = points_3d[:,1] / norm + z = points_3d[:,2] / norm + + x /= z+self.mirror_parameter + y /= z+self.mirror_parameter + + if self.distortion is not None: + k1 = self.distortion.k1 + k2 = self.distortion.k2 + else: + k1 = k2 = 0.0 + + if self.projection is not None: + gamma1 = self.projection.gamma1 + gamma2 = self.projection.gamma2 + u0 = self.projection.u0 + v0 = self.projection.v0 + else: + gamma1 = gamma2 = 1.0 + u0 = v0 = 0.0 + + ro2 = x*x + y*y + x *= 1 + k1*ro2 + k2*ro2*ro2 + y *= 1 + k1*ro2 + k2*ro2*ro2 + + x = gamma1*x + u0 + y = gamma2*y + v0 + + return x, y, norm * points_3d[:,2] / np.abs(points_3d[:,2]) diff --git a/d123/datatypes/sensors/camera/utils.py b/d123/datatypes/sensors/camera/utils.py new file mode 100644 index 00000000..9e281e55 --- /dev/null +++ b/d123/datatypes/sensors/camera/utils.py @@ -0,0 +1,36 @@ +from typing import Union + +from d123.datatypes.sensors.camera.pinhole_camera import PinholeCameraType +from d123.datatypes.sensors.camera.fisheye_mei_camera import FisheyeMEICameraType + +def get_camera_type_by_value(value: int) -> Union[PinholeCameraType, FisheyeMEICameraType]: + """Dynamically determine camera type based on value range.""" + pinhole_values = [member.value for member in PinholeCameraType] + fisheye_values = [member.value for member in FisheyeMEICameraType] + + if value in pinhole_values: + return PinholeCameraType(value) + elif value in fisheye_values: + return FisheyeMEICameraType(value) + else: + raise ValueError(f"Invalid camera type value: {value}. " + f"Valid PinholeCameraType values: {pinhole_values}, " + f"Valid FisheyeMEICameraType values: {fisheye_values}") + +def deserialize_camera_type(camera_str: str) -> Union[PinholeCameraType, FisheyeMEICameraType]: + """Deserialize camera type string to appropriate enum.""" + try: + return PinholeCameraType.deserialize(camera_str) + except (ValueError, KeyError): + pass + + try: + return FisheyeMEICameraType.deserialize(camera_str) + except (ValueError, KeyError): + pass + + pinhole_names = [member.name.lower() for member in PinholeCameraType] + fisheye_names = [member.name.lower() for member in FisheyeMEICameraType] + raise ValueError(f"Unknown camera type: '{camera_str}'. 
" + f"Valid PinholeCameraType names: {pinhole_names}, " + f"Valid FisheyeMEICameraType names: {fisheye_names}") \ No newline at end of file diff --git a/d123/script/config/dataset_conversion/default_dataset_conversion.yaml b/d123/script/config/dataset_conversion/default_dataset_conversion.yaml index 0b4b55f1..19bdd168 100644 --- a/d123/script/config/dataset_conversion/default_dataset_conversion.yaml +++ b/d123/script/config/dataset_conversion/default_dataset_conversion.yaml @@ -14,13 +14,10 @@ defaults: - datasets: # - nuplan_private_dataset # - carla_dataset - - wopd_dataset + # - wopd_dataset # - av2_sensor_dataset -<<<<<<< HEAD - kitti360_dataset -======= - _self_ ->>>>>>> dev_v0.0.7 force_map_conversion: False force_log_conversion: True diff --git a/d123/script/config/datasets/kitti360_dataset.yaml b/d123/script/config/datasets/kitti360_dataset.yaml index c5816a29..be7567bf 100644 --- a/d123/script/config/datasets/kitti360_dataset.yaml +++ b/d123/script/config/datasets/kitti360_dataset.yaml @@ -12,5 +12,24 @@ kitti360_dataset: output_path: ${d123_data_root} force_log_conversion: ${force_log_conversion} force_map_conversion: ${force_map_conversion} + + # Ego + include_ego: true + + # Box Detections + include_box_detections: true + + # Traffic Lights + include_traffic_lights: false + + # Cameras + include_cameras: true camera_store_option: "path" + + # LiDARs + include_lidars: true lidar_store_option: "path" + + # Scenario tag / Route + include_scenario_tags: false + include_route: false diff --git a/d123/script/run_viser.py b/d123/script/run_viser.py index ed05d73f..a6d9c526 100644 --- a/d123/script/run_viser.py +++ b/d123/script/run_viser.py @@ -21,13 +21,8 @@ def main(cfg: DictConfig) -> None: scene_filter = build_scene_filter(cfg.scene_filter) scene_builder = build_scene_builder(cfg.scene_builder) scenes = scene_builder.get_scenes(scene_filter, worker=worker) -<<<<<<< HEAD - - ViserVisualizationServer(scenes=scenes) -======= ViserViewer(scenes=scenes) ->>>>>>> dev_v0.0.7 if __name__ == "__main__": From 107a803d1f8462f5c800d30045fcbd7f0d182b5f Mon Sep 17 00:00:00 2001 From: Daniel Dauner Date: Tue, 14 Oct 2025 21:28:24 +0200 Subject: [PATCH 079/145] Update map for nuPlan and AV2 with map metadata (#51) --- .../visualization/matplotlib/observation.py | 4 +- .../datasets/av2/av2_map_conversion copy.py | 525 ---------------- .../datasets/av2/av2_sensor_converter.py | 45 +- .../datasets/carla/carla_data_converter.py | 20 +- .../datasets/nuplan/nuplan_converter.py | 34 +- .../nuplan/nuplan_map_conversion copy.py | 463 -------------- .../datasets/nuplan/nuplan_map_conversion.py | 6 +- .../opendrive/opendrive_map_conversion.py | 4 +- d123/datatypes/maps/abstract_map.py | 4 +- .../scene/arrow/arrow_scene_builder.py | 8 +- d123/datatypes/scene/scene_filter.py | 2 +- .../common/scene_filter/all_scenes.yaml | 2 +- .../common/scene_filter/log_scenes.yaml | 2 +- .../scene_filter/nuplan_mini_train.yaml | 2 +- .../common/scene_filter/nuplan_mini_val.yaml | 2 +- .../common/scene_filter/nuplan_sim_agent.yaml | 2 +- .../common/scene_filter/viser_scenes.yaml | 2 +- .../datasets/av2_sensor_dataset.yaml | 2 +- .../datasets/nuplan_private_dataset.yaml | 41 -- .../conversion/datasets/wopd_dataset.yaml | 2 +- .../config/conversion/default_conversion.yaml | 12 +- notebooks/deprecated/arrow.ipynb | 178 ------ notebooks/deprecated/extraction_testing.ipynb | 166 ----- .../test_nuplan_conversion.ipynb | 71 --- .../test_opendrive_conversion.ipynb | 191 ------ .../deprecated/nuplan_map_dataframe.ipynb | 570 
------------------ notebooks/deprecated/scene_rendering.ipynb | 115 ---- .../test_intesection_polygons.ipynb | 293 --------- notebooks/deprecated/test_scene_builder.ipynb | 258 -------- notebooks/deprecated/test_waypoints.ipynb | 167 ----- notebooks/viz/bev_matplotlib.ipynb | 25 +- 31 files changed, 95 insertions(+), 3123 deletions(-) delete mode 100644 d123/conversion/datasets/av2/av2_map_conversion copy.py delete mode 100644 d123/conversion/datasets/nuplan/nuplan_map_conversion copy.py delete mode 100644 d123/script/config/conversion/datasets/nuplan_private_dataset.yaml delete mode 100644 notebooks/deprecated/arrow.ipynb delete mode 100644 notebooks/deprecated/extraction_testing.ipynb delete mode 100644 notebooks/deprecated/map_conversion/test_nuplan_conversion.ipynb delete mode 100644 notebooks/deprecated/map_conversion/test_opendrive_conversion.ipynb delete mode 100644 notebooks/deprecated/nuplan_map_dataframe.ipynb delete mode 100644 notebooks/deprecated/scene_rendering.ipynb delete mode 100644 notebooks/deprecated/test_intesection_polygons.ipynb delete mode 100644 notebooks/deprecated/test_scene_builder.ipynb delete mode 100644 notebooks/deprecated/test_waypoints.ipynb diff --git a/d123/common/visualization/matplotlib/observation.py b/d123/common/visualization/matplotlib/observation.py index 0a5d7301..0d6c5ddf 100644 --- a/d123/common/visualization/matplotlib/observation.py +++ b/d123/common/visualization/matplotlib/observation.py @@ -76,7 +76,7 @@ def add_default_map_on_ax( print(f"Error adding map object of type {layer.name} and id {map_object.object_id}") traceback.print_exc() - # ax.set_title(f"Map: {map_api.map_name}") + ax.set_title(f"Map: {map_api.location}") def add_box_detections_to_ax(ax: plt.Axes, box_detections: BoxDetectionWrapper) -> None: @@ -137,7 +137,7 @@ def add_traffic_lights_to_ax( TRAFFIC_LIGHT_CONFIG[traffic_light_detection.status], ) else: - raise ValueError(f"Lane with id {traffic_light_detection.lane_id} not found in map {map_api.map_name}.") + raise ValueError(f"Lane with id {traffic_light_detection.lane_id} not found in map {map_api.location}.") def add_bounding_box_to_ax( diff --git a/d123/conversion/datasets/av2/av2_map_conversion copy.py b/d123/conversion/datasets/av2/av2_map_conversion copy.py deleted file mode 100644 index f45e6d0a..00000000 --- a/d123/conversion/datasets/av2/av2_map_conversion copy.py +++ /dev/null @@ -1,525 +0,0 @@ -# import json -# import warnings -# from pathlib import Path -# from typing import Any, Dict, Final, List - -# import geopandas as gpd -# import numpy as np -# import numpy.typing as npt -# import pandas as pd -# import shapely -# import shapely.geometry as geom - -# from d123.conversion.datasets.av2.av2_constants import AV2_ROAD_LINE_TYPE_MAPPING -# from d123.conversion.utils.map_utils.road_edge.road_edge_2d_utils import split_line_geometry_by_max_length -# from d123.conversion.utils.map_utils.road_edge.road_edge_3d_utils import ( -# get_road_edges_3d_from_generic_drivable_area_df, -# ) -# from d123.datatypes.maps.map_datatypes import MapLayer, RoadEdgeType -# from d123.geometry import OccupancyMap2D, Point3DIndex, Polyline2D, Polyline3D - -# LANE_GROUP_MARK_TYPES: List[str] = [ -# "DASHED_WHITE", -# "DOUBLE_DASH_WHITE", -# "DASH_SOLID_WHITE", -# "SOLID_DASH_WHITE", -# "SOLID_WHITE", -# ] -# MAX_ROAD_EDGE_LENGTH: Final[float] = 100.0 # TODO: Add to config - - -# def convert_av2_map(source_log_path: Path, map_file_path: Path) -> None: - -# def _extract_polyline(data: List[Dict[str, float]], close: bool = False) -> 
Polyline3D: -# polyline = np.array([[p["x"], p["y"], p["z"]] for p in data], dtype=np.float64) -# if close: -# polyline = np.vstack([polyline, polyline[0]]) - -# return Polyline3D.from_array(polyline) - -# map_folder = source_log_path / "map" -# log_map_archive_path = next(map_folder.glob("log_map_archive_*.json")) - -# with open(log_map_archive_path, "r") as f: -# log_map_archive = json.load(f) - -# drivable_areas: Dict[int, Polyline3D] = {} - -# for drivable_area_id, drivable_area_dict in log_map_archive["drivable_areas"].items(): -# # keys: ["area_boundary", "id"] -# drivable_areas[int(drivable_area_id)] = _extract_polyline(drivable_area_dict["area_boundary"], close=True) - -# for lane_segment_id, lane_segment_dict in log_map_archive["lane_segments"].items(): -# # keys = [ -# # "id", -# # "is_intersection", -# # "lane_type", -# # "left_lane_boundary", -# # "left_lane_mark_type", -# # "right_lane_boundary", -# # "right_lane_mark_type", -# # "successors", -# # "predecessors", -# # "right_neighbor_id", -# # "left_neighbor_id", -# # ] -# lane_segment_dict["left_lane_boundary"] = _extract_polyline(lane_segment_dict["left_lane_boundary"]) -# lane_segment_dict["right_lane_boundary"] = _extract_polyline(lane_segment_dict["right_lane_boundary"]) - -# for crosswalk_id, crosswalk_dict in log_map_archive["pedestrian_crossings"].items(): -# # keys = ["id", "outline"] -# # https://github.com/argoverse/av2-api/blob/6b22766247eda941cb1953d6a58e8d5631c561da/src/av2/map/pedestrian_crossing.py - -# p1, p2 = np.array([[p["x"], p["y"], p["z"]] for p in crosswalk_dict["edge1"]], dtype=np.float64) -# p3, p4 = np.array([[p["x"], p["y"], p["z"]] for p in crosswalk_dict["edge2"]], dtype=np.float64) -# crosswalk_dict["outline"] = Polyline3D.from_array(np.array([p1, p2, p4, p3, p1], dtype=np.float64)) - -# lane_group_dict = _extract_lane_group_dict(log_map_archive["lane_segments"]) -# intersection_dict = _extract_intersection_dict(log_map_archive["lane_segments"], lane_group_dict) - -# dataframes: Dict[MapLayer, gpd.GeoDataFrame] = {} - -# dataframes[MapLayer.LANE] = get_lane_df(log_map_archive["lane_segments"]) -# dataframes[MapLayer.LANE_GROUP] = get_lane_group_df(lane_group_dict) -# dataframes[MapLayer.INTERSECTION] = get_intersections_df(intersection_dict) -# dataframes[MapLayer.CROSSWALK] = get_crosswalk_df(log_map_archive["pedestrian_crossings"]) -# dataframes[MapLayer.GENERIC_DRIVABLE] = get_generic_drivable_df(drivable_areas) -# dataframes[MapLayer.ROAD_EDGE] = get_road_edge_df(dataframes[MapLayer.GENERIC_DRIVABLE]) -# dataframes[MapLayer.ROAD_LINE] = get_road_line_df(log_map_archive["lane_segments"]) -# # NOTE: AV2 does not provide walkways or carparks, so we create an empty DataFrame. 
-# dataframes[MapLayer.WALKWAY] = get_empty_gdf() -# dataframes[MapLayer.CARPARK] = get_empty_gdf() - -# map_file_path.unlink(missing_ok=True) -# if not map_file_path.parent.exists(): -# map_file_path.parent.mkdir(parents=True, exist_ok=True) - -# with warnings.catch_warnings(): -# warnings.filterwarnings("ignore", message="'crs' was not provided") -# for layer, gdf in dataframes.items(): -# gdf.to_file(map_file_path, layer=layer.serialize(), driver="GPKG", mode="a") - - -# def get_empty_gdf() -> gpd.GeoDataFrame: -# ids = [] -# outlines = [] -# geometries = [] -# data = pd.DataFrame({"id": ids, "outline": outlines}) -# gdf = gpd.GeoDataFrame(data, geometry=geometries) -# return gdf - - -# def get_lane_df(lanes: Dict[int, Any]) -> gpd.GeoDataFrame: - -# ids = [int(lane_id) for lane_id in lanes.keys()] -# lane_types = [0] * len(ids) # TODO: Add lane types -# lane_group_ids = [] -# speed_limits_mps = [] -# predecessor_ids = [] -# successor_ids = [] -# left_boundaries = [] -# right_boundaries = [] -# left_lane_ids = [] -# right_lane_ids = [] -# baseline_paths = [] -# geometries = [] - -# def _get_centerline_from_boundaries( -# left_boundary: Polyline3D, right_boundary: Polyline3D, resolution: float = 0.1 -# ) -> Polyline3D: - -# points_per_meter = 1 / resolution -# num_points = int(np.ceil(max([right_boundary.length, left_boundary.length]) * points_per_meter)) -# right_array = right_boundary.interpolate(np.linspace(0, right_boundary.length, num_points, endpoint=True)) -# left_array = left_boundary.interpolate(np.linspace(0, left_boundary.length, num_points, endpoint=True)) - -# return Polyline3D.from_array(np.mean([right_array, left_array], axis=0)) - -# for lane_id, lane_dict in lanes.items(): -# # keys = [ -# # "id", -# # "is_intersection", -# # "lane_type", -# # "left_lane_boundary", -# # "left_lane_mark_type", -# # "right_lane_boundary", -# # "right_lane_mark_type", -# # "successors", -# # "predecessors", -# # "right_neighbor_id", -# # "left_neighbor_id", -# # ] -# lane_centerline = _get_centerline_from_boundaries( -# left_boundary=lane_dict["left_lane_boundary"], -# right_boundary=lane_dict["right_lane_boundary"], -# ) -# lane_speed_limit_mps = None # TODO: Consider using geo reference to retrieve speed limits. 
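The deleted _get_centerline_from_boundaries above resamples both lane boundaries to a common number of stations and averages them pointwise. A minimal standalone sketch of that idea with plain shapely LineStrings (the function name and toy data are illustrative, not part of the codebase):

import numpy as np
from shapely.geometry import LineString

def centerline_from_boundaries(left: LineString, right: LineString, resolution: float = 0.1) -> LineString:
    # Sample both boundaries at the same number of evenly spaced stations,
    # driven by the longer boundary, then average the samples pointwise.
    num_points = int(np.ceil(max(left.length, right.length) / resolution))
    stations = np.linspace(0.0, 1.0, num_points, endpoint=True)
    left_pts = np.array([left.interpolate(s, normalized=True).coords[0] for s in stations])
    right_pts = np.array([right.interpolate(s, normalized=True).coords[0] for s in stations])
    return LineString((left_pts + right_pts) / 2.0)

# Two parallel boundaries one meter apart yield the midline at y == 0.5.
center = centerline_from_boundaries(LineString([(0, 0), (10, 0)]), LineString([(0, 1), (10, 1)]))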
-# lane_group_ids.append(lane_id) -# speed_limits_mps.append(lane_speed_limit_mps) -# predecessor_ids.append(lane_dict["predecessors"]) -# successor_ids.append(lane_dict["successors"]) -# left_boundaries.append(lane_dict["left_lane_boundary"].linestring) -# right_boundaries.append(lane_dict["right_lane_boundary"].linestring) -# left_lane_ids.append(lane_dict["left_neighbor_id"]) -# right_lane_ids.append(lane_dict["right_neighbor_id"]) -# baseline_paths.append(lane_centerline.linestring) - -# geometry = geom.Polygon( -# np.vstack( -# [ -# lane_dict["left_lane_boundary"].array[:, :2], -# lane_dict["right_lane_boundary"].array[:, :2][::-1], -# ] -# ) -# ) -# geometries.append(geometry) - -# data = pd.DataFrame( -# { -# "id": ids, -# "lane_type": lane_types, -# "lane_group_id": lane_group_ids, -# "speed_limit_mps": speed_limits_mps, -# "predecessor_ids": predecessor_ids, -# "successor_ids": successor_ids, -# "left_boundary": left_boundaries, -# "right_boundary": right_boundaries, -# "left_lane_id": left_lane_ids, -# "right_lane_id": right_lane_ids, -# "baseline_path": baseline_paths, -# } -# ) - -# gdf = gpd.GeoDataFrame(data, geometry=geometries) -# return gdf - - -# def get_lane_group_df(lane_group_dict: Dict[int, Any]) -> gpd.GeoDataFrame: - -# ids = list(lane_group_dict.keys()) -# lane_ids = [] -# intersection_ids = [] -# predecessor_lane_group_ids = [] -# successor_lane_group_ids = [] -# left_boundaries = [] -# right_boundaries = [] -# geometries = [] - -# for lane_group_id, lane_group_values in lane_group_dict.items(): - -# lane_ids.append(lane_group_values["lane_ids"]) -# intersection_ids.append(lane_group_values["intersection_id"]) - -# predecessor_lane_group_ids.append(lane_group_values["predecessor_ids"]) -# successor_lane_group_ids.append(lane_group_values["successor_ids"]) -# left_boundaries.append(lane_group_values["left_boundary"].linestring) -# right_boundaries.append(lane_group_values["right_boundary"].linestring) -# geometry = geom.Polygon( -# np.vstack( -# [ -# lane_group_values["left_boundary"].array[:, :2], -# lane_group_values["right_boundary"].array[:, :2][::-1], -# lane_group_values["left_boundary"].array[0, :2][None, ...], -# ] -# ) -# ) -# geometries.append(geometry) - -# data = pd.DataFrame( -# { -# "id": ids, -# "lane_ids": lane_ids, -# "intersection_id": intersection_ids, -# "predecessor_ids": predecessor_lane_group_ids, -# "successor_ids": successor_lane_group_ids, -# "left_boundary": left_boundaries, -# "right_boundary": right_boundaries, -# } -# ) -# gdf = gpd.GeoDataFrame(data, geometry=geometries) -# return gdf - - -# def get_intersections_df(intersection_dict: Dict[int, Any]) -> gpd.GeoDataFrame: -# ids = [] -# lane_group_ids = [] -# outlines = [] -# geometries = [] - -# for intersection_id, intersection_values in intersection_dict.items(): -# ids.append(intersection_id) -# lane_group_ids.append(intersection_values["lane_group_ids"]) -# outlines.append(intersection_values["outline_3d"].linestring) -# geometries.append(geom.Polygon(intersection_values["outline_3d"].array[:, Point3DIndex.XY])) - -# data = pd.DataFrame({"id": ids, "lane_group_ids": lane_group_ids, "outline": outlines}) -# gdf = gpd.GeoDataFrame(data, geometry=geometries) -# return gdf - - -# def get_carpark_df(carparks) -> gpd.GeoDataFrame: -# ids = list(carparks.keys()) -# outlines = [geom.LineString(outline) for outline in carparks.values()] -# geometries = [geom.Polygon(outline[..., Point3DIndex.XY]) for outline in carparks.values()] - -# data = pd.DataFrame({"id": ids, "outline": outlines}) 
-# gdf = gpd.GeoDataFrame(data, geometry=geometries) -# return gdf - - -# def get_walkway_df() -> gpd.GeoDataFrame: -# ids = [] -# geometries = [] - -# # NOTE: WOPD does not provide walkways, so we create an empty DataFrame. -# data = pd.DataFrame({"id": ids}) -# gdf = gpd.GeoDataFrame(data, geometry=geometries) -# return gdf - - -# def get_crosswalk_df(crosswalks: Dict[int, npt.NDArray[np.float64]]) -> gpd.GeoDataFrame: -# ids = list(crosswalks.keys()) -# outlines = [] -# geometries = [] -# for crosswalk_dict in crosswalks.values(): -# outline = crosswalk_dict["outline"] -# outlines.append(outline.linestring) -# geometries.append(geom.Polygon(outline.array[:, Point3DIndex.XY])) - -# data = pd.DataFrame({"id": ids, "outline": outlines}) -# gdf = gpd.GeoDataFrame(data, geometry=geometries) -# return gdf - - -# def get_generic_drivable_df(drivable_areas: Dict[int, Polyline3D]) -> gpd.GeoDataFrame: -# ids = list(drivable_areas.keys()) -# outlines = [drivable_area.linestring for drivable_area in drivable_areas.values()] -# geometries = [geom.Polygon(drivable_area.array[:, Point3DIndex.XY]) for drivable_area in drivable_areas.values()] - -# data = pd.DataFrame({"id": ids, "outline": outlines}) -# gdf = gpd.GeoDataFrame(data, geometry=geometries) -# return gdf - - -# def get_road_edge_df(generic_drivable_area_df: gpd.GeoDataFrame) -> gpd.GeoDataFrame: -# road_edges = get_road_edges_3d_from_generic_drivable_area_df(generic_drivable_area_df) -# road_edges = split_line_geometry_by_max_length(road_edges, MAX_ROAD_EDGE_LENGTH) - -# ids = np.arange(len(road_edges), dtype=np.int64).tolist() -# # TODO @DanielDauner: Figure out if other types should/could be assigned here. -# road_edge_types = [int(RoadEdgeType.ROAD_EDGE_BOUNDARY)] * len(road_edges) -# geometries = road_edges -# return gpd.GeoDataFrame(pd.DataFrame({"id": ids, "road_edge_type": road_edge_types}), geometry=geometries) - - -# def get_road_line_df(lanes: Dict[int, Any]) -> gpd.GeoDataFrame: - -# # TODO @DanielDauner: Allow lanes to reference road line dataframe. - -# ids = [] -# road_lines_type = [] -# geometries = [] - -# running_id = 0 -# for lane in lanes.values(): -# for side in ["left", "right"]: -# # NOTE: We currently ignore lane markings that are NONE in the AV2 dataset. -# # TODO: Review if the road line system should be changed in the future. 
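split_line_geometry_by_max_length itself is not shown in this patch; assuming it cuts each geometry into pieces no longer than the given maximum, a standalone equivalent can be built on shapely.ops.substring (the helper's real signature and behavior may differ):

from typing import List

import numpy as np
from shapely.geometry import LineString
from shapely.ops import substring

def split_by_max_length(line: LineString, max_length: float) -> List[LineString]:
    # Cut at evenly spaced stations so that no piece exceeds max_length.
    n_pieces = max(1, int(np.ceil(line.length / max_length)))
    cuts = np.linspace(0.0, line.length, n_pieces + 1)
    return [substring(line, start, end) for start, end in zip(cuts[:-1], cuts[1:])]

# A 250 m road edge with MAX_ROAD_EDGE_LENGTH = 100.0 becomes three ~83.3 m pieces.
pieces = split_by_max_length(LineString([(0, 0), (250, 0)]), 100.0)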
-# if lane[f"{side}_lane_mark_type"] == "NONE": -# continue - -# ids.append(running_id) -# road_lines_type.append(AV2_ROAD_LINE_TYPE_MAPPING[lane[f"{side}_lane_mark_type"]]) -# geometries.append(lane[f"{side}_lane_boundary"].linestring) -# running_id += 1 - -# data = pd.DataFrame({"id": ids, "road_line_type": road_lines_type}) -# gdf = gpd.GeoDataFrame(data, geometry=geometries) -# return gdf - - -# def _extract_lane_group_dict(lanes: Dict[int, Any]) -> gpd.GeoDataFrame: - -# lane_group_sets = _extract_lane_group(lanes) -# lane_group_set_dict = {i: lane_group for i, lane_group in enumerate(lane_group_sets)} - -# lane_group_dict: Dict[int, Dict[str, Any]] = {} - -# def _get_lane_group_ids_of_lanes_ids(lane_ids: List[str]) -> List[int]: -# """Helper to find lane group ids that contain any of the given lane ids.""" -# lane_group_ids_ = [] -# for lane_group_id_, lane_group_set_ in lane_group_set_dict.items(): -# if any(str(lane_id) in lane_group_set_ for lane_id in lane_ids): -# lane_group_ids_.append(lane_group_id_) -# return list(set(lane_group_ids_)) - -# for lane_group_id, lane_group_set in lane_group_set_dict.items(): - -# lane_group_dict[lane_group_id] = {} -# lane_group_dict[lane_group_id]["id"] = lane_group_id -# lane_group_dict[lane_group_id]["lane_ids"] = [int(lane_id) for lane_id in lane_group_set] - -# successor_lanes = [] -# predecessor_lanes = [] -# for lane_id in lane_group_set: -# lane_dict = lanes[str(lane_id)] -# successor_lanes.extend(lane_dict["successors"]) -# predecessor_lanes.extend(lane_dict["predecessors"]) - -# left_boundary = lanes[lane_group_set[0]]["left_lane_boundary"] -# right_boundary = lanes[lane_group_set[-1]]["right_lane_boundary"] - -# lane_group_dict[lane_group_id]["intersection_id"] = None -# lane_group_dict[lane_group_id]["predecessor_ids"] = _get_lane_group_ids_of_lanes_ids(predecessor_lanes) -# lane_group_dict[lane_group_id]["successor_ids"] = _get_lane_group_ids_of_lanes_ids(successor_lanes) -# lane_group_dict[lane_group_id]["left_boundary"] = left_boundary -# lane_group_dict[lane_group_id]["right_boundary"] = right_boundary -# outline_array = np.vstack( -# [ -# left_boundary.array[:, :3], -# right_boundary.array[:, :3][::-1], -# left_boundary.array[0, :3][None, ...], -# ] -# ) - -# lane_group_dict[lane_group_id]["outline"] = Polyline3D.from_array(outline_array) - -# return lane_group_dict - - -# def _extract_lane_group(lanes) -> List[List[str]]: - -# visited = set() -# lane_groups = [] - -# def _get_valid_neighbor_id(lane_data, direction): -# """Helper function to safely get neighbor ID""" -# neighbor_key = f"{direction}_neighbor_id" -# neighbor_id = str(lane_data.get(neighbor_key)) -# mark_type = lane_data.get(f"{direction}_lane_mark_type", None) - -# if (neighbor_id is not None) and (neighbor_id in lanes) and (mark_type in LANE_GROUP_MARK_TYPES): -# return neighbor_id -# return None - -# def _traverse_group(start_lane_id): -# """ -# Traverse left and right from a starting lane to find all connected parallel lanes -# """ -# group = [start_lane_id] -# queue = [start_lane_id] - -# while queue: -# current_id = queue.pop(0) -# if current_id in visited: -# continue - -# visited.add(current_id) - -# # Check left neighbor -# left_neighbor = _get_valid_neighbor_id(lanes[current_id], "left") -# if left_neighbor is not None and left_neighbor not in visited: -# queue.append(left_neighbor) -# group = [left_neighbor] + group - -# # Check right neighbor -# right_neighbor = _get_valid_neighbor_id(lanes[current_id], "right") -# if right_neighbor is not None and 
right_neighbor not in visited: -# queue.append(right_neighbor) -# group = group + [right_neighbor] - -# return group - -# # Find all lane groups -# for lane_id in lanes: -# if lane_id not in visited: -# group = _traverse_group(lane_id) -# lane_groups.append(group) - -# return lane_groups - - -# def _extract_intersection_dict( -# lanes: Dict[int, Any], lane_group_dict: Dict[int, Any], max_distance: float = 0.01 -# ) -> Dict[str, Any]: - -# def _interpolate_z_on_segment(point: shapely.Point, segment_coords: npt.NDArray[np.float64]) -> float: -# """Interpolate Z coordinate along a 3D line segment.""" -# p1, p2 = segment_coords[0], segment_coords[1] - -# # Project point onto segment -# segment_vec = p2[:2] - p1[:2] -# point_vec = np.array([point.x, point.y]) - p1[:2] - -# # Handle degenerate case -# segment_length_sq = np.dot(segment_vec, segment_vec) -# if segment_length_sq == 0: -# return p1[2] - -# # Calculate projection parameter -# t = np.dot(point_vec, segment_vec) / segment_length_sq -# t = np.clip(t, 0, 1) # Clamp to segment bounds - -# # Interpolate Z -# return p1[2] + t * (p2[2] - p1[2]) - -# # 1. Collect all lane groups where at least one lane is marked as an intersection. -# lane_group_intersection_dict = {} -# for lane_group_id, lane_group in lane_group_dict.items(): -# is_intersection_lanes = [lanes[str(lane_id)]["is_intersection"] for lane_id in lane_group["lane_ids"]] -# if any(is_intersection_lanes): -# lane_group_intersection_dict[lane_group_id] = lane_group - -# # 2. Merge polygons of lane groups that are marked as intersections. -# lane_group_intersection_geometry = { -# lane_group_id: shapely.Polygon(lane_group["outline"].array[:, Point3DIndex.XY]) -# for lane_group_id, lane_group in lane_group_intersection_dict.items() -# } -# intersection_polygons = gpd.GeoSeries(lane_group_intersection_geometry).union_all() - -# # 3. Collect all intersection polygons and their lane group IDs. -# intersection_dict = {} -# for intersection_idx, intersection_polygon in enumerate(intersection_polygons.geoms): -# if intersection_polygon.is_empty: -# continue -# lane_group_ids = [ -# lane_group_id -# for lane_group_id, lane_group_polygon in lane_group_intersection_geometry.items() -# if intersection_polygon.intersects(lane_group_polygon) -# ] -# for lane_group_id in lane_group_ids: -# lane_group_dict[lane_group_id]["intersection_id"] = intersection_idx - -# intersection_dict[intersection_idx] = { -# "id": intersection_idx, -# "outline_2d": Polyline2D.from_array(np.array(list(intersection_polygon.exterior.coords), dtype=np.float64)), -# "lane_group_ids": lane_group_ids, -# } - -# # 4. Lift intersection outlines to 3D. 
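The lifting step below matches each 2D outline vertex to its nearest 3D boundary segment and interpolates z along that segment. In isolation, the projection math of _interpolate_z_on_segment reduces to the following worked example (toy numbers chosen so the result is easy to verify by hand):

import numpy as np

def interpolate_z(point_xy: np.ndarray, p1: np.ndarray, p2: np.ndarray) -> float:
    # Project the 2D point onto the segment's XY footprint, clamp the
    # projection parameter to [0, 1], and lerp z between the endpoints.
    seg = p2[:2] - p1[:2]
    seg_len_sq = float(np.dot(seg, seg))
    if seg_len_sq == 0.0:
        return float(p1[2])  # degenerate segment: fall back to its z
    t = np.clip(np.dot(point_xy - p1[:2], seg) / seg_len_sq, 0.0, 1.0)
    return float(p1[2] + t * (p2[2] - p1[2]))

# Halfway along a segment climbing from z=0 to z=2 gives z == 1.0.
z = interpolate_z(np.array([1.0, 0.0]), np.array([0.0, 0.0, 0.0]), np.array([2.0, 0.0, 2.0]))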
-# boundary_segments = [] -# for lane_group in lane_group_intersection_dict.values(): -# coords = np.array(lane_group["outline"].linestring.coords, dtype=np.float64).reshape(-1, 1, 3) -# segment_coords_boundary = np.concatenate([coords[:-1], coords[1:]], axis=1) -# boundary_segments.append(segment_coords_boundary) - -# boundary_segments = np.concatenate(boundary_segments, axis=0) -# boundary_segment_linestrings = shapely.creation.linestrings(boundary_segments) -# occupancy_map = OccupancyMap2D(boundary_segment_linestrings) - -# for intersection_id, intersection_data in intersection_dict.items(): -# points_2d = intersection_data["outline_2d"].array -# points_3d = np.zeros((len(points_2d), 3), dtype=np.float64) -# points_3d[:, :2] = points_2d - -# query_points = shapely.creation.points(points_2d) -# results = occupancy_map.query_nearest(query_points, max_distance=max_distance, exclusive=True) -# for query_idx, geometry_idx in zip(*results): -# query_point = query_points[query_idx] -# segment_coords = boundary_segments[geometry_idx] -# best_z = _interpolate_z_on_segment(query_point, segment_coords) -# points_3d[query_idx, 2] = best_z - -# intersection_dict[intersection_id]["outline_3d"] = Polyline3D.from_array(points_3d) - -# return intersection_dict diff --git a/d123/conversion/datasets/av2/av2_sensor_converter.py b/d123/conversion/datasets/av2/av2_sensor_converter.py index 46325efc..f731a5f9 100644 --- a/d123/conversion/datasets/av2/av2_sensor_converter.py +++ b/d123/conversion/datasets/av2/av2_sensor_converter.py @@ -23,6 +23,7 @@ from d123.conversion.map_writer.abstract_map_writer import AbstractMapWriter from d123.datatypes.detections.detection import BoxDetectionMetadata, BoxDetectionSE3, BoxDetectionWrapper from d123.datatypes.detections.detection_types import DetectionType +from d123.datatypes.maps.map_metadata import MapMetadata from d123.datatypes.scene.scene_metadata import LogMetadata from d123.datatypes.sensors.camera.pinhole_camera import ( PinholeCameraMetadata, @@ -46,7 +47,7 @@ class AV2SensorConverter(AbstractDatasetConverter): def __init__( self, splits: List[str], - log_path: Union[Path, str], + av2_data_root: Union[Path, str], dataset_converter_config: DatasetConverterConfig, ) -> None: super().__init__(dataset_converter_config) @@ -56,7 +57,7 @@ def __init__( ), f"Split {split} is not available. Available splits: {self.available_splits}" self._splits: List[str] = splits - self._data_root: Path = Path(log_path) + self._av2_data_root: Path = Path(av2_data_root) self._log_paths_and_split: Dict[str, List[Path]] = self._collect_log_paths() def _collect_log_paths(self) -> Dict[str, List[Path]]: @@ -68,9 +69,9 @@ def _collect_log_paths(self) -> Dict[str, List[Path]]: assert split_type in ["train", "val", "test"] if "av2-sensor" == dataset_name: - log_folder = self._data_root / dataset_name / split_type + log_folder = self._av2_data_root / dataset_name / split_type elif "av2-sensor-mini" == dataset_name: - log_folder = self._data_root / "sensor-mini" / split_type + log_folder = self._av2_data_root / "sensor-mini" / split_type else: raise ValueError(f"Unknown dataset name {dataset_name} in split {split}.") @@ -91,24 +92,17 @@ def convert_map(self, map_index: int, map_writer: AbstractMapWriter) -> None: source_log_path, split = self._log_paths_and_split[map_index] - # 1. Initialize Metadata, TODO: Use a MapMetadata class if needed in the future. 
- log_metadata = LogMetadata( - dataset="av2-sensor", - split=split, - log_name=source_log_path.name, - location=None, # TODO: Add location information. - timestep_seconds=None, - vehicle_parameters=None, - camera_metadata=None, - lidar_metadata=None, - map_has_z=True, - map_is_local=True, - ) + # 1. Initialize map metadata + map_metadata = _get_av2_sensor_map_metadata(split, source_log_path.name) + + # 2. Prepare map writer + map_needs_writing = map_writer.reset(self.dataset_converter_config, map_metadata) - map_needs_writing = map_writer.reset(self.dataset_converter_config, log_metadata) + # 3. Process source map data if map_needs_writing: convert_av2_map(source_log_path, map_writer) + # 4. Finalize map writing map_writer.close() def convert_log(self, log_index: int, log_writer: AbstractLogWriter) -> None: @@ -126,8 +120,7 @@ def convert_log(self, log_index: int, log_writer: AbstractLogWriter) -> None: vehicle_parameters=get_av2_ford_fusion_hybrid_parameters(), camera_metadata=get_av2_camera_metadata(source_log_path), lidar_metadata=get_av2_lidar_metadata(source_log_path), - map_has_z=True, - map_is_local=True, + map_metadata=_get_av2_sensor_map_metadata(split, source_log_path.name), ) # 2. Prepare log writer @@ -169,9 +162,21 @@ def convert_log(self, log_index: int, log_writer: AbstractLogWriter) -> None: ), ) + # 4. Finalize log writing log_writer.close() +def _get_av2_sensor_map_metadata(split: str, log_name: str) -> MapMetadata: + return MapMetadata( + dataset="av2-sensor", + split=split, + log_name=log_name, + location=None, # TODO: Add location information, e.g. city name. + map_has_z=True, + map_is_local=True, + ) + + def get_av2_camera_metadata(source_log_path: Path) -> Dict[PinholeCameraType, PinholeCameraMetadata]: intrinsics_file = source_log_path / "calibration" / "intrinsics.feather" diff --git a/d123/conversion/datasets/carla/carla_data_converter.py b/d123/conversion/datasets/carla/carla_data_converter.py index 20c6aecb..65ed911a 100644 --- a/d123/conversion/datasets/carla/carla_data_converter.py +++ b/d123/conversion/datasets/carla/carla_data_converter.py @@ -132,29 +132,29 @@ def convert_logs(self, worker: WorkerPool) -> None: ) -def convert_carla_map_to_gpkg(map_names: List[str], dataset_converter_config: DatasetConverterConfig) -> List[Any]: +def convert_carla_map_to_gpkg(locations: List[str], dataset_converter_config: DatasetConverterConfig) -> List[Any]: # TODO: add to config _interpolation_step_size = 0.5 # [m] _connection_distance_threshold = 0.1 # [m] - for map_name in map_names: - map_path = dataset_converter_config.output_path / "maps" / f"carla_{map_name.lower()}.gpkg" + for location in locations: + map_path = dataset_converter_config.output_path / "maps" / f"carla_{location.lower()}.gpkg" if dataset_converter_config.force_map_conversion or not map_path.exists(): map_path.unlink(missing_ok=True) assert os.environ["CARLA_ROOT"] is not None CARLA_ROOT = Path(os.environ["CARLA_ROOT"]) - if map_name not in ["Town11", "Town12", "Town13", "Town15"]: + if location not in ["Town11", "Town12", "Town13", "Town15"]: carla_maps_root = CARLA_ROOT / "CarlaUE4" / "Content" / "Carla" / "Maps" / "OpenDrive" - carla_map_path = carla_maps_root / f"{map_name}.xodr" + carla_map_path = carla_maps_root / f"{location}.xodr" else: carla_map_path = ( - CARLA_ROOT / "CarlaUE4" / "Content" / "Carla" / "Maps" / map_name / "OpenDrive" / f"{map_name}.xodr" + CARLA_ROOT / "CarlaUE4" / "Content" / "Carla" / "Maps" / location / "OpenDrive" / f"{location}.xodr" ) convert_from_xodr( 
carla_map_path, - f"carla_{map_name.lower()}", + f"carla_{location.lower()}", _interpolation_step_size, _connection_distance_threshold, ) @@ -179,10 +179,10 @@ def convert_log_internal(args: List[Dict[str, Union[List[str], List[Path]]]]) -> bounding_box_paths = sorted([bb_path for bb_path in (log_path / "boxes").iterdir()]) first_log_dict = _load_json_gz(bounding_box_paths[0]) - map_name = first_log_dict["location"] - map_api = get_global_map_api("carla", map_name) + location = first_log_dict["location"] + map_api = get_global_map_api("carla", location) - metadata = _get_metadata(map_name, str(log_path.stem)) + metadata = _get_metadata(location, str(log_path.stem)) vehicle_parameters = get_carla_lincoln_mkz_2020_parameters() camera_metadata = get_carla_camera_metadata(first_log_dict) lidar_metadata = get_carla_lidar_metadata(first_log_dict) diff --git a/d123/conversion/datasets/nuplan/nuplan_converter.py b/d123/conversion/datasets/nuplan/nuplan_converter.py index de356ed9..83ce1bd2 100644 --- a/d123/conversion/datasets/nuplan/nuplan_converter.py +++ b/d123/conversion/datasets/nuplan/nuplan_converter.py @@ -30,6 +30,7 @@ TrafficLightDetection, TrafficLightDetectionWrapper, ) +from d123.datatypes.maps.map_metadata import MapMetadata from d123.datatypes.scene.scene_metadata import LogMetadata from d123.datatypes.sensors.camera.pinhole_camera import ( PinholeCameraMetadata, @@ -147,25 +148,14 @@ def get_number_of_logs(self) -> int: def convert_map(self, map_index: int, map_writer: AbstractMapWriter) -> None: """Inherited, see superclass.""" - map_name = NUPLAN_MAP_LOCATIONS[map_index] + location = NUPLAN_MAP_LOCATIONS[map_index] # Dummy log metadata for map writing, TODO: Consider using MapMetadata instead? - log_metadata = LogMetadata( - dataset="nuplan", - split=None, - log_name=None, - location=map_name, - timestep_seconds=TARGET_DT, - vehicle_parameters=get_nuplan_chrysler_pacifica_parameters(), - camera_metadata={}, - lidar_metadata={}, - map_has_z=False, - map_is_local=False, - ) - map_needs_writing = map_writer.reset(self.dataset_converter_config, log_metadata) + map_metadata = _get_nuplan_map_metadata(location) + map_needs_writing = map_writer.reset(self.dataset_converter_config, map_metadata) if map_needs_writing: write_nuplan_map( - map_name=map_name, + location=location, nuplan_maps_root=self._nuplan_maps_root, map_writer=map_writer, ) @@ -193,8 +183,7 @@ def convert_log(self, log_index: int, log_writer: AbstractLogWriter) -> None: lidar_metadata=_get_nuplan_lidar_metadata( self._nuplan_sensor_root, log_name, self.dataset_converter_config ), - map_has_z=False, - map_is_local=False, + map_metadata=_get_nuplan_map_metadata(nuplan_log_db.log.map_version), ) # 2. Prepare log writer @@ -236,6 +225,17 @@ def convert_log(self, log_index: int, log_writer: AbstractLogWriter) -> None: del nuplan_log_db +def _get_nuplan_map_metadata(location: str) -> MapMetadata: + return MapMetadata( + dataset="nuplan", + split=None, + log_name=None, + location=location, + map_has_z=False, + map_is_local=False, + ) + + def _get_nuplan_camera_metadata( source_log_path: Path, dataset_converter_config: DatasetConverterConfig, diff --git a/d123/conversion/datasets/nuplan/nuplan_map_conversion copy.py b/d123/conversion/datasets/nuplan/nuplan_map_conversion copy.py deleted file mode 100644 index 7ce449fb..00000000 --- a/d123/conversion/datasets/nuplan/nuplan_map_conversion copy.py +++ /dev/null @@ -1,463 +0,0 @@ -# # TODO: Refactor this mess. 
- -# import warnings -# from pathlib import Path -# from typing import Dict, Optional, Union - -# import geopandas as gpd -# import numpy as np -# import pandas as pd -# import pyogrio -# from shapely.geometry import LineString - -# # Suppress numpy runtime warnings for casting operations -# np.seterr(invalid="ignore") - -# from d123.conversion.datasets.nuplan.utils.nuplan_constants import NUPLAN_MAP_GPKG_LAYERS, NUPLAN_MAP_LOCATION_FILES -# from d123.conversion.utils.map_utils.road_edge.road_edge_2d_utils import ( -# get_road_edge_linear_rings, -# split_line_geometry_by_max_length, -# ) -# from d123.datatypes.maps.gpkg.gpkg_utils import get_all_rows_with_value, get_row_with_value -# from d123.datatypes.maps.map_datatypes import MapLayer, RoadEdgeType, RoadLineType - -# # 0: generic lane I guess. -# # 1: ending? -# # 3: bike lanes. - -# MAX_ROAD_EDGE_LENGTH = 100.0 # meters, used to filter out very long road edges - -# NUPLAN_ROAD_LINE_CONVERSION = { -# 0: RoadLineType.DASHED_WHITE, -# 2: RoadLineType.SOLID_WHITE, -# 3: RoadLineType.UNKNOWN, -# } - - -# class NuPlanMapConverter: -# def __init__(self, nuplan_maps_root: Union[str, Path], map_path: Path) -> None: - -# self._map_path: Path = map_path -# self._nuplan_maps_root: Path = Path(nuplan_maps_root) -# self._gdf: Optional[Dict[str, gpd.GeoDataFrame]] = None - -# def convert(self, map_name: str = "us-pa-pittsburgh-hazelwood") -> None: -# assert map_name in NUPLAN_MAP_LOCATION_FILES.keys(), f"Map name {map_name} is not supported." - -# map_file_path = self._nuplan_maps_root / NUPLAN_MAP_LOCATION_FILES[map_name] -# self._load_dataframes(map_file_path) - -# dataframes: Dict[MapLayer, gpd.GeoDataFrame] = {} -# dataframes[MapLayer.LANE] = self._extract_lane_dataframe() -# dataframes[MapLayer.LANE_GROUP] = self._extract_lane_group_dataframe() -# dataframes[MapLayer.INTERSECTION] = self._extract_intersection_dataframe() -# dataframes[MapLayer.CROSSWALK] = self._extract_crosswalk_dataframe() -# dataframes[MapLayer.WALKWAY] = self._extract_walkway_dataframe() -# dataframes[MapLayer.CARPARK] = self._extract_carpark_dataframe() -# dataframes[MapLayer.GENERIC_DRIVABLE] = self._extract_generic_drivable_dataframe() -# dataframes[MapLayer.ROAD_EDGE] = self._extract_road_edge_dataframe() -# dataframes[MapLayer.ROAD_LINE] = self._extract_road_line_dataframe() - -# if not self._map_path.exists(): -# self._map_path.mkdir(parents=True, exist_ok=True) - -# try: -# map_file_name = self._map_path / f"nuplan_{map_name}.gpkg" -# with warnings.catch_warnings(): -# warnings.filterwarnings("ignore", message="'crs' was not provided") -# for layer, gdf in dataframes.items(): -# gdf.to_file(map_file_name, layer=layer.serialize(), driver="GPKG", mode="a") -# except Exception as e: -# print(f"Error occurred while converting map {map_name}: {e}") -# print(map_file_name, map_file_path) - -# def _load_dataframes(self, map_file_path: Path) -> None: - -# # The projected coordinate system depends on which UTM zone the mapped location is in. -# map_meta = gpd.read_file(map_file_path, layer="meta", engine="pyogrio") -# projection_system = map_meta[map_meta["key"] == "projectedCoordSystem"]["value"].iloc[0] - -# self._gdf = {} -# for layer_name in NUPLAN_MAP_GPKG_LAYERS: -# with warnings.catch_warnings(): -# # Suppress the warnings from the GPKG operations below so that they don't spam the training logs. 
-# warnings.filterwarnings("ignore") - -# gdf_in_pixel_coords = pyogrio.read_dataframe(map_file_path, layer=layer_name, fid_as_index=True) -# gdf_in_utm_coords = gdf_in_pixel_coords.to_crs(projection_system) -# # gdf_in_utm_coords = gdf_in_pixel_coords - -# # For backwards compatibility, cast the index to string datatype. -# # and mirror it to the "fid" column. -# gdf_in_utm_coords.index = gdf_in_utm_coords.index.map(str) -# gdf_in_utm_coords["fid"] = gdf_in_utm_coords.index - -# self._gdf[layer_name] = gdf_in_utm_coords - -# def _extract_lane_dataframe(self) -> gpd.GeoDataFrame: -# assert self._gdf is not None, "Call `.initialize()` before retrieving data!" -# lane_df = self._extract_nuplan_lane_dataframe() -# lane_connector_df = self._extract_nuplan_lane_connector_dataframe() -# combined_df = pd.concat([lane_df, lane_connector_df], ignore_index=True) -# return combined_df - -# def _extract_nuplan_lane_dataframe(self) -> gpd.GeoDataFrame: -# # NOTE: drops: lane_index (?), creator_id, name (?), road_type_fid (?), lane_type_fid (?), width (?), left_offset (?), right_offset (?), -# # min_speed (?), max_speed (?), stops, left_has_reflectors (?), right_has_reflectors (?), from_edge_fid, to_edge_fid - -# ids = self._gdf["lanes_polygons"].lane_fid.to_list() -# lane_group_ids = self._gdf["lanes_polygons"].lane_group_fid.to_list() -# speed_limits_mps = self._gdf["lanes_polygons"].speed_limit_mps.to_list() -# predecessor_ids = [] -# successor_ids = [] -# left_boundaries = [] -# right_boundaries = [] -# left_lane_ids = [] -# right_lane_ids = [] -# baseline_paths = [] -# geometries = self._gdf["lanes_polygons"].geometry.to_list() - -# for lane_id in ids: - -# # 1. predecessor_ids, successor_ids -# _predecessor_ids = get_all_rows_with_value( -# self._gdf["lane_connectors"], -# "entry_lane_fid", -# lane_id, -# )["fid"].tolist() -# _successor_ids = get_all_rows_with_value( -# self._gdf["lane_connectors"], -# "exit_lane_fid", -# lane_id, -# )["fid"].tolist() -# predecessor_ids.append(_predecessor_ids) -# successor_ids.append(_successor_ids) - -# # 2. left_boundaries, right_boundaries -# lane_series = get_row_with_value(self._gdf["lanes_polygons"], "fid", str(lane_id)) -# left_boundary_fid = lane_series["left_boundary_fid"] -# left_boundary = get_row_with_value(self._gdf["boundaries"], "fid", str(left_boundary_fid))["geometry"] - -# right_boundary_fid = lane_series["right_boundary_fid"] -# right_boundary = get_row_with_value(self._gdf["boundaries"], "fid", str(right_boundary_fid))["geometry"] - -# # 3. left_lane_ids, right_lane_ids -# lane_index = lane_series["lane_index"] -# all_group_lanes = get_all_rows_with_value( -# self._gdf["lanes_polygons"], "lane_group_fid", lane_series["lane_group_fid"] -# ) -# left_lane_id = all_group_lanes[all_group_lanes["lane_index"] == int(lane_index) - 1]["fid"] -# right_lane_id = all_group_lanes[all_group_lanes["lane_index"] == int(lane_index) + 1]["fid"] -# left_lane_ids.append(left_lane_id.item() if not left_lane_id.empty else None) -# right_lane_ids.append(right_lane_id.item() if not right_lane_id.empty else None) - -# # 3. 
baseline_paths -# baseline_path = get_row_with_value(self._gdf["baseline_paths"], "lane_fid", float(lane_id))["geometry"] - -# left_boundary = align_boundary_direction(baseline_path, left_boundary) -# right_boundary = align_boundary_direction(baseline_path, right_boundary) - -# left_boundaries.append(left_boundary) -# right_boundaries.append(right_boundary) -# baseline_paths.append(baseline_path) - -# data = pd.DataFrame( -# { -# "id": ids, -# "lane_group_id": lane_group_ids, -# "left_boundary": left_boundaries, -# "right_boundary": right_boundaries, -# "baseline_path": baseline_paths, -# "left_lane_id": left_lane_ids, -# "right_lane_id": right_lane_ids, -# "predecessor_ids": predecessor_ids, -# "successor_ids": successor_ids, -# "speed_limit_mps": speed_limits_mps, -# } -# ) - -# gdf = gpd.GeoDataFrame(data, geometry=geometries) -# return gdf - -# def _extract_nuplan_lane_connector_dataframe(self) -> None: -# # NOTE: drops: exit_lane_group_fid, entry_lane_group_fid, to_edge_fid, -# # turn_type_fid (?), bulb_fids (?), traffic_light_stop_line_fids (?), overlap (?), creator_id -# # left_has_reflectors (?), right_has_reflectors (?) -# ids = self._gdf["lane_connectors"].fid.to_list() -# lane_group_ids = self._gdf["lane_connectors"].lane_group_connector_fid.to_list() -# speed_limits_mps = self._gdf["lane_connectors"].speed_limit_mps.to_list() -# predecessor_ids = [] -# successor_ids = [] -# left_boundaries = [] -# right_boundaries = [] -# baseline_paths = [] -# geometries = [] - -# for lane_id in ids: -# # 1. predecessor_ids, successor_ids -# lane_connector_row = get_row_with_value(self._gdf["lane_connectors"], "fid", str(lane_id)) -# predecessor_ids.append([lane_connector_row["entry_lane_fid"]]) -# successor_ids.append([lane_connector_row["exit_lane_fid"]]) - -# # 2. left_boundaries, right_boundaries -# lane_connector_polygons_row = get_row_with_value( -# self._gdf["gen_lane_connectors_scaled_width_polygons"], "lane_connector_fid", str(lane_id) -# ) -# left_boundary_fid = lane_connector_polygons_row["left_boundary_fid"] -# left_boundary = get_row_with_value(self._gdf["boundaries"], "fid", str(left_boundary_fid))["geometry"] - -# right_boundary_fid = lane_connector_polygons_row["right_boundary_fid"] -# right_boundary = get_row_with_value(self._gdf["boundaries"], "fid", str(right_boundary_fid))["geometry"] - -# # 3. baseline_paths -# baseline_path = get_row_with_value(self._gdf["baseline_paths"], "lane_connector_fid", float(lane_id))[ -# "geometry" -# ] - -# left_boundary = align_boundary_direction(baseline_path, left_boundary) -# right_boundary = align_boundary_direction(baseline_path, right_boundary) - -# left_boundaries.append(left_boundary) -# right_boundaries.append(right_boundary) -# baseline_paths.append(baseline_path) - -# # 4. 
geometries -# geometries.append(lane_connector_polygons_row.geometry) - -# data = pd.DataFrame( -# { -# "id": ids, -# "lane_group_id": lane_group_ids, -# "speed_limit_mps": speed_limits_mps, -# "predecessor_ids": predecessor_ids, -# "successor_ids": successor_ids, -# "left_boundary": left_boundaries, -# "right_boundary": right_boundaries, -# "left_lane_id": [None] * len(ids), -# "right_lane_id": [None] * len(ids), -# "baseline_path": baseline_paths, -# } -# ) - -# gdf = gpd.GeoDataFrame(data, geometry=geometries) -# return gdf - -# def _extract_lane_group_dataframe(self) -> gpd.GeoDataFrame: -# lane_group_df = self._extract_nuplan_lane_group_dataframe() -# lane_connector_group_df = self._extract_nuplan_lane_connector_group_dataframe() -# combined_df = pd.concat([lane_group_df, lane_connector_group_df], ignore_index=True) -# return combined_df - -# def _extract_nuplan_lane_group_dataframe(self) -> gpd.GeoDataFrame: -# # NOTE: drops: creator_id, from_edge_fid, to_edge_fid -# ids = self._gdf["lane_groups_polygons"].fid.to_list() -# lane_ids = [] -# intersection_ids = [None] * len(ids) -# predecessor_lane_group_ids = [] -# successor_lane_group_ids = [] -# left_boundaries = [] -# right_boundaries = [] -# geometries = self._gdf["lane_groups_polygons"].geometry.to_list() - -# for lane_group_id in ids: -# # 1. lane_ids -# lane_ids_ = get_all_rows_with_value( -# self._gdf["lanes_polygons"], -# "lane_group_fid", -# lane_group_id, -# )["fid"].tolist() -# lane_ids.append(lane_ids_) - -# # 2. predecessor_lane_group_ids, successor_lane_group_ids -# predecessor_lane_group_ids_ = get_all_rows_with_value( -# self._gdf["lane_group_connectors"], -# "to_lane_group_fid", -# lane_group_id, -# )["fid"].tolist() -# successor_lane_group_ids_ = get_all_rows_with_value( -# self._gdf["lane_group_connectors"], -# "from_lane_group_fid", -# lane_group_id, -# )["fid"].tolist() -# predecessor_lane_group_ids.append(predecessor_lane_group_ids_) -# successor_lane_group_ids.append(successor_lane_group_ids_) - -# # 3. 
left_boundaries, right_boundaries -# lane_group_row = get_row_with_value(self._gdf["lane_groups_polygons"], "fid", str(lane_group_id)) -# left_boundary_fid = lane_group_row["left_boundary_fid"] -# left_boundary = get_row_with_value(self._gdf["boundaries"], "fid", str(left_boundary_fid))["geometry"] - -# right_boundary_fid = lane_group_row["right_boundary_fid"] -# right_boundary = get_row_with_value(self._gdf["boundaries"], "fid", str(right_boundary_fid))["geometry"] - -# repr_baseline_path = get_row_with_value(self._gdf["baseline_paths"], "lane_fid", float(lane_ids_[0]))[ -# "geometry" -# ] - -# left_boundary = align_boundary_direction(repr_baseline_path, left_boundary) -# right_boundary = align_boundary_direction(repr_baseline_path, right_boundary) - -# left_boundaries.append(left_boundary) -# right_boundaries.append(right_boundary) - -# data = pd.DataFrame( -# { -# "id": ids, -# "lane_ids": lane_ids, -# "intersection_id": intersection_ids, -# "predecessor_lane_group_ids": predecessor_lane_group_ids, -# "successor_lane_group_ids": successor_lane_group_ids, -# "left_boundary": left_boundaries, -# "right_boundary": right_boundaries, -# } -# ) -# gdf = gpd.GeoDataFrame(data, geometry=geometries) -# return gdf - -# def _extract_nuplan_lane_connector_group_dataframe(self) -> gpd.GeoDataFrame: -# # NOTE: drops: creator_id, from_edge_fid, to_edge_fid, intersection_fid -# ids = self._gdf["lane_group_connectors"].fid.to_list() -# lane_ids = [] -# intersection_ids = self._gdf["lane_group_connectors"].intersection_fid.to_list() -# predecessor_lane_group_ids = [] -# successor_lane_group_ids = [] -# left_boundaries = [] -# right_boundaries = [] -# geometries = self._gdf["lane_group_connectors"].geometry.to_list() - -# for lane_group_connector_id in ids: -# # 1. lane_ids -# lane_ids_ = get_all_rows_with_value( -# self._gdf["lane_connectors"], "lane_group_connector_fid", lane_group_connector_id -# )["fid"].tolist() -# lane_ids.append(lane_ids_) - -# # 2. predecessor_lane_group_ids, successor_lane_group_ids -# lane_group_connector_row = get_row_with_value( -# self._gdf["lane_group_connectors"], "fid", lane_group_connector_id -# ) -# predecessor_lane_group_ids.append([str(lane_group_connector_row["from_lane_group_fid"])]) -# successor_lane_group_ids.append([str(lane_group_connector_row["to_lane_group_fid"])]) - -# # 3. left_boundaries, right_boundaries -# left_boundary_fid = lane_group_connector_row["left_boundary_fid"] -# left_boundary = get_row_with_value(self._gdf["boundaries"], "fid", str(left_boundary_fid))["geometry"] -# right_boundary_fid = lane_group_connector_row["right_boundary_fid"] -# right_boundary = get_row_with_value(self._gdf["boundaries"], "fid", str(right_boundary_fid))["geometry"] - -# left_boundaries.append(left_boundary) -# right_boundaries.append(right_boundary) - -# data = pd.DataFrame( -# { -# "id": ids, -# "lane_ids": lane_ids, -# "intersection_id": intersection_ids, -# "predecessor_lane_group_ids": predecessor_lane_group_ids, -# "successor_lane_group_ids": successor_lane_group_ids, -# "left_boundary": left_boundaries, -# "right_boundary": right_boundaries, -# } -# ) -# gdf = gpd.GeoDataFrame(data, geometry=geometries) -# return gdf - -# def _extract_intersection_dataframe(self) -> gpd.GeoDataFrame: -# # NOTE: drops: creator_id, intersection_type_fid (?), is_mini (?) 
-# ids = self._gdf["intersections"].fid.to_list() -# lane_group_ids = [] -# for intersection_id in ids: -# lane_group_connector_ids = get_all_rows_with_value( -# self._gdf["lane_group_connectors"], "intersection_fid", str(intersection_id) -# )["fid"].tolist() -# lane_group_ids.append(lane_group_connector_ids) -# data = pd.DataFrame({"id": ids, "lane_group_ids": lane_group_ids}) -# return gpd.GeoDataFrame(data, geometry=self._gdf["intersections"].geometry.to_list()) - -# def _extract_crosswalk_dataframe(self) -> gpd.GeoDataFrame: -# # NOTE: drops: creator_id, intersection_fids, lane_fids, is_marked (?) -# data = pd.DataFrame({"id": self._gdf["crosswalks"].fid.to_list()}) -# return gpd.GeoDataFrame(data, geometry=self._gdf["crosswalks"].geometry.to_list()) - -# def _extract_walkway_dataframe(self) -> gpd.GeoDataFrame: -# # NOTE: drops: creator_id -# data = pd.DataFrame({"id": self._gdf["walkways"].fid.to_list()}) -# return gpd.GeoDataFrame(data, geometry=self._gdf["walkways"].geometry.to_list()) - -# def _extract_carpark_dataframe(self) -> gpd.GeoDataFrame: -# # NOTE: drops: heading, creator_id -# data = pd.DataFrame({"id": self._gdf["carpark_areas"].fid.to_list()}) -# return gpd.GeoDataFrame(data, geometry=self._gdf["carpark_areas"].geometry.to_list()) - -# def _extract_generic_drivable_dataframe(self) -> gpd.GeoDataFrame: -# # NOTE: drops: creator_id -# data = pd.DataFrame({"id": self._gdf["generic_drivable_areas"].fid.to_list()}) -# return gpd.GeoDataFrame(data, geometry=self._gdf["generic_drivable_areas"].geometry.to_list()) - -# def _extract_road_edge_dataframe(self) -> gpd.GeoDataFrame: -# drivable_polygons = ( -# self._gdf["intersections"].geometry.to_list() -# + self._gdf["lane_groups_polygons"].geometry.to_list() -# + self._gdf["carpark_areas"].geometry.to_list() -# + self._gdf["generic_drivable_areas"].geometry.to_list() -# ) -# road_edge_linear_rings = get_road_edge_linear_rings(drivable_polygons) -# road_edges = split_line_geometry_by_max_length(road_edge_linear_rings, MAX_ROAD_EDGE_LENGTH) - -# ids = [] -# road_edge_types = [] -# for idx in range(len(road_edges)): -# ids.append(idx) -# # TODO @DanielDauner: Figure out if other types should/could be assigned here. -# road_edge_types.append(int(RoadEdgeType.ROAD_EDGE_BOUNDARY)) - -# data = pd.DataFrame({"id": ids, "road_edge_type": road_edge_types}) -# return gpd.GeoDataFrame(data, geometry=road_edges) - -# def _extract_road_line_dataframe(self) -> gpd.GeoDataFrame: -# boundaries = self._gdf["boundaries"].geometry.to_list() -# fids = self._gdf["boundaries"].fid.to_list() -# boundary_types = self._gdf["boundaries"].boundary_type_fid.to_list() - -# ids = [] -# road_line_types = [] -# geometries = [] - -# for idx in range(len(boundary_types)): -# ids.append(fids[idx]) -# road_line_types.append(int(NUPLAN_ROAD_LINE_CONVERSION[boundary_types[idx]])) -# geometries.append(boundaries[idx]) - -# data = pd.DataFrame( -# { -# "id": ids, -# "road_line_type": road_line_types, -# } -# ) -# return gpd.GeoDataFrame(data, geometry=geometries) - - -# def flip_linestring(linestring: LineString) -> LineString: -# # TODO: move somewhere more appropriate or implement in Polyline2D, PolylineSE2, etc. -# return LineString(linestring.coords[::-1]) - - -# def lines_same_direction(centerline: LineString, boundary: LineString) -> bool: -# # TODO: refactor helper function. 
-# center_start = np.array(centerline.coords[0]) -# center_end = np.array(centerline.coords[-1]) -# boundary_start = np.array(boundary.coords[0]) -# boundary_end = np.array(boundary.coords[-1]) - -# # Distance from centerline start to boundary start + centerline end to boundary end -# same_dir_dist = np.linalg.norm(center_start - boundary_start) + np.linalg.norm(center_end - boundary_end) -# opposite_dir_dist = np.linalg.norm(center_start - boundary_end) + np.linalg.norm(center_end - boundary_start) - -# return same_dir_dist <= opposite_dir_dist - - -# def align_boundary_direction(centerline: LineString, boundary: LineString) -> LineString: -# # TODO: refactor helper function. -# if not lines_same_direction(centerline, boundary): -# return flip_linestring(boundary) -# return boundary diff --git a/d123/conversion/datasets/nuplan/nuplan_map_conversion.py b/d123/conversion/datasets/nuplan/nuplan_map_conversion.py index fb2fa763..97cceeb8 100644 --- a/d123/conversion/datasets/nuplan/nuplan_map_conversion.py +++ b/d123/conversion/datasets/nuplan/nuplan_map_conversion.py @@ -35,9 +35,9 @@ MAX_ROAD_EDGE_LENGTH: Final[float] = 100.0 # meters, used to filter out very long road edges. TODO @add to config? -def write_nuplan_map(nuplan_maps_root: Path, map_name: str, map_writer: AbstractMapWriter) -> None: - assert map_name in NUPLAN_MAP_LOCATION_FILES.keys(), f"Map name {map_name} is not supported." - source_map_path = nuplan_maps_root / NUPLAN_MAP_LOCATION_FILES[map_name] +def write_nuplan_map(nuplan_maps_root: Path, location: str, map_writer: AbstractMapWriter) -> None: + assert location in NUPLAN_MAP_LOCATION_FILES.keys(), f"Map name {location} is not supported." + source_map_path = nuplan_maps_root / NUPLAN_MAP_LOCATION_FILES[location] assert source_map_path.exists(), f"Map file {source_map_path} does not exist." 
     nuplan_gdf = _load_nuplan_gdf(source_map_path)
     _write_nuplan_lanes(nuplan_gdf, map_writer)
diff --git a/d123/conversion/utils/map_utils/opendrive/opendrive_map_conversion.py b/d123/conversion/utils/map_utils/opendrive/opendrive_map_conversion.py
index 662d08cc..2edafb96 100644
--- a/d123/conversion/utils/map_utils/opendrive/opendrive_map_conversion.py
+++ b/d123/conversion/utils/map_utils/opendrive/opendrive_map_conversion.py
@@ -32,7 +32,7 @@
 def convert_from_xodr(
     xordr_file: Path,
-    map_name: str,
+    location: str,
     interpolation_step_size: float,
     connection_distance_threshold: float,
 ) -> None:
@@ -72,7 +72,7 @@ def convert_from_xodr(
         dataframes[MapLayer.LANE],
         dataframes[MapLayer.LANE_GROUP],
     )
-    map_file_name = D123_MAPS_ROOT / f"{map_name}.gpkg"
+    map_file_name = D123_MAPS_ROOT / f"{location}.gpkg"
     with warnings.catch_warnings():
         warnings.filterwarnings("ignore", message="'crs' was not provided")
         for layer, gdf in dataframes.items():
diff --git a/d123/datatypes/maps/abstract_map.py b/d123/datatypes/maps/abstract_map.py
index 6d279c29..e42f3fa1 100644
--- a/d123/datatypes/maps/abstract_map.py
+++ b/d123/datatypes/maps/abstract_map.py
@@ -84,5 +84,5 @@
         pass

     @property
-    def map_name(self) -> str:
-        raise NotImplementedError
+    def location(self) -> str:
+        return self.get_map_metadata().location
diff --git a/d123/datatypes/scene/arrow/arrow_scene_builder.py b/d123/datatypes/scene/arrow/arrow_scene_builder.py
index b45b7a0f..c2abd492 100644
--- a/d123/datatypes/scene/arrow/arrow_scene_builder.py
+++ b/d123/datatypes/scene/arrow/arrow_scene_builder.py
@@ -91,8 +91,12 @@ def _get_scene_extraction_metadatas(log_path: Union[str, Path], filter: SceneFilter
     recording_table = get_lru_cached_arrow_table(log_path)
     log_metadata = get_log_metadata_from_arrow(log_path)

-    # 1. Filter map name
-    if filter.map_names is not None and log_metadata.map_name not in filter.map_names:
+    # 1. Filter location
+    if (
+        filter.locations is not None
+        and log_metadata.map_metadata is not None
+        and log_metadata.map_metadata.location not in filter.locations
+    ):
         return scene_extraction_metadatas

     start_idx = int(filter.history_s / log_metadata.timestep_seconds) if filter.history_s is not None else 0
diff --git a/d123/datatypes/scene/scene_filter.py b/d123/datatypes/scene/scene_filter.py
index 0bc35e39..083d7ab1 100644
--- a/d123/datatypes/scene/scene_filter.py
+++ b/d123/datatypes/scene/scene_filter.py
@@ -14,7 +14,7 @@ class SceneFilter:
     # scene_tags: List[str] = None

     log_names: Optional[List[str]] = None
-    map_names: Optional[List[str]] = None  # TODO:
+    locations: Optional[List[str]] = None  # TODO:
     scene_uuids: Optional[List[str]] = None  # TODO:

     timestamp_threshold_s: Optional[float] = None  # TODO:
diff --git a/d123/script/config/common/scene_filter/all_scenes.yaml b/d123/script/config/common/scene_filter/all_scenes.yaml
index 1026b737..0134a435 100644
--- a/d123/script/config/common/scene_filter/all_scenes.yaml
+++ b/d123/script/config/common/scene_filter/all_scenes.yaml
@@ -6,7 +6,7 @@
 split_names: null
 log_names: null
-map_names: null
+locations: null
 scene_uuids: null
 timestamp_threshold_s: null
 ego_displacement_minimum_m: null
diff --git a/d123/script/config/common/scene_filter/log_scenes.yaml b/d123/script/config/common/scene_filter/log_scenes.yaml
index b5d98361..f1121675 100644
--- a/d123/script/config/common/scene_filter/log_scenes.yaml
+++ b/d123/script/config/common/scene_filter/log_scenes.yaml
@@ -6,7 +6,7 @@
 split_names: ["av2-sensor-mini_train"]
 log_names: null
-map_names: null
+locations: null
 scene_uuids: null
 timestamp_threshold_s: null
 ego_displacement_minimum_m: null
diff --git a/d123/script/config/common/scene_filter/nuplan_mini_train.yaml b/d123/script/config/common/scene_filter/nuplan_mini_train.yaml
index 4f69838b..d04697ec 100644
--- a/d123/script/config/common/scene_filter/nuplan_mini_train.yaml
+++ b/d123/script/config/common/scene_filter/nuplan_mini_train.yaml
@@ -7,7 +7,7 @@ split_names:
 log_names: null
-map_names: null
+locations: null
 scene_uuids: null
 timestamp_threshold_s: 1.0
 ego_displacement_minimum_m: null
diff --git a/d123/script/config/common/scene_filter/nuplan_mini_val.yaml b/d123/script/config/common/scene_filter/nuplan_mini_val.yaml
index 62f29257..b847f623 100644
--- a/d123/script/config/common/scene_filter/nuplan_mini_val.yaml
+++ b/d123/script/config/common/scene_filter/nuplan_mini_val.yaml
@@ -7,7 +7,7 @@ split_names:
 log_names: null
-map_names: null
+locations: null
 scene_uuids: null
 timestamp_threshold_s: 1.0
 ego_displacement_minimum_m: null
diff --git a/d123/script/config/common/scene_filter/nuplan_sim_agent.yaml b/d123/script/config/common/scene_filter/nuplan_sim_agent.yaml
index 2656ac68..68a2424d 100644
--- a/d123/script/config/common/scene_filter/nuplan_sim_agent.yaml
+++ b/d123/script/config/common/scene_filter/nuplan_sim_agent.yaml
@@ -7,7 +7,7 @@ split_names:
 log_names: null
-map_names: null
+locations: null
 scene_uuids:
   - "796266a84fd65c71"
   - "1ef8b2f08cd65f9a"
diff --git a/d123/script/config/common/scene_filter/viser_scenes.yaml b/d123/script/config/common/scene_filter/viser_scenes.yaml
index a2dd51be..9e561981 100644
--- a/d123/script/config/common/scene_filter/viser_scenes.yaml
+++ b/d123/script/config/common/scene_filter/viser_scenes.yaml
@@ -6,7 +6,7 @@
 split_names: null
 log_names: null
-map_names: null
+locations: null
 scene_uuids: null
 timestamp_threshold_s: 10.0
 ego_displacement_minimum_m: null
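(For context: two small usage sketches of the rename, assuming only names introduced in this patch, `AbstractMap.location` and `SceneFilter.locations`; the location strings are illustrative examples.)

    from d123.datatypes.maps.abstract_map import AbstractMap
    from d123.datatypes.scene.scene_filter import SceneFilter

    def describe_map(map_api: AbstractMap) -> str:
        # `location` now resolves via the map metadata instead of raising
        # NotImplementedError, so every AbstractMap subclass inherits it.
        return f"map located in: {map_api.location}"

    # Scene filtering selects by location rather than map name; all other
    # fields keep their defaults, mirroring the YAML configs above.
    scene_filter = SceneFilter(locations=["us-ma-boston", "sg-one-north"])

diff --git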
a/d123/script/config/conversion/datasets/av2_sensor_dataset.yaml b/d123/script/config/conversion/datasets/av2_sensor_dataset.yaml index 06893f45..4fbda36f 100644 --- a/d123/script/config/conversion/datasets/av2_sensor_dataset.yaml +++ b/d123/script/config/conversion/datasets/av2_sensor_dataset.yaml @@ -3,7 +3,7 @@ av2_sensor_dataset: _convert_: 'all' splits: ["av2-sensor-mini_train"] - log_path: "/media/nvme1/argoverse" + av2_data_root: "/media/nvme1/argoverse" dataset_converter_config: _target_: d123.conversion.dataset_converter_config.DatasetConverterConfig diff --git a/d123/script/config/conversion/datasets/nuplan_private_dataset.yaml b/d123/script/config/conversion/datasets/nuplan_private_dataset.yaml deleted file mode 100644 index 62f42ce9..00000000 --- a/d123/script/config/conversion/datasets/nuplan_private_dataset.yaml +++ /dev/null @@ -1,41 +0,0 @@ -nuplan_private_dataset: - _target_: d123.conversion.datasets.nuplan.nuplan_converter.NuPlanConverter - _convert_: 'all' - - splits: ["nuplan-private_test"] - nuplan_data_root: ${nuplan_data_root} - nuplan_maps_root: ${nuplan_maps_root} - nuplan_sensor_root: ${nuplan_sensor_root} - - dataset_converter_config: - _target_: d123.conversion.dataset_converter_config.DatasetConverterConfig - _convert_: 'all' - - output_path: ${d123_data_root} - force_log_conversion: ${force_log_conversion} - force_map_conversion: ${force_map_conversion} - - # Map - include_map: true - - # Ego - include_ego: true - - # Box Detections - include_box_detections: true - - # Traffic Lights - include_traffic_lights: true - - # Cameras - include_cameras: true - camera_store_option: "path" # "path", "binary", "mp4" - - # LiDARs - include_lidars: true - lidar_store_option: "path" # "path", "binary" - - # Scenario tag / Route - # NOTE: These are only supported for nuPlan. Consider removing or expanding support. - include_scenario_tags: true - include_route: true diff --git a/d123/script/config/conversion/datasets/wopd_dataset.yaml b/d123/script/config/conversion/datasets/wopd_dataset.yaml index da977222..f936270d 100644 --- a/d123/script/config/conversion/datasets/wopd_dataset.yaml +++ b/d123/script/config/conversion/datasets/wopd_dataset.yaml @@ -2,7 +2,7 @@ wopd_dataset: _target_: d123.conversion.datasets.wopd.wopd_converter.WOPDConverter _convert_: 'all' - splits: ["wopd_val"] # Which splits to convert. Options: ["wopd_train", "wopd_val", "wopd_test"] + splits: ["wopd_train", "wopd_val", "wopd_test"] # Which splits to convert. Options: ["wopd_train", "wopd_val", "wopd_test"] wopd_data_root: "/media/nvme1/waymo_perception" # ${wopd_data_root} zero_roll_pitch: true # Whether to zero the roll and pitch of the box detections in global frame. keep_polar_features: false # Add lidar polar features (range, azimuth, elevation) in addition to XYZ. 
(slow if true) diff --git a/d123/script/config/conversion/default_conversion.yaml b/d123/script/config/conversion/default_conversion.yaml index 8f2b7da3..d9cbd752 100644 --- a/d123/script/config/conversion/default_conversion.yaml +++ b/d123/script/config/conversion/default_conversion.yaml @@ -16,12 +16,14 @@ defaults: - log_writer: arrow_log_writer - map_writer: gpkg_map_writer - datasets: - # - nuplan_mini_dataset - # - nuplan_private_dataset + - nuplan_mini_dataset # - carla_dataset - - wopd_dataset - # - av2_sensor_dataset + # - wopd_dataset + - av2_sensor_dataset - _self_ + +terminate_on_exception: True + force_map_conversion: True -force_log_conversion: False +force_log_conversion: True diff --git a/notebooks/deprecated/arrow.ipynb b/notebooks/deprecated/arrow.ipynb deleted file mode 100644 index 8fa5d9bb..00000000 --- a/notebooks/deprecated/arrow.ipynb +++ /dev/null @@ -1,178 +0,0 @@ -{ - "cells": [ - { - "cell_type": "code", - "execution_count": null, - "id": "0", - "metadata": {}, - "outputs": [], - "source": [ - "import os, psutil\n", - "\n", - "def print_memory_usage():\n", - " process = psutil.Process(os.getpid())\n", - " memory_info = process.memory_info()\n", - " print(f\"Memory usage: {memory_info.rss / 1024 ** 2:.2f} MB\")\n", - "\n", - "print_memory_usage()" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "1", - "metadata": {}, - "outputs": [], - "source": [ - "from geoarrow.pyarrow import io\n", - "import tempfile\n", - "import geopandas\n", - "import os\n", - "\n", - "\n", - "import shapely.geometry as geom\n", - "from d123.dataset.maps.gpkg.gpkg_map import GPKGMap\n", - "from d123.dataset.maps.map_datatypes import MapLayer\n", - "\n", - "\n", - "temp_gpkg = \"/home/daniel/d123_workspace/data/maps/carla_town01.gpkg\"\n", - "\n", - "print_memory_usage()\n", - "map_api = GPKGMap(temp_gpkg)\n", - "print_memory_usage()\n", - "map_api.initialize()\n", - "print_memory_usage()\n", - "\n", - "\n", - "table = map_api._gpd_dataframes[MapLayer.LANE]\n", - "\n", - "# import pyarrow as pa\n", - "# pa.table(table).schema\n", - "\n", - "print_memory_usage()\n", - "inter = table.sindex.query(geom.box(-10, -10, 10, 10), predicate='intersects')\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "2", - "metadata": {}, - "outputs": [], - "source": [] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "3", - "metadata": {}, - "outputs": [], - "source": [ - "import geoarrow.pyarrow as ga\n", - "\n", - "\n", - "from shapely import node\n", - "from shapely.strtree import STRtree\n", - "ga_array = ga.as_geoarrow([str(i) for i in pa_table[\"left_boundary\"]])\n", - "# ga_array.wkb()\n", - "print_memory_usage()\n", - "\n", - "STRtree([el.to_shapely() for el in ga_array], node_capacity=10)\n", - "print_memory_usage()" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "4", - "metadata": {}, - "outputs": [], - "source": [ - "import pyarrow.ipc as ipc\n", - "import pyarrow as pa\n", - "\n", - "# Save to Arrow IPC file\n", - "with pa.OSFile(\"data.arrow\", \"wb\") as sink:\n", - " with ipc.new_file(sink, pa_table.schema) as writer:\n", - " writer.write_table(pa_table)\n", - "\n", - "# Load with memory mapping\n", - "with pa.memory_map(\"data.arrow\", \"r\") as source:\n", - " pa_table = ipc.open_file(source).read_all()" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "5", - "metadata": {}, - "outputs": [], - "source": [ - "import geoarrow.pandas as gap\n", - "\n", - "\n", - 
"gap.GeoArrowExtensionDtype" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "6", - "metadata": {}, - "outputs": [], - "source": [ - "print_memory_usage()\n", - "# Load with memory mapping\n", - "with pa.memory_map('data.arrow', 'r') as source:\n", - " table_mmap = ipc.open_file(source).read_all()\n", - "print_memory_usage()\n", - "\n", - "STRtree([el.to_shapely() for el in table_mmap[\"geometry\"]], node_capacity=10)\n", - "print_memory_usage()\n", - "\n", - "\n", - "\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "7", - "metadata": {}, - "outputs": [], - "source": [ - "from shapely.geometry import mapping\n", - "table_mmap[\"geom\"].apply(lambda geom: mapping(geom.to_shapely()) if geom else None)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "8", - "metadata": {}, - "outputs": [], - "source": [] - } - ], - "metadata": { - "kernelspec": { - "display_name": "d123", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.13.3" - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} diff --git a/notebooks/deprecated/extraction_testing.ipynb b/notebooks/deprecated/extraction_testing.ipynb deleted file mode 100644 index 1f617694..00000000 --- a/notebooks/deprecated/extraction_testing.ipynb +++ /dev/null @@ -1,166 +0,0 @@ -{ - "cells": [ - { - "cell_type": "code", - "execution_count": null, - "id": "0", - "metadata": {}, - "outputs": [], - "source": [ - "\n", - "from functools import partial\n", - "from pathlib import Path\n", - "from typing import Iterator, List, Optional, Set, Union\n", - "\n", - "\n", - "import pyarrow as pa\n", - "from d123.common.multithreading.worker_pool import WorkerPool\n", - "\n", - "from d123.dataset.arrow.helper import open_arrow_arrow_table\n", - "from d123.conversion.nuplan.nuplan_data_processor import worker_map\n", - "from d123.dataset.logs.log_metadata import LogMetadata\n", - "from d123.dataset.scene.abstract_scene import AbstractScene\n", - "from d123.dataset.scene.arrow_scene import ArrowScene, SceneExtractionInfo\n", - "from d123.dataset.scene.scene_filter import SceneFilter\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "1", - "metadata": {}, - "outputs": [], - "source": [ - "import numpy as np\n", - "\n", - "DURATION_SECONDS = 10.0 \n", - "HISTORY_SECONDS = 3.0\n", - "ITERATION_DURATION_SECONDS = 0.1\n", - "\n", - "\n", - "\n", - "log_path = \"/home/daniel/d123_workspace/data/nuplan_mini_val/2021.06.07.12.54.00_veh-35_01843_02314.arrow\"\n", - "\n", - "recording_table = open_arrow_arrow_table(log_path)\n", - "log_metadata = LogMetadata.from_arrow_table(recording_table)\n", - "\n", - "\n", - "# scene_uuids = [str(token) for token in np.random.choice(recording_table.column(\"token\").to_pylist(), size=10)]\n", - "scene_uuids = [\"\"]\n", - "timestamp_threshold_s: float = 10.0\n", - "# timestamp_threshold_s = None\n", - "filter = SceneFilter(scene_uuids=scene_uuids, timestamp_threshold_s=timestamp_threshold_s)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "2", - "metadata": {}, - "outputs": [], - "source": [ - "scene_uuids" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "3", - "metadata": {}, - "outputs": [], - "source": [ - "def _get_scene_extraction_info(log_path: str, filter: 
SceneFilter) -> List[SceneExtractionInfo]:\n", - " scene_extraction_infos: List[SceneExtractionInfo] = []\n", - "\n", - " recording_table = open_arrow_arrow_table(log_path)\n", - " log_metadata = LogMetadata.from_arrow_table(recording_table)\n", - "\n", - " # 1. Filter map name\n", - " if filter.map_names is not None and log_metadata.map_name not in filter.map_names:\n", - " return scene_extraction_infos\n", - "\n", - " start_idx = int(filter.history_s / log_metadata.timestep_seconds)\n", - " end_idx = len(recording_table) - int(filter.duration_s / log_metadata.timestep_seconds)\n", - "\n", - " scene_uuid_set = set(filter.scene_uuids) if filter.scene_uuids else None\n", - "\n", - " for idx in range(start_idx, end_idx):\n", - " scene_extraction_info: Optional[SceneExtractionInfo] = None\n", - "\n", - " if scene_uuid_set is None:\n", - " scene_extraction_info = SceneExtractionInfo(\n", - " initial_token=str(recording_table[\"token\"][idx]),\n", - " initial_idx=idx,\n", - " duration_s=filter.duration_s,\n", - " history_s=filter.history_s,\n", - " iteration_duration_s=ITERATION_DURATION_SECONDS,\n", - " )\n", - " elif str(recording_table[\"token\"][idx]) in scene_uuid_set:\n", - " scene_extraction_info = SceneExtractionInfo(\n", - " initial_token=str(recording_table[\"token\"][idx]),\n", - " initial_idx=idx,\n", - " duration_s=filter.duration_s,\n", - " history_s=filter.history_s,\n", - " iteration_duration_s=ITERATION_DURATION_SECONDS,\n", - " )\n", - "\n", - " if scene_extraction_info is not None:\n", - " # TODO: add more options\n", - " if filter.timestamp_threshold_s is not None and len(scene_extraction_infos) > 0:\n", - " iteration_delta = idx - scene_extraction_infos[-1].initial_idx\n", - " if (iteration_delta * log_metadata.timestep_seconds) < filter.timestamp_threshold_s:\n", - " continue\n", - "\n", - " scene_extraction_infos.append(scene_extraction_info)\n", - "\n", - " del recording_table, log_metadata\n", - " return scene_extraction_infos\n", - "\n", - "\n", - "scenes = _get_scene_extraction_info(log_path, filter)\n", - "len(scenes)\n", - "\n", - "# 4580" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "4", - "metadata": {}, - "outputs": [], - "source": [ - "_get_scene_extraction_info(log_path, filter)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "5", - "metadata": {}, - "outputs": [], - "source": [] - } - ], - "metadata": { - "kernelspec": { - "display_name": "d123", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.13.3" - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} diff --git a/notebooks/deprecated/map_conversion/test_nuplan_conversion.ipynb b/notebooks/deprecated/map_conversion/test_nuplan_conversion.ipynb deleted file mode 100644 index 780092ce..00000000 --- a/notebooks/deprecated/map_conversion/test_nuplan_conversion.ipynb +++ /dev/null @@ -1,71 +0,0 @@ -{ - "cells": [ - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "from pathlib import Path\n", - "from d123.conversion.nuplan.nuplan_map_conversion import NuPlanMapConverter, MAP_LOCATIONS\n", - "\n", - "\n", - "\n", - "\n", - "# file_to_delete = Path(\"nuplan_us-pa-pittsburgh-hazelwood.gpkg\")\n", - "# file_to_delete = Path(\"nuplan_us-pa-pittsburgh-hazelwood.gpkg\")\n", - "# 
if file_to_delete.exists():\n", - "# file_to_delete.unlink()\n", - "\n", - "# NuPlanMapConverter().convert()\n", - "root = Path(\"/home/daniel/d123_workspace/data\") / \"maps\"\n", - "\n", - "\n", - "for map_name in MAP_LOCATIONS:\n", - " print(f\"Converting {map_name} map...\")\n", - " file_to_delete = root / f\"nuplan_{map_name}.gpkg\"\n", - " if file_to_delete.exists():\n", - " file_to_delete.unlink()\n", - "\n", - " NuPlanMapConverter(root).convert(map_name=map_name)\n", - " print(f\"Converting {map_name} map... done\")\n", - "\n", - "# NUPLAN_MAPS_ROOT = os.environ[\"NUPLAN_MAPS_ROOT\"]\n", - "\n", - "# map_file_path = Path(NUPLAN_MAPS_ROOT) / \"us-pa-pittsburgh-hazelwood\" / \"9.17.1937\" / \"map.gpkg\"\n", - "\n", - "# meassure RAM\n", - "\n", - "# gdf_in_pixel_coords = gpd.read_file(map_file_path, layer=\"baseline_paths\", fid_as_index=True)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [] - } - ], - "metadata": { - "kernelspec": { - "display_name": "d123", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.12.11" - } - }, - "nbformat": 4, - "nbformat_minor": 2 -} diff --git a/notebooks/deprecated/map_conversion/test_opendrive_conversion.ipynb b/notebooks/deprecated/map_conversion/test_opendrive_conversion.ipynb deleted file mode 100644 index ac7aaded..00000000 --- a/notebooks/deprecated/map_conversion/test_opendrive_conversion.ipynb +++ /dev/null @@ -1,191 +0,0 @@ -{ - "cells": [ - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# import matplotlib.pyplot as plt\n", - "# from shapely.geometry import LineString, Polygon, Point\n", - "# import numpy as np\n", - "\n", - "# # from typing import List\n", - "\n", - "# # from d123.dataset.conversion.map.opendrive.elements.lane import Lane, LaneSection\n", - "# # from d123.dataset.conversion.map.opendrive.elements.reference import Border" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "from pathlib import Path\n", - "from d123.dataset.conversion.map.opendrive.parser.opendrive import OpenDrive\n", - "\n", - "CARLA_MAP_LOCATIONS = [\n", - " \"Town01\", # A small, simple town with a river and several bridges.\n", - " \"Town02\", # A small simple town with a mixture of residential and commercial buildings.\n", - " \"Town03\", # A larger, urban map with a roundabout and large junctions.\n", - " \"Town04\", # A small town embedded in the mountains with a special \"figure of 8\" infinite highway.\n", - " \"Town05\", # Squared-grid town with cross junctions and a bridge. It has multiple lanes per direction. Useful to perform lane changes.\n", - " \"Town06\", # Long many lane highways with many highway entrances and exits. It also has a Michigan left.\n", - " \"Town07\", # A rural environment with narrow roads, corn, barns and hardly any traffic lights.\n", - " \"Town08\", # Secret \"unseen\" town used for the Leaderboard challenge\n", - " \"Town09\", # Secret \"unseen\" town used for the Leaderboard challenge\n", - " \"Town10HD\", # A downtown urban environment with skyscrapers, residential buildings and an ocean promenade.\n", - " \"Town11\", # A Large Map that is undecorated. 
Serves as a proof of concept for the Large Maps feature.\n", - " \"Town12\", # A Large Map with numerous different regions, including high-rise, residential and rural environments.\n", - " \"Town13\", # ???\n", - " \"Town14\", # Secret \"unseen\" town used for the Leaderboard challenge ???\n", - " \"Town15\", # ???\n", - "]\n", - "\n", - "AVAILABLE_CARLA_MAP_LOCATIONS = [\n", - " \"Town01\", # A small, simple town with a river and several bridges.\n", - " \"Town02\", # A small simple town with a mixture of residential and commercial buildings.\n", - " \"Town03\", # A larger, urban map with a roundabout and large junctions.\n", - " \"Town04\", # A small town embedded in the mountains with a special \"figure of 8\" infinite highway.\n", - " \"Town05\", # Squared-grid town with cross junctions and a bridge. It has multiple lanes per direction. Useful to perform lane changes.\n", - " \"Town06\", # Long many lane highways with many highway entrances and exits. It also has a Michigan left.\n", - " \"Town07\", # A rural environment with narrow roads, corn, barns and hardly any traffic lights.\n", - " \"Town10HD\", # A downtown urban environment with skyscrapers, residential buildings and an ocean promenade.\n", - " \"Town11\", # A Large Map that is undecorated. Serves as a proof of concept for the Large Maps feature.\n", - " \"Town12\", # A Large Map with numerous different regions, including high-rise, residential and rural environments.\n", - " \"Town13\", # ???\n", - " \"Town15\", # ???\n", - "]\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# town_name.lower()" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "from d123.dataset.conversion.map.opendrive.opendrive_converter import OpenDriveConverter\n", - "\n", - "\n", - "\n", - "MAP_ROOT = Path(\"/home/daniel/d123_workspace/data\") / \"maps\"\n", - "\n", - "for town_name in AVAILABLE_CARLA_MAP_LOCATIONS:\n", - " map_name = town_name.lower()\n", - " print(f\"Start {map_name} map...\")\n", - " file_to_delete = MAP_ROOT / f\"carla_{map_name}.gpkg\"\n", - " if file_to_delete.exists():\n", - " print(f\"Unlink {file_to_delete} map...\")\n", - " file_to_delete.unlink()\n", - " print(f\"Unlink {file_to_delete} map... done\")\n", - "\n", - " if town_name not in [\"Town11\", \"Town12\", \"Town13\", \"Town15\"]:\n", - " carla_maps_root = Path(\"/home/daniel/carla_workspace/carla_garage/carla/CarlaUE4/Content/Carla/Maps/OpenDrive\")\n", - " carla_map_path = carla_maps_root / f\"{town_name}.xodr\"\n", - "\n", - " else:\n", - " carla_map_path = f\"/home/daniel/carla_workspace/carla_garage/carla/CarlaUE4/Content/Carla/Maps/{town_name}/OpenDrive/{town_name}.xodr\"\n", - "\n", - " print(f\"Parsing {map_name} map...\")\n", - " opendrive = OpenDrive.parse_from_file(carla_map_path)\n", - " print(f\"Parsing {map_name} map... done\")\n", - "\n", - " print(f\"Converting {map_name} map... done\")\n", - " converter = OpenDriveConverter(opendrive)\n", - " converter.run(f\"carla_{town_name.lower()}\")\n", - " print(f\"Converting {map_name} map... 
done\")\n", - " # break\n", - "\n", - "\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "converter" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "from d123.dataset.maps.map_datatypes import MapLayer\n", - "\n", - "\n", - "str(MapLayer.GENERIC_DRIVABLE).split(\".\")[-1].lower()\n", - "\n", - "\n", - "MapLayer.GENERIC_DRIVABLE.name\n", - "\n", - "MapLayer.deserialize(MapLayer.GENERIC_DRIVABLE.name)\n", - "\n", - "\n", - "MapLayer.GENERIC_DRIVABLE.name.lower().islower()\n", - "\n", - "\n", - "AVAILABLE_MAP_LAYERS = list(MapLayer)\n", - "AVAILABLE_MAP_LAYERS\n", - "\n", - "pyogrio.read_dataframe()." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "gdf = converter._extract_walkways_dataframe()\n", - "gdf" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [] - } - ], - "metadata": { - "kernelspec": { - "display_name": "d123", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.12.11" - } - }, - "nbformat": 4, - "nbformat_minor": 2 -} diff --git a/notebooks/deprecated/nuplan_map_dataframe.ipynb b/notebooks/deprecated/nuplan_map_dataframe.ipynb deleted file mode 100644 index 113d8391..00000000 --- a/notebooks/deprecated/nuplan_map_dataframe.ipynb +++ /dev/null @@ -1,570 +0,0 @@ -{ - "cells": [ - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "from nuplan.common.maps.nuplan_map.map_factory import get_maps_api, get_maps_db\n", - "\n", - "MAP_LOCATIONS = {\"sg-one-north\", \"us-ma-boston\", \"us-nv-las-vegas-strip\", \"us-pa-pittsburgh-hazelwood\"}" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "length = {0: 1, 1: 2, 2: 3, 3: 4, 4: 0}\n", - "max(length, key=length.get)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "import geopandas as gpd\n", - "import pyogrio\n", - "\n", - "\n", - "# Path to your .gpkg file\n", - "# path_to_gpkg = \"/home/daniel/nuplan/dataset/maps/us-nv-las-vegas-strip/9.15.1915/map.gpkg\"\n", - "path_to_gpkg = \"/home/daniel/nuplan/dataset/maps/us-ma-boston/9.12.1817/map.gpkg\"\n", - "# path_to_gpkg = \"/home/daniel/nuplan/dataset/maps/us-pa-pittsburgh-hazelwood/9.17.1937/map.gpkg\"\n", - "# path_to_gpkg = \"/home/daniel/nuplan/dataset/maps/sg-one-north/9.17.1964/map.gpkg\"\n", - "\n", - "\n", - "# List all available layers\n", - "layers = [\n", - " \"baseline_paths\",\n", - " \"carpark_areas\",\n", - " \"generic_drivable_areas\",\n", - " \"dubins_nodes\",\n", - " \"lane_connectors\",\n", - " \"intersections\",\n", - " \"boundaries\",\n", - " \"crosswalks\",\n", - " \"lanes_polygons\",\n", - " \"lane_group_connectors\",\n", - " \"lane_groups_polygons\",\n", - " \"walkways\",\n", - " \"gen_lane_connectors_scaled_width_polygons\",\n", - " \"meta\",\n", - "]\n", - "\n", - "\n", - "def load_layer(layer_name: str) -> gpd.geodataframe:\n", - "\n", - " map_meta = gpd.read_file(path_to_gpkg, layer=\"meta\", engine=\"pyogrio\")\n", - " projection_system = map_meta[map_meta[\"key\"] == 
\"projectedCoordSystem\"][\"value\"].iloc[0]\n", - "\n", - " gdf_in_pixel_coords = pyogrio.read_dataframe(path_to_gpkg, layer=layer_name, fid_as_index=True)\n", - " gdf_in_utm_coords = gdf_in_pixel_coords.to_crs(projection_system)\n", - "\n", - " # For backwards compatibility, cast the index to string datatype.\n", - " # and mirror it to the \"fid\" column.\n", - " gdf_in_utm_coords.index = gdf_in_utm_coords.index.map(str)\n", - " gdf_in_utm_coords[\"fid\"] = gdf_in_utm_coords.index\n", - "\n", - " return gdf_in_utm_coords\n", - "\n", - "\n", - "pyogrio.read_dataframe(path_to_gpkg)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "import pandas as pd\n", - "\n", - "\n", - "layers = [\n", - " \"baseline_paths\",\n", - " \"carpark_areas\",\n", - " \"generic_drivable_areas\",\n", - " \"dubins_nodes\",\n", - " \"lane_connectors\",\n", - " \"intersections\",\n", - " \"boundaries\",\n", - " \"crosswalks\",\n", - " \"lanes_polygons\",\n", - " \"lane_group_connectors\",\n", - " \"lane_groups_polygons\",\n", - " \"road_segments\",\n", - " \"stop_polygons\",\n", - " \"traffic_lights\",\n", - " \"walkways\",\n", - " \"gen_lane_connectors_scaled_width_polygons\",\n", - " \"meta\",\n", - "]\n", - "\n", - "\n", - "def non_nan_set(series: pd.Series) -> set:\n", - " return set(series.dropna().values)\n", - "\n", - "\n", - "lanes = load_layer(\"intersections\")\n", - "boundaries = load_layer(\"boundaries\")\n", - "\n", - "\n", - "def get_lane(id: int):\n", - " return lanes[lanes[\"fid\"] == str(id)]\n", - "\n", - "\n", - "def get_right_boundary(lane):\n", - " return boundaries[boundaries[\"fid\"] == str(lane[\"right_boundary_fid\"].iloc[0])]\n", - "\n", - "\n", - "def get_left_boundary(lane):\n", - " return boundaries[boundaries[\"fid\"] == str(lane[\"left_boundary_fid\"].iloc[0])]\n", - "\n", - "\n", - "# boundaries\n", - "\n", - "# correct_lane_fid = 47716\n", - "# error_lane_fid = 48508\n", - "\n", - "# correct_lane_df = get_lane(correct_lane_fid)\n", - "# error_lane_df = get_lane(error_lane_fid)\n", - "\n", - "\n", - "# 46552,46553\n", - "# test_id = 46553\n", - "\n", - "print(lanes.keys())\n", - "non_nan_set(lanes.intersection_type_fid)\n", - "\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "baseline_paths = load_layer(\"baseline_paths\")\n", - "\n", - "\n", - "import matplotlib.pyplot as plt\n", - "from networkx import center\n", - "\n", - "# Create a figure\n", - "fig, ax = plt.subplots(figsize=(15, 12))\n", - "\n", - "# Get unique path_type_fid values\n", - "path_types = baseline_paths[\"path_type_fid\"].unique()\n", - "\n", - "# Define colors for different path types\n", - "colors = [\"blue\", \"red\", \"green\", \"brown\", \"purple\", \"orange\", \"pink\"]\n", - "\n", - "# Plot each path type with a different color\n", - "target_label = 0\n", - "for i, path_type in enumerate(path_types):\n", - " # Filter baseline_paths by path_type\n", - " paths_of_type = baseline_paths[baseline_paths[\"path_type_fid\"] == path_type]\n", - "\n", - " if path_type == target_label:\n", - " alpha = 1.0\n", - " target_fids = paths_of_type.fid.tolist()\n", - " else:\n", - " alpha = 0.5\n", - "\n", - " # Plot these paths with a specific color\n", - " paths_of_type.plot(ax=ax, color=colors[i % len(colors)], label=f\"Path Type {path_type}\", alpha=alpha)\n", - "\n", - "# Add title and legend\n", - "ax.set_title(\"Baseline Paths by Path Type\", fontsize=16)\n", - "ax.legend()\n", - 
"ax.set_xlabel(\"X Coordinate\")\n", - "ax.set_ylabel(\"Y Coordinate\")\n", - "ax.axis(\"equal\") # Maintain aspect ratio\n", - "\n", - "center_zoom = False # Set to True to zoom into the center of the data\n", - "fid_zoom = target_fids[25]\n", - "\n", - "if center_zoom:\n", - " # Get the bounds of the data for better focusing\n", - " x_min, y_min, x_max, y_max = baseline_paths.total_bounds\n", - "\n", - " # Calculate center coordinates\n", - " center_x = (x_min + x_max) / 2\n", - " center_y = (y_min + y_max) / 2\n", - "\n", - " # Set axis limits to zoom into the center (using 30% of the total range)\n", - " range_x = x_max - x_min\n", - " range_y = y_max - y_min\n", - " zoom_factor = 2.0\n", - "\n", - " ax.set_xlim(center_x - range_x * zoom_factor / 2, center_x + range_x * zoom_factor / 2)\n", - " ax.set_ylim(center_y - range_y * zoom_factor / 2, center_y + range_y * zoom_factor / 2)\n", - "\n", - "elif fid_zoom:\n", - " # Filter to get the specific element with the given FID\n", - " specific_path = baseline_paths[baseline_paths[\"fid\"] == str(fid_zoom)]\n", - "\n", - " if not specific_path.empty:\n", - " # Get the bounds of the specific path\n", - " x_min, y_min, x_max, y_max = specific_path.total_bounds\n", - "\n", - " # Add some padding around the element\n", - " padding = 100 # meters\n", - " ax.set_xlim(x_min - padding, x_max + padding)\n", - " ax.set_ylim(y_min - padding, y_max + padding)\n", - "\n", - " # Highlight the specific element\n", - " # specific_path.plot(ax=ax, color=\"yellow\", linewidth=3, zorder=10)\n", - "\n", - " # Update title to show we're zoomed to a specific FID\n", - " ax.set_title(f\"Baseline Path - FID {fid_zoom}\", fontsize=16)\n", - " else:\n", - " print(f\"FID {fid_zoom} not found in baseline_paths\")\n", - "\n", - "\n", - "# Add a title to indicate we're looking at the center\n", - "ax.set_title(\"Baseline Paths by Path Type (Center Zoom)\", fontsize=16)\n", - "\n", - "\n", - "# Convert the specific path coordinates to WGS84 (latitude/longitude) for Google Maps\n", - "if fid_zoom and not specific_path.empty:\n", - " # Create a copy to avoid modifying the original\n", - " wgs84_path = specific_path.copy()\n", - "\n", - " # Convert from the current projection to WGS84 (EPSG:4326)\n", - " wgs84_path = wgs84_path.to_crs(\"EPSG:4326\")\n", - "\n", - " # Get the centroid of the path for easier lookup\n", - " centroid = wgs84_path.geometry.iloc[0].centroid\n", - "\n", - " # Display the coordinates\n", - " print(f\"\\nGoogle Maps coordinates for FID {fid_zoom}:\")\n", - " print(f\"Latitude: {centroid.y}, Longitude: {centroid.x}\")\n", - " print(f\"Google Maps link: https://www.google.com/maps?q={centroid.y},{centroid.x}\")\n", - "\n", - " # Add a text annotation showing coordinates on the plot\n", - " ax.annotate(\n", - " f\"Lat: {centroid.y:.6f}, Lon: {centroid.x:.6f}\",\n", - " xy=(0.05, 0.05),\n", - " xycoords=\"axes fraction\",\n", - " bbox=dict(boxstyle=\"round,pad=0.5\", fc=\"white\", alpha=0.8),\n", - " fontsize=10,\n", - " )\n", - "plt.tight_layout()\n", - "plt.show()" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "baseline_paths = load_layer(\"baseline_paths\")\n", - "intersections = load_layer(\"intersections\") # Load the intersections layer\n", - "\n", - "import matplotlib.pyplot as plt\n", - "from networkx import center\n", - "\n", - "# Create a figure\n", - "fig, ax = plt.subplots(figsize=(15, 12))\n", - "\n", - "# Get unique intersection types\n", - "intersection_types = 
intersections[\"intersection_type_fid\"].unique()\n", - "\n", - "# Define colors for different intersection types\n", - "colors = [\"blue\", \"red\", \"green\", \"brown\", \"purple\", \"orange\", \"pink\"]\n", - "\n", - "# Plot each intersection type with a different color\n", - "target_label = 2 # Target intersection type to highlight\n", - "for i, intersection_type in enumerate(intersection_types):\n", - " # Filter intersections by type\n", - " intersections_of_type = intersections[intersections[\"intersection_type_fid\"] == intersection_type]\n", - "\n", - " if intersection_type == target_label:\n", - " alpha = 1.0\n", - " target_fids = intersections_of_type.fid.tolist()\n", - " else:\n", - " alpha = 0.5\n", - "\n", - " # Plot these intersections with a specific color\n", - " intersections_of_type.plot(ax=ax, color=colors[i % len(colors)], \n", - " label=f\"Intersection Type {intersection_type}\", alpha=alpha)\n", - "\n", - "# Add title and legend\n", - "ax.set_title(\"Intersections by Type\", fontsize=16)\n", - "\n", - "ax.set_xlabel(\"X Coordinate\")\n", - "ax.set_ylabel(\"Y Coordinate\")\n", - "ax.axis(\"equal\") # Maintain aspect ratio\n", - "\n", - "center_zoom = False # Set to True to zoom into the center of the data\n", - "fid_zoom = target_fids[0] if 'target_fids' in locals() and len(target_fids) > 0 else None\n", - "\n", - "if center_zoom:\n", - " # Get the bounds of the data for better focusing\n", - " x_min, y_min, x_max, y_max = intersections.total_bounds\n", - "\n", - " # Calculate center coordinates\n", - " center_x = (x_min + x_max) / 2\n", - " center_y = (y_min + y_max) / 2\n", - "\n", - " # Set axis limits to zoom into the center (using 30% of the total range)\n", - " range_x = x_max - x_min\n", - " range_y = y_max - y_min\n", - " zoom_factor = 2.0\n", - "\n", - " ax.set_xlim(center_x - range_x * zoom_factor / 2, center_x + range_x * zoom_factor / 2)\n", - " ax.set_ylim(center_y - range_y * zoom_factor / 2, center_y + range_y * zoom_factor / 2)\n", - "\n", - "elif fid_zoom:\n", - " # Filter to get the specific intersection with the given FID\n", - " specific_intersection = intersections[intersections[\"fid\"] == fid_zoom]\n", - "\n", - " if not specific_intersection.empty:\n", - " # Get the bounds of the specific intersection\n", - " x_min, y_min, x_max, y_max = specific_intersection.total_bounds\n", - "\n", - " # Add some padding around the element\n", - " padding = 100 # meters\n", - " ax.set_xlim(x_min - padding, x_max + padding)\n", - " ax.set_ylim(y_min - padding, y_max + padding)\n", - "\n", - " # Update title to show we're zoomed to a specific FID\n", - " ax.set_title(f\"Intersection - FID {fid_zoom} (Type {specific_intersection['intersection_type_fid'].iloc[0]})\", fontsize=16)\n", - " else:\n", - " print(f\"FID {fid_zoom} not found in intersections\")\n", - "\n", - "\n", - "# Add a title if not zooming to a specific FID\n", - "if not fid_zoom:\n", - " ax.set_title(\"Intersections by Type\", fontsize=16)\n", - "\n", - "# Convert the specific intersection coordinates to WGS84 (latitude/longitude) for Google Maps\n", - "if fid_zoom and 'specific_intersection' in locals() and not specific_intersection.empty:\n", - " # Create a copy to avoid modifying the original\n", - " wgs84_intersection = specific_intersection.copy()\n", - "\n", - " # Convert from the current projection to WGS84 (EPSG:4326)\n", - " wgs84_intersection = wgs84_intersection.to_crs(\"EPSG:4326\")\n", - "\n", - " # Get the centroid of the intersection for easier lookup\n", - " centroid = 
wgs84_intersection.geometry.iloc[0].centroid\n", - "\n", - " # Display the coordinates\n", - " print(f\"\\nGoogle Maps coordinates for Intersection FID {fid_zoom}:\")\n", - " print(f\"Latitude: {centroid.y}, Longitude: {centroid.x}\")\n", - " print(f\"Google Maps link: https://www.google.com/maps?q={centroid.y},{centroid.x}\")\n", - "\n", - " # Add a text annotation showing coordinates on the plot\n", - " ax.annotate(\n", - " f\"Lat: {centroid.y:.6f}, Lon: {centroid.x:.6f}\",\n", - " xy=(0.05, 0.05),\n", - " xycoords=\"axes fraction\",\n", - " bbox=dict(boxstyle=\"round,pad=0.5\", fc=\"white\", alpha=0.8),\n", - " fontsize=10,\n", - " )\n", - "ax.legend()\n", - "plt.tight_layout()\n", - "plt.show()\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "def _get_set(df, column: str):\n", - " \"\"\"\n", - " Returns a set of unique values in the specified column of the DataFrame.\n", - " \"\"\"\n", - " return set(df[column].unique())" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "_get_set(lanes, \"lane_index\")" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "lanes[lanes[\"left_boundary_fid\"] == test_id].keys()" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "get_left_boundary(correct_lane_df)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "get_right_boundary(correct_lane_df)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "get_left_boundary(error_lane_df)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "get_right_boundary(error_lane_df)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "import pandas as pd\n", - "\n", - "lane_df = load_layer(\"generic_drivable_areas\")\n", - "lane_df" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "walkways_df = load_layer(\"carparks\")\n", - "walkways_df" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "gen_lane_connectors_scaled_width_polygons_df = load_layer(\"boundaries\")\n", - "\n", - "gen_lane_connectors_scaled_width_polygons_df[gen_lane_connectors_scaled_width_polygons_df[\"fid\"] == \"17950\"]" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "[None] * 10" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "import pandas as pd\n", - "\n", - "lane_df = load_layer(\"baseline_paths\")\n", - "lane_df" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "def mps_to_mph(mph: float) -> float:\n", - " \"\"\"\n", - " Convert meters per second to miles per hour.\n", - " :param mph: miles per hour [mi/h]\n", - " :return: meters per second [m/s]\n", - " \"\"\"\n", - " return mph / 0.44704\n", - "\n", - "\n", - "mps_to_mph(6.705409029950827)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# fix, ax = plt.subplots(figsize=(10, 10))\n", - "\n", - "# type = 
3\n", - "\n", - "# for i in np.random.choice(len(geoms[type]), 10):\n", - "# ax.plot(*geoms[type][i].coords.xy, color=\"blue\")" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "geoms[2][1]" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "geoms[3][1]" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "d123", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.12.11" - } - }, - "nbformat": 4, - "nbformat_minor": 2 -} diff --git a/notebooks/deprecated/scene_rendering.ipynb b/notebooks/deprecated/scene_rendering.ipynb deleted file mode 100644 index 0a44912d..00000000 --- a/notebooks/deprecated/scene_rendering.ipynb +++ /dev/null @@ -1,115 +0,0 @@ -{ - "cells": [ - { - "cell_type": "code", - "execution_count": null, - "id": "0", - "metadata": {}, - "outputs": [], - "source": [ - "from pathlib import Path\n", - "from d123.dataset.scene.arrow_scene import ArrowScene\n", - "from d123.common.visualization.matplotlib.plots import plot_scene_at_iteration\n", - "\n", - "\n", - "\n", - "# log_name = \"2021.06.07.12.54.00_veh-35_01843_02314\"\n", - "# log_file = Path(f\"/home/daniel/d123_workspace/data/nuplan_mini_val/{log_name}.arrow\")\n", - "\n", - "log_name = \"_Rep0_longest1_route0_07_04_10_18_47\"\n", - "log_file = Path(f\"/home/daniel/d123_workspace/data/carla/{log_name}.arrow\")\n", - "scene = ArrowScene(log_file)\n", - "\n", - "plot_scene_at_iteration(scene, iteration=1000)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "1", - "metadata": {}, - "outputs": [], - "source": [ - "scene._vehicle_parameters" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "2", - "metadata": {}, - "outputs": [], - "source": [ - "import traceback\n", - "from d123.common.visualization.matplotlib.plots import render_scene_animation\n", - "\n", - "output_path = Path(\"/home/daniel/d123_logs_videos\")\n", - "# render_scene_as_mp4(scene, output_path, fps=30, end_idx=10000, step=5, dpi=100)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "3", - "metadata": {}, - "outputs": [], - "source": [ - "# Create an mp4 animation with a specific FPS\n", - "import traceback\n", - "from d123.common.visualization.matplotlib.plots import render_scene_animation\n", - "\n", - "\n", - "output_path = Path(\"/home/daniel/d123_logs_videos\")\n", - "log_path = Path(\"/home/daniel/d123_workspace/data/nuplan_mini_val\")\n", - "for log_file in log_path.iterdir():\n", - " scene = ArrowScene(log_file)\n", - " try:\n", - " render_scene_animation(scene, output_path, fps=30, end_idx=None, step=5)\n", - " except Exception as e:\n", - " traceback.print_exc()\n", - " erroneous_file = output_path / f\"{log_name}.mp4\"\n", - " if erroneous_file.exists():\n", - " erroneous_file.unlink()\n", - " del scene\n", - " # break" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "4", - "metadata": {}, - "outputs": [], - "source": [] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "5", - "metadata": {}, - "outputs": [], - "source": [] - } - ], - "metadata": { - "kernelspec": { - "display_name": "d123", - "language": "python", - "name": "python3" - }, - "language_info": { - 
"codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.12.11" - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} diff --git a/notebooks/deprecated/test_intesection_polygons.ipynb b/notebooks/deprecated/test_intesection_polygons.ipynb deleted file mode 100644 index 4329d274..00000000 --- a/notebooks/deprecated/test_intesection_polygons.ipynb +++ /dev/null @@ -1,293 +0,0 @@ -{ - "cells": [ - { - "cell_type": "code", - "execution_count": null, - "id": "0", - "metadata": {}, - "outputs": [], - "source": [ - "import numpy as np\n", - "from shapely.geometry import LineString, Polygon, MultiPolygon\n", - "from shapely.ops import unary_union, polygonize\n", - "import matplotlib.pyplot as plt\n", - "import geopandas as gpd\n", - "\n", - "from shapely.geometry import Point" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "1", - "metadata": {}, - "outputs": [], - "source": [ - "\n", - "def extract_intersection_outline(lanes):\n", - "\n", - " # Step 1: Extract all boundary line segments\n", - " all_polygons = []\n", - " for lane in lanes:\n", - " all_polygons.append(lane['polygon'])\n", - " \n", - " # Step 2: Merge all boundaries and extract the enclosed polygons\n", - " merged_boundaries = unary_union(all_polygons)\n", - "\n", - " # Step 3: Generate polygons from the merged lines\n", - " polygons = list(polygonize(merged_boundaries))\n", - " \n", - " if not polygons:\n", - " # If no polygon is formed, use buffer-based approach\n", - " return buffer_based_outline(lanes)\n", - " \n", - " # Step 4: Select the polygon that represents the intersection\n", - " # Usually it's the largest polygon\n", - " if len(polygons) == 1:\n", - " return polygons[0]\n", - " else:\n", - " # Take the largest polygon if there are multiple\n", - " return max(polygons, key=lambda p: p.area)\n", - "\n", - "def buffer_based_outline(lanes):\n", - " \"\"\"\n", - " Alternative approach using buffer operations when line-based\n", - " polygonization doesn't work well.\n", - " \"\"\"\n", - " # Create a small buffer around each boundary\n", - " buffer_distance = 0.5 # Adjust based on your data's scale\n", - " buffered_lanes = []\n", - " \n", - " for lane in lanes:\n", - " # Buffer each lane as a whole\n", - " left = lane['left_boundary']\n", - " right = lane['right_boundary']\n", - " \n", - " # Create a lane polygon by buffering both sides and connecting ends\n", - " if left.coords[0] != right.coords[0]: # Check if start points need connecting\n", - " start_connector = LineString([left.coords[0], right.coords[0]])\n", - " all_parts = [left, right, start_connector]\n", - " else:\n", - " all_parts = [left, right]\n", - " \n", - " if left.coords[-1] != right.coords[-1]: # Check if end points need connecting\n", - " end_connector = LineString([left.coords[-1], right.coords[-1]])\n", - " all_parts.append(end_connector)\n", - " \n", - " lane_outline = unary_union([line.buffer(buffer_distance) for line in all_parts])\n", - " buffered_lanes.append(lane_outline)\n", - " \n", - " # Merge all lane buffers\n", - " intersection_area = unary_union(buffered_lanes)\n", - " \n", - " # Extract the exterior boundary\n", - " if isinstance(intersection_area, MultiPolygon):\n", - " largest_polygon = max(intersection_area.geoms, key=lambda p: p.area)\n", - " return Polygon(largest_polygon.exterior.coords)\n", - " else:\n", - " return 
Polygon(intersection_area.exterior.coords)\n", - "\n", - "def is_boundary_part_of_outline(boundary, outline, tolerance=1e-8):\n", - " \"\"\"\n", - " Determine if a boundary linestring is part of the intersection outline.\n", - " \n", - " Parameters:\n", - " boundary -- A shapely LineString representing a lane boundary\n", - " outline -- A shapely Polygon representing the intersection outline\n", - " tolerance -- Distance tolerance for considering a point on the outline\n", - " \n", - " Returns:\n", - " Boolean indicating if the boundary contributes to the outline\n", - " \"\"\"\n", - " # Sample points along the boundary\n", - " num_points = min(20, len(boundary.coords))\n", - " sample_indices = np.linspace(0, len(boundary.coords) - 1, num_points).astype(int)\n", - " sample_points = [boundary.coords[i] for i in sample_indices]\n", - " \n", - " # Check if sampled points are on the outline\n", - " outline_boundary = outline.exterior\n", - " points_on_outline = 0\n", - " \n", - " for point in sample_points:\n", - " distance = outline_boundary.distance(Point(point))\n", - " if distance <= tolerance:\n", - " points_on_outline += 1\n", - " \n", - " # If most points are on the outline, consider it part of the outline\n", - " return points_on_outline / num_points > 0.7\n", - "\n", - "def identify_outline_boundaries(lanes, intersection_outline):\n", - " \"\"\"\n", - " Identify which lane boundaries contribute to the intersection outline.\n", - " \n", - " Parameters:\n", - " lanes -- List of dictionaries with 'left_boundary' and 'right_boundary' keys\n", - " intersection_outline -- Shapely Polygon representing the outline\n", - " \n", - " Returns:\n", - " List of boundary linestrings that form the outline\n", - " \"\"\"\n", - " \n", - " \n", - " outline_boundaries = []\n", - " \n", - " for i, lane in enumerate(lanes):\n", - " # Check left boundary\n", - " if is_boundary_part_of_outline(lane['left_boundary'], intersection_outline):\n", - " outline_boundaries.append({\n", - " 'lane_index': i,\n", - " 'boundary_type': 'left',\n", - " 'linestring': lane['left_boundary']\n", - " })\n", - " \n", - " # Check right boundary\n", - " if is_boundary_part_of_outline(lane['right_boundary'], intersection_outline):\n", - " outline_boundaries.append({\n", - " 'lane_index': i,\n", - " 'boundary_type': 'right',\n", - " 'linestring': lane['right_boundary']\n", - " })\n", - " \n", - " return outline_boundaries\n", - "\n", - "def visualize_intersection(lanes, intersection_outline=None, outline_boundaries=None):\n", - " \"\"\"\n", - " Visualize the intersection, its outline, and contributing boundaries.\n", - " \"\"\"\n", - " plt.figure(figsize=(12, 10))\n", - " \n", - " # Plot all lane boundaries\n", - " for i, lane in enumerate(lanes):\n", - " plt.plot(*lane['left_boundary'].xy, 'b-', alpha=0.5, label='Left boundary' if i == 0 else \"\")\n", - " plt.plot(*lane['right_boundary'].xy, 'g-', alpha=0.5, label='Right boundary' if i == 0 else \"\")\n", - " \n", - " # Plot the intersection outline if provided\n", - " if intersection_outline:\n", - " plt.plot(*intersection_outline.exterior.xy, 'r-', linewidth=2, label='Intersection outline')\n", - " \n", - " # Highlight the contributing boundaries if provided\n", - " if outline_boundaries:\n", - " for boundary in outline_boundaries:\n", - " color = 'purple' if boundary['boundary_type'] == 'right' else 'orange'\n", - " plt.plot(*boundary['linestring'].xy, color=color, linewidth=2.5, \n", - " label=f\"{boundary['boundary_type'].capitalize()} boundary in outline\" if boundary == 
outline_boundaries[0] else \"\")\n", - " \n", - " plt.axis('equal')\n", - " plt.grid(True)\n", - " plt.legend()\n", - " plt.title('Intersection with Lane Boundaries and Outline')\n", - " plt.show()\n", - "\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "2", - "metadata": {}, - "outputs": [], - "source": [ - "lane_group_df = gpd.read_file(\"/home/daniel/d123_workspace/d123/notebooks/carla_town05.gpkg\", layer=\"lane_group\")\n", - "lane_group_df = lane_group_df[~lane_group_df[\"intersection_id\"].isna()]\n", - "\n", - "\n", - "intersection_ids = list(set(lane_group_df[\"intersection_id\"]))\n", - "intersection_id = intersection_ids[2]\n", - "intersection_df = lane_group_df[lane_group_df[\"intersection_id\"] == intersection_id]\n", - "\n", - "# fig, ax = plt.subplots(figsize=(10, 10))\n", - "# intersection_df.plot(ax=ax)\n", - "# intersection_df\n", - "\n", - "print(intersection_id)\n", - "\n", - "\n", - "left_boundaries = list(intersection_df[\"left_boundary\"])\n", - "right_boundaries = list(intersection_df[\"right_boundary\"])\n", - "polygons = list(intersection_df[\"geometry\"])\n", - "lanes = []\n", - "for i in range(len(left_boundaries)):\n", - " lane = {\n", - " 'left_boundary': left_boundaries[i],\n", - " 'right_boundary': right_boundaries[i],\n", - " \"polygon\": polygons[i]\n", - " }\n", - " lanes.append(lane)\n", - "\n", - "# lanes = [\n", - "# {\n", - "# 'left_boundary': LineString([(0, 0), (10, 0)]),\n", - "# 'right_boundary': LineString([(0, 5), (10, 5)])\n", - "# },\n", - "# {\n", - "# 'left_boundary': LineString([(10, 0), (20, 0)]),\n", - "# 'right_boundary': LineString([(10, 5), (20, 5)])\n", - "# },\n", - "# # Add more lanes as needed\n", - "# ]\n", - "\n", - "\n", - "outline = extract_intersection_outline(lanes)\n", - "# outline_boundaries = identify_outline_boundaries(lanes, outline)\n", - "# visualize_intersection(lanes, outline, outline_boundaries)\n", - "\n", - "print(isinstance(outline, MultiPolygon))\n", - "\n", - "\n", - "polygons = list(polygonize(outline))\n", - "\n", - "polygons[0]\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "3", - "metadata": {}, - "outputs": [], - "source": [ - "left_boundaries" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "4", - "metadata": {}, - "outputs": [], - "source": [ - "lane_group_df = gpd.read_file(\"/home/daniel/d123_workspace/d123/notebooks/carla_town05.gpkg\", layer=\"intersection\")\n", - "lane_group_df[\"lane_group_ids\"][0]" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "5", - "metadata": {}, - "outputs": [], - "source": [] - } - ], - "metadata": { - "kernelspec": { - "display_name": "d123", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.13.3" - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} diff --git a/notebooks/deprecated/test_scene_builder.ipynb b/notebooks/deprecated/test_scene_builder.ipynb deleted file mode 100644 index 105e60a5..00000000 --- a/notebooks/deprecated/test_scene_builder.ipynb +++ /dev/null @@ -1,258 +0,0 @@ -{ - "cells": [ - { - "cell_type": "code", - "execution_count": null, - "id": "0", - "metadata": {}, - "outputs": [], - "source": [ - "from d123.dataset.scene.scene_builder import ArrowSceneBuilder\n", - "from d123.dataset.scene.scene_filter import 
SceneFilter\n", - "\n", - "from d123.common.multithreading.worker_sequential import Sequential\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "1", - "metadata": {}, - "outputs": [], - "source": [] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "2", - "metadata": {}, - "outputs": [], - "source": [ - "\n", - "# log_names = [\"2021.06.07.12.54.00_veh-35_01843_02314\"]\n", - "# log_names = [\"_Rep0_bench2drive220_route2_06_12_20_50_31\"]\n", - "log_names = None\n", - "split = \"nuplan_mini_val\"\n", - "scene_uuids = [\"8445a99210185a81\"]\n", - "scene_filter = SceneFilter(split_names=[split], log_names=log_names, duration_s=8.1, scene_uuids=scene_uuids)\n", - "scene_builder = ArrowSceneBuilder(\"/home/daniel/d123_workspace/data\")\n", - "worker = Sequential() \n", - "# worker = RayDistributed()\n", - "scenes = scene_builder.get_scenes(scene_filter, worker)\n", - "\n", - "print(len(scenes))" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "3", - "metadata": {}, - "outputs": [], - "source": [ - "from pathlib import Path\n", - "from d123.common.visualization.matplotlib.plots import render_scene_animation\n", - "\n", - "output_path = Path(\"/home/daniel/d123_route_testing\")\n", - "for scene in scenes:\n", - " render_scene_animation(scene, output_path, fps=20, end_idx=None, step=1)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "4", - "metadata": {}, - "outputs": [], - "source": [ - "# from pathlib import Path\n", - "# from typing import Optional, Tuple\n", - "\n", - "# import matplotlib.animation as animation\n", - "# import matplotlib.pyplot as plt\n", - "# from tqdm import tqdm\n", - "\n", - "# from d123.geometry import Point2D, StateSE2\n", - "# from d123.geometry.bounding_box import BoundingBoxSE2\n", - "# from d123.common.visualization.color.default import EGO_VEHICLE_CONFIG\n", - "# from d123.common.visualization.matplotlib.observation import (\n", - "# add_bounding_box_to_ax,\n", - "# add_box_detections_to_ax,\n", - "# add_default_map_on_ax,\n", - "# add_traffic_lights_to_ax,\n", - "# )\n", - "# from d123.dataset.arrow.conversion import TrafficLightDetectionWrapper\n", - "# from d123.dataset.maps.abstract_map import AbstractMap\n", - "# from d123.dataset.observation.detection.detection import BoxDetectionWrapper\n", - "# from d123.dataset.scene.abstract_scene import AbstractScene\n", - "# from nuplan.common.actor_state.ego_state import EgoState\n", - "# import io\n", - "# from PIL import Image\n", - "\n", - "\n", - "# def add_ego_vehicle_to_ax_(ax: plt.Axes, ego_state: EgoState) -> None:\n", - "# bounding_box = BoundingBoxSE2(\n", - "# center=StateSE2(*ego_state.center),\n", - "# length=ego_state.car_footprint.length,\n", - "# width=ego_state.car_footprint.width,\n", - "# )\n", - "# add_bounding_box_to_ax(ax, bounding_box, EGO_VEHICLE_CONFIG)\n", - "\n", - "\n", - "# def _plot_scene_on_ax(\n", - "# ax: plt.Axes,\n", - "# map_api: AbstractMap,\n", - "# ego_state: EgoState,\n", - "# box_detections: BoxDetectionWrapper,\n", - "# traffic_light_detections: TrafficLightDetectionWrapper,\n", - "# radius: float = 80,\n", - "# ) -> plt.Axes:\n", - "\n", - "# point_2d = Point2D(ego_state.center.x, ego_state.center.y)\n", - "# add_default_map_on_ax(ax, map_api, point_2d, radius=radius)\n", - "# add_traffic_lights_to_ax(ax, traffic_light_detections, map_api)\n", - "\n", - "# add_box_detections_to_ax(ax, box_detections)\n", - "# add_ego_vehicle_to_ax_(ax, ego_state)\n", - "\n", - "# ax.set_xlim(point_2d.x - 
radius, point_2d.x + radius)\n", - "# ax.set_ylim(point_2d.y - radius, point_2d.y + radius)\n", - "\n", - "\n", - "# ax.set_aspect(\"equal\", adjustable=\"box\")\n", - "# return ax\n", - "\n", - "# def plot_scene_to_image(\n", - "# map_api: AbstractMap,\n", - "# ego_state: EgoState,\n", - "# box_detections: BoxDetectionWrapper,\n", - "# traffic_light_detections: TrafficLightDetectionWrapper,\n", - "# radius: float = 80,\n", - "# figsize: Tuple[int, int] = (8, 8),\n", - "# ) -> Image:\n", - "\n", - "# fig, ax = plt.subplots(figsize=figsize)\n", - "# _plot_scene_on_ax(ax, map_api, ego_state, box_detections, traffic_light_detections, radius)\n", - "# ax.set_aspect(\"equal\", adjustable=\"box\")\n", - "# plt.subplots_adjust(left=0.05, right=0.95, top=0.95, bottom=0.05)\n", - "# # plt.tight_layout()\n", - " \n", - "# buf = io.BytesIO()\n", - "# fig.savefig(buf, format=\"png\", bbox_inches=\"tight\")\n", - "# plt.close(fig)\n", - "# buf.seek(0)\n", - "# img = Image.open(buf)\n", - "# return img\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "5", - "metadata": {}, - "outputs": [], - "source": [ - "# from d123.simulation.gym.demo_gym_env import DemoGymEnv\n", - "\n", - "# images = []\n", - "# action = [1.0, 0.1] # Placeholder action, replace with actual action logic\n", - "# env = DemoGymEnv(scenes)\n", - "# map_api, ego_state, detection_observation = env.reset()\n", - "\n", - "# images.append(\n", - "# plot_scene_to_image(\n", - "# map_api,\n", - "# ego_state,\n", - "# detection_observation.box_detections,\n", - "# detection_observation.traffic_light_detections,\n", - "# )\n", - "# )\n", - "\n", - "# for i in range(150):\n", - "# ego_state, detection_observation, end = env.step(action)\n", - "# images.append(\n", - "# plot_scene_to_image(\n", - "# map_api,\n", - "# ego_state,\n", - "# detection_observation.box_detections,\n", - "# detection_observation.traffic_light_detections,\n", - "# )\n", - "# )\n", - "# if end:\n", - "# print(\"End of scene reached.\")\n", - "# break\n", - "\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "6", - "metadata": {}, - "outputs": [], - "source": [ - "# import numpy as np\n", - "\n", - "\n", - "# def create_gif(images, output_path, duration=100):\n", - "# \"\"\"\n", - "# Create a GIF from a list of PIL images.\n", - "\n", - "# Args:\n", - "# images (list): List of PIL.Image objects.\n", - "# output_path (str): Path to save the GIF.\n", - "# duration (int): Duration between frames in milliseconds.\n", - "# \"\"\"\n", - "# if images:\n", - "# print(len(images))\n", - "# images_p = [img.convert('P', palette=Image.ADAPTIVE) for img in images]\n", - "# images_p[0].save(\n", - "# output_path,\n", - "# save_all=True,\n", - "# append_images=images_p[1:],\n", - "# duration=duration,\n", - "# loop=0\n", - "# )\n", - "\n", - "# create_gif(images, f\"{split}_{np.random.randint(0, 1000)}_{action}.gif\", duration=20)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "7", - "metadata": {}, - "outputs": [], - "source": [] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "8", - "metadata": {}, - "outputs": [], - "source": [] - } - ], - "metadata": { - "kernelspec": { - "display_name": "d123", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.12.11" - 
} - }, - "nbformat": 4, - "nbformat_minor": 5 -} diff --git a/notebooks/deprecated/test_waypoints.ipynb b/notebooks/deprecated/test_waypoints.ipynb deleted file mode 100644 index 08e21ba0..00000000 --- a/notebooks/deprecated/test_waypoints.ipynb +++ /dev/null @@ -1,167 +0,0 @@ -{ - "cells": [ - { - "cell_type": "code", - "execution_count": null, - "id": "0", - "metadata": {}, - "outputs": [], - "source": [ - "from d123.dataset.scene.scene_builder import ArrowSceneBuilder\n", - "from d123.dataset.scene.scene_filter import SceneFilter" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "1", - "metadata": {}, - "outputs": [], - "source": [ - "from d123.common.multithreading.worker_sequential import Sequential" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "2", - "metadata": {}, - "outputs": [], - "source": [ - "import os, psutil\n", - "\n", - "def print_memory_usage():\n", - " process = psutil.Process(os.getpid())\n", - " memory_info = process.memory_info()\n", - " print(f\"Memory usage: {memory_info.rss / 1024 ** 2:.2f} MB\")\n", - "\n", - "print_memory_usage()\n", - "# log_names = [\"2021.06.07.12.54.00_veh-35_01843_02314\"]\n", - "# log_names = [\"_Rep0_bench2drive220_route2_06_12_20_50_31\"]\n", - "log_names = [\"_Rep0_longest1_route0_06_13_17_21_21\"]\n", - "split = \"carla\"\n", - "scene_filter = SceneFilter(split_names=[split], log_names=log_names, timestamp_threshold_s=None, duration_s=None)\n", - "scene_builder = ArrowSceneBuilder(\"/home/daniel/d123_workspace/data\")\n", - "worker = Sequential() \n", - "# worker = RayDistributed()\n", - "scenes = scene_builder.get_scenes(scene_filter, worker)\n", - "\n", - "print(len(scenes))\n", - "print_memory_usage()" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "3", - "metadata": {}, - "outputs": [], - "source": [ - "from shapely.geometry import LineString\n", - "import numpy as np\n", - "from matplotlib import pyplot as plt\n", - "from d123.conversion.carla.carla_data_processor import _load_json_gz \n", - "from d123.common.visualization.matplotlib.plots import _plot_scene_on_ax\n", - "json_dict = _load_json_gz(\"/home/daniel/carla_workspace/data/_Rep0_longest1_route0_06_13_17_21_21/boxes/0000000002.json.gz\")\n", - "json_dict\n", - "\n", - "\n", - "scene = scenes[0]\n", - "fig, ax = plt.subplots(figsize=(10, 10))\n", - "_plot_scene_on_ax(ax, scene, 4000, 200)\n", - "\n", - "\n", - "# x,y = json_dict[\"pos_global\"]\n", - "\n", - "# target_points = np.array([json_dict[\"target_point\"], json_dict[\"target_point_next\"],])\n", - "# route = np.array(json_dict[\"route\"])\n", - "# route[...,1] *= (-1)\n", - "# route = route[:500]\n", - "# target_points[...,1] *= (-1)\n", - "# for point in route:\n", - "# print(point)\n", - "\n", - "# linestring = LineString(route)\n", - "\n", - "\n", - "# ax.plot(*route.T, color=\"green\", marker=\"o\", markersize=1, zorder=20, label=\"route\")\n", - "# ax.plot(*target_points.T, color=\"red\", marker=\"o\", markersize=10, zorder=20, label=\"route\")\n", - "# ax.legend()\n", - "# print(route[0,0], route[0,1], linestring.length)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "4", - "metadata": {}, - "outputs": [], - "source": [ - "len(route)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "5", - "metadata": {}, - "outputs": [], - "source": [ - "# list(json_dict.keys())\n", - "\n", - "x,y = json_dict[\"pos_global\"]\n", - "\n", - "route = np.array([json_dict[\"target_point\"], 
json_dict[\"target_point_next\"],])\n", - "route[...,0] += x\n", - "route[...,1] += y\n", - "\n", - "route[...,1] *= (-1)\n", - "\n", - "ax.plot(*route, color=\"red\", marker=\"o\", markersize=10, zorder=20, label=\"Target Points\")" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "6", - "metadata": {}, - "outputs": [], - "source": [ - "json_dict.keys()\n", - "\n", - "json_dict[\"route\"]" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "7", - "metadata": {}, - "outputs": [], - "source": [ - "route.tolist()" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "d123", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.12.11" - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} diff --git a/notebooks/viz/bev_matplotlib.ipynb b/notebooks/viz/bev_matplotlib.ipynb index f1a18ba8..37b5ef20 100644 --- a/notebooks/viz/bev_matplotlib.ipynb +++ b/notebooks/viz/bev_matplotlib.ipynb @@ -40,13 +40,16 @@ "# log_names = [\"2021.09.29.17.35.58_veh-44_00066_00432\"]\n", "\n", "\n", - "splits = [\"wopd_val\"]\n", + "\n", + "# splits = [\"wopd_val\"]\n", "# splits = [\"carla\"]\n", "# splits = [\"nuplan-mini_test\"]\n", - "# splits = [\"av2-sensor-mini_train\"]\n", + "splits = [\"av2-sensor-mini_train\"]\n", "# log_names = None\n", "\n", "\n", + "# generator = Path(\"/home/daniel/d123_workspace/data/logs\").iterdir()\n", + "\n", "log_names = None\n", "scene_uuids = None\n", "\n", @@ -252,20 +255,16 @@ " return fig, ax\n", "\n", "\n", - "scene_index = 17\n", + "# scene_index = \n", "iteration = 99\n", - "fig, ax = plot_scene_at_iteration(scenes[scene_index], iteration=iteration, radius=500)\n", - "plt.show()\n", - "\n", - "\n", - "# camera = scenes[scene_index].get_camera_at_iteration(\n", - "# iteration=iteration, camera_type=CameraType.CAM_F0\n", - "# )\n", "\n", - "# plt.imshow(camera.image, cmap=\"gray\", vmin=0, vmax=255)\n", - "# # # fig.savefig(f\"/home/daniel/scene_{scene_index}_iteration_1.pdf\", dpi=300, bbox_inches=\"tight\")\n", + "fig, ax = plt.subplots(1, 3, figsize=(15, 5))\n", + "scene = np.random.choice(scenes)\n", + "_plot_scene_on_ax(ax[0], scene, iteration, radius=20)\n", + "_plot_scene_on_ax(ax[1], scene, iteration, radius=50)\n", + "_plot_scene_on_ax(ax[2], scene, iteration, radius=100)\n", "\n", - "# scenes[scene_index].log_name" + "plt.show()\n" ] }, { From 24f4dc4528354937bd251c466bcfbabb59e90394 Mon Sep 17 00:00:00 2001 From: jbwang <1159270049@qq.com> Date: Wed, 15 Oct 2025 15:47:12 +0800 Subject: [PATCH 080/145] merge dev_v0.0.7 into kitti360 to align codebase --- ...load_sensor.py => kitti360_load_sensor.py} | 0 .../kitti_360/kitti_360_data_converter.py | 251 ++++++++---------- .../datasets/kitti_360/kitti_360_helper.py | 2 +- .../kitti_360/kitti_360_map_conversion.py | 121 ++++++--- .../kitti_360/preprocess_detection.py | 4 +- .../scene/arrow/utils/arrow_getters.py | 2 +- .../datasets/kitti360_dataset.yaml | 9 +- 7 files changed, 203 insertions(+), 186 deletions(-) rename d123/conversion/datasets/kitti_360/{load_sensor.py => kitti360_load_sensor.py} (100%) rename d123/script/config/{ => conversion}/datasets/kitti360_dataset.yaml (73%) diff --git a/d123/conversion/datasets/kitti_360/load_sensor.py b/d123/conversion/datasets/kitti_360/kitti360_load_sensor.py similarity index 100% rename 
from d123/conversion/datasets/kitti_360/load_sensor.py rename to d123/conversion/datasets/kitti_360/kitti360_load_sensor.py diff --git a/d123/conversion/datasets/kitti_360/kitti_360_data_converter.py b/d123/conversion/datasets/kitti_360/kitti_360_data_converter.py index 69ed6f8b..833493bd 100644 --- a/d123/conversion/datasets/kitti_360/kitti_360_data_converter.py +++ b/d123/conversion/datasets/kitti_360/kitti_360_data_converter.py @@ -13,7 +13,6 @@ import copy from collections import defaultdict import datetime -import hashlib import xml.etree.ElementTree as ET import pyarrow as pa from PIL import Image @@ -40,17 +39,24 @@ FisheyeMEIProjection, ) from d123.datatypes.sensors.lidar.lidar import LiDARMetadata, LiDARType -from d123.datasets.utils.sensor.lidar_index_registry import Kitti360LidarIndex +from d123.conversion.utils.sensor_utils.lidar_index_registry import Kitti360LidarIndex from d123.datatypes.time.time_point import TimePoint from d123.datatypes.vehicle_state.ego_state import DynamicStateSE3, EgoStateSE3, EgoStateSE3Index from d123.datatypes.vehicle_state.vehicle_parameters import get_kitti360_station_wagon_parameters,rear_axle_se3_to_center_se3 from d123.common.utils.arrow_helper import open_arrow_table, write_arrow_table -from d123.datasets.raw_data_converter import DataConverterConfig, RawDataConverter -from d123.datasets.utils.arrow_ipc_writer import ArrowLogWriter +from d123.common.utils.uuid import create_deterministic_uuid +from d123.conversion.abstract_dataset_converter import AbstractDatasetConverter +from d123.conversion.dataset_converter_config import DatasetConverterConfig +from d123.conversion.log_writer.abstract_log_writer import AbstractLogWriter +from d123.conversion.log_writer.arrow_log_writer import ArrowLogWriter +from d123.conversion.map_writer.abstract_map_writer import AbstractMapWriter +from d123.datatypes.maps.map_metadata import MapMetadata from d123.datatypes.scene.scene_metadata import LogMetadata -from d123.datasets.kitti_360.kitti_360_helper import KITTI360Bbox3D, KITTI3602NUPLAN_IMU_CALIBRATION, get_lidar_extrinsic -from d123.datasets.kitti_360.labels import KIITI360_DETECTION_NAME_DICT,kittiId2label,BBOX_LABLES_TO_DETECTION_NAME_DICT -from d123.datasets.kitti_360.kitti_360_map_conversion import convert_kitti360_map +from d123.conversion.datasets.kitti_360.kitti_360_helper import KITTI360Bbox3D, KITTI3602NUPLAN_IMU_CALIBRATION, get_lidar_extrinsic +from d123.conversion.datasets.kitti_360.labels import KIITI360_DETECTION_NAME_DICT,kittiId2label,BBOX_LABLES_TO_DETECTION_NAME_DICT +from d123.conversion.datasets.kitti_360.kitti_360_map_conversion import ( + convert_kitti360_map_with_writer +) from d123.geometry import BoundingBoxSE3, BoundingBoxSE3Index, StateSE3, Vector3D, Vector3DIndex from d123.geometry.rotation import EulerAngles @@ -91,26 +97,38 @@ } D123_DEVKIT_ROOT = Path(os.environ["D123_DEVKIT_ROOT"]) -PREPOCESS_DETECTION_DIR = D123_DEVKIT_ROOT / "d123" / "dataset" / "dataset_specific" / "kitti_360" / "detection_preprocess" - -def create_token(input_data: str) -> str: - # TODO: Refactor this function. - # TODO: Add a general function to create tokens from arbitrary data. 
- if isinstance(input_data, str): - input_data = input_data.encode("utf-8") - - hash_obj = hashlib.sha256(input_data) - return hash_obj.hexdigest()[:16] +def create_token(split: str, log_name: str, timestamp_us: int, misc: Optional[str] = None) -> str: + """Create a deterministic UUID-based token for KITTI-360 data. + + :param split: The data split (e.g., "kitti360") + :param log_name: The name of the log without file extension + :param timestamp_us: The timestamp in microseconds + :param misc: Any additional information to include in the UUID, defaults to None + :return: The generated deterministic UUID as hex string + """ + uuid_obj = create_deterministic_uuid(split=split, log_name=log_name, timestamp_us=timestamp_us, misc=misc) + return uuid_obj.hex + +def get_kitti360_map_metadata(split: str, log_name: str) -> MapMetadata: + return MapMetadata( + dataset="kitti360", + split=split, + log_name=log_name, + location=log_name, + map_has_z=True, + map_is_local=True, + ) -class Kitti360DataConverter(RawDataConverter): +class Kitti360DataConverter(AbstractDatasetConverter): def __init__( self, splits: List[str], log_path: Union[Path, str], - data_converter_config: DataConverterConfig, + dataset_converter_config: DatasetConverterConfig, ) -> None: - super().__init__(data_converter_config) + super().__init__(dataset_converter_config) for split in splits: assert ( split in self.get_available_splits() @@ -118,13 +136,17 @@ def __init__( self._splits: List[str] = splits self._log_path: Path = Path(log_path) - self._log_paths_per_split: Dict[str, List[Path]] = self._collect_log_paths() + self._log_paths_and_split: List[Tuple[Path, str]] = self._collect_log_paths() + + self._total_maps = len(self._log_paths_and_split) # Each log has its own map + self._total_logs = len(self._log_paths_and_split) - def _collect_log_paths(self) -> Dict[str, List[Path]]: + def _collect_log_paths(self) -> List[Tuple[Path, str]]: """ - Collect candidate sequence folders under data_2d_raw that end with '_sync', - and keep only those sequences that are present in ALL required modality roots - (e.g., data_2d_semantics, data_3d_raw, etc.). + Collect candidate sequence folders under data_2d_raw that end with '_sync', + and keep only those sequences that are present in ALL required modality roots + (e.g., data_2d_semantics, data_3d_raw, etc.). + Returns a list of (log_path, split) tuples. """ missing_roots = [str(p) for p in KITTI360_REQUIRED_MODALITY_ROOTS.values() if not p.exists()] if missing_roots: @@ -141,7 +163,7 @@ def _has_modality(seq_name: str, modality_name: str, root: Path) -> bool: else: return (root / seq_name).exists() - valid_seqs: List[Path] = [] + log_paths_and_split: List[Tuple[Path, str]] = [] for seq_dir in candidates: seq_name = seq_dir.name missing_modalities = [ @@ -150,115 +172,72 @@ def _has_modality(seq_name: str, modality_name: str, root: Path) -> bool: if not _has_modality(seq_name, modality_name, root) ] if not missing_modalities: - valid_seqs.append(seq_dir) #KITTI360_DATA_ROOT / DIR_2D_RAW /seq_name + log_paths_and_split.append((seq_dir, "kitti360")) else: logging.info( f"Sequence '{seq_name}' skipped: missing modalities {missing_modalities}. 
" f"Root: {KITTI360_DATA_ROOT}" ) - logging.info(f"vadid sequences found: {valid_seqs}") - return {"kitti360": valid_seqs} + + logging.info(f"Valid sequences found: {len(log_paths_and_split)}") + return log_paths_and_split def get_available_splits(self) -> List[str]: """Returns a list of available raw data types.""" return ["kitti360"] - - def convert_maps(self, worker: WorkerPool) -> None: - log_args = [ - { - "log_path": log_path, - "split": split, - } - for split, log_paths in self._log_paths_per_split.items() - for log_path in log_paths - ] - worker_map( - worker, - partial( - convert_kitti360_map_to_gpkg, - data_converter_config=self.data_converter_config - ), - log_args, - ) - - def convert_logs(self, worker: WorkerPool) -> None: - log_args = [ - { - "log_path": log_path, - "split": split, - } - for split, log_paths in self._log_paths_per_split.items() - for log_path in log_paths - ] - - worker_map( - worker, - partial( - convert_kitti360_log_to_arrow, - data_converter_config=self.data_converter_config, - ), - log_args, - ) - -def convert_kitti360_map_to_gpkg( - args: List[Dict[str, Union[List[str], List[Path]]]], data_converter_config: DataConverterConfig -) -> List[Any]: - for log_info in args: - log_path: Path = log_info["log_path"] - split: str = log_info["split"] - log_name = log_path.stem - - D123_MAPS_ROOT = Path(os.environ.get("D123_MAPS_ROOT")) - map_path = D123_MAPS_ROOT / split / f"{log_name}.gpkg" - #map_path = data_converter_config.output_path / "maps" / split / f"{log_name}.gpkg" - map_path.parent.mkdir(parents=True, exist_ok=True) - if data_converter_config.force_map_conversion or not map_path.exists(): - map_path.unlink(missing_ok=True) - convert_kitti360_map(log_name, map_path) - return [] - -def convert_kitti360_log_to_arrow( - args: List[Dict[str, Union[List[str], List[Path]]]], data_converter_config: DataConverterConfig -) -> List[Any]: - for log_info in args: - log_path: Path = log_info["log_path"] - split: str = log_info["split"] - log_name = log_path.stem - - if not log_path.exists(): - raise FileNotFoundError(f"Log path {log_path} does not exist.") - log_file_path = data_converter_config.output_path / split / f"{log_name}.arrow" - - if data_converter_config.force_log_conversion or not log_file_path.exists(): - log_file_path.unlink(missing_ok=True) - if not log_file_path.parent.exists(): - log_file_path.parent.mkdir(parents=True, exist_ok=True) - - log_metadata = LogMetadata( - dataset="kitti360", - split=split, - log_name=log_name, - location=log_name, - timestep_seconds=KITTI360_DT, - vehicle_parameters=get_kitti360_station_wagon_parameters(), - camera_metadata=get_kitti360_camera_metadata(), - lidar_metadata=get_kitti360_lidar_metadata(), - map_has_z=True, - map_is_local=True, - ) - - log_writer = ArrowLogWriter( - log_path=log_file_path, - data_converter_config=data_converter_config, - log_metadata=log_metadata, - ) - - _write_recording_table(log_name, log_writer, log_file_path, data_converter_config) - - gc.collect() - return [] - + def get_number_of_maps(self) -> int: + """Returns the number of available raw data maps for conversion.""" + return self._total_maps + + def get_number_of_logs(self) -> int: + """Returns the number of available raw data logs for conversion.""" + return self._total_logs + + def convert_map(self, map_index: int, map_writer: AbstractMapWriter) -> None: + """ + Convert a single map in raw data format to the uniform 123D format. + :param map_index: The index of the map to convert. 
+ :param map_writer: The map writer to use for writing the converted map. + """ + source_log_path, split = self._log_paths_and_split[map_index] + log_name = source_log_path.stem + + map_metadata = get_kitti360_map_metadata(split, log_name) + + map_needs_writing = map_writer.reset(self.dataset_converter_config, map_metadata) + if map_needs_writing: + convert_kitti360_map_with_writer(log_name, map_writer) + + map_writer.close() + + def convert_log(self, log_index: int, log_writer: AbstractLogWriter) -> None: + """ + Convert a single log in raw data format to the uniform 123D format. + :param log_index: The index of the log to convert. + :param log_writer: The log writer to use for writing the converted log. + """ + source_log_path, split = self._log_paths_and_split[log_index] + log_name = source_log_path.stem + + # Create log metadata + log_metadata = LogMetadata( + dataset="kitti360", + split=split, + log_name=log_name, + location=log_name, + timestep_seconds=KITTI360_DT, + vehicle_parameters=get_kitti360_station_wagon_parameters(), + camera_metadata=get_kitti360_camera_metadata(), + lidar_metadata=get_kitti360_lidar_metadata(), + map_metadata=get_kitti360_map_metadata(split, log_name) + ) + + log_needs_writing = log_writer.reset(self.dataset_converter_config, log_metadata) + if log_needs_writing: + _write_recording_table(log_name, log_writer, self.dataset_converter_config) + + log_writer.close() def get_kitti360_camera_metadata() -> Dict[Union[PinholeCameraType, FisheyeMEICameraType], Union[PinholeCameraMetadata, FisheyeMEICameraMetadata]]: @@ -359,9 +338,8 @@ def get_kitti360_lidar_metadata() -> Dict[LiDARType, LiDARMetadata]: def _write_recording_table( log_name: str, - log_writer: ArrowLogWriter, - log_file_path: Path, - data_converter_config: DataConverterConfig + log_writer: AbstractLogWriter, + data_converter_config: DatasetConverterConfig ) -> None: ts_list: List[TimePoint] = _read_timestamps(log_name) @@ -375,8 +353,7 @@ def _write_recording_table( cameras = _extract_cameras(log_name, valid_idx, data_converter_config) lidars = _extract_lidar(log_name, valid_idx, data_converter_config) - log_writer.add_row( - token=create_token(f"{log_name}_{idx}"), + log_writer.write( timestamp=ts_list[valid_idx], ego_state=ego_state_all[idx], box_detections=box_detection_wrapper_all[valid_idx], @@ -387,12 +364,10 @@ def _write_recording_table( route_lane_group_ids=None, ) - log_writer.close() - - if SORT_BY_TIMESTAMP: - recording_table = open_arrow_table(log_file_path) - recording_table = recording_table.sort_by([("timestamp", "ascending")]) - write_arrow_table(recording_table, log_file_path) + # if SORT_BY_TIMESTAMP: + # recording_table = open_arrow_table(log_file_path) + # recording_table = recording_table.sort_by([("timestamp", "ascending")]) + # write_arrow_table(recording_table, log_file_path) def _read_timestamps(log_name: str) -> Optional[List[TimePoint]]: """ @@ -627,7 +602,7 @@ def _extract_detections( box_detection_wrapper_all.append(BoxDetectionWrapper(box_detections=box_detections)) return box_detection_wrapper_all -def _extract_lidar(log_name: str, idx: int, data_converter_config: DataConverterConfig) -> Dict[LiDARType, Optional[str]]: +def _extract_lidar(log_name: str, idx: int, data_converter_config: DatasetConverterConfig) -> Dict[LiDARType, Optional[str]]: #NOTE special case for sequence 2013_05_28_drive_0002_sync which has no lidar data before frame 4391 if log_name == "2013_05_28_drive_0002_sync" and idx <= 4390: @@ -645,7 +620,7 @@ def _extract_lidar(log_name: str, idx: int, 
data_converter_config: DataConverter return {LiDARType.LIDAR_TOP: lidar} def _extract_cameras( - log_name: str, idx: int, data_converter_config: DataConverterConfig + log_name: str, idx: int, data_converter_config: DatasetConverterConfig ) -> Dict[Union[PinholeCameraType, FisheyeMEICameraType], Optional[Tuple[Union[str, bytes], StateSE3]]]: camera_dict: Dict[Union[PinholeCameraType, FisheyeMEICameraType], Optional[Tuple[Union[str, bytes], StateSE3]]] = {} diff --git a/d123/conversion/datasets/kitti_360/kitti_360_helper.py b/d123/conversion/datasets/kitti_360/kitti_360_helper.py index 01c3d1fe..608e4352 100644 --- a/d123/conversion/datasets/kitti_360/kitti_360_helper.py +++ b/d123/conversion/datasets/kitti_360/kitti_360_helper.py @@ -9,7 +9,7 @@ from d123.geometry import BoundingBoxSE3, StateSE3 from d123.geometry.polyline import Polyline3D from d123.geometry.rotation import EulerAngles -from d123.datasets.kitti_360.labels import kittiId2label,BBOX_LABLES_TO_DETECTION_NAME_DICT +from d123.conversion.datasets.kitti_360.labels import kittiId2label,BBOX_LABLES_TO_DETECTION_NAME_DICT import os from pathlib import Path diff --git a/d123/conversion/datasets/kitti_360/kitti_360_map_conversion.py b/d123/conversion/datasets/kitti_360/kitti_360_map_conversion.py index 643a13c6..17f047dc 100644 --- a/d123/conversion/datasets/kitti_360/kitti_360_map_conversion.py +++ b/d123/conversion/datasets/kitti_360/kitti_360_map_conversion.py @@ -11,14 +11,20 @@ from shapely.geometry import LineString import shapely.geometry as geom -from d123.datasets.utils.maps.road_edge.road_edge_2d_utils import ( +from d123.conversion.utils.map_utils.road_edge.road_edge_2d_utils import ( get_road_edge_linear_rings, split_line_geometry_by_max_length, ) -from d123.datatypes.maps.gpkg.utils import get_all_rows_with_value, get_row_with_value +from d123.datatypes.maps.gpkg.gpkg_utils import get_all_rows_with_value, get_row_with_value from d123.datatypes.maps.map_datatypes import MapLayer, RoadEdgeType, RoadLineType from d123.geometry.polyline import Polyline3D -from d123.datasets.kitti_360.kitti_360_helper import KITTI360_MAP_Bbox3D +from d123.conversion.datasets.kitti_360.kitti_360_helper import KITTI360_MAP_Bbox3D +from d123.conversion.map_writer.abstract_map_writer import AbstractMapWriter +from d123.datatypes.maps.cache.cache_map_objects import ( + CacheGenericDrivable, + CacheWalkway, + CacheRoadEdge, +) MAX_ROAD_EDGE_LENGTH = 100.0 # meters, used to filter out very long road edges @@ -28,7 +34,7 @@ PATH_3D_BBOX_ROOT: Path = KITTI360_DATA_ROOT / DIR_3D_BBOX -KIITI360_MAP_BBOX = [ +KITTI360_MAP_BBOX = [ "road", "sidewalk", # "railtrack", @@ -36,39 +42,6 @@ # "driveway", ] -def convert_kitti360_map(log_name: str, map_path: Path) -> None: - - xml_path = PATH_3D_BBOX_ROOT / "train_full" / f"{log_name}.xml" - - if not xml_path.exists(): - raise FileNotFoundError(f"BBox 3D file not found: {xml_path}") - - tree = ET.parse(xml_path) - root = tree.getroot() - objs: List[KITTI360_MAP_Bbox3D] = [] - for child in root: - label = child.find('label').text - if child.find("transform") is None or label not in KIITI360_MAP_BBOX: - continue - obj = KITTI360_MAP_Bbox3D() - obj.parseBbox(child) - objs.append(obj) - - dataframes: Dict[MapLayer, gpd.GeoDataFrame] = {} - dataframes[MapLayer.LANE] = _get_none_data() - dataframes[MapLayer.LANE_GROUP] = _get_none_data() - dataframes[MapLayer.INTERSECTION] = _get_none_data() - dataframes[MapLayer.CROSSWALK] = _get_none_data() - dataframes[MapLayer.WALKWAY] = _extract_walkway_df(objs) - 
dataframes[MapLayer.CARPARK] = _get_none_data() - dataframes[MapLayer.GENERIC_DRIVABLE] = _extract_generic_drivable_df(objs) - dataframes[MapLayer.ROAD_EDGE] = _extract_road_edge_df(objs) - dataframes[MapLayer.ROAD_LINE] = _get_none_data() - - map_file_name = map_path - for layer, gdf in dataframes.items(): - gdf.to_file(map_file_name, layer=layer.serialize(), driver="GPKG", mode="a") - def _get_none_data() -> gpd.GeoDataFrame: ids = [] geometries = [] @@ -85,7 +58,7 @@ def _extract_generic_drivable_df(objs: list[KITTI360_MAP_Bbox3D]) -> gpd.GeoData continue ids.append(obj.id) outlines.append(obj.vertices.linestring) - geometries.append(geom.Polygon(obj.vertices.array[:, :2])) + geometries.append(geom.Polygon(obj.vertices.array[:, :3])) data = pd.DataFrame({"id": ids, "outline": outlines}) gdf = gpd.GeoDataFrame(data, geometry=geometries) return gdf @@ -99,7 +72,7 @@ def _extract_walkway_df(objs: list[KITTI360_MAP_Bbox3D]) -> gpd.GeoDataFrame: continue ids.append(obj.id) outlines.append(obj.vertices.linestring) - geometries.append(geom.Polygon(obj.vertices.array[:, :2])) + geometries.append(geom.Polygon(obj.vertices.array[:, :3])) data = pd.DataFrame({"id": ids, "outline": outlines}) gdf = gpd.GeoDataFrame(data, geometry=geometries) @@ -110,7 +83,7 @@ def _extract_road_edge_df(objs: list[KITTI360_MAP_Bbox3D]) -> gpd.GeoDataFrame: for obj in objs: if obj.label != "road": continue - geometries.append(geom.Polygon(obj.vertices.array[:, :2])) + geometries.append(geom.Polygon(obj.vertices.array[:, :3])) road_edge_linear_rings = get_road_edge_linear_rings(geometries) road_edges = split_line_geometry_by_max_length(road_edge_linear_rings, MAX_ROAD_EDGE_LENGTH) @@ -122,4 +95,70 @@ def _extract_road_edge_df(objs: list[KITTI360_MAP_Bbox3D]) -> gpd.GeoDataFrame: road_edge_types.append(int(RoadEdgeType.ROAD_EDGE_BOUNDARY)) data = pd.DataFrame({"id": ids, "road_edge_type": road_edge_types}) - return gpd.GeoDataFrame(data, geometry=road_edges) \ No newline at end of file + return gpd.GeoDataFrame(data, geometry=road_edges) + + +def convert_kitti360_map_with_writer(log_name: str, map_writer: AbstractMapWriter) -> None: + """ + Convert KITTI-360 map data using the provided map writer. + This function extracts map data from KITTI-360 XML files and writes them using the map writer interface. 
+ + :param log_name: The name of the log to convert + :param map_writer: The map writer to use for writing the converted map + """ + xml_path = PATH_3D_BBOX_ROOT / "train_full" / f"{log_name}.xml" + if not xml_path.exists(): + xml_path = PATH_3D_BBOX_ROOT / "train" / f"{log_name}.xml" + + if not xml_path.exists(): + raise FileNotFoundError(f"BBox 3D file not found: {xml_path}") + + tree = ET.parse(xml_path) + root = tree.getroot() + objs: List[KITTI360_MAP_Bbox3D] = [] + + for child in root: + label = child.find('label').text + if child.find("transform") is None or label not in KITTI360_MAP_BBOX: + continue + obj = KITTI360_MAP_Bbox3D() + obj.parseBbox(child) + objs.append(obj) + + + generic_drivable_gdf = _extract_generic_drivable_df(objs) + walkway_gdf = _extract_walkway_df(objs) + road_edge_gdf = _extract_road_edge_df(objs) + + for idx, row in generic_drivable_gdf.iterrows(): + if not row.geometry.is_empty: + map_writer.write_generic_drivable( + CacheGenericDrivable( + object_id=idx, + geometry=row.geometry + ) + ) + + for idx, row in walkway_gdf.iterrows(): + if not row.geometry.is_empty: + map_writer.write_walkway( + CacheWalkway( + object_id=idx, + geometry=row.geometry + ) + ) + + for idx, row in road_edge_gdf.iterrows(): + if not row.geometry.is_empty: + if hasattr(row.geometry, 'exterior'): + road_edge_line = row.geometry.exterior + else: + road_edge_line = row.geometry + + map_writer.write_road_edge( + CacheRoadEdge( + object_id=idx, + road_edge_type=RoadEdgeType.ROAD_EDGE_BOUNDARY, + polyline=Polyline3D.from_linestring(road_edge_line) + ) + ) \ No newline at end of file diff --git a/d123/conversion/datasets/kitti_360/preprocess_detection.py b/d123/conversion/datasets/kitti_360/preprocess_detection.py index 92806736..18bbc125 100644 --- a/d123/conversion/datasets/kitti_360/preprocess_detection.py +++ b/d123/conversion/datasets/kitti_360/preprocess_detection.py @@ -31,8 +31,8 @@ PATH_3D_BBOX_ROOT = KITTI360_DATA_ROOT / DIR_3D_BBOX PATH_POSES_ROOT = KITTI360_DATA_ROOT / DIR_POSES -from d123.datasets.kitti_360.kitti_360_helper import KITTI360Bbox3D, KITTI3602NUPLAN_IMU_CALIBRATION, get_lidar_extrinsic -from d123.datasets.kitti_360.labels import KIITI360_DETECTION_NAME_DICT, kittiId2label, BBOX_LABLES_TO_DETECTION_NAME_DICT +from d123.conversion.datasets.kitti_360.kitti_360_helper import KITTI360Bbox3D, KITTI3602NUPLAN_IMU_CALIBRATION, get_lidar_extrinsic +from d123.conversion.datasets.kitti_360.labels import KIITI360_DETECTION_NAME_DICT, kittiId2label, BBOX_LABLES_TO_DETECTION_NAME_DICT def _bbox_xml_path(log_name: str) -> Path: if log_name == "2013_05_28_drive_0004_sync": diff --git a/d123/datatypes/scene/arrow/utils/arrow_getters.py b/d123/datatypes/scene/arrow/utils/arrow_getters.py index 03951f07..fb810af4 100644 --- a/d123/datatypes/scene/arrow/utils/arrow_getters.py +++ b/d123/datatypes/scene/arrow/utils/arrow_getters.py @@ -167,7 +167,7 @@ def get_lidar_from_arrow_table( elif log_metadata.dataset == "wopd": raise NotImplementedError elif log_metadata.dataset == "kitti360": - from d123.datasets.kitti_360.load_sensor import load_kitti360_lidar_from_path + from d123.conversion.datasets.kitti_360.kitti360_load_sensor import load_kitti360_lidar_from_path lidar = load_kitti360_lidar_from_path(full_lidar_path, lidar_metadata) else: diff --git a/d123/script/config/datasets/kitti360_dataset.yaml b/d123/script/config/conversion/datasets/kitti360_dataset.yaml similarity index 73% rename from d123/script/config/datasets/kitti360_dataset.yaml rename to 
d123/script/config/conversion/datasets/kitti360_dataset.yaml index be7567bf..c18c7ec3 100644 --- a/d123/script/config/datasets/kitti360_dataset.yaml +++ b/d123/script/config/conversion/datasets/kitti360_dataset.yaml @@ -1,17 +1,20 @@ kitti360_dataset: - _target_: d123.datasets.kitti_360.kitti_360_data_converter.Kitti360DataConverter + _target_: d123.conversion.datasets.kitti_360.kitti_360_data_converter.Kitti360DataConverter _convert_: 'all' splits: ["kitti360"] log_path: ${oc.env:KITTI360_DATA_ROOT} - data_converter_config: - _target_: d123.datasets.raw_data_converter.DataConverterConfig + dataset_converter_config: + _target_: d123.conversion.dataset_converter_config.DatasetConverterConfig _convert_: 'all' output_path: ${d123_data_root} force_log_conversion: ${force_log_conversion} force_map_conversion: ${force_map_conversion} + + # Map + include_map: true # Ego include_ego: true From 8c5a705a07445138c9c3ed270e9b61e468c8dc07 Mon Sep 17 00:00:00 2001 From: Daniel Dauner Date: Wed, 15 Oct 2025 21:16:57 +0200 Subject: [PATCH 081/145] Delete some old notebooks (#42) --- notebooks/av2/delete_me.ipynb | 480 ------------------ notebooks/av2/delete_me_map.ipynb | 313 ------------ notebooks/carla/vehicle_params.ipynb | 127 ----- notebooks/dataset/dataset.ipynb | 148 ------ notebooks/dataset/parquet_testing.ipynb | 133 ----- notebooks/nuplan/nuplan_sensor_loading.ipynb | 501 ------------------- 6 files changed, 1702 deletions(-) delete mode 100644 notebooks/av2/delete_me.ipynb delete mode 100644 notebooks/av2/delete_me_map.ipynb delete mode 100644 notebooks/carla/vehicle_params.ipynb delete mode 100644 notebooks/dataset/dataset.ipynb delete mode 100644 notebooks/dataset/parquet_testing.ipynb delete mode 100644 notebooks/nuplan/nuplan_sensor_loading.ipynb diff --git a/notebooks/av2/delete_me.ipynb b/notebooks/av2/delete_me.ipynb deleted file mode 100644 index c3f0188f..00000000 --- a/notebooks/av2/delete_me.ipynb +++ /dev/null @@ -1,480 +0,0 @@ -{ - "cells": [ - { - "cell_type": "code", - "execution_count": null, - "id": "0", - "metadata": {}, - "outputs": [], - "source": [ - "\n", - "\n", - "import matplotlib.pyplot as plt\n", - "from pathlib import Path\n", - "\n", - "import numpy as np\n", - "import io\n", - "\n", - "from PIL import Image\n", - "\n", - "import pandas as pd" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "1", - "metadata": {}, - "outputs": [], - "source": [ - "# split = \"test\"\n", - "\n", - "\n", - "split = \"train\"\n", - "# split = \"val\"\n", - "\n", - "split_folder = Path(f\"/media/nvme1/argoverse/sensor_mini/{split}\")\n", - "log_folder = sorted(list(split_folder.iterdir()))[0]\n", - "\n", - "log_folder.name" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "2", - "metadata": {}, - "outputs": [], - "source": [ - "def _ls(path: Path):\n", - " \"\"\"List all files in the given path.\"\"\"\n", - " return [f.name for f in path.iterdir()]\n", - "\n", - "\n", - "def get_arrow_from_file(file_path: Path):\n", - " if file_path.suffix == \".parquet\":\n", - " import pyarrow.parquet as pq\n", - " return pq.read_table(file_path)\n", - " elif file_path.suffix == \".feather\":\n", - " import pyarrow.feather as feather\n", - " return feather.read_feather(file_path)\n", - " else:\n", - " raise ValueError(f\"Unsupported file type: {file_path.suffix}\")" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "3", - "metadata": {}, - "outputs": [], - "source": [ - "\n", - "\n", - "\n", - "# 1. 
Calibration \n", - "calibration_folder = log_folder / \"calibration\"\n", - "\n", - "# 1.1 -> ego to sensor transformation\n", - "egovehicle_SE3_sensor_file = log_folder / \"calibration\" / \"egovehicle_SE3_sensor.feather\"\n", - "egovehicle_se3_sensor_table = get_arrow_from_file(egovehicle_SE3_sensor_file)\n", - "\n", - "egovehicle_se3_sensor_table" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "4", - "metadata": {}, - "outputs": [], - "source": [ - "# 1.2 -> intrinsic parameters\n", - "intrinsics_file = log_folder / \"calibration\" / \"intrinsics.feather\"\n", - "intrinsics_table = get_arrow_from_file(intrinsics_file)\n", - "\n", - "\n", - "intrinsics_table" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "5", - "metadata": {}, - "outputs": [], - "source": [ - "# 2. Ego Vehicle\n", - "city_SE3_egovehicle_file = log_folder / \"city_SE3_egovehicle.feather\"\n", - "city_se3_egovehicle_table = get_arrow_from_file(city_SE3_egovehicle_file)\n", - "\n", - "city_se3_egovehicle_table" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "6", - "metadata": {}, - "outputs": [], - "source": [ - "\n", - "# # 3. Map\n", - "# # map_folder = log_folder / \"map\"\n", - "# # print(_ls(map_folder))\n", - "\n", - "# # # 4. sensors\n", - "# # print(_ls(log_folder))\n", - "\n", - "# # from d123.conversion.av2.av2_data_converter import AV2SensorDataConverter\n", - "# from d123.conversion.av2.av2_data_converter import AV2SensorDataConverter\n", - "\n", - "# # AV2SensorDataConverter([])" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "7", - "metadata": {}, - "outputs": [], - "source": [ - "\n", - "# 5. Annotations\n", - "annotations_file = log_folder / \"annotations.feather\"\n", - "annotations_table = get_arrow_from_file(annotations_file)\n", - "\n", - "# print(_ls(annotations_folder))\n", - "annotations_table" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "8", - "metadata": {}, - "outputs": [], - "source": [ - "camera_name = \"ring_side_left\"\n", - "\n", - "camera_folder = log_folder / \"sensors\" / \"cameras\" / camera_name\n", - "camera_files = sorted(list(camera_folder.iterdir()))\n", - "\n", - "\n", - "def jpg_to_array(file_path):\n", - " image = np.array(Image.open(io.BytesIO(file_path.read_bytes())))\n", - " return image\n", - "\n", - "plt.imshow(jpg_to_array(camera_files[1]))\n", - "print(len(camera_files))" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "9", - "metadata": {}, - "outputs": [], - "source": [ - "\n", - "\n", - "lidar_folder = log_folder / \"sensors\" / \"lidar\" \n", - "lidar_files = sorted(list(lidar_folder.iterdir()))\n", - "\n", - "\n", - "get_arrow_from_file(lidar_files[0])\n", - "\n", - "print(lidar_files)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "10", - "metadata": {}, - "outputs": [], - "source": [ - "# Testing sensor syn dataframes\n", - "\n", - "from typing import Optional\n", - "from d123.conversion.av2.av2_constants import AV2_CAMERA_TYPE_MAPPING\n", - "from d123.conversion.av2.av2_helper import build_sensor_dataframe, build_synchronization_dataframe\n", - "\n", - "\n", - "sensor_df = build_sensor_dataframe(log_folder)\n", - "synchronization_df = build_synchronization_dataframe(sensor_df)\n", - "dataset_dir = split_folder.parent\n", - "\n", - "\n", - "def find_closest_target_fpath(\n", - " split: str,\n", - " log_id: str,\n", - " src_sensor_name: str,\n", - " src_timestamp_ns: int,\n", - " target_sensor_name: 
str,\n", - ") -> Optional[Path]:\n", - " \"\"\"Find the file path to the target sensor from a source sensor.\"\"\"\n", - " if synchronization_df is None:\n", - " raise RuntimeError(\"Requested synchronized data, but the synchronization database has not been created.\")\n", - "\n", - " src_timedelta_ns = pd.Timedelta(src_timestamp_ns)\n", - " src_to_target_records = synchronization_df.loc[(split, log_id, src_sensor_name)].set_index(src_sensor_name)\n", - " index = src_to_target_records.index\n", - " if src_timedelta_ns not in index:\n", - " # This timestamp does not correspond to any lidar sweep.\n", - " return None\n", - "\n", - " # Grab the synchronization record.\n", - " target_timestamp_ns = src_to_target_records.loc[src_timedelta_ns, target_sensor_name]\n", - " if pd.isna(target_timestamp_ns):\n", - " # No match was found within tolerance.\n", - " return None\n", - "\n", - " sensor_dir = dataset_dir / split / log_id / \"sensors\"\n", - " valid_cameras = list(AV2_CAMERA_TYPE_MAPPING.keys())\n", - " timestamp_ns_str = str(target_timestamp_ns.asm8.item())\n", - " if target_sensor_name in valid_cameras:\n", - " target_path = sensor_dir / \"cameras\" / target_sensor_name / f\"{timestamp_ns_str}.jpg\"\n", - " else:\n", - " target_path = sensor_dir / target_sensor_name / f\"{timestamp_ns_str}.feather\"\n", - " return target_path" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "11", - "metadata": {}, - "outputs": [], - "source": [ - "# split=\"train\"\n", - "# log_id=\"00a6ffc1-6ce9-3bc3-a060-6006e9893a1a\"\n", - "# src_sensor_name=\"ring_front_center\"\n", - "# src_timestamp_ns=315967376959702000\n", - "# target_sensor_name=\"lidar\"\n", - "\n", - "# src_to_target_records = synchronization_df.loc[(\"train\", \"\", src_sensor_name)]\n", - "# # synchronization_df" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "12", - "metadata": {}, - "outputs": [], - "source": [ - "lidar_timestamp_ns_list = [int(path.stem) for path in lidar_files]\n", - "\n", - "\n", - "\n", - "\n", - "for lidar_timestamp_ns in lidar_timestamp_ns_list:\n", - "\n", - " fpath = find_closest_target_fpath(\n", - " split=\"train\",\n", - " log_id=log_folder.name,\n", - " src_sensor_name=\"lidar\",\n", - " src_timestamp_ns=lidar_timestamp_ns,\n", - " target_sensor_name=\"ring_front_center\",\n", - " )\n", - " if fpath is None:\n", - " continue\n", - " # print(fpath)\n", - "\n", - " egovehicle_se3_sensor_table" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "13", - "metadata": {}, - "outputs": [], - "source": [ - "for _, row in egovehicle_se3_sensor_table.iterrows():\n", - " row = row.to_dict()\n", - " print(row)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "14", - "metadata": {}, - "outputs": [], - "source": [] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "15", - "metadata": {}, - "outputs": [], - "source": [ - "import cv2\n", - "from pyquaternion import Quaternion\n", - "\n", - "lidar_timestamps = [int(f.stem) for f in lidar_files]\n", - "camera_timestamps = [int(f.stem) for f in camera_files]\n", - "\n", - "\n", - "def get_slice_with_timestamp_ns(dataframe: pd.DataFrame, timestamp_ns: int):\n", - " \"\"\"Get the index of the closest timestamp to the target timestamp.\"\"\"\n", - " return dataframe[dataframe[\"timestamp_ns\"] == timestamp_ns]\n", - "\n", - "\n", - "def find_nearest_timestamp(target_ns, timestamp_list):\n", - " timestamp_array = np.array(timestamp_list, dtype=np.int64)\n", - " idx = 
np.argmin(np.abs(timestamp_array - np.int64(target_ns)))\n",
-    "    return int(timestamp_array[idx])\n",
-    "\n",
-    "# for lidar_timestamp in lidar_timestamps:\n",
-    "#     slice = get_slice_with_timestamp_ns(annotations_table, lidar_timestamp)\n",
-    "#     assert len(slice) >= 1\n",
-    "\n",
-    "\n",
-    "\n",
-    "# ego_pose = city_SE3_egovehicle_table[city_SE3_egovehicle_table[\"timestamp_ns\"] == lidar_timestamps[10]]\n",
-    "# ego_pose_dict = ego_pose.iloc[0].to_dict()\n",
-    "\n",
-    "\n",
-    "annotations_slice = get_slice_with_timestamp_ns(annotations_table, lidar_timestamps[10])\n",
-    "for _, row in annotations_slice.iterrows():\n",
-    "#     qw = row[\"qw\"]\n",
-    "#     qx = row[\"qx\"]\n",
-    "#     qy = row[\"qy\"]\n",
-    "#     qz = row[\"qz\"]\n",
-    "#     tx_m = row[\"tx_m\"]\n",
-    "#     ty_m = row[\"ty_m\"]\n",
-    "#     tz_m = row[\"tz_m\"]\n",
-    "    print(row.to_dict())\n",
-    "\n",
-    "annotations_slice\n",
-    "\n",
-    "# qw\tqx\tqy\tqz\ttx_m\tty_m\ttz_m\n",
-    "# # def jpg_to_array(file_path):\n",
-    "\n",
-    "# camera_frames = []\n",
-    "# for lidar_timestamp in lidar_timestamps:\n",
-    "#     camera_stamp_at_lidar = find_nearest_timestamp(lidar_timestamp, camera_timestamps)\n",
-    "#     image = jpg_to_array(camera_folder / f\"{camera_stamp_at_lidar}.jpg\")\n",
-    "#     camera_frames.append(image)\n",
-    "    \n",
-    "# print(len(camera_frames))\n",
-    "# height, width, _ = camera_frames[0].shape\n",
-    "# video_path = \"camera_frames_video.mp4\"\n",
-    "# fourcc = cv2.VideoWriter_fourcc(*'mp4v')\n",
-    "# out = cv2.VideoWriter(video_path, fourcc, 10, (width, height))\n",
-    "\n",
-    "# for frame in camera_frames:\n",
-    "#     out.write(cv2.cvtColor(frame, cv2.COLOR_RGB2BGR))\n",
-    "\n",
-    "# out.release()\n",
-    "# print(f\"Saved video to {video_path}\")\n"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "id": "16",
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "\n",
-    "\n",
-    "from pyquaternion import Quaternion\n",
-    "from d123.datatypes.detections.detection_types import DetectionType\n",
-    "from d123.geometry.base import StateSE2\n",
-    "from d123.geometry.bounding_box import BoundingBoxSE2\n",
-    "from d123.common.visualization.color.config import PlotConfig\n",
-    "from d123.common.visualization.color.default import BOX_DETECTION_CONFIG\n",
-    "from d123.common.visualization.matplotlib.utils import add_shapely_polygon_to_ax\n",
-    "\n",
-    "\n",
-    "\n",
-    "fig, ax = plt.subplots(1, 1, figsize=(10, 10))\n",
-    "\n",
-    "for cuboid in sensor_data.annotations:\n",
-    "    yaw, pitch, roll = Quaternion(matrix=cuboid.dst_SE3_object.rotation).yaw_pitch_roll\n",
-    "    center = StateSE2(cuboid.dst_SE3_object.translation[0], cuboid.dst_SE3_object.translation[1], yaw)\n",
-    "    bounding_box = BoundingBoxSE2(center, cuboid.length_m, cuboid.width_m)\n",
-    "    add_shapely_polygon_to_ax(ax, bounding_box.shapely_polygon, BOX_DETECTION_CONFIG[DetectionType.VEHICLE])\n",
-    "\n",
-    "ax.set_aspect(\"equal\")\n",
-    "\n",
-    "radius = 200\n",
-    "ax.set_xlim(-radius, radius)\n",
-    "ax.set_ylim(-radius, radius)"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "id": "17",
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "bounding_box.shapely_polygon\n",
-    "\n",
-    "bounding_box.corners_array"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "id": "18",
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "\n",
-    "sensor_cache = \"/home/daniel/.cache/av2/sensor_cache.feather\"\n",
-    "get_arrow_from_file(Path(sensor_cache))"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "id": "19",
-   "metadata": {},
-   "outputs": [],
-   "source": [
- "synchronization_cache = \"/home/daniel/.cache/av2/synchronization_cache.feather\"\n", - "synchronization_cache = get_arrow_from_file(Path(synchronization_cache))\n", - "\n", - "synchronization_cache[\"sensor_name\"].unique()" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "20", - "metadata": {}, - "outputs": [], - "source": [ - "CAM_SHUTTER_INTERVAL_MS" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "d123", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.12.11" - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} diff --git a/notebooks/av2/delete_me_map.ipynb b/notebooks/av2/delete_me_map.ipynb deleted file mode 100644 index ce3ca200..00000000 --- a/notebooks/av2/delete_me_map.ipynb +++ /dev/null @@ -1,313 +0,0 @@ -{ - "cells": [ - { - "cell_type": "code", - "execution_count": null, - "id": "0", - "metadata": {}, - "outputs": [], - "source": [ - "import matplotlib.pyplot as plt\n", - "from pathlib import Path\n", - "\n", - "import numpy as np\n", - "import io\n", - "\n", - "from PIL import Image\n", - "\n", - "import pandas as pd" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "1", - "metadata": {}, - "outputs": [], - "source": [ - "# split = \"test\"\n", - "\n", - "\n", - "split = \"train\"\n", - "# split = \"val\"\n", - "\n", - "split_folder = Path(f\"/media/nvme1/argoverse/sensor_mini/{split}\")\n", - "log_folder = sorted(list(split_folder.iterdir()))[2]\n", - "\n", - "log_folder.name" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "2", - "metadata": {}, - "outputs": [], - "source": [ - "for lane_segment_id, lane_segment_dict in log_map_archive[\"lane_segments\"].items():\n", - " print(f\"Lane Segment ID: {lane_segment_id}\")" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "3", - "metadata": {}, - "outputs": [], - "source": [ - "import json\n", - "from typing import Dict, List\n", - "\n", - "from d123.geometry.line.polylines import Polyline3D\n", - "from d123.conversion.av2.av2_map_conversion import _extract_lane_group_dict\n", - "\n", - "\n", - "def _extract_polyline(data: List[Dict[str, float]], close: bool = False) -> Polyline3D:\n", - " polyline = np.array([[p[\"x\"], p[\"y\"], p[\"z\"]] for p in data], dtype=np.float64)\n", - " if close:\n", - " polyline = np.vstack([polyline, polyline[0]])\n", - "\n", - " return Polyline3D.from_array(polyline)\n", - "\n", - "\n", - "map_folder = log_folder / \"map\"\n", - "log_map_archive_path = next(map_folder.glob(\"log_map_archive_*.json\"))\n", - "\n", - "with open(log_map_archive_path, \"r\") as f:\n", - " log_map_archive = json.load(f)\n", - "\n", - "drivable_areas: Dict[int, Polyline3D] = {}\n", - "\n", - "for drivable_area_id, drivable_area_dict in log_map_archive[\"drivable_areas\"].items():\n", - " # keys: [\"area_boundary\", \"id\"]\n", - " drivable_areas[int(drivable_area_id)] = _extract_polyline(drivable_area_dict[\"area_boundary\"], close=True)\n", - "\n", - "for lane_segment_id, lane_segment_dict in log_map_archive[\"lane_segments\"].items():\n", - " # keys = [\n", - " # \"id\",\n", - " # \"is_intersection\",\n", - " # \"lane_type\",\n", - " # \"left_lane_boundary\",\n", - " # \"left_lane_mark_type\",\n", - " # \"right_lane_boundary\",\n", - " # \"right_lane_mark_type\",\n", - " 
# \"successors\",\n", - " # \"predecessors\",\n", - " # \"right_neighbor_id\",\n", - " # \"left_neighbor_id\",\n", - " # ]\n", - " lane_segment_dict[\"left_lane_boundary\"] = _extract_polyline(lane_segment_dict[\"left_lane_boundary\"])\n", - " lane_segment_dict[\"right_lane_boundary\"] = _extract_polyline(lane_segment_dict[\"right_lane_boundary\"])\n", - "\n", - "for crosswalk_id, crosswalk_dict in log_map_archive[\"pedestrian_crossings\"].items():\n", - " # keys = [\"id\", \"outline\"]\n", - " # https://github.com/argoverse/av2-api/blob/6b22766247eda941cb1953d6a58e8d5631c561da/src/av2/map/pedestrian_crossing.py\n", - "\n", - " p1, p2 = np.array([[p[\"x\"], p[\"y\"], p[\"z\"]] for p in crosswalk_dict[\"edge1\"]], dtype=np.float64)\n", - " p3, p4 = np.array([[p[\"x\"], p[\"y\"], p[\"z\"]] for p in crosswalk_dict[\"edge2\"]], dtype=np.float64)\n", - " crosswalk_dict[\"outline\"] = Polyline3D.from_array(np.array([p1, p2, p4, p3, p1], dtype=np.float64))\n", - "\n", - "\n", - "lane_group_dict = _extract_lane_group_dict(log_map_archive[\"lane_segments\"])" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "4", - "metadata": {}, - "outputs": [], - "source": [ - "from typing import Any\n", - "\n", - "import shapely\n", - "\n", - "from d123.geometry.base import Point3DIndex\n", - "import geopandas as gpd\n", - "\n", - "from d123.geometry.line.polylines import Polyline2D\n", - "from d123.geometry.occupancy_map import OccupancyMap2D\n", - "\n", - "import numpy.typing as npt\n", - "\n", - "\n", - "def _interpolate_z_on_segment(point: shapely.Point, segment_coords: npt.NDArray[np.float64]) -> float:\n", - " \"\"\"Interpolate Z coordinate along a 3D line segment.\"\"\"\n", - " p1, p2 = segment_coords[0], segment_coords[1]\n", - "\n", - " # Project point onto segment\n", - " segment_vec = p2[:2] - p1[:2]\n", - " point_vec = np.array([point.x, point.y]) - p1[:2]\n", - "\n", - " # Handle degenerate case\n", - " segment_length_sq = np.dot(segment_vec, segment_vec)\n", - " if segment_length_sq == 0:\n", - " return p1[2]\n", - "\n", - " # Calculate projection parameter\n", - " t = np.dot(point_vec, segment_vec) / segment_length_sq\n", - " t = np.clip(t, 0, 1) # Clamp to segment bounds\n", - "\n", - " # Interpolate Z\n", - " return p1[2] + t * (p2[2] - p1[2])\n", - "\n", - "\n", - "def _extract_intersection_dict(\n", - " lanes: Dict[int, Any], lane_group_dict: Dict[int, Any], max_distance: float = 0.01\n", - ") -> Dict[str, Any]:\n", - "\n", - " # 1. Collect all lane groups where at least one lane is marked as an intersection.\n", - " lane_group_intersection_dict = {}\n", - " for lane_group_id, lane_group in lane_group_dict.items():\n", - " is_intersection_lanes = [lanes[str(lane_id)][\"is_intersection\"] for lane_id in lane_group[\"lane_ids\"]]\n", - " if any(is_intersection_lanes):\n", - " lane_group_intersection_dict[lane_group_id] = lane_group\n", - "\n", - " # 2. Merge polygons of lane groups that are marked as intersections.\n", - " lane_group_intersection_geometry = {\n", - " lane_group_id: shapely.Polygon(lane_group[\"outline\"].array[:, Point3DIndex.XY])\n", - " for lane_group_id, lane_group in lane_group_intersection_dict.items()\n", - " }\n", - " intersection_polygons = gpd.GeoSeries(lane_group_intersection_geometry).union_all()\n", - "\n", - " # 3. 
Collect all intersection polygons and their lane group IDs.\n", - " intersection_dict = {}\n", - " for intersection_idx, intersection_polygon in enumerate(intersection_polygons.geoms):\n", - " if intersection_polygon.is_empty:\n", - " continue\n", - " lane_group_ids = [\n", - " lane_group_id\n", - " for lane_group_id, lane_group_polygon in lane_group_intersection_geometry.items()\n", - " if intersection_polygon.intersects(lane_group_polygon)\n", - " ]\n", - " intersection_dict[f\"intersection_{intersection_idx}\"] = {\n", - " \"id\": intersection_idx,\n", - " \"outline_2d\": Polyline2D.from_array(np.array(list(intersection_polygon.exterior.coords), dtype=np.float64)),\n", - " \"lane_group_ids\": lane_group_ids,\n", - " }\n", - "\n", - " # 4. Lift intersection outlines to 3D.\n", - " boundary_segments = []\n", - " for lane_group in lane_group_intersection_dict.values():\n", - " coords = np.array(lane_group[\"outline\"].linestring.coords, dtype=np.float64).reshape(-1, 1, 3)\n", - " segment_coords_boundary = np.concatenate([coords[:-1], coords[1:]], axis=1)\n", - " boundary_segments.append(segment_coords_boundary)\n", - "\n", - " boundary_segments = np.concatenate(boundary_segments, axis=0)\n", - " boundary_segment_linestrings = shapely.creation.linestrings(boundary_segments)\n", - " occupancy_map = OccupancyMap2D(boundary_segment_linestrings)\n", - "\n", - " for intersection_id, intersection_data in intersection_dict.items():\n", - " points_2d = intersection_data[\"outline_2d\"].array\n", - " points_3d = np.zeros((len(points_2d), 3), dtype=np.float64)\n", - " points_3d[:, :2] = points_2d\n", - "\n", - " query_points = shapely.creation.points(points_2d)\n", - " results = occupancy_map.query_nearest(query_points, max_distance=max_distance, exclusive=True)\n", - " for query_idx, geometry_idx in zip(*results):\n", - " query_point = query_points[query_idx]\n", - " segment_coords = boundary_segments[geometry_idx]\n", - " best_z = _interpolate_z_on_segment(query_point, segment_coords)\n", - " points_3d[query_idx, 2] = best_z\n", - "\n", - " intersection_dict[intersection_id][\"outline_3d\"] = Polyline3D.from_array(points_3d)\n", - "\n", - " return intersection_dict\n", - "\n", - "\n", - "intersection_dict = _extract_intersection_dict(log_map_archive[\"lane_segments\"], lane_group_dict)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "5", - "metadata": {}, - "outputs": [], - "source": [ - "from matplotlib import cm\n", - "from matplotlib.colors import Normalize\n", - "\n", - "\n", - "fig, ax = plt.subplots(figsize=(10, 10))\n", - "\n", - "for intersection_id, values in intersection_dict.items():\n", - "\n", - " outline = values[\"outline_3d\"].array\n", - " print(outline[:, 2].min(), outline[:, 2].max())\n", - "\n", - " # Normalize z values to [0, 1] for colormap mapping\n", - " norm = Normalize(vmin=outline[:, 2].min(), vmax=outline[:, 2].max())\n", - " colors = cm.viridis(norm(outline[:, 2]))\n", - "\n", - " # Plot each segment with its corresponding color\n", - " for i in range(len(outline) - 1):\n", - " ax.plot(outline[i : i + 2, 0], outline[i : i + 2, 1], color=colors[i], linewidth=2)\n", - "\n", - "ax.set_aspect(\"equal\")" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "6", - "metadata": {}, - "outputs": [], - "source": [ - "import matplotlib.pyplot as plt\n", - "\n", - "fig, ax = plt.subplots(figsize=(10, 10))\n", - "\n", - "\n", - "for lane in log_map_archive[\"lane_segments\"].values():\n", - " left_boundary = lane[\"left_lane_boundary\"]\n", 
- " right_boundary = lane[\"right_lane_boundary\"]\n", - "\n", - " polygon = np.concatenate([left_boundary.array, right_boundary.array[::-1]]).reshape(-1, 3)[:, :2]\n", - " print(polygon)\n", - " ax.fill(\n", - " polygon[:, 0], polygon[:, 1], alpha=0.5, edgecolor=\"black\", color=\"red\" if lane[\"is_intersection\"] else \"blue\"\n", - " )\n", - "\n", - " # if left_boundary and right_boundary:\n", - " # ax.plot(left_boundary.array[:, 0], left_boundary.array[:, 1], color=\"blue\", linewidth=1)\n", - " # ax.plot(right_boundary.array[:, 0], right_boundary.array[:, 1], color=\"red\", linewidth=1)\n", - "\n", - "ax.set_title(\"Lane Segments\")\n", - "ax.set_xlabel(\"X\")\n", - "ax.set_ylabel(\"Y\")\n", - "ax.set_aspect(\"equal\")\n", - "plt.show()" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "7", - "metadata": {}, - "outputs": [], - "source": [] - } - ], - "metadata": { - "kernelspec": { - "display_name": "d123", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.12.11" - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} diff --git a/notebooks/carla/vehicle_params.ipynb b/notebooks/carla/vehicle_params.ipynb deleted file mode 100644 index 319b0ae8..00000000 --- a/notebooks/carla/vehicle_params.ipynb +++ /dev/null @@ -1,127 +0,0 @@ -{ - "cells": [ - { - "cell_type": "code", - "execution_count": null, - "id": "0", - "metadata": {}, - "outputs": [], - "source": [ - "from pathlib import Path" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "1", - "metadata": {}, - "outputs": [], - "source": [ - "import gzip\n", - "import json\n", - "from typing import Dict\n", - "\n", - "\n", - "path = Path(\"/home/daniel/carla_workspace/data/_Rep0_longest1_route0_07_04_10_18_47/boxes\")\n", - "\n", - "def _load_json_gz(path: Path) -> Dict:\n", - " \"\"\"Helper function to load a gzipped JSON file.\"\"\"\n", - " with gzip.open(path, \"rt\") as f:\n", - " data = json.load(f)\n", - " return data\n", - "\n", - "bounding_box_paths = sorted([bb_path for bb_path in path.iterdir()])" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "2", - "metadata": {}, - "outputs": [], - "source": [ - "\n", - "width = [] \n", - "length = [] \n", - "height = []\n", - "wheel_base = [] \n", - "rear_axle_to_center_vertical = [] \n", - "rear_axle_to_center_longitudinal = [] \n", - "for bb_path in bounding_box_paths:\n", - " bb_dict = _load_json_gz(bb_path)\n", - " vehicle_parameters = bb_dict[\"vehicle_parameters\"]\n", - " width.append(vehicle_parameters[\"width\"])\n", - " length.append(vehicle_parameters[\"length\"])\n", - " height.append(vehicle_parameters[\"height\"])\n", - " wheel_base.append(vehicle_parameters[\"wheel_base\"])\n", - " rear_axle_to_center_vertical.append(vehicle_parameters[\"rear_axle_to_center_vertical\"])\n", - " rear_axle_to_center_longitudinal.append(vehicle_parameters[\"rear_axle_to_center_longitudinal\"])\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "3", - "metadata": {}, - "outputs": [], - "source": [ - "import numpy as np\n", - "\n", - "print(\"width\", np.mean(width), np.std(width))\n", - "print(\"length\", np.mean(length), np.std(length))\n", - "print(\"height\", np.mean(height), np.std(height))\n", - "print(\"wheel_base\", np.mean(wheel_base), np.std(wheel_base))\n", - 
"print(\"rear_axle_to_center_vertical\", np.mean(rear_axle_to_center_vertical), np.std(rear_axle_to_center_vertical))\n", - "print(\"rear_axle_to_center_longitudinal\", np.mean(rear_axle_to_center_longitudinal), np.std(rear_axle_to_center_longitudinal))\n", - "\n", - "\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "4", - "metadata": {}, - "outputs": [], - "source": [ - "print(\"width\", round(np.median(width), 5), np.std(width))\n", - "print(\"length\", round(np.median(length), 5), np.std(length))\n", - "print(\"height\", round(np.median(height), 5), np.std(height))\n", - "print(\"wheel_base\", round(np.median(wheel_base), 5), np.std(wheel_base))\n", - "print(\"rear_axle_to_center_vertical\", round(np.median(rear_axle_to_center_vertical), 5), np.std(rear_axle_to_center_vertical))\n", - "print(\"rear_axle_to_center_longitudinal\", round(np.median(rear_axle_to_center_longitudinal), 5), np.std(rear_axle_to_center_longitudinal))" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "5", - "metadata": {}, - "outputs": [], - "source": [ - "vehicle_parameters[\"vehicle_name\"]" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "d123", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.12.11" - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} diff --git a/notebooks/dataset/dataset.ipynb b/notebooks/dataset/dataset.ipynb deleted file mode 100644 index 83f602a1..00000000 --- a/notebooks/dataset/dataset.ipynb +++ /dev/null @@ -1,148 +0,0 @@ -{ - "cells": [ - { - "cell_type": "code", - "execution_count": null, - "id": "0", - "metadata": {}, - "outputs": [], - "source": [ - "s3_uri = \"d123testing/nuplan_private_test/2021.07.25.16.16.23_veh-26_02446_02589.arrow\"" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "1", - "metadata": {}, - "outputs": [], - "source": [ - "\n", - "import pyarrow as pa\n", - "import pyarrow.fs as fs\n", - "import pyarrow.dataset as ds\n", - "\n", - "import os\n", - "\n", - "s3_fs = fs.S3FileSystem(\n", - " access_key=os.environ.get('AWS_ACCESS_KEY_ID'),\n", - " secret_key=os.environ.get('AWS_SECRET_ACCESS_KEY'),\n", - " region=os.environ.get('AWS_DEFAULT_REGION')\n", - ")\n", - "from d123.common.utils.timer import Timer\n", - "\n", - "\n", - "timer = Timer()\n", - "timer.start()\n", - "\n", - "dataset = ds.dataset(f\"{s3_uri}\", format=\"ipc\", filesystem=s3_fs)\n", - "timer.log(\"1. Dataset loaded\")\n", - "\n", - "# Get all column names and remove the ones you want to drop\n", - "all_columns = dataset.schema.names\n", - "columns_to_keep = [col for col in all_columns if col not in [\"front_cam_demo\", \"front_cam_transform\"]]\n", - "timer.log(\"2. Columns filtered\")\n", - "\n", - "table = dataset.to_table(columns=columns_to_keep)\n", - "timer.log(\"3. Table created\")\n", - "# Save locally\n", - "with pa.ipc.new_file(\"filtered_file.arrow\", table.schema) as writer:\n", - " writer.write_table(table)\n", - "timer.log(\"4. 
Table saved locally\")\n", - "\n", - "timer.end()\n", - "timer.stats(verbose=False)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "2", - "metadata": {}, - "outputs": [], - "source": [ - "timer = Timer()\n", - "timer.start()\n", - "\n", - "table = dataset.take([i for i in range(0, 100, 1)], columns=all_columns)\n", - "timer.log(\"3. Table created\")\n", - "\n", - "# Save locally\n", - "with pa.ipc.new_file(\"filtered_file_v2.arrow\", table.schema) as writer:\n", - " writer.write_table(table)\n", - "\n", - "timer.log(\"4. Table saved locally\")\n", - "timer.end()\n", - "timer.stats(verbose=False)\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "3", - "metadata": {}, - "outputs": [], - "source": [ - "# with pa.memory_map(\"filtered_file_v2.arrow\", \"r\") as source:\n", - "# mmap_table = pa.ipc.open_file(source).read_all()" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "4", - "metadata": {}, - "outputs": [], - "source": [ - "# import pyarrow.parquet as pq\n", - "# import pyarrow.dataset as ds\n", - "# import pyarrow.dataset as ds\n", - "# import pyarrow as pa\n", - "# from huggingface_hub import hf_hub_download\n", - "# from io import BytesIO\n", - "\n", - "# # Download specific file from the repo\n", - "# file_path = hf_hub_download(\n", - "# repo_id=\"DanielDauner/delete_me\",\n", - "# filename=\"2021.05.25.14.16.10_veh-35_01690_02183.arrow\", # or whatever the file is named\n", - "# repo_type=\"dataset\"\n", - "# )\n", - "\n", - "# # Read with PyArrow\n", - "# with pa.memory_map(file_path, \"r\") as source:\n", - "# mmap_table = pa.ipc.open_file(source).read_all()\n", - "\n", - "# # Create dataset and slice\n", - "# sliced_table = mmap_table.slice(0, 1000) # First 1000 rows\n", - "# sliced_table" - ] - }, - { - "cell_type": "markdown", - "id": "5", - "metadata": {}, - "source": [] - } - ], - "metadata": { - "kernelspec": { - "display_name": "d123", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.9.21" - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} diff --git a/notebooks/dataset/parquet_testing.ipynb b/notebooks/dataset/parquet_testing.ipynb deleted file mode 100644 index 13b3fed7..00000000 --- a/notebooks/dataset/parquet_testing.ipynb +++ /dev/null @@ -1,133 +0,0 @@ -{ - "cells": [ - { - "cell_type": "code", - "execution_count": null, - "id": "0", - "metadata": {}, - "outputs": [], - "source": [ - "import pyarrow as pa\n", - "\n", - "import pyarrow.ipc as ipc\n", - "import pyarrow.parquet as pq\n", - "\n", - "import os, psutil\n", - "\n", - "def print_memory_usage():\n", - " process = psutil.Process(os.getpid())\n", - " memory_info = process.memory_info()\n", - " print(f\"Memory usage: {memory_info.rss / 1024 ** 2:.2f} MB\")" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "1", - "metadata": {}, - "outputs": [], - "source": [ - "\n", - "with pa.memory_map(\"filtered_file_v2.arrow\", \"r\") as source:\n", - " reader = ipc.RecordBatchFileReader(source)\n", - " table = reader.read_all()\n", - "\n", - "pq.write_table(table, \"filtered_file_v2.parquet\")" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "2", - "metadata": {}, - "outputs": [], - "source": [ - "print_memory_usage()\n", - "\n", - "parquet_table = 
pq.read_table(\"/home/daniel/Downloads/episode_000000.parquet\")\n", - "\n", - "\n", - "print_memory_usage()" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "3", - "metadata": {}, - "outputs": [], - "source": [ - "print_memory_usage()\n", - "\n", - "with pa.memory_map(\"filtered_file_v2.arrow\", \"r\") as source:\n", - " reader = ipc.RecordBatchFileReader(source)\n", - " arrow_table = reader.read_all()\n", - "\n", - "\n", - "print_memory_usage()" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "4", - "metadata": {}, - "outputs": [], - "source": [ - "import requests\n", - "\n", - "parquet_web_file = \"https://huggingface.co/datasets/yaak-ai/L2D/raw/main/data/chunk-000/episode_000000.parquet\"\n", - "response = requests.get(parquet_web_file)\n", - "with pa.BufferReader(response.content) as source:\n", - " web_table = pq.read_table(source)\n", - "\n", - "\n", - "web_table" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "5", - "metadata": {}, - "outputs": [], - "source": [ - "import pyarrow as pa\n", - "import pyarrow.fs as fs\n", - "import pyarrow.dataset as ds\n", - "import pyarrow as pa\n", - "\n", - "# s3_fs = fs.S3FileSystem()\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "6", - "metadata": {}, - "outputs": [], - "source": [] - } - ], - "metadata": { - "kernelspec": { - "display_name": "d123", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.12.11" - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} diff --git a/notebooks/nuplan/nuplan_sensor_loading.ipynb b/notebooks/nuplan/nuplan_sensor_loading.ipynb deleted file mode 100644 index c1e419a9..00000000 --- a/notebooks/nuplan/nuplan_sensor_loading.ipynb +++ /dev/null @@ -1,501 +0,0 @@ -{ - "cells": [ - { - "cell_type": "code", - "execution_count": null, - "id": "0", - "metadata": {}, - "outputs": [], - "source": [ - "import os\n", - "import glob\n", - "from pathlib import Path\n", - "import numpy as np\n", - "import pyarrow as pa\n", - "import pyarrow.ipc as ipc\n", - "from PIL import Image" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "1", - "metadata": {}, - "outputs": [], - "source": [ - "import numpy as np\n", - "\n", - "np.zeros((3,3)).tolist()" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "2", - "metadata": {}, - "outputs": [], - "source": [ - "from d123.conversion.nuplan.nuplan_converter import NuplanDataConverter" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "3", - "metadata": {}, - "outputs": [], - "source": [ - "def process_images_to_arrow(input_folder, output_file, batch_size=100):\n", - " \"\"\"\n", - " Process JPEG images from a folder and save them as arrays in Arrow IPC format.\n", - "\n", - " Args:\n", - " input_folder (str): Path to folder containing JPEG images\n", - " output_file (str): Path to output Arrow IPC file\n", - " batch_size (int): Number of images to process in each batch\n", - " \"\"\"\n", - "\n", - " # Get all JPEG files in the folder\n", - " jpeg_extensions = [\"*.jpg\", \"*.jpeg\", \"*.JPG\", \"*.JPEG\"]\n", - " image_files = []\n", - "\n", - " for ext in jpeg_extensions:\n", - " image_files.extend(glob.glob(os.path.join(input_folder, ext)))\n", - "\n", - " if not image_files:\n", - " 
print(f\"No JPEG files found in {input_folder}\")\n", - " return\n", - "\n", - " print(f\"Found {len(image_files)} JPEG files\")\n", - "\n", - " # Create Arrow schema\n", - " schema = pa.schema(\n", - " [\n", - " # pa.field(\"filename\", pa.string()),\n", - " pa.field(\"image_array\", pa.binary()),\n", - " # pa.field(\"height\", pa.int32()),\n", - " # pa.field(\"width\", pa.int32()),\n", - " # pa.field(\"channels\", pa.int32()),\n", - " ]\n", - " )\n", - "\n", - " # Open Arrow IPC writer\n", - " with open(output_file, \"wb\") as f:\n", - " writer = ipc.new_file(f, schema)\n", - "\n", - " # Process images in batches\n", - " for i in range(0, len(image_files), batch_size):\n", - " batch_files = image_files[i : i + batch_size]\n", - "\n", - " # Lists to store batch data\n", - " filenames = []\n", - " image_arrays = []\n", - " image_bytes = []\n", - " heights = []\n", - " widths = []\n", - " channels = []\n", - "\n", - " print(f\"Processing batch {i//batch_size + 1}/{(len(image_files)-1)//batch_size + 1}\")\n", - "\n", - " for img_path in batch_files:\n", - " try:\n", - " # Load image\n", - " # with Image.open(img_path) as img:\n", - " # # Convert to RGB if not already\n", - " # if img.mode != \"RGB\":\n", - " # img = img.convert(\"RGB\")\n", - " with open(img_path, \"rb\") as f:\n", - " jpg_bytes = f.read()\n", - "\n", - " # Convert to numpy array\n", - " # img_array = np.array(img)\n", - "\n", - " # Store image data\n", - " filenames.append(os.path.basename(img_path))\n", - " image_bytes.append(jpg_bytes)\n", - " # image_arrays.append(img_array.flatten().tolist())\n", - " # heights.append(img_array.shape[0])\n", - " # widths.append(img_array.shape[1])\n", - " # channels.append(img_array.shape[2])\n", - "\n", - " except Exception as e:\n", - " print(f\"Error processing {img_path}: {e}\")\n", - " continue\n", - "\n", - " # Create Arrow arrays for this batch\n", - " if filenames: # Only create batch if we have valid images\n", - " batch_data = pa.record_batch(\n", - " [\n", - " # pa.array(filenames),\n", - " pa.array(image_bytes),\n", - " # pa.array(heights),\n", - " # pa.array(widths),\n", - " # pa.array(channels),\n", - " ],\n", - " schema=schema,\n", - " )\n", - "\n", - " # Write batch to file\n", - " writer.write_batch(batch_data)\n", - "\n", - " writer.close()\n", - "\n", - " print(f\"Successfully saved images to {output_file}\")\n", - "\n", - "\n", - "def read_arrow_file(arrow_file):\n", - " \"\"\"\n", - " Read and display info about the Arrow IPC file.\n", - "\n", - " Args:\n", - " arrow_file (str): Path to Arrow IPC file\n", - " \"\"\"\n", - " with open(arrow_file, \"rb\") as f:\n", - " reader = ipc.open_file(f)\n", - "\n", - " print(f\"Schema: {reader.schema}\")\n", - " print(f\"Number of record batches: {reader.num_record_batches}\")\n", - "\n", - " total_images = 0\n", - " for i in range(reader.num_record_batches):\n", - " batch = reader.get_batch(i)\n", - " total_images += len(batch)\n", - " print(f\"Batch {i}: {len(batch)} images\")\n", - "\n", - " print(f\"Total images: {total_images}\")\n", - "\n", - " # Show first few filenames as example\n", - " if reader.num_record_batches > 0:\n", - " first_batch = reader.get_batch(0)\n", - " print(f\"First few filenames: {first_batch['filename'][:5].to_pylist()}\")\n", - "\n", - "\n", - "def reconstruct_image(arrow_file, filename, output_path):\n", - " \"\"\"\n", - " Reconstruct and save an image from the Arrow file.\n", - "\n", - " Args:\n", - " arrow_file (str): Path to Arrow IPC file\n", - " filename (str): Name of the image file to 
reconstruct\n", - " output_path (str): Path to save reconstructed image\n", - " \"\"\"\n", - " with open(arrow_file, \"rb\") as f:\n", - " reader = ipc.open_file(f)\n", - "\n", - " for i in range(reader.num_record_batches):\n", - " batch = reader.get_batch(i)\n", - "\n", - " # Find the image by filename\n", - " filenames = batch[\"filename\"].to_pylist()\n", - " if filename in filenames:\n", - " idx = filenames.index(filename)\n", - "\n", - " # Get image data\n", - " img_array = batch[\"image_array\"][idx].to_pylist()\n", - " height = batch[\"height\"][idx].as_py()\n", - " width = batch[\"width\"][idx].as_py()\n", - " channels = batch[\"channels\"][idx].as_py()\n", - "\n", - " # Reconstruct image\n", - " img_array = np.array(img_array, dtype=np.uint8)\n", - " img_array = img_array.reshape(height, width, channels)\n", - "\n", - " # Save image\n", - " img = Image.fromarray(img_array)\n", - " img.save(output_path)\n", - " print(f\"Image reconstructed and saved to {output_path}\")\n", - " return\n", - "\n", - " print(f\"Image {filename} not found in Arrow file\")" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "4", - "metadata": {}, - "outputs": [], - "source": [ - "# process_images_to_arrow(\"/mnt/nvme/nuplan/dataset/nuplan-v1.1/sensor/2021.07.01.20.35.47_veh-38_00016_00281/F0\", \"test.arrow\", 10)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "5", - "metadata": {}, - "outputs": [], - "source": [ - "# # Read IPC file\n", - "# import io\n", - "\n", - "# import time\n", - "\n", - "# # with pa.OSFile(\"test.arrow\", 'rb') as source:\n", - "# # with ipc.open_file(source) as reader:\n", - "# # table = reader.read_all()\n", - "\n", - "# with pa.ipc.open_file(\n", - "# pa.memory_map(\"/home/daniel/d123_workspace/data/nuplan_private_test/2021.07.25.16.16.23_veh-26_02446_02589.arrow\")\n", - "# ) as reader:\n", - "# # This doesn't load data into memory yet!\n", - "# table = reader.read_all()\n", - "\n", - "\n", - "# print(len(table))\n", - "# start = time.time()\n", - "# # Extract JPG data\n", - "# jpg_data = table[\"front_cam_demo\"][500].as_py()\n", - "# read_image = Image.open(io.BytesIO(jpg_data))\n", - "\n", - "# # read_image = read_image.convert(\"RGB\") # Ensure it's in RGB format\n", - "# read_image = np.array(read_image)\n", - "# print(read_image.dtype)\n", - "# print(f\"Image loaded in {time.time() - start:.4f} seconds\")\n", - "\n", - "# import matplotlib.pyplot as plt\n", - "\n", - "# plt.imshow(read_image)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "6", - "metadata": {}, - "outputs": [], - "source": [ - "from nuplan.database.nuplan_db_orm.nuplandb import NuPlanDB\n", - "import os\n", - "NUPLAN_DATA_ROOT = Path(os.environ[\"NUPLAN_DATA_ROOT\"])\n", - "NUPLAN_SENSOR_ROOT = Path(\"/media/nvme1/nuplan/dataset/nuplan-v1.1/sensor_blobs\")\n", - "\n", - "\n", - "log_path = \"/media/nvme1/nuplan/dataset/nuplan-v1.1/splits/private_test/2021.07.01.21.22.09_veh-14_00016_00656.db\"\n", - "log_db = NuPlanDB(NUPLAN_DATA_ROOT, str(log_path), None)\n", - "\n", - "\n", - "log_db.lidar_pc.filename" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "7", - "metadata": {}, - "outputs": [], - "source": [] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "8", - "metadata": {}, - "outputs": [], - "source": [ - "import io\n", - "import pickle\n", - "from nuplan.database.nuplan_db.nuplan_scenario_queries import get_images_from_lidar_tokens, get_cameras\n", - "from 
nuplan.planning.scenario_builder.nuplan_db.nuplan_scenario import NuPlanScenario, CameraChannel, LidarChannel\n", - "\n", - "from nuplan.database.utils.pointclouds.lidar import LidarPointCloud, PointCloud\n", - "from pyquaternion import Quaternion\n", - "\n", - "\n", - "NUPLAN_DB_PATH = \"/media/nvme1/nuplan/dataset/nuplan-v1.1/splits/private_test\"\n", - "\n", - "\n", - "def get_log_cam_info(log):\n", - " log_name = log.logfile\n", - " log_file = os.path.join(NUPLAN_DB_PATH, log_name + \".db\")\n", - "\n", - " log_cam_infos = {}\n", - " for cam in get_cameras(log_file, [str(CameraChannel.F0.value)]):\n", - " intrinsics = np.array(pickle.loads(cam.intrinsic))\n", - " translation = np.array(pickle.loads(cam.translation))\n", - " rotation = np.array(pickle.loads(cam.rotation))\n", - " rotation = Quaternion(rotation).rotation_matrix\n", - " distortion = np.array(pickle.loads(cam.distortion))\n", - " c = dict(\n", - " intrinsic=intrinsics,\n", - " distortion=distortion,\n", - " translation=translation,\n", - " rotation=rotation,\n", - " )\n", - " log_cam_infos[cam.token] = c\n", - "\n", - " return log_cam_infos\n", - "\n", - "\n", - "images = []\n", - "for lidar_pc in log_db.lidar_pc[::2]:\n", - "\n", - " # front_image = get_images_from_lidar_tokens(log_path, [lidar_pc.token], [str(CameraChannel.F0.value)])\n", - " # parameters = get_log_cam_info(log_db.log)\n", - " # print(parameters)\n", - "\n", - " # images.append(list(front_image))\n", - " lidar_path = NUPLAN_SENSOR_ROOT / lidar_pc.filename\n", - " with open(lidar_path, \"rb\") as fp:\n", - " buffer = io.BytesIO(fp.read())\n", - "\n", - " pcd_data = buffer.read() # type: ignore\n", - "\n", - " # headers_list = []\n", - " # with io.BytesIO(pcd_data) as stream:\n", - " # while True:\n", - " # line = stream.readline().decode('utf8').strip()\n", - " # print(line)\n", - " # if line.startswith('#'):\n", - " # continue\n", - " # columns = line.split()\n", - " # key = columns[0].lower()\n", - " # val = columns[1:] if len(columns) > 2 else columns[1]\n", - " # headers_list.append((key, val))\n", - "\n", - " # if key == 'data':\n", - " # break\n", - "\n", - " pc = PointCloud.parse(pcd_data)\n", - "\n", - " # print(pc.header)\n", - " # pcd_data\n", - "\n", - " # break\n", - "\n", - " # print(isinstance(pcd_data, bytes))\n", - "\n", - " # lidar_point_cloud = LidarPointCloud.from_buffer(buffer, \"pcd\")\n", - " # print(lidar_point_cloud.points.shape)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "9", - "metadata": {}, - "outputs": [], - "source": [ - "parameters[\"0872b6c896e85f9f\"][\"rotation\"]\n", - "\n", - "\n", - "# intrinsics = np.array([[1.545e03, 0.000e00, 9.600e02], [0.000e00, 1.545e03, 5.600e02], [0.000e00, 0.000e00, 1.000e00]])\n", - "# distortion = np.array([-0.356123, 0.172545, -0.00213, 0.000464, -0.05231])\n", - "# translation = np.array([ 1.66433035e+00, -1.32379618e-03, 1.57190200e+00])\n", - "# rotation = np.array(\n", - "# [\n", - "# [-0.00395669, -0.03969443, 0.99920403],\n", - "# [-0.99971496, -0.02336898, -0.00488707],\n", - "# [0.02354437, -0.99893856, -0.03959065],\n", - "# ]\n", - "# )\n", - "# distortion\n", - "\n", - "np.array(\n", - " [\n", - " [-0.00395669, -0.03969443, 0.99920403],\n", - " [-0.99971496, -0.02336898, -0.00488707],\n", - " [0.02354437, -0.99893856, -0.03959065],\n", - " ]\n", - ")\n", - "np.array(\n", - " [\n", - " [-0.00395669, -0.03969443, 0.99920403],\n", - " [-0.99971496, -0.02336898, -0.00488707],\n", - " [0.02354437, -0.99893856, -0.03959065],\n", - " ]\n", - ")" - ] - }, - { 
- "cell_type": "code", - "execution_count": null, - "id": "10", - "metadata": {}, - "outputs": [], - "source": [ - "import pickle\n", - "\n", - "\n", - "for cam in get_cameras(log_path, [str(channel.value) for channel in CameraChannel]):\n", - " print(pickle.loads(cam.translation))\n", - " print(pickle.loads(cam.translation))" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "11", - "metadata": {}, - "outputs": [], - "source": [ - "9.600e02, 1920 / 2" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "12", - "metadata": {}, - "outputs": [], - "source": [ - "import cv2\n", - "\n", - "sensor_root = Path(\"/mnt/nvme/nuplan/dataset/nuplan-v1.1/sensor\")\n", - "\n", - "frames = []\n", - "for image in images:\n", - " if len(image) == 0:\n", - " continue\n", - "\n", - " jpg_name = image[0].filename_jpg\n", - " jpg_path = sensor_root / jpg_name\n", - " with open(jpg_path, \"rb\") as f:\n", - " jpg_data = f.read()\n", - " read_image = Image.open(io.BytesIO(jpg_data))\n", - " read_image = np.array(read_image)\n", - " # Convert RGB to BGR for OpenCV\n", - " frame = cv2.cvtColor(read_image, cv2.COLOR_RGB2BGR)\n", - " frames.append(frame)\n", - "\n", - "# Define video writer\n", - "height, width, _ = frames[0].shape\n", - "out = cv2.VideoWriter(f\"{log_db.name}.mp4\", cv2.VideoWriter_fourcc(*\"mp4v\"), 20, (width, height))\n", - "\n", - "for frame in frames:\n", - " out.write(frame)\n", - "out.release()\n", - "print(\"Video saved as output.mp4\")" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "13", - "metadata": {}, - "outputs": [], - "source": [] - } - ], - "metadata": { - "kernelspec": { - "display_name": "d123", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.12.11" - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} From 31ada25c13b02cb7a42284d29758e662f393adf2 Mon Sep 17 00:00:00 2001 From: Daniel Dauner Date: Thu, 16 Oct 2025 10:45:10 +0200 Subject: [PATCH 082/145] Add unfinished and untested Pandaset converter. 
--- .../datasets/pandaset/pandaset_constants.py | 216 +++++++++++ .../datasets/pandaset/pandaset_converter.py | 340 ++++++++++++++++++ .../datasets/wopd/wopd_converter.py | 2 +- .../vehicle_state/vehicle_parameters.py | 12 + .../config/conversion/datasets/pandaset.yaml | 150 ++++++++ .../config/conversion/default_conversion.yaml | 3 +- 6 files changed, 721 insertions(+), 2 deletions(-) create mode 100644 d123/conversion/datasets/pandaset/pandaset_constants.py create mode 100644 d123/conversion/datasets/pandaset/pandaset_converter.py create mode 100644 d123/script/config/conversion/datasets/pandaset.yaml diff --git a/d123/conversion/datasets/pandaset/pandaset_constants.py b/d123/conversion/datasets/pandaset/pandaset_constants.py new file mode 100644 index 00000000..88b0549f --- /dev/null +++ b/d123/conversion/datasets/pandaset/pandaset_constants.py @@ -0,0 +1,216 @@ +from typing import Dict, List + +from d123.common.utils.enums import SerialIntEnum +from d123.datatypes.detections.detection_types import DetectionType +from d123.datatypes.sensors.camera.pinhole_camera import PinholeCameraType + +PANDASET_SPLITS: List[str] = ["pandaset_train", "pandaset_val", "pandaset_test"] + +PANDASET_CAMERA_MAPPING: Dict[str, PinholeCameraType] = { + "front_camera": PinholeCameraType.CAM_F0, + "back_camera": PinholeCameraType.CAM_B0, + "front_left_camera": PinholeCameraType.CAM_L0, + "front_right_camera": PinholeCameraType.CAM_R0, + "left_camera": PinholeCameraType.CAM_L1, + "right_camera": PinholeCameraType.CAM_R1, +} + + +class PandasetBoxDetectionType(SerialIntEnum): + + ANIMALS_BIRD = 0 + ANIMALS_OTHER = 1 + BICYCLE = 2 + BUS = 3 + CAR = 4 + CONES = 5 + CONSTRUCTION_SIGNS = 6 + EMERGENCY_VEHICLE = 7 + MEDIUM_SIZED_TRUCK = 8 + MOTORCYCLE = 9 + MOTORIZED_SCOOTER = 10 + OTHER_VEHICLE_CONSTRUCTION_VEHICLE = 11 + OTHER_VEHICLE_PEDICAB = 12 + OTHER_VEHICLE_UNCOMMON = 13 + PEDESTRIAN = 14 + PEDESTRIAN_WITH_OBJECT = 15 + PERSONAL_MOBILITY_DEVICE = 16 + PICKUP_TRUCK = 17 + PYLONS = 18 + ROAD_BARRIERS = 19 + ROLLING_CONTAINERS = 20 + SEMI_TRUCK = 21 + SIGNS = 22 + TEMPORARY_CONSTRUCTION_BARRIERS = 23 + TOWED_OBJECT = 24 + TRAIN = 25 + TRAM_SUBWAY = 26 + + +PANDASET_BOX_DETECTION_FROM_STR: Dict[str, PandasetBoxDetectionType] = { + "Animals - Bird": PandasetBoxDetectionType.ANIMALS_BIRD, + "Animals - Other": PandasetBoxDetectionType.ANIMALS_OTHER, + "Bicycle": PandasetBoxDetectionType.BICYCLE, + "Bus": PandasetBoxDetectionType.BUS, + "Car": PandasetBoxDetectionType.CAR, + "Cones": PandasetBoxDetectionType.CONES, + "Construction Signs": PandasetBoxDetectionType.CONSTRUCTION_SIGNS, + "Emergency Vehicle": PandasetBoxDetectionType.EMERGENCY_VEHICLE, + "Medium-sized Truck": PandasetBoxDetectionType.MEDIUM_SIZED_TRUCK, + "Motorcycle": PandasetBoxDetectionType.MOTORCYCLE, + "Motorized Scooter": PandasetBoxDetectionType.MOTORIZED_SCOOTER, + "Other Vehicle - Construction Vehicle": PandasetBoxDetectionType.OTHER_VEHICLE_CONSTRUCTION_VEHICLE, + "Other Vehicle - Pedicab": PandasetBoxDetectionType.OTHER_VEHICLE_PEDICAB, + "Other Vehicle - Uncommon": PandasetBoxDetectionType.OTHER_VEHICLE_UNCOMMON, + "Pedestrian": PandasetBoxDetectionType.PEDESTRIAN, + "Pedestrian with Object": PandasetBoxDetectionType.PEDESTRIAN_WITH_OBJECT, + "Personal Mobility Device": PandasetBoxDetectionType.PERSONAL_MOBILITY_DEVICE, + "Pickup Truck": PandasetBoxDetectionType.PICKUP_TRUCK, + "Pylons": PandasetBoxDetectionType.PYLONS, + "Road Barriers": PandasetBoxDetectionType.ROAD_BARRIERS, + "Rolling Containers": 
PandasetBoxDetectionType.ROLLING_CONTAINERS, + "Semi-truck": PandasetBoxDetectionType.SEMI_TRUCK, + "Signs": PandasetBoxDetectionType.SIGNS, + "Temporary Construction Barriers": PandasetBoxDetectionType.TEMPORARY_CONSTRUCTION_BARRIERS, + "Towed Object": PandasetBoxDetectionType.TOWED_OBJECT, + "Train": PandasetBoxDetectionType.TRAIN, + "Tram / Subway": PandasetBoxDetectionType.TRAM_SUBWAY, +} + + +PANDASET_BOX_DETECTION_TO_DEFAULT: Dict[PandasetBoxDetectionType, DetectionType] = { + PandasetBoxDetectionType.ANIMALS_BIRD: DetectionType.GENERIC_OBJECT, # TODO: Adjust default types + PandasetBoxDetectionType.ANIMALS_OTHER: DetectionType.GENERIC_OBJECT, # TODO: Adjust default types + PandasetBoxDetectionType.BICYCLE: DetectionType.BICYCLE, + PandasetBoxDetectionType.BUS: DetectionType.VEHICLE, + PandasetBoxDetectionType.CAR: DetectionType.VEHICLE, + PandasetBoxDetectionType.CONES: DetectionType.TRAFFIC_CONE, + PandasetBoxDetectionType.CONSTRUCTION_SIGNS: DetectionType.CZONE_SIGN, + PandasetBoxDetectionType.EMERGENCY_VEHICLE: DetectionType.VEHICLE, + PandasetBoxDetectionType.MEDIUM_SIZED_TRUCK: DetectionType.VEHICLE, + PandasetBoxDetectionType.MOTORCYCLE: DetectionType.BICYCLE, + PandasetBoxDetectionType.MOTORIZED_SCOOTER: DetectionType.BICYCLE, + PandasetBoxDetectionType.OTHER_VEHICLE_CONSTRUCTION_VEHICLE: DetectionType.VEHICLE, + PandasetBoxDetectionType.OTHER_VEHICLE_PEDICAB: DetectionType.BICYCLE, + PandasetBoxDetectionType.OTHER_VEHICLE_UNCOMMON: DetectionType.VEHICLE, + PandasetBoxDetectionType.PEDESTRIAN: DetectionType.PEDESTRIAN, + PandasetBoxDetectionType.PEDESTRIAN_WITH_OBJECT: DetectionType.PEDESTRIAN, + PandasetBoxDetectionType.PERSONAL_MOBILITY_DEVICE: DetectionType.BICYCLE, + PandasetBoxDetectionType.PICKUP_TRUCK: DetectionType.VEHICLE, + PandasetBoxDetectionType.PYLONS: DetectionType.TRAFFIC_CONE, + PandasetBoxDetectionType.ROAD_BARRIERS: DetectionType.BARRIER, + PandasetBoxDetectionType.ROLLING_CONTAINERS: DetectionType.GENERIC_OBJECT, + PandasetBoxDetectionType.SEMI_TRUCK: DetectionType.VEHICLE, + PandasetBoxDetectionType.SIGNS: DetectionType.SIGN, + PandasetBoxDetectionType.TEMPORARY_CONSTRUCTION_BARRIERS: DetectionType.BARRIER, + PandasetBoxDetectionType.TOWED_OBJECT: DetectionType.VEHICLE, + PandasetBoxDetectionType.TRAIN: DetectionType.GENERIC_OBJECT, # TODO: Adjust default types + PandasetBoxDetectionType.TRAM_SUBWAY: DetectionType.GENERIC_OBJECT, # TODO: Adjust default types +} + + +PANDASET_LOG_NAMES: List[str] = [ + "001", + "002", + "003", + "004", + "005", + "006", + "008", + "011", + "012", + "013", + "014", + "015", + "016", + "017", + "018", + "019", + "020", + "021", + "023", + "024", + "027", + "028", + "029", + "030", + "032", + "033", + "034", + "035", + "037", + "038", + "039", + "040", + "041", + "042", + "043", + "044", + "045", + "046", + "047", + "048", + "050", + "051", + "052", + "053", + "054", + "055", + "056", + "057", + "058", + "059", + "062", + "063", + "064", + "065", + "066", + "067", + "068", + "069", + "070", + "071", + "072", + "073", + "074", + "077", + "078", + "079", + "080", + "084", + "085", + "086", + "088", + "089", + "090", + "091", + "092", + "093", + "094", + "095", + "097", + "098", + "099", + "100", + "101", + "102", + "103", + "104", + "105", + "106", + "109", + "110", + "112", + "113", + "115", + "116", + "117", + "119", + "120", + "122", + "123", + "124", + "139", + "149", + "158", +] diff --git a/d123/conversion/datasets/pandaset/pandaset_converter.py b/d123/conversion/datasets/pandaset/pandaset_converter.py new file mode 
100644
index 00000000..3715160f
--- /dev/null
+++ b/d123/conversion/datasets/pandaset/pandaset_converter.py
@@ -0,0 +1,340 @@
+import gzip
+import json
+import pickle
+from pathlib import Path
+from typing import Dict, List, Optional, Tuple, Union
+
+from d123.conversion.abstract_dataset_converter import AbstractDatasetConverter
+from d123.conversion.dataset_converter_config import DatasetConverterConfig
+from d123.conversion.datasets.pandaset.pandaset_constants import (
+    PANDASET_CAMERA_MAPPING,
+    PANDASET_LOG_NAMES,
+    PANDASET_SPLITS,
+)
+from d123.conversion.log_writer.abstract_log_writer import AbstractLogWriter
+from d123.conversion.map_writer.abstract_map_writer import AbstractMapWriter
+from d123.datatypes.detections.detection import BoxDetectionWrapper
+from d123.datatypes.maps.map_metadata import MapMetadata
+from d123.datatypes.scene.scene_metadata import LogMetadata
+from d123.datatypes.sensors.camera.pinhole_camera import (
+    PinholeCameraMetadata,
+    PinholeCameraType,
+    PinholeDistortion,
+    PinholeIntrinsics,
+)
+from d123.datatypes.sensors.lidar.lidar import LiDARMetadata, LiDARType
+from d123.datatypes.time.time_point import TimePoint
+from d123.datatypes.vehicle_state.ego_state import DynamicStateSE3, EgoStateSE3
+from d123.datatypes.vehicle_state.vehicle_parameters import (
+    get_pandaset_chrysler_pacifica_parameters,
+    rear_axle_se3_to_center_se3,
+)
+from d123.geometry import StateSE3, Vector3D
+
+
+class PandasetConverter(AbstractDatasetConverter):
+    def __init__(
+        self,
+        splits: List[str],
+        pandaset_data_root: Union[Path, str],
+        dataset_converter_config: DatasetConverterConfig,
+        train_log_names: List[str],
+        val_log_names: List[str],
+        test_log_names: List[str],
+    ) -> None:
+        super().__init__(dataset_converter_config)
+        for split in splits:
+            assert split in PANDASET_SPLITS, f"Split {split} is not available. Available splits: {PANDASET_SPLITS}"
+
+        self._splits: List[str] = splits
+        self._pandaset_data_root: Path = Path(pandaset_data_root)
+
+        self._train_log_names: List[str] = train_log_names
+        self._val_log_names: List[str] = val_log_names
+        self._test_log_names: List[str] = test_log_names
+        self._log_paths_and_split: List[Tuple[Path, str]] = self._collect_log_paths()
+
+    def _collect_log_paths(self) -> List[Tuple[Path, str]]:
+        log_paths_and_split: List[Tuple[Path, str]] = []
+
+        for log_folder in self._pandaset_data_root.iterdir():
+            if not log_folder.is_dir():
+                continue
+
+            log_name = log_folder.name
+            assert log_name in PANDASET_LOG_NAMES, f"Log name {log_name} is not recognized."
+            if (log_name in self._train_log_names) and ("pandaset_train" in self._splits):
+                log_paths_and_split.append((log_folder, "train"))
+            elif (log_name in self._val_log_names) and ("pandaset_val" in self._splits):
+                log_paths_and_split.append((log_folder, "val"))
+            elif (log_name in self._test_log_names) and ("pandaset_test" in self._splits):
+                log_paths_and_split.append((log_folder, "test"))
+
+        return log_paths_and_split
+
+    def get_number_of_maps(self) -> int:
+        """Inherited, see superclass."""
+        return 0  # NOTE: Pandaset does not have maps.
+
+    def get_number_of_logs(self) -> int:
+        """Inherited, see superclass."""
+        return len(self._log_paths_and_split)
+
+    def convert_map(self, map_index: int, map_writer: AbstractMapWriter) -> None:
+        """Inherited, see superclass."""
+        return None  # NOTE: Pandaset does not have maps.
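+    # Sketch of the bookkeeping consumed by convert_log() below; the concrete
+    # paths are hypothetical placeholders. _collect_log_paths() yields indexable
+    # (path, split) pairs, e.g.:
+    #   self._log_paths_and_split[0] == (Path("/data/pandaset/001"), "train")
+    #   source_log_path, split = self._log_paths_and_split[log_index]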
+ + def convert_log(self, log_index: int, log_writer: AbstractLogWriter) -> None: + """Inherited, see superclass.""" + + source_log_path, split = self._log_paths_and_split[log_index] + + # 1. Initialize Metadata + log_metadata = LogMetadata( + dataset="pandaset", + split=split, + log_name=source_log_path.name, + location=None, # TODO: Add location information. + timestep_seconds=0.1, + vehicle_parameters=get_pandaset_chrysler_pacifica_parameters(), + camera_metadata=_get_pandaset_camera_metadata(source_log_path), + lidar_metadata=_get_pandaset_lidar_metadata(source_log_path), + map_metadata=None, # NOTE: Pandaset does not have maps. + ) + + # 2. Prepare log writer + log_needs_writing = log_writer.reset(self.dataset_converter_config, log_metadata) + + # 3. Process source log data + if log_needs_writing: + + timesteps = _read_json(source_log_path / "meta" / "timestamps.json") + gps: List[Dict[str, float]] = _read_json(source_log_path / "meta" / "gps.json") + lidar_poses: List[Dict[str, Dict[str, float]]] = _read_json(source_log_path / "lidar" / "poses.json") + + for iteration, timestep_s in enumerate(timesteps): + iteration_str = f"{iteration:02d}" + + ego_state = _extract_pandaset_sensor_ego_state(gps[iteration], lidar_poses[iteration]) + log_writer.write( + timestamp=TimePoint.from_s(timestep_s), + ego_state=ego_state, + box_detections=_extract_pandaset_sensor_box_detections(source_log_path, iteration_str, ego_state), + cameras=_extract_pandaset_sensor_camera(self.dataset_converter_config), + ) + + # 4. Finalize log writing + log_writer.close() + + +def _get_pandaset_sensor_map_metadata(split: str, log_name: str) -> MapMetadata: + return MapMetadata( + dataset="pandaset-sensor", + split=split, + log_name=log_name, + location=None, # TODO: Add location information, e.g. city name. + map_has_z=True, + map_is_local=True, + ) + + +def _get_pandaset_camera_metadata(source_log_path: Path) -> Dict[PinholeCameraType, PinholeCameraMetadata]: + + all_cameras_folder = source_log_path / "camera" + camera_metadata: Dict[PinholeCameraType, PinholeCameraMetadata] = {} + + for camera_folder in all_cameras_folder.iterdir(): + camera_name = camera_folder.name + + assert camera_name in PANDASET_CAMERA_MAPPING.keys(), f"Camera name {camera_name} is not recognized." + camera_type = PANDASET_CAMERA_MAPPING[camera_name] + + intrinsics_file = camera_folder / "intrinsics.json" + assert intrinsics_file.exists(), f"Camera intrinsics file {intrinsics_file} does not exist." 
+ intrinsics_data = _read_json(intrinsics_file) + + camera_metadata[camera_type] = PinholeCameraMetadata( + camera_type=camera_type, + width=1920, + height=1080, + intrinsics=PinholeIntrinsics( + fx=intrinsics_data["fx"], + fy=intrinsics_data["fy"], + cx=intrinsics_data["cx"], + cy=intrinsics_data["cy"], + ), + distortion=PinholeDistortion(k1=0.0, k2=0.0, p1=0.0, p2=0.0, k3=0.0), + ) + + return camera_metadata + + +def _get_pandaset_lidar_metadata(log_path: Path) -> Dict[LiDARType, LiDARMetadata]: + # TODO: Implement + return {} + + +def _extract_pandaset_sensor_box_detections( + source_log_path: Path, + iteration_str: str, + ego_state_se3: EgoStateSE3, +) -> BoxDetectionWrapper: + + # TODO: Implement + + cuboids_file = source_log_path / "annotations" / "cuboids" / f"{iteration_str}.pkl.gz" + + if not cuboids_file.exists(): + return BoxDetectionWrapper(box_detections=[]) + + # cuboid_df = _read_pkl_gz(cuboids_file) + + # labels = list(cuboid_df["label"]) + # position_x = list(cuboid_df["position_x"]) + # position_y = list(cuboid_df["position_y"]) + # position_z = list(cuboid_df["position_z"]) + # yaws = list(cuboid_df["yaw"]) + + # annotations_slice = _get_pandaset_camera_metadata(annotations_df, lidar_timestamp_ns) + # num_detections = len(annotations_slice) + + # detections_state = np.zeros((num_detections, len(BoundingBoxSE3Index)), dtype=np.float64) + # detections_velocity = np.zeros((num_detections, len(Vector3DIndex)), dtype=np.float64) + # detections_token: List[str] = annotations_slice["track_uuid"].tolist() + # detections_types: List[DetectionType] = [] + + # for detection_idx, (_, row) in enumerate(annotations_slice.iterrows()): + # row = row.to_dict() + + # detections_state[detection_idx, BoundingBoxSE3Index.XYZ] = [row["tx_m"], row["ty_m"], row["tz_m"]] + # detections_state[detection_idx, BoundingBoxSE3Index.QUATERNION] = [row["qw"], row["qx"], row["qy"], row["qz"]] + # detections_state[detection_idx, BoundingBoxSE3Index.EXTENT] = [row["length_m"], row["width_m"], row["height_m"]] + + # pandaset_detection_type = PANDASET_BOX_DETECTION_MAPPING.deserialize(row["category"]) + # detections_types.append(PANDASET_BOX_DETECTION_MAPPING[pandaset_detection_type]) + + # detections_state[:, BoundingBoxSE3Index.STATE_SE3] = convert_relative_to_absolute_se3_array( + # origin=ego_state_se3.rear_axle_se3, + # se3_array=detections_state[:, BoundingBoxSE3Index.STATE_SE3], + # ) + + # box_detections: List[BoxDetectionSE3] = [] + # for detection_idx in range(num_detections): + # box_detections.append( + # BoxDetectionSE3( + # metadata=BoxDetectionMetadata( + # detection_type=detections_types[detection_idx], + # timepoint=None, + # track_token=detections_token[detection_idx], + # confidence=None, + # ), + # bounding_box_se3=BoundingBoxSE3.from_array(detections_state[detection_idx]), + # velocity=Vector3D.from_array(detections_velocity[detection_idx]), + # ) + # ) + + # return BoxDetectionWrapper(box_detections=box_detections) + return BoxDetectionWrapper(box_detections=[]) + + +def _extract_pandaset_sensor_ego_state(gps: Dict[str, float], lidar_pose: Dict[str, Dict[str, float]]) -> EgoStateSE3: + + rear_axle_pose = StateSE3( + x=lidar_pose["position"]["x"], + y=lidar_pose["position"]["y"], + z=lidar_pose["position"]["z"], + qw=lidar_pose["heading"]["w"], + qx=lidar_pose["heading"]["x"], + qy=lidar_pose["heading"]["y"], + qz=lidar_pose["heading"]["z"], + ) + + vehicle_parameters = get_pandaset_chrysler_pacifica_parameters() + center = rear_axle_se3_to_center_se3(rear_axle_se3=rear_axle_pose, 
vehicle_parameters=vehicle_parameters) + + # TODO: Add script to calculate the dynamic state from log sequence. + dynamic_state = DynamicStateSE3( + velocity=Vector3D(x=gps["xvel"], y=gps["yvel"], z=gps["zvel"]), + acceleration=Vector3D(x=0.0, y=0.0, z=0.0), + angular_velocity=Vector3D(x=0.0, y=0.0, z=0.0), + ) + + return EgoStateSE3( + center_se3=center, + dynamic_state_se3=dynamic_state, + vehicle_parameters=vehicle_parameters, + timepoint=None, + ) + + +def _extract_pandaset_sensor_camera( + dataset_converter_config: DatasetConverterConfig, +) -> Dict[PinholeCameraType, Tuple[Union[str, bytes], StateSE3]]: + + # TODO: Implement + + # camera_dict: Dict[PinholeCameraType, Tuple[Union[str, bytes], StateSE3]] = {} + # split = source_log_path.parent.name + # log_id = source_log_path.name + + # source_dataset_dir = source_log_path.parent.parent + + # for _, row in egovehicle_se3_sensor_df.iterrows(): + # row = row.to_dict() + # if row["sensor_name"] not in PANDASET_CAMERA_MAPPING: + # continue + + # camera_name = row["sensor_name"] + # camera_type = PANDASET_CAMERA_MAPPING[camera_name] + + # relative_image_path = find_closest_target_fpath( + # split=split, + # log_id=log_id, + # src_sensor_name="lidar", + # src_timestamp_ns=lidar_timestamp_ns, + # target_sensor_name=camera_name, + # synchronization_df=synchronization_df, + # ) + # if relative_image_path is not None: + # absolute_image_path = source_dataset_dir / relative_image_path + # assert absolute_image_path.exists() + + # # TODO: Adjust for finer IMU timestamps to correct the camera extrinsic. + # camera_extrinsic = StateSE3( + # x=row["tx_m"], + # y=row["ty_m"], + # z=row["tz_m"], + # qw=row["qw"], + # qx=row["qx"], + # qy=row["qy"], + # qz=row["qz"], + # ) + # camera_data = None + # if dataset_converter_config.camera_store_option == "path": + # camera_data = str(relative_image_path) + # elif dataset_converter_config.camera_store_option == "binary": + # with open(absolute_image_path, "rb") as f: + # camera_data = f.read() + # camera_dict[camera_type] = camera_data, camera_extrinsic + + # return camera_dict + return {} + + +def _extract_lidar(lidar_pc, dataset_converter_config: DatasetConverterConfig) -> Dict[LiDARType, Optional[str]]: + # TODO: Implement this function to extract lidar data. + return {} + + +def _read_json(json_file: Path): + """Helper function to read a json file as dict.""" + with open(json_file, "r") as f: + json_data = json.load(f) + return json_data + + +def _read_pkl_gz(pkl_gz_file: Path): + """Helper function to read a pkl.gz file as dict.""" + with gzip.open(pkl_gz_file, "rb") as f: + pkl_data = pickle.load(f) + return pkl_data diff --git a/d123/conversion/datasets/wopd/wopd_converter.py b/d123/conversion/datasets/wopd/wopd_converter.py index 4cf33ef9..fc8bdffa 100644 --- a/d123/conversion/datasets/wopd/wopd_converter.py +++ b/d123/conversion/datasets/wopd/wopd_converter.py @@ -142,7 +142,7 @@ def convert_log(self, log_index: int, log_writer: AbstractLogWriter) -> None: dataset="wopd", split=split, log_name=log_name, - location=None, # TODO: Add location information. 
+ location=str(initial_frame.context.stats.location), timestep_seconds=0.1, vehicle_parameters=get_wopd_chrysler_pacifica_parameters(), camera_metadata=_get_wopd_camera_metadata( diff --git a/d123/datatypes/vehicle_state/vehicle_parameters.py b/d123/datatypes/vehicle_state/vehicle_parameters.py index 5c8a57de..6955616b 100644 --- a/d123/datatypes/vehicle_state/vehicle_parameters.py +++ b/d123/datatypes/vehicle_state/vehicle_parameters.py @@ -83,6 +83,18 @@ def get_av2_ford_fusion_hybrid_parameters() -> VehicleParameters: ) +def get_pandaset_chrysler_pacifica_parameters() -> VehicleParameters: + return VehicleParameters( + vehicle_name="pandaset_chrysler_pacifica", + width=2.297, + length=5.176, + height=1.777, + wheel_base=3.089, + rear_axle_to_center_vertical=0.45, + rear_axle_to_center_longitudinal=1.461, + ) + + def center_se3_to_rear_axle_se3(center_se3: StateSE3, vehicle_parameters: VehicleParameters) -> StateSE3: """ Converts a center state to a rear axle state. diff --git a/d123/script/config/conversion/datasets/pandaset.yaml b/d123/script/config/conversion/datasets/pandaset.yaml new file mode 100644 index 00000000..d36db5f7 --- /dev/null +++ b/d123/script/config/conversion/datasets/pandaset.yaml @@ -0,0 +1,150 @@ +pandaset_dataset: + _target_: d123.conversion.datasets.pandaset.pandaset_converter.PandasetConverter + _convert_: 'all' + + splits: ["pandaset_train", "pandaset_val", "pandaset_test"] + pandaset_data_root: "/media/nvme1/argoverse" + + dataset_converter_config: + _target_: d123.conversion.dataset_converter_config.DatasetConverterConfig + _convert_: 'all' + + output_path: ${d123_data_root} + force_log_conversion: ${force_log_conversion} + force_map_conversion: ${force_map_conversion} + + # Map + include_map: false + + # Ego + include_ego: true + + # Box Detections + include_box_detections: true + + # Traffic Lights + include_traffic_lights: false + + # Cameras + include_cameras: true + camera_store_option: "binary" # "path", "binary", "mp4" + + # LiDARs + include_lidars: false + lidar_store_option: "path" # "path", "binary" + + # Scenario tag / Route + # NOTE: These are only supported for nuPlan. Consider removing or expanding support. + include_scenario_tags: false + include_route: false + + # NOTE: Pandaset does not have official splits, so we create our own here. + # We use 80% of the logs for training, 10% for validation, and 10% for testing. 
+ train_log_names: + - "001" + - "002" + - "003" + - "006" + - "008" + - "011" + - "012" + - "013" + - "014" + - "016" + - "019" + - "020" + - "023" + - "024" + - "027" + - "028" + - "029" + - "030" + - "032" + - "033" + - "034" + - "039" + - "041" + - "042" + - "043" + - "045" + - "046" + - "047" + - "048" + - "050" + - "051" + - "052" + - "053" + - "054" + - "055" + - "056" + - "057" + - "058" + - "059" + - "062" + - "063" + - "064" + - "065" + - "067" + - "068" + - "069" + - "070" + - "071" + - "072" + - "073" + - "074" + - "077" + - "079" + - "080" + - "084" + - "085" + - "088" + - "089" + - "090" + - "091" + - "092" + - "094" + - "095" + - "097" + - "098" + - "099" + - "101" + - "102" + - "103" + - "104" + - "106" + - "109" + - "110" + - "113" + - "115" + - "116" + - "119" + - "120" + - "122" + - "123" + - "149" + - "158" + + val_log_names: + - "004" + - "017" + - "018" + - "021" + - "037" + - "040" + - "044" + - "100" + - "105" + - "117" + - "124" + + test_log_names: + - "005" + - "015" + - "035" + - "038" + - "066" + - "078" + - "086" + - "093" + - "112" + - "139" diff --git a/d123/script/config/conversion/default_conversion.yaml b/d123/script/config/conversion/default_conversion.yaml index d9cbd752..94004587 100644 --- a/d123/script/config/conversion/default_conversion.yaml +++ b/d123/script/config/conversion/default_conversion.yaml @@ -19,7 +19,8 @@ defaults: - nuplan_mini_dataset # - carla_dataset # - wopd_dataset - - av2_sensor_dataset + # - av2_sensor_dataset + # - pandaset_dataset - _self_ From e189c65b1b5d59c12cb384a55a5e2d1109f7ef4c Mon Sep 17 00:00:00 2001 From: jbwang <1159270049@qq.com> Date: Thu, 16 Oct 2025 20:28:59 +0800 Subject: [PATCH 083/145] refactor kitti360 --- .../kitti_360/kitti_360_data_converter.py | 16 ++++------------ .../kitti_360/kitti_360_map_conversion.py | 9 ++------- d123/conversion/datasets/kitti_360/labels.py | 2 +- .../datasets/kitti_360/preprocess_detection.py | 2 +- 4 files changed, 8 insertions(+), 21 deletions(-) diff --git a/d123/conversion/datasets/kitti_360/kitti_360_data_converter.py b/d123/conversion/datasets/kitti_360/kitti_360_data_converter.py index 833493bd..5a3aea8c 100644 --- a/d123/conversion/datasets/kitti_360/kitti_360_data_converter.py +++ b/d123/conversion/datasets/kitti_360/kitti_360_data_converter.py @@ -1,21 +1,15 @@ -import gc -import json import os import re import yaml from dataclasses import asdict -from functools import partial from pathlib import Path from typing import Any, Dict, Final, List, Optional, Tuple, Union import numpy as np import pickle -import copy from collections import defaultdict import datetime import xml.etree.ElementTree as ET -import pyarrow as pa -from PIL import Image import logging from pyquaternion import Quaternion @@ -43,7 +37,6 @@ from d123.datatypes.time.time_point import TimePoint from d123.datatypes.vehicle_state.ego_state import DynamicStateSE3, EgoStateSE3, EgoStateSE3Index from d123.datatypes.vehicle_state.vehicle_parameters import get_kitti360_station_wagon_parameters,rear_axle_se3_to_center_se3 -from d123.common.utils.arrow_helper import open_arrow_table, write_arrow_table from d123.common.utils.uuid import create_deterministic_uuid from d123.conversion.abstract_dataset_converter import AbstractDatasetConverter from d123.conversion.dataset_converter_config import DatasetConverterConfig @@ -53,7 +46,7 @@ from d123.datatypes.maps.map_metadata import MapMetadata from d123.datatypes.scene.scene_metadata import LogMetadata from d123.conversion.datasets.kitti_360.kitti_360_helper import 
KITTI360Bbox3D, KITTI3602NUPLAN_IMU_CALIBRATION, get_lidar_extrinsic -from d123.conversion.datasets.kitti_360.labels import KIITI360_DETECTION_NAME_DICT,kittiId2label,BBOX_LABLES_TO_DETECTION_NAME_DICT +from d123.conversion.datasets.kitti_360.labels import KITTI360_DETECTION_NAME_DICT, kittiId2label, BBOX_LABLES_TO_DETECTION_NAME_DICT from d123.conversion.datasets.kitti_360.kitti_360_map_conversion import ( convert_kitti360_map_with_writer ) @@ -61,7 +54,6 @@ from d123.geometry.rotation import EulerAngles KITTI360_DT: Final[float] = 0.1 -SORT_BY_TIMESTAMP: Final[bool] = True KITTI360_DATA_ROOT = Path(os.environ["KITTI360_DATA_ROOT"]) @@ -519,7 +511,7 @@ def _extract_detections( else: lable = child.find('label').text name = BBOX_LABLES_TO_DETECTION_NAME_DICT.get(lable, 'unknown') - if child.find('transform') is None or name not in KIITI360_DETECTION_NAME_DICT.keys(): + if child.find('transform') is None or name not in KITTI360_DETECTION_NAME_DICT.keys(): continue obj = KITTI360Bbox3D() obj.parseBbox(child) @@ -535,7 +527,7 @@ def _extract_detections( detections_states[frame].append(obj.get_state_array()) detections_velocity[frame].append(np.array([0.0, 0.0, 0.0])) detections_tokens[frame].append(str(obj.globalID)) - detections_types[frame].append(KIITI360_DETECTION_NAME_DICT[obj.name]) + detections_types[frame].append(KITTI360_DETECTION_NAME_DICT[obj.name]) else: global_ID = obj.globalID dynamic_objs[global_ID].append(obj) @@ -572,7 +564,7 @@ def _extract_detections( detections_states[frame].append(obj.get_state_array()) detections_velocity[frame].append(vel) detections_tokens[frame].append(str(obj.globalID)) - detections_types[frame].append(KIITI360_DETECTION_NAME_DICT[obj.name]) + detections_types[frame].append(KITTI360_DETECTION_NAME_DICT[obj.name]) box_detection_wrapper_all: List[BoxDetectionWrapper] = [] for frame in range(ts_len): diff --git a/d123/conversion/datasets/kitti_360/kitti_360_map_conversion.py b/d123/conversion/datasets/kitti_360/kitti_360_map_conversion.py index 17f047dc..23a9d944 100644 --- a/d123/conversion/datasets/kitti_360/kitti_360_map_conversion.py +++ b/d123/conversion/datasets/kitti_360/kitti_360_map_conversion.py @@ -1,22 +1,18 @@ import os -import warnings from pathlib import Path -from typing import Dict, List, Optional +from typing import List import geopandas as gpd import numpy as np import pandas as pd import xml.etree.ElementTree as ET -import pyogrio -from shapely.geometry import LineString import shapely.geometry as geom from d123.conversion.utils.map_utils.road_edge.road_edge_2d_utils import ( get_road_edge_linear_rings, split_line_geometry_by_max_length, ) -from d123.datatypes.maps.gpkg.gpkg_utils import get_all_rows_with_value, get_row_with_value -from d123.datatypes.maps.map_datatypes import MapLayer, RoadEdgeType, RoadLineType +from d123.datatypes.maps.map_datatypes import RoadEdgeType from d123.geometry.polyline import Polyline3D from d123.conversion.datasets.kitti_360.kitti_360_helper import KITTI360_MAP_Bbox3D from d123.conversion.map_writer.abstract_map_writer import AbstractMapWriter @@ -91,7 +87,6 @@ def _extract_road_edge_df(objs: list[KITTI360_MAP_Bbox3D]) -> gpd.GeoDataFrame: road_edge_types = [] for idx in range(len(road_edges)): ids.append(idx) - # TODO @DanielDauner: Figure out if other types should/could be assigned here. 
road_edge_types.append(int(RoadEdgeType.ROAD_EDGE_BOUNDARY)) data = pd.DataFrame({"id": ids, "road_edge_type": road_edge_types}) diff --git a/d123/conversion/datasets/kitti_360/labels.py b/d123/conversion/datasets/kitti_360/labels.py index 45e2d315..aae1c397 100644 --- a/d123/conversion/datasets/kitti_360/labels.py +++ b/d123/conversion/datasets/kitti_360/labels.py @@ -184,7 +184,7 @@ def assureSingleInstanceName( name ): "caravan": "caravan", } -KIITI360_DETECTION_NAME_DICT = { +KITTI360_DETECTION_NAME_DICT = { "traffic light": DetectionType.SIGN, "traffic sign": DetectionType.SIGN, "person": DetectionType.PEDESTRIAN, diff --git a/d123/conversion/datasets/kitti_360/preprocess_detection.py b/d123/conversion/datasets/kitti_360/preprocess_detection.py index 18bbc125..803d162c 100644 --- a/d123/conversion/datasets/kitti_360/preprocess_detection.py +++ b/d123/conversion/datasets/kitti_360/preprocess_detection.py @@ -32,7 +32,7 @@ PATH_POSES_ROOT = KITTI360_DATA_ROOT / DIR_POSES from d123.conversion.datasets.kitti_360.kitti_360_helper import KITTI360Bbox3D, KITTI3602NUPLAN_IMU_CALIBRATION, get_lidar_extrinsic -from d123.conversion.datasets.kitti_360.labels import KIITI360_DETECTION_NAME_DICT, kittiId2label, BBOX_LABLES_TO_DETECTION_NAME_DICT +from d123.conversion.datasets.kitti_360.labels import KITTI360_DETECTION_NAME_DICT, kittiId2label, BBOX_LABLES_TO_DETECTION_NAME_DICT def _bbox_xml_path(log_name: str) -> Path: if log_name == "2013_05_28_drive_0004_sync": From 9b6473f2275a14b02298557d642fc03b228960da Mon Sep 17 00:00:00 2001 From: jbwang <1159270049@qq.com> Date: Thu, 16 Oct 2025 20:30:13 +0800 Subject: [PATCH 084/145] refactor kitti360 --- d123/conversion/datasets/kitti_360/preprocess_detection.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/d123/conversion/datasets/kitti_360/preprocess_detection.py b/d123/conversion/datasets/kitti_360/preprocess_detection.py index 803d162c..99e84681 100644 --- a/d123/conversion/datasets/kitti_360/preprocess_detection.py +++ b/d123/conversion/datasets/kitti_360/preprocess_detection.py @@ -65,7 +65,7 @@ def _collect_static_objects(log_name: str) -> List[KITTI360Bbox3D]: lable = child.find('label').text name = BBOX_LABLES_TO_DETECTION_NAME_DICT.get(lable, 'unknown') timestamp = int(child.find('timestamp').text) # -1 for static objects - if child.find("transform") is None or name not in KIITI360_DETECTION_NAME_DICT or timestamp != -1: + if child.find("transform") is None or name not in KITTI360_DETECTION_NAME_DICT or timestamp != -1: continue obj = KITTI360Bbox3D() obj.parseBbox(child) From d78421b14b15f5ab1925117712de8a3b7076a2ad Mon Sep 17 00:00:00 2001 From: Daniel Dauner Date: Fri, 17 Oct 2025 09:54:44 +0200 Subject: [PATCH 085/145] Push unfinished pandaset converter. (still bugs in coordinate conversion). 
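The open coordinate bug is the frame change from Pandaset's world convention (x: right, y: forward, z: up) to the ISO 8855 convention used in 123D (x: forward, y: left, z: up). A minimal numpy sketch of the intended basis change, mirroring the `_rotate_pose_to_iso_coordinates` helper added below; the in-tree helper applies the same right-multiplication to the rotation block of a StateSE3 transformation matrix, and the translation handling is the part still under investigation:

    import numpy as np

    # Columns of F are the ISO body axes expressed in Pandaset coordinates:
    # ISO x (forward) = Pandaset +y, ISO y (left) = Pandaset -x, ISO z (up) = Pandaset +z.
    F = np.array(
        [
            [0.0, 1.0, 0.0],
            [-1.0, 0.0, 0.0],
            [0.0, 0.0, 1.0],
        ]
    ).T

    R_world_body = np.eye(3)  # rotation block of an identity Pandaset pose
    R_iso = R_world_body @ F  # re-express the body axes; the world frame is unchanged

    # The rotated body x-axis (ISO forward) now points along Pandaset forward (+y).
    assert np.allclose(R_iso[:, 0], [0.0, 1.0, 0.0])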
--- .../viser/elements/map_elements.py | 78 ++-- .../datasets/pandaset/pandaset_converter.py | 364 +++++++++++------- .../conversion/log_writer/arrow_log_writer.py | 2 +- d123/datatypes/detections/detection.py | 5 +- d123/datatypes/scene/arrow/arrow_scene.py | 15 +- .../{pandaset.yaml => pandaset_dataset.yaml} | 2 +- .../config/conversion/default_conversion.yaml | 4 +- notebooks/viz/bev_matplotlib.ipynb | 17 +- test_viser.py | 3 +- 9 files changed, 293 insertions(+), 197 deletions(-) rename d123/script/config/conversion/datasets/{pandaset.yaml => pandaset_dataset.yaml} (98%) diff --git a/d123/common/visualization/viser/elements/map_elements.py b/d123/common/visualization/viser/elements/map_elements.py index edafef5c..4532eb90 100644 --- a/d123/common/visualization/viser/elements/map_elements.py +++ b/d123/common/visualization/viser/elements/map_elements.py @@ -30,6 +30,9 @@ def _get_map_trimesh_dict( viser_config: ViserConfig, ) -> Dict[str, trimesh.Trimesh]: + # Dictionary to hold the output trimesh meshes. + output_trimesh_dict: Dict[str, trimesh.Trimesh] = {} + # Unpack scene center for translation of map objects. scene_center: Point3D = initial_ego_state.center.point_3d scene_center_array = scene_center.array @@ -43,40 +46,41 @@ def _get_map_trimesh_dict( MapLayer.CARPARK, MapLayer.GENERIC_DRIVABLE, ] - map_objects_dict = scene.get_map_api().get_proximal_map_objects( - scene_center.point_2d, - radius=viser_config.map_radius, - layers=map_layers, - ) - - # Create trimesh meshes for each map layer. - trimesh_dict = {} - for map_layer in map_objects_dict.keys(): - surface_meshes = [] - for map_surface in map_objects_dict[map_layer]: - map_surface: AbstractSurfaceMapObject - - trimesh_mesh = map_surface.trimesh_mesh - trimesh_mesh.vertices -= scene_center_array - - # Adjust height of non-road surfaces to avoid z-fighting in the visualization. - if map_layer in [ - MapLayer.WALKWAY, - MapLayer.CROSSWALK, - MapLayer.CARPARK, - ]: - trimesh_mesh.vertices[..., Point3DIndex.Z] += viser_config.map_non_road_z_offset - - # If the map does not have z-values, we place the surfaces on the ground level of the ego vehicle. - if not scene.log_metadata.map_metadata.map_has_z: - trimesh_mesh.vertices[..., Point3DIndex.Z] += ( - scene_center.z - initial_ego_state.vehicle_parameters.height / 2 - ) - - # Color the mesh based on the map layer type. - trimesh_mesh.visual.face_colors = MAP_SURFACE_CONFIG[map_layer].fill_color.rgba - surface_meshes.append(trimesh_mesh) - - trimesh_dict[f"{map_layer.serialize()}"] = trimesh.util.concatenate(surface_meshes) - - return trimesh_dict + map_api = scene.get_map_api() + if map_api is not None: + map_objects_dict = map_api.get_proximal_map_objects( + scene_center.point_2d, + radius=viser_config.map_radius, + layers=map_layers, + ) + + # Create trimesh meshes for each map layer. + for map_layer in map_objects_dict.keys(): + surface_meshes = [] + for map_surface in map_objects_dict[map_layer]: + map_surface: AbstractSurfaceMapObject + + trimesh_mesh = map_surface.trimesh_mesh + trimesh_mesh.vertices -= scene_center_array + + # Adjust height of non-road surfaces to avoid z-fighting in the visualization. + if map_layer in [ + MapLayer.WALKWAY, + MapLayer.CROSSWALK, + MapLayer.CARPARK, + ]: + trimesh_mesh.vertices[..., Point3DIndex.Z] += viser_config.map_non_road_z_offset + + # If the map does not have z-values, we place the surfaces on the ground level of the ego vehicle. 
+ if not scene.log_metadata.map_metadata.map_has_z: + trimesh_mesh.vertices[..., Point3DIndex.Z] += ( + scene_center.z - initial_ego_state.vehicle_parameters.height / 2 + ) + + # Color the mesh based on the map layer type. + trimesh_mesh.visual.face_colors = MAP_SURFACE_CONFIG[map_layer].fill_color.rgba + surface_meshes.append(trimesh_mesh) + + output_trimesh_dict[f"{map_layer.serialize()}"] = trimesh.util.concatenate(surface_meshes) + + return output_trimesh_dict diff --git a/d123/conversion/datasets/pandaset/pandaset_converter.py b/d123/conversion/datasets/pandaset/pandaset_converter.py index 3715160f..55260d09 100644 --- a/d123/conversion/datasets/pandaset/pandaset_converter.py +++ b/d123/conversion/datasets/pandaset/pandaset_converter.py @@ -4,17 +4,20 @@ from pathlib import Path from typing import Dict, List, Optional, Tuple, Union +import numpy as np + from d123.conversion.abstract_dataset_converter import AbstractDatasetConverter from d123.conversion.dataset_converter_config import DatasetConverterConfig from d123.conversion.datasets.pandaset.pandaset_constants import ( + PANDASET_BOX_DETECTION_FROM_STR, + PANDASET_BOX_DETECTION_TO_DEFAULT, PANDASET_CAMERA_MAPPING, PANDASET_LOG_NAMES, PANDASET_SPLITS, ) from d123.conversion.log_writer.abstract_log_writer import AbstractLogWriter from d123.conversion.map_writer.abstract_map_writer import AbstractMapWriter -from d123.datatypes.detections.detection import BoxDetectionWrapper -from d123.datatypes.maps.map_metadata import MapMetadata +from d123.datatypes.detections.detection import BoxDetectionMetadata, BoxDetectionSE3, BoxDetectionWrapper from d123.datatypes.scene.scene_metadata import LogMetadata from d123.datatypes.sensors.camera.pinhole_camera import ( PinholeCameraMetadata, @@ -29,7 +32,14 @@ get_pandaset_chrysler_pacifica_parameters, rear_axle_se3_to_center_se3, ) -from d123.geometry import StateSE3, Vector3D +from d123.geometry import BoundingBoxSE3, StateSE3, Vector3D +from d123.geometry.geometry_index import BoundingBoxSE3Index, EulerAnglesIndex +from d123.geometry.transform.transform_se3 import ( + convert_absolute_to_relative_se3_array, + translate_se3_along_body_frame, +) +from d123.geometry.utils.constants import DEFAULT_PITCH, DEFAULT_ROLL +from d123.geometry.utils.rotation_utils import get_quaternion_array_from_euler_array class PandasetConverter(AbstractDatasetConverter): @@ -64,11 +74,11 @@ def _collect_log_paths(self) -> Dict[str, List[Path]]: log_name = log_folder.name assert log_name in PANDASET_LOG_NAMES, f"Log name {log_name} is not recognized." if (log_name in self._train_log_names) and ("pandaset_train" in self._splits): - log_paths_and_split.append((log_folder, "train")) + log_paths_and_split.append((log_folder, "pandaset_train")) elif (log_name in self._val_log_names) and ("pandaset_val" in self._splits): - log_paths_and_split.append((log_folder, "val")) + log_paths_and_split.append((log_folder, "pandaset_val")) elif (log_name in self._test_log_names) and ("pandaset_test" in self._splits): - log_paths_and_split.append((log_folder, "test")) + log_paths_and_split.append((log_folder, "pandaset_test")) return log_paths_and_split @@ -108,36 +118,36 @@ def convert_log(self, log_index: int, log_writer: AbstractLogWriter) -> None: # 3. 
Process source log data if log_needs_writing: + # Read files from pandaset timesteps = _read_json(source_log_path / "meta" / "timestamps.json") gps: List[Dict[str, float]] = _read_json(source_log_path / "meta" / "gps.json") lidar_poses: List[Dict[str, Dict[str, float]]] = _read_json(source_log_path / "lidar" / "poses.json") + camera_poses: Dict[str, List[Dict[str, Dict[str, float]]]] = { + camera_name: _read_json(source_log_path / "camera" / camera_name / "poses.json") + for camera_name in PANDASET_CAMERA_MAPPING.keys() + } + # Write data to log writer for iteration, timestep_s in enumerate(timesteps): - iteration_str = f"{iteration:02d}" ego_state = _extract_pandaset_sensor_ego_state(gps[iteration], lidar_poses[iteration]) log_writer.write( timestamp=TimePoint.from_s(timestep_s), ego_state=ego_state, - box_detections=_extract_pandaset_sensor_box_detections(source_log_path, iteration_str, ego_state), - cameras=_extract_pandaset_sensor_camera(self.dataset_converter_config), + box_detections=_extract_pandaset_box_detections(source_log_path, iteration, ego_state), + cameras=_extract_pandaset_sensor_camera( + source_log_path, + iteration, + ego_state, + camera_poses, + self.dataset_converter_config, + ), ) # 4. Finalize log writing log_writer.close() -def _get_pandaset_sensor_map_metadata(split: str, log_name: str) -> MapMetadata: - return MapMetadata( - dataset="pandaset-sensor", - split=split, - log_name=log_name, - location=None, # TODO: Add location information, e.g. city name. - map_has_z=True, - map_is_local=True, - ) - - def _get_pandaset_camera_metadata(source_log_path: Path) -> Dict[PinholeCameraType, PinholeCameraMetadata]: all_cameras_folder = source_log_path / "camera" @@ -174,87 +184,31 @@ def _get_pandaset_lidar_metadata(log_path: Path) -> Dict[LiDARType, LiDARMetadat return {} -def _extract_pandaset_sensor_box_detections( - source_log_path: Path, - iteration_str: str, - ego_state_se3: EgoStateSE3, -) -> BoxDetectionWrapper: - - # TODO: Implement - - cuboids_file = source_log_path / "annotations" / "cuboids" / f"{iteration_str}.pkl.gz" - - if not cuboids_file.exists(): - return BoxDetectionWrapper(box_detections=[]) - - # cuboid_df = _read_pkl_gz(cuboids_file) - - # labels = list(cuboid_df["label"]) - # position_x = list(cuboid_df["position_x"]) - # position_y = list(cuboid_df["position_y"]) - # position_z = list(cuboid_df["position_z"]) - # yaws = list(cuboid_df["yaw"]) - - # annotations_slice = _get_pandaset_camera_metadata(annotations_df, lidar_timestamp_ns) - # num_detections = len(annotations_slice) - - # detections_state = np.zeros((num_detections, len(BoundingBoxSE3Index)), dtype=np.float64) - # detections_velocity = np.zeros((num_detections, len(Vector3DIndex)), dtype=np.float64) - # detections_token: List[str] = annotations_slice["track_uuid"].tolist() - # detections_types: List[DetectionType] = [] - - # for detection_idx, (_, row) in enumerate(annotations_slice.iterrows()): - # row = row.to_dict() - - # detections_state[detection_idx, BoundingBoxSE3Index.XYZ] = [row["tx_m"], row["ty_m"], row["tz_m"]] - # detections_state[detection_idx, BoundingBoxSE3Index.QUATERNION] = [row["qw"], row["qx"], row["qy"], row["qz"]] - # detections_state[detection_idx, BoundingBoxSE3Index.EXTENT] = [row["length_m"], row["width_m"], row["height_m"]] - - # pandaset_detection_type = PANDASET_BOX_DETECTION_MAPPING.deserialize(row["category"]) - # detections_types.append(PANDASET_BOX_DETECTION_MAPPING[pandaset_detection_type]) - - # detections_state[:, BoundingBoxSE3Index.STATE_SE3] = 
convert_relative_to_absolute_se3_array( - # origin=ego_state_se3.rear_axle_se3, - # se3_array=detections_state[:, BoundingBoxSE3Index.STATE_SE3], - # ) - - # box_detections: List[BoxDetectionSE3] = [] - # for detection_idx in range(num_detections): - # box_detections.append( - # BoxDetectionSE3( - # metadata=BoxDetectionMetadata( - # detection_type=detections_types[detection_idx], - # timepoint=None, - # track_token=detections_token[detection_idx], - # confidence=None, - # ), - # bounding_box_se3=BoundingBoxSE3.from_array(detections_state[detection_idx]), - # velocity=Vector3D.from_array(detections_velocity[detection_idx]), - # ) - # ) - - # return BoxDetectionWrapper(box_detections=box_detections) - return BoxDetectionWrapper(box_detections=[]) - - def _extract_pandaset_sensor_ego_state(gps: Dict[str, float], lidar_pose: Dict[str, Dict[str, float]]) -> EgoStateSE3: - rear_axle_pose = StateSE3( - x=lidar_pose["position"]["x"], - y=lidar_pose["position"]["y"], - z=lidar_pose["position"]["z"], - qw=lidar_pose["heading"]["w"], - qx=lidar_pose["heading"]["x"], - qy=lidar_pose["heading"]["y"], - qz=lidar_pose["heading"]["z"], + rear_axle_pose = _main_lidar_to_rear_axle( + StateSE3( + x=lidar_pose["position"]["x"], + y=lidar_pose["position"]["y"], + z=lidar_pose["position"]["z"], + qw=lidar_pose["heading"]["w"], + qx=lidar_pose["heading"]["x"], + qy=lidar_pose["heading"]["y"], + qz=lidar_pose["heading"]["z"], + ) ) + # rear_axle_pose = translate_se3_along_body_frame( + # main_lidar_pose, + # vector_3d=Vector3D(x=-0.83, y=0.0, z=0.0), + # ) vehicle_parameters = get_pandaset_chrysler_pacifica_parameters() center = rear_axle_se3_to_center_se3(rear_axle_se3=rear_axle_pose, vehicle_parameters=vehicle_parameters) # TODO: Add script to calculate the dynamic state from log sequence. 
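+ # One plausible approach (a sketch, not implemented here): finite-difference
+ # consecutive rear-axle poses over the 0.1 s timestep, i.e. v_k ~ (p_{k+1} - p_k) / dt
+ # rotated into the body frame. The gps xvel/yvel values are left unused below,
+ # presumably until their reference frame is verified against the ISO conversion.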
dynamic_state = DynamicStateSE3( - velocity=Vector3D(x=gps["xvel"], y=gps["yvel"], z=gps["zvel"]), + # velocity=Vector3D(x=gps["xvel"], y=gps["yvel"], z=0.0), + velocity=Vector3D(x=0.0, y=0.0, z=0.0), acceleration=Vector3D(x=0.0, y=0.0, z=0.0), angular_velocity=Vector3D(x=0.0, y=0.0, z=0.0), ) @@ -267,58 +221,134 @@ def _extract_pandaset_sensor_ego_state(gps: Dict[str, float], lidar_pose: Dict[s ) +def _extract_pandaset_box_detections( + source_log_path: Path, iteration: int, ego_state_se3: EgoStateSE3 +) -> BoxDetectionWrapper: + + # NOTE: The following provided cuboid annotations are not stored in 123D + # - stationary + # - camera_used + # - attributes.object_motion + # - cuboids.sibling_id + # - cuboids.sensor_id + # - attributes.pedestrian_behavior + # - attributes.pedestrian_age + # - attributes.rider_status + # https://github.com/scaleapi/pandaset-devkit/blob/59be180e2a3f3e37f6d66af9e67bf944ccbf6ec0/README.md?plain=1#L288 + + iteration_str = f"{iteration:02d}" + cuboids_file = source_log_path / "annotations" / "cuboids" / f"{iteration_str}.pkl.gz" + + if not cuboids_file.exists(): + return BoxDetectionWrapper(box_detections=[]) + + cuboid_df = _read_pkl_gz(cuboids_file) + + # Read cuboid data + box_label_names = list(cuboid_df["label"]) + box_uuids = list(cuboid_df["uuid"]) + num_boxes = len(box_uuids) + + box_position_x = np.array(cuboid_df["position.x"], dtype=np.float64) + box_position_y = np.array(cuboid_df["position.y"], dtype=np.float64) + box_position_z = np.array(cuboid_df["position.z"], dtype=np.float64) + box_points = np.stack([box_position_x, box_position_y, box_position_z], axis=-1) + box_yaws = np.array(cuboid_df["yaw"], dtype=np.float64) + + # NOTE: Rather strange format to have dimensions.x as width, dimensions.y as length + box_widths = np.array(cuboid_df["dimensions.x"], dtype=np.float64) + box_lengths = np.array(cuboid_df["dimensions.y"], dtype=np.float64) + box_heights = np.array(cuboid_df["dimensions.z"], dtype=np.float64) + + # Create se3 array for boxes (i.e. convert rotation to quaternion) + box_euler_angles_array = np.zeros((num_boxes, len(EulerAnglesIndex)), dtype=np.float64) + box_euler_angles_array[..., EulerAnglesIndex.ROLL] = DEFAULT_ROLL + box_euler_angles_array[..., EulerAnglesIndex.PITCH] = DEFAULT_PITCH + box_euler_angles_array[..., EulerAnglesIndex.YAW] = box_yaws + + box_se3_array = np.zeros((num_boxes, len(BoundingBoxSE3Index)), dtype=np.float64) + box_se3_array[:, BoundingBoxSE3Index.XYZ] = box_points + box_se3_array[:, BoundingBoxSE3Index.QUATERNION] = get_quaternion_array_from_euler_array(box_euler_angles_array) + box_se3_array[:, BoundingBoxSE3Index.EXTENT] = np.stack([box_lengths, box_widths, box_heights], axis=-1) + + # Fill bounding box detections and return + box_detections: List[BoxDetectionSE3] = [] + for box_idx in range(num_boxes): + pandaset_box_detection_type = PANDASET_BOX_DETECTION_FROM_STR[box_label_names[box_idx]] + box_detection_type = PANDASET_BOX_DETECTION_TO_DEFAULT[pandaset_box_detection_type] + + # Convert coordinates to ISO 8855 + # NOTE: This would be faster over a batch operation.
+ box_se3_array[box_idx, BoundingBoxSE3Index.STATE_SE3] = _rotate_pose_to_iso_coordinates( + StateSE3.from_array(box_se3_array[box_idx, BoundingBoxSE3Index.STATE_SE3], copy=False) + ).array + + box_detection_se3 = BoxDetectionSE3( + metadata=BoxDetectionMetadata( + detection_type=box_detection_type, + track_token=box_uuids[box_idx], + ), + bounding_box_se3=BoundingBoxSE3.from_array(box_se3_array[box_idx]), + velocity=Vector3D(0.0, 0.0, 0.0), # TODO: Add velocity + ) + box_detections.append(box_detection_se3) + + return BoxDetectionWrapper(box_detections=box_detections) + + def _extract_pandaset_sensor_camera( + source_log_path: Path, + iteration: int, + ego_state_se3: EgoStateSE3, + camera_poses: Dict[str, List[Dict[str, Dict[str, float]]]], dataset_converter_config: DatasetConverterConfig, ) -> Dict[PinholeCameraType, Tuple[Union[str, bytes], StateSE3]]: - # TODO: Implement + camera_dict: Dict[PinholeCameraType, Tuple[Union[str, bytes], StateSE3]] = {} + iteration_str = f"{iteration:02d}" - # camera_dict: Dict[PinholeCameraType, Tuple[Union[str, bytes], StateSE3]] = {} - # split = source_log_path.parent.name - # log_id = source_log_path.name - - # source_dataset_dir = source_log_path.parent.parent - - # for _, row in egovehicle_se3_sensor_df.iterrows(): - # row = row.to_dict() - # if row["sensor_name"] not in PANDASET_CAMERA_MAPPING: - # continue - - # camera_name = row["sensor_name"] - # camera_type = PANDASET_CAMERA_MAPPING[camera_name] - - # relative_image_path = find_closest_target_fpath( - # split=split, - # log_id=log_id, - # src_sensor_name="lidar", - # src_timestamp_ns=lidar_timestamp_ns, - # target_sensor_name=camera_name, - # synchronization_df=synchronization_df, - # ) - # if relative_image_path is not None: - # absolute_image_path = source_dataset_dir / relative_image_path - # assert absolute_image_path.exists() - - # # TODO: Adjust for finer IMU timestamps to correct the camera extrinsic. - # camera_extrinsic = StateSE3( - # x=row["tx_m"], - # y=row["ty_m"], - # z=row["tz_m"], - # qw=row["qw"], - # qx=row["qx"], - # qy=row["qy"], - # qz=row["qz"], - # ) - # camera_data = None - # if dataset_converter_config.camera_store_option == "path": - # camera_data = str(relative_image_path) - # elif dataset_converter_config.camera_store_option == "binary": - # with open(absolute_image_path, "rb") as f: - # camera_data = f.read() - # camera_dict[camera_type] = camera_data, camera_extrinsic - - # return camera_dict - return {} + if dataset_converter_config.include_cameras: + + for camera_name, camera_type in PANDASET_CAMERA_MAPPING.items(): + image_rel_path = f"camera/{camera_name}/{iteration_str}.jpg" + + image_abs_path = source_log_path / image_rel_path + assert image_abs_path.exists(), f"Camera image file {str(image_abs_path)} does not exist." 
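+ # The camera pose read below is a world-frame pose in Pandaset axes: it is
+ # first rotated into ISO 8855 body axes and then expressed relative to the
+ # ego rear axle. In homogeneous-transform terms (a hedged sketch; T_* are
+ # 4x4 matrices): T_cam_ego = inv(T_ego_world) @ T_cam_world, which is the
+ # operation convert_absolute_to_relative_se3_array is expected to perform
+ # on batched SE3 states.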
+ + camera_pose_dict = camera_poses[camera_name][iteration] + camera_extrinsic = _rotate_pose_to_iso_coordinates( + StateSE3( + x=camera_pose_dict["position"]["x"], + y=camera_pose_dict["position"]["y"], + z=camera_pose_dict["position"]["z"], + qw=camera_pose_dict["heading"]["w"], + qx=camera_pose_dict["heading"]["x"], + qy=camera_pose_dict["heading"]["y"], + qz=camera_pose_dict["heading"]["z"], + ) + ) + # camera_extrinsic = StateSE3( + # x=camera_pose_dict["position"]["x"], + # y=camera_pose_dict["position"]["y"], + # z=camera_pose_dict["position"]["z"], + # qw=camera_pose_dict["heading"]["w"], + # qx=camera_pose_dict["heading"]["x"], + # qy=camera_pose_dict["heading"]["y"], + # qz=camera_pose_dict["heading"]["z"], + # ) + camera_extrinsic = StateSE3.from_array( + convert_absolute_to_relative_se3_array(ego_state_se3.rear_axle_se3, camera_extrinsic.array), copy=True + ) + + camera_data = None + if dataset_converter_config.camera_store_option == "path": + camera_data = str(image_rel_path) + elif dataset_converter_config.camera_store_option == "binary": + with open(image_abs_path, "rb") as f: + camera_data = f.read() + camera_dict[camera_type] = camera_data, camera_extrinsic + + return camera_dict def _extract_lidar(lidar_pc, dataset_converter_config: DatasetConverterConfig) -> Dict[LiDARType, Optional[str]]: @@ -338,3 +368,59 @@ def _read_pkl_gz(pkl_gz_file: Path): with gzip.open(pkl_gz_file, "rb") as f: pkl_data = pickle.load(f) return pkl_data + + +def _rotate_pose_to_iso_coordinates(pose: StateSE3) -> StateSE3: + """Helper function for pandaset to rotate a pose to ISO coordinate system (x: forward, y: left, z: up). + + NOTE: Pandaset uses a different coordinate system (x: right, y: forward, z: up). + [1] https://arxiv.org/pdf/2112.12610.pdf + + :param pose: The input pose. + :return: The rotated pose. 
+ """ + F = np.array( + [ + [0.0, 1.0, 0.0], # new X = old Y (forward) + [-1.0, 0.0, 0.0], # new Y = old -X (left) + [0.0, 0.0, 1.0], # new Z = old Z (up) + ], + dtype=np.float64, + ).T + # F = np.eye(3, dtype=np.float64) + transformation_matrix = pose.transformation_matrix.copy() + transformation_matrix[0:3, 0:3] = transformation_matrix[0:3, 0:3] @ F + + # transformation_matrix[0, 3] = pose.y + # transformation_matrix[1, 3] = -pose.x + # transformation_matrix[2, 3] = pose.z + + return StateSE3.from_transformation_matrix(transformation_matrix) + + +def _main_lidar_to_rear_axle(pose: StateSE3) -> StateSE3: + + F = np.array( + [ + [0.0, 1.0, 0.0], # new X = old Y (forward) + [-1.0, 0.0, 0.0], # new Y = old X (left) + [0.0, 0.0, 1.0], # new Z = old Z (up) + ], + dtype=np.float64, + ).T + # F = np.eye(3, dtype=np.float64) + transformation_matrix = pose.transformation_matrix.copy() + transformation_matrix[0:3, 0:3] = transformation_matrix[0:3, 0:3] @ F + + rotated_pose = StateSE3.from_transformation_matrix(transformation_matrix) + + imu_pose = translate_se3_along_body_frame( + rotated_pose, + vector_3d=Vector3D(x=-0.840, y=0.0, z=0.0), + ) + + # transformation_matrix[0, 3] = pose.y + # transformation_matrix[1, 3] = -pose.x + # transformation_matrix[2, 3] = pose.z + + return imu_pose diff --git a/d123/conversion/log_writer/arrow_log_writer.py b/d123/conversion/log_writer/arrow_log_writer.py index 4d27492d..a1b70ea2 100644 --- a/d123/conversion/log_writer/arrow_log_writer.py +++ b/d123/conversion/log_writer/arrow_log_writer.py @@ -119,7 +119,7 @@ def write( for box_detection in box_detections: box_detection_state.append(box_detection.bounding_box.array) - box_detection_velocity.append(box_detection.velocity.array) + box_detection_velocity.append(box_detection.velocity.array) # TODO: make optional box_detection_token.append(box_detection.metadata.track_token) box_detection_type.append(int(box_detection.metadata.detection_type)) diff --git a/d123/datatypes/detections/detection.py b/d123/datatypes/detections/detection.py index 3e7e09a2..916e9c4e 100644 --- a/d123/datatypes/detections/detection.py +++ b/d123/datatypes/detections/detection.py @@ -14,9 +14,10 @@ class BoxDetectionMetadata: detection_type: DetectionType - timepoint: TimePoint # TODO: Consider removing or making optional track_token: str - confidence: Optional[float] = None + timepoint: Optional[TimePoint] = None # TimePoint when the detection was made, if available + confidence: Optional[float] = None # Confidence score of the detection, if available + num_lidar_points: Optional[int] = None # Number of LiDAR points within the bounding box @dataclass diff --git a/d123/datatypes/scene/arrow/arrow_scene.py b/d123/datatypes/scene/arrow/arrow_scene.py index 928f1205..f0ca15aa 100644 --- a/d123/datatypes/scene/arrow/arrow_scene.py +++ b/d123/datatypes/scene/arrow/arrow_scene.py @@ -83,14 +83,15 @@ def get_scene_extraction_metadata(self) -> SceneExtractionMetadata: def get_map_api(self) -> Optional[AbstractMap]: map_api: Optional[AbstractMap] = None - if self.log_metadata.map_metadata.map_is_local: - if self._local_map_api is None: - map_api = get_local_map_api(self.log_metadata.split, self.log_name) - self._local_map_api = map_api + if self.log_metadata.map_metadata is not None: + if self.log_metadata.map_metadata.map_is_local: + if self._local_map_api is None: + map_api = get_local_map_api(self.log_metadata.split, self.log_name) + self._local_map_api = map_api + else: + map_api = self._local_map_api else: - map_api = self._local_map_api - 
else: - map_api = get_global_map_api(self.log_metadata.dataset, self.log_metadata.location) + map_api = get_global_map_api(self.log_metadata.dataset, self.log_metadata.location) return map_api def get_timepoint_at_iteration(self, iteration: int) -> TimePoint: diff --git a/d123/script/config/conversion/datasets/pandaset.yaml b/d123/script/config/conversion/datasets/pandaset_dataset.yaml similarity index 98% rename from d123/script/config/conversion/datasets/pandaset.yaml rename to d123/script/config/conversion/datasets/pandaset_dataset.yaml index d36db5f7..115ab4e1 100644 --- a/d123/script/config/conversion/datasets/pandaset.yaml +++ b/d123/script/config/conversion/datasets/pandaset_dataset.yaml @@ -3,7 +3,7 @@ pandaset_dataset: _convert_: 'all' splits: ["pandaset_train", "pandaset_val", "pandaset_test"] - pandaset_data_root: "/media/nvme1/argoverse" + pandaset_data_root: "/media/nvme1/pandaset" dataset_converter_config: _target_: d123.conversion.dataset_converter_config.DatasetConverterConfig diff --git a/d123/script/config/conversion/default_conversion.yaml b/d123/script/config/conversion/default_conversion.yaml index 94004587..32421c52 100644 --- a/d123/script/config/conversion/default_conversion.yaml +++ b/d123/script/config/conversion/default_conversion.yaml @@ -16,11 +16,11 @@ defaults: - log_writer: arrow_log_writer - map_writer: gpkg_map_writer - datasets: - - nuplan_mini_dataset + # - nuplan_mini_dataset # - carla_dataset # - wopd_dataset # - av2_sensor_dataset - # - pandaset_dataset + - pandaset_dataset - _self_ diff --git a/notebooks/viz/bev_matplotlib.ipynb b/notebooks/viz/bev_matplotlib.ipynb index 37b5ef20..5abe6dfa 100644 --- a/notebooks/viz/bev_matplotlib.ipynb +++ b/notebooks/viz/bev_matplotlib.ipynb @@ -44,7 +44,8 @@ "# splits = [\"wopd_val\"]\n", "# splits = [\"carla\"]\n", "# splits = [\"nuplan-mini_test\"]\n", - "splits = [\"av2-sensor-mini_train\"]\n", + "# splits = [\"av2-sensor-mini_train\"]\n", + "splits = [\"pandaset_train\"]\n", "# log_names = None\n", "\n", "\n", @@ -57,7 +58,7 @@ " split_names=splits,\n", " log_names=log_names,\n", " scene_uuids=scene_uuids,\n", - " duration_s=10,\n", + " duration_s=6.0,\n", " history_s=0.0,\n", " timestamp_threshold_s=20,\n", " shuffle=True,\n", @@ -227,10 +228,12 @@ "\n", " ego_vehicle_state = scene.get_ego_state_at_iteration(iteration)\n", " box_detections = scene.get_box_detections_at_iteration(iteration)\n", + " map_api = scene.get_map_api()\n", "\n", " point_2d = ego_vehicle_state.bounding_box.center.state_se2.point_2d\n", " # add_debug_map_on_ax(ax, scene.get_map_api(), point_2d, radius=radius, route_lane_group_ids=None)\n", - " add_default_map_on_ax(ax, scene.get_map_api(), point_2d, radius=radius, route_lane_group_ids=None)\n", + " if map_api is not None:\n", + " add_default_map_on_ax(ax, map_api, point_2d, radius=radius, route_lane_group_ids=None)\n", " # add_traffic_lights_to_ax(ax, traffic_light_detections, scene.get_map_api())\n", "\n", " add_box_detections_to_ax(ax, box_detections)\n", @@ -256,13 +259,13 @@ "\n", "\n", "# scene_index = \n", - "iteration = 99\n", + "iteration = 1\n", "\n", "fig, ax = plt.subplots(1, 3, figsize=(15, 5))\n", "scene = np.random.choice(scenes)\n", - "_plot_scene_on_ax(ax[0], scene, iteration, radius=20)\n", - "_plot_scene_on_ax(ax[1], scene, iteration, radius=50)\n", - "_plot_scene_on_ax(ax[2], scene, iteration, radius=100)\n", + "_plot_scene_on_ax(ax[0], scene, iteration, radius=10)\n", + "_plot_scene_on_ax(ax[1], scene, iteration, radius=30)\n", + "_plot_scene_on_ax(ax[2], scene, 
iteration, radius=50)\n", "\n", "plt.show()\n" ] diff --git a/test_viser.py b/test_viser.py index ed04109b..50ca9d28 100644 --- a/test_viser.py +++ b/test_viser.py @@ -13,8 +13,9 @@ # splits = ["nuplan-mini_test", "nuplan-mini_train", "nuplan-mini_val"] # splits = ["nuplan_private_test"] # splits = ["carla"] - splits = ["wopd_val"] + # splits = ["wopd_val"] # splits = ["av2-sensor-mini_train"] + splits = ["pandaset_test", "pandaset_val", "pandaset_train"] log_names = None scene_uuids = None From 5589ee162a75396e91509f36c85547a6bf57680c Mon Sep 17 00:00:00 2001 From: Daniel Dauner Date: Fri, 17 Oct 2025 10:44:41 +0200 Subject: [PATCH 086/145] Rename pip package from `d123` to `py123d` (#53) --- .../visualization/viser/elements/__init__.py | 7 --- d123/geometry/__init__.py | 22 ------- .../config/common/default_dataset_paths.yaml | 12 ---- .../scene_builder/default_scene_builder.yaml | 4 -- .../config/common/worker/sequential.yaml | 2 - .../map_writer/gpkg_map_writer.yaml | 4 -- docs/conf.py | 2 +- docs/contributing.md | 8 +-- docs/datasets/av2.rst | 2 +- docs/datasets/carla.rst | 2 +- docs/datasets/kitti-360.rst | 2 +- docs/datasets/nuplan.rst | 2 +- docs/datasets/nuscenes.rst | 2 +- docs/datasets/template.rst | 2 +- docs/datasets/wopd.rst | 2 +- docs/geometry.rst | 60 +++++++++---------- docs/installation.md | 16 ++--- environment.yml | 2 +- notebooks/scene_rendering.ipynb | 26 ++++---- notebooks/scene_sensor_loading.ipynb | 16 ++--- notebooks/tools/merge_videos.ipynb | 4 +- notebooks/tools/plot_map_sizes.ipynb | 4 +- notebooks/viz/bev_matplotlib.ipynb | 44 +++++++------- notebooks/viz/bev_matplotlib_prediction.ipynb | 36 +++++------ notebooks/viz/camera_matplotlib.ipynb | 18 +++--- notebooks/viz/log_rendering.ipynb | 18 +++--- notebooks/viz/video_example.ipynb | 44 +++++++------- notebooks/viz/viser_testing_v2_scene.ipynb | 14 ++--- .../waymo_perception/lidar_testing.ipynb | 6 +- notebooks/waymo_perception/map_testing.ipynb | 14 ++--- notebooks/waymo_perception/testing.ipynb | 8 +-- {d123 => py123d}/__init__.py | 0 {d123 => py123d}/common/__init__.py | 0 .../common/multithreading/ray_execution.py | 2 +- .../common/multithreading/worker_parallel.py | 2 +- .../common/multithreading/worker_pool.py | 0 .../common/multithreading/worker_ray.py | 4 +- .../multithreading/worker_sequential.py | 2 +- .../common/multithreading/worker_utils.py | 2 +- {d123 => py123d}/common/utils/__init__.py | 0 {d123 => py123d}/common/utils/arrow_helper.py | 0 {d123 => py123d}/common/utils/dependencies.py | 2 +- {d123 => py123d}/common/utils/enums.py | 0 {d123 => py123d}/common/utils/mixin.py | 0 {d123 => py123d}/common/utils/timer.py | 0 {d123 => py123d}/common/utils/uuid.py | 0 .../common/visualization/__init__.py | 0 .../common/visualization/bokeh/.gitkeep | 0 .../common/visualization/color/__init__.py | 0 .../common/visualization/color/color.py | 0 .../common/visualization/color/config.py | 2 +- .../common/visualization/color/default.py | 10 ++-- .../visualization/matplotlib/__init__.py | 0 .../common/visualization/matplotlib/camera.py | 14 ++--- .../common/visualization/matplotlib/lidar.py | 0 .../visualization/matplotlib/observation.py | 24 ++++---- .../common/visualization/matplotlib/plots.py | 4 +- .../common/visualization/matplotlib/utils.py | 4 +- .../common/visualization/utils/.gitkeep | 0 .../common/visualization/viser/__init__.py | 0 .../visualization/viser/elements/__init__.py | 7 +++ .../viser/elements/detection_elements.py | 14 ++--- .../viser/elements/map_elements.py | 14 ++--- 
 .../viser/elements/sensor_elements.py | 14 ++---
 .../visualization/viser/viser_config.py | 6 +-
 .../visualization/viser/viser_viewer.py | 12 ++--
 {d123 => py123d}/conversion/__init__.py | 0
 .../conversion/abstract_dataset_converter.py | 6 +-
 .../conversion/dataset_converter_config.py | 0
 .../conversion/datasets/__init__.py | 0
 .../conversion/datasets/av2/av2_constants.py | 8 +--
 .../conversion/datasets/av2/av2_helper.py | 2 +-
 .../datasets/av2/av2_map_conversion.py | 14 ++---
 .../datasets/av2/av2_sensor_converter.py | 38 ++++++------
 .../conversion/datasets/carla/__init__.py | 0
 .../datasets/carla/carla_data_converter.py | 28 ++++-----
 .../datasets/carla/carla_load_sensor.py | 2 +-
 .../conversion/datasets/kitti_360/.gitkeep | 0
 .../conversion/datasets/nuplan/__init__.py | 0
 .../datasets/nuplan/nuplan_converter.py | 38 ++++++------
 .../datasets/nuplan/nuplan_load_sensor.py | 4 +-
 .../datasets/nuplan/nuplan_map_conversion.py | 14 ++---
 .../datasets/nuplan/utils/__init__.py | 0
 .../datasets/nuplan/utils/log_splits.yaml | 0
 .../datasets/nuplan/utils/nuplan_constants.py | 8 +--
 .../nuplan/utils/nuplan_sql_helper.py | 10 ++--
 .../conversion/datasets/nuscenes/.gitkeep | 0
 .../datasets/pandaset/pandaset_constants.py | 6 +-
 .../datasets/pandaset/pandaset_converter.py | 34 +++++------
 .../conversion/datasets/wopd/__init__.py | 0
 .../datasets/wopd/utils/wopd_constants.py | 8 +--
 .../waymo_map_utils/womp_boundary_utils.py | 10 ++--
 .../waymo_map_utils/wopd_map_utils copy.py | 10 ++--
 .../wopd/waymo_map_utils/wopd_map_utils.py | 18 +++---
 .../datasets/wopd/wopd_converter.py | 46 +++++++-------
 .../conversion/datasets/wopd/wopd_utils.py | 2 +-
 .../conversion/log_writer/__init__.py | 0
 .../log_writer/abstract_log_writer.py | 14 ++---
 .../conversion/log_writer/arrow_log_writer.py | 20 +++----
 .../map_writer/abstract_map_writer.py | 6 +-
 .../conversion/map_writer/gpkg_map_writer.py | 12 ++--
 {d123 => py123d}/conversion/utils/__init__.py | 0
 .../conversion/utils/map_utils/__init__.py | 0
 .../map_utils/opendrive/__init__ copy.py | 0
 .../utils/map_utils/opendrive/__init__.py | 0
 .../opendrive/opendrive_map_conversion.py | 20 +++----
 .../map_utils/opendrive/parser/__init__.py | 0
 .../map_utils/opendrive/parser/elevation.py | 2 +-
 .../map_utils/opendrive/parser/geometry.py | 2 +-
 .../utils/map_utils/opendrive/parser/lane.py | 2 +-
 .../map_utils/opendrive/parser/objects.py | 0
 .../map_utils/opendrive/parser/opendrive.py | 2 +-
 .../map_utils/opendrive/parser/polynomial.py | 0
 .../map_utils/opendrive/parser/reference.py | 10 ++--
 .../utils/map_utils/opendrive/parser/road.py | 8 +--
 .../map_utils/opendrive/utils/__init__.py | 0
 .../map_utils/opendrive/utils/collection.py | 12 ++--
 .../map_utils/opendrive/utils/id_mapping.py | 0
 .../map_utils/opendrive/utils/id_system.py | 0
 .../map_utils/opendrive/utils/lane_helper.py | 12 ++--
 .../opendrive/utils/objects_helper.py | 10 ++--
 .../utils/map_utils/road_edge/__init__.py | 0
 .../map_utils/road_edge/road_edge_2d_utils.py | 0
 .../map_utils/road_edge/road_edge_3d_utils.py | 6 +-
 .../utils/sensor_utils/camera_conventions.py | 2 +-
 .../sensor_utils/lidar_index_registry.py | 2 +-
 {d123 => py123d}/datatypes/__init__.py | 0
 .../datatypes/detections/__init__.py | 0
 .../datatypes/detections/detection.py | 8 +--
 .../datatypes/detections/detection_types.py | 4 +-
 .../datatypes/maps/abstract_map.py | 8 +--
 .../datatypes/maps/abstract_map_objects.py | 4 +-
 .../datatypes/maps/cache/__init__.py | 0
 .../datatypes/maps/cache/cache_map_objects.py | 8 +--
 .../datatypes/maps/gpkg/__init__.py | 0
 .../datatypes/maps/gpkg/gpkg_map.py | 26 ++++----
 .../datatypes/maps/gpkg/gpkg_map_objects.py | 8 +--
 .../datatypes/maps/gpkg/gpkg_utils.py | 2 +-
 .../datatypes/maps/map_datatypes.py | 2 +-
 .../datatypes/maps/map_metadata.py | 4 +-
 {d123 => py123d}/datatypes/scene/__init__.py | 0
 .../datatypes/scene/abstract_scene.py | 16 ++---
 .../datatypes/scene/abstract_scene_builder.py | 6 +-
 .../datatypes/scene/arrow/__init__.py | 0
 .../datatypes/scene/arrow/arrow_scene.py | 24 ++++----
 .../scene/arrow/arrow_scene_builder.py | 16 ++---
 .../datatypes/scene/arrow/utils/__init__.py | 0
 .../scene/arrow/utils/arrow_getters.py | 22 +++----
 .../scene/arrow/utils/arrow_metadata_utils.py | 4 +-
 .../datatypes/scene/scene_filter.py | 2 +-
 .../datatypes/scene/scene_metadata.py | 12 ++--
 .../datatypes/sensors/__init__.py | 0
 .../datatypes/sensors/camera/__init__.py | 0
 .../sensors/camera/pinhole_camera.py | 8 +--
 .../datatypes/sensors/lidar/__init__.py | 0
 .../datatypes/sensors/lidar/lidar.py | 6 +-
 {d123 => py123d}/datatypes/time/__init__.py | 0
 {d123 => py123d}/datatypes/time/time_point.py | 0
 .../datatypes/vehicle_state/__init__.py | 0
 .../datatypes/vehicle_state/ego_state.py | 12 ++--
 .../vehicle_state/vehicle_parameters.py | 6 +-
 py123d/geometry/__init__.py | 22 +++++++
 {d123 => py123d}/geometry/bounding_box.py | 32 +++++-----
 {d123 => py123d}/geometry/geometry_index.py | 2 +-
 {d123 => py123d}/geometry/occupancy_map.py | 4 +-
 {d123 => py123d}/geometry/point.py | 12 ++--
 {d123 => py123d}/geometry/polyline.py | 24 ++++----
 {d123 => py123d}/geometry/rotation.py | 12 ++--
 {d123 => py123d}/geometry/se.py | 22 +++----
 {d123 => py123d}/geometry/test/__init__.py | 0
 .../geometry/test/test_bounding_box.py | 6 +-
 .../geometry/test/test_occupancy_map.py | 2 +-
 {d123 => py123d}/geometry/test/test_point.py | 4 +-
 .../geometry/test/test_polyline.py | 2 +-
 .../geometry/test/test_rotation.py | 4 +-
 {d123 => py123d}/geometry/test/test_vector.py | 2 +-
 {d123 => py123d}/geometry/torch/.gitkeep | 0
 .../geometry/transform/__init__.py | 0
 .../geometry/transform/test/__init__.py | 0
 .../test/test_transform_consistency.py | 8 +--
 .../test/test_transform_euler_se3.py | 4 +-
 .../transform/test/test_transform_se2.py | 4 +-
 .../transform/test/test_transform_se3.py | 8 +--
 .../geometry/transform/transform_euler_se3.py | 4 +-
 .../geometry/transform/transform_se2.py | 12 ++--
 .../geometry/transform/transform_se3.py | 22 +++----
 {d123 => py123d}/geometry/utils/__init__.py | 0
 .../geometry/utils/bounding_box_utils.py | 24 ++++----
 {d123 => py123d}/geometry/utils/constants.py | 0
 .../geometry/utils/polyline_utils.py | 2 +-
 .../geometry/utils/rotation_utils.py | 2 +-
 .../geometry/utils/test/__init__.py | 0
 .../utils/test/test_bounding_box_utils.py | 10 ++--
 .../utils/test/test_polyline_utils.py | 0
 .../utils/test/test_rotation_utils.py | 0
 {d123 => py123d}/geometry/utils/units.py | 0
 {d123 => py123d}/geometry/vector.py | 12 ++--
 {d123 => py123d}/script/__init__.py | 0
 {d123 => py123d}/script/builders/__init__.py | 0
 .../builders/dataset_converter_builder.py | 4 +-
 .../script/builders/scene_builder_builder.py | 2 +-
 .../script/builders/scene_filter_builder.py | 2 +-
 .../script/builders/utils/utils_type.py | 0
 .../script/builders/worker_pool_builder.py | 4 +-
 .../script/builders/writer_builder.py | 6 +-
 {d123 => py123d}/script/config/__init__.py | 0
 .../script/config/common/__init__.py | 0
 .../script/config/common/default_common.yaml | 0
 .../config/common/default_dataset_paths.yaml | 11 ++++
 .../config/common/default_experiment.yaml | 2 +-
 .../scene_builder/default_scene_builder.yaml | 4 ++
 .../common/scene_filter/all_scenes.yaml | 2 +-
 .../common/scene_filter/log_scenes.yaml | 2 +-
 .../scene_filter/nuplan_mini_train.yaml | 2 +-
 .../common/scene_filter/nuplan_mini_val.yaml | 2 +-
 .../common/scene_filter/nuplan_sim_agent.yaml | 2 +-
 .../common/scene_filter/viser_scenes.yaml | 2 +-
 .../script/config/common/worker/__init__.py | 0
 .../config/common/worker/ray_distributed.yaml | 2 +-
 .../config/common/worker/sequential.yaml | 2 +
 .../worker/single_machine_thread_pool.yaml | 2 +-
 .../script/config/conversion/__init__.py | 0
 .../config/conversion/datasets/__init__.py | 0
 .../datasets/av2_sensor_dataset.yaml | 6 +-
 .../conversion/datasets/carla_dataset.yaml | 6 +-
 .../conversion/datasets/nuplan_dataset.yaml | 6 +-
 .../datasets/nuplan_mini_dataset.yaml | 6 +-
 .../conversion/datasets/pandaset_dataset.yaml | 6 +-
 .../conversion/datasets/wopd_dataset.yaml | 6 +-
 .../config/conversion/default_conversion.yaml | 4 +-
 .../config/conversion/log_writer/__init__.py | 0
 .../log_writer/arrow_log_writer.yaml | 4 +-
 .../config/conversion/map_writer/__init__.py | 0
 .../map_writer/gpkg_map_writer.yaml | 4 ++
 .../script/config/viser/__init__.py | 0
 .../script/config/viser/default_viser.yaml | 6 +-
 {d123 => py123d}/script/run_conversion.py | 10 ++--
 {d123 => py123d}/script/run_viser.py | 8 +--
 pyproject.toml | 12 ++--
 scripts/dataset/run_log_caching.sh | 2 +-
 scripts/viz/run_viser.sh | 2 +-
 test_viser.py | 15 +++--
 242 files changed, 806 insertions(+), 812 deletions(-)
 delete mode 100644 d123/common/visualization/viser/elements/__init__.py
 delete mode 100644 d123/geometry/__init__.py
 delete mode 100644 d123/script/config/common/default_dataset_paths.yaml
 delete mode 100644 d123/script/config/common/scene_builder/default_scene_builder.yaml
 delete mode 100644 d123/script/config/common/worker/sequential.yaml
 delete mode 100644 d123/script/config/conversion/map_writer/gpkg_map_writer.yaml
 rename {d123 => py123d}/__init__.py (100%)
 rename {d123 => py123d}/common/__init__.py (100%)
 rename {d123 => py123d}/common/multithreading/ray_execution.py (98%)
 rename {d123 => py123d}/common/multithreading/worker_parallel.py (97%)
 rename {d123 => py123d}/common/multithreading/worker_pool.py (100%)
 rename {d123 => py123d}/common/multithreading/worker_ray.py (97%)
 rename {d123 => py123d}/common/multithreading/worker_sequential.py (96%)
 rename {d123 => py123d}/common/multithreading/worker_utils.py (95%)
 rename {d123 => py123d}/common/utils/__init__.py (100%)
 rename {d123 => py123d}/common/utils/arrow_helper.py (100%)
 rename {d123 => py123d}/common/utils/dependencies.py (91%)
 rename {d123 => py123d}/common/utils/enums.py (100%)
 rename {d123 => py123d}/common/utils/mixin.py (100%)
 rename {d123 => py123d}/common/utils/timer.py (100%)
 rename {d123 => py123d}/common/utils/uuid.py (100%)
 rename {d123 => py123d}/common/visualization/__init__.py (100%)
 rename {d123 => py123d}/common/visualization/bokeh/.gitkeep (100%)
 rename {d123 => py123d}/common/visualization/color/__init__.py (100%)
 rename {d123 => py123d}/common/visualization/color/color.py (100%)
 rename {d123 => py123d}/common/visualization/color/config.py (88%)
 rename {d123 => py123d}/common/visualization/color/default.py (95%)
 rename {d123 => py123d}/common/visualization/matplotlib/__init__.py (100%)
 rename {d123 => py123d}/common/visualization/matplotlib/camera.py (95%)
 rename {d123 => py123d}/common/visualization/matplotlib/lidar.py (100%)
 rename {d123 => 
py123d}/common/visualization/matplotlib/observation.py (88%) rename {d123 => py123d}/common/visualization/matplotlib/plots.py (95%) rename {d123 => py123d}/common/visualization/matplotlib/utils.py (97%) rename {d123 => py123d}/common/visualization/utils/.gitkeep (100%) rename {d123 => py123d}/common/visualization/viser/__init__.py (100%) create mode 100644 py123d/common/visualization/viser/elements/__init__.py rename {d123 => py123d}/common/visualization/viser/elements/detection_elements.py (88%) rename {d123 => py123d}/common/visualization/viser/elements/map_elements.py (86%) rename {d123 => py123d}/common/visualization/viser/elements/sensor_elements.py (93%) rename {d123 => py123d}/common/visualization/viser/viser_config.py (92%) rename {d123 => py123d}/common/visualization/viser/viser_viewer.py (95%) rename {d123 => py123d}/conversion/__init__.py (100%) rename {d123 => py123d}/conversion/abstract_dataset_converter.py (84%) rename {d123 => py123d}/conversion/dataset_converter_config.py (100%) rename {d123 => py123d}/conversion/datasets/__init__.py (100%) rename {d123 => py123d}/conversion/datasets/av2/av2_constants.py (94%) rename {d123 => py123d}/conversion/datasets/av2/av2_helper.py (99%) rename {d123 => py123d}/conversion/datasets/av2/av2_map_conversion.py (96%) rename {d123 => py123d}/conversion/datasets/av2/av2_sensor_converter.py (90%) rename {d123 => py123d}/conversion/datasets/carla/__init__.py (100%) rename {d123 => py123d}/conversion/datasets/carla/carla_data_converter.py (94%) rename {d123 => py123d}/conversion/datasets/carla/carla_load_sensor.py (79%) rename {d123 => py123d}/conversion/datasets/kitti_360/.gitkeep (100%) rename {d123 => py123d}/conversion/datasets/nuplan/__init__.py (100%) rename {d123 => py123d}/conversion/datasets/nuplan/nuplan_converter.py (92%) rename {d123 => py123d}/conversion/datasets/nuplan/nuplan_load_sensor.py (78%) rename {d123 => py123d}/conversion/datasets/nuplan/nuplan_map_conversion.py (97%) rename {d123 => py123d}/conversion/datasets/nuplan/utils/__init__.py (100%) rename {d123 => py123d}/conversion/datasets/nuplan/utils/log_splits.yaml (100%) rename {d123 => py123d}/conversion/datasets/nuplan/utils/nuplan_constants.py (89%) rename {d123 => py123d}/conversion/datasets/nuplan/utils/nuplan_sql_helper.py (90%) rename {d123 => py123d}/conversion/datasets/nuscenes/.gitkeep (100%) rename {d123 => py123d}/conversion/datasets/pandaset/pandaset_constants.py (96%) rename {d123 => py123d}/conversion/datasets/pandaset/pandaset_converter.py (92%) rename {d123 => py123d}/conversion/datasets/wopd/__init__.py (100%) rename {d123 => py123d}/conversion/datasets/wopd/utils/wopd_constants.py (89%) rename {d123 => py123d}/conversion/datasets/wopd/waymo_map_utils/womp_boundary_utils.py (97%) rename {d123 => py123d}/conversion/datasets/wopd/waymo_map_utils/wopd_map_utils copy.py (97%) rename {d123 => py123d}/conversion/datasets/wopd/waymo_map_utils/wopd_map_utils.py (93%) rename {d123 => py123d}/conversion/datasets/wopd/wopd_converter.py (90%) rename {d123 => py123d}/conversion/datasets/wopd/wopd_utils.py (98%) rename {d123 => py123d}/conversion/log_writer/__init__.py (100%) rename {d123 => py123d}/conversion/log_writer/abstract_log_writer.py (70%) rename {d123 => py123d}/conversion/log_writer/arrow_log_writer.py (94%) rename {d123 => py123d}/conversion/map_writer/abstract_map_writer.py (91%) rename {d123 => py123d}/conversion/map_writer/gpkg_map_writer.py (95%) rename {d123 => py123d}/conversion/utils/__init__.py (100%) rename {d123 => 
py123d}/conversion/utils/map_utils/__init__.py (100%) rename {d123 => py123d}/conversion/utils/map_utils/opendrive/__init__ copy.py (100%) rename {d123 => py123d}/conversion/utils/map_utils/opendrive/__init__.py (100%) rename {d123 => py123d}/conversion/utils/map_utils/opendrive/opendrive_map_conversion.py (95%) rename {d123 => py123d}/conversion/utils/map_utils/opendrive/parser/__init__.py (100%) rename {d123 => py123d}/conversion/utils/map_utils/opendrive/parser/elevation.py (97%) rename {d123 => py123d}/conversion/utils/map_utils/opendrive/parser/geometry.py (99%) rename {d123 => py123d}/conversion/utils/map_utils/opendrive/parser/lane.py (98%) rename {d123 => py123d}/conversion/utils/map_utils/opendrive/parser/objects.py (100%) rename {d123 => py123d}/conversion/utils/map_utils/opendrive/parser/opendrive.py (98%) rename {d123 => py123d}/conversion/utils/map_utils/opendrive/parser/polynomial.py (100%) rename {d123 => py123d}/conversion/utils/map_utils/opendrive/parser/reference.py (93%) rename {d123 => py123d}/conversion/utils/map_utils/opendrive/parser/road.py (93%) rename {d123 => py123d}/conversion/utils/map_utils/opendrive/utils/__init__.py (100%) rename {d123 => py123d}/conversion/utils/map_utils/opendrive/utils/collection.py (96%) rename {d123 => py123d}/conversion/utils/map_utils/opendrive/utils/id_mapping.py (100%) rename {d123 => py123d}/conversion/utils/map_utils/opendrive/utils/id_system.py (100%) rename {d123 => py123d}/conversion/utils/map_utils/opendrive/utils/lane_helper.py (95%) rename {d123 => py123d}/conversion/utils/map_utils/opendrive/utils/objects_helper.py (88%) rename {d123 => py123d}/conversion/utils/map_utils/road_edge/__init__.py (100%) rename {d123 => py123d}/conversion/utils/map_utils/road_edge/road_edge_2d_utils.py (100%) rename {d123 => py123d}/conversion/utils/map_utils/road_edge/road_edge_3d_utils.py (98%) rename {d123 => py123d}/conversion/utils/sensor_utils/camera_conventions.py (98%) rename {d123 => py123d}/conversion/utils/sensor_utils/lidar_index_registry.py (95%) rename {d123 => py123d}/datatypes/__init__.py (100%) rename {d123 => py123d}/datatypes/detections/__init__.py (100%) rename {d123 => py123d}/datatypes/detections/detection.py (93%) rename {d123 => py123d}/datatypes/detections/detection_types.py (93%) rename {d123 => py123d}/datatypes/maps/abstract_map.py (91%) rename {d123 => py123d}/datatypes/maps/abstract_map_objects.py (98%) rename {d123 => py123d}/datatypes/maps/cache/__init__.py (100%) rename {d123 => py123d}/datatypes/maps/cache/cache_map_objects.py (97%) rename {d123 => py123d}/datatypes/maps/gpkg/__init__.py (100%) rename {d123 => py123d}/datatypes/maps/gpkg/gpkg_map.py (95%) rename {d123 => py123d}/datatypes/maps/gpkg/gpkg_map_objects.py (97%) rename {d123 => py123d}/datatypes/maps/gpkg/gpkg_utils.py (98%) rename {d123 => py123d}/datatypes/maps/map_datatypes.py (97%) rename {d123 => py123d}/datatypes/maps/map_metadata.py (91%) rename {d123 => py123d}/datatypes/scene/__init__.py (100%) rename {d123 => py123d}/datatypes/scene/abstract_scene.py (84%) rename {d123 => py123d}/datatypes/scene/abstract_scene_builder.py (77%) rename {d123 => py123d}/datatypes/scene/arrow/__init__.py (100%) rename {d123 => py123d}/datatypes/scene/arrow/arrow_scene.py (86%) rename {d123 => py123d}/datatypes/scene/arrow/arrow_scene_builder.py (91%) rename {d123 => py123d}/datatypes/scene/arrow/utils/__init__.py (100%) rename {d123 => py123d}/datatypes/scene/arrow/utils/arrow_getters.py (88%) rename {d123 => 
py123d}/datatypes/scene/arrow/utils/arrow_metadata_utils.py (82%) rename {d123 => py123d}/datatypes/scene/scene_filter.py (95%) rename {d123 => py123d}/datatypes/scene/scene_metadata.py (86%) rename {d123 => py123d}/datatypes/sensors/__init__.py (100%) rename {d123 => py123d}/datatypes/sensors/camera/__init__.py (100%) rename {d123 => py123d}/datatypes/sensors/camera/pinhole_camera.py (97%) rename {d123 => py123d}/datatypes/sensors/lidar/__init__.py (100%) rename {d123 => py123d}/datatypes/sensors/lidar/lidar.py (94%) rename {d123 => py123d}/datatypes/time/__init__.py (100%) rename {d123 => py123d}/datatypes/time/time_point.py (100%) rename {d123 => py123d}/datatypes/vehicle_state/__init__.py (100%) rename {d123 => py123d}/datatypes/vehicle_state/ego_state.py (95%) rename {d123 => py123d}/datatypes/vehicle_state/vehicle_parameters.py (95%) create mode 100644 py123d/geometry/__init__.py rename {d123 => py123d}/geometry/bounding_box.py (88%) rename {d123 => py123d}/geometry/geometry_index.py (99%) rename {d123 => py123d}/geometry/occupancy_map.py (98%) rename {d123 => py123d}/geometry/point.py (93%) rename {d123 => py123d}/geometry/polyline.py (95%) rename {d123 => py123d}/geometry/rotation.py (96%) rename {d123 => py123d}/geometry/se.py (95%) rename {d123 => py123d}/geometry/test/__init__.py (100%) rename {d123 => py123d}/geometry/test/test_bounding_box.py (97%) rename {d123 => py123d}/geometry/test/test_occupancy_map.py (99%) rename {d123 => py123d}/geometry/test/test_point.py (98%) rename {d123 => py123d}/geometry/test/test_polyline.py (99%) rename {d123 => py123d}/geometry/test/test_rotation.py (98%) rename {d123 => py123d}/geometry/test/test_vector.py (98%) rename {d123 => py123d}/geometry/torch/.gitkeep (100%) rename {d123 => py123d}/geometry/transform/__init__.py (100%) rename {d123 => py123d}/geometry/transform/test/__init__.py (100%) rename {d123 => py123d}/geometry/transform/test/test_transform_consistency.py (98%) rename {d123 => py123d}/geometry/transform/test/test_transform_euler_se3.py (99%) rename {d123 => py123d}/geometry/transform/test/test_transform_se2.py (99%) rename {d123 => py123d}/geometry/transform/test/test_transform_se3.py (97%) rename {d123 => py123d}/geometry/transform/transform_euler_se3.py (97%) rename {d123 => py123d}/geometry/transform/transform_se2.py (94%) rename {d123 => py123d}/geometry/transform/transform_se3.py (94%) rename {d123 => py123d}/geometry/utils/__init__.py (100%) rename {d123 => py123d}/geometry/utils/bounding_box_utils.py (91%) rename {d123 => py123d}/geometry/utils/constants.py (100%) rename {d123 => py123d}/geometry/utils/polyline_utils.py (96%) rename {d123 => py123d}/geometry/utils/rotation_utils.py (99%) rename {d123 => py123d}/geometry/utils/test/__init__.py (100%) rename {d123 => py123d}/geometry/utils/test/test_bounding_box_utils.py (97%) rename {d123 => py123d}/geometry/utils/test/test_polyline_utils.py (100%) rename {d123 => py123d}/geometry/utils/test/test_rotation_utils.py (100%) rename {d123 => py123d}/geometry/utils/units.py (100%) rename {d123 => py123d}/geometry/vector.py (95%) rename {d123 => py123d}/script/__init__.py (100%) rename {d123 => py123d}/script/builders/__init__.py (100%) rename {d123 => py123d}/script/builders/dataset_converter_builder.py (81%) rename {d123 => py123d}/script/builders/scene_builder_builder.py (90%) rename {d123 => py123d}/script/builders/scene_filter_builder.py (95%) rename {d123 => py123d}/script/builders/utils/utils_type.py (100%) rename {d123 => py123d}/script/builders/worker_pool_builder.py 
(80%) rename {d123 => py123d}/script/builders/writer_builder.py (76%) rename {d123 => py123d}/script/config/__init__.py (100%) rename {d123 => py123d}/script/config/common/__init__.py (100%) rename {d123 => py123d}/script/config/common/default_common.yaml (100%) create mode 100644 py123d/script/config/common/default_dataset_paths.yaml rename {d123 => py123d}/script/config/common/default_experiment.yaml (69%) create mode 100644 py123d/script/config/common/scene_builder/default_scene_builder.yaml rename {d123 => py123d}/script/config/common/scene_filter/all_scenes.yaml (81%) rename {d123 => py123d}/script/config/common/scene_filter/log_scenes.yaml (82%) rename {d123 => py123d}/script/config/common/scene_filter/nuplan_mini_train.yaml (80%) rename {d123 => py123d}/script/config/common/scene_filter/nuplan_mini_val.yaml (80%) rename {d123 => py123d}/script/config/common/scene_filter/nuplan_sim_agent.yaml (97%) rename {d123 => py123d}/script/config/common/scene_filter/viser_scenes.yaml (81%) rename {d123 => py123d}/script/config/common/worker/__init__.py (100%) rename {d123 => py123d}/script/config/common/worker/ray_distributed.yaml (89%) create mode 100644 py123d/script/config/common/worker/sequential.yaml rename {d123 => py123d}/script/config/common/worker/single_machine_thread_pool.yaml (73%) rename {d123 => py123d}/script/config/conversion/__init__.py (100%) rename {d123 => py123d}/script/config/conversion/datasets/__init__.py (100%) rename {d123 => py123d}/script/config/conversion/datasets/av2_sensor_dataset.yaml (80%) rename {d123 => py123d}/script/config/conversion/datasets/carla_dataset.yaml (79%) rename {d123 => py123d}/script/config/conversion/datasets/nuplan_dataset.yaml (82%) rename {d123 => py123d}/script/config/conversion/datasets/nuplan_mini_dataset.yaml (82%) rename {d123 => py123d}/script/config/conversion/datasets/pandaset_dataset.yaml (91%) rename {d123 => py123d}/script/config/conversion/datasets/wopd_dataset.yaml (86%) rename {d123 => py123d}/script/config/conversion/default_conversion.yaml (86%) rename {d123 => py123d}/script/config/conversion/log_writer/__init__.py (100%) rename {d123 => py123d}/script/config/conversion/log_writer/arrow_log_writer.yaml (67%) rename {d123 => py123d}/script/config/conversion/map_writer/__init__.py (100%) create mode 100644 py123d/script/config/conversion/map_writer/gpkg_map_writer.yaml rename {d123 => py123d}/script/config/viser/__init__.py (100%) rename {d123 => py123d}/script/config/viser/default_viser.yaml (70%) rename {d123 => py123d}/script/run_conversion.py (87%) rename {d123 => py123d}/script/run_viser.py (67%) diff --git a/d123/common/visualization/viser/elements/__init__.py b/d123/common/visualization/viser/elements/__init__.py deleted file mode 100644 index 55c47327..00000000 --- a/d123/common/visualization/viser/elements/__init__.py +++ /dev/null @@ -1,7 +0,0 @@ -from d123.common.visualization.viser.elements.detection_elements import add_box_detections_to_viser_server -from d123.common.visualization.viser.elements.map_elements import add_map_to_viser_server -from d123.common.visualization.viser.elements.sensor_elements import ( - add_camera_frustums_to_viser_server, - add_camera_gui_to_viser_server, - add_lidar_pc_to_viser_server, -) diff --git a/d123/geometry/__init__.py b/d123/geometry/__init__.py deleted file mode 100644 index e022c86a..00000000 --- a/d123/geometry/__init__.py +++ /dev/null @@ -1,22 +0,0 @@ -from d123.geometry.geometry_index import ( - Point2DIndex, - Point3DIndex, - BoundingBoxSE2Index, - BoundingBoxSE3Index, - 
Corners2DIndex, - Corners3DIndex, - EulerAnglesIndex, - EulerStateSE3Index, - QuaternionIndex, - StateSE2Index, - StateSE3Index, - Vector2DIndex, - Vector3DIndex, -) -from d123.geometry.point import Point2D, Point3D -from d123.geometry.vector import Vector2D, Vector3D -from d123.geometry.rotation import EulerAngles, Quaternion -from d123.geometry.se import EulerStateSE3, StateSE2, StateSE3 -from d123.geometry.bounding_box import BoundingBoxSE2, BoundingBoxSE3 -from d123.geometry.polyline import Polyline2D, Polyline3D, PolylineSE2 -from d123.geometry.occupancy_map import OccupancyMap2D diff --git a/d123/script/config/common/default_dataset_paths.yaml b/d123/script/config/common/default_dataset_paths.yaml deleted file mode 100644 index 53e06caa..00000000 --- a/d123/script/config/common/default_dataset_paths.yaml +++ /dev/null @@ -1,12 +0,0 @@ - -# 123D Defaults -d123_data_root: ${oc.env:D123_DATA_ROOT} -d123_logs_root: ${oc.env:D123_DATA_ROOT}/logs -d123_maps_root: ${oc.env:D123_DATA_ROOT}/maps -d123_sensors_root: ${oc.env:D123_DATA_ROOT}/sensors - - -# nuPlan defaults -nuplan_data_root: ${oc.env:NUPLAN_DATA_ROOT} -nuplan_maps_root: ${oc.env:NUPLAN_MAPS_ROOT} -nuplan_sensor_root: ${oc.env:NUPLAN_DATA_ROOT}/nuplan-v1.1/sensor_blobs diff --git a/d123/script/config/common/scene_builder/default_scene_builder.yaml b/d123/script/config/common/scene_builder/default_scene_builder.yaml deleted file mode 100644 index 5e42aaf4..00000000 --- a/d123/script/config/common/scene_builder/default_scene_builder.yaml +++ /dev/null @@ -1,4 +0,0 @@ -_target_: d123.datatypes.scene.arrow.arrow_scene_builder.ArrowSceneBuilder -_convert_: 'all' - -dataset_path: ${d123_data_root} diff --git a/d123/script/config/common/worker/sequential.yaml b/d123/script/config/common/worker/sequential.yaml deleted file mode 100644 index ea004415..00000000 --- a/d123/script/config/common/worker/sequential.yaml +++ /dev/null @@ -1,2 +0,0 @@ -_target_: d123.common.multithreading.worker_sequential.Sequential -_convert_: 'all' diff --git a/d123/script/config/conversion/map_writer/gpkg_map_writer.yaml b/d123/script/config/conversion/map_writer/gpkg_map_writer.yaml deleted file mode 100644 index 2e2e42b5..00000000 --- a/d123/script/config/conversion/map_writer/gpkg_map_writer.yaml +++ /dev/null @@ -1,4 +0,0 @@ -_target_: d123.conversion.map_writer.gpkg_map_writer.GPKGMapWriter -_convert_: 'all' - -maps_root: ${d123_maps_root} diff --git a/docs/conf.py b/docs/conf.py index b14d3fea..e1288398 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -6,7 +6,7 @@ # -- Project information ----------------------------------------------------- # https://www.sphinx-doc.org/en/master/usage/configuration.html#project-information -project = "d123" +project = "py123d" copyright = "2025, 123D Contributors" author = "123D Contributors" release = "v0.0.6" diff --git a/docs/contributing.md b/docs/contributing.md index e447d876..8ce0f775 100644 --- a/docs/contributing.md +++ b/docs/contributing.md @@ -8,15 +8,15 @@ Thank you for your interest in contributing to 123D! This guide will help you ge ### 1. Clone the Repository ```sh -git clone git@github.com:DanielDauner/d123.git -cd d123 +git clone git@github.com:DanielDauner/py123d.git +cd py123d ``` ### 2. 
Install the pip-package ```sh -conda env create -f environment.yml --name d123_dev # Optional -conda activate d123_dev +conda env create -f environment.yml --name py123d_dev # Optional +conda activate py123d_dev pip install -e .[dev] pre-commit install ``` diff --git a/docs/datasets/av2.rst b/docs/datasets/av2.rst index ab01edfb..17311422 100644 --- a/docs/datasets/av2.rst +++ b/docs/datasets/av2.rst @@ -29,7 +29,7 @@ Installation .. code-block:: bash # Example installation commands - pip install d123[dataset_name] + pip install py123d[dataset_name] # or wget https://example.com/dataset.zip diff --git a/docs/datasets/carla.rst b/docs/datasets/carla.rst index 3ca7ff09..ccc921f1 100644 --- a/docs/datasets/carla.rst +++ b/docs/datasets/carla.rst @@ -29,7 +29,7 @@ Installation .. code-block:: bash # Example installation commands - pip install d123[dataset_name] + pip install py123d[dataset_name] # or wget https://example.com/dataset.zip diff --git a/docs/datasets/kitti-360.rst b/docs/datasets/kitti-360.rst index 76100d27..42d549b6 100644 --- a/docs/datasets/kitti-360.rst +++ b/docs/datasets/kitti-360.rst @@ -29,7 +29,7 @@ Installation .. code-block:: bash # Example installation commands - pip install d123[dataset_name] + pip install py123d[dataset_name] # or wget https://example.com/dataset.zip diff --git a/docs/datasets/nuplan.rst b/docs/datasets/nuplan.rst index c1590f03..94b50f08 100644 --- a/docs/datasets/nuplan.rst +++ b/docs/datasets/nuplan.rst @@ -29,7 +29,7 @@ Installation .. code-block:: bash # Example installation commands - pip install d123[dataset_name] + pip install py123d[dataset_name] # or wget https://example.com/dataset.zip diff --git a/docs/datasets/nuscenes.rst b/docs/datasets/nuscenes.rst index 1f4e1621..638c5ad6 100644 --- a/docs/datasets/nuscenes.rst +++ b/docs/datasets/nuscenes.rst @@ -29,7 +29,7 @@ Installation .. code-block:: bash # Example installation commands - pip install d123[dataset_name] + pip install py123d[dataset_name] # or wget https://example.com/dataset.zip diff --git a/docs/datasets/template.rst b/docs/datasets/template.rst index d38723ed..29797269 100644 --- a/docs/datasets/template.rst +++ b/docs/datasets/template.rst @@ -29,7 +29,7 @@ Installation .. code-block:: bash # Example installation commands - pip install d123[dataset_name] + pip install py123d[dataset_name] # or wget https://example.com/dataset.zip diff --git a/docs/datasets/wopd.rst b/docs/datasets/wopd.rst index 8dc276a8..8a92d9d5 100644 --- a/docs/datasets/wopd.rst +++ b/docs/datasets/wopd.rst @@ -29,7 +29,7 @@ Installation .. code-block:: bash # Example installation commands - pip install d123[dataset_name] + pip install py123d[dataset_name] # or wget https://example.com/dataset.zip diff --git a/docs/geometry.rst b/docs/geometry.rst index 61b3f65d..0a3215e3 100644 --- a/docs/geometry.rst +++ b/docs/geometry.rst @@ -7,49 +7,49 @@ Geometric Primitives Points ~~~~~~ -.. autoclass:: d123.geometry.Point2D() +.. autoclass:: py123d.geometry.Point2D() -.. autoclass:: d123.geometry.Point3D() +.. autoclass:: py123d.geometry.Point3D() Vectors ~~~~~~~ -.. autoclass:: d123.geometry.Vector2D() +.. autoclass:: py123d.geometry.Vector2D() -.. autoclass:: d123.geometry.Vector3D() +.. autoclass:: py123d.geometry.Vector3D() Special Euclidean Group ~~~~~~~~~~~~~~~~~~~~~~~ -.. autoclass:: d123.geometry.StateSE2() +.. autoclass:: py123d.geometry.StateSE2() -.. autoclass:: d123.geometry.StateSE3() +.. autoclass:: py123d.geometry.StateSE3() Bounding Boxes ~~~~~~~~~~~~~~ -.. 
autoclass:: d123.geometry.BoundingBoxSE2() +.. autoclass:: py123d.geometry.BoundingBoxSE2() -.. autoclass:: d123.geometry.BoundingBoxSE3() +.. autoclass:: py123d.geometry.BoundingBoxSE3() Indexing Enums ~~~~~~~~~~~~~~ -.. autoclass:: d123.geometry.Point2DIndex() +.. autoclass:: py123d.geometry.Point2DIndex() -.. autoclass:: d123.geometry.Point3DIndex() +.. autoclass:: py123d.geometry.Point3DIndex() -.. autoclass:: d123.geometry.Vector2DIndex() +.. autoclass:: py123d.geometry.Vector2DIndex() -.. autoclass:: d123.geometry.Vector3DIndex() +.. autoclass:: py123d.geometry.Vector3DIndex() -.. autoclass:: d123.geometry.StateSE2Index() +.. autoclass:: py123d.geometry.StateSE2Index() -.. autoclass:: d123.geometry.StateSE3Index() +.. autoclass:: py123d.geometry.StateSE3Index() -.. autoclass:: d123.geometry.BoundingBoxSE2Index() +.. autoclass:: py123d.geometry.BoundingBoxSE2Index() -.. autoclass:: d123.geometry.BoundingBoxSE3Index() +.. autoclass:: py123d.geometry.BoundingBoxSE3Index() -.. autoclass:: d123.geometry.Corners2DIndex() +.. autoclass:: py123d.geometry.Corners2DIndex() -.. autoclass:: d123.geometry.Corners3DIndex() +.. autoclass:: py123d.geometry.Corners3DIndex() Transformations @@ -57,31 +57,31 @@ Transformations Transformations in 2D ~~~~~~~~~~~~~~~~~~~~~ -.. autofunction:: d123.geometry.transform.convert_absolute_to_relative_se2_array +.. autofunction:: py123d.geometry.transform.convert_absolute_to_relative_se2_array -.. autofunction:: d123.geometry.transform.convert_relative_to_absolute_se2_array +.. autofunction:: py123d.geometry.transform.convert_relative_to_absolute_se2_array -.. autofunction:: d123.geometry.transform.translate_se2_along_body_frame +.. autofunction:: py123d.geometry.transform.translate_se2_along_body_frame -.. autofunction:: d123.geometry.transform.translate_se2_along_x +.. autofunction:: py123d.geometry.transform.translate_se2_along_x -.. autofunction:: d123.geometry.transform.translate_se2_along_y +.. autofunction:: py123d.geometry.transform.translate_se2_along_y Transformations in 3D ~~~~~~~~~~~~~~~~~~~~~ -.. autofunction:: d123.geometry.transform.convert_absolute_to_relative_se3_array +.. autofunction:: py123d.geometry.transform.convert_absolute_to_relative_se3_array -.. autofunction:: d123.geometry.transform.convert_relative_to_absolute_se3_array +.. autofunction:: py123d.geometry.transform.convert_relative_to_absolute_se3_array -.. autofunction:: d123.geometry.transform.translate_se3_along_body_frame +.. autofunction:: py123d.geometry.transform.translate_se3_along_body_frame -.. autofunction:: d123.geometry.transform.translate_se3_along_x +.. autofunction:: py123d.geometry.transform.translate_se3_along_x -.. autofunction:: d123.geometry.transform.translate_se3_along_y +.. autofunction:: py123d.geometry.transform.translate_se3_along_y -.. autofunction:: d123.geometry.transform.translate_se3_along_z +.. autofunction:: py123d.geometry.transform.translate_se3_along_z Occupancy Map ------------- -.. autoclass:: d123.geometry.OccupancyMap2D() +.. autoclass:: py123d.geometry.OccupancyMap2D() diff --git a/docs/installation.md b/docs/installation.md index 95d09a04..37770165 100644 --- a/docs/installation.md +++ b/docs/installation.md @@ -1,10 +1,9 @@ - # Installation -Note, the following installation assumes the following folder structure: +Note that the following installation assumes the folder structure below: TODO UPDATE ``` -~/d123_workspace -├── d123 +~/py123d_workspace +├── py123d ├── exp │ └── ... 
└── data @@ -25,19 +24,16 @@ Note, the following installation assumes the following folder structure: ``` -First you need to create a new conda environment and install `d123` as editable pip package. +First you need to create a new conda environment and install `py123d` as editable pip package. ```bash conda env create -f environment.yml -conda activate d123 +conda activate py123d pip install -e . ``` Next, you need add the following environment variables in your `.bashrc`: ```bash -export D123_DEVKIT_ROOT="$HOME/d123_workspace/d123" -export D123_MAPS_ROOT="$HOME/d123_workspace/data/maps" -export D123_DATA_ROOT="$HOME/d123_workspace/data" -export D123_EXP_ROOT="$HOME/d123_workspace/exp" +export PY123D_DATA_ROOT="$HOME/py123d_workspace/data" # CARLA export CARLA_SIMULATOR_ROOT="$HOME/carla_workspace/carla_garage/carla" diff --git a/environment.yml b/environment.yml index 53e458bc..0f4d171c 100644 --- a/environment.yml +++ b/environment.yml @@ -1,4 +1,4 @@ -name: d123 +name: py123d channels: - conda-forge dependencies: diff --git a/notebooks/scene_rendering.ipynb b/notebooks/scene_rendering.ipynb index 780e9bab..12f8a864 100644 --- a/notebooks/scene_rendering.ipynb +++ b/notebooks/scene_rendering.ipynb @@ -7,12 +7,12 @@ "metadata": {}, "outputs": [], "source": [ - "from d123.dataset.scene.scene_builder import ArrowSceneBuilder\n", - "from d123.dataset.scene.scene_filter import SceneFilter\n", + "from py123d.dataset.scene.scene_builder import ArrowSceneBuilder\n", + "from py123d.dataset.scene.scene_filter import SceneFilter\n", "\n", - "from d123.common.multithreading.worker_sequential import Sequential\n", + "from py123d.common.multithreading.worker_sequential import Sequential\n", "\n", - "# from d123.common.multithreading.worker_ray import RayDistributed" + "# from py123d.common.multithreading.worker_ray import RayDistributed" ] }, { @@ -45,7 +45,7 @@ " duration_s=15.1,\n", " history_s=1.0,\n", ")\n", - "scene_builder = ArrowSceneBuilder(\"/home/daniel/d123_workspace/data\")\n", + "scene_builder = ArrowSceneBuilder(\"/home/daniel/py123d_workspace/data\")\n", "worker = Sequential()\n", "# worker = RayDistributed()\n", "scenes = scene_builder.get_scenes(scene_filter, worker)\n", @@ -61,18 +61,18 @@ "outputs": [], "source": [ "from typing import Tuple\n", - "d123.datatypes.detections.detection import BoxDetection\n", - "d123.datatypes.detections.detection_types import DYNAMIC_DETECTION_TYPES, STATIC_DETECTION_TYPES\n", - "from d123.geometry import StateSE2\n", - "from d123.geometry.transform.tranform_2d import translate_along_yaw\n", - "from d123.geometry.vector import Vector2D\n", - "from d123.common.visualization.matplotlib.observation import (\n", + "py123d.datatypes.detections.detection import BoxDetection\n", + "py123d.datatypes.detections.detection_types import DYNAMIC_DETECTION_TYPES, STATIC_DETECTION_TYPES\n", + "from py123d.geometry import StateSE2\n", + "from py123d.geometry.transform.tranform_2d import translate_along_yaw\n", + "from py123d.geometry.vector import Vector2D\n", + "from py123d.common.visualization.matplotlib.observation import (\n", " add_box_detections_to_ax,\n", " add_default_map_on_ax,\n", " add_ego_vehicle_to_ax,\n", " add_traffic_lights_to_ax,\n", ")\n", - "from d123.dataset.scene.abstract_scene import AbstractScene\n", + "from py123d.dataset.scene.abstract_scene import AbstractScene\n", "\n", "import matplotlib.pyplot as plt\n", "\n", @@ -157,7 +157,7 @@ ], "metadata": { "kernelspec": { - "display_name": "d123", + "display_name": "py123d", "language": "python", 
"name": "python3" }, diff --git a/notebooks/scene_sensor_loading.ipynb b/notebooks/scene_sensor_loading.ipynb index 56fdf4f4..49d7e34e 100644 --- a/notebooks/scene_sensor_loading.ipynb +++ b/notebooks/scene_sensor_loading.ipynb @@ -8,11 +8,11 @@ "outputs": [], "source": [ "import matplotlib.pyplot as plt\n", - "from d123.dataset.scene.scene_builder import ArrowSceneBuilder\n", - "from d123.dataset.scene.scene_filter import SceneFilter\n", + "from py123d.dataset.scene.scene_builder import ArrowSceneBuilder\n", + "from py123d.dataset.scene.scene_filter import SceneFilter\n", "\n", - "from d123.common.multithreading.worker_sequential import Sequential\n", - "# from d123.common.multithreading.worker_ray impo\n", + "from py123d.common.multithreading.worker_sequential import Sequential\n", + "# from py123d.common.multithreading.worker_ray impo\n", "# rt RayDistribute\n", "\n", "\n" @@ -48,7 +48,7 @@ " history_s=1.0,\n", " timestamp_threshold_s=15.0,\n", ")\n", - "scene_builder = ArrowSceneBuilder(\"/home/daniel/d123_workspace/data\")\n", + "scene_builder = ArrowSceneBuilder(\"/home/daniel/py123d_workspace/data\")\n", "worker = Sequential()\n", "# worker = RayDistributed()\n", "scenes = scene_builder.get_scenes(scene_filter, worker)\n", @@ -63,8 +63,8 @@ "metadata": {}, "outputs": [], "source": [ - "from d123.common.datatypes.sensor.camera import CameraType\n", - "from d123.dataset.scene.arrow_scene import ArrowScene\n", + "from py123d.common.datatypes.sensor.camera import CameraType\n", + "from py123d.dataset.scene.arrow_scene import ArrowScene\n", "\n", "scene: ArrowScene = scenes[12]\n", "scene.open()\n", @@ -101,7 +101,7 @@ ], "metadata": { "kernelspec": { - "display_name": "d123", + "display_name": "py123d", "language": "python", "name": "python3" }, diff --git a/notebooks/tools/merge_videos.ipynb b/notebooks/tools/merge_videos.ipynb index 257bb8ad..92629b3a 100644 --- a/notebooks/tools/merge_videos.ipynb +++ b/notebooks/tools/merge_videos.ipynb @@ -89,7 +89,7 @@ " return False\n", "\n", "# List your MP4 files in the order you want them merged\n", - "video_folder = Path(\"/home/daniel/d123_logs_videos/wopd_train\")\n", + "video_folder = Path(\"/home/daniel/py123d_logs_videos/wopd_train\")\n", "video_files = [str(file) for file in video_folder.glob(\"*.mp4\") if file.is_file()]\n", "\n", "\n", @@ -115,7 +115,7 @@ ], "metadata": { "kernelspec": { - "display_name": "d123", + "display_name": "py123d", "language": "python", "name": "python3" }, diff --git a/notebooks/tools/plot_map_sizes.ipynb b/notebooks/tools/plot_map_sizes.ipynb index 4bea65c7..c37c0581 100644 --- a/notebooks/tools/plot_map_sizes.ipynb +++ b/notebooks/tools/plot_map_sizes.ipynb @@ -11,7 +11,7 @@ "\n", "import matplotlib.pyplot as plt\n", "\n", - "folder_path = \"/home/daniel/d123_workspace/data/maps\" # Replace with your folder path\n", + "folder_path = \"/home/daniel/py123d_workspace/data/maps\" # Replace with your folder path\n", "\n", "# Get all files in the folder (not subfolders)\n", "files = sorted([f for f in os.listdir(folder_path) if os.path.isfile(os.path.join(folder_path, f))])\n", @@ -45,7 +45,7 @@ ], "metadata": { "kernelspec": { - "display_name": "d123", + "display_name": "py123d", "language": "python", "name": "python3" }, diff --git a/notebooks/viz/bev_matplotlib.ipynb b/notebooks/viz/bev_matplotlib.ipynb index 5abe6dfa..c63afdb4 100644 --- a/notebooks/viz/bev_matplotlib.ipynb +++ b/notebooks/viz/bev_matplotlib.ipynb @@ -7,11 +7,11 @@ "metadata": {}, "outputs": [], "source": [ - "from 
d123.datatypes.scene.arrow.arrow_scene_builder import ArrowSceneBuilder\n", - "from d123.datatypes.scene.scene_filter import SceneFilter\n", + "from py123d.datatypes.scene.arrow.arrow_scene_builder import ArrowSceneBuilder\n", + "from py123d.datatypes.scene.scene_filter import SceneFilter\n", "\n", - "from d123.common.multithreading.worker_sequential import Sequential\n", - "from d123.datatypes.sensors.camera.pinhole_camera import PinholeCameraType " + "from py123d.common.multithreading.worker_sequential import Sequential\n", + "from py123d.datatypes.sensors.camera.pinhole_camera import PinholeCameraType " ] }, { @@ -21,12 +21,12 @@ "metadata": {}, "outputs": [], "source": [ - "from d123.geometry import Point2D\n", + "from py123d.geometry import Point2D\n", "import numpy as np\n", "\n", "# import torch\n", "\n", - "from d123.geometry.polyline import Polyline2D" + "from py123d.geometry.polyline import Polyline2D" ] }, { @@ -49,7 +49,7 @@ "# log_names = None\n", "\n", "\n", - "# generator = Path(\"/home/daniel/d123_workspace/data/logs\").iterdir()\n", + "# generator = Path(\"/home/daniel/py123d_workspace/data/logs\").iterdir()\n", "\n", "log_names = None\n", "scene_uuids = None\n", @@ -64,7 +64,7 @@ " shuffle=True,\n", " # camera_types=[CameraType.CAM_F0],\n", ")\n", - "scene_builder = ArrowSceneBuilder(\"/home/daniel/d123_workspace/data\")\n", + "scene_builder = ArrowSceneBuilder(\"/home/daniel/py123d_workspace/data\")\n", "worker = Sequential()\n", "# worker = RayDistributed()\n", "scenes = scene_builder.get_scenes(scene_filter, worker)\n", @@ -84,22 +84,22 @@ "import matplotlib.pyplot as plt\n", "import numpy as np\n", "\n", - "from d123.geometry import Point2D\n", - "from d123.common.visualization.color.color import BLACK, DARK_GREY, DARKER_GREY, LIGHT_GREY, NEW_TAB_10, TAB_10\n", - "from d123.common.visualization.color.config import PlotConfig\n", - "from d123.common.visualization.color.default import CENTERLINE_CONFIG, MAP_SURFACE_CONFIG, ROUTE_CONFIG\n", - "from d123.common.visualization.matplotlib.observation import (\n", + "from py123d.geometry import Point2D\n", + "from py123d.common.visualization.color.color import BLACK, DARK_GREY, DARKER_GREY, LIGHT_GREY, NEW_TAB_10, TAB_10\n", + "from py123d.common.visualization.color.config import PlotConfig\n", + "from py123d.common.visualization.color.default import CENTERLINE_CONFIG, MAP_SURFACE_CONFIG, ROUTE_CONFIG\n", + "from py123d.common.visualization.matplotlib.observation import (\n", " add_box_detections_to_ax,\n", " add_default_map_on_ax,\n", " add_ego_vehicle_to_ax,\n", " add_traffic_lights_to_ax,\n", ")\n", - "from d123.common.visualization.matplotlib.utils import add_shapely_linestring_to_ax, add_shapely_polygon_to_ax\n", - "from d123.datatypes.maps.abstract_map import AbstractMap\n", - "from d123.datatypes.maps.abstract_map_objects import AbstractLane, AbstractLaneGroup\n", - "from d123.datatypes.maps.gpkg.gpkg_map_objects import GPKGIntersection\n", - "from d123.datatypes.maps.map_datatypes import MapLayer\n", - "from d123.datatypes.scene.abstract_scene import AbstractScene\n", + "from py123d.common.visualization.matplotlib.utils import add_shapely_linestring_to_ax, add_shapely_polygon_to_ax\n", + "from py123d.datatypes.maps.abstract_map import AbstractMap\n", + "from py123d.datatypes.maps.abstract_map_objects import AbstractLane, AbstractLaneGroup\n", + "from py123d.datatypes.maps.gpkg.gpkg_map_objects import GPKGIntersection\n", + "from py123d.datatypes.maps.map_datatypes import MapLayer\n", + "from 
py123d.datatypes.scene.abstract_scene import AbstractScene\n", "\n", "\n", "import shapely.geometry as geom\n", @@ -296,8 +296,8 @@ "metadata": {}, "outputs": [], "source": [ - "from d123.dataset.conversion.map.road_edge.road_edge_2d_utils import get_road_edge_linear_rings\n", - "from d123.dataset.maps.gpkg.gpkg_map import GPKGMap\n", + "from py123d.dataset.conversion.map.road_edge.road_edge_2d_utils import get_road_edge_linear_rings\n", + "from py123d.dataset.maps.gpkg.gpkg_map import GPKGMap\n", "\n", "\n", "map_api: GPKGMap = scenes[scene_index].map_api\n", @@ -355,7 +355,7 @@ ], "metadata": { "kernelspec": { - "display_name": "d123", + "display_name": "py123d", "language": "python", "name": "python3" }, diff --git a/notebooks/viz/bev_matplotlib_prediction.ipynb b/notebooks/viz/bev_matplotlib_prediction.ipynb index 3363e5ef..f8ba4b43 100644 --- a/notebooks/viz/bev_matplotlib_prediction.ipynb +++ b/notebooks/viz/bev_matplotlib_prediction.ipynb @@ -7,11 +7,11 @@ "metadata": {}, "outputs": [], "source": [ - "from d123.dataset.scene.scene_builder import ArrowSceneBuilder\n", - "from d123.dataset.scene.scene_filter import SceneFilter\n", + "from py123d.dataset.scene.scene_builder import ArrowSceneBuilder\n", + "from py123d.dataset.scene.scene_filter import SceneFilter\n", "\n", - "from d123.common.multithreading.worker_sequential import Sequential\n", - "from d123.common.datatypes.sensor.camera import CameraType" + "from py123d.common.multithreading.worker_sequential import Sequential\n", + "from py123d.common.datatypes.sensor.camera import CameraType" ] }, { @@ -45,7 +45,7 @@ " shuffle=True,\n", " # camera_types=[CameraType.CAM_F0],\n", ")\n", - "scene_builder = ArrowSceneBuilder(\"/home/daniel/d123_workspace/data\")\n", + "scene_builder = ArrowSceneBuilder(\"/home/daniel/py123d_workspace/data\")\n", "worker = Sequential()\n", "# worker = RayDistributed()\n", "scenes = scene_builder.get_scenes(scene_filter, worker)\n", @@ -64,22 +64,22 @@ "from typing import List, Optional, Tuple\n", "import matplotlib.pyplot as plt\n", "import numpy as np\n", - "from d123.geometry.base import Point2D\n", - "from d123.common.visualization.color.color import BLACK, DARK_GREY, DARKER_GREY, LIGHT_GREY, NEW_TAB_10, TAB_10\n", - "from d123.common.visualization.color.config import PlotConfig\n", - "from d123.common.visualization.color.default import CENTERLINE_CONFIG, MAP_SURFACE_CONFIG, ROUTE_CONFIG\n", - "from d123.common.visualization.matplotlib.observation import (\n", + "from py123d.geometry.base import Point2D\n", + "from py123d.common.visualization.color.color import BLACK, DARK_GREY, DARKER_GREY, LIGHT_GREY, NEW_TAB_10, TAB_10\n", + "from py123d.common.visualization.color.config import PlotConfig\n", + "from py123d.common.visualization.color.default import CENTERLINE_CONFIG, MAP_SURFACE_CONFIG, ROUTE_CONFIG\n", + "from py123d.common.visualization.matplotlib.observation import (\n", " add_box_detections_to_ax,\n", " add_box_future_detections_to_ax,\n", " add_default_map_on_ax,\n", " add_ego_vehicle_to_ax,\n", " add_traffic_lights_to_ax,\n", ")\n", - "from d123.common.visualization.matplotlib.utils import add_shapely_linestring_to_ax, add_shapely_polygon_to_ax\n", - "from d123.dataset.maps.abstract_map import AbstractMap\n", - "from d123.dataset.maps.abstract_map_objects import AbstractLane, AbstractLaneGroup\n", - "from d123.dataset.maps.map_datatypes import MapLayer\n", - "from d123.dataset.scene.abstract_scene import AbstractScene\n", + "from py123d.common.visualization.matplotlib.utils import 
add_shapely_linestring_to_ax, add_shapely_polygon_to_ax\n", + "from py123d.dataset.maps.abstract_map import AbstractMap\n", + "from py123d.dataset.maps.abstract_map_objects import AbstractLane, AbstractLaneGroup\n", + "from py123d.dataset.maps.map_datatypes import MapLayer\n", + "from py123d.dataset.scene.abstract_scene import AbstractScene\n", "\n", "\n", "\n", @@ -165,8 +165,8 @@ "metadata": {}, "outputs": [], "source": [ - "from d123.dataset.conversion.map.road_edge.road_edge_2d_utils import get_road_edge_linear_rings\n", - "from d123.dataset.maps.gpkg.gpkg_map import GPKGMap\n", + "from py123d.dataset.conversion.map.road_edge.road_edge_2d_utils import get_road_edge_linear_rings\n", + "from py123d.dataset.maps.gpkg.gpkg_map import GPKGMap\n", "\n", "\n", "map_api: GPKGMap = scenes[scene_index].map_api\n", @@ -224,7 +224,7 @@ ], "metadata": { "kernelspec": { - "display_name": "d123", + "display_name": "py123d", "language": "python", "name": "python3" }, diff --git a/notebooks/viz/camera_matplotlib.ipynb b/notebooks/viz/camera_matplotlib.ipynb index 77380f62..85a2cc4a 100644 --- a/notebooks/viz/camera_matplotlib.ipynb +++ b/notebooks/viz/camera_matplotlib.ipynb @@ -11,16 +11,16 @@ "\n", "import matplotlib.pyplot as plt\n", "\n", - "from d123.common.multithreading.worker_sequential import Sequential\n", + "from py123d.common.multithreading.worker_sequential import Sequential\n", "\n", - "from d123.dataset.scene.scene_builder import ArrowSceneBuilder\n", - "from d123.dataset.scene.scene_filter import SceneFilter\n", - "from d123.dataset.scene.abstract_scene import AbstractScene\n", + "from py123d.dataset.scene.scene_builder import ArrowSceneBuilder\n", + "from py123d.dataset.scene.scene_filter import SceneFilter\n", + "from py123d.dataset.scene.abstract_scene import AbstractScene\n", "\n", "from typing import Dict\n", - "from d123.common.datatypes.sensor.camera import CameraType\n", - "from d123.common.visualization.matplotlib.camera import add_camera_ax\n", - "from d123.common.visualization.matplotlib.camera import add_box_detections_to_camera_ax" + "from py123d.common.datatypes.sensor.camera import CameraType\n", + "from py123d.common.visualization.matplotlib.camera import add_camera_ax\n", + "from py123d.common.visualization.matplotlib.camera import add_box_detections_to_camera_ax" ] }, { @@ -59,7 +59,7 @@ " shuffle=False,\n", " camera_types=[CameraType.CAM_F0],\n", ")\n", - "scene_builder = ArrowSceneBuilder(\"/home/daniel/d123_workspace/data\")\n", + "scene_builder = ArrowSceneBuilder(\"/home/daniel/py123d_workspace/data\")\n", "worker = Sequential()\n", "# worker = RayDistributed()\n", "scenes = scene_builder.get_scenes(scene_filter, worker)\n", @@ -375,7 +375,7 @@ ], "metadata": { "kernelspec": { - "display_name": "d123", + "display_name": "py123d", "language": "python", "name": "python3" }, diff --git a/notebooks/viz/log_rendering.ipynb b/notebooks/viz/log_rendering.ipynb index fe88e006..611fe58f 100644 --- a/notebooks/viz/log_rendering.ipynb +++ b/notebooks/viz/log_rendering.ipynb @@ -8,13 +8,13 @@ "outputs": [], "source": [ "from pathlib import Path\n", - "from d123.dataset.scene.arrow_scene import ArrowScene\n", - "from d123.common.visualization.matplotlib.plots import plot_scene_at_iteration\n", + "from py123d.dataset.scene.arrow_scene import ArrowScene\n", + "from py123d.common.visualization.matplotlib.plots import plot_scene_at_iteration\n", "\n", "\n", "\n", "log_name = \"1005081002024129653_5313_150_5333_150\"\n", - "log_file = 
Path(f\"/home/daniel/d123_workspace/data/wopd_train/{log_name}.arrow\")\n", + "log_file = Path(f\"/home/daniel/py123d_workspace/data/wopd_train/{log_name}.arrow\")\n", "scene = ArrowScene(log_file)\n", "\n", "fig, ax = plot_scene_at_iteration(scene, iteration=10)\n", @@ -30,9 +30,9 @@ "outputs": [], "source": [ "import traceback\n", - "from d123.common.visualization.matplotlib.plots import render_scene_animation\n", + "from py123d.common.visualization.matplotlib.plots import render_scene_animation\n", "\n", - "output_path = Path(\"/home/daniel/d123_logs_videos\")\n", + "output_path = Path(\"/home/daniel/py123d_logs_videos\")\n", "# render_scene_as_mp4(scene, output_path, fps=30, end_idx=10000, step=5, dpi=100)" ] }, @@ -45,11 +45,11 @@ "source": [ "# Create an mp4 animation with a specific FPS\n", "import traceback\n", - "from d123.common.visualization.matplotlib.plots import render_scene_animation\n", + "from py123d.common.visualization.matplotlib.plots import render_scene_animation\n", "\n", "split = \"av2-sensor-mini_train\"\n", - "output_path = Path(f\"/home/daniel/d123_logs_videos/{split}\")\n", - "log_path = Path(f\"/home/daniel/d123_workspace/data/{split}\")\n", + "output_path = Path(f\"/home/daniel/py123d_logs_videos/{split}\")\n", + "log_path = Path(f\"/home/daniel/py123d_workspace/data/{split}\")\n", "for log_file in log_path.iterdir():\n", " try:\n", " scene = ArrowScene(log_file)\n", @@ -83,7 +83,7 @@ ], "metadata": { "kernelspec": { - "display_name": "d123_dev", + "display_name": "py123d_dev", "language": "python", "name": "python3" }, diff --git a/notebooks/viz/video_example.ipynb b/notebooks/viz/video_example.ipynb index 3b836715..3868681a 100644 --- a/notebooks/viz/video_example.ipynb +++ b/notebooks/viz/video_example.ipynb @@ -7,11 +7,11 @@ "metadata": {}, "outputs": [], "source": [ - "from d123.dataset.scene.scene_builder import ArrowSceneBuilder\n", - "from d123.dataset.scene.scene_filter import SceneFilter\n", + "from py123d.dataset.scene.scene_builder import ArrowSceneBuilder\n", + "from py123d.dataset.scene.scene_filter import SceneFilter\n", "\n", - "from d123.common.multithreading.worker_sequential import Sequential\n", - "from d123.common.datatypes.sensor.camera import CameraType" + "from py123d.common.multithreading.worker_sequential import Sequential\n", + "from py123d.common.datatypes.sensor.camera import CameraType" ] }, { @@ -21,12 +21,12 @@ "metadata": {}, "outputs": [], "source": [ - "from d123.geometry import Point2D\n", + "from py123d.geometry import Point2D\n", "import numpy as np\n", "\n", "import torch\n", "\n", - "from d123.geometry.polyline import Polyline2D" + "from py123d.geometry.polyline import Polyline2D" ] }, { @@ -60,7 +60,7 @@ " shuffle=True,\n", " camera_types=[CameraType.CAM_F0],\n", ")\n", - "scene_builder = ArrowSceneBuilder(\"/home/daniel/d123_workspace/data\")\n", + "scene_builder = ArrowSceneBuilder(\"/home/daniel/py123d_workspace/data\")\n", "worker = Sequential()\n", "# worker = RayDistributed()\n", "scenes = scene_builder.get_scenes(scene_filter, worker)\n", @@ -78,23 +78,23 @@ "from typing import List, Optional, Tuple\n", "import matplotlib.pyplot as plt\n", "import numpy as np\n", - "from d123.common.visualization.matplotlib.camera import add_camera_ax\n", - "from d123.geometry import Point2D\n", - "from d123.common.visualization.color.color import BLACK, DARK_GREY, DARKER_GREY, LIGHT_GREY, NEW_TAB_10, TAB_10\n", - "from d123.common.visualization.color.config import PlotConfig\n", - "from d123.common.visualization.color.default 
import CENTERLINE_CONFIG, MAP_SURFACE_CONFIG, ROUTE_CONFIG\n", - "from d123.common.visualization.matplotlib.observation import (\n", + "from py123d.common.visualization.matplotlib.camera import add_camera_ax\n", + "from py123d.geometry import Point2D\n", + "from py123d.common.visualization.color.color import BLACK, DARK_GREY, DARKER_GREY, LIGHT_GREY, NEW_TAB_10, TAB_10\n", + "from py123d.common.visualization.color.config import PlotConfig\n", + "from py123d.common.visualization.color.default import CENTERLINE_CONFIG, MAP_SURFACE_CONFIG, ROUTE_CONFIG\n", + "from py123d.common.visualization.matplotlib.observation import (\n", " add_box_detections_to_ax,\n", " add_default_map_on_ax,\n", " add_ego_vehicle_to_ax,\n", " add_traffic_lights_to_ax,\n", ")\n", - "from d123.common.visualization.matplotlib.utils import add_shapely_linestring_to_ax, add_shapely_polygon_to_ax\n", - "from d123.dataset.maps.abstract_map import AbstractMap\n", - "from d123.dataset.maps.abstract_map_objects import AbstractLane, AbstractLaneGroup\n", - "from d123.dataset.maps.gpkg.gpkg_map_objects import GPKGIntersection\n", - "from d123.dataset.maps.map_datatypes import MapLayer\n", - "from d123.dataset.scene.abstract_scene import AbstractScene\n", + "from py123d.common.visualization.matplotlib.utils import add_shapely_linestring_to_ax, add_shapely_polygon_to_ax\n", + "from py123d.dataset.maps.abstract_map import AbstractMap\n", + "from py123d.dataset.maps.abstract_map_objects import AbstractLane, AbstractLaneGroup\n", + "from py123d.dataset.maps.gpkg.gpkg_map_objects import GPKGIntersection\n", + "from py123d.dataset.maps.map_datatypes import MapLayer\n", + "from py123d.dataset.scene.abstract_scene import AbstractScene\n", "\n", "\n", "import shapely.geometry as geom\n", @@ -238,8 +238,8 @@ "metadata": {}, "outputs": [], "source": [ - "from d123.dataset.conversion.map.road_edge.road_edge_2d_utils import get_road_edge_linear_rings\n", - "from d123.dataset.maps.gpkg.gpkg_map import GPKGMap\n", + "from py123d.dataset.conversion.map.road_edge.road_edge_2d_utils import get_road_edge_linear_rings\n", + "from py123d.dataset.maps.gpkg.gpkg_map import GPKGMap\n", "\n", "\n", "map_api: GPKGMap = scenes[scene_index].map_api\n", @@ -297,7 +297,7 @@ ], "metadata": { "kernelspec": { - "display_name": "d123", + "display_name": "py123d", "language": "python", "name": "python3" }, diff --git a/notebooks/viz/viser_testing_v2_scene.ipynb b/notebooks/viz/viser_testing_v2_scene.ipynb index 7c6d4a22..3cb9eead 100644 --- a/notebooks/viz/viser_testing_v2_scene.ipynb +++ b/notebooks/viz/viser_testing_v2_scene.ipynb @@ -7,11 +7,11 @@ "metadata": {}, "outputs": [], "source": [ - "from d123.datatypes.scene.arrow.arrow_scene_builder import ArrowSceneBuilder\n", - "from d123.datatypes.scene.scene_filter import SceneFilter\n", + "from py123d.datatypes.scene.arrow.arrow_scene_builder import ArrowSceneBuilder\n", + "from py123d.datatypes.scene.scene_filter import SceneFilter\n", "\n", - "from d123.common.multithreading.worker_sequential import Sequential\n", - "from d123.datatypes.sensors.camera import CameraType" + "from py123d.common.multithreading.worker_sequential import Sequential\n", + "from py123d.datatypes.sensors.camera import CameraType" ] }, { @@ -40,7 +40,7 @@ " shuffle=True,\n", " camera_types=[CameraType.CAM_F0],\n", ")\n", - "scene_builder = ArrowSceneBuilder(\"/home/daniel/d123_workspace/data\")\n", + "scene_builder = ArrowSceneBuilder(\"/home/daniel/py123d_workspace/data\")\n", "worker = Sequential()\n", "# worker = RayDistributed()\n", 
"scenes = scene_builder.get_scenes(scene_filter, worker)\n", @@ -55,7 +55,7 @@ "metadata": {}, "outputs": [], "source": [ - "from d123.common.visualization.viser.server import ViserVisualizationServer\n", + "from py123d.common.visualization.viser.server import ViserVisualizationServer\n", "\n", "\n", "visualization_server = ViserVisualizationServer(scenes, scene_index=0)" @@ -72,7 +72,7 @@ ], "metadata": { "kernelspec": { - "display_name": "d123", + "display_name": "py123d", "language": "python", "name": "python3" }, diff --git a/notebooks/waymo_perception/lidar_testing.ipynb b/notebooks/waymo_perception/lidar_testing.ipynb index fa6a92a6..82a5ac27 100644 --- a/notebooks/waymo_perception/lidar_testing.ipynb +++ b/notebooks/waymo_perception/lidar_testing.ipynb @@ -54,8 +54,8 @@ "import io\n", "from pyquaternion import Quaternion\n", "\n", - "from d123.geometry import StateSE3\n", - "from d123.geometry.bounding_box import BoundingBoxSE3\n", + "from py123d.geometry import StateSE3\n", + "from py123d.geometry.bounding_box import BoundingBoxSE3\n", "\n", "from waymo_open_dataset.utils import frame_utils\n", "\n", @@ -279,7 +279,7 @@ ], "metadata": { "kernelspec": { - "display_name": "d123", + "display_name": "py123d", "language": "python", "name": "python3" }, diff --git a/notebooks/waymo_perception/map_testing.ipynb b/notebooks/waymo_perception/map_testing.ipynb index b172331b..abf08b80 100644 --- a/notebooks/waymo_perception/map_testing.ipynb +++ b/notebooks/waymo_perception/map_testing.ipynb @@ -54,8 +54,8 @@ "import io\n", "from pyquaternion import Quaternion\n", "\n", - "from d123.geometry import StateSE3\n", - "from d123.geometry.bounding_box import BoundingBoxSE3\n", + "from py123d.geometry import StateSE3\n", + "from py123d.geometry.bounding_box import BoundingBoxSE3\n", "\n", "from waymo_open_dataset.utils import frame_utils\n", "\n", @@ -139,7 +139,7 @@ "source": [ "from collections import defaultdict\n", "\n", - "from d123.geometry.utils.units import mph_to_mps\n", + "from py123d.geometry.utils.units import mph_to_mps\n", "\n", "\n", "dataset = tf.data.TFRecordDataset(pathname, compression_type=\"\")\n", @@ -201,7 +201,7 @@ "metadata": {}, "outputs": [], "source": [ - "from d123.common.visualization.matplotlib.utils import add_non_repeating_legend_to_ax\n", + "from py123d.common.visualization.matplotlib.utils import add_non_repeating_legend_to_ax\n", "\n", "\n", "fig, ax = plt.subplots(figsize=(30, 30))\n", @@ -241,7 +241,7 @@ "metadata": {}, "outputs": [], "source": [ - "from d123.conversion.datasets.wopd.wopd_map_utils import extract_lane_boundaries\n", + "from py123d.conversion.datasets.wopd.wopd_map_utils import extract_lane_boundaries\n", "\n", "\n", "left_boundaries, right_boundaries = extract_lane_boundaries(\n", @@ -382,7 +382,7 @@ "metadata": {}, "outputs": [], "source": [ - "from d123.geometry.polyline import Polyline3D\n", + "from py123d.geometry.polyline import Polyline3D\n", "import numpy as np\n", "\n", "\n", @@ -631,7 +631,7 @@ ], "metadata": { "kernelspec": { - "display_name": "d123", + "display_name": "py123d", "language": "python", "name": "python3" }, diff --git a/notebooks/waymo_perception/testing.ipynb b/notebooks/waymo_perception/testing.ipynb index ae984f72..e83a9b86 100644 --- a/notebooks/waymo_perception/testing.ipynb +++ b/notebooks/waymo_perception/testing.ipynb @@ -51,8 +51,8 @@ "import io\n", "from pyquaternion import Quaternion\n", "\n", - "from d123.geometry import StateSE3\n", - "from d123.geometry.bounding_box import BoundingBoxSE3\n", + "from 
py123d.geometry import StateSE3\n", + "from py123d.geometry.bounding_box import BoundingBoxSE3\n", "\n", "from waymo_open_dataset.utils import frame_utils\n", "\n", @@ -226,7 +226,7 @@ "metadata": {}, "outputs": [], "source": [ - "from d123.common.datatypes.time.time_point import TimePoint\n", + "from py123d.common.datatypes.time.time_point import TimePoint\n", "\n", "\n", "for frame_idx, data in enumerate(dataset):\n", @@ -258,7 +258,7 @@ ], "metadata": { "kernelspec": { - "display_name": "d123", + "display_name": "py123d", "language": "python", "name": "python3" }, diff --git a/d123/__init__.py b/py123d/__init__.py similarity index 100% rename from d123/__init__.py rename to py123d/__init__.py diff --git a/d123/common/__init__.py b/py123d/common/__init__.py similarity index 100% rename from d123/common/__init__.py rename to py123d/common/__init__.py diff --git a/d123/common/multithreading/ray_execution.py b/py123d/common/multithreading/ray_execution.py similarity index 98% rename from d123/common/multithreading/ray_execution.py rename to py123d/common/multithreading/ray_execution.py index 4e44c56f..2ca1d472 100644 --- a/d123/common/multithreading/ray_execution.py +++ b/py123d/common/multithreading/ray_execution.py @@ -10,7 +10,7 @@ from ray.remote_function import RemoteFunction from tqdm import tqdm -from d123.common.multithreading.worker_pool import Task +from py123d.common.multithreading.worker_pool import Task def _ray_object_iterator(initial_ids: List[ray.ObjectRef]) -> Iterator[Tuple[ray.ObjectRef, Any]]: diff --git a/d123/common/multithreading/worker_parallel.py b/py123d/common/multithreading/worker_parallel.py similarity index 97% rename from d123/common/multithreading/worker_parallel.py rename to py123d/common/multithreading/worker_parallel.py index 737bd486..9183a0fc 100644 --- a/d123/common/multithreading/worker_parallel.py +++ b/py123d/common/multithreading/worker_parallel.py @@ -6,7 +6,7 @@ from tqdm import tqdm -from d123.common.multithreading.worker_pool import ( +from py123d.common.multithreading.worker_pool import ( Task, WorkerPool, WorkerResources, diff --git a/d123/common/multithreading/worker_pool.py b/py123d/common/multithreading/worker_pool.py similarity index 100% rename from d123/common/multithreading/worker_pool.py rename to py123d/common/multithreading/worker_pool.py diff --git a/d123/common/multithreading/worker_ray.py b/py123d/common/multithreading/worker_ray.py similarity index 97% rename from d123/common/multithreading/worker_ray.py rename to py123d/common/multithreading/worker_ray.py index 8bb12b04..48b06f77 100644 --- a/d123/common/multithreading/worker_ray.py +++ b/py123d/common/multithreading/worker_ray.py @@ -7,8 +7,8 @@ import ray from psutil import cpu_count -from d123.common.multithreading.ray_execution import ray_map -from d123.common.multithreading.worker_pool import Task, WorkerPool, WorkerResources +from py123d.common.multithreading.ray_execution import ray_map +from py123d.common.multithreading.worker_pool import Task, WorkerPool, WorkerResources logger = logging.getLogger(__name__) diff --git a/d123/common/multithreading/worker_sequential.py b/py123d/common/multithreading/worker_sequential.py similarity index 96% rename from d123/common/multithreading/worker_sequential.py rename to py123d/common/multithreading/worker_sequential.py index e3d436cb..c0106e86 100644 --- a/d123/common/multithreading/worker_sequential.py +++ b/py123d/common/multithreading/worker_sequential.py @@ -4,7 +4,7 @@ from tqdm import tqdm -from 
d123.common.multithreading.worker_pool import ( +from py123d.common.multithreading.worker_pool import ( Task, WorkerPool, WorkerResources, diff --git a/d123/common/multithreading/worker_utils.py b/py123d/common/multithreading/worker_utils.py similarity index 95% rename from d123/common/multithreading/worker_utils.py rename to py123d/common/multithreading/worker_utils.py index fbe0a753..ce79d6df 100644 --- a/d123/common/multithreading/worker_utils.py +++ b/py123d/common/multithreading/worker_utils.py @@ -3,7 +3,7 @@ import numpy as np from psutil import cpu_count -from d123.common.multithreading.worker_pool import Task, WorkerPool +from py123d.common.multithreading.worker_pool import Task, WorkerPool def chunk_list(input_list: List[Any], num_chunks: Optional[int] = None) -> List[List[Any]]: diff --git a/d123/common/utils/__init__.py b/py123d/common/utils/__init__.py similarity index 100% rename from d123/common/utils/__init__.py rename to py123d/common/utils/__init__.py diff --git a/d123/common/utils/arrow_helper.py b/py123d/common/utils/arrow_helper.py similarity index 100% rename from d123/common/utils/arrow_helper.py rename to py123d/common/utils/arrow_helper.py diff --git a/d123/common/utils/dependencies.py b/py123d/common/utils/dependencies.py similarity index 91% rename from d123/common/utils/dependencies.py rename to py123d/common/utils/dependencies.py index 547947e3..38d7a88f 100644 --- a/d123/common/utils/dependencies.py +++ b/py123d/common/utils/dependencies.py @@ -14,5 +14,5 @@ def check_dependencies(modules: Union[str, List[str,]], optional_name: str) -> N __import__(module) except ImportError: raise ImportError( - f"Missing '{module}'. Install with: `pip install d123[{optional_name}]` or `pip install -e .[{optional_name}]`" + f"Missing '{module}'. 
Install with: `pip install py123d[{optional_name}]` or `pip install -e .[{optional_name}]`" ) diff --git a/d123/common/utils/enums.py b/py123d/common/utils/enums.py similarity index 100% rename from d123/common/utils/enums.py rename to py123d/common/utils/enums.py diff --git a/d123/common/utils/mixin.py b/py123d/common/utils/mixin.py similarity index 100% rename from d123/common/utils/mixin.py rename to py123d/common/utils/mixin.py diff --git a/d123/common/utils/timer.py b/py123d/common/utils/timer.py similarity index 100% rename from d123/common/utils/timer.py rename to py123d/common/utils/timer.py diff --git a/d123/common/utils/uuid.py b/py123d/common/utils/uuid.py similarity index 100% rename from d123/common/utils/uuid.py rename to py123d/common/utils/uuid.py diff --git a/d123/common/visualization/__init__.py b/py123d/common/visualization/__init__.py similarity index 100% rename from d123/common/visualization/__init__.py rename to py123d/common/visualization/__init__.py diff --git a/d123/common/visualization/bokeh/.gitkeep b/py123d/common/visualization/bokeh/.gitkeep similarity index 100% rename from d123/common/visualization/bokeh/.gitkeep rename to py123d/common/visualization/bokeh/.gitkeep diff --git a/d123/common/visualization/color/__init__.py b/py123d/common/visualization/color/__init__.py similarity index 100% rename from d123/common/visualization/color/__init__.py rename to py123d/common/visualization/color/__init__.py diff --git a/d123/common/visualization/color/color.py b/py123d/common/visualization/color/color.py similarity index 100% rename from d123/common/visualization/color/color.py rename to py123d/common/visualization/color/color.py diff --git a/d123/common/visualization/color/config.py b/py123d/common/visualization/color/config.py similarity index 88% rename from d123/common/visualization/color/config.py rename to py123d/common/visualization/color/config.py index 6d610844..db9007e3 100644 --- a/d123/common/visualization/color/config.py +++ b/py123d/common/visualization/color/config.py @@ -1,7 +1,7 @@ from dataclasses import dataclass, field from typing import Optional -from d123.common.visualization.color.color import BLACK, Color +from py123d.common.visualization.color.color import BLACK, Color @dataclass diff --git a/d123/common/visualization/color/default.py b/py123d/common/visualization/color/default.py similarity index 95% rename from d123/common/visualization/color/default.py rename to py123d/common/visualization/color/default.py index 5d90977a..9ea270e8 100644 --- a/d123/common/visualization/color/default.py +++ b/py123d/common/visualization/color/default.py @@ -1,6 +1,6 @@ from typing import Dict -from d123.common.visualization.color.color import ( +from py123d.common.visualization.color.color import ( BLACK, DARKER_GREY, ELLIS_5, @@ -10,10 +10,10 @@ WHITE, Color, ) -from d123.common.visualization.color.config import PlotConfig -from d123.datatypes.detections.detection import TrafficLightStatus -from d123.datatypes.detections.detection_types import DetectionType -from d123.datatypes.maps.map_datatypes import MapLayer +from py123d.common.visualization.color.config import PlotConfig +from py123d.datatypes.detections.detection import TrafficLightStatus +from py123d.datatypes.detections.detection_types import DetectionType +from py123d.datatypes.maps.map_datatypes import MapLayer HEADING_MARKER_STYLE: str = "^" # "^": triangle, "-": line diff --git a/d123/common/visualization/matplotlib/__init__.py b/py123d/common/visualization/matplotlib/__init__.py similarity 
index 100% rename from d123/common/visualization/matplotlib/__init__.py rename to py123d/common/visualization/matplotlib/__init__.py diff --git a/d123/common/visualization/matplotlib/camera.py b/py123d/common/visualization/matplotlib/camera.py similarity index 95% rename from d123/common/visualization/matplotlib/camera.py rename to py123d/common/visualization/matplotlib/camera.py index 49c567fb..f2de42d5 100644 --- a/d123/common/visualization/matplotlib/camera.py +++ b/py123d/common/visualization/matplotlib/camera.py @@ -10,13 +10,13 @@ # from PIL import ImageColor from pyquaternion import Quaternion -from d123.common.visualization.color.default import BOX_DETECTION_CONFIG -from d123.datatypes.detections.detection import BoxDetectionSE3, BoxDetectionWrapper -from d123.datatypes.detections.detection_types import DetectionType -from d123.datatypes.sensors.camera.pinhole_camera import PinholeCamera -from d123.datatypes.vehicle_state.ego_state import EgoStateSE3 -from d123.geometry import BoundingBoxSE3Index, Corners3DIndex -from d123.geometry.transform.transform_euler_se3 import convert_absolute_to_relative_euler_se3_array +from py123d.common.visualization.color.default import BOX_DETECTION_CONFIG +from py123d.datatypes.detections.detection import BoxDetectionSE3, BoxDetectionWrapper +from py123d.datatypes.detections.detection_types import DetectionType +from py123d.datatypes.sensors.camera.pinhole_camera import PinholeCamera +from py123d.datatypes.vehicle_state.ego_state import EgoStateSE3 +from py123d.geometry import BoundingBoxSE3Index, Corners3DIndex +from py123d.geometry.transform.transform_euler_se3 import convert_absolute_to_relative_euler_se3_array # from navsim.common.dataclasses import Annotations, Camera, Lidar # from navsim.common.enums import BoundingBoxIndex, LidarIndex diff --git a/d123/common/visualization/matplotlib/lidar.py b/py123d/common/visualization/matplotlib/lidar.py similarity index 100% rename from d123/common/visualization/matplotlib/lidar.py rename to py123d/common/visualization/matplotlib/lidar.py diff --git a/d123/common/visualization/matplotlib/observation.py b/py123d/common/visualization/matplotlib/observation.py similarity index 88% rename from d123/common/visualization/matplotlib/observation.py rename to py123d/common/visualization/matplotlib/observation.py index 0d6c5ddf..4dba948c 100644 --- a/d123/common/visualization/matplotlib/observation.py +++ b/py123d/common/visualization/matplotlib/observation.py @@ -4,8 +4,8 @@ import numpy as np import shapely.geometry as geom -from d123.common.visualization.color.config import PlotConfig -from d123.common.visualization.color.default import ( +from py123d.common.visualization.color.config import PlotConfig +from py123d.common.visualization.color.default import ( BOX_DETECTION_CONFIG, CENTERLINE_CONFIG, EGO_VEHICLE_CONFIG, @@ -13,21 +13,21 @@ ROUTE_CONFIG, TRAFFIC_LIGHT_CONFIG, ) -from d123.common.visualization.matplotlib.utils import ( +from py123d.common.visualization.matplotlib.utils import ( add_shapely_linestring_to_ax, add_shapely_polygon_to_ax, get_pose_triangle, shapely_geometry_local_coords, ) -from d123.datatypes.detections.detection import BoxDetectionWrapper, TrafficLightDetectionWrapper -from d123.datatypes.detections.detection_types import DetectionType -from d123.datatypes.maps.abstract_map import AbstractMap -from d123.datatypes.maps.abstract_map_objects import AbstractLane -from d123.datatypes.maps.map_datatypes import MapLayer -from d123.datatypes.scene.abstract_scene import AbstractScene -from 
d123.datatypes.vehicle_state.ego_state import EgoStateSE2, EgoStateSE3 -from d123.geometry import BoundingBoxSE2, BoundingBoxSE3, Point2D, StateSE2Index, Vector2D -from d123.geometry.transform.transform_se2 import translate_se2_along_body_frame +from py123d.datatypes.detections.detection import BoxDetectionWrapper, TrafficLightDetectionWrapper +from py123d.datatypes.detections.detection_types import DetectionType +from py123d.datatypes.maps.abstract_map import AbstractMap +from py123d.datatypes.maps.abstract_map_objects import AbstractLane +from py123d.datatypes.maps.map_datatypes import MapLayer +from py123d.datatypes.scene.abstract_scene import AbstractScene +from py123d.datatypes.vehicle_state.ego_state import EgoStateSE2, EgoStateSE3 +from py123d.geometry import BoundingBoxSE2, BoundingBoxSE3, Point2D, StateSE2Index, Vector2D +from py123d.geometry.transform.transform_se2 import translate_se2_along_body_frame def add_default_map_on_ax( diff --git a/d123/common/visualization/matplotlib/plots.py b/py123d/common/visualization/matplotlib/plots.py similarity index 95% rename from d123/common/visualization/matplotlib/plots.py rename to py123d/common/visualization/matplotlib/plots.py index 3e297aac..c7d55ffc 100644 --- a/d123/common/visualization/matplotlib/plots.py +++ b/py123d/common/visualization/matplotlib/plots.py @@ -5,13 +5,13 @@ import matplotlib.pyplot as plt from tqdm import tqdm -from d123.common.visualization.matplotlib.observation import ( +from py123d.common.visualization.matplotlib.observation import ( add_box_detections_to_ax, add_default_map_on_ax, add_ego_vehicle_to_ax, add_traffic_lights_to_ax, ) -from d123.datatypes.scene.abstract_scene import AbstractScene +from py123d.datatypes.scene.abstract_scene import AbstractScene def _plot_scene_on_ax(ax: plt.Axes, scene: AbstractScene, iteration: int = 0, radius: float = 80) -> plt.Axes: diff --git a/d123/common/visualization/matplotlib/utils.py b/py123d/common/visualization/matplotlib/utils.py similarity index 97% rename from d123/common/visualization/matplotlib/utils.py rename to py123d/common/visualization/matplotlib/utils.py index ef335bb0..f4b2aa52 100644 --- a/d123/common/visualization/matplotlib/utils.py +++ b/py123d/common/visualization/matplotlib/utils.py @@ -7,8 +7,8 @@ import shapely.geometry as geom from matplotlib.path import Path -from d123.common.visualization.color.config import PlotConfig -from d123.geometry import StateSE2, StateSE3 +from py123d.common.visualization.color.config import PlotConfig +from py123d.geometry import StateSE2, StateSE3 def add_shapely_polygon_to_ax( diff --git a/d123/common/visualization/utils/.gitkeep b/py123d/common/visualization/utils/.gitkeep similarity index 100% rename from d123/common/visualization/utils/.gitkeep rename to py123d/common/visualization/utils/.gitkeep diff --git a/d123/common/visualization/viser/__init__.py b/py123d/common/visualization/viser/__init__.py similarity index 100% rename from d123/common/visualization/viser/__init__.py rename to py123d/common/visualization/viser/__init__.py diff --git a/py123d/common/visualization/viser/elements/__init__.py b/py123d/common/visualization/viser/elements/__init__.py new file mode 100644 index 00000000..45d9d60f --- /dev/null +++ b/py123d/common/visualization/viser/elements/__init__.py @@ -0,0 +1,7 @@ +from py123d.common.visualization.viser.elements.detection_elements import add_box_detections_to_viser_server +from py123d.common.visualization.viser.elements.map_elements import add_map_to_viser_server +from 
py123d.common.visualization.viser.elements.sensor_elements import ( + add_camera_frustums_to_viser_server, + add_camera_gui_to_viser_server, + add_lidar_pc_to_viser_server, +) diff --git a/d123/common/visualization/viser/elements/detection_elements.py b/py123d/common/visualization/viser/elements/detection_elements.py similarity index 88% rename from d123/common/visualization/viser/elements/detection_elements.py rename to py123d/common/visualization/viser/elements/detection_elements.py index cdb2b0c8..62e5da4e 100644 --- a/d123/common/visualization/viser/elements/detection_elements.py +++ b/py123d/common/visualization/viser/elements/detection_elements.py @@ -3,13 +3,13 @@ import trimesh import viser -from d123.common.visualization.color.default import BOX_DETECTION_CONFIG -from d123.common.visualization.viser.viser_config import ViserConfig -from d123.datatypes.detections.detection_types import DetectionType -from d123.datatypes.scene.abstract_scene import AbstractScene -from d123.datatypes.vehicle_state.ego_state import EgoStateSE3 -from d123.geometry.geometry_index import BoundingBoxSE3Index, Corners3DIndex, StateSE3Index -from d123.geometry.utils.bounding_box_utils import ( +from py123d.common.visualization.color.default import BOX_DETECTION_CONFIG +from py123d.common.visualization.viser.viser_config import ViserConfig +from py123d.datatypes.detections.detection_types import DetectionType +from py123d.datatypes.scene.abstract_scene import AbstractScene +from py123d.datatypes.vehicle_state.ego_state import EgoStateSE3 +from py123d.geometry.geometry_index import BoundingBoxSE3Index, Corners3DIndex, StateSE3Index +from py123d.geometry.utils.bounding_box_utils import ( bbse3_array_to_corners_array, corners_array_to_3d_mesh, corners_array_to_edge_lines, diff --git a/d123/common/visualization/viser/elements/map_elements.py b/py123d/common/visualization/viser/elements/map_elements.py similarity index 86% rename from d123/common/visualization/viser/elements/map_elements.py rename to py123d/common/visualization/viser/elements/map_elements.py index 4532eb90..78cc425f 100644 --- a/d123/common/visualization/viser/elements/map_elements.py +++ b/py123d/common/visualization/viser/elements/map_elements.py @@ -3,13 +3,13 @@ import trimesh import viser -from d123.common.visualization.color.default import MAP_SURFACE_CONFIG -from d123.common.visualization.viser.viser_config import ViserConfig -from d123.datatypes.maps.abstract_map import MapLayer -from d123.datatypes.maps.abstract_map_objects import AbstractSurfaceMapObject -from d123.datatypes.scene.abstract_scene import AbstractScene -from d123.datatypes.vehicle_state.ego_state import EgoStateSE3 -from d123.geometry import Point3D, Point3DIndex +from py123d.common.visualization.color.default import MAP_SURFACE_CONFIG +from py123d.common.visualization.viser.viser_config import ViserConfig +from py123d.datatypes.maps.abstract_map import MapLayer +from py123d.datatypes.maps.abstract_map_objects import AbstractSurfaceMapObject +from py123d.datatypes.scene.abstract_scene import AbstractScene +from py123d.datatypes.vehicle_state.ego_state import EgoStateSE3 +from py123d.geometry import Point3D, Point3DIndex def add_map_to_viser_server( diff --git a/d123/common/visualization/viser/elements/sensor_elements.py b/py123d/common/visualization/viser/elements/sensor_elements.py similarity index 93% rename from d123/common/visualization/viser/elements/sensor_elements.py rename to py123d/common/visualization/viser/elements/sensor_elements.py index 0cc63621..a467af0e 
100644 --- a/d123/common/visualization/viser/elements/sensor_elements.py +++ b/py123d/common/visualization/viser/elements/sensor_elements.py @@ -6,13 +6,13 @@ import numpy.typing as npt import viser -from d123.common.visualization.viser.viser_config import ViserConfig -from d123.datatypes.scene.abstract_scene import AbstractScene -from d123.datatypes.sensors.camera.pinhole_camera import PinholeCamera, PinholeCameraType -from d123.datatypes.sensors.lidar.lidar import LiDARType -from d123.datatypes.vehicle_state.ego_state import EgoStateSE3 -from d123.geometry import StateSE3Index -from d123.geometry.transform.transform_se3 import ( +from py123d.common.visualization.viser.viser_config import ViserConfig +from py123d.datatypes.scene.abstract_scene import AbstractScene +from py123d.datatypes.sensors.camera.pinhole_camera import PinholeCamera, PinholeCameraType +from py123d.datatypes.sensors.lidar.lidar import LiDARType +from py123d.datatypes.vehicle_state.ego_state import EgoStateSE3 +from py123d.geometry import StateSE3Index +from py123d.geometry.transform.transform_se3 import ( convert_relative_to_absolute_points_3d_array, convert_relative_to_absolute_se3_array, ) diff --git a/d123/common/visualization/viser/viser_config.py b/py123d/common/visualization/viser/viser_config.py similarity index 92% rename from d123/common/visualization/viser/viser_config.py rename to py123d/common/visualization/viser/viser_config.py index f99f7823..af06dadc 100644 --- a/d123/common/visualization/viser/viser_config.py +++ b/py123d/common/visualization/viser/viser_config.py @@ -1,8 +1,8 @@ from dataclasses import dataclass, field from typing import List, Literal, Optional, Tuple -from d123.datatypes.sensors.camera.pinhole_camera import PinholeCameraType -from d123.datatypes.sensors.lidar.lidar import LiDARType +from py123d.datatypes.sensors.camera.pinhole_camera import PinholeCameraType +from py123d.datatypes.sensors.lidar.lidar import LiDARType all_camera_types: List[PinholeCameraType] = [ PinholeCameraType.CAM_F0, @@ -31,7 +31,7 @@ class ViserConfig: # Server server_host: str = "localhost" server_port: int = 8080 - server_label: str = "D123 Viser Server" + server_label: str = "123D Viser Server" server_verbose: bool = True # Theme diff --git a/d123/common/visualization/viser/viser_viewer.py b/py123d/common/visualization/viser/viser_viewer.py similarity index 95% rename from d123/common/visualization/viser/viser_viewer.py rename to py123d/common/visualization/viser/viser_viewer.py index 8cf1ec80..3da910e6 100644 --- a/d123/common/visualization/viser/viser_viewer.py +++ b/py123d/common/visualization/viser/viser_viewer.py @@ -5,18 +5,18 @@ import viser from viser.theme import TitlebarButton, TitlebarConfig, TitlebarImage -from d123.common.visualization.viser.elements import ( +from py123d.common.visualization.viser.elements import ( add_box_detections_to_viser_server, add_camera_frustums_to_viser_server, add_camera_gui_to_viser_server, add_lidar_pc_to_viser_server, add_map_to_viser_server, ) -from d123.common.visualization.viser.viser_config import ViserConfig -from d123.datatypes.scene.abstract_scene import AbstractScene -from d123.datatypes.sensors.camera.pinhole_camera import PinholeCameraType -from d123.datatypes.sensors.lidar.lidar import LiDARType -from d123.datatypes.vehicle_state.ego_state import EgoStateSE3 +from py123d.common.visualization.viser.viser_config import ViserConfig +from py123d.datatypes.scene.abstract_scene import AbstractScene +from py123d.datatypes.sensors.camera.pinhole_camera import 
PinholeCameraType +from py123d.datatypes.sensors.lidar.lidar import LiDARType +from py123d.datatypes.vehicle_state.ego_state import EgoStateSE3 logger = logging.getLogger(__name__) diff --git a/d123/conversion/__init__.py b/py123d/conversion/__init__.py similarity index 100% rename from d123/conversion/__init__.py rename to py123d/conversion/__init__.py diff --git a/d123/conversion/abstract_dataset_converter.py b/py123d/conversion/abstract_dataset_converter.py similarity index 84% rename from d123/conversion/abstract_dataset_converter.py rename to py123d/conversion/abstract_dataset_converter.py index db9c3284..25052da9 100644 --- a/d123/conversion/abstract_dataset_converter.py +++ b/py123d/conversion/abstract_dataset_converter.py @@ -1,8 +1,8 @@ import abc -from d123.conversion.dataset_converter_config import DatasetConverterConfig -from d123.conversion.log_writer.abstract_log_writer import AbstractLogWriter -from d123.conversion.map_writer.abstract_map_writer import AbstractMapWriter +from py123d.conversion.dataset_converter_config import DatasetConverterConfig +from py123d.conversion.log_writer.abstract_log_writer import AbstractLogWriter +from py123d.conversion.map_writer.abstract_map_writer import AbstractMapWriter class AbstractDatasetConverter(abc.ABC): diff --git a/d123/conversion/dataset_converter_config.py b/py123d/conversion/dataset_converter_config.py similarity index 100% rename from d123/conversion/dataset_converter_config.py rename to py123d/conversion/dataset_converter_config.py diff --git a/d123/conversion/datasets/__init__.py b/py123d/conversion/datasets/__init__.py similarity index 100% rename from d123/conversion/datasets/__init__.py rename to py123d/conversion/datasets/__init__.py diff --git a/d123/conversion/datasets/av2/av2_constants.py b/py123d/conversion/datasets/av2/av2_constants.py similarity index 94% rename from d123/conversion/datasets/av2/av2_constants.py rename to py123d/conversion/datasets/av2/av2_constants.py index fb61313d..8e682b74 100644 --- a/d123/conversion/datasets/av2/av2_constants.py +++ b/py123d/conversion/datasets/av2/av2_constants.py @@ -1,9 +1,9 @@ from typing import Dict, Final, Set -from d123.common.utils.enums import SerialIntEnum -from d123.datatypes.detections.detection_types import DetectionType -from d123.datatypes.maps.map_datatypes import RoadLineType -from d123.datatypes.sensors.camera.pinhole_camera import PinholeCameraType +from py123d.common.utils.enums import SerialIntEnum +from py123d.datatypes.detections.detection_types import DetectionType +from py123d.datatypes.maps.map_datatypes import RoadLineType +from py123d.datatypes.sensors.camera.pinhole_camera import PinholeCameraType AV2_SENSOR_SPLITS: Set[str] = { "av2-sensor_train", diff --git a/d123/conversion/datasets/av2/av2_helper.py b/py123d/conversion/datasets/av2/av2_helper.py similarity index 99% rename from d123/conversion/datasets/av2/av2_helper.py rename to py123d/conversion/datasets/av2/av2_helper.py index 401a979e..3dbb5d0b 100644 --- a/d123/conversion/datasets/av2/av2_helper.py +++ b/py123d/conversion/datasets/av2/av2_helper.py @@ -3,7 +3,7 @@ import pandas as pd -from d123.conversion.datasets.av2.av2_constants import ( +from py123d.conversion.datasets.av2.av2_constants import ( AV2_CAMERA_TYPE_MAPPING, AV2_SENSOR_CAM_SHUTTER_INTERVAL_MS, AV2_SENSOR_LIDAR_SWEEP_INTERVAL_W_BUFFER_NS, diff --git a/d123/conversion/datasets/av2/av2_map_conversion.py b/py123d/conversion/datasets/av2/av2_map_conversion.py similarity index 96% rename from 
d123/conversion/datasets/av2/av2_map_conversion.py rename to py123d/conversion/datasets/av2/av2_map_conversion.py index ab3bdd66..ea5abf4e 100644 --- a/d123/conversion/datasets/av2/av2_map_conversion.py +++ b/py123d/conversion/datasets/av2/av2_map_conversion.py @@ -8,14 +8,14 @@ import shapely import shapely.geometry as geom -from d123.conversion.datasets.av2.av2_constants import AV2_ROAD_LINE_TYPE_MAPPING -from d123.conversion.map_writer.abstract_map_writer import AbstractMapWriter -from d123.conversion.utils.map_utils.road_edge.road_edge_2d_utils import ( +from py123d.conversion.datasets.av2.av2_constants import AV2_ROAD_LINE_TYPE_MAPPING +from py123d.conversion.map_writer.abstract_map_writer import AbstractMapWriter +from py123d.conversion.utils.map_utils.road_edge.road_edge_2d_utils import ( get_road_edge_linear_rings, split_line_geometry_by_max_length, ) -from d123.conversion.utils.map_utils.road_edge.road_edge_3d_utils import lift_road_edges_to_3d -from d123.datatypes.maps.cache.cache_map_objects import ( +from py123d.conversion.utils.map_utils.road_edge.road_edge_3d_utils import lift_road_edges_to_3d +from py123d.datatypes.maps.cache.cache_map_objects import ( CacheCrosswalk, CacheGenericDrivable, CacheIntersection, @@ -24,8 +24,8 @@ CacheRoadEdge, CacheRoadLine, ) -from d123.datatypes.maps.map_datatypes import RoadEdgeType -from d123.geometry import OccupancyMap2D, Point3DIndex, Polyline2D, Polyline3D +from py123d.datatypes.maps.map_datatypes import RoadEdgeType +from py123d.geometry import OccupancyMap2D, Point3DIndex, Polyline2D, Polyline3D LANE_GROUP_MARK_TYPES: List[str] = [ "DASHED_WHITE", diff --git a/d123/conversion/datasets/av2/av2_sensor_converter.py b/py123d/conversion/datasets/av2/av2_sensor_converter.py similarity index 90% rename from d123/conversion/datasets/av2/av2_sensor_converter.py rename to py123d/conversion/datasets/av2/av2_sensor_converter.py index f731a5f9..26c9a618 100644 --- a/d123/conversion/datasets/av2/av2_sensor_converter.py +++ b/py123d/conversion/datasets/av2/av2_sensor_converter.py @@ -4,43 +4,43 @@ import numpy as np import pandas as pd -from d123.conversion.abstract_dataset_converter import AbstractDatasetConverter -from d123.conversion.dataset_converter_config import DatasetConverterConfig -from d123.conversion.datasets.av2.av2_constants import ( +from py123d.conversion.abstract_dataset_converter import AbstractDatasetConverter +from py123d.conversion.dataset_converter_config import DatasetConverterConfig +from py123d.conversion.datasets.av2.av2_constants import ( AV2_CAMERA_TYPE_MAPPING, AV2_SENSOR_SPLITS, AV2_TO_DETECTION_TYPE, AV2SensorBoxDetectionType, ) -from d123.conversion.datasets.av2.av2_helper import ( +from py123d.conversion.datasets.av2.av2_helper import ( build_sensor_dataframe, build_synchronization_dataframe, find_closest_target_fpath, get_slice_with_timestamp_ns, ) -from d123.conversion.datasets.av2.av2_map_conversion import convert_av2_map -from d123.conversion.log_writer.abstract_log_writer import AbstractLogWriter -from d123.conversion.map_writer.abstract_map_writer import AbstractMapWriter -from d123.datatypes.detections.detection import BoxDetectionMetadata, BoxDetectionSE3, BoxDetectionWrapper -from d123.datatypes.detections.detection_types import DetectionType -from d123.datatypes.maps.map_metadata import MapMetadata -from d123.datatypes.scene.scene_metadata import LogMetadata -from d123.datatypes.sensors.camera.pinhole_camera import ( +from py123d.conversion.datasets.av2.av2_map_conversion import convert_av2_map +from 
py123d.conversion.log_writer.abstract_log_writer import AbstractLogWriter +from py123d.conversion.map_writer.abstract_map_writer import AbstractMapWriter +from py123d.datatypes.detections.detection import BoxDetectionMetadata, BoxDetectionSE3, BoxDetectionWrapper +from py123d.datatypes.detections.detection_types import DetectionType +from py123d.datatypes.maps.map_metadata import MapMetadata +from py123d.datatypes.scene.scene_metadata import LogMetadata +from py123d.datatypes.sensors.camera.pinhole_camera import ( PinholeCameraMetadata, PinholeCameraType, PinholeDistortion, PinholeIntrinsics, ) -from d123.datatypes.sensors.lidar.lidar import LiDARMetadata, LiDARType -from d123.datatypes.time.time_point import TimePoint -from d123.datatypes.vehicle_state.ego_state import DynamicStateSE3, EgoStateSE3 -from d123.datatypes.vehicle_state.vehicle_parameters import ( +from py123d.datatypes.sensors.lidar.lidar import LiDARMetadata, LiDARType +from py123d.datatypes.time.time_point import TimePoint +from py123d.datatypes.vehicle_state.ego_state import DynamicStateSE3, EgoStateSE3 +from py123d.datatypes.vehicle_state.vehicle_parameters import ( get_av2_ford_fusion_hybrid_parameters, rear_axle_se3_to_center_se3, ) -from d123.geometry import BoundingBoxSE3Index, StateSE3, Vector3D, Vector3DIndex -from d123.geometry.bounding_box import BoundingBoxSE3 -from d123.geometry.transform.transform_se3 import convert_relative_to_absolute_se3_array +from py123d.geometry import BoundingBoxSE3Index, StateSE3, Vector3D, Vector3DIndex +from py123d.geometry.bounding_box import BoundingBoxSE3 +from py123d.geometry.transform.transform_se3 import convert_relative_to_absolute_se3_array class AV2SensorConverter(AbstractDatasetConverter): diff --git a/d123/conversion/datasets/carla/__init__.py b/py123d/conversion/datasets/carla/__init__.py similarity index 100% rename from d123/conversion/datasets/carla/__init__.py rename to py123d/conversion/datasets/carla/__init__.py diff --git a/d123/conversion/datasets/carla/carla_data_converter.py b/py123d/conversion/datasets/carla/carla_data_converter.py similarity index 94% rename from d123/conversion/datasets/carla/carla_data_converter.py rename to py123d/conversion/datasets/carla/carla_data_converter.py index 65ed911a..9caff6e7 100644 --- a/d123/conversion/datasets/carla/carla_data_converter.py +++ b/py123d/conversion/datasets/carla/carla_data_converter.py @@ -11,24 +11,24 @@ import numpy as np import pyarrow as pa -from d123.common.multithreading.worker_utils import WorkerPool, worker_map -from d123.common.utils.arrow_helper import open_arrow_table, write_arrow_table -from d123.conversion.abstract_dataset_converter import AbstractDataConverter, DatasetConverterConfig -from d123.conversion.utils.map_utils.opendrive.opendrive_map_conversion import convert_from_xodr -from d123.conversion.utils.sensor.lidar_index_registry import CARLALidarIndex -from d123.datatypes.maps.abstract_map import AbstractMap, MapLayer -from d123.datatypes.maps.abstract_map_objects import AbstractLane -from d123.datatypes.maps.gpkg.gpkg_map import get_global_map_api -from d123.datatypes.scene.scene_metadata import LogMetadata -from d123.datatypes.sensors.camera.pinhole_camera import ( +from py123d.common.multithreading.worker_utils import WorkerPool, worker_map +from py123d.common.utils.arrow_helper import open_arrow_table, write_arrow_table +from py123d.conversion.abstract_dataset_converter import AbstractDataConverter, DatasetConverterConfig +from 
py123d.conversion.utils.map_utils.opendrive.opendrive_map_conversion import convert_from_xodr +from py123d.conversion.utils.sensor.lidar_index_registry import CARLALidarIndex +from py123d.datatypes.maps.abstract_map import AbstractMap, MapLayer +from py123d.datatypes.maps.abstract_map_objects import AbstractLane +from py123d.datatypes.maps.gpkg.gpkg_map import get_global_map_api +from py123d.datatypes.scene.scene_metadata import LogMetadata +from py123d.datatypes.sensors.camera.pinhole_camera import ( PinholeCameraMetadata, PinholeCameraType, camera_metadata_dict_to_json, ) -from d123.datatypes.sensors.lidar.lidar import LiDARMetadata, LiDARType, lidar_metadata_dict_to_json -from d123.datatypes.vehicle_state.ego_state import EgoStateSE3Index -from d123.datatypes.vehicle_state.vehicle_parameters import get_carla_lincoln_mkz_2020_parameters -from d123.geometry import BoundingBoxSE3Index, Point2D, Point3D, Vector3DIndex +from py123d.datatypes.sensors.lidar.lidar import LiDARMetadata, LiDARType, lidar_metadata_dict_to_json +from py123d.datatypes.vehicle_state.ego_state import EgoStateSE3Index +from py123d.datatypes.vehicle_state.vehicle_parameters import get_carla_lincoln_mkz_2020_parameters +from py123d.geometry import BoundingBoxSE3Index, Point2D, Point3D, Vector3DIndex AVAILABLE_CARLA_MAP_LOCATIONS: Final[List[str]] = [ "Town01", # A small, simple town with a river and several bridges. diff --git a/d123/conversion/datasets/carla/carla_load_sensor.py b/py123d/conversion/datasets/carla/carla_load_sensor.py similarity index 79% rename from d123/conversion/datasets/carla/carla_load_sensor.py rename to py123d/conversion/datasets/carla/carla_load_sensor.py index 464fecef..f3d85d00 100644 --- a/d123/conversion/datasets/carla/carla_load_sensor.py +++ b/py123d/conversion/datasets/carla/carla_load_sensor.py @@ -2,7 +2,7 @@ import numpy as np -from d123.datatypes.sensors.lidar.lidar import LiDAR, LiDARMetadata +from py123d.datatypes.sensors.lidar.lidar import LiDAR, LiDARMetadata def load_carla_lidar_from_path(filepath: Path, lidar_metadata: LiDARMetadata) -> LiDAR: diff --git a/d123/conversion/datasets/kitti_360/.gitkeep b/py123d/conversion/datasets/kitti_360/.gitkeep similarity index 100% rename from d123/conversion/datasets/kitti_360/.gitkeep rename to py123d/conversion/datasets/kitti_360/.gitkeep diff --git a/d123/conversion/datasets/nuplan/__init__.py b/py123d/conversion/datasets/nuplan/__init__.py similarity index 100% rename from d123/conversion/datasets/nuplan/__init__.py rename to py123d/conversion/datasets/nuplan/__init__.py diff --git a/d123/conversion/datasets/nuplan/nuplan_converter.py b/py123d/conversion/datasets/nuplan/nuplan_converter.py similarity index 92% rename from d123/conversion/datasets/nuplan/nuplan_converter.py rename to py123d/conversion/datasets/nuplan/nuplan_converter.py index 83ce1bd2..da4b4cc9 100644 --- a/d123/conversion/datasets/nuplan/nuplan_converter.py +++ b/py123d/conversion/datasets/nuplan/nuplan_converter.py @@ -5,47 +5,47 @@ import numpy as np import yaml -import d123.conversion.datasets.nuplan.utils as nuplan_utils -from d123.common.utils.dependencies import check_dependencies -from d123.conversion.abstract_dataset_converter import AbstractDatasetConverter -from d123.conversion.dataset_converter_config import DatasetConverterConfig -from d123.conversion.datasets.nuplan.nuplan_map_conversion import write_nuplan_map -from d123.conversion.datasets.nuplan.utils.nuplan_constants import ( +import py123d.conversion.datasets.nuplan.utils as nuplan_utils +from 
py123d.common.utils.dependencies import check_dependencies +from py123d.conversion.abstract_dataset_converter import AbstractDatasetConverter +from py123d.conversion.dataset_converter_config import DatasetConverterConfig +from py123d.conversion.datasets.nuplan.nuplan_map_conversion import write_nuplan_map +from py123d.conversion.datasets.nuplan.utils.nuplan_constants import ( NUPLAN_DATA_SPLITS, NUPLAN_DEFAULT_DT, NUPLAN_MAP_LOCATIONS, NUPLAN_ROLLING_SHUTTER_S, NUPLAN_TRAFFIC_STATUS_DICT, ) -from d123.conversion.datasets.nuplan.utils.nuplan_sql_helper import ( +from py123d.conversion.datasets.nuplan.utils.nuplan_sql_helper import ( get_box_detections_for_lidarpc_token_from_db, get_nearest_ego_pose_for_timestamp_from_db, ) -from d123.conversion.log_writer.abstract_log_writer import AbstractLogWriter -from d123.conversion.map_writer.abstract_map_writer import AbstractMapWriter -from d123.conversion.utils.sensor_utils.lidar_index_registry import NuPlanLidarIndex -from d123.datatypes.detections.detection import ( +from py123d.conversion.log_writer.abstract_log_writer import AbstractLogWriter +from py123d.conversion.map_writer.abstract_map_writer import AbstractMapWriter +from py123d.conversion.utils.sensor_utils.lidar_index_registry import NuPlanLidarIndex +from py123d.datatypes.detections.detection import ( BoxDetectionSE3, BoxDetectionWrapper, TrafficLightDetection, TrafficLightDetectionWrapper, ) -from d123.datatypes.maps.map_metadata import MapMetadata -from d123.datatypes.scene.scene_metadata import LogMetadata -from d123.datatypes.sensors.camera.pinhole_camera import ( +from py123d.datatypes.maps.map_metadata import MapMetadata +from py123d.datatypes.scene.scene_metadata import LogMetadata +from py123d.datatypes.sensors.camera.pinhole_camera import ( PinholeCameraMetadata, PinholeCameraType, PinholeDistortion, PinholeIntrinsics, ) -from d123.datatypes.sensors.lidar.lidar import LiDARMetadata, LiDARType -from d123.datatypes.time.time_point import TimePoint -from d123.datatypes.vehicle_state.ego_state import DynamicStateSE3, EgoStateSE3 -from d123.datatypes.vehicle_state.vehicle_parameters import ( +from py123d.datatypes.sensors.lidar.lidar import LiDARMetadata, LiDARType +from py123d.datatypes.time.time_point import TimePoint +from py123d.datatypes.vehicle_state.ego_state import DynamicStateSE3, EgoStateSE3 +from py123d.datatypes.vehicle_state.vehicle_parameters import ( get_nuplan_chrysler_pacifica_parameters, rear_axle_se3_to_center_se3, ) -from d123.geometry import StateSE3, Vector3D +from py123d.geometry import StateSE3, Vector3D check_dependencies(["nuplan", "sqlalchemy"], "nuplan") from nuplan.database.nuplan_db.nuplan_scenario_queries import get_cameras, get_images_from_lidar_tokens diff --git a/d123/conversion/datasets/nuplan/nuplan_load_sensor.py b/py123d/conversion/datasets/nuplan/nuplan_load_sensor.py similarity index 78% rename from d123/conversion/datasets/nuplan/nuplan_load_sensor.py rename to py123d/conversion/datasets/nuplan/nuplan_load_sensor.py index 6e80df47..e964e03c 100644 --- a/d123/conversion/datasets/nuplan/nuplan_load_sensor.py +++ b/py123d/conversion/datasets/nuplan/nuplan_load_sensor.py @@ -1,8 +1,8 @@ import io from pathlib import Path -from d123.common.utils.dependencies import check_dependencies -from d123.datatypes.sensors.lidar.lidar import LiDAR, LiDARMetadata +from py123d.common.utils.dependencies import check_dependencies +from py123d.datatypes.sensors.lidar.lidar import LiDAR, LiDARMetadata check_dependencies(["nuplan"], "nuplan") from 
nuplan.database.utils.pointclouds.lidar import LidarPointCloud diff --git a/d123/conversion/datasets/nuplan/nuplan_map_conversion.py b/py123d/conversion/datasets/nuplan/nuplan_map_conversion.py similarity index 97% rename from d123/conversion/datasets/nuplan/nuplan_map_conversion.py rename to py123d/conversion/datasets/nuplan/nuplan_map_conversion.py index 97cceeb8..bff709be 100644 --- a/d123/conversion/datasets/nuplan/nuplan_map_conversion.py +++ b/py123d/conversion/datasets/nuplan/nuplan_map_conversion.py @@ -7,17 +7,17 @@ import pyogrio from shapely import LineString -from d123.conversion.datasets.nuplan.utils.nuplan_constants import ( +from py123d.conversion.datasets.nuplan.utils.nuplan_constants import ( NUPLAN_MAP_GPKG_LAYERS, NUPLAN_MAP_LOCATION_FILES, NUPLAN_ROAD_LINE_CONVERSION, ) -from d123.conversion.map_writer.abstract_map_writer import AbstractMapWriter -from d123.conversion.utils.map_utils.road_edge.road_edge_2d_utils import ( +from py123d.conversion.map_writer.abstract_map_writer import AbstractMapWriter +from py123d.conversion.utils.map_utils.road_edge.road_edge_2d_utils import ( get_road_edge_linear_rings, split_line_geometry_by_max_length, ) -from d123.datatypes.maps.cache.cache_map_objects import ( +from py123d.datatypes.maps.cache.cache_map_objects import ( CacheCarpark, CacheCrosswalk, CacheGenericDrivable, @@ -28,9 +28,9 @@ CacheRoadLine, CacheWalkway, ) -from d123.datatypes.maps.gpkg.gpkg_utils import get_all_rows_with_value, get_row_with_value -from d123.datatypes.maps.map_datatypes import RoadEdgeType -from d123.geometry.polyline import Polyline2D, Polyline3D +from py123d.datatypes.maps.gpkg.gpkg_utils import get_all_rows_with_value, get_row_with_value +from py123d.datatypes.maps.map_datatypes import RoadEdgeType +from py123d.geometry.polyline import Polyline2D, Polyline3D MAX_ROAD_EDGE_LENGTH: Final[float] = 100.0 # meters, used to filter out very long road edges. TODO @add to config? 
diff --git a/d123/conversion/datasets/nuplan/utils/__init__.py b/py123d/conversion/datasets/nuplan/utils/__init__.py similarity index 100% rename from d123/conversion/datasets/nuplan/utils/__init__.py rename to py123d/conversion/datasets/nuplan/utils/__init__.py diff --git a/d123/conversion/datasets/nuplan/utils/log_splits.yaml b/py123d/conversion/datasets/nuplan/utils/log_splits.yaml similarity index 100% rename from d123/conversion/datasets/nuplan/utils/log_splits.yaml rename to py123d/conversion/datasets/nuplan/utils/log_splits.yaml diff --git a/d123/conversion/datasets/nuplan/utils/nuplan_constants.py b/py123d/conversion/datasets/nuplan/utils/nuplan_constants.py similarity index 89% rename from d123/conversion/datasets/nuplan/utils/nuplan_constants.py rename to py123d/conversion/datasets/nuplan/utils/nuplan_constants.py index b2a5876c..5190e84e 100644 --- a/d123/conversion/datasets/nuplan/utils/nuplan_constants.py +++ b/py123d/conversion/datasets/nuplan/utils/nuplan_constants.py @@ -1,10 +1,10 @@ from enum import IntEnum from typing import Dict, Final, List, Set -from d123.datatypes.detections.detection import TrafficLightStatus -from d123.datatypes.detections.detection_types import DetectionType -from d123.datatypes.maps.map_datatypes import RoadLineType -from d123.datatypes.time.time_point import TimePoint +from py123d.datatypes.detections.detection import TrafficLightStatus +from py123d.datatypes.detections.detection_types import DetectionType +from py123d.datatypes.maps.map_datatypes import RoadLineType +from py123d.datatypes.time.time_point import TimePoint class NuPlanBoxDetectionType(IntEnum): diff --git a/d123/conversion/datasets/nuplan/utils/nuplan_sql_helper.py b/py123d/conversion/datasets/nuplan/utils/nuplan_sql_helper.py similarity index 90% rename from d123/conversion/datasets/nuplan/utils/nuplan_sql_helper.py rename to py123d/conversion/datasets/nuplan/utils/nuplan_sql_helper.py index 773c6123..99d044ec 100644 --- a/d123/conversion/datasets/nuplan/utils/nuplan_sql_helper.py +++ b/py123d/conversion/datasets/nuplan/utils/nuplan_sql_helper.py @@ -1,10 +1,10 @@ from typing import List -from d123.common.utils.dependencies import check_dependencies -from d123.conversion.datasets.nuplan.utils.nuplan_constants import NUPLAN_DETECTION_NAME_DICT -from d123.datatypes.detections.detection import BoxDetectionMetadata, BoxDetectionSE3 -from d123.geometry import BoundingBoxSE3, EulerAngles, StateSE3, Vector3D -from d123.geometry.utils.constants import DEFAULT_PITCH, DEFAULT_ROLL +from py123d.common.utils.dependencies import check_dependencies +from py123d.conversion.datasets.nuplan.utils.nuplan_constants import NUPLAN_DETECTION_NAME_DICT +from py123d.datatypes.detections.detection import BoxDetectionMetadata, BoxDetectionSE3 +from py123d.geometry import BoundingBoxSE3, EulerAngles, StateSE3, Vector3D +from py123d.geometry.utils.constants import DEFAULT_PITCH, DEFAULT_ROLL check_dependencies(modules=["nuplan"], optional_name="nuplan") from nuplan.database.nuplan_db.query_session import execute_many, execute_one diff --git a/d123/conversion/datasets/nuscenes/.gitkeep b/py123d/conversion/datasets/nuscenes/.gitkeep similarity index 100% rename from d123/conversion/datasets/nuscenes/.gitkeep rename to py123d/conversion/datasets/nuscenes/.gitkeep diff --git a/d123/conversion/datasets/pandaset/pandaset_constants.py b/py123d/conversion/datasets/pandaset/pandaset_constants.py similarity index 96% rename from d123/conversion/datasets/pandaset/pandaset_constants.py rename to 
py123d/conversion/datasets/pandaset/pandaset_constants.py index 88b0549f..51ef348d 100644 --- a/d123/conversion/datasets/pandaset/pandaset_constants.py +++ b/py123d/conversion/datasets/pandaset/pandaset_constants.py @@ -1,8 +1,8 @@ from typing import Dict, List -from d123.common.utils.enums import SerialIntEnum -from d123.datatypes.detections.detection_types import DetectionType -from d123.datatypes.sensors.camera.pinhole_camera import PinholeCameraType +from py123d.common.utils.enums import SerialIntEnum +from py123d.datatypes.detections.detection_types import DetectionType +from py123d.datatypes.sensors.camera.pinhole_camera import PinholeCameraType PANDASET_SPLITS: List[str] = ["pandaset_train", "pandaset_val", "pandaset_test"] diff --git a/d123/conversion/datasets/pandaset/pandaset_converter.py b/py123d/conversion/datasets/pandaset/pandaset_converter.py similarity index 92% rename from d123/conversion/datasets/pandaset/pandaset_converter.py rename to py123d/conversion/datasets/pandaset/pandaset_converter.py index 55260d09..04f6149b 100644 --- a/d123/conversion/datasets/pandaset/pandaset_converter.py +++ b/py123d/conversion/datasets/pandaset/pandaset_converter.py @@ -6,40 +6,40 @@ import numpy as np -from d123.conversion.abstract_dataset_converter import AbstractDatasetConverter -from d123.conversion.dataset_converter_config import DatasetConverterConfig -from d123.conversion.datasets.pandaset.pandaset_constants import ( +from py123d.conversion.abstract_dataset_converter import AbstractDatasetConverter +from py123d.conversion.dataset_converter_config import DatasetConverterConfig +from py123d.conversion.datasets.pandaset.pandaset_constants import ( PANDASET_BOX_DETECTION_FROM_STR, PANDASET_BOX_DETECTION_TO_DEFAULT, PANDASET_CAMERA_MAPPING, PANDASET_LOG_NAMES, PANDASET_SPLITS, ) -from d123.conversion.log_writer.abstract_log_writer import AbstractLogWriter -from d123.conversion.map_writer.abstract_map_writer import AbstractMapWriter -from d123.datatypes.detections.detection import BoxDetectionMetadata, BoxDetectionSE3, BoxDetectionWrapper -from d123.datatypes.scene.scene_metadata import LogMetadata -from d123.datatypes.sensors.camera.pinhole_camera import ( +from py123d.conversion.log_writer.abstract_log_writer import AbstractLogWriter +from py123d.conversion.map_writer.abstract_map_writer import AbstractMapWriter +from py123d.datatypes.detections.detection import BoxDetectionMetadata, BoxDetectionSE3, BoxDetectionWrapper +from py123d.datatypes.scene.scene_metadata import LogMetadata +from py123d.datatypes.sensors.camera.pinhole_camera import ( PinholeCameraMetadata, PinholeCameraType, PinholeDistortion, PinholeIntrinsics, ) -from d123.datatypes.sensors.lidar.lidar import LiDARMetadata, LiDARType -from d123.datatypes.time.time_point import TimePoint -from d123.datatypes.vehicle_state.ego_state import DynamicStateSE3, EgoStateSE3 -from d123.datatypes.vehicle_state.vehicle_parameters import ( +from py123d.datatypes.sensors.lidar.lidar import LiDARMetadata, LiDARType +from py123d.datatypes.time.time_point import TimePoint +from py123d.datatypes.vehicle_state.ego_state import DynamicStateSE3, EgoStateSE3 +from py123d.datatypes.vehicle_state.vehicle_parameters import ( get_pandaset_chrysler_pacifica_parameters, rear_axle_se3_to_center_se3, ) -from d123.geometry import BoundingBoxSE3, StateSE3, Vector3D -from d123.geometry.geometry_index import BoundingBoxSE3Index, EulerAnglesIndex -from d123.geometry.transform.transform_se3 import ( +from py123d.geometry import BoundingBoxSE3, StateSE3, 
Vector3D +from py123d.geometry.geometry_index import BoundingBoxSE3Index, EulerAnglesIndex +from py123d.geometry.transform.transform_se3 import ( convert_absolute_to_relative_se3_array, translate_se3_along_body_frame, ) -from d123.geometry.utils.constants import DEFAULT_PITCH, DEFAULT_ROLL -from d123.geometry.utils.rotation_utils import get_quaternion_array_from_euler_array +from py123d.geometry.utils.constants import DEFAULT_PITCH, DEFAULT_ROLL +from py123d.geometry.utils.rotation_utils import get_quaternion_array_from_euler_array class PandasetConverter(AbstractDatasetConverter): diff --git a/d123/conversion/datasets/wopd/__init__.py b/py123d/conversion/datasets/wopd/__init__.py similarity index 100% rename from d123/conversion/datasets/wopd/__init__.py rename to py123d/conversion/datasets/wopd/__init__.py diff --git a/d123/conversion/datasets/wopd/utils/wopd_constants.py b/py123d/conversion/datasets/wopd/utils/wopd_constants.py similarity index 89% rename from d123/conversion/datasets/wopd/utils/wopd_constants.py rename to py123d/conversion/datasets/wopd/utils/wopd_constants.py index 61d0a150..2edf67e7 100644 --- a/d123/conversion/datasets/wopd/utils/wopd_constants.py +++ b/py123d/conversion/datasets/wopd/utils/wopd_constants.py @@ -1,9 +1,9 @@ from typing import Dict, List -from d123.datatypes.detections.detection_types import DetectionType -from d123.datatypes.maps.map_datatypes import LaneType, RoadEdgeType, RoadLineType -from d123.datatypes.sensors.camera.pinhole_camera import PinholeCameraType -from d123.datatypes.sensors.lidar.lidar import LiDARType +from py123d.datatypes.detections.detection_types import DetectionType +from py123d.datatypes.maps.map_datatypes import LaneType, RoadEdgeType, RoadLineType +from py123d.datatypes.sensors.camera.pinhole_camera import PinholeCameraType +from py123d.datatypes.sensors.lidar.lidar import LiDARType WOPD_AVAILABLE_SPLITS: List[str] = [ "wopd_train", diff --git a/d123/conversion/datasets/wopd/waymo_map_utils/womp_boundary_utils.py b/py123d/conversion/datasets/wopd/waymo_map_utils/womp_boundary_utils.py similarity index 97% rename from d123/conversion/datasets/wopd/waymo_map_utils/womp_boundary_utils.py rename to py123d/conversion/datasets/wopd/waymo_map_utils/womp_boundary_utils.py index a1a3e237..e9e6afad 100644 --- a/d123/conversion/datasets/wopd/waymo_map_utils/womp_boundary_utils.py +++ b/py123d/conversion/datasets/wopd/waymo_map_utils/womp_boundary_utils.py @@ -4,11 +4,11 @@ import numpy as np import shapely.geometry as geom -from d123.datatypes.maps.abstract_map_objects import AbstractRoadEdge, AbstractRoadLine -from d123.datatypes.maps.map_datatypes import LaneType -from d123.geometry import OccupancyMap2D, Point3D, Polyline3D, PolylineSE2, StateSE2, Vector2D -from d123.geometry.transform.transform_se2 import translate_se2_along_body_frame -from d123.geometry.utils.rotation_utils import normalize_angle +from py123d.datatypes.maps.abstract_map_objects import AbstractRoadEdge, AbstractRoadLine +from py123d.datatypes.maps.map_datatypes import LaneType +from py123d.geometry import OccupancyMap2D, Point3D, Polyline3D, PolylineSE2, StateSE2, Vector2D +from py123d.geometry.transform.transform_se2 import translate_se2_along_body_frame +from py123d.geometry.utils.rotation_utils import normalize_angle MAX_LANE_WIDTH = 25.0 # meters MIN_LANE_WIDTH = 2.0 diff --git a/d123/conversion/datasets/wopd/waymo_map_utils/wopd_map_utils copy.py b/py123d/conversion/datasets/wopd/waymo_map_utils/wopd_map_utils copy.py similarity index 97% rename from 
d123/conversion/datasets/wopd/waymo_map_utils/wopd_map_utils copy.py rename to py123d/conversion/datasets/wopd/waymo_map_utils/wopd_map_utils copy.py index 24974b85..0cc69d25 100644 --- a/d123/conversion/datasets/wopd/waymo_map_utils/wopd_map_utils copy.py +++ b/py123d/conversion/datasets/wopd/waymo_map_utils/wopd_map_utils copy.py @@ -8,11 +8,11 @@ # import pandas as pd # import shapely.geometry as geom -# from d123.common.utils.dependencies import check_dependencies -# from d123.conversion.datasets.wopd.waymo_map_utils.womp_boundary_utils import extract_lane_boundaries -# from d123.datatypes.maps.map_datatypes import MapLayer, RoadEdgeType, RoadLineType -# from d123.geometry import Point3DIndex, Polyline3D -# from d123.geometry.utils.units import mph_to_mps +# from py123d.common.utils.dependencies import check_dependencies +# from py123d.conversion.datasets.wopd.waymo_map_utils.womp_boundary_utils import extract_lane_boundaries +# from py123d.datatypes.maps.map_datatypes import MapLayer, RoadEdgeType, RoadLineType +# from py123d.geometry import Point3DIndex, Polyline3D +# from py123d.geometry.utils.units import mph_to_mps # check_dependencies(modules=["waymo_open_dataset"], optional_name="waymo") # from waymo_open_dataset import dataset_pb2 diff --git a/d123/conversion/datasets/wopd/waymo_map_utils/wopd_map_utils.py b/py123d/conversion/datasets/wopd/waymo_map_utils/wopd_map_utils.py similarity index 93% rename from d123/conversion/datasets/wopd/waymo_map_utils/wopd_map_utils.py rename to py123d/conversion/datasets/wopd/waymo_map_utils/wopd_map_utils.py index 4f8782af..741d9de7 100644 --- a/d123/conversion/datasets/wopd/waymo_map_utils/wopd_map_utils.py +++ b/py123d/conversion/datasets/wopd/waymo_map_utils/wopd_map_utils.py @@ -2,16 +2,16 @@ import numpy as np -from d123.common.utils.dependencies import check_dependencies -from d123.conversion.datasets.wopd.utils.wopd_constants import ( +from py123d.common.utils.dependencies import check_dependencies +from py123d.conversion.datasets.wopd.utils.wopd_constants import ( WAYMO_LANE_TYPE_CONVERSION, WAYMO_ROAD_EDGE_TYPE_CONVERSION, WAYMO_ROAD_LINE_TYPE_CONVERSION, ) -from d123.conversion.datasets.wopd.waymo_map_utils.womp_boundary_utils import WaymoLaneData, fill_lane_boundaries -from d123.conversion.map_writer.abstract_map_writer import AbstractMapWriter -from d123.datatypes.maps.abstract_map_objects import AbstractLane, AbstractRoadEdge, AbstractRoadLine -from d123.datatypes.maps.cache.cache_map_objects import ( +from py123d.conversion.datasets.wopd.waymo_map_utils.womp_boundary_utils import WaymoLaneData, fill_lane_boundaries +from py123d.conversion.map_writer.abstract_map_writer import AbstractMapWriter +from py123d.datatypes.maps.abstract_map_objects import AbstractLane, AbstractRoadEdge, AbstractRoadLine +from py123d.datatypes.maps.cache.cache_map_objects import ( CacheCarpark, CacheCrosswalk, CacheLane, @@ -19,9 +19,9 @@ CacheRoadEdge, CacheRoadLine, ) -from d123.datatypes.maps.map_datatypes import LaneType, RoadEdgeType, RoadLineType -from d123.geometry import Polyline3D -from d123.geometry.utils.units import mph_to_mps +from py123d.datatypes.maps.map_datatypes import LaneType, RoadEdgeType, RoadLineType +from py123d.geometry import Polyline3D +from py123d.geometry.utils.units import mph_to_mps check_dependencies(modules=["waymo_open_dataset"], optional_name="waymo") from waymo_open_dataset import dataset_pb2 diff --git a/d123/conversion/datasets/wopd/wopd_converter.py b/py123d/conversion/datasets/wopd/wopd_converter.py similarity 
index 90% rename from d123/conversion/datasets/wopd/wopd_converter.py rename to py123d/conversion/datasets/wopd/wopd_converter.py index fc8bdffa..801421de 100644 --- a/d123/conversion/datasets/wopd/wopd_converter.py +++ b/py123d/conversion/datasets/wopd/wopd_converter.py @@ -6,35 +6,35 @@ import numpy as np import numpy.typing as npt -from d123.common.utils.dependencies import check_dependencies -from d123.conversion.abstract_dataset_converter import AbstractDatasetConverter -from d123.conversion.dataset_converter_config import DatasetConverterConfig -from d123.conversion.datasets.wopd.utils.wopd_constants import ( +from py123d.common.utils.dependencies import check_dependencies +from py123d.conversion.abstract_dataset_converter import AbstractDatasetConverter +from py123d.conversion.dataset_converter_config import DatasetConverterConfig +from py123d.conversion.datasets.wopd.utils.wopd_constants import ( WOPD_AVAILABLE_SPLITS, WOPD_CAMERA_TYPES, WOPD_DETECTION_NAME_DICT, WOPD_LIDAR_TYPES, ) -from d123.conversion.datasets.wopd.waymo_map_utils.wopd_map_utils import convert_wopd_map -from d123.conversion.datasets.wopd.wopd_utils import parse_range_image_and_camera_projection -from d123.conversion.log_writer.abstract_log_writer import AbstractLogWriter -from d123.conversion.map_writer.abstract_map_writer import AbstractMapWriter -from d123.conversion.utils.sensor_utils.camera_conventions import CameraConvention, convert_camera_convention -from d123.conversion.utils.sensor_utils.lidar_index_registry import DefaultLidarIndex, WOPDLidarIndex -from d123.datatypes.detections.detection import BoxDetectionMetadata, BoxDetectionSE3, BoxDetectionWrapper -from d123.datatypes.maps.map_metadata import MapMetadata -from d123.datatypes.scene.scene_metadata import LogMetadata -from d123.datatypes.sensors.camera.pinhole_camera import ( +from py123d.conversion.datasets.wopd.waymo_map_utils.wopd_map_utils import convert_wopd_map +from py123d.conversion.datasets.wopd.wopd_utils import parse_range_image_and_camera_projection +from py123d.conversion.log_writer.abstract_log_writer import AbstractLogWriter +from py123d.conversion.map_writer.abstract_map_writer import AbstractMapWriter +from py123d.conversion.utils.sensor_utils.camera_conventions import CameraConvention, convert_camera_convention +from py123d.conversion.utils.sensor_utils.lidar_index_registry import DefaultLidarIndex, WOPDLidarIndex +from py123d.datatypes.detections.detection import BoxDetectionMetadata, BoxDetectionSE3, BoxDetectionWrapper +from py123d.datatypes.maps.map_metadata import MapMetadata +from py123d.datatypes.scene.scene_metadata import LogMetadata +from py123d.datatypes.sensors.camera.pinhole_camera import ( PinholeCameraMetadata, PinholeCameraType, PinholeDistortion, PinholeIntrinsics, ) -from d123.datatypes.sensors.lidar.lidar import LiDARMetadata, LiDARType -from d123.datatypes.time.time_point import TimePoint -from d123.datatypes.vehicle_state.ego_state import DynamicStateSE3, EgoStateSE3 -from d123.datatypes.vehicle_state.vehicle_parameters import get_wopd_chrysler_pacifica_parameters -from d123.geometry import ( +from py123d.datatypes.sensors.lidar.lidar import LiDARMetadata, LiDARType +from py123d.datatypes.time.time_point import TimePoint +from py123d.datatypes.vehicle_state.ego_state import DynamicStateSE3, EgoStateSE3 +from py123d.datatypes.vehicle_state.vehicle_parameters import get_wopd_chrysler_pacifica_parameters +from py123d.geometry import ( BoundingBoxSE3, BoundingBoxSE3Index, EulerAngles, @@ -44,9 +44,9 @@ Vector3D, 
Vector3DIndex, ) -from d123.geometry.transform.transform_se3 import convert_relative_to_absolute_se3_array -from d123.geometry.utils.constants import DEFAULT_PITCH, DEFAULT_ROLL -from d123.geometry.utils.rotation_utils import ( +from py123d.geometry.transform.transform_se3 import convert_relative_to_absolute_se3_array +from py123d.geometry.utils.constants import DEFAULT_PITCH, DEFAULT_ROLL +from py123d.geometry.utils.rotation_utils import ( get_euler_array_from_quaternion_array, get_quaternion_array_from_euler_array, ) @@ -390,7 +390,7 @@ def _extract_wopd_cameras( camera_type = WOPD_CAMERA_TYPES[calibration.name] camera_transform = np.array(calibration.extrinsic.transform, dtype=np.float64).reshape(4, 4) camera_pose = StateSE3.from_transformation_matrix(camera_transform) - # NOTE: WOPD uses a different camera convention than d123 + # NOTE: WOPD uses a different camera convention than py123d # https://arxiv.org/pdf/1912.04838 (Figure 1.) camera_pose = convert_camera_convention( camera_pose, diff --git a/d123/conversion/datasets/wopd/wopd_utils.py b/py123d/conversion/datasets/wopd/wopd_utils.py similarity index 98% rename from d123/conversion/datasets/wopd/wopd_utils.py rename to py123d/conversion/datasets/wopd/wopd_utils.py index e58813f9..e81597bf 100644 --- a/d123/conversion/datasets/wopd/wopd_utils.py +++ b/py123d/conversion/datasets/wopd/wopd_utils.py @@ -1,6 +1,6 @@ from typing import Dict, List, Tuple -from d123.common.utils.dependencies import check_dependencies +from py123d.common.utils.dependencies import check_dependencies check_dependencies(modules=["tensorflow", "waymo_open_dataset"], optional_name="waymo") import tensorflow as tf diff --git a/d123/conversion/log_writer/__init__.py b/py123d/conversion/log_writer/__init__.py similarity index 100% rename from d123/conversion/log_writer/__init__.py rename to py123d/conversion/log_writer/__init__.py diff --git a/d123/conversion/log_writer/abstract_log_writer.py b/py123d/conversion/log_writer/abstract_log_writer.py similarity index 70% rename from d123/conversion/log_writer/abstract_log_writer.py rename to py123d/conversion/log_writer/abstract_log_writer.py index cc7fdf83..dc67495a 100644 --- a/d123/conversion/log_writer/abstract_log_writer.py +++ b/py123d/conversion/log_writer/abstract_log_writer.py @@ -1,13 +1,13 @@ import abc from typing import Any, Dict, List, Optional, Tuple -from d123.conversion.dataset_converter_config import DatasetConverterConfig -from d123.datatypes.detections.detection import BoxDetectionWrapper, TrafficLightDetectionWrapper -from d123.datatypes.scene.scene_metadata import LogMetadata -from d123.datatypes.sensors.camera.pinhole_camera import PinholeCameraType -from d123.datatypes.sensors.lidar.lidar import LiDARType -from d123.datatypes.time.time_point import TimePoint -from d123.datatypes.vehicle_state.ego_state import EgoStateSE3 +from py123d.conversion.dataset_converter_config import DatasetConverterConfig +from py123d.datatypes.detections.detection import BoxDetectionWrapper, TrafficLightDetectionWrapper +from py123d.datatypes.scene.scene_metadata import LogMetadata +from py123d.datatypes.sensors.camera.pinhole_camera import PinholeCameraType +from py123d.datatypes.sensors.lidar.lidar import LiDARType +from py123d.datatypes.time.time_point import TimePoint +from py123d.datatypes.vehicle_state.ego_state import EgoStateSE3 class AbstractLogWriter(abc.ABC): diff --git a/d123/conversion/log_writer/arrow_log_writer.py b/py123d/conversion/log_writer/arrow_log_writer.py similarity index 94% rename from 
d123/conversion/log_writer/arrow_log_writer.py rename to py123d/conversion/log_writer/arrow_log_writer.py index a1b70ea2..684db049 100644 --- a/d123/conversion/log_writer/arrow_log_writer.py +++ b/py123d/conversion/log_writer/arrow_log_writer.py @@ -3,16 +3,16 @@ import pyarrow as pa -from d123.common.utils.uuid import create_deterministic_uuid -from d123.conversion.abstract_dataset_converter import AbstractLogWriter, DatasetConverterConfig -from d123.datatypes.detections.detection import BoxDetectionWrapper, TrafficLightDetectionWrapper -from d123.datatypes.scene.arrow.utils.arrow_metadata_utils import add_log_metadata_to_arrow_schema -from d123.datatypes.scene.scene_metadata import LogMetadata -from d123.datatypes.sensors.camera.pinhole_camera import PinholeCameraType -from d123.datatypes.sensors.lidar.lidar import LiDARType -from d123.datatypes.time.time_point import TimePoint -from d123.datatypes.vehicle_state.ego_state import EgoStateSE3, EgoStateSE3Index -from d123.geometry import BoundingBoxSE3Index, StateSE3, StateSE3Index, Vector3DIndex +from py123d.common.utils.uuid import create_deterministic_uuid +from py123d.conversion.abstract_dataset_converter import AbstractLogWriter, DatasetConverterConfig +from py123d.datatypes.detections.detection import BoxDetectionWrapper, TrafficLightDetectionWrapper +from py123d.datatypes.scene.arrow.utils.arrow_metadata_utils import add_log_metadata_to_arrow_schema +from py123d.datatypes.scene.scene_metadata import LogMetadata +from py123d.datatypes.sensors.camera.pinhole_camera import PinholeCameraType +from py123d.datatypes.sensors.lidar.lidar import LiDARType +from py123d.datatypes.time.time_point import TimePoint +from py123d.datatypes.vehicle_state.ego_state import EgoStateSE3, EgoStateSE3Index +from py123d.geometry import BoundingBoxSE3Index, StateSE3, StateSE3Index, Vector3DIndex class ArrowLogWriter(AbstractLogWriter): diff --git a/d123/conversion/map_writer/abstract_map_writer.py b/py123d/conversion/map_writer/abstract_map_writer.py similarity index 91% rename from d123/conversion/map_writer/abstract_map_writer.py rename to py123d/conversion/map_writer/abstract_map_writer.py index 25158a76..99c867e6 100644 --- a/d123/conversion/map_writer/abstract_map_writer.py +++ b/py123d/conversion/map_writer/abstract_map_writer.py @@ -1,8 +1,8 @@ import abc from abc import abstractmethod -from d123.conversion.dataset_converter_config import DatasetConverterConfig -from d123.datatypes.maps.abstract_map_objects import ( +from py123d.conversion.dataset_converter_config import DatasetConverterConfig +from py123d.datatypes.maps.abstract_map_objects import ( AbstractCarpark, AbstractCrosswalk, AbstractGenericDrivable, @@ -14,7 +14,7 @@ AbstractStopLine, AbstractWalkway, ) -from d123.datatypes.maps.map_metadata import MapMetadata +from py123d.datatypes.maps.map_metadata import MapMetadata class AbstractMapWriter(abc.ABC): diff --git a/d123/conversion/map_writer/gpkg_map_writer.py b/py123d/conversion/map_writer/gpkg_map_writer.py similarity index 95% rename from d123/conversion/map_writer/gpkg_map_writer.py rename to py123d/conversion/map_writer/gpkg_map_writer.py index 3ae6e9e8..8190d61d 100644 --- a/d123/conversion/map_writer/gpkg_map_writer.py +++ b/py123d/conversion/map_writer/gpkg_map_writer.py @@ -6,9 +6,9 @@ import pandas as pd import shapely.geometry as geom -from d123.conversion.dataset_converter_config import DatasetConverterConfig -from d123.conversion.map_writer.abstract_map_writer import AbstractMapWriter -from 
d123.datatypes.maps.abstract_map_objects import ( +from py123d.conversion.dataset_converter_config import DatasetConverterConfig +from py123d.conversion.map_writer.abstract_map_writer import AbstractMapWriter +from py123d.datatypes.maps.abstract_map_objects import ( AbstractCarpark, AbstractCrosswalk, AbstractGenericDrivable, @@ -22,9 +22,9 @@ AbstractSurfaceMapObject, AbstractWalkway, ) -from d123.datatypes.maps.map_datatypes import MapLayer -from d123.datatypes.maps.map_metadata import MapMetadata -from d123.geometry.polyline import Polyline3D +from py123d.datatypes.maps.map_datatypes import MapLayer +from py123d.datatypes.maps.map_metadata import MapMetadata +from py123d.geometry.polyline import Polyline3D MAP_OBJECT_DATA = Dict[str, List[Union[str, int, float, bool, geom.base.BaseGeometry]]] diff --git a/d123/conversion/utils/__init__.py b/py123d/conversion/utils/__init__.py similarity index 100% rename from d123/conversion/utils/__init__.py rename to py123d/conversion/utils/__init__.py diff --git a/d123/conversion/utils/map_utils/__init__.py b/py123d/conversion/utils/map_utils/__init__.py similarity index 100% rename from d123/conversion/utils/map_utils/__init__.py rename to py123d/conversion/utils/map_utils/__init__.py diff --git a/d123/conversion/utils/map_utils/opendrive/__init__ copy.py b/py123d/conversion/utils/map_utils/opendrive/__init__ copy.py similarity index 100% rename from d123/conversion/utils/map_utils/opendrive/__init__ copy.py rename to py123d/conversion/utils/map_utils/opendrive/__init__ copy.py diff --git a/d123/conversion/utils/map_utils/opendrive/__init__.py b/py123d/conversion/utils/map_utils/opendrive/__init__.py similarity index 100% rename from d123/conversion/utils/map_utils/opendrive/__init__.py rename to py123d/conversion/utils/map_utils/opendrive/__init__.py diff --git a/d123/conversion/utils/map_utils/opendrive/opendrive_map_conversion.py b/py123d/conversion/utils/map_utils/opendrive/opendrive_map_conversion.py similarity index 95% rename from d123/conversion/utils/map_utils/opendrive/opendrive_map_conversion.py rename to py123d/conversion/utils/map_utils/opendrive/opendrive_map_conversion.py index 2edafb96..c904efe2 100644 --- a/d123/conversion/utils/map_utils/opendrive/opendrive_map_conversion.py +++ b/py123d/conversion/utils/map_utils/opendrive/opendrive_map_conversion.py @@ -10,22 +10,22 @@ import shapely from shapely.ops import polygonize, unary_union -from d123.conversion.maps.map_datatypes import MapLayer, RoadEdgeType, RoadLineType -from d123.conversion.utils.map_utils.opendrive.parser.opendrive import Junction, OpenDrive -from d123.conversion.utils.map_utils.opendrive.utils.collection import collect_element_helpers -from d123.conversion.utils.map_utils.opendrive.utils.id_mapping import IntIDMapping -from d123.conversion.utils.map_utils.opendrive.utils.lane_helper import ( +from py123d.conversion.utils.map_utils.opendrive.parser.opendrive import Junction, OpenDrive +from py123d.conversion.utils.map_utils.opendrive.utils.collection import collect_element_helpers +from py123d.conversion.utils.map_utils.opendrive.utils.id_mapping import IntIDMapping +from py123d.conversion.utils.map_utils.opendrive.utils.lane_helper import ( OpenDriveLaneGroupHelper, OpenDriveLaneHelper, ) -from d123.conversion.utils.map_utils.opendrive.utils.objects_helper import ( +from py123d.conversion.utils.map_utils.opendrive.utils.objects_helper import ( OpenDriveObjectHelper, ) -from d123.conversion.utils.map_utils.road_edge.road_edge_2d_utils import 
split_line_geometry_by_max_length -from d123.conversion.utils.map_utils.road_edge.road_edge_3d_utils import get_road_edges_3d_from_gdf +from py123d.conversion.utils.map_utils.road_edge.road_edge_2d_utils import split_line_geometry_by_max_length +from py123d.conversion.utils.map_utils.road_edge.road_edge_3d_utils import get_road_edges_3d_from_gdf +from py123d.datatypes.maps.map_datatypes import MapLayer, RoadEdgeType, RoadLineType logger = logging.getLogger(__name__) -D123_MAPS_ROOT = Path(os.environ.get("D123_MAPS_ROOT")) +PY123D_MAPS_ROOT = Path(os.environ.get("PY123D_MAPS_ROOT")) MAX_ROAD_EDGE_LENGTH: Final[float] = 100.0 # [m] @@ -72,7 +72,7 @@ def convert_from_xodr( dataframes[MapLayer.LANE], dataframes[MapLayer.LANE_GROUP], ) - map_file_name = D123_MAPS_ROOT / f"{location}.gpkg" + map_file_name = PY123D_MAPS_ROOT / f"{location}.gpkg" with warnings.catch_warnings(): warnings.filterwarnings("ignore", message="'crs' was not provided") for layer, gdf in dataframes.items(): diff --git a/d123/conversion/utils/map_utils/opendrive/parser/__init__.py b/py123d/conversion/utils/map_utils/opendrive/parser/__init__.py similarity index 100% rename from d123/conversion/utils/map_utils/opendrive/parser/__init__.py rename to py123d/conversion/utils/map_utils/opendrive/parser/__init__.py diff --git a/d123/conversion/utils/map_utils/opendrive/parser/elevation.py b/py123d/conversion/utils/map_utils/opendrive/parser/elevation.py similarity index 97% rename from d123/conversion/utils/map_utils/opendrive/parser/elevation.py rename to py123d/conversion/utils/map_utils/opendrive/parser/elevation.py index c9339091..2a1261ca 100644 --- a/d123/conversion/utils/map_utils/opendrive/parser/elevation.py +++ b/py123d/conversion/utils/map_utils/opendrive/parser/elevation.py @@ -4,7 +4,7 @@ from typing import List, Optional from xml.etree.ElementTree import Element -from d123.conversion.utils.map_utils.opendrive.parser.polynomial import Polynomial +from py123d.conversion.utils.map_utils.opendrive.parser.polynomial import Polynomial @dataclass diff --git a/d123/conversion/utils/map_utils/opendrive/parser/geometry.py b/py123d/conversion/utils/map_utils/opendrive/parser/geometry.py similarity index 99% rename from d123/conversion/utils/map_utils/opendrive/parser/geometry.py rename to py123d/conversion/utils/map_utils/opendrive/parser/geometry.py index dc782a7a..3ddb24a8 100644 --- a/d123/conversion/utils/map_utils/opendrive/parser/geometry.py +++ b/py123d/conversion/utils/map_utils/opendrive/parser/geometry.py @@ -8,7 +8,7 @@ import numpy.typing as npt from scipy.special import fresnel -from d123.geometry import StateSE2Index +from py123d.geometry import StateSE2Index @dataclass diff --git a/d123/conversion/utils/map_utils/opendrive/parser/lane.py b/py123d/conversion/utils/map_utils/opendrive/parser/lane.py similarity index 98% rename from d123/conversion/utils/map_utils/opendrive/parser/lane.py rename to py123d/conversion/utils/map_utils/opendrive/parser/lane.py index 4a8341a3..bb6df9f5 100644 --- a/d123/conversion/utils/map_utils/opendrive/parser/lane.py +++ b/py123d/conversion/utils/map_utils/opendrive/parser/lane.py @@ -4,7 +4,7 @@ from typing import List, Optional from xml.etree.ElementTree import Element -from d123.conversion.utils.map_utils.opendrive.parser.polynomial import Polynomial +from py123d.conversion.utils.map_utils.opendrive.parser.polynomial import Polynomial @dataclass diff --git a/d123/conversion/utils/map_utils/opendrive/parser/objects.py b/py123d/conversion/utils/map_utils/opendrive/parser/objects.py 
similarity index 100% rename from d123/conversion/utils/map_utils/opendrive/parser/objects.py rename to py123d/conversion/utils/map_utils/opendrive/parser/objects.py diff --git a/d123/conversion/utils/map_utils/opendrive/parser/opendrive.py b/py123d/conversion/utils/map_utils/opendrive/parser/opendrive.py similarity index 98% rename from d123/conversion/utils/map_utils/opendrive/parser/opendrive.py rename to py123d/conversion/utils/map_utils/opendrive/parser/opendrive.py index 586a72d8..719f7bf3 100644 --- a/d123/conversion/utils/map_utils/opendrive/parser/opendrive.py +++ b/py123d/conversion/utils/map_utils/opendrive/parser/opendrive.py @@ -6,7 +6,7 @@ from typing import List, Literal, Optional from xml.etree.ElementTree import Element, parse -from d123.conversion.utils.map_utils.opendrive.parser.road import Road +from py123d.conversion.utils.map_utils.opendrive.parser.road import Road @dataclass diff --git a/d123/conversion/utils/map_utils/opendrive/parser/polynomial.py b/py123d/conversion/utils/map_utils/opendrive/parser/polynomial.py similarity index 100% rename from d123/conversion/utils/map_utils/opendrive/parser/polynomial.py rename to py123d/conversion/utils/map_utils/opendrive/parser/polynomial.py diff --git a/d123/conversion/utils/map_utils/opendrive/parser/reference.py b/py123d/conversion/utils/map_utils/opendrive/parser/reference.py similarity index 93% rename from d123/conversion/utils/map_utils/opendrive/parser/reference.py rename to py123d/conversion/utils/map_utils/opendrive/parser/reference.py index b94ddc5a..9ea3f0f0 100644 --- a/d123/conversion/utils/map_utils/opendrive/parser/reference.py +++ b/py123d/conversion/utils/map_utils/opendrive/parser/reference.py @@ -9,11 +9,11 @@ import numpy as np import numpy.typing as npt -from d123.conversion.utils.map_utils.opendrive.parser.elevation import Elevation -from d123.conversion.utils.map_utils.opendrive.parser.geometry import Arc, Geometry, Line, Spiral -from d123.conversion.utils.map_utils.opendrive.parser.lane import LaneOffset, Width -from d123.conversion.utils.map_utils.opendrive.parser.polynomial import Polynomial -from d123.geometry import Point3DIndex, StateSE2Index +from py123d.conversion.utils.map_utils.opendrive.parser.elevation import Elevation +from py123d.conversion.utils.map_utils.opendrive.parser.geometry import Arc, Geometry, Line, Spiral +from py123d.conversion.utils.map_utils.opendrive.parser.lane import LaneOffset, Width +from py123d.conversion.utils.map_utils.opendrive.parser.polynomial import Polynomial +from py123d.geometry import Point3DIndex, StateSE2Index TOLERANCE: Final[float] = 1e-3 diff --git a/d123/conversion/utils/map_utils/opendrive/parser/road.py b/py123d/conversion/utils/map_utils/opendrive/parser/road.py similarity index 93% rename from d123/conversion/utils/map_utils/opendrive/parser/road.py rename to py123d/conversion/utils/map_utils/opendrive/parser/road.py index a763911b..7db82b69 100644 --- a/d123/conversion/utils/map_utils/opendrive/parser/road.py +++ b/py123d/conversion/utils/map_utils/opendrive/parser/road.py @@ -4,10 +4,10 @@ from typing import List, Optional from xml.etree.ElementTree import Element -from d123.conversion.utils.map_utils.opendrive.parser.elevation import ElevationProfile, LateralProfile -from d123.conversion.utils.map_utils.opendrive.parser.lane import Lanes -from d123.conversion.utils.map_utils.opendrive.parser.objects import Object -from d123.conversion.utils.map_utils.opendrive.parser.reference import PlanView +from 
py123d.conversion.utils.map_utils.opendrive.parser.elevation import ElevationProfile, LateralProfile +from py123d.conversion.utils.map_utils.opendrive.parser.lane import Lanes +from py123d.conversion.utils.map_utils.opendrive.parser.objects import Object +from py123d.conversion.utils.map_utils.opendrive.parser.reference import PlanView @dataclass diff --git a/d123/conversion/utils/map_utils/opendrive/utils/__init__.py b/py123d/conversion/utils/map_utils/opendrive/utils/__init__.py similarity index 100% rename from d123/conversion/utils/map_utils/opendrive/utils/__init__.py rename to py123d/conversion/utils/map_utils/opendrive/utils/__init__.py diff --git a/d123/conversion/utils/map_utils/opendrive/utils/collection.py b/py123d/conversion/utils/map_utils/opendrive/utils/collection.py similarity index 96% rename from d123/conversion/utils/map_utils/opendrive/utils/collection.py rename to py123d/conversion/utils/map_utils/opendrive/utils/collection.py index 6b78f3b1..fc6f96f3 100644 --- a/d123/conversion/utils/map_utils/opendrive/utils/collection.py +++ b/py123d/conversion/utils/map_utils/opendrive/utils/collection.py @@ -3,21 +3,21 @@ import numpy as np -from d123.conversion.utils.map_utils.opendrive.parser.opendrive import Junction, OpenDrive -from d123.conversion.utils.map_utils.opendrive.parser.reference import ReferenceLine -from d123.conversion.utils.map_utils.opendrive.parser.road import Road -from d123.conversion.utils.map_utils.opendrive.utils.id_system import ( +from py123d.conversion.utils.map_utils.opendrive.parser.opendrive import Junction, OpenDrive +from py123d.conversion.utils.map_utils.opendrive.parser.reference import ReferenceLine +from py123d.conversion.utils.map_utils.opendrive.parser.road import Road +from py123d.conversion.utils.map_utils.opendrive.utils.id_system import ( build_lane_id, derive_lane_section_id, lane_group_id_from_lane_id, road_id_from_lane_group_id, ) -from d123.conversion.utils.map_utils.opendrive.utils.lane_helper import ( +from py123d.conversion.utils.map_utils.opendrive.utils.lane_helper import ( OpenDriveLaneGroupHelper, OpenDriveLaneHelper, lane_section_to_lane_helpers, ) -from d123.conversion.utils.map_utils.opendrive.utils.objects_helper import OpenDriveObjectHelper, get_object_helper +from py123d.conversion.utils.map_utils.opendrive.utils.objects_helper import OpenDriveObjectHelper, get_object_helper logger = logging.getLogger(__name__) diff --git a/d123/conversion/utils/map_utils/opendrive/utils/id_mapping.py b/py123d/conversion/utils/map_utils/opendrive/utils/id_mapping.py similarity index 100% rename from d123/conversion/utils/map_utils/opendrive/utils/id_mapping.py rename to py123d/conversion/utils/map_utils/opendrive/utils/id_mapping.py diff --git a/d123/conversion/utils/map_utils/opendrive/utils/id_system.py b/py123d/conversion/utils/map_utils/opendrive/utils/id_system.py similarity index 100% rename from d123/conversion/utils/map_utils/opendrive/utils/id_system.py rename to py123d/conversion/utils/map_utils/opendrive/utils/id_system.py diff --git a/d123/conversion/utils/map_utils/opendrive/utils/lane_helper.py b/py123d/conversion/utils/map_utils/opendrive/utils/lane_helper.py similarity index 95% rename from d123/conversion/utils/map_utils/opendrive/utils/lane_helper.py rename to py123d/conversion/utils/map_utils/opendrive/utils/lane_helper.py index 76ef62fa..5401859d 100644 --- a/d123/conversion/utils/map_utils/opendrive/utils/lane_helper.py +++ b/py123d/conversion/utils/map_utils/opendrive/utils/lane_helper.py @@ -6,16 +6,16 @@ import 
numpy.typing as npt import shapely -from d123.conversion.utils.map_utils.opendrive.parser.lane import Lane, LaneSection -from d123.conversion.utils.map_utils.opendrive.parser.reference import ReferenceLine -from d123.conversion.utils.map_utils.opendrive.parser.road import RoadType -from d123.conversion.utils.map_utils.opendrive.utils.id_system import ( +from py123d.conversion.utils.map_utils.opendrive.parser.lane import Lane, LaneSection +from py123d.conversion.utils.map_utils.opendrive.parser.reference import ReferenceLine +from py123d.conversion.utils.map_utils.opendrive.parser.road import RoadType +from py123d.conversion.utils.map_utils.opendrive.utils.id_system import ( derive_lane_group_id, derive_lane_id, lane_group_id_from_lane_id, ) -from d123.geometry import StateSE2Index -from d123.geometry.utils.units import kmph_to_mps, mph_to_mps +from py123d.geometry import StateSE2Index +from py123d.geometry.utils.units import kmph_to_mps, mph_to_mps @dataclass diff --git a/d123/conversion/utils/map_utils/opendrive/utils/objects_helper.py b/py123d/conversion/utils/map_utils/opendrive/utils/objects_helper.py similarity index 88% rename from d123/conversion/utils/map_utils/opendrive/utils/objects_helper.py rename to py123d/conversion/utils/map_utils/opendrive/utils/objects_helper.py index 88a11bcf..72104899 100644 --- a/d123/conversion/utils/map_utils/opendrive/utils/objects_helper.py +++ b/py123d/conversion/utils/map_utils/opendrive/utils/objects_helper.py @@ -5,11 +5,11 @@ import numpy.typing as npt import shapely -from d123.conversion.utils.map_utils.opendrive.parser.objects import Object -from d123.conversion.utils.map_utils.opendrive.parser.reference import ReferenceLine -from d123.geometry import Point2D, Point3D, Point3DIndex, StateSE2 -from d123.geometry.transform.tranform_2d import translate_along_yaw -from d123.geometry.utils.rotation_utils import normalize_angle +from py123d.conversion.utils.map_utils.opendrive.parser.objects import Object +from py123d.conversion.utils.map_utils.opendrive.parser.reference import ReferenceLine +from py123d.geometry import Point2D, Point3D, Point3DIndex, StateSE2 +from py123d.geometry.transform.tranform_2d import translate_along_yaw +from py123d.geometry.utils.rotation_utils import normalize_angle # TODO: make naming consistent with group_collections.py diff --git a/d123/conversion/utils/map_utils/road_edge/__init__.py b/py123d/conversion/utils/map_utils/road_edge/__init__.py similarity index 100% rename from d123/conversion/utils/map_utils/road_edge/__init__.py rename to py123d/conversion/utils/map_utils/road_edge/__init__.py diff --git a/d123/conversion/utils/map_utils/road_edge/road_edge_2d_utils.py b/py123d/conversion/utils/map_utils/road_edge/road_edge_2d_utils.py similarity index 100% rename from d123/conversion/utils/map_utils/road_edge/road_edge_2d_utils.py rename to py123d/conversion/utils/map_utils/road_edge/road_edge_2d_utils.py diff --git a/d123/conversion/utils/map_utils/road_edge/road_edge_3d_utils.py b/py123d/conversion/utils/map_utils/road_edge/road_edge_3d_utils.py similarity index 98% rename from d123/conversion/utils/map_utils/road_edge/road_edge_3d_utils.py rename to py123d/conversion/utils/map_utils/road_edge/road_edge_3d_utils.py index 0d4aea2b..837abc80 100644 --- a/d123/conversion/utils/map_utils/road_edge/road_edge_3d_utils.py +++ b/py123d/conversion/utils/map_utils/road_edge/road_edge_3d_utils.py @@ -9,9 +9,9 @@ import shapely from shapely.geometry import LineString -from 
d123.conversion.utils.map_utils.road_edge.road_edge_2d_utils import get_road_edge_linear_rings -from d123.geometry import Point3DIndex -from d123.geometry.occupancy_map import OccupancyMap2D +from py123d.conversion.utils.map_utils.road_edge.road_edge_2d_utils import get_road_edge_linear_rings +from py123d.geometry import Point3DIndex +from py123d.geometry.occupancy_map import OccupancyMap2D logger = logging.getLogger(__name__) diff --git a/d123/conversion/utils/sensor_utils/camera_conventions.py b/py123d/conversion/utils/sensor_utils/camera_conventions.py similarity index 98% rename from d123/conversion/utils/sensor_utils/camera_conventions.py rename to py123d/conversion/utils/sensor_utils/camera_conventions.py index c734f00a..49e3a533 100644 --- a/d123/conversion/utils/sensor_utils/camera_conventions.py +++ b/py123d/conversion/utils/sensor_utils/camera_conventions.py @@ -28,7 +28,7 @@ import numpy as np -from d123.geometry import StateSE3 +from py123d.geometry import StateSE3 class CameraConvention(Enum): diff --git a/d123/conversion/utils/sensor_utils/lidar_index_registry.py b/py123d/conversion/utils/sensor_utils/lidar_index_registry.py similarity index 95% rename from d123/conversion/utils/sensor_utils/lidar_index_registry.py rename to py123d/conversion/utils/sensor_utils/lidar_index_registry.py index ad90b334..9242366b 100644 --- a/d123/conversion/utils/sensor_utils/lidar_index_registry.py +++ b/py123d/conversion/utils/sensor_utils/lidar_index_registry.py @@ -1,6 +1,6 @@ from enum import IntEnum -from d123.common.utils.enums import classproperty +from py123d.common.utils.enums import classproperty LIDAR_INDEX_REGISTRY = {} diff --git a/d123/datatypes/__init__.py b/py123d/datatypes/__init__.py similarity index 100% rename from d123/datatypes/__init__.py rename to py123d/datatypes/__init__.py diff --git a/d123/datatypes/detections/__init__.py b/py123d/datatypes/detections/__init__.py similarity index 100% rename from d123/datatypes/detections/__init__.py rename to py123d/datatypes/detections/__init__.py diff --git a/d123/datatypes/detections/detection.py b/py123d/datatypes/detections/detection.py similarity index 93% rename from d123/datatypes/detections/detection.py rename to py123d/datatypes/detections/detection.py index 916e9c4e..415510e9 100644 --- a/d123/datatypes/detections/detection.py +++ b/py123d/datatypes/detections/detection.py @@ -4,10 +4,10 @@ import shapely -from d123.common.utils.enums import SerialIntEnum -from d123.datatypes.detections.detection_types import DetectionType -from d123.datatypes.time.time_point import TimePoint -from d123.geometry import BoundingBoxSE2, BoundingBoxSE3, OccupancyMap2D, StateSE2, StateSE3, Vector2D, Vector3D +from py123d.common.utils.enums import SerialIntEnum +from py123d.datatypes.detections.detection_types import DetectionType +from py123d.datatypes.time.time_point import TimePoint +from py123d.geometry import BoundingBoxSE2, BoundingBoxSE3, OccupancyMap2D, StateSE2, StateSE3, Vector2D, Vector3D @dataclass diff --git a/d123/datatypes/detections/detection_types.py b/py123d/datatypes/detections/detection_types.py similarity index 93% rename from d123/datatypes/detections/detection_types.py rename to py123d/datatypes/detections/detection_types.py index a22a614c..69754140 100644 --- a/d123/datatypes/detections/detection_types.py +++ b/py123d/datatypes/detections/detection_types.py @@ -1,11 +1,11 @@ from __future__ import annotations -from d123.common.utils.enums import SerialIntEnum +from py123d.common.utils.enums import SerialIntEnum class 
DetectionType(SerialIntEnum): """ - Enum for agents in d123. + Enum for agents in py123d. """ # TODO: diff --git a/d123/datatypes/maps/abstract_map.py b/py123d/datatypes/maps/abstract_map.py similarity index 91% rename from d123/datatypes/maps/abstract_map.py rename to py123d/datatypes/maps/abstract_map.py index e42f3fa1..3aee0e99 100644 --- a/d123/datatypes/maps/abstract_map.py +++ b/py123d/datatypes/maps/abstract_map.py @@ -5,10 +5,10 @@ import shapely -from d123.datatypes.maps.abstract_map_objects import AbstractMapObject -from d123.datatypes.maps.map_datatypes import MapLayer -from d123.datatypes.maps.map_metadata import MapMetadata -from d123.geometry import Point2D +from py123d.datatypes.maps.abstract_map_objects import AbstractMapObject +from py123d.datatypes.maps.map_datatypes import MapLayer +from py123d.datatypes.maps.map_metadata import MapMetadata +from py123d.geometry import Point2D # TODO: # - add docstrings diff --git a/d123/datatypes/maps/abstract_map_objects.py b/py123d/datatypes/maps/abstract_map_objects.py similarity index 98% rename from d123/datatypes/maps/abstract_map_objects.py rename to py123d/datatypes/maps/abstract_map_objects.py index 4b08cb05..83004a94 100644 --- a/d123/datatypes/maps/abstract_map_objects.py +++ b/py123d/datatypes/maps/abstract_map_objects.py @@ -6,8 +6,8 @@ import shapely.geometry as geom import trimesh -from d123.datatypes.maps.map_datatypes import MapLayer, RoadEdgeType, RoadLineType -from d123.geometry import Polyline2D, Polyline3D, PolylineSE2 +from py123d.datatypes.maps.map_datatypes import MapLayer, RoadEdgeType, RoadLineType +from py123d.geometry import Polyline2D, Polyline3D, PolylineSE2 # TODO: Refactor and just use int # type MapObjectIDType = Union[str, int] for Python >= 3.12 diff --git a/d123/datatypes/maps/cache/__init__.py b/py123d/datatypes/maps/cache/__init__.py similarity index 100% rename from d123/datatypes/maps/cache/__init__.py rename to py123d/datatypes/maps/cache/__init__.py diff --git a/d123/datatypes/maps/cache/cache_map_objects.py b/py123d/datatypes/maps/cache/cache_map_objects.py similarity index 97% rename from d123/datatypes/maps/cache/cache_map_objects.py rename to py123d/datatypes/maps/cache/cache_map_objects.py index 85a89673..498c01c1 100644 --- a/d123/datatypes/maps/cache/cache_map_objects.py +++ b/py123d/datatypes/maps/cache/cache_map_objects.py @@ -6,7 +6,7 @@ import shapely.geometry as geom import trimesh -from d123.datatypes.maps.abstract_map_objects import ( +from py123d.datatypes.maps.abstract_map_objects import ( AbstractCarpark, AbstractCrosswalk, AbstractGenericDrivable, @@ -20,9 +20,9 @@ AbstractWalkway, MapObjectIDType, ) -from d123.datatypes.maps.map_datatypes import MapLayer, RoadEdgeType, RoadLineType -from d123.geometry import Polyline3D -from d123.geometry.polyline import Polyline2D +from py123d.datatypes.maps.map_datatypes import MapLayer, RoadEdgeType, RoadLineType +from py123d.geometry import Polyline3D +from py123d.geometry.polyline import Polyline2D class CacheSurfaceObject(AbstractSurfaceMapObject): diff --git a/d123/datatypes/maps/gpkg/__init__.py b/py123d/datatypes/maps/gpkg/__init__.py similarity index 100% rename from d123/datatypes/maps/gpkg/__init__.py rename to py123d/datatypes/maps/gpkg/__init__.py diff --git a/d123/datatypes/maps/gpkg/gpkg_map.py b/py123d/datatypes/maps/gpkg/gpkg_map.py similarity index 95% rename from d123/datatypes/maps/gpkg/gpkg_map.py rename to py123d/datatypes/maps/gpkg/gpkg_map.py index d77f90a9..9c9b0b74 100644 --- a/d123/datatypes/maps/gpkg/gpkg_map.py 
+++ b/py123d/datatypes/maps/gpkg/gpkg_map.py @@ -11,9 +11,9 @@ import shapely import shapely.geometry as geom -from d123.datatypes.maps.abstract_map import AbstractMap -from d123.datatypes.maps.abstract_map_objects import AbstractMapObject -from d123.datatypes.maps.gpkg.gpkg_map_objects import ( +from py123d.datatypes.maps.abstract_map import AbstractMap +from py123d.datatypes.maps.abstract_map_objects import AbstractMapObject +from py123d.datatypes.maps.gpkg.gpkg_map_objects import ( GPKGCarpark, GPKGCrosswalk, GPKGGenericDrivable, @@ -24,10 +24,10 @@ GPKGRoadLine, GPKGWalkway, ) -from d123.datatypes.maps.gpkg.gpkg_utils import load_gdf_with_geometry_columns -from d123.datatypes.maps.map_datatypes import MapLayer -from d123.datatypes.maps.map_metadata import MapMetadata -from d123.geometry import Point2D +from py123d.datatypes.maps.gpkg.gpkg_utils import load_gdf_with_geometry_columns +from py123d.datatypes.maps.map_datatypes import MapLayer +from py123d.datatypes.maps.map_metadata import MapMetadata +from py123d.geometry import Point2D USE_ARROW: bool = True MAX_LRU_CACHED_TABLES: Final[int] = 128 # TODO: add to some configs @@ -377,18 +377,18 @@ def _get_road_line(self, id: str) -> Optional[GPKGRoadLine]: @lru_cache(maxsize=MAX_LRU_CACHED_TABLES) def get_global_map_api(dataset: str, location: str) -> GPKGMap: - D123_MAPS_ROOT = Path(os.environ.get("D123_MAPS_ROOT")) - gpkg_path = D123_MAPS_ROOT / dataset / f"{dataset}_{location}.gpkg" - assert gpkg_path.is_file(), f"{dataset}_{location}.gpkg not found in {str(D123_MAPS_ROOT)}." + PY123D_MAPS_ROOT = Path(os.environ.get("PY123D_MAPS_ROOT")) # TODO: Remove env variable + gpkg_path = PY123D_MAPS_ROOT / dataset / f"{dataset}_{location}.gpkg" + assert gpkg_path.is_file(), f"{dataset}_{location}.gpkg not found in {str(PY123D_MAPS_ROOT)}." map_api = GPKGMap(gpkg_path) map_api.initialize() return map_api def get_local_map_api(split_name: str, log_name: str) -> GPKGMap: - D123_MAPS_ROOT = Path(os.environ.get("D123_MAPS_ROOT")) - gpkg_path = D123_MAPS_ROOT / split_name / f"{log_name}.gpkg" - assert gpkg_path.is_file(), f"{log_name}.gpkg not found in {str(D123_MAPS_ROOT)}." + PY123D_MAPS_ROOT = Path(os.environ.get("PY123D_MAPS_ROOT")) # TODO: Remove env variable + gpkg_path = PY123D_MAPS_ROOT / split_name / f"{log_name}.gpkg" + assert gpkg_path.is_file(), f"{log_name}.gpkg not found in {str(PY123D_MAPS_ROOT)}." 
map_api = GPKGMap(gpkg_path) map_api.initialize() return map_api diff --git a/d123/datatypes/maps/gpkg/gpkg_map_objects.py b/py123d/datatypes/maps/gpkg/gpkg_map_objects.py similarity index 97% rename from d123/datatypes/maps/gpkg/gpkg_map_objects.py rename to py123d/datatypes/maps/gpkg/gpkg_map_objects.py index 64e78838..642c97b7 100644 --- a/d123/datatypes/maps/gpkg/gpkg_map_objects.py +++ b/py123d/datatypes/maps/gpkg/gpkg_map_objects.py @@ -10,7 +10,7 @@ import shapely.geometry as geom import trimesh -from d123.datatypes.maps.abstract_map_objects import ( +from py123d.datatypes.maps.abstract_map_objects import ( AbstractCarpark, AbstractCrosswalk, AbstractGenericDrivable, @@ -24,9 +24,9 @@ AbstractWalkway, MapObjectIDType, ) -from d123.datatypes.maps.gpkg.gpkg_utils import get_row_with_value, get_trimesh_from_boundaries -from d123.datatypes.maps.map_datatypes import RoadEdgeType, RoadLineType -from d123.geometry import Point3DIndex, Polyline3D +from py123d.datatypes.maps.gpkg.gpkg_utils import get_row_with_value, get_trimesh_from_boundaries +from py123d.datatypes.maps.map_datatypes import RoadEdgeType, RoadLineType +from py123d.geometry import Point3DIndex, Polyline3D class GPKGSurfaceObject(AbstractSurfaceMapObject): diff --git a/d123/datatypes/maps/gpkg/gpkg_utils.py b/py123d/datatypes/maps/gpkg/gpkg_utils.py similarity index 98% rename from d123/datatypes/maps/gpkg/gpkg_utils.py rename to py123d/datatypes/maps/gpkg/gpkg_utils.py index a1d382f7..54dd93e6 100644 --- a/d123/datatypes/maps/gpkg/gpkg_utils.py +++ b/py123d/datatypes/maps/gpkg/gpkg_utils.py @@ -6,7 +6,7 @@ import trimesh from shapely import wkt -from d123.geometry.polyline import Polyline3D +from py123d.geometry.polyline import Polyline3D def load_gdf_with_geometry_columns(gdf: gpd.GeoDataFrame, geometry_column_names: List[str] = []): diff --git a/d123/datatypes/maps/map_datatypes.py b/py123d/datatypes/maps/map_datatypes.py similarity index 97% rename from d123/datatypes/maps/map_datatypes.py rename to py123d/datatypes/maps/map_datatypes.py index 8a96d1d0..dc9e4820 100644 --- a/d123/datatypes/maps/map_datatypes.py +++ b/py123d/datatypes/maps/map_datatypes.py @@ -1,6 +1,6 @@ from __future__ import annotations -from d123.common.utils.enums import SerialIntEnum +from py123d.common.utils.enums import SerialIntEnum # TODO: Add stop pads or stop lines. # - Add type for stop zones. diff --git a/d123/datatypes/maps/map_metadata.py b/py123d/datatypes/maps/map_metadata.py similarity index 91% rename from d123/datatypes/maps/map_metadata.py rename to py123d/datatypes/maps/map_metadata.py index c643f43b..14fd13c8 100644 --- a/d123/datatypes/maps/map_metadata.py +++ b/py123d/datatypes/maps/map_metadata.py @@ -3,7 +3,7 @@ from dataclasses import asdict, dataclass from typing import Any, Dict, Optional -import d123 +import py123d # TODO: Refactor the usage of the map map metadata in this repo. 
@@ -18,7 +18,7 @@ class MapMetadata: location: str map_has_z: bool map_is_local: bool # True, if map is per log - version: str = str(d123.__version__) + version: str = str(py123d.__version__) def to_dict(self) -> dict: return asdict(self) diff --git a/d123/datatypes/scene/__init__.py b/py123d/datatypes/scene/__init__.py similarity index 100% rename from d123/datatypes/scene/__init__.py rename to py123d/datatypes/scene/__init__.py diff --git a/d123/datatypes/scene/abstract_scene.py b/py123d/datatypes/scene/abstract_scene.py similarity index 84% rename from d123/datatypes/scene/abstract_scene.py rename to py123d/datatypes/scene/abstract_scene.py index 619ef273..ca302047 100644 --- a/d123/datatypes/scene/abstract_scene.py +++ b/py123d/datatypes/scene/abstract_scene.py @@ -3,14 +3,14 @@ import abc from typing import List, Optional -from d123.datatypes.detections.detection import BoxDetectionWrapper, DetectionRecording, TrafficLightDetectionWrapper -from d123.datatypes.maps.abstract_map import AbstractMap -from d123.datatypes.scene.scene_metadata import LogMetadata, SceneExtractionMetadata -from d123.datatypes.sensors.camera.pinhole_camera import PinholeCamera, PinholeCameraType -from d123.datatypes.sensors.lidar.lidar import LiDAR, LiDARType -from d123.datatypes.time.time_point import TimePoint -from d123.datatypes.vehicle_state.ego_state import EgoStateSE3 -from d123.datatypes.vehicle_state.vehicle_parameters import VehicleParameters +from py123d.datatypes.detections.detection import BoxDetectionWrapper, DetectionRecording, TrafficLightDetectionWrapper +from py123d.datatypes.maps.abstract_map import AbstractMap +from py123d.datatypes.scene.scene_metadata import LogMetadata, SceneExtractionMetadata +from py123d.datatypes.sensors.camera.pinhole_camera import PinholeCamera, PinholeCameraType +from py123d.datatypes.sensors.lidar.lidar import LiDAR, LiDARType +from py123d.datatypes.time.time_point import TimePoint +from py123d.datatypes.vehicle_state.ego_state import EgoStateSE3 +from py123d.datatypes.vehicle_state.vehicle_parameters import VehicleParameters class AbstractScene(abc.ABC): diff --git a/d123/datatypes/scene/abstract_scene_builder.py b/py123d/datatypes/scene/abstract_scene_builder.py similarity index 77% rename from d123/datatypes/scene/abstract_scene_builder.py rename to py123d/datatypes/scene/abstract_scene_builder.py index 56eb9a6b..87327c05 100644 --- a/d123/datatypes/scene/abstract_scene_builder.py +++ b/py123d/datatypes/scene/abstract_scene_builder.py @@ -1,9 +1,9 @@ import abc from typing import Iterator -from d123.common.multithreading.worker_utils import WorkerPool -from d123.datatypes.scene.abstract_scene import AbstractScene -from d123.datatypes.scene.scene_filter import SceneFilter +from py123d.common.multithreading.worker_utils import WorkerPool +from py123d.datatypes.scene.abstract_scene import AbstractScene +from py123d.datatypes.scene.scene_filter import SceneFilter # TODO: Expand lazy implementation for scene builder. 
diff --git a/d123/datatypes/scene/arrow/__init__.py b/py123d/datatypes/scene/arrow/__init__.py similarity index 100% rename from d123/datatypes/scene/arrow/__init__.py rename to py123d/datatypes/scene/arrow/__init__.py diff --git a/d123/datatypes/scene/arrow/arrow_scene.py b/py123d/datatypes/scene/arrow/arrow_scene.py similarity index 86% rename from d123/datatypes/scene/arrow/arrow_scene.py rename to py123d/datatypes/scene/arrow/arrow_scene.py index f0ca15aa..21d65d8f 100644 --- a/d123/datatypes/scene/arrow/arrow_scene.py +++ b/py123d/datatypes/scene/arrow/arrow_scene.py @@ -3,12 +3,12 @@ import pyarrow as pa -from d123.common.utils.arrow_helper import get_lru_cached_arrow_table -from d123.datatypes.detections.detection import BoxDetectionWrapper, DetectionRecording, TrafficLightDetectionWrapper -from d123.datatypes.maps.abstract_map import AbstractMap -from d123.datatypes.maps.gpkg.gpkg_map import get_global_map_api, get_local_map_api -from d123.datatypes.scene.abstract_scene import AbstractScene -from d123.datatypes.scene.arrow.utils.arrow_getters import ( +from py123d.common.utils.arrow_helper import get_lru_cached_arrow_table +from py123d.datatypes.detections.detection import BoxDetectionWrapper, DetectionRecording, TrafficLightDetectionWrapper +from py123d.datatypes.maps.abstract_map import AbstractMap +from py123d.datatypes.maps.gpkg.gpkg_map import get_global_map_api, get_local_map_api +from py123d.datatypes.scene.abstract_scene import AbstractScene +from py123d.datatypes.scene.arrow.utils.arrow_getters import ( get_box_detections_from_arrow_table, get_camera_from_arrow_table, get_ego_vehicle_state_from_arrow_table, @@ -16,12 +16,12 @@ get_timepoint_from_arrow_table, get_traffic_light_detections_from_arrow_table, ) -from d123.datatypes.scene.arrow.utils.arrow_metadata_utils import get_log_metadata_from_arrow -from d123.datatypes.scene.scene_metadata import LogMetadata, SceneExtractionMetadata -from d123.datatypes.sensors.camera.pinhole_camera import PinholeCamera, PinholeCameraType -from d123.datatypes.sensors.lidar.lidar import LiDAR, LiDARType -from d123.datatypes.time.time_point import TimePoint -from d123.datatypes.vehicle_state.ego_state import EgoStateSE3 +from py123d.datatypes.scene.arrow.utils.arrow_metadata_utils import get_log_metadata_from_arrow +from py123d.datatypes.scene.scene_metadata import LogMetadata, SceneExtractionMetadata +from py123d.datatypes.sensors.camera.pinhole_camera import PinholeCamera, PinholeCameraType +from py123d.datatypes.sensors.lidar.lidar import LiDAR, LiDARType +from py123d.datatypes.time.time_point import TimePoint +from py123d.datatypes.vehicle_state.ego_state import EgoStateSE3 class ArrowScene(AbstractScene): diff --git a/d123/datatypes/scene/arrow/arrow_scene_builder.py b/py123d/datatypes/scene/arrow/arrow_scene_builder.py similarity index 91% rename from d123/datatypes/scene/arrow/arrow_scene_builder.py rename to py123d/datatypes/scene/arrow/arrow_scene_builder.py index c2abd492..e381459b 100644 --- a/d123/datatypes/scene/arrow/arrow_scene_builder.py +++ b/py123d/datatypes/scene/arrow/arrow_scene_builder.py @@ -3,14 +3,14 @@ from pathlib import Path from typing import Iterator, List, Optional, Set, Union -from d123.common.multithreading.worker_utils import WorkerPool, worker_map -from d123.common.utils.arrow_helper import get_lru_cached_arrow_table -from d123.datatypes.scene.abstract_scene import AbstractScene -from d123.datatypes.scene.abstract_scene_builder import SceneBuilder -from d123.datatypes.scene.arrow.arrow_scene import 
ArrowScene -from d123.datatypes.scene.arrow.utils.arrow_metadata_utils import get_log_metadata_from_arrow -from d123.datatypes.scene.scene_filter import SceneFilter -from d123.datatypes.scene.scene_metadata import SceneExtractionMetadata +from py123d.common.multithreading.worker_utils import WorkerPool, worker_map +from py123d.common.utils.arrow_helper import get_lru_cached_arrow_table +from py123d.datatypes.scene.abstract_scene import AbstractScene +from py123d.datatypes.scene.abstract_scene_builder import SceneBuilder +from py123d.datatypes.scene.arrow.arrow_scene import ArrowScene +from py123d.datatypes.scene.arrow.utils.arrow_metadata_utils import get_log_metadata_from_arrow +from py123d.datatypes.scene.scene_filter import SceneFilter +from py123d.datatypes.scene.scene_metadata import SceneExtractionMetadata class ArrowSceneBuilder(SceneBuilder): diff --git a/d123/datatypes/scene/arrow/utils/__init__.py b/py123d/datatypes/scene/arrow/utils/__init__.py similarity index 100% rename from d123/datatypes/scene/arrow/utils/__init__.py rename to py123d/datatypes/scene/arrow/utils/__init__.py diff --git a/d123/datatypes/scene/arrow/utils/arrow_getters.py b/py123d/datatypes/scene/arrow/utils/arrow_getters.py similarity index 88% rename from d123/datatypes/scene/arrow/utils/arrow_getters.py rename to py123d/datatypes/scene/arrow/utils/arrow_getters.py index 7ba3e96d..6fdf842a 100644 --- a/d123/datatypes/scene/arrow/utils/arrow_getters.py +++ b/py123d/datatypes/scene/arrow/utils/arrow_getters.py @@ -9,7 +9,7 @@ import numpy.typing as npt import pyarrow as pa -from d123.datatypes.detections.detection import ( +from py123d.datatypes.detections.detection import ( BoxDetection, BoxDetectionMetadata, BoxDetectionSE3, @@ -18,14 +18,14 @@ TrafficLightDetectionWrapper, TrafficLightStatus, ) -from d123.datatypes.detections.detection_types import DetectionType -from d123.datatypes.scene.scene_metadata import LogMetadata -from d123.datatypes.sensors.camera.pinhole_camera import PinholeCamera, PinholeCameraType -from d123.datatypes.sensors.lidar.lidar import LiDAR, LiDARType -from d123.datatypes.time.time_point import TimePoint -from d123.datatypes.vehicle_state.ego_state import EgoStateSE3 -from d123.datatypes.vehicle_state.vehicle_parameters import VehicleParameters -from d123.geometry import BoundingBoxSE3, StateSE3, Vector3D +from py123d.datatypes.detections.detection_types import DetectionType +from py123d.datatypes.scene.scene_metadata import LogMetadata +from py123d.datatypes.sensors.camera.pinhole_camera import PinholeCamera, PinholeCameraType +from py123d.datatypes.sensors.lidar.lidar import LiDAR, LiDARType +from py123d.datatypes.time.time_point import TimePoint +from py123d.datatypes.vehicle_state.ego_state import EgoStateSE3 +from py123d.datatypes.vehicle_state.vehicle_parameters import VehicleParameters +from py123d.geometry import BoundingBoxSE3, StateSE3, Vector3D DATASET_SENSOR_ROOT: Dict[str, Path] = { "nuplan": Path(os.environ["NUPLAN_DATA_ROOT"]) / "nuplan-v1.1" / "sensor_blobs", @@ -146,11 +146,11 @@ def get_lidar_from_arrow_table( # NOTE: We move data specific import into if-else block, to avoid data specific import errors if log_metadata.dataset == "nuplan": - from d123.conversion.datasets.nuplan.nuplan_load_sensor import load_nuplan_lidar_from_path + from py123d.conversion.datasets.nuplan.nuplan_load_sensor import load_nuplan_lidar_from_path lidar = load_nuplan_lidar_from_path(full_lidar_path, lidar_metadata) elif log_metadata.dataset == "carla": - from 
d123.conversion.datasets.carla.carla_load_sensor import load_carla_lidar_from_path + from py123d.conversion.datasets.carla.carla_load_sensor import load_carla_lidar_from_path lidar = load_carla_lidar_from_path(full_lidar_path, lidar_metadata) elif log_metadata.dataset == "wopd": diff --git a/d123/datatypes/scene/arrow/utils/arrow_metadata_utils.py b/py123d/datatypes/scene/arrow/utils/arrow_metadata_utils.py similarity index 82% rename from d123/datatypes/scene/arrow/utils/arrow_metadata_utils.py rename to py123d/datatypes/scene/arrow/utils/arrow_metadata_utils.py index bcca116e..3f264b62 100644 --- a/d123/datatypes/scene/arrow/utils/arrow_metadata_utils.py +++ b/py123d/datatypes/scene/arrow/utils/arrow_metadata_utils.py @@ -5,8 +5,8 @@ import pyarrow as pa -from d123.common.utils.arrow_helper import get_lru_cached_arrow_table -from d123.datatypes.scene.scene_metadata import LogMetadata +from py123d.common.utils.arrow_helper import get_lru_cached_arrow_table +from py123d.datatypes.scene.scene_metadata import LogMetadata @lru_cache(maxsize=10000) diff --git a/d123/datatypes/scene/scene_filter.py b/py123d/datatypes/scene/scene_filter.py similarity index 95% rename from d123/datatypes/scene/scene_filter.py rename to py123d/datatypes/scene/scene_filter.py index 083d7ab1..8d6cf102 100644 --- a/d123/datatypes/scene/scene_filter.py +++ b/py123d/datatypes/scene/scene_filter.py @@ -1,7 +1,7 @@ from dataclasses import dataclass from typing import List, Optional -from d123.datatypes.sensors.camera.pinhole_camera import PinholeCameraType +from py123d.datatypes.sensors.camera.pinhole_camera import PinholeCameraType # TODO: Add more filter options (e.g. scene tags, ego movement, or whatever appropriate) diff --git a/d123/datatypes/scene/scene_metadata.py b/py123d/datatypes/scene/scene_metadata.py similarity index 86% rename from d123/datatypes/scene/scene_metadata.py rename to py123d/datatypes/scene/scene_metadata.py index 27b56e85..eb42019c 100644 --- a/d123/datatypes/scene/scene_metadata.py +++ b/py123d/datatypes/scene/scene_metadata.py @@ -3,11 +3,11 @@ from dataclasses import asdict, dataclass, field from typing import Dict, Optional -import d123 -from d123.datatypes.maps.map_metadata import MapMetadata -from d123.datatypes.sensors.camera.pinhole_camera import PinholeCameraMetadata, PinholeCameraType -from d123.datatypes.sensors.lidar.lidar import LiDARMetadata, LiDARType -from d123.datatypes.vehicle_state.vehicle_parameters import VehicleParameters +import py123d +from py123d.datatypes.maps.map_metadata import MapMetadata +from py123d.datatypes.sensors.camera.pinhole_camera import PinholeCameraMetadata, PinholeCameraType +from py123d.datatypes.sensors.lidar.lidar import LiDARMetadata, LiDARType +from py123d.datatypes.vehicle_state.vehicle_parameters import VehicleParameters @dataclass @@ -24,7 +24,7 @@ class LogMetadata: lidar_metadata: Dict[LiDARType, LiDARMetadata] = field(default_factory=dict) map_metadata: Optional[MapMetadata] = None - version: str = str(d123.__version__) + version: str = str(py123d.__version__) @classmethod def from_dict(cls, data_dict: Dict) -> LogMetadata: diff --git a/d123/datatypes/sensors/__init__.py b/py123d/datatypes/sensors/__init__.py similarity index 100% rename from d123/datatypes/sensors/__init__.py rename to py123d/datatypes/sensors/__init__.py diff --git a/d123/datatypes/sensors/camera/__init__.py b/py123d/datatypes/sensors/camera/__init__.py similarity index 100% rename from d123/datatypes/sensors/camera/__init__.py rename to 
py123d/datatypes/sensors/camera/__init__.py diff --git a/d123/datatypes/sensors/camera/pinhole_camera.py b/py123d/datatypes/sensors/camera/pinhole_camera.py similarity index 97% rename from d123/datatypes/sensors/camera/pinhole_camera.py rename to py123d/datatypes/sensors/camera/pinhole_camera.py index 33249d17..0bb99be6 100644 --- a/d123/datatypes/sensors/camera/pinhole_camera.py +++ b/py123d/datatypes/sensors/camera/pinhole_camera.py @@ -7,14 +7,14 @@ import numpy.typing as npt from zmq import IntEnum -from d123.common.utils.enums import SerialIntEnum -from d123.common.utils.mixin import ArrayMixin -from d123.geometry.se import StateSE3 +from py123d.common.utils.enums import SerialIntEnum +from py123d.common.utils.mixin import ArrayMixin +from py123d.geometry.se import StateSE3 class PinholeCameraType(SerialIntEnum): """ - Enum for cameras in d123. + Enum for cameras in py123d. """ CAM_F0 = 0 diff --git a/d123/datatypes/sensors/lidar/__init__.py b/py123d/datatypes/sensors/lidar/__init__.py similarity index 100% rename from d123/datatypes/sensors/lidar/__init__.py rename to py123d/datatypes/sensors/lidar/__init__.py diff --git a/d123/datatypes/sensors/lidar/lidar.py b/py123d/datatypes/sensors/lidar/lidar.py similarity index 94% rename from d123/datatypes/sensors/lidar/lidar.py rename to py123d/datatypes/sensors/lidar/lidar.py index e057c46c..5f8385a1 100644 --- a/d123/datatypes/sensors/lidar/lidar.py +++ b/py123d/datatypes/sensors/lidar/lidar.py @@ -6,9 +6,9 @@ import numpy as np import numpy.typing as npt -from d123.common.utils.enums import SerialIntEnum -from d123.conversion.utils.sensor_utils.lidar_index_registry import LIDAR_INDEX_REGISTRY, LiDARIndex -from d123.geometry import StateSE3 +from py123d.common.utils.enums import SerialIntEnum +from py123d.conversion.utils.sensor_utils.lidar_index_registry import LIDAR_INDEX_REGISTRY, LiDARIndex +from py123d.geometry import StateSE3 class LiDARType(SerialIntEnum): diff --git a/d123/datatypes/time/__init__.py b/py123d/datatypes/time/__init__.py similarity index 100% rename from d123/datatypes/time/__init__.py rename to py123d/datatypes/time/__init__.py diff --git a/d123/datatypes/time/time_point.py b/py123d/datatypes/time/time_point.py similarity index 100% rename from d123/datatypes/time/time_point.py rename to py123d/datatypes/time/time_point.py diff --git a/d123/datatypes/vehicle_state/__init__.py b/py123d/datatypes/vehicle_state/__init__.py similarity index 100% rename from d123/datatypes/vehicle_state/__init__.py rename to py123d/datatypes/vehicle_state/__init__.py diff --git a/d123/datatypes/vehicle_state/ego_state.py b/py123d/datatypes/vehicle_state/ego_state.py similarity index 95% rename from d123/datatypes/vehicle_state/ego_state.py rename to py123d/datatypes/vehicle_state/ego_state.py index cc1aab7d..d8748658 100644 --- a/d123/datatypes/vehicle_state/ego_state.py +++ b/py123d/datatypes/vehicle_state/ego_state.py @@ -8,18 +8,18 @@ import numpy as np import numpy.typing as npt -from d123.common.utils.enums import classproperty -from d123.datatypes.detections.detection import BoxDetectionMetadata, BoxDetectionSE2, BoxDetectionSE3 -from d123.datatypes.detections.detection_types import DetectionType -from d123.datatypes.time.time_point import TimePoint -from d123.datatypes.vehicle_state.vehicle_parameters import ( +from py123d.common.utils.enums import classproperty +from py123d.datatypes.detections.detection import BoxDetectionMetadata, BoxDetectionSE2, BoxDetectionSE3 +from py123d.datatypes.detections.detection_types import 
DetectionType
+from py123d.datatypes.time.time_point import TimePoint
+from py123d.datatypes.vehicle_state.vehicle_parameters import (
     VehicleParameters,
     center_se2_to_rear_axle_se2,
     center_se3_to_rear_axle_se3,
     rear_axle_se2_to_center_se2,
     rear_axle_se3_to_center_se3,
 )
-from d123.geometry import BoundingBoxSE2, BoundingBoxSE3, StateSE2, StateSE3, Vector2D, Vector3D
+from py123d.geometry import BoundingBoxSE2, BoundingBoxSE3, StateSE2, StateSE3, Vector2D, Vector3D

 EGO_TRACK_TOKEN: Final[str] = "ego_vehicle"
diff --git a/d123/datatypes/vehicle_state/vehicle_parameters.py b/py123d/datatypes/vehicle_state/vehicle_parameters.py
similarity index 95%
rename from d123/datatypes/vehicle_state/vehicle_parameters.py
rename to py123d/datatypes/vehicle_state/vehicle_parameters.py
index 6955616b..2e177f74 100644
--- a/d123/datatypes/vehicle_state/vehicle_parameters.py
+++ b/py123d/datatypes/vehicle_state/vehicle_parameters.py
@@ -2,9 +2,9 @@

 from dataclasses import asdict, dataclass

-from d123.geometry import StateSE2, StateSE3, Vector2D, Vector3D
-from d123.geometry.transform.transform_se2 import translate_se2_along_body_frame
-from d123.geometry.transform.transform_se3 import translate_se3_along_body_frame
+from py123d.geometry import StateSE2, StateSE3, Vector2D, Vector3D
+from py123d.geometry.transform.transform_se2 import translate_se2_along_body_frame
+from py123d.geometry.transform.transform_se3 import translate_se3_along_body_frame


 @dataclass
diff --git a/py123d/geometry/__init__.py b/py123d/geometry/__init__.py
new file mode 100644
index 00000000..c391b29a
--- /dev/null
+++ b/py123d/geometry/__init__.py
@@ -0,0 +1,22 @@
+from py123d.geometry.geometry_index import (
+    Point2DIndex,
+    Point3DIndex,
+    BoundingBoxSE2Index,
+    BoundingBoxSE3Index,
+    Corners2DIndex,
+    Corners3DIndex,
+    EulerAnglesIndex,
+    EulerStateSE3Index,
+    QuaternionIndex,
+    StateSE2Index,
+    StateSE3Index,
+    Vector2DIndex,
+    Vector3DIndex,
+)
+from py123d.geometry.point import Point2D, Point3D
+from py123d.geometry.vector import Vector2D, Vector3D
+from py123d.geometry.rotation import EulerAngles, Quaternion
+from py123d.geometry.se import EulerStateSE3, StateSE2, StateSE3
+from py123d.geometry.bounding_box import BoundingBoxSE2, BoundingBoxSE3
+from py123d.geometry.polyline import Polyline2D, Polyline3D, PolylineSE2
+from py123d.geometry.occupancy_map import OccupancyMap2D
diff --git a/d123/geometry/bounding_box.py b/py123d/geometry/bounding_box.py
similarity index 88%
rename from d123/geometry/bounding_box.py
rename to py123d/geometry/bounding_box.py
index b164fc40..2e3b3a77 100644
--- a/d123/geometry/bounding_box.py
+++ b/py123d/geometry/bounding_box.py
@@ -8,11 +8,11 @@
 import numpy.typing as npt
 import shapely.geometry as geom

-from d123.common.utils.mixin import ArrayMixin
-from d123.geometry.geometry_index import BoundingBoxSE2Index, BoundingBoxSE3Index, Corners2DIndex, Corners3DIndex
-from d123.geometry.point import Point2D, Point3D
-from d123.geometry.se import StateSE2, StateSE3
-from d123.geometry.utils.bounding_box_utils import bbse2_array_to_corners_array, bbse3_array_to_corners_array
+from py123d.common.utils.mixin import ArrayMixin
+from py123d.geometry.geometry_index import BoundingBoxSE2Index, BoundingBoxSE3Index, Corners2DIndex, Corners3DIndex
+from py123d.geometry.point import Point2D, Point3D
+from py123d.geometry.se import StateSE2, StateSE3
+from py123d.geometry.utils.bounding_box_utils import bbse2_array_to_corners_array, bbse3_array_to_corners_array


 class BoundingBoxSE2(ArrayMixin):
@@ -20,7 +20,7 @@ class BoundingBoxSE2(ArrayMixin):
     Rotated bounding box in 2D defined by center (StateSE2), length and width.

     Example:
-    >>> from d123.geometry import StateSE2
+    >>> from py123d.geometry import StateSE2
     >>> bbox = BoundingBoxSE2(center=StateSE2(1.0, 2.0, 0.5), length=4.0, width=2.0)
     >>> bbox.array
     array([1. , 2. , 0.5, 4. , 2. ])
@@ -50,7 +50,7 @@ def from_array(cls, array: npt.NDArray[np.float64], copy: bool = True) -> Boundi
         """Create a BoundingBoxSE2 from a numpy array.

         :param array: A 1D numpy array containing the bounding box parameters, indexed by \
-            :class:`~d123.geometry.BoundingBoxSE2Index`.
+            :class:`~py123d.geometry.BoundingBoxSE2Index`.
         :param copy: Whether to copy the input array. Defaults to True.
         :return: A BoundingBoxSE2 instance.
         """
@@ -94,7 +94,7 @@ def width(self) -> float:

     @cached_property
     def array(self) -> npt.NDArray[np.float64]:
-        """Converts the BoundingBoxSE2 instance to a numpy array, indexed by :class:`~d123.geometry.BoundingBoxSE2Index`.
+        """Converts the BoundingBoxSE2 instance to a numpy array, indexed by :class:`~py123d.geometry.BoundingBoxSE2Index`.

         :return: A numpy array of shape (5,) containing the bounding box parameters [x, y, yaw, length, width].
         """
@@ -121,7 +121,7 @@ def corners_array(self) -> npt.NDArray[np.float64]:
         """Returns the corner points of the bounding box as a numpy array.

         :return: A numpy array of shape (4, 2) containing the corner points of the bounding box, \
-            indexed by :class:`~d123.geometry.Corners2DIndex` and :class:`~d123.geometry.Point2DIndex`.
+            indexed by :class:`~py123d.geometry.Corners2DIndex` and :class:`~py123d.geometry.Point2DIndex`.
         """
         return bbse2_array_to_corners_array(self.array)

@@ -129,7 +129,7 @@ def corners_array(self) -> npt.NDArray[np.float64]:
     def corners_dict(self) -> Dict[Corners2DIndex, Point2D]:
         """Returns the corner points of the bounding box as a dictionary.

-        :return: A dictionary mapping :class:`~d123.geometry.Corners2DIndex` to :class:`~d123.geometry.Point2D` instances.
+        :return: A dictionary mapping :class:`~py123d.geometry.Corners2DIndex` to :class:`~py123d.geometry.Point2D` instances.
         """
         corners_array = self.corners_array
         return {index: Point2D.from_array(corners_array[index]) for index in Corners2DIndex}
@@ -140,7 +140,7 @@ class BoundingBoxSE3(ArrayMixin):
     Rotated bounding box in 3D defined by center with quaternion rotation (StateSE3), length, width and height.

     Example:
-    >>> from d123.geometry import StateSE3
+    >>> from py123d.geometry import StateSE3
     >>> bbox = BoundingBoxSE3(center=StateSE3(1.0, 2.0, 3.0, 1.0, 0.0, 0.0, 0.0), length=4.0, width=2.0, height=1.5)
     >>> bbox.array
     array([1. , 2. , 3. , 1. , 0. , 0. , 0. , 4. , 2. , 1.5])
@@ -172,7 +172,7 @@ def from_array(cls, array: npt.NDArray[np.float64], copy: bool = True) -> Boundi
         """Create a BoundingBoxSE3 from a numpy array.

         :param array: A 1D numpy array containing the bounding box parameters, indexed by \
-            :class:`~d123.geometry.BoundingBoxSE3Index`.
+            :class:`~py123d.geometry.BoundingBoxSE3Index`.
         :param copy: Whether to copy the input array. Defaults to True.
         :return: A BoundingBoxSE3 instance.
         """
@@ -235,7 +235,7 @@ def array(self) -> npt.NDArray[np.float64]:
         """Convert the BoundingBoxSE3 instance to a numpy array.

         :return: A 1D numpy array containing the bounding box parameters, indexed by \
-            :class:`~d123.geometry.BoundingBoxSE3Index`.
+            :class:`~py123d.geometry.BoundingBoxSE3Index`.
         """
         return self._array

@@ -264,7 +264,7 @@ def corners_array(self) -> npt.NDArray[np.float64]:
         """Returns the corner points of the bounding box as a numpy array, shape (8, 3).

         :return: A numpy array of shape (8, 3) containing the corner points of the bounding box, \
-            indexed by :class:`~d123.geometry.Corners3DIndex` and :class:`~d123.geometry.Point3DIndex`.
+            indexed by :class:`~py123d.geometry.Corners3DIndex` and :class:`~py123d.geometry.Point3DIndex`.
         """
         return bbse3_array_to_corners_array(self.array)

@@ -272,8 +272,8 @@ def corners_dict(self) -> Dict[Corners3DIndex, Point3D]:
         """Returns the corner points of the bounding box as a dictionary.

-        :return: A dictionary mapping :class:`~d123.geometry.Corners3DIndex` to \
-            :class:`~d123.geometry.Point3D` instances.
+        :return: A dictionary mapping :class:`~py123d.geometry.Corners3DIndex` to \
+            :class:`~py123d.geometry.Point3D` instances.
         """
         corners_array = self.corners_array
         return {index: Point3D.from_array(corners_array[index]) for index in Corners3DIndex}
diff --git a/d123/geometry/geometry_index.py b/py123d/geometry/geometry_index.py
similarity index 99%
rename from d123/geometry/geometry_index.py
rename to py123d/geometry/geometry_index.py
index aa42cc4b..5d596f77 100644
--- a/d123/geometry/geometry_index.py
+++ b/py123d/geometry/geometry_index.py
@@ -1,6 +1,6 @@
 from enum import IntEnum

-from d123.common.utils.enums import classproperty
+from py123d.common.utils.enums import classproperty


 class Point2DIndex(IntEnum):
diff --git a/d123/geometry/occupancy_map.py b/py123d/geometry/occupancy_map.py
similarity index 98%
rename from d123/geometry/occupancy_map.py
rename to py123d/geometry/occupancy_map.py
index 648ea91a..a0e4021a 100644
--- a/d123/geometry/occupancy_map.py
+++ b/py123d/geometry/occupancy_map.py
@@ -8,7 +8,7 @@
 from shapely.geometry.base import BaseGeometry
 from shapely.strtree import STRtree

-from d123.geometry.geometry_index import Point2DIndex
+from py123d.geometry.geometry_index import Point2DIndex


 class OccupancyMap2D:
@@ -156,7 +156,7 @@ def contains_vectorized(self, points: npt.NDArray[np.float64]) -> npt.NDArray[np
         NOTE: This function can be significantly faster than using the str-tree, if the number of
         geometries is relatively small compared to the number of input-points.

-        :param points: array of shape (num_points, 2), indexed by :class:`~d123.geometry.Point2DIndex`.
+        :param points: array of shape (num_points, 2), indexed by :class:`~py123d.geometry.Point2DIndex`.
         :return: boolean array of shape (polygons, input-points)
         """
         output = np.zeros((len(self._geometries), len(points)), dtype=bool)
diff --git a/d123/geometry/point.py b/py123d/geometry/point.py
similarity index 93%
rename from d123/geometry/point.py
rename to py123d/geometry/point.py
index b3daa3c8..29d7b00a 100644
--- a/d123/geometry/point.py
+++ b/py123d/geometry/point.py
@@ -7,8 +7,8 @@
 import numpy.typing as npt
 import shapely.geometry as geom

-from d123.common.utils.mixin import ArrayMixin
-from d123.geometry.geometry_index import Point2DIndex, Point3DIndex
+from py123d.common.utils.mixin import ArrayMixin
+from py123d.geometry.geometry_index import Point2DIndex, Point3DIndex


 class Point2D(ArrayMixin):
@@ -28,7 +28,7 @@ def from_array(cls, array: npt.NDArray[np.float64], copy: bool = True) -> Point2
         """Constructs a Point2D from a numpy array.

         :param array: Array of shape (2,) representing the point coordinates [x, y], indexed by \
-            :class:`~d123.geometry.Point2DIndex`.
+            :class:`~py123d.geometry.Point2DIndex`.
         :param copy: Whether to copy the input array. Defaults to True.
         :return: A Point2D instance.
         """
@@ -59,7 +59,7 @@ def array(self) -> npt.NDArray[np.float64]:
         """The array representation of the point.

         :return: A numpy array of shape (2,) containing the point coordinates [x, y], indexed by \
-            :class:`~d123.geometry.Point2DIndex`.
+            :class:`~py123d.geometry.Point2DIndex`.
         """
         return self._array

@@ -98,7 +98,7 @@ def from_array(cls, array: npt.NDArray[np.float64], copy: bool = True) -> Point3
         """Constructs a Point3D from a numpy array.

         :param array: Array of shape (3,) representing the point coordinates [x, y, z], indexed by \
-            :class:`~d123.geometry.Point3DIndex`.
+            :class:`~py123d.geometry.Point3DIndex`.
         :param copy: Whether to copy the input array. Defaults to True.
         :return: A Point3D instance.
         """
@@ -113,7 +113,7 @@ def array(self) -> npt.NDArray[np.float64]:
         """The array representation of the point.

         :return: A numpy array of shape (3,) containing the point coordinates [x, y, z], indexed by \
-            :class:`~d123.geometry.Point3DIndex`.
+            :class:`~py123d.geometry.Point3DIndex`.
         """
         return self._array
diff --git a/d123/geometry/polyline.py b/py123d/geometry/polyline.py
similarity index 95%
rename from d123/geometry/polyline.py
rename to py123d/geometry/polyline.py
index 19004273..6d971682 100644
--- a/d123/geometry/polyline.py
+++ b/py123d/geometry/polyline.py
@@ -9,13 +9,13 @@
 import shapely.geometry as geom
 from scipy.interpolate import interp1d

-from d123.common.utils.mixin import ArrayMixin
-from d123.geometry.geometry_index import Point2DIndex, Point3DIndex, StateSE2Index
-from d123.geometry.point import Point2D, Point3D
-from d123.geometry.se import StateSE2
-from d123.geometry.utils.constants import DEFAULT_Z
-from d123.geometry.utils.polyline_utils import get_linestring_yaws, get_path_progress
-from d123.geometry.utils.rotation_utils import normalize_angle
+from py123d.common.utils.mixin import ArrayMixin
+from py123d.geometry.geometry_index import Point2DIndex, Point3DIndex, StateSE2Index
+from py123d.geometry.point import Point2D, Point3D
+from py123d.geometry.se import StateSE2
+from py123d.geometry.utils.constants import DEFAULT_Z
+from py123d.geometry.utils.polyline_utils import get_linestring_yaws, get_path_progress
+from py123d.geometry.utils.rotation_utils import normalize_angle

 # TODO: Implement PolylineSE3
 # TODO: Benchmark interpolation performance and reconsider reliance on LineString
@@ -45,7 +45,7 @@ def from_array(cls, polyline_array: npt.NDArray[np.float32]) -> Polyline2D:
         """Creates a Polyline2D from a numpy array.

         :param polyline_array: A numpy array of shape (N, 2) or (N, 3), e.g. indexed by \
-            :class:`~d123.geometry.Point2DIndex` or :class:`~d123.geometry.Point3DIndex`.
+            :class:`~py123d.geometry.Point2DIndex` or :class:`~py123d.geometry.Point3DIndex`.
         :raises ValueError: If the input array is not of the expected shape.
         :return: A Polyline2D instance.
         """
@@ -69,7 +69,7 @@ def from_discrete_points(cls, discrete_points: List[Point2D]) -> Polyline2D:

     @property
     def array(self) -> npt.NDArray[np.float64]:
-        """Converts the polyline to a numpy array, indexed by :class:`~d123.geometry.Point2DIndex`.
+        """Converts the polyline to a numpy array, indexed by :class:`~py123d.geometry.Point2DIndex`.

         :return: A numpy array of shape (N, 2) representing the polyline.
         """
@@ -172,7 +172,7 @@ def from_array(cls, polyline_array: npt.NDArray[np.float32]) -> PolylineSE2:
         """Creates a PolylineSE2 from a numpy array.

         :param polyline_array: The input numpy array representing, either indexed by \
-            :class:`~d123.geometry.Point2DIndex` or :class:`~d123.geometry.StateSE2Index`.
+            :class:`~py123d.geometry.Point2DIndex` or :class:`~py123d.geometry.StateSE2Index`.
         :raises ValueError: If the input array is not of the expected shape.
         :return: A PolylineSE2 representing the same path as the input array.
         """
@@ -272,7 +272,7 @@ def from_array(cls, array: npt.NDArray[np.float64]) -> Polyline3D:
         """Creates a Polyline3D from a numpy array.

         :param array: A numpy array of shape (N, 3) representing 3D points, e.g. indexed by \
-            :class:`~d123.geometry.Point3DIndex`.
+            :class:`~py123d.geometry.Point3DIndex`.
         :return: A Polyline3D instance.
         """
         assert array.ndim == 2 and array.shape[1] == 3, "Array must be 3D with shape (N, 3)"
@@ -299,7 +299,7 @@ def polyline_se2(self) -> PolylineSE2:
     def array(self) -> npt.NDArray[np.float64]:
         """Converts the 3D polyline to the discrete 3D points.

-        :return: A numpy array of shape (N, 3), indexed by :class:`~d123.geometry.Point3DIndex`.
+        :return: A numpy array of shape (N, 3), indexed by :class:`~py123d.geometry.Point3DIndex`.
         """
         return np.array(self.linestring.coords, dtype=np.float64)
diff --git a/d123/geometry/rotation.py b/py123d/geometry/rotation.py
similarity index 96%
rename from d123/geometry/rotation.py
rename to py123d/geometry/rotation.py
index 1ee21e25..fb537717 100644
--- a/d123/geometry/rotation.py
+++ b/py123d/geometry/rotation.py
@@ -6,9 +6,9 @@
 import numpy.typing as npt
 import pyquaternion

-from d123.common.utils.mixin import ArrayMixin
-from d123.geometry.geometry_index import EulerAnglesIndex, QuaternionIndex
-from d123.geometry.utils.rotation_utils import (
+from py123d.common.utils.mixin import ArrayMixin
+from py123d.geometry.geometry_index import EulerAnglesIndex, QuaternionIndex
+from py123d.geometry.utils.rotation_utils import (
     get_euler_array_from_quaternion_array,
     get_quaternion_array_from_rotation_matrix,
     get_rotation_matrix_from_euler_array,
@@ -37,7 +37,7 @@ def from_array(cls, array: npt.NDArray[np.float64], copy: bool = True) -> EulerA
         """Constructs a EulerAngles from a numpy array.

         :param array: Array of shape (3,) representing the euler angles [roll, pitch, yaw], indexed by \
-            :class:`~d123.geometry.EulerAnglesIndex`.
+            :class:`~py123d.geometry.EulerAnglesIndex`.
         :param copy: Whether to copy the input array. Defaults to True.
         :return: A EulerAngles instance.
         """
@@ -90,7 +90,7 @@ def array(self) -> npt.NDArray[np.float64]:
         """Converts the EulerAngles instance to a numpy array.

         :return: A numpy array of shape (3,) containing the Euler angles [roll, pitch, yaw], indexed by \
-            :class:`~d123.geometry.EulerAnglesIndex`.
+            :class:`~py123d.geometry.EulerAnglesIndex`.
         """
         return self._array

@@ -205,7 +205,7 @@ def array(self) -> npt.NDArray[np.float64]:
         """Converts the Quaternion instance to a numpy array.

         :return: A numpy array of shape (4,) containing the quaternion [qw, qx, qy, qz], indexed by \
-            :class:`~d123.geometry.QuaternionIndex`.
+            :class:`~py123d.geometry.QuaternionIndex`.
         """
         return self._array
""" @@ -416,7 +416,7 @@ def array(self) -> npt.NDArray[np.float64]: """Returns the StateSE3 instance as a numpy array. :return: A numpy array of shape (6,), indexed by \ - :class:`~d123.geometry.geometry_index.StateSE3Index`. + :class:`~py123d.geometry.geometry_index.StateSE3Index`. """ return self._array diff --git a/d123/geometry/test/__init__.py b/py123d/geometry/test/__init__.py similarity index 100% rename from d123/geometry/test/__init__.py rename to py123d/geometry/test/__init__.py diff --git a/d123/geometry/test/test_bounding_box.py b/py123d/geometry/test/test_bounding_box.py similarity index 97% rename from d123/geometry/test/test_bounding_box.py rename to py123d/geometry/test/test_bounding_box.py index a102b4da..34c24fc6 100644 --- a/d123/geometry/test/test_bounding_box.py +++ b/py123d/geometry/test/test_bounding_box.py @@ -3,9 +3,9 @@ import numpy as np import shapely.geometry as geom -from d123.common.utils.mixin import ArrayMixin -from d123.geometry import BoundingBoxSE2, BoundingBoxSE3, Point2D, Point3D, StateSE2, StateSE3 -from d123.geometry.geometry_index import ( +from py123d.common.utils.mixin import ArrayMixin +from py123d.geometry import BoundingBoxSE2, BoundingBoxSE3, Point2D, Point3D, StateSE2, StateSE3 +from py123d.geometry.geometry_index import ( BoundingBoxSE2Index, BoundingBoxSE3Index, Corners2DIndex, diff --git a/d123/geometry/test/test_occupancy_map.py b/py123d/geometry/test/test_occupancy_map.py similarity index 99% rename from d123/geometry/test/test_occupancy_map.py rename to py123d/geometry/test/test_occupancy_map.py index 7344f4de..2390300d 100644 --- a/d123/geometry/test/test_occupancy_map.py +++ b/py123d/geometry/test/test_occupancy_map.py @@ -3,7 +3,7 @@ import numpy as np import shapely.geometry as geom -from d123.geometry import OccupancyMap2D +from py123d.geometry import OccupancyMap2D class TestOccupancyMap2D(unittest.TestCase): diff --git a/d123/geometry/test/test_point.py b/py123d/geometry/test/test_point.py similarity index 98% rename from d123/geometry/test/test_point.py rename to py123d/geometry/test/test_point.py index 5c1b30d7..15932540 100644 --- a/d123/geometry/test/test_point.py +++ b/py123d/geometry/test/test_point.py @@ -3,8 +3,8 @@ import numpy as np -from d123.geometry import Point2D, Point3D -from d123.geometry.geometry_index import Point2DIndex, Point3DIndex +from py123d.geometry import Point2D, Point3D +from py123d.geometry.geometry_index import Point2DIndex, Point3DIndex class TestPoint2D(unittest.TestCase): diff --git a/d123/geometry/test/test_polyline.py b/py123d/geometry/test/test_polyline.py similarity index 99% rename from d123/geometry/test/test_polyline.py rename to py123d/geometry/test/test_polyline.py index d1c1d652..32e0acc7 100644 --- a/d123/geometry/test/test_polyline.py +++ b/py123d/geometry/test/test_polyline.py @@ -3,7 +3,7 @@ import numpy as np import shapely.geometry as geom -from d123.geometry import Point2D, Point3D, Polyline2D, Polyline3D, PolylineSE2, StateSE2 +from py123d.geometry import Point2D, Point3D, Polyline2D, Polyline3D, PolylineSE2, StateSE2 class TestPolyline2D(unittest.TestCase): diff --git a/d123/geometry/test/test_rotation.py b/py123d/geometry/test/test_rotation.py similarity index 98% rename from d123/geometry/test/test_rotation.py rename to py123d/geometry/test/test_rotation.py index 65f902d9..133d056b 100644 --- a/d123/geometry/test/test_rotation.py +++ b/py123d/geometry/test/test_rotation.py @@ -2,8 +2,8 @@ import numpy as np -from d123.geometry.geometry_index import EulerAnglesIndex, 
QuaternionIndex -from d123.geometry.rotation import EulerAngles, Quaternion +from py123d.geometry.geometry_index import EulerAnglesIndex, QuaternionIndex +from py123d.geometry.rotation import EulerAngles, Quaternion class TestEulerAngles(unittest.TestCase): diff --git a/d123/geometry/test/test_vector.py b/py123d/geometry/test/test_vector.py similarity index 98% rename from d123/geometry/test/test_vector.py rename to py123d/geometry/test/test_vector.py index 526a2104..4f3b159e 100644 --- a/d123/geometry/test/test_vector.py +++ b/py123d/geometry/test/test_vector.py @@ -2,7 +2,7 @@ import numpy as np -from d123.geometry import Vector2D, Vector2DIndex, Vector3D, Vector3DIndex +from py123d.geometry import Vector2D, Vector2DIndex, Vector3D, Vector3DIndex class TestVector2D(unittest.TestCase): diff --git a/d123/geometry/torch/.gitkeep b/py123d/geometry/torch/.gitkeep similarity index 100% rename from d123/geometry/torch/.gitkeep rename to py123d/geometry/torch/.gitkeep diff --git a/d123/geometry/transform/__init__.py b/py123d/geometry/transform/__init__.py similarity index 100% rename from d123/geometry/transform/__init__.py rename to py123d/geometry/transform/__init__.py diff --git a/d123/geometry/transform/test/__init__.py b/py123d/geometry/transform/test/__init__.py similarity index 100% rename from d123/geometry/transform/test/__init__.py rename to py123d/geometry/transform/test/__init__.py diff --git a/d123/geometry/transform/test/test_transform_consistency.py b/py123d/geometry/transform/test/test_transform_consistency.py similarity index 98% rename from d123/geometry/transform/test/test_transform_consistency.py rename to py123d/geometry/transform/test/test_transform_consistency.py index 72facda0..32805f60 100644 --- a/d123/geometry/transform/test/test_transform_consistency.py +++ b/py123d/geometry/transform/test/test_transform_consistency.py @@ -3,9 +3,9 @@ import numpy as np import numpy.typing as npt -from d123.geometry import EulerStateSE3, StateSE2, Vector2D, Vector3D -from d123.geometry.geometry_index import EulerStateSE3Index, Point2DIndex, Point3DIndex, StateSE2Index -from d123.geometry.transform.transform_euler_se3 import ( +from py123d.geometry import EulerStateSE3, StateSE2, Vector2D, Vector3D +from py123d.geometry.geometry_index import EulerStateSE3Index, Point2DIndex, Point3DIndex, StateSE2Index +from py123d.geometry.transform.transform_euler_se3 import ( convert_absolute_to_relative_euler_se3_array, convert_absolute_to_relative_points_3d_array, convert_relative_to_absolute_euler_se3_array, @@ -14,7 +14,7 @@ translate_euler_se3_along_x, translate_euler_se3_along_y, ) -from d123.geometry.transform.transform_se2 import ( +from py123d.geometry.transform.transform_se2 import ( convert_absolute_to_relative_point_2d_array, convert_absolute_to_relative_se2_array, convert_relative_to_absolute_point_2d_array, diff --git a/d123/geometry/transform/test/test_transform_euler_se3.py b/py123d/geometry/transform/test/test_transform_euler_se3.py similarity index 99% rename from d123/geometry/transform/test/test_transform_euler_se3.py rename to py123d/geometry/transform/test/test_transform_euler_se3.py index f20203cc..8b146fd1 100644 --- a/d123/geometry/transform/test/test_transform_euler_se3.py +++ b/py123d/geometry/transform/test/test_transform_euler_se3.py @@ -3,8 +3,8 @@ import numpy as np import numpy.typing as npt -from d123.geometry import EulerStateSE3, Vector3D -from d123.geometry.transform.transform_euler_se3 import ( +from py123d.geometry import EulerStateSE3, Vector3D +from 
py123d.geometry.transform.transform_euler_se3 import ( convert_absolute_to_relative_euler_se3_array, convert_absolute_to_relative_points_3d_array, convert_relative_to_absolute_euler_se3_array, diff --git a/d123/geometry/transform/test/test_transform_se2.py b/py123d/geometry/transform/test/test_transform_se2.py similarity index 99% rename from d123/geometry/transform/test/test_transform_se2.py rename to py123d/geometry/transform/test/test_transform_se2.py index cbed45c6..60af633e 100644 --- a/d123/geometry/transform/test/test_transform_se2.py +++ b/py123d/geometry/transform/test/test_transform_se2.py @@ -3,8 +3,8 @@ import numpy as np import numpy.typing as npt -from d123.geometry import StateSE2, Vector2D -from d123.geometry.transform.transform_se2 import ( +from py123d.geometry import StateSE2, Vector2D +from py123d.geometry.transform.transform_se2 import ( convert_absolute_to_relative_point_2d_array, convert_absolute_to_relative_se2_array, convert_relative_to_absolute_point_2d_array, diff --git a/d123/geometry/transform/test/test_transform_se3.py b/py123d/geometry/transform/test/test_transform_se3.py similarity index 97% rename from d123/geometry/transform/test/test_transform_se3.py rename to py123d/geometry/transform/test/test_transform_se3.py index be936c71..0303eecf 100644 --- a/d123/geometry/transform/test/test_transform_se3.py +++ b/py123d/geometry/transform/test/test_transform_se3.py @@ -3,9 +3,9 @@ import numpy as np import numpy.typing as npt -import d123.geometry.transform.transform_euler_se3 as euler_transform_se3 -from d123.geometry import EulerStateSE3, EulerStateSE3Index, Point3D, Quaternion, StateSE3, StateSE3Index -from d123.geometry.transform.transform_se3 import ( +import py123d.geometry.transform.transform_euler_se3 as euler_transform_se3 +from py123d.geometry import EulerStateSE3, EulerStateSE3Index, Point3D, Quaternion, StateSE3, StateSE3Index +from py123d.geometry.transform.transform_se3 import ( convert_absolute_to_relative_points_3d_array, convert_absolute_to_relative_se3_array, convert_relative_to_absolute_points_3d_array, @@ -15,7 +15,7 @@ translate_se3_along_y, translate_se3_along_z, ) -from d123.geometry.utils.rotation_utils import ( +from py123d.geometry.utils.rotation_utils import ( get_rotation_matrices_from_euler_array, get_rotation_matrices_from_quaternion_array, ) diff --git a/d123/geometry/transform/transform_euler_se3.py b/py123d/geometry/transform/transform_euler_se3.py similarity index 97% rename from d123/geometry/transform/transform_euler_se3.py rename to py123d/geometry/transform/transform_euler_se3.py index 516f1ba9..e7c4d298 100644 --- a/d123/geometry/transform/transform_euler_se3.py +++ b/py123d/geometry/transform/transform_euler_se3.py @@ -3,8 +3,8 @@ import numpy as np import numpy.typing as npt -from d123.geometry import EulerAngles, EulerStateSE3, EulerStateSE3Index, Point3DIndex, Vector3D, Vector3DIndex -from d123.geometry.utils.rotation_utils import ( +from py123d.geometry import EulerAngles, EulerStateSE3, EulerStateSE3Index, Point3DIndex, Vector3D, Vector3DIndex +from py123d.geometry.utils.rotation_utils import ( get_rotation_matrices_from_euler_array, get_rotation_matrix_from_euler_array, normalize_angle, diff --git a/d123/geometry/transform/transform_se2.py b/py123d/geometry/transform/transform_se2.py similarity index 94% rename from d123/geometry/transform/transform_se2.py rename to py123d/geometry/transform/transform_se2.py index 48f718fa..0ee2d5b5 100644 --- a/d123/geometry/transform/transform_se2.py +++ 
b/py123d/geometry/transform/transform_se2.py @@ -3,8 +3,8 @@ import numpy as np import numpy.typing as npt -from d123.geometry import Point2DIndex, StateSE2, StateSE2Index, Vector2D, Vector2DIndex -from d123.geometry.utils.rotation_utils import normalize_angle +from py123d.geometry import Point2DIndex, StateSE2, StateSE2Index, Vector2D, Vector2DIndex +from py123d.geometry.utils.rotation_utils import normalize_angle def convert_absolute_to_relative_se2_array( @@ -14,9 +14,9 @@ def convert_absolute_to_relative_se2_array( :param origin: origin pose of relative coords system :param state_se2_array: array of SE2 states with (x,y,yaw), indexed by \ - :class:`~d123.geometry.geometry_index.StateSE2Index`, in last dim + :class:`~py123d.geometry.geometry_index.StateSE2Index`, in last dim :return: SE2 array, index by \ - :class:`~d123.geometry.geometry_index.StateSE2Index`, in last dim + :class:`~py123d.geometry.geometry_index.StateSE2Index`, in last dim """ if isinstance(origin, StateSE2): origin_array = origin.array @@ -188,11 +188,11 @@ def translate_2d_along_body_frame( ) -> npt.NDArray[np.float64]: """Translate 2D points along their body frame. - :param points_2d: Array of 2D points, indexed by :class:`~d123.geometry.Point2DIndex`. + :param points_2d: Array of 2D points, indexed by :class:`~py123d.geometry.Point2DIndex`. :param yaws: Array of yaw angles. :param x_translate: Array of x translation, i.e. forward translation. :param y_translate: Array of y translation, i.e. left translation. - :return: Array of translated 2D points, indexed by :class:`~d123.geometry.Point2DIndex`. + :return: Array of translated 2D points, indexed by :class:`~py123d.geometry.Point2DIndex`. """ assert points_2d.shape[-1] == len(Point2DIndex) half_pi = np.pi / 2.0 diff --git a/d123/geometry/transform/transform_se3.py b/py123d/geometry/transform/transform_se3.py similarity index 94% rename from d123/geometry/transform/transform_se3.py rename to py123d/geometry/transform/transform_se3.py index a11fa2e2..bc6b5fca 100644 --- a/d123/geometry/transform/transform_se3.py +++ b/py123d/geometry/transform/transform_se3.py @@ -3,8 +3,8 @@ import numpy as np import numpy.typing as npt -from d123.geometry import Point3DIndex, QuaternionIndex, StateSE3, StateSE3Index, Vector3D, Vector3DIndex -from d123.geometry.utils.rotation_utils import ( +from py123d.geometry import Point3DIndex, QuaternionIndex, StateSE3, StateSE3Index, Vector3D, Vector3DIndex +from py123d.geometry.utils.rotation_utils import ( conjugate_quaternion_array, get_rotation_matrices_from_quaternion_array, get_rotation_matrix_from_quaternion_array, @@ -20,7 +20,7 @@ def convert_absolute_to_relative_points_3d_array( :param origin: The origin state in the absolute frame, as a StateSE3 or np.ndarray. :param points_3d_array: The 3D points in the absolute frame. :raises TypeError: If the origin is not a StateSE3 or np.ndarray. - :return: The 3D points in the relative frame, indexed by :class:`~d123.geometry.Point3DIndex`. + :return: The 3D points in the relative frame, indexed by :class:`~py123d.geometry.Point3DIndex`. """ if isinstance(origin, StateSE3): @@ -49,7 +49,7 @@ def convert_absolute_to_relative_se3_array( :param origin: The origin state in the absolute frame, as a StateSE3 or np.ndarray. :param se3_array: The SE3 array in the absolute frame. :raises TypeError: If the origin is not a StateSE3 or np.ndarray. - :return: The SE3 array in the relative frame, indexed by :class:`~d123.geometry.StateSE3Index`. 
+ :return: The SE3 array in the relative frame, indexed by :class:`~py123d.geometry.StateSE3Index`. """ if isinstance(origin, StateSE3): origin_array = origin.array @@ -90,9 +90,9 @@ def convert_relative_to_absolute_points_3d_array( """Converts 3D points from the relative frame to the absolute frame. :param origin: The origin state in the absolute frame, as a StateSE3 or np.ndarray. - :param points_3d_array: The 3D points in the relative frame, indexed by :class:`~d123.geometry.Point3DIndex`. + :param points_3d_array: The 3D points in the relative frame, indexed by :class:`~py123d.geometry.Point3DIndex`. :raises TypeError: If the origin is not a StateSE3 or np.ndarray. - :return: The 3D points in the absolute frame, indexed by :class:`~d123.geometry.Point3DIndex`. + :return: The 3D points in the absolute frame, indexed by :class:`~py123d.geometry.Point3DIndex`. """ if isinstance(origin, StateSE3): t_origin = origin.point_3d.array @@ -118,7 +118,7 @@ def convert_relative_to_absolute_se3_array( :param origin: The origin state in the relative frame, as a StateSE3 or np.ndarray. :param se3_array: The SE3 array in the relative frame. :raises TypeError: If the origin is not a StateSE3 or np.ndarray. - :return: The SE3 array in the absolute frame, indexed by :class:`~d123.geometry.StateSE3Index`. + :return: The SE3 array in the absolute frame, indexed by :class:`~py123d.geometry.StateSE3Index`. """ if isinstance(origin, StateSE3): @@ -219,10 +219,10 @@ def translate_3d_along_body_frame( ) -> npt.NDArray[np.float64]: """Translates 3D points along a vector in the body frame defined by quaternions. - :param points_3d: Array of 3D points, index by :class:`~d123.geometry.Point3DIndex`. - :param quaternions: Array of quaternions, index by :class:`~d123.geometry.QuaternionIndex`. - :param translation: Array of translation vectors, index by :class:`~d123.geometry.Vector3DIndex`. - :return: The translated 3D points in the world frame, index by :class:`~d123.geometry.Point3DIndex`. + :param points_3d: Array of 3D points, index by :class:`~py123d.geometry.Point3DIndex`. + :param quaternions: Array of quaternions, index by :class:`~py123d.geometry.QuaternionIndex`. + :param translation: Array of translation vectors, index by :class:`~py123d.geometry.Vector3DIndex`. + :return: The translated 3D points in the world frame, index by :class:`~py123d.geometry.Point3DIndex`. 
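Since the transform helpers keep their signatures, the relative/absolute conversions can be sanity-checked against the identity case: expressing the origin pose in its own frame must yield a zero translation. A sketch (it assumes the renamed package is importable; StateSE3 takes x, y, z, qw, qx, qy, qz as shown above):

    >>> import numpy as np
    >>> from py123d.geometry import StateSE3
    >>> from py123d.geometry.transform.transform_se3 import convert_absolute_to_relative_se3_array
    >>> origin = StateSE3(1.0, 2.0, 3.0, 1.0, 0.0, 0.0, 0.0)  # identity rotation
    >>> rel = convert_absolute_to_relative_se3_array(origin, origin.array[None])
    >>> bool(np.allclose(rel[0, :3], 0.0))  # the origin relative to itself sits at zero
    True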
""" assert points_3d.shape[-1] == len(Point3DIndex) assert quaternions.shape[-1] == len(QuaternionIndex) diff --git a/d123/geometry/utils/__init__.py b/py123d/geometry/utils/__init__.py similarity index 100% rename from d123/geometry/utils/__init__.py rename to py123d/geometry/utils/__init__.py diff --git a/d123/geometry/utils/bounding_box_utils.py b/py123d/geometry/utils/bounding_box_utils.py similarity index 91% rename from d123/geometry/utils/bounding_box_utils.py rename to py123d/geometry/utils/bounding_box_utils.py index 72f6e994..d31058bd 100644 --- a/d123/geometry/utils/bounding_box_utils.py +++ b/py123d/geometry/utils/bounding_box_utils.py @@ -4,7 +4,7 @@ import numpy.typing as npt import shapely -from d123.geometry.geometry_index import ( +from py123d.geometry.geometry_index import ( BoundingBoxSE2Index, BoundingBoxSE3Index, Corners2DIndex, @@ -14,8 +14,8 @@ Vector2DIndex, Vector3DIndex, ) -from d123.geometry.transform.transform_se2 import translate_2d_along_body_frame -from d123.geometry.transform.transform_se3 import translate_3d_along_body_frame +from py123d.geometry.transform.transform_se2 import translate_2d_along_body_frame +from py123d.geometry.transform.transform_se3 import translate_3d_along_body_frame def get_corners_2d_factors() -> npt.NDArray[np.float64]: @@ -24,8 +24,8 @@ def get_corners_2d_factors() -> npt.NDArray[np.float64]: The factors are defined such that multiplying them with the length and width of the bounding box yields the corner coordinates in the body frame. - :return: A (4, 2), indexed by :class:`~d123.geometry.Corners2DIndex` and - :class:`~d123.geometry.Point2DIndex`, respectively. + :return: A (4, 2), indexed by :class:`~py123d.geometry.Corners2DIndex` and + :class:`~py123d.geometry.Point2DIndex`, respectively. """ # NOTE: ISO 8855 convention for rotation factors = np.zeros((len(Corners2DIndex), len(Point2DIndex)), dtype=np.float64) @@ -43,8 +43,8 @@ def get_corners_3d_factors() -> npt.NDArray[np.float64]: The factors are defined such that multiplying them with the length, width, and height of the bounding box yields the corner coordinates in the body frame. - :return: A (8, 3), indexed by :class:`~d123.geometry.Corners3DIndex` and - :class:`~d123.geometry.Vector3DIndex`, respectively. + :return: A (8, 3), indexed by :class:`~py123d.geometry.Corners3DIndex` and + :class:`~py123d.geometry.Vector3DIndex`, respectively. """ # NOTE: ISO 8855 convention for rotation factors = np.zeros((len(Corners3DIndex), len(Vector3DIndex)), dtype=np.float64) @@ -63,9 +63,9 @@ def get_corners_3d_factors() -> npt.NDArray[np.float64]: def bbse2_array_to_corners_array(bbse2: npt.NDArray[np.float64]) -> npt.NDArray[np.float64]: """Converts an array of BoundingBoxSE2 objects to the 2D coordinates array of their corners. - :param bbse2: Array of SE2 bounding boxes, indexed by :class:`~d123.geometry.BoundingBoxSE2Index`. + :param bbse2: Array of SE2 bounding boxes, indexed by :class:`~py123d.geometry.BoundingBoxSE2Index`. :return: Coordinates array of shape (..., 4, 2), indexed by - :class:`~d123.geometry.Corners2DIndex` and :class:`~d123.geometry.Point2DIndex`, respectively. + :class:`~py123d.geometry.Corners2DIndex` and :class:`~py123d.geometry.Point2DIndex`, respectively. 
""" assert bbse2.shape[-1] == len(BoundingBoxSE2Index) @@ -104,7 +104,7 @@ def corners_2d_array_to_polygon_array(corners_array: npt.NDArray[np.float64]) -> def bbse2_array_to_polygon_array(bbse2: npt.NDArray[np.float64]) -> npt.NDArray[np.object_]: """Converts an array of BoundingBoxSE2 objects to an array of shapely Polygons. - :param bbse2: Array of SE2 bounding boxes, indexed by :class:`~d123.geometry.BoundingBoxSE2Index`. + :param bbse2: Array of SE2 bounding boxes, indexed by :class:`~py123d.geometry.BoundingBoxSE2Index`. :return: Array of shapely Polygons. """ return corners_2d_array_to_polygon_array(bbse2_array_to_corners_array(bbse2)) @@ -113,9 +113,9 @@ def bbse2_array_to_polygon_array(bbse2: npt.NDArray[np.float64]) -> npt.NDArray[ def bbse3_array_to_corners_array(bbse3_array: npt.NDArray[np.float64]) -> npt.NDArray[np.float64]: """Converts an array of BoundingBoxSE3 objects to the 3D coordinates array of their corners. - :param bbse3_array: Array of SE3 bounding boxes, indexed by :class:`~d123.geometry.BoundingBoxSE3Index`. + :param bbse3_array: Array of SE3 bounding boxes, indexed by :class:`~py123d.geometry.BoundingBoxSE3Index`. :return: Coordinates array of shape (..., 8, 3), indexed by - :class:`~d123.geometry.Corners3DIndex` and :class:`~d123.geometry.Point3DIndex`, respectively. + :class:`~py123d.geometry.Corners3DIndex` and :class:`~py123d.geometry.Point3DIndex`, respectively. """ assert bbse3_array.shape[-1] == len(BoundingBoxSE3Index) diff --git a/d123/geometry/utils/constants.py b/py123d/geometry/utils/constants.py similarity index 100% rename from d123/geometry/utils/constants.py rename to py123d/geometry/utils/constants.py diff --git a/d123/geometry/utils/polyline_utils.py b/py123d/geometry/utils/polyline_utils.py similarity index 96% rename from d123/geometry/utils/polyline_utils.py rename to py123d/geometry/utils/polyline_utils.py index 5e82ea9f..20bf299f 100644 --- a/d123/geometry/utils/polyline_utils.py +++ b/py123d/geometry/utils/polyline_utils.py @@ -2,7 +2,7 @@ import numpy.typing as npt from shapely.geometry import LineString -from d123.geometry.geometry_index import Point2DIndex, StateSE2Index +from py123d.geometry.geometry_index import Point2DIndex, StateSE2Index def get_linestring_yaws(linestring: LineString) -> npt.NDArray[np.float64]: diff --git a/d123/geometry/utils/rotation_utils.py b/py123d/geometry/utils/rotation_utils.py similarity index 99% rename from d123/geometry/utils/rotation_utils.py rename to py123d/geometry/utils/rotation_utils.py index b8a8ddca..ea31dd0f 100644 --- a/d123/geometry/utils/rotation_utils.py +++ b/py123d/geometry/utils/rotation_utils.py @@ -3,7 +3,7 @@ import numpy as np import numpy.typing as npt -from d123.geometry.geometry_index import EulerAnglesIndex, QuaternionIndex +from py123d.geometry.geometry_index import EulerAnglesIndex, QuaternionIndex # import pyquaternion diff --git a/d123/geometry/utils/test/__init__.py b/py123d/geometry/utils/test/__init__.py similarity index 100% rename from d123/geometry/utils/test/__init__.py rename to py123d/geometry/utils/test/__init__.py diff --git a/d123/geometry/utils/test/test_bounding_box_utils.py b/py123d/geometry/utils/test/test_bounding_box_utils.py similarity index 97% rename from d123/geometry/utils/test/test_bounding_box_utils.py rename to py123d/geometry/utils/test/test_bounding_box_utils.py index 3bba0330..0cdb52e6 100644 --- a/d123/geometry/utils/test/test_bounding_box_utils.py +++ b/py123d/geometry/utils/test/test_bounding_box_utils.py @@ -4,7 +4,7 @@ import numpy.typing as 
npt import shapely -from d123.geometry.geometry_index import ( +from py123d.geometry.geometry_index import ( BoundingBoxSE3Index, Corners2DIndex, Corners3DIndex, @@ -12,16 +12,16 @@ Point2DIndex, Point3DIndex, ) -from d123.geometry.se import EulerStateSE3, StateSE3 -from d123.geometry.transform.transform_se3 import translate_se3_along_body_frame -from d123.geometry.utils.bounding_box_utils import ( +from py123d.geometry.se import EulerStateSE3, StateSE3 +from py123d.geometry.transform.transform_se3 import translate_se3_along_body_frame +from py123d.geometry.utils.bounding_box_utils import ( bbse2_array_to_corners_array, bbse2_array_to_polygon_array, bbse3_array_to_corners_array, corners_2d_array_to_polygon_array, get_corners_3d_factors, ) -from d123.geometry.vector import Vector3D +from py123d.geometry.vector import Vector3D class TestBoundingBoxUtils(unittest.TestCase): diff --git a/d123/geometry/utils/test/test_polyline_utils.py b/py123d/geometry/utils/test/test_polyline_utils.py similarity index 100% rename from d123/geometry/utils/test/test_polyline_utils.py rename to py123d/geometry/utils/test/test_polyline_utils.py diff --git a/d123/geometry/utils/test/test_rotation_utils.py b/py123d/geometry/utils/test/test_rotation_utils.py similarity index 100% rename from d123/geometry/utils/test/test_rotation_utils.py rename to py123d/geometry/utils/test/test_rotation_utils.py diff --git a/d123/geometry/utils/units.py b/py123d/geometry/utils/units.py similarity index 100% rename from d123/geometry/utils/units.py rename to py123d/geometry/utils/units.py diff --git a/d123/geometry/vector.py b/py123d/geometry/vector.py similarity index 95% rename from d123/geometry/vector.py rename to py123d/geometry/vector.py index 77a3f67f..f2846c1f 100644 --- a/d123/geometry/vector.py +++ b/py123d/geometry/vector.py @@ -6,8 +6,8 @@ import numpy as np import numpy.typing as npt -from d123.common.utils.mixin import ArrayMixin -from d123.geometry.geometry_index import Vector2DIndex, Vector3DIndex +from py123d.common.utils.mixin import ArrayMixin +from py123d.geometry.geometry_index import Vector2DIndex, Vector3DIndex class Vector2D(ArrayMixin): @@ -40,7 +40,7 @@ def from_array(cls, array: npt.NDArray[np.float64], copy: bool = True) -> Vector """Constructs a Vector2D from a numpy array. :param array: Array of shape (2,) representing the vector components [x, y], indexed by \ - :class:`~d123.geometry.Vector2DIndex`. + :class:`~py123d.geometry.Vector2DIndex`. :param copy: Whether to copy the input array. Defaults to True. :return: A Vector2D instance. """ @@ -71,7 +71,7 @@ def array(self) -> npt.NDArray[np.float64]: """The array representation of the 2D vector. :return: A numpy array of shape (2,) containing the vector components [x, y], indexed by \ - :class:`~d123.geometry.Vector2DIndex`. + :class:`~py123d.geometry.Vector2DIndex`. """ array = np.zeros(len(Vector2DIndex), dtype=np.float64) array[Vector2DIndex.X] = self.x @@ -166,7 +166,7 @@ def __init__(self, x: float, y: float, z: float): def from_array(cls, array: npt.NDArray[np.float64], copy: bool = True) -> Vector3D: """Constructs a Vector3D from a numpy array. - :param array: Array of shape (3,), indexed by :class:`~d123.geometry.geometry_index.Vector3DIndex`. + :param array: Array of shape (3,), indexed by :class:`~py123d.geometry.geometry_index.Vector3DIndex`. :param copy: Whether to copy the input array. Defaults to True. :return: A Vector3D instance. 
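Vector2D and Vector3D likewise only change their import root. A one-line check of the renamed module (a sketch; it assumes py123d is installed and Vector3D keeps the x, y, z constructor shown above):

    >>> from py123d.geometry import Vector3D
    >>> Vector3D(1.0, 2.0, 3.0).array.tolist()
    [1.0, 2.0, 3.0]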
""" @@ -206,7 +206,7 @@ def array(self) -> npt.NDArray[np.float64]: Returns the vector components as a numpy array :return: A numpy array representing the vector components [x, y, z], indexed by \ - :class:`~d123.geometry.geometry_index.Vector3DIndex`. + :class:`~py123d.geometry.geometry_index.Vector3DIndex`. """ return self._array diff --git a/d123/script/__init__.py b/py123d/script/__init__.py similarity index 100% rename from d123/script/__init__.py rename to py123d/script/__init__.py diff --git a/d123/script/builders/__init__.py b/py123d/script/builders/__init__.py similarity index 100% rename from d123/script/builders/__init__.py rename to py123d/script/builders/__init__.py diff --git a/d123/script/builders/dataset_converter_builder.py b/py123d/script/builders/dataset_converter_builder.py similarity index 81% rename from d123/script/builders/dataset_converter_builder.py rename to py123d/script/builders/dataset_converter_builder.py index 1c95cb61..53f9531d 100644 --- a/d123/script/builders/dataset_converter_builder.py +++ b/py123d/script/builders/dataset_converter_builder.py @@ -4,8 +4,8 @@ from hydra.utils import instantiate from omegaconf import DictConfig -from d123.conversion.abstract_dataset_converter import AbstractDatasetConverter -from d123.script.builders.utils.utils_type import validate_type +from py123d.conversion.abstract_dataset_converter import AbstractDatasetConverter +from py123d.script.builders.utils.utils_type import validate_type logger = logging.getLogger(__name__) diff --git a/d123/script/builders/scene_builder_builder.py b/py123d/script/builders/scene_builder_builder.py similarity index 90% rename from d123/script/builders/scene_builder_builder.py rename to py123d/script/builders/scene_builder_builder.py index 148a4792..3c7523a4 100644 --- a/d123/script/builders/scene_builder_builder.py +++ b/py123d/script/builders/scene_builder_builder.py @@ -5,7 +5,7 @@ from hydra.utils import instantiate from omegaconf import DictConfig -from d123.datatypes.scene.abstract_scene_builder import SceneBuilder +from py123d.datatypes.scene.abstract_scene_builder import SceneBuilder logger = logging.getLogger(__name__) diff --git a/d123/script/builders/scene_filter_builder.py b/py123d/script/builders/scene_filter_builder.py similarity index 95% rename from d123/script/builders/scene_filter_builder.py rename to py123d/script/builders/scene_filter_builder.py index f6ad657b..191af8ea 100644 --- a/d123/script/builders/scene_filter_builder.py +++ b/py123d/script/builders/scene_filter_builder.py @@ -4,7 +4,7 @@ from hydra.utils import instantiate from omegaconf import DictConfig -from d123.datatypes.scene.scene_filter import SceneFilter +from py123d.datatypes.scene.scene_filter import SceneFilter logger = logging.getLogger(__name__) diff --git a/d123/script/builders/utils/utils_type.py b/py123d/script/builders/utils/utils_type.py similarity index 100% rename from d123/script/builders/utils/utils_type.py rename to py123d/script/builders/utils/utils_type.py diff --git a/d123/script/builders/worker_pool_builder.py b/py123d/script/builders/worker_pool_builder.py similarity index 80% rename from d123/script/builders/worker_pool_builder.py rename to py123d/script/builders/worker_pool_builder.py index 123ec3c3..d5607da7 100644 --- a/d123/script/builders/worker_pool_builder.py +++ b/py123d/script/builders/worker_pool_builder.py @@ -3,8 +3,8 @@ from hydra.utils import instantiate from omegaconf import DictConfig -from d123.common.multithreading.worker_pool import WorkerPool -from 
d123.script.builders.utils.utils_type import validate_type +from py123d.common.multithreading.worker_pool import WorkerPool +from py123d.script.builders.utils.utils_type import validate_type logger = logging.getLogger(__name__) diff --git a/d123/script/builders/writer_builder.py b/py123d/script/builders/writer_builder.py similarity index 76% rename from d123/script/builders/writer_builder.py rename to py123d/script/builders/writer_builder.py index 165e9b7b..ebee9935 100644 --- a/d123/script/builders/writer_builder.py +++ b/py123d/script/builders/writer_builder.py @@ -3,9 +3,9 @@ from hydra.utils import instantiate from omegaconf import DictConfig -from d123.conversion.abstract_dataset_converter import AbstractLogWriter -from d123.conversion.map_writer.abstract_map_writer import AbstractMapWriter -from d123.script.builders.utils.utils_type import validate_type +from py123d.conversion.abstract_dataset_converter import AbstractLogWriter +from py123d.conversion.map_writer.abstract_map_writer import AbstractMapWriter +from py123d.script.builders.utils.utils_type import validate_type logger = logging.getLogger(__name__) diff --git a/d123/script/config/__init__.py b/py123d/script/config/__init__.py similarity index 100% rename from d123/script/config/__init__.py rename to py123d/script/config/__init__.py diff --git a/d123/script/config/common/__init__.py b/py123d/script/config/common/__init__.py similarity index 100% rename from d123/script/config/common/__init__.py rename to py123d/script/config/common/__init__.py diff --git a/d123/script/config/common/default_common.yaml b/py123d/script/config/common/default_common.yaml similarity index 100% rename from d123/script/config/common/default_common.yaml rename to py123d/script/config/common/default_common.yaml diff --git a/py123d/script/config/common/default_dataset_paths.yaml b/py123d/script/config/common/default_dataset_paths.yaml new file mode 100644 index 00000000..5f7af456 --- /dev/null +++ b/py123d/script/config/common/default_dataset_paths.yaml @@ -0,0 +1,11 @@ +# 123D Defaults +py123d_data_root: ${oc.env:PY123D_DATA_ROOT} +py123d_logs_root: ${oc.env:PY123D_DATA_ROOT}/logs +py123d_maps_root: ${oc.env:PY123D_DATA_ROOT}/maps +py123d_sensors_root: ${oc.env:PY123D_DATA_ROOT}/sensors + + +# nuPlan defaults +nuplan_data_root: ${oc.env:NUPLAN_DATA_ROOT} +nuplan_maps_root: ${oc.env:NUPLAN_MAPS_ROOT} +nuplan_sensor_root: ${oc.env:NUPLAN_DATA_ROOT}/nuplan-v1.1/sensor_blobs diff --git a/d123/script/config/common/default_experiment.yaml b/py123d/script/config/common/default_experiment.yaml similarity index 69% rename from d123/script/config/common/default_experiment.yaml rename to py123d/script/config/common/default_experiment.yaml index 9617f335..049ad4cc 100644 --- a/d123/script/config/common/default_experiment.yaml +++ b/py123d/script/config/common/default_experiment.yaml @@ -8,4 +8,4 @@ defaults: experiment_name: ??? 
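The new defaults file resolves every root from the PY123D_DATA_ROOT environment variable via OmegaConf's oc.env resolver, so a single exported variable drives the logs, maps, and sensors paths. A sketch of the resolution behaviour (the path below is a hypothetical example, not a real installation):

    >>> import os
    >>> from omegaconf import OmegaConf
    >>> os.environ["PY123D_DATA_ROOT"] = "/tmp/py123d_data"  # hypothetical root, for illustration only
    >>> cfg = OmegaConf.create({"py123d_data_root": "${oc.env:PY123D_DATA_ROOT}"})
    >>> cfg.py123d_data_root  # the interpolation is resolved on access
    '/tmp/py123d_data'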
diff --git a/d123/script/config/common/default_experiment.yaml b/py123d/script/config/common/default_experiment.yaml
similarity index 69%
rename from d123/script/config/common/default_experiment.yaml
rename to py123d/script/config/common/default_experiment.yaml
index 9617f335..049ad4cc 100644
--- a/d123/script/config/common/default_experiment.yaml
+++ b/py123d/script/config/common/default_experiment.yaml
@@ -8,4 +8,4 @@ defaults:
 experiment_name: ???
 date_format: '%Y.%m.%d.%H.%M.%S'
 experiment_uid: ${now:${date_format}}
-output_dir: ${oc.env:D123_EXP_ROOT}/${experiment_name}/${experiment_uid} # path where output csv is saved
+output_dir: ${oc.env:PY123D_EXP_ROOT}/${experiment_name}/${experiment_uid} # path where output csv is saved
diff --git a/py123d/script/config/common/scene_builder/default_scene_builder.yaml b/py123d/script/config/common/scene_builder/default_scene_builder.yaml
new file mode 100644
index 00000000..3a4be1f3
--- /dev/null
+++ b/py123d/script/config/common/scene_builder/default_scene_builder.yaml
@@ -0,0 +1,4 @@
+_target_: py123d.datatypes.scene.arrow.arrow_scene_builder.ArrowSceneBuilder
+_convert_: 'all'
+
+dataset_path: ${py123d_data_root}
diff --git a/d123/script/config/common/scene_filter/all_scenes.yaml b/py123d/script/config/common/scene_filter/all_scenes.yaml
similarity index 81%
rename from d123/script/config/common/scene_filter/all_scenes.yaml
rename to py123d/script/config/common/scene_filter/all_scenes.yaml
index 0134a435..83360b6d 100644
--- a/d123/script/config/common/scene_filter/all_scenes.yaml
+++ b/py123d/script/config/common/scene_filter/all_scenes.yaml
@@ -1,4 +1,4 @@
-_target_: d123.datatypes.scene.scene_filter.SceneFilter
+_target_: py123d.datatypes.scene.scene_filter.SceneFilter
 _convert_: 'all'

 split_types: null
diff --git a/d123/script/config/common/scene_filter/log_scenes.yaml b/py123d/script/config/common/scene_filter/log_scenes.yaml
similarity index 82%
rename from d123/script/config/common/scene_filter/log_scenes.yaml
rename to py123d/script/config/common/scene_filter/log_scenes.yaml
index f1121675..750bf0f8 100644
--- a/d123/script/config/common/scene_filter/log_scenes.yaml
+++ b/py123d/script/config/common/scene_filter/log_scenes.yaml
@@ -1,4 +1,4 @@
-_target_: d123.datatypes.scene.scene_filter.SceneFilter
+_target_: py123d.datatypes.scene.scene_filter.SceneFilter
 _convert_: 'all'

 split_types: null
diff --git a/d123/script/config/common/scene_filter/nuplan_mini_train.yaml b/py123d/script/config/common/scene_filter/nuplan_mini_train.yaml
similarity index 80%
rename from d123/script/config/common/scene_filter/nuplan_mini_train.yaml
rename to py123d/script/config/common/scene_filter/nuplan_mini_train.yaml
index d04697ec..62bb8789 100644
--- a/d123/script/config/common/scene_filter/nuplan_mini_train.yaml
+++ b/py123d/script/config/common/scene_filter/nuplan_mini_train.yaml
@@ -1,4 +1,4 @@
-_target_: d123.datatypes.scene.scene_filter.SceneFilter
+_target_: py123d.datatypes.scene.scene_filter.SceneFilter
 _convert_: 'all'

 split_types: null
diff --git a/d123/script/config/common/scene_filter/nuplan_mini_val.yaml b/py123d/script/config/common/scene_filter/nuplan_mini_val.yaml
similarity index 80%
rename from d123/script/config/common/scene_filter/nuplan_mini_val.yaml
rename to py123d/script/config/common/scene_filter/nuplan_mini_val.yaml
index b847f623..f767bbcc 100644
--- a/d123/script/config/common/scene_filter/nuplan_mini_val.yaml
+++ b/py123d/script/config/common/scene_filter/nuplan_mini_val.yaml
@@ -1,4 +1,4 @@
-_target_: d123.datatypes.scene.scene_filter.SceneFilter
+_target_: py123d.datatypes.scene.scene_filter.SceneFilter
 _convert_: 'all'

 split_types: null
diff --git a/d123/script/config/common/scene_filter/nuplan_sim_agent.yaml b/py123d/script/config/common/scene_filter/nuplan_sim_agent.yaml
similarity index 97%
rename from d123/script/config/common/scene_filter/nuplan_sim_agent.yaml
rename to py123d/script/config/common/scene_filter/nuplan_sim_agent.yaml
index 68a2424d..8e2b3c49 100644
--- a/d123/script/config/common/scene_filter/nuplan_sim_agent.yaml
+++ b/py123d/script/config/common/scene_filter/nuplan_sim_agent.yaml
@@ -1,4 +1,4 @@
-_target_: d123.datatypes.scene.scene_filter.SceneFilter
+_target_: py123d.datatypes.scene.scene_filter.SceneFilter
 _convert_: 'all'

 split_types: null
diff --git a/d123/script/config/common/scene_filter/viser_scenes.yaml b/py123d/script/config/common/scene_filter/viser_scenes.yaml
similarity index 81%
rename from d123/script/config/common/scene_filter/viser_scenes.yaml
rename to py123d/script/config/common/scene_filter/viser_scenes.yaml
index 9e561981..c52ec40d 100644
--- a/d123/script/config/common/scene_filter/viser_scenes.yaml
+++ b/py123d/script/config/common/scene_filter/viser_scenes.yaml
@@ -1,4 +1,4 @@
-_target_: d123.datatypes.scene.scene_filter.SceneFilter
+_target_: py123d.datatypes.scene.scene_filter.SceneFilter
 _convert_: 'all'

 split_types: null
diff --git a/d123/script/config/common/worker/__init__.py b/py123d/script/config/common/worker/__init__.py
similarity index 100%
rename from d123/script/config/common/worker/__init__.py
rename to py123d/script/config/common/worker/__init__.py
diff --git a/d123/script/config/common/worker/ray_distributed.yaml b/py123d/script/config/common/worker/ray_distributed.yaml
similarity index 89%
rename from d123/script/config/common/worker/ray_distributed.yaml
rename to py123d/script/config/common/worker/ray_distributed.yaml
index 2c101f66..bb663462 100644
--- a/d123/script/config/common/worker/ray_distributed.yaml
+++ b/py123d/script/config/common/worker/ray_distributed.yaml
@@ -1,4 +1,4 @@
-_target_: d123.common.multithreading.worker_ray.RayDistributed
+_target_: py123d.common.multithreading.worker_ray.RayDistributed
 _convert_: 'all'
 master_node_ip: null # Set to a master node IP if you desire to connect to cluster remotely
 threads_per_node: null # Number of CPU threads to use per node, "null" means all threads available
diff --git a/py123d/script/config/common/worker/sequential.yaml b/py123d/script/config/common/worker/sequential.yaml
new file mode 100644
index 00000000..fe3d04f8
--- /dev/null
+++ b/py123d/script/config/common/worker/sequential.yaml
@@ -0,0 +1,2 @@
+_target_: py123d.common.multithreading.worker_sequential.Sequential
+_convert_: 'all'
diff --git a/d123/script/config/common/worker/single_machine_thread_pool.yaml b/py123d/script/config/common/worker/single_machine_thread_pool.yaml
similarity index 73%
rename from d123/script/config/common/worker/single_machine_thread_pool.yaml
rename to py123d/script/config/common/worker/single_machine_thread_pool.yaml
index ace763f6..1344c762 100644
--- a/d123/script/config/common/worker/single_machine_thread_pool.yaml
+++ b/py123d/script/config/common/worker/single_machine_thread_pool.yaml
@@ -1,4 +1,4 @@
-_target_: d123.common.multithreading.worker_parallel.SingleMachineParallelExecutor
+_target_: py123d.common.multithreading.worker_parallel.SingleMachineParallelExecutor
 _convert_: 'all'
 use_process_pool: True # If true, use ProcessPoolExecutor as the backend, otherwise uses ThreadPoolExecutor
 max_workers: 16 # Number of CPU workers (threads/processes) to use per node, "null" means all available
diff --git a/d123/script/config/conversion/__init__.py b/py123d/script/config/conversion/__init__.py
similarity index 100%
rename from d123/script/config/conversion/__init__.py
rename to py123d/script/config/conversion/__init__.py
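Each worker YAML above is a Hydra target, so the _target_ string must match the renamed module path exactly; that is why these one-line edits matter. A sketch of how such a config is instantiated (it assumes the py123d package is on PYTHONPATH; the Sequential worker is the one added above):

    >>> from hydra.utils import instantiate
    >>> from omegaconf import OmegaConf
    >>> worker_cfg = OmegaConf.create(
    ...     {"_target_": "py123d.common.multithreading.worker_sequential.Sequential", "_convert_": "all"}
    ... )
    >>> worker = instantiate(worker_cfg)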
diff --git a/d123/script/config/conversion/datasets/__init__.py b/py123d/script/config/conversion/datasets/__init__.py
similarity index 100%
rename from d123/script/config/conversion/datasets/__init__.py
rename to py123d/script/config/conversion/datasets/__init__.py
diff --git a/d123/script/config/conversion/datasets/av2_sensor_dataset.yaml b/py123d/script/config/conversion/datasets/av2_sensor_dataset.yaml
similarity index 80%
rename from d123/script/config/conversion/datasets/av2_sensor_dataset.yaml
rename to py123d/script/config/conversion/datasets/av2_sensor_dataset.yaml
index 4fbda36f..ca5a6837 100644
--- a/d123/script/config/conversion/datasets/av2_sensor_dataset.yaml
+++ b/py123d/script/config/conversion/datasets/av2_sensor_dataset.yaml
@@ -1,15 +1,15 @@
 av2_sensor_dataset:
-  _target_: d123.conversion.datasets.av2.av2_sensor_converter.AV2SensorConverter
+  _target_: py123d.conversion.datasets.av2.av2_sensor_converter.AV2SensorConverter
   _convert_: 'all'

   splits: ["av2-sensor-mini_train"]
   av2_data_root: "/media/nvme1/argoverse"

   dataset_converter_config:
-    _target_: d123.conversion.dataset_converter_config.DatasetConverterConfig
+    _target_: py123d.conversion.dataset_converter_config.DatasetConverterConfig
     _convert_: 'all'

-    output_path: ${d123_data_root}
+    output_path: ${py123d_data_root}
     force_log_conversion: ${force_log_conversion}
     force_map_conversion: ${force_map_conversion}
diff --git a/d123/script/config/conversion/datasets/carla_dataset.yaml b/py123d/script/config/conversion/datasets/carla_dataset.yaml
similarity index 79%
rename from d123/script/config/conversion/datasets/carla_dataset.yaml
rename to py123d/script/config/conversion/datasets/carla_dataset.yaml
index a6540ba3..b3c85a3c 100644
--- a/d123/script/config/conversion/datasets/carla_dataset.yaml
+++ b/py123d/script/config/conversion/datasets/carla_dataset.yaml
@@ -1,15 +1,15 @@
 carla_dataset:
-  _target_: d123.conversion.datasets.carla.carla_data_converter.CarlaDataConverter
+  _target_: py123d.conversion.datasets.carla.carla_data_converter.CarlaDataConverter
   _convert_: 'all'

   splits: ["carla"]
   log_path: "${oc.env:HOME}/carla_workspace/data"

   dataset_converter_config:
-    _target_: d123.conversion.dataset_converter_config.DatasetConverterConfig
+    _target_: py123d.conversion.dataset_converter_config.DatasetConverterConfig
     _convert_: 'all'

-    output_path: ${d123_data_root}
+    output_path: ${py123d_data_root}
     force_log_conversion: ${force_log_conversion}
     force_map_conversion: ${force_map_conversion}
diff --git a/d123/script/config/conversion/datasets/nuplan_dataset.yaml b/py123d/script/config/conversion/datasets/nuplan_dataset.yaml
similarity index 82%
rename from d123/script/config/conversion/datasets/nuplan_dataset.yaml
rename to py123d/script/config/conversion/datasets/nuplan_dataset.yaml
index b5fa60d7..db89b1db 100644
--- a/d123/script/config/conversion/datasets/nuplan_dataset.yaml
+++ b/py123d/script/config/conversion/datasets/nuplan_dataset.yaml
@@ -1,5 +1,5 @@
 nuplan_dataset:
-  _target_: d123.conversion.datasets.nuplan.nuplan_converter.NuPlanConverter
+  _target_: py123d.conversion.datasets.nuplan.nuplan_converter.NuPlanConverter
   _convert_: 'all'

   splits: ["nuplan_train", "nuplan_val", "nuplan_test"]
@@ -8,10 +8,10 @@ nuplan_dataset:
   nuplan_sensor_root: ${nuplan_sensor_root}

   dataset_converter_config:
-    _target_: d123.conversion.dataset_converter_config.DatasetConverterConfig
+    _target_: py123d.conversion.dataset_converter_config.DatasetConverterConfig
     _convert_: 'all'

-    output_path: ${d123_data_root}
+    output_path: ${py123d_data_root}
     force_log_conversion: ${force_log_conversion}
     force_map_conversion: ${force_map_conversion}
diff --git a/d123/script/config/conversion/datasets/nuplan_mini_dataset.yaml b/py123d/script/config/conversion/datasets/nuplan_mini_dataset.yaml
similarity index 82%
rename from d123/script/config/conversion/datasets/nuplan_mini_dataset.yaml
rename to py123d/script/config/conversion/datasets/nuplan_mini_dataset.yaml
index 146e8d2d..e3276efa 100644
--- a/d123/script/config/conversion/datasets/nuplan_mini_dataset.yaml
+++ b/py123d/script/config/conversion/datasets/nuplan_mini_dataset.yaml
@@ -1,5 +1,5 @@
 nuplan_mini_dataset:
-  _target_: d123.conversion.datasets.nuplan.nuplan_converter.NuPlanConverter
+  _target_: py123d.conversion.datasets.nuplan.nuplan_converter.NuPlanConverter
   _convert_: 'all'

   splits: ["nuplan-mini_train", "nuplan-mini_val", "nuplan-mini_test"]
@@ -8,10 +8,10 @@ nuplan_mini_dataset:
   nuplan_sensor_root: ${nuplan_sensor_root}

   dataset_converter_config:
-    _target_: d123.conversion.dataset_converter_config.DatasetConverterConfig
+    _target_: py123d.conversion.dataset_converter_config.DatasetConverterConfig
     _convert_: 'all'

-    output_path: ${d123_data_root}
+    output_path: ${py123d_data_root}
     force_log_conversion: ${force_log_conversion}
     force_map_conversion: ${force_map_conversion}
diff --git a/d123/script/config/conversion/datasets/pandaset_dataset.yaml b/py123d/script/config/conversion/datasets/pandaset_dataset.yaml
similarity index 91%
rename from d123/script/config/conversion/datasets/pandaset_dataset.yaml
rename to py123d/script/config/conversion/datasets/pandaset_dataset.yaml
index 115ab4e1..c4e8cfd0 100644
--- a/d123/script/config/conversion/datasets/pandaset_dataset.yaml
+++ b/py123d/script/config/conversion/datasets/pandaset_dataset.yaml
@@ -1,15 +1,15 @@
 pandaset_dataset:
-  _target_: d123.conversion.datasets.pandaset.pandaset_converter.PandasetConverter
+  _target_: py123d.conversion.datasets.pandaset.pandaset_converter.PandasetConverter
   _convert_: 'all'

   splits: ["pandaset_train", "pandaset_val", "pandaset_test"]
   pandaset_data_root: "/media/nvme1/pandaset"

   dataset_converter_config:
-    _target_: d123.conversion.dataset_converter_config.DatasetConverterConfig
+    _target_: py123d.conversion.dataset_converter_config.DatasetConverterConfig
     _convert_: 'all'

-    output_path: ${d123_data_root}
+    output_path: ${py123d_data_root}
     force_log_conversion: ${force_log_conversion}
     force_map_conversion: ${force_map_conversion}
diff --git a/d123/script/config/conversion/datasets/wopd_dataset.yaml b/py123d/script/config/conversion/datasets/wopd_dataset.yaml
similarity index 86%
rename from d123/script/config/conversion/datasets/wopd_dataset.yaml
rename to py123d/script/config/conversion/datasets/wopd_dataset.yaml
index f936270d..78cbc612 100644
--- a/d123/script/config/conversion/datasets/wopd_dataset.yaml
+++ b/py123d/script/config/conversion/datasets/wopd_dataset.yaml
@@ -1,5 +1,5 @@
 wopd_dataset:
-  _target_: d123.conversion.datasets.wopd.wopd_converter.WOPDConverter
+  _target_: py123d.conversion.datasets.wopd.wopd_converter.WOPDConverter
   _convert_: 'all'

   splits: ["wopd_train", "wopd_val", "wopd_test"] # Which splits to convert. Options: ["wopd_train", "wopd_val", "wopd_test"]
@@ -10,10 +10,10 @@ wopd_dataset:

   dataset_converter_config:
-    _target_: d123.conversion.dataset_converter_config.DatasetConverterConfig
+    _target_: py123d.conversion.dataset_converter_config.DatasetConverterConfig
     _convert_: 'all'

-    output_path: ${d123_data_root}
+    output_path: ${py123d_data_root}
     force_log_conversion: ${force_log_conversion}
     force_map_conversion: ${force_map_conversion}
diff --git a/d123/script/config/conversion/default_conversion.yaml b/py123d/script/config/conversion/default_conversion.yaml
similarity index 86%
rename from d123/script/config/conversion/default_conversion.yaml
rename to py123d/script/config/conversion/default_conversion.yaml
index 32421c52..9ba01d42 100644
--- a/d123/script/config/conversion/default_conversion.yaml
+++ b/py123d/script/config/conversion/default_conversion.yaml
@@ -3,8 +3,8 @@ hydra:
     dir: .
   output_subdir: null
   searchpath:
-    - pkg://d123.script.config
-    - pkg://d123.script.config.common
+    - pkg://py123d.script.config
+    - pkg://py123d.script.config.common
   job:
     env_set:
       HYDRA_FULL_ERROR: 1
diff --git a/d123/script/config/conversion/log_writer/__init__.py b/py123d/script/config/conversion/log_writer/__init__.py
similarity index 100%
rename from d123/script/config/conversion/log_writer/__init__.py
rename to py123d/script/config/conversion/log_writer/__init__.py
diff --git a/d123/script/config/conversion/log_writer/arrow_log_writer.yaml b/py123d/script/config/conversion/log_writer/arrow_log_writer.yaml
similarity index 67%
rename from d123/script/config/conversion/log_writer/arrow_log_writer.yaml
rename to py123d/script/config/conversion/log_writer/arrow_log_writer.yaml
index 5b7e7766..9707e057 100644
--- a/d123/script/config/conversion/log_writer/arrow_log_writer.yaml
+++ b/py123d/script/config/conversion/log_writer/arrow_log_writer.yaml
@@ -1,7 +1,7 @@
-_target_: d123.conversion.log_writer.arrow_log_writer.ArrowLogWriter
+_target_: py123d.conversion.log_writer.arrow_log_writer.ArrowLogWriter
 _convert_: 'all'

-logs_root: ${d123_logs_root}
+logs_root: ${py123d_logs_root}

 compression: null # Compression method for ipc files. Options: None, 'lz4', 'zstd'
 compression_level: null # Compression level for ipc files.
Options: None, or depending on compression method diff --git a/d123/script/config/conversion/map_writer/__init__.py b/py123d/script/config/conversion/map_writer/__init__.py similarity index 100% rename from d123/script/config/conversion/map_writer/__init__.py rename to py123d/script/config/conversion/map_writer/__init__.py diff --git a/py123d/script/config/conversion/map_writer/gpkg_map_writer.yaml b/py123d/script/config/conversion/map_writer/gpkg_map_writer.yaml new file mode 100644 index 00000000..9921ba8b --- /dev/null +++ b/py123d/script/config/conversion/map_writer/gpkg_map_writer.yaml @@ -0,0 +1,4 @@ +_target_: py123d.conversion.map_writer.gpkg_map_writer.GPKGMapWriter +_convert_: 'all' + +maps_root: ${py123d_maps_root} diff --git a/d123/script/config/viser/__init__.py b/py123d/script/config/viser/__init__.py similarity index 100% rename from d123/script/config/viser/__init__.py rename to py123d/script/config/viser/__init__.py diff --git a/d123/script/config/viser/default_viser.yaml b/py123d/script/config/viser/default_viser.yaml similarity index 70% rename from d123/script/config/viser/default_viser.yaml rename to py123d/script/config/viser/default_viser.yaml index fb0e47a2..5b230727 100644 --- a/d123/script/config/viser/default_viser.yaml +++ b/py123d/script/config/viser/default_viser.yaml @@ -3,9 +3,9 @@ hydra: dir: . output_subdir: null searchpath: # Only these paths are discoverable - - pkg://d123.script.config - - pkg://d123.script.config.common - - pkg://d123.script.config.preprocessing + - pkg://py123d.script.config + - pkg://py123d.script.config.common + - pkg://py123d.script.config.preprocessing job: chdir: False # diff --git a/d123/script/run_conversion.py b/py123d/script/run_conversion.py similarity index 87% rename from d123/script/run_conversion.py rename to py123d/script/run_conversion.py index b25a5ba4..9ef94d75 100644 --- a/d123/script/run_conversion.py +++ b/py123d/script/run_conversion.py @@ -6,11 +6,11 @@ import hydra from omegaconf import DictConfig -from d123 import ascii_banner -from d123.common.multithreading.worker_utils import worker_map -from d123.script.builders.dataset_converter_builder import AbstractDatasetConverter, build_dataset_converters -from d123.script.builders.worker_pool_builder import build_worker -from d123.script.builders.writer_builder import build_log_writer, build_map_writer +from py123d import ascii_banner +from py123d.common.multithreading.worker_utils import worker_map +from py123d.script.builders.dataset_converter_builder import AbstractDatasetConverter, build_dataset_converters +from py123d.script.builders.worker_pool_builder import build_worker +from py123d.script.builders.writer_builder import build_log_writer, build_map_writer logging.basicConfig(level=logging.INFO) logger = logging.getLogger(__name__) diff --git a/d123/script/run_viser.py b/py123d/script/run_viser.py similarity index 67% rename from d123/script/run_viser.py rename to py123d/script/run_viser.py index 04c900c7..b7f11093 100644 --- a/d123/script/run_viser.py +++ b/py123d/script/run_viser.py @@ -3,10 +3,10 @@ import hydra from omegaconf import DictConfig -from d123.common.visualization.viser.viser_viewer import ViserViewer -from d123.script.builders.scene_builder_builder import build_scene_builder -from d123.script.builders.scene_filter_builder import build_scene_filter -from d123.script.run_conversion import build_worker +from py123d.common.visualization.viser.viser_viewer import ViserViewer +from py123d.script.builders.scene_builder_builder import
build_scene_builder +from py123d.script.builders.scene_filter_builder import build_scene_filter +from py123d.script.run_conversion import build_worker logger = logging.getLogger(__name__) diff --git a/pyproject.toml b/pyproject.toml index 2e5e5149..5a0c0f59 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -12,7 +12,7 @@ classifiers = [ "Programming Language :: Python :: 3.9", "License :: OSI Approved :: Apache Software License", ] -name = "d123" +name = "py123d" version = "v0.0.7" authors = [{ name = "Daniel Dauner", email = "daniel.dauner@gmail.com" }] description = "TODO" @@ -50,8 +50,8 @@ dependencies = [ ] [project.scripts] -d123-viser = "d123.script.run_viser:main" -d123-conversion = "d123.script.run_conversion:main" +py123d-viser = "py123d.script.run_viser:main" +py123d-conversion = "py123d.script.run_conversion:main" [project.optional-dependencies] @@ -94,9 +94,9 @@ waymo = [ [tool.setuptools.packages.find] where = ["."] -include = ["d123*"] # Only include d123 package +include = ["py123d*"] # Only include py123d package exclude = ["notebooks*", "docs*"] # Explicitly exclude notebooks [project.urls] -"Homepage" = "https://github.com/DanielDauner/d123" -"Bug Tracker" = "https://github.com/DanielDauner/d123/issues" +"Homepage" = "https://github.com/DanielDauner/py123d" +"Bug Tracker" = "https://github.com/DanielDauner/py123d/issues" diff --git a/scripts/dataset/run_log_caching.sh b/scripts/dataset/run_log_caching.sh index 834d98fc..fc6eeb84 100644 --- a/scripts/dataset/run_log_caching.sh +++ b/scripts/dataset/run_log_caching.sh @@ -1,4 +1,4 @@ -python $D123_DEVKIT_ROOT/d123/script/run_conversion.py +py123d-conversion diff --git a/scripts/viz/run_viser.sh b/scripts/viz/run_viser.sh index 0dafc0a3..436e7643 100644 --- a/scripts/viz/run_viser.sh +++ b/scripts/viz/run_viser.sh @@ -1,6 +1,6 @@ -python $D123_DEVKIT_ROOT/d123/script/run_viser.py \ +py123d-viser \ scene_filter=log_scenes \ scene_filter.shuffle=True \ worker=sequential diff --git a/test_viser.py b/test_viser.py index 50ca9d28..3bc83f76 100644 --- a/test_viser.py +++ b/test_viser.py @@ -1,12 +1,11 @@ import os +from pathlib import Path -from anyio import Path - -from d123.common.multithreading.worker_sequential import Sequential -from d123.common.visualization.viser.viser_viewer import ViserViewer -from d123.datatypes.scene.arrow.arrow_scene_builder import ArrowSceneBuilder -from d123.datatypes.scene.scene_filter import SceneFilter -from d123.datatypes.sensors.camera.pinhole_camera import PinholeCameraType +from py123d.common.multithreading.worker_sequential import Sequential +from py123d.common.visualization.viser.viser_viewer import ViserViewer +from py123d.datatypes.scene.arrow.arrow_scene_builder import ArrowSceneBuilder +from py123d.datatypes.scene.scene_filter import SceneFilter +from py123d.datatypes.sensors.camera.pinhole_camera import PinholeCameraType if __name__ == "__main__": @@ -29,7 +28,7 @@ shuffle=True, camera_types=[PinholeCameraType.CAM_F0], ) - scene_builder = ArrowSceneBuilder(Path(os.environ["D123_DATA_ROOT"])) + scene_builder = ArrowSceneBuilder(Path(os.environ["PY123D_DATA_ROOT"])) worker = Sequential() scenes = scene_builder.get_scenes(scene_filter, worker) print(f"Found {len(scenes)} scenes") From 26ac1b120f3c31657f93f8a824798df28feb518b Mon Sep 17 00:00:00 2001 From: Daniel Dauner Date: Fri, 17 Oct 2025 14:05:24 +0200 Subject: [PATCH 087/145] Change repository to `src`-structure (#39) --- py123d/__init__.py | 27 ------------------- pyproject.toml | 4 +-- scripts/conversion/av2_sensor_conversion.sh | 
1 + scripts/conversion/nuplan_mini_conversion.sh | 1 + scripts/conversion/pandaset_conversion.sh | 1 + scripts/dataset/run_log_caching.sh | 4 --- src/py123d/__init__.py | 1 + {py123d => src/py123d}/common/__init__.py | 0 .../common/multithreading/ray_execution.py | 0 .../common/multithreading/worker_parallel.py | 0 .../common/multithreading/worker_pool.py | 0 .../common/multithreading/worker_ray.py | 0 .../multithreading/worker_sequential.py | 0 .../common/multithreading/worker_utils.py | 0 .../py123d}/common/utils/__init__.py | 0 .../py123d}/common/utils/arrow_helper.py | 0 .../py123d}/common/utils/dependencies.py | 0 {py123d => src/py123d}/common/utils/enums.py | 0 {py123d => src/py123d}/common/utils/mixin.py | 0 {py123d => src/py123d}/common/utils/timer.py | 0 {py123d => src/py123d}/common/utils/uuid.py | 0 .../py123d}/common/visualization/__init__.py | 0 .../common/visualization/bokeh/.gitkeep | 0 .../common/visualization/color/__init__.py | 0 .../common/visualization/color/color.py | 0 .../common/visualization/color/config.py | 0 .../common/visualization/color/default.py | 0 .../visualization/matplotlib/__init__.py | 0 .../common/visualization/matplotlib/camera.py | 0 .../common/visualization/matplotlib/lidar.py | 0 .../visualization/matplotlib/observation.py | 0 .../common/visualization/matplotlib/plots.py | 0 .../common/visualization/matplotlib/utils.py | 0 .../common/visualization/utils/.gitkeep | 0 .../common/visualization/viser/__init__.py | 0 .../visualization/viser/elements/__init__.py | 0 .../viser/elements/detection_elements.py | 0 .../viser/elements/map_elements.py | 0 .../viser/elements/sensor_elements.py | 0 .../visualization/viser/viser_config.py | 0 .../visualization/viser/viser_viewer.py | 2 +- {py123d => src/py123d}/conversion/__init__.py | 0 .../conversion/abstract_dataset_converter.py | 0 .../conversion/dataset_converter_config.py | 0 .../py123d}/conversion/datasets/__init__.py | 0 .../conversion/datasets/av2/av2_constants.py | 0 .../conversion/datasets/av2/av2_helper.py | 0 .../datasets/av2/av2_map_conversion.py | 0 .../datasets/av2/av2_sensor_converter.py | 0 .../conversion/datasets/carla/__init__.py | 0 .../datasets/carla/carla_data_converter.py | 0 .../datasets/carla/carla_load_sensor.py | 0 .../conversion/datasets/kitti_360/.gitkeep | 0 .../conversion/datasets/nuplan/__init__.py | 0 .../datasets/nuplan/nuplan_converter.py | 0 .../datasets/nuplan/nuplan_load_sensor.py | 0 .../datasets/nuplan/nuplan_map_conversion.py | 0 .../datasets/nuplan/utils/__init__.py | 0 .../datasets/nuplan/utils/log_splits.yaml | 0 .../datasets/nuplan/utils/nuplan_constants.py | 0 .../nuplan/utils/nuplan_sql_helper.py | 0 .../conversion/datasets/nuscenes/.gitkeep | 0 .../datasets/pandaset/pandaset_constants.py | 0 .../datasets/pandaset/pandaset_converter.py | 0 .../conversion/datasets/wopd/__init__.py | 0 .../datasets/wopd/utils/wopd_constants.py | 0 .../waymo_map_utils/womp_boundary_utils.py | 0 .../waymo_map_utils/wopd_map_utils copy.py | 0 .../wopd/waymo_map_utils/wopd_map_utils.py | 0 .../datasets/wopd/wopd_converter.py | 0 .../conversion/datasets/wopd/wopd_utils.py | 0 .../py123d}/conversion/log_writer/__init__.py | 0 .../log_writer/abstract_log_writer.py | 0 .../conversion/log_writer/arrow_log_writer.py | 0 .../map_writer/abstract_map_writer.py | 0 .../conversion/map_writer/gpkg_map_writer.py | 0 .../py123d}/conversion/utils/__init__.py | 0 .../conversion/utils/map_utils/__init__.py | 0 .../map_utils/opendrive/__init__ copy.py | 0 .../utils/map_utils/opendrive/__init__.py | 0 
.../opendrive/opendrive_map_conversion.py | 0 .../map_utils/opendrive/parser/__init__.py | 0 .../map_utils/opendrive/parser/elevation.py | 0 .../map_utils/opendrive/parser/geometry.py | 0 .../utils/map_utils/opendrive/parser/lane.py | 0 .../map_utils/opendrive/parser/objects.py | 0 .../map_utils/opendrive/parser/opendrive.py | 0 .../map_utils/opendrive/parser/polynomial.py | 0 .../map_utils/opendrive/parser/reference.py | 0 .../utils/map_utils/opendrive/parser/road.py | 0 .../map_utils/opendrive/utils/__init__.py | 0 .../map_utils/opendrive/utils/collection.py | 0 .../map_utils/opendrive/utils/id_mapping.py | 0 .../map_utils/opendrive/utils/id_system.py | 0 .../map_utils/opendrive/utils/lane_helper.py | 0 .../opendrive/utils/objects_helper.py | 0 .../utils/map_utils/road_edge/__init__.py | 0 .../map_utils/road_edge/road_edge_2d_utils.py | 0 .../map_utils/road_edge/road_edge_3d_utils.py | 0 .../utils/sensor_utils/camera_conventions.py | 0 .../sensor_utils/lidar_index_registry.py | 0 {py123d => src/py123d}/datatypes/__init__.py | 0 .../py123d}/datatypes/detections/__init__.py | 0 .../py123d}/datatypes/detections/detection.py | 0 .../datatypes/detections/detection_types.py | 0 .../py123d}/datatypes/maps/abstract_map.py | 0 .../datatypes/maps/abstract_map_objects.py | 0 .../py123d}/datatypes/maps/cache/__init__.py | 0 .../datatypes/maps/cache/cache_map_objects.py | 0 .../py123d}/datatypes/maps/gpkg/__init__.py | 0 .../py123d}/datatypes/maps/gpkg/gpkg_map.py | 8 +++--- .../datatypes/maps/gpkg/gpkg_map_objects.py | 0 .../py123d}/datatypes/maps/gpkg/gpkg_utils.py | 0 .../py123d}/datatypes/maps/map_datatypes.py | 0 .../py123d}/datatypes/maps/map_metadata.py | 0 .../py123d}/datatypes/scene/__init__.py | 0 .../py123d}/datatypes/scene/abstract_scene.py | 0 .../datatypes/scene/abstract_scene_builder.py | 0 .../py123d}/datatypes/scene/arrow/__init__.py | 0 .../datatypes/scene/arrow/arrow_scene.py | 0 .../scene/arrow/arrow_scene_builder.py | 0 .../datatypes/scene/arrow/utils/__init__.py | 0 .../scene/arrow/utils/arrow_getters.py | 0 .../scene/arrow/utils/arrow_metadata_utils.py | 0 .../py123d}/datatypes/scene/scene_filter.py | 0 .../py123d}/datatypes/scene/scene_metadata.py | 0 .../py123d}/datatypes/sensors/__init__.py | 0 .../datatypes/sensors/camera/__init__.py | 0 .../sensors/camera/pinhole_camera.py | 0 .../datatypes/sensors/lidar/__init__.py | 0 .../py123d}/datatypes/sensors/lidar/lidar.py | 0 .../py123d}/datatypes/time/__init__.py | 0 .../py123d}/datatypes/time/time_point.py | 0 .../datatypes/vehicle_state/__init__.py | 0 .../datatypes/vehicle_state/ego_state.py | 0 .../vehicle_state/vehicle_parameters.py | 0 {py123d => src/py123d}/geometry/__init__.py | 0 .../py123d}/geometry/bounding_box.py | 0 .../py123d}/geometry/geometry_index.py | 0 .../py123d}/geometry/occupancy_map.py | 0 {py123d => src/py123d}/geometry/point.py | 0 {py123d => src/py123d}/geometry/polyline.py | 0 {py123d => src/py123d}/geometry/rotation.py | 0 {py123d => src/py123d}/geometry/se.py | 0 .../py123d}/geometry/test/__init__.py | 0 .../geometry/test/test_bounding_box.py | 0 .../geometry/test/test_occupancy_map.py | 0 .../py123d}/geometry/test/test_point.py | 0 .../py123d}/geometry/test/test_polyline.py | 0 .../py123d}/geometry/test/test_rotation.py | 0 .../py123d}/geometry/test/test_vector.py | 0 .../py123d}/geometry/torch/.gitkeep | 0 .../py123d}/geometry/transform/__init__.py | 0 .../geometry/transform/test/__init__.py | 0 .../test/test_transform_consistency.py | 0 .../test/test_transform_euler_se3.py | 0 
.../transform/test/test_transform_se2.py | 0 .../transform/test/test_transform_se3.py | 0 .../geometry/transform/transform_euler_se3.py | 0 .../geometry/transform/transform_se2.py | 0 .../geometry/transform/transform_se3.py | 0 .../py123d}/geometry/utils/__init__.py | 0 .../geometry/utils/bounding_box_utils.py | 0 .../py123d}/geometry/utils/constants.py | 0 .../py123d}/geometry/utils/polyline_utils.py | 0 .../py123d}/geometry/utils/rotation_utils.py | 0 .../py123d}/geometry/utils/test/__init__.py | 0 .../utils/test/test_bounding_box_utils.py | 0 .../utils/test/test_polyline_utils.py | 0 .../utils/test/test_rotation_utils.py | 0 .../py123d}/geometry/utils/units.py | 0 {py123d => src/py123d}/geometry/vector.py | 0 {py123d => src/py123d}/script/__init__.py | 0 .../py123d}/script/builders/__init__.py | 0 .../builders/dataset_converter_builder.py | 0 .../script/builders/scene_builder_builder.py | 0 .../script/builders/scene_filter_builder.py | 0 .../script/builders/utils/utils_type.py | 0 .../script/builders/worker_pool_builder.py | 0 .../py123d}/script/builders/writer_builder.py | 0 .../py123d}/script/config/__init__.py | 0 .../py123d}/script/config/common/__init__.py | 0 .../script/config/common/default_common.yaml | 0 .../config/common/default_dataset_paths.yaml | 0 .../config/common/default_experiment.yaml | 0 .../scene_builder/default_scene_builder.yaml | 0 .../common/scene_filter/all_scenes.yaml | 0 .../common/scene_filter/log_scenes.yaml | 0 .../scene_filter/nuplan_mini_train.yaml | 0 .../common/scene_filter/nuplan_mini_val.yaml | 0 .../common/scene_filter/nuplan_sim_agent.yaml | 0 .../common/scene_filter/viser_scenes.yaml | 0 .../script/config/common/worker/__init__.py | 0 .../config/common/worker/ray_distributed.yaml | 0 .../config/common/worker/sequential.yaml | 0 .../worker/single_machine_thread_pool.yaml | 0 .../script/config/conversion/__init__.py | 0 .../config/conversion/datasets/__init__.py | 0 .../datasets/av2_sensor_dataset.yaml | 0 .../conversion/datasets/carla_dataset.yaml | 0 .../conversion/datasets/nuplan_dataset.yaml | 0 .../datasets/nuplan_mini_dataset.yaml | 0 .../conversion/datasets/pandaset_dataset.yaml | 0 .../conversion/datasets/wopd_dataset.yaml | 0 .../config/conversion/default_conversion.yaml | 6 +---- .../config/conversion/log_writer/__init__.py | 0 .../log_writer/arrow_log_writer.yaml | 0 .../config/conversion/map_writer/__init__.py | 0 .../map_writer/gpkg_map_writer.yaml | 0 .../py123d}/script/config/viser/__init__.py | 0 .../script/config/viser/default_viser.yaml | 4 +-- .../py123d}/script/run_conversion.py | 4 --- {py123d => src/py123d}/script/run_viser.py | 3 +++ test_viser.py | 4 +-- 214 files changed, 18 insertions(+), 52 deletions(-) delete mode 100644 py123d/__init__.py create mode 100644 scripts/conversion/av2_sensor_conversion.sh create mode 100644 scripts/conversion/nuplan_mini_conversion.sh create mode 100644 scripts/conversion/pandaset_conversion.sh delete mode 100644 scripts/dataset/run_log_caching.sh create mode 100644 src/py123d/__init__.py rename {py123d => src/py123d}/common/__init__.py (100%) rename {py123d => src/py123d}/common/multithreading/ray_execution.py (100%) rename {py123d => src/py123d}/common/multithreading/worker_parallel.py (100%) rename {py123d => src/py123d}/common/multithreading/worker_pool.py (100%) rename {py123d => src/py123d}/common/multithreading/worker_ray.py (100%) rename {py123d => src/py123d}/common/multithreading/worker_sequential.py (100%) rename {py123d => src/py123d}/common/multithreading/worker_utils.py (100%) 
rename {py123d => src/py123d}/common/utils/__init__.py (100%) rename {py123d => src/py123d}/common/utils/arrow_helper.py (100%) rename {py123d => src/py123d}/common/utils/dependencies.py (100%) rename {py123d => src/py123d}/common/utils/enums.py (100%) rename {py123d => src/py123d}/common/utils/mixin.py (100%) rename {py123d => src/py123d}/common/utils/timer.py (100%) rename {py123d => src/py123d}/common/utils/uuid.py (100%) rename {py123d => src/py123d}/common/visualization/__init__.py (100%) rename {py123d => src/py123d}/common/visualization/bokeh/.gitkeep (100%) rename {py123d => src/py123d}/common/visualization/color/__init__.py (100%) rename {py123d => src/py123d}/common/visualization/color/color.py (100%) rename {py123d => src/py123d}/common/visualization/color/config.py (100%) rename {py123d => src/py123d}/common/visualization/color/default.py (100%) rename {py123d => src/py123d}/common/visualization/matplotlib/__init__.py (100%) rename {py123d => src/py123d}/common/visualization/matplotlib/camera.py (100%) rename {py123d => src/py123d}/common/visualization/matplotlib/lidar.py (100%) rename {py123d => src/py123d}/common/visualization/matplotlib/observation.py (100%) rename {py123d => src/py123d}/common/visualization/matplotlib/plots.py (100%) rename {py123d => src/py123d}/common/visualization/matplotlib/utils.py (100%) rename {py123d => src/py123d}/common/visualization/utils/.gitkeep (100%) rename {py123d => src/py123d}/common/visualization/viser/__init__.py (100%) rename {py123d => src/py123d}/common/visualization/viser/elements/__init__.py (100%) rename {py123d => src/py123d}/common/visualization/viser/elements/detection_elements.py (100%) rename {py123d => src/py123d}/common/visualization/viser/elements/map_elements.py (100%) rename {py123d => src/py123d}/common/visualization/viser/elements/sensor_elements.py (100%) rename {py123d => src/py123d}/common/visualization/viser/viser_config.py (100%) rename {py123d => src/py123d}/common/visualization/viser/viser_viewer.py (99%) rename {py123d => src/py123d}/conversion/__init__.py (100%) rename {py123d => src/py123d}/conversion/abstract_dataset_converter.py (100%) rename {py123d => src/py123d}/conversion/dataset_converter_config.py (100%) rename {py123d => src/py123d}/conversion/datasets/__init__.py (100%) rename {py123d => src/py123d}/conversion/datasets/av2/av2_constants.py (100%) rename {py123d => src/py123d}/conversion/datasets/av2/av2_helper.py (100%) rename {py123d => src/py123d}/conversion/datasets/av2/av2_map_conversion.py (100%) rename {py123d => src/py123d}/conversion/datasets/av2/av2_sensor_converter.py (100%) rename {py123d => src/py123d}/conversion/datasets/carla/__init__.py (100%) rename {py123d => src/py123d}/conversion/datasets/carla/carla_data_converter.py (100%) rename {py123d => src/py123d}/conversion/datasets/carla/carla_load_sensor.py (100%) rename {py123d => src/py123d}/conversion/datasets/kitti_360/.gitkeep (100%) rename {py123d => src/py123d}/conversion/datasets/nuplan/__init__.py (100%) rename {py123d => src/py123d}/conversion/datasets/nuplan/nuplan_converter.py (100%) rename {py123d => src/py123d}/conversion/datasets/nuplan/nuplan_load_sensor.py (100%) rename {py123d => src/py123d}/conversion/datasets/nuplan/nuplan_map_conversion.py (100%) rename {py123d => src/py123d}/conversion/datasets/nuplan/utils/__init__.py (100%) rename {py123d => src/py123d}/conversion/datasets/nuplan/utils/log_splits.yaml (100%) rename {py123d => src/py123d}/conversion/datasets/nuplan/utils/nuplan_constants.py (100%) rename {py123d => 
src/py123d}/conversion/datasets/nuplan/utils/nuplan_sql_helper.py (100%) rename {py123d => src/py123d}/conversion/datasets/nuscenes/.gitkeep (100%) rename {py123d => src/py123d}/conversion/datasets/pandaset/pandaset_constants.py (100%) rename {py123d => src/py123d}/conversion/datasets/pandaset/pandaset_converter.py (100%) rename {py123d => src/py123d}/conversion/datasets/wopd/__init__.py (100%) rename {py123d => src/py123d}/conversion/datasets/wopd/utils/wopd_constants.py (100%) rename {py123d => src/py123d}/conversion/datasets/wopd/waymo_map_utils/womp_boundary_utils.py (100%) rename {py123d => src/py123d}/conversion/datasets/wopd/waymo_map_utils/wopd_map_utils copy.py (100%) rename {py123d => src/py123d}/conversion/datasets/wopd/waymo_map_utils/wopd_map_utils.py (100%) rename {py123d => src/py123d}/conversion/datasets/wopd/wopd_converter.py (100%) rename {py123d => src/py123d}/conversion/datasets/wopd/wopd_utils.py (100%) rename {py123d => src/py123d}/conversion/log_writer/__init__.py (100%) rename {py123d => src/py123d}/conversion/log_writer/abstract_log_writer.py (100%) rename {py123d => src/py123d}/conversion/log_writer/arrow_log_writer.py (100%) rename {py123d => src/py123d}/conversion/map_writer/abstract_map_writer.py (100%) rename {py123d => src/py123d}/conversion/map_writer/gpkg_map_writer.py (100%) rename {py123d => src/py123d}/conversion/utils/__init__.py (100%) rename {py123d => src/py123d}/conversion/utils/map_utils/__init__.py (100%) rename {py123d => src/py123d}/conversion/utils/map_utils/opendrive/__init__ copy.py (100%) rename {py123d => src/py123d}/conversion/utils/map_utils/opendrive/__init__.py (100%) rename {py123d => src/py123d}/conversion/utils/map_utils/opendrive/opendrive_map_conversion.py (100%) rename {py123d => src/py123d}/conversion/utils/map_utils/opendrive/parser/__init__.py (100%) rename {py123d => src/py123d}/conversion/utils/map_utils/opendrive/parser/elevation.py (100%) rename {py123d => src/py123d}/conversion/utils/map_utils/opendrive/parser/geometry.py (100%) rename {py123d => src/py123d}/conversion/utils/map_utils/opendrive/parser/lane.py (100%) rename {py123d => src/py123d}/conversion/utils/map_utils/opendrive/parser/objects.py (100%) rename {py123d => src/py123d}/conversion/utils/map_utils/opendrive/parser/opendrive.py (100%) rename {py123d => src/py123d}/conversion/utils/map_utils/opendrive/parser/polynomial.py (100%) rename {py123d => src/py123d}/conversion/utils/map_utils/opendrive/parser/reference.py (100%) rename {py123d => src/py123d}/conversion/utils/map_utils/opendrive/parser/road.py (100%) rename {py123d => src/py123d}/conversion/utils/map_utils/opendrive/utils/__init__.py (100%) rename {py123d => src/py123d}/conversion/utils/map_utils/opendrive/utils/collection.py (100%) rename {py123d => src/py123d}/conversion/utils/map_utils/opendrive/utils/id_mapping.py (100%) rename {py123d => src/py123d}/conversion/utils/map_utils/opendrive/utils/id_system.py (100%) rename {py123d => src/py123d}/conversion/utils/map_utils/opendrive/utils/lane_helper.py (100%) rename {py123d => src/py123d}/conversion/utils/map_utils/opendrive/utils/objects_helper.py (100%) rename {py123d => src/py123d}/conversion/utils/map_utils/road_edge/__init__.py (100%) rename {py123d => src/py123d}/conversion/utils/map_utils/road_edge/road_edge_2d_utils.py (100%) rename {py123d => src/py123d}/conversion/utils/map_utils/road_edge/road_edge_3d_utils.py (100%) rename {py123d => src/py123d}/conversion/utils/sensor_utils/camera_conventions.py (100%) rename {py123d => 
src/py123d}/conversion/utils/sensor_utils/lidar_index_registry.py (100%) rename {py123d => src/py123d}/datatypes/__init__.py (100%) rename {py123d => src/py123d}/datatypes/detections/__init__.py (100%) rename {py123d => src/py123d}/datatypes/detections/detection.py (100%) rename {py123d => src/py123d}/datatypes/detections/detection_types.py (100%) rename {py123d => src/py123d}/datatypes/maps/abstract_map.py (100%) rename {py123d => src/py123d}/datatypes/maps/abstract_map_objects.py (100%) rename {py123d => src/py123d}/datatypes/maps/cache/__init__.py (100%) rename {py123d => src/py123d}/datatypes/maps/cache/cache_map_objects.py (100%) rename {py123d => src/py123d}/datatypes/maps/gpkg/__init__.py (100%) rename {py123d => src/py123d}/datatypes/maps/gpkg/gpkg_map.py (98%) rename {py123d => src/py123d}/datatypes/maps/gpkg/gpkg_map_objects.py (100%) rename {py123d => src/py123d}/datatypes/maps/gpkg/gpkg_utils.py (100%) rename {py123d => src/py123d}/datatypes/maps/map_datatypes.py (100%) rename {py123d => src/py123d}/datatypes/maps/map_metadata.py (100%) rename {py123d => src/py123d}/datatypes/scene/__init__.py (100%) rename {py123d => src/py123d}/datatypes/scene/abstract_scene.py (100%) rename {py123d => src/py123d}/datatypes/scene/abstract_scene_builder.py (100%) rename {py123d => src/py123d}/datatypes/scene/arrow/__init__.py (100%) rename {py123d => src/py123d}/datatypes/scene/arrow/arrow_scene.py (100%) rename {py123d => src/py123d}/datatypes/scene/arrow/arrow_scene_builder.py (100%) rename {py123d => src/py123d}/datatypes/scene/arrow/utils/__init__.py (100%) rename {py123d => src/py123d}/datatypes/scene/arrow/utils/arrow_getters.py (100%) rename {py123d => src/py123d}/datatypes/scene/arrow/utils/arrow_metadata_utils.py (100%) rename {py123d => src/py123d}/datatypes/scene/scene_filter.py (100%) rename {py123d => src/py123d}/datatypes/scene/scene_metadata.py (100%) rename {py123d => src/py123d}/datatypes/sensors/__init__.py (100%) rename {py123d => src/py123d}/datatypes/sensors/camera/__init__.py (100%) rename {py123d => src/py123d}/datatypes/sensors/camera/pinhole_camera.py (100%) rename {py123d => src/py123d}/datatypes/sensors/lidar/__init__.py (100%) rename {py123d => src/py123d}/datatypes/sensors/lidar/lidar.py (100%) rename {py123d => src/py123d}/datatypes/time/__init__.py (100%) rename {py123d => src/py123d}/datatypes/time/time_point.py (100%) rename {py123d => src/py123d}/datatypes/vehicle_state/__init__.py (100%) rename {py123d => src/py123d}/datatypes/vehicle_state/ego_state.py (100%) rename {py123d => src/py123d}/datatypes/vehicle_state/vehicle_parameters.py (100%) rename {py123d => src/py123d}/geometry/__init__.py (100%) rename {py123d => src/py123d}/geometry/bounding_box.py (100%) rename {py123d => src/py123d}/geometry/geometry_index.py (100%) rename {py123d => src/py123d}/geometry/occupancy_map.py (100%) rename {py123d => src/py123d}/geometry/point.py (100%) rename {py123d => src/py123d}/geometry/polyline.py (100%) rename {py123d => src/py123d}/geometry/rotation.py (100%) rename {py123d => src/py123d}/geometry/se.py (100%) rename {py123d => src/py123d}/geometry/test/__init__.py (100%) rename {py123d => src/py123d}/geometry/test/test_bounding_box.py (100%) rename {py123d => src/py123d}/geometry/test/test_occupancy_map.py (100%) rename {py123d => src/py123d}/geometry/test/test_point.py (100%) rename {py123d => src/py123d}/geometry/test/test_polyline.py (100%) rename {py123d => src/py123d}/geometry/test/test_rotation.py (100%) rename {py123d => 
src/py123d}/geometry/test/test_vector.py (100%) rename {py123d => src/py123d}/geometry/torch/.gitkeep (100%) rename {py123d => src/py123d}/geometry/transform/__init__.py (100%) rename {py123d => src/py123d}/geometry/transform/test/__init__.py (100%) rename {py123d => src/py123d}/geometry/transform/test/test_transform_consistency.py (100%) rename {py123d => src/py123d}/geometry/transform/test/test_transform_euler_se3.py (100%) rename {py123d => src/py123d}/geometry/transform/test/test_transform_se2.py (100%) rename {py123d => src/py123d}/geometry/transform/test/test_transform_se3.py (100%) rename {py123d => src/py123d}/geometry/transform/transform_euler_se3.py (100%) rename {py123d => src/py123d}/geometry/transform/transform_se2.py (100%) rename {py123d => src/py123d}/geometry/transform/transform_se3.py (100%) rename {py123d => src/py123d}/geometry/utils/__init__.py (100%) rename {py123d => src/py123d}/geometry/utils/bounding_box_utils.py (100%) rename {py123d => src/py123d}/geometry/utils/constants.py (100%) rename {py123d => src/py123d}/geometry/utils/polyline_utils.py (100%) rename {py123d => src/py123d}/geometry/utils/rotation_utils.py (100%) rename {py123d => src/py123d}/geometry/utils/test/__init__.py (100%) rename {py123d => src/py123d}/geometry/utils/test/test_bounding_box_utils.py (100%) rename {py123d => src/py123d}/geometry/utils/test/test_polyline_utils.py (100%) rename {py123d => src/py123d}/geometry/utils/test/test_rotation_utils.py (100%) rename {py123d => src/py123d}/geometry/utils/units.py (100%) rename {py123d => src/py123d}/geometry/vector.py (100%) rename {py123d => src/py123d}/script/__init__.py (100%) rename {py123d => src/py123d}/script/builders/__init__.py (100%) rename {py123d => src/py123d}/script/builders/dataset_converter_builder.py (100%) rename {py123d => src/py123d}/script/builders/scene_builder_builder.py (100%) rename {py123d => src/py123d}/script/builders/scene_filter_builder.py (100%) rename {py123d => src/py123d}/script/builders/utils/utils_type.py (100%) rename {py123d => src/py123d}/script/builders/worker_pool_builder.py (100%) rename {py123d => src/py123d}/script/builders/writer_builder.py (100%) rename {py123d => src/py123d}/script/config/__init__.py (100%) rename {py123d => src/py123d}/script/config/common/__init__.py (100%) rename {py123d => src/py123d}/script/config/common/default_common.yaml (100%) rename {py123d => src/py123d}/script/config/common/default_dataset_paths.yaml (100%) rename {py123d => src/py123d}/script/config/common/default_experiment.yaml (100%) rename {py123d => src/py123d}/script/config/common/scene_builder/default_scene_builder.yaml (100%) rename {py123d => src/py123d}/script/config/common/scene_filter/all_scenes.yaml (100%) rename {py123d => src/py123d}/script/config/common/scene_filter/log_scenes.yaml (100%) rename {py123d => src/py123d}/script/config/common/scene_filter/nuplan_mini_train.yaml (100%) rename {py123d => src/py123d}/script/config/common/scene_filter/nuplan_mini_val.yaml (100%) rename {py123d => src/py123d}/script/config/common/scene_filter/nuplan_sim_agent.yaml (100%) rename {py123d => src/py123d}/script/config/common/scene_filter/viser_scenes.yaml (100%) rename {py123d => src/py123d}/script/config/common/worker/__init__.py (100%) rename {py123d => src/py123d}/script/config/common/worker/ray_distributed.yaml (100%) rename {py123d => src/py123d}/script/config/common/worker/sequential.yaml (100%) rename {py123d => src/py123d}/script/config/common/worker/single_machine_thread_pool.yaml (100%) rename {py123d => 
src/py123d}/script/config/conversion/__init__.py (100%) rename {py123d => src/py123d}/script/config/conversion/datasets/__init__.py (100%) rename {py123d => src/py123d}/script/config/conversion/datasets/av2_sensor_dataset.yaml (100%) rename {py123d => src/py123d}/script/config/conversion/datasets/carla_dataset.yaml (100%) rename {py123d => src/py123d}/script/config/conversion/datasets/nuplan_dataset.yaml (100%) rename {py123d => src/py123d}/script/config/conversion/datasets/nuplan_mini_dataset.yaml (100%) rename {py123d => src/py123d}/script/config/conversion/datasets/pandaset_dataset.yaml (100%) rename {py123d => src/py123d}/script/config/conversion/datasets/wopd_dataset.yaml (100%) rename {py123d => src/py123d}/script/config/conversion/default_conversion.yaml (76%) rename {py123d => src/py123d}/script/config/conversion/log_writer/__init__.py (100%) rename {py123d => src/py123d}/script/config/conversion/log_writer/arrow_log_writer.yaml (100%) rename {py123d => src/py123d}/script/config/conversion/map_writer/__init__.py (100%) rename {py123d => src/py123d}/script/config/conversion/map_writer/gpkg_map_writer.yaml (100%) rename {py123d => src/py123d}/script/config/viser/__init__.py (100%) rename {py123d => src/py123d}/script/config/viser/default_viser.yaml (63%) rename {py123d => src/py123d}/script/run_conversion.py (97%) rename {py123d => src/py123d}/script/run_viser.py (99%) diff --git a/py123d/__init__.py b/py123d/__init__.py deleted file mode 100644 index e118230f..00000000 --- a/py123d/__init__.py +++ /dev/null @@ -1,27 +0,0 @@ -# _ _ _ _ -# / /\ /\ \ /\ \ /\ \ -# / / \ / \ \ / \ \ / \ \____ -# /_/ /\ \ / /\ \ \ / /\ \ \ / /\ \_____\ -# \_\/\ \ \ \/_/\ \ \ / / /\ \ \ / / /\/___ / -# \ \ \ / / / \/_//_\ \ \ / / / / / / -# \ \ \ / / / __\___ \ \ / / / / / / -# \ \ \ / / / _ / /\ \ \ \ / / / / / / -# __\ \ \___ / / /_/\_\ / /_/____\ \ \\ \ \__/ / / -# /___\_\/__/\/ /_____/ //__________\ \ \\ \___\/ / -# \_________\/\________/ \_____________\/ \/_____/ - -ascii_banner = r""" - _ _ _ _ - / /\ /\ \ /\ \ /\ \ - / / \ / \ \ / \ \ / \ \____ -/_/ /\ \ / /\ \ \ / /\ \ \ / /\ \_____\ -\_\/\ \ \ \/_/\ \ \ / / /\ \ \ / / /\/___ / - \ \ \ / / / \/_//_\ \ \ / / / / / / - \ \ \ / / / __\___ \ \ / / / / / / - \ \ \ / / / _ / /\ \ \ \ / / / / / / - __\ \ \___ / / /_/\_\ / /_/____\ \ \\ \ \__/ / / - /___\_\/__/\/ /_____/ //__________\ \ \\ \___\/ / - \_________\/\________/ \_____________\/ \/_____/ -""" - -__version__ = "0.0.7" diff --git a/pyproject.toml b/pyproject.toml index 5a0c0f59..9e46379c 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -93,9 +93,7 @@ waymo = [ ] [tool.setuptools.packages.find] -where = ["."] -include = ["py123d*"] # Only include py123d package -exclude = ["notebooks*", "docs*"] # Explicitly exclude notebooks +where = ["src"] [project.urls] "Homepage" = "https://github.com/DanielDauner/py123d" diff --git a/scripts/conversion/av2_sensor_conversion.sh b/scripts/conversion/av2_sensor_conversion.sh new file mode 100644 index 00000000..8f4ce53f --- /dev/null +++ b/scripts/conversion/av2_sensor_conversion.sh @@ -0,0 +1 @@ +py123d-conversion datasets=["av2_sensor_dataset"] \ No newline at end of file diff --git a/scripts/conversion/nuplan_mini_conversion.sh b/scripts/conversion/nuplan_mini_conversion.sh new file mode 100644 index 00000000..76fd599b --- /dev/null +++ b/scripts/conversion/nuplan_mini_conversion.sh @@ -0,0 +1 @@ +py123d-conversion datasets=["nuplan_mini_dataset"] \ No newline at end of file diff --git a/scripts/conversion/pandaset_conversion.sh 
b/scripts/conversion/pandaset_conversion.sh new file mode 100644 index 00000000..e895fcbb --- /dev/null +++ b/scripts/conversion/pandaset_conversion.sh @@ -0,0 +1 @@ +py123d-conversion datasets=[pandaset_dataset] \ No newline at end of file diff --git a/scripts/dataset/run_log_caching.sh b/scripts/dataset/run_log_caching.sh deleted file mode 100644 index fc6eeb84..00000000 --- a/scripts/dataset/run_log_caching.sh +++ /dev/null @@ -1,4 +0,0 @@ - - - -py123d-conversion diff --git a/src/py123d/__init__.py b/src/py123d/__init__.py new file mode 100644 index 00000000..6526deb4 --- /dev/null +++ b/src/py123d/__init__.py @@ -0,0 +1 @@ +__version__ = "0.0.7" diff --git a/py123d/common/__init__.py b/src/py123d/common/__init__.py similarity index 100% rename from py123d/common/__init__.py rename to src/py123d/common/__init__.py diff --git a/py123d/common/multithreading/ray_execution.py b/src/py123d/common/multithreading/ray_execution.py similarity index 100% rename from py123d/common/multithreading/ray_execution.py rename to src/py123d/common/multithreading/ray_execution.py diff --git a/py123d/common/multithreading/worker_parallel.py b/src/py123d/common/multithreading/worker_parallel.py similarity index 100% rename from py123d/common/multithreading/worker_parallel.py rename to src/py123d/common/multithreading/worker_parallel.py diff --git a/py123d/common/multithreading/worker_pool.py b/src/py123d/common/multithreading/worker_pool.py similarity index 100% rename from py123d/common/multithreading/worker_pool.py rename to src/py123d/common/multithreading/worker_pool.py diff --git a/py123d/common/multithreading/worker_ray.py b/src/py123d/common/multithreading/worker_ray.py similarity index 100% rename from py123d/common/multithreading/worker_ray.py rename to src/py123d/common/multithreading/worker_ray.py diff --git a/py123d/common/multithreading/worker_sequential.py b/src/py123d/common/multithreading/worker_sequential.py similarity index 100% rename from py123d/common/multithreading/worker_sequential.py rename to src/py123d/common/multithreading/worker_sequential.py diff --git a/py123d/common/multithreading/worker_utils.py b/src/py123d/common/multithreading/worker_utils.py similarity index 100% rename from py123d/common/multithreading/worker_utils.py rename to src/py123d/common/multithreading/worker_utils.py diff --git a/py123d/common/utils/__init__.py b/src/py123d/common/utils/__init__.py similarity index 100% rename from py123d/common/utils/__init__.py rename to src/py123d/common/utils/__init__.py diff --git a/py123d/common/utils/arrow_helper.py b/src/py123d/common/utils/arrow_helper.py similarity index 100% rename from py123d/common/utils/arrow_helper.py rename to src/py123d/common/utils/arrow_helper.py diff --git a/py123d/common/utils/dependencies.py b/src/py123d/common/utils/dependencies.py similarity index 100% rename from py123d/common/utils/dependencies.py rename to src/py123d/common/utils/dependencies.py diff --git a/py123d/common/utils/enums.py b/src/py123d/common/utils/enums.py similarity index 100% rename from py123d/common/utils/enums.py rename to src/py123d/common/utils/enums.py diff --git a/py123d/common/utils/mixin.py b/src/py123d/common/utils/mixin.py similarity index 100% rename from py123d/common/utils/mixin.py rename to src/py123d/common/utils/mixin.py diff --git a/py123d/common/utils/timer.py b/src/py123d/common/utils/timer.py similarity index 100% rename from py123d/common/utils/timer.py rename to src/py123d/common/utils/timer.py diff --git a/py123d/common/utils/uuid.py 
b/src/py123d/common/utils/uuid.py similarity index 100% rename from py123d/common/utils/uuid.py rename to src/py123d/common/utils/uuid.py diff --git a/py123d/common/visualization/__init__.py b/src/py123d/common/visualization/__init__.py similarity index 100% rename from py123d/common/visualization/__init__.py rename to src/py123d/common/visualization/__init__.py diff --git a/py123d/common/visualization/bokeh/.gitkeep b/src/py123d/common/visualization/bokeh/.gitkeep similarity index 100% rename from py123d/common/visualization/bokeh/.gitkeep rename to src/py123d/common/visualization/bokeh/.gitkeep diff --git a/py123d/common/visualization/color/__init__.py b/src/py123d/common/visualization/color/__init__.py similarity index 100% rename from py123d/common/visualization/color/__init__.py rename to src/py123d/common/visualization/color/__init__.py diff --git a/py123d/common/visualization/color/color.py b/src/py123d/common/visualization/color/color.py similarity index 100% rename from py123d/common/visualization/color/color.py rename to src/py123d/common/visualization/color/color.py diff --git a/py123d/common/visualization/color/config.py b/src/py123d/common/visualization/color/config.py similarity index 100% rename from py123d/common/visualization/color/config.py rename to src/py123d/common/visualization/color/config.py diff --git a/py123d/common/visualization/color/default.py b/src/py123d/common/visualization/color/default.py similarity index 100% rename from py123d/common/visualization/color/default.py rename to src/py123d/common/visualization/color/default.py diff --git a/py123d/common/visualization/matplotlib/__init__.py b/src/py123d/common/visualization/matplotlib/__init__.py similarity index 100% rename from py123d/common/visualization/matplotlib/__init__.py rename to src/py123d/common/visualization/matplotlib/__init__.py diff --git a/py123d/common/visualization/matplotlib/camera.py b/src/py123d/common/visualization/matplotlib/camera.py similarity index 100% rename from py123d/common/visualization/matplotlib/camera.py rename to src/py123d/common/visualization/matplotlib/camera.py diff --git a/py123d/common/visualization/matplotlib/lidar.py b/src/py123d/common/visualization/matplotlib/lidar.py similarity index 100% rename from py123d/common/visualization/matplotlib/lidar.py rename to src/py123d/common/visualization/matplotlib/lidar.py diff --git a/py123d/common/visualization/matplotlib/observation.py b/src/py123d/common/visualization/matplotlib/observation.py similarity index 100% rename from py123d/common/visualization/matplotlib/observation.py rename to src/py123d/common/visualization/matplotlib/observation.py diff --git a/py123d/common/visualization/matplotlib/plots.py b/src/py123d/common/visualization/matplotlib/plots.py similarity index 100% rename from py123d/common/visualization/matplotlib/plots.py rename to src/py123d/common/visualization/matplotlib/plots.py diff --git a/py123d/common/visualization/matplotlib/utils.py b/src/py123d/common/visualization/matplotlib/utils.py similarity index 100% rename from py123d/common/visualization/matplotlib/utils.py rename to src/py123d/common/visualization/matplotlib/utils.py diff --git a/py123d/common/visualization/utils/.gitkeep b/src/py123d/common/visualization/utils/.gitkeep similarity index 100% rename from py123d/common/visualization/utils/.gitkeep rename to src/py123d/common/visualization/utils/.gitkeep diff --git a/py123d/common/visualization/viser/__init__.py b/src/py123d/common/visualization/viser/__init__.py similarity index 100% 
rename from py123d/common/visualization/viser/__init__.py rename to src/py123d/common/visualization/viser/__init__.py diff --git a/py123d/common/visualization/viser/elements/__init__.py b/src/py123d/common/visualization/viser/elements/__init__.py similarity index 100% rename from py123d/common/visualization/viser/elements/__init__.py rename to src/py123d/common/visualization/viser/elements/__init__.py diff --git a/py123d/common/visualization/viser/elements/detection_elements.py b/src/py123d/common/visualization/viser/elements/detection_elements.py similarity index 100% rename from py123d/common/visualization/viser/elements/detection_elements.py rename to src/py123d/common/visualization/viser/elements/detection_elements.py diff --git a/py123d/common/visualization/viser/elements/map_elements.py b/src/py123d/common/visualization/viser/elements/map_elements.py similarity index 100% rename from py123d/common/visualization/viser/elements/map_elements.py rename to src/py123d/common/visualization/viser/elements/map_elements.py diff --git a/py123d/common/visualization/viser/elements/sensor_elements.py b/src/py123d/common/visualization/viser/elements/sensor_elements.py similarity index 100% rename from py123d/common/visualization/viser/elements/sensor_elements.py rename to src/py123d/common/visualization/viser/elements/sensor_elements.py diff --git a/py123d/common/visualization/viser/viser_config.py b/src/py123d/common/visualization/viser/viser_config.py similarity index 100% rename from py123d/common/visualization/viser/viser_config.py rename to src/py123d/common/visualization/viser/viser_config.py diff --git a/py123d/common/visualization/viser/viser_viewer.py b/src/py123d/common/visualization/viser/viser_viewer.py similarity index 99% rename from py123d/common/visualization/viser/viser_viewer.py rename to src/py123d/common/visualization/viser/viser_viewer.py index 3da910e6..9c84d057 100644 --- a/py123d/common/visualization/viser/viser_viewer.py +++ b/src/py123d/common/visualization/viser/viser_viewer.py @@ -94,7 +94,7 @@ def __init__( self, scenes: List[AbstractScene], viser_config: ViserConfig = ViserConfig(), - scene_index: int = 0.0, + scene_index: int = 0, ) -> None: assert len(scenes) > 0, "At least one scene must be provided." 
diff --git a/py123d/conversion/__init__.py b/src/py123d/conversion/__init__.py
similarity index 100%
rename from py123d/conversion/__init__.py
rename to src/py123d/conversion/__init__.py
diff --git a/py123d/conversion/abstract_dataset_converter.py b/src/py123d/conversion/abstract_dataset_converter.py
similarity index 100%
rename from py123d/conversion/abstract_dataset_converter.py
rename to src/py123d/conversion/abstract_dataset_converter.py
diff --git a/py123d/conversion/dataset_converter_config.py b/src/py123d/conversion/dataset_converter_config.py
similarity index 100%
rename from py123d/conversion/dataset_converter_config.py
rename to src/py123d/conversion/dataset_converter_config.py
diff --git a/py123d/conversion/datasets/__init__.py b/src/py123d/conversion/datasets/__init__.py
similarity index 100%
rename from py123d/conversion/datasets/__init__.py
rename to src/py123d/conversion/datasets/__init__.py
diff --git a/py123d/conversion/datasets/av2/av2_constants.py b/src/py123d/conversion/datasets/av2/av2_constants.py
similarity index 100%
rename from py123d/conversion/datasets/av2/av2_constants.py
rename to src/py123d/conversion/datasets/av2/av2_constants.py
diff --git a/py123d/conversion/datasets/av2/av2_helper.py b/src/py123d/conversion/datasets/av2/av2_helper.py
similarity index 100%
rename from py123d/conversion/datasets/av2/av2_helper.py
rename to src/py123d/conversion/datasets/av2/av2_helper.py
diff --git a/py123d/conversion/datasets/av2/av2_map_conversion.py b/src/py123d/conversion/datasets/av2/av2_map_conversion.py
similarity index 100%
rename from py123d/conversion/datasets/av2/av2_map_conversion.py
rename to src/py123d/conversion/datasets/av2/av2_map_conversion.py
diff --git a/py123d/conversion/datasets/av2/av2_sensor_converter.py b/src/py123d/conversion/datasets/av2/av2_sensor_converter.py
similarity index 100%
rename from py123d/conversion/datasets/av2/av2_sensor_converter.py
rename to src/py123d/conversion/datasets/av2/av2_sensor_converter.py
diff --git a/py123d/conversion/datasets/carla/__init__.py b/src/py123d/conversion/datasets/carla/__init__.py
similarity index 100%
rename from py123d/conversion/datasets/carla/__init__.py
rename to src/py123d/conversion/datasets/carla/__init__.py
diff --git a/py123d/conversion/datasets/carla/carla_data_converter.py b/src/py123d/conversion/datasets/carla/carla_data_converter.py
similarity index 100%
rename from py123d/conversion/datasets/carla/carla_data_converter.py
rename to src/py123d/conversion/datasets/carla/carla_data_converter.py
diff --git a/py123d/conversion/datasets/carla/carla_load_sensor.py b/src/py123d/conversion/datasets/carla/carla_load_sensor.py
similarity index 100%
rename from py123d/conversion/datasets/carla/carla_load_sensor.py
rename to src/py123d/conversion/datasets/carla/carla_load_sensor.py
diff --git a/py123d/conversion/datasets/kitti_360/.gitkeep b/src/py123d/conversion/datasets/kitti_360/.gitkeep
similarity index 100%
rename from py123d/conversion/datasets/kitti_360/.gitkeep
rename to src/py123d/conversion/datasets/kitti_360/.gitkeep
diff --git a/py123d/conversion/datasets/nuplan/__init__.py b/src/py123d/conversion/datasets/nuplan/__init__.py
similarity index 100%
rename from py123d/conversion/datasets/nuplan/__init__.py
rename to src/py123d/conversion/datasets/nuplan/__init__.py
diff --git a/py123d/conversion/datasets/nuplan/nuplan_converter.py b/src/py123d/conversion/datasets/nuplan/nuplan_converter.py
similarity index 100%
rename from py123d/conversion/datasets/nuplan/nuplan_converter.py
rename to src/py123d/conversion/datasets/nuplan/nuplan_converter.py
diff --git a/py123d/conversion/datasets/nuplan/nuplan_load_sensor.py b/src/py123d/conversion/datasets/nuplan/nuplan_load_sensor.py
similarity index 100%
rename from py123d/conversion/datasets/nuplan/nuplan_load_sensor.py
rename to src/py123d/conversion/datasets/nuplan/nuplan_load_sensor.py
diff --git a/py123d/conversion/datasets/nuplan/nuplan_map_conversion.py b/src/py123d/conversion/datasets/nuplan/nuplan_map_conversion.py
similarity index 100%
rename from py123d/conversion/datasets/nuplan/nuplan_map_conversion.py
rename to src/py123d/conversion/datasets/nuplan/nuplan_map_conversion.py
diff --git a/py123d/conversion/datasets/nuplan/utils/__init__.py b/src/py123d/conversion/datasets/nuplan/utils/__init__.py
similarity index 100%
rename from py123d/conversion/datasets/nuplan/utils/__init__.py
rename to src/py123d/conversion/datasets/nuplan/utils/__init__.py
diff --git a/py123d/conversion/datasets/nuplan/utils/log_splits.yaml b/src/py123d/conversion/datasets/nuplan/utils/log_splits.yaml
similarity index 100%
rename from py123d/conversion/datasets/nuplan/utils/log_splits.yaml
rename to src/py123d/conversion/datasets/nuplan/utils/log_splits.yaml
diff --git a/py123d/conversion/datasets/nuplan/utils/nuplan_constants.py b/src/py123d/conversion/datasets/nuplan/utils/nuplan_constants.py
similarity index 100%
rename from py123d/conversion/datasets/nuplan/utils/nuplan_constants.py
rename to src/py123d/conversion/datasets/nuplan/utils/nuplan_constants.py
diff --git a/py123d/conversion/datasets/nuplan/utils/nuplan_sql_helper.py b/src/py123d/conversion/datasets/nuplan/utils/nuplan_sql_helper.py
similarity index 100%
rename from py123d/conversion/datasets/nuplan/utils/nuplan_sql_helper.py
rename to src/py123d/conversion/datasets/nuplan/utils/nuplan_sql_helper.py
diff --git a/py123d/conversion/datasets/nuscenes/.gitkeep b/src/py123d/conversion/datasets/nuscenes/.gitkeep
similarity index 100%
rename from py123d/conversion/datasets/nuscenes/.gitkeep
rename to src/py123d/conversion/datasets/nuscenes/.gitkeep
diff --git a/py123d/conversion/datasets/pandaset/pandaset_constants.py b/src/py123d/conversion/datasets/pandaset/pandaset_constants.py
similarity index 100%
rename from py123d/conversion/datasets/pandaset/pandaset_constants.py
rename to src/py123d/conversion/datasets/pandaset/pandaset_constants.py
diff --git a/py123d/conversion/datasets/pandaset/pandaset_converter.py b/src/py123d/conversion/datasets/pandaset/pandaset_converter.py
similarity index 100%
rename from py123d/conversion/datasets/pandaset/pandaset_converter.py
rename to src/py123d/conversion/datasets/pandaset/pandaset_converter.py
diff --git a/py123d/conversion/datasets/wopd/__init__.py b/src/py123d/conversion/datasets/wopd/__init__.py
similarity index 100%
rename from py123d/conversion/datasets/wopd/__init__.py
rename to src/py123d/conversion/datasets/wopd/__init__.py
diff --git a/py123d/conversion/datasets/wopd/utils/wopd_constants.py b/src/py123d/conversion/datasets/wopd/utils/wopd_constants.py
similarity index 100%
rename from py123d/conversion/datasets/wopd/utils/wopd_constants.py
rename to src/py123d/conversion/datasets/wopd/utils/wopd_constants.py
diff --git a/py123d/conversion/datasets/wopd/waymo_map_utils/womp_boundary_utils.py b/src/py123d/conversion/datasets/wopd/waymo_map_utils/womp_boundary_utils.py
similarity index 100%
rename from py123d/conversion/datasets/wopd/waymo_map_utils/womp_boundary_utils.py
rename to src/py123d/conversion/datasets/wopd/waymo_map_utils/womp_boundary_utils.py
diff --git a/py123d/conversion/datasets/wopd/waymo_map_utils/wopd_map_utils copy.py b/src/py123d/conversion/datasets/wopd/waymo_map_utils/wopd_map_utils copy.py
similarity index 100%
rename from py123d/conversion/datasets/wopd/waymo_map_utils/wopd_map_utils copy.py
rename to src/py123d/conversion/datasets/wopd/waymo_map_utils/wopd_map_utils copy.py
diff --git a/py123d/conversion/datasets/wopd/waymo_map_utils/wopd_map_utils.py b/src/py123d/conversion/datasets/wopd/waymo_map_utils/wopd_map_utils.py
similarity index 100%
rename from py123d/conversion/datasets/wopd/waymo_map_utils/wopd_map_utils.py
rename to src/py123d/conversion/datasets/wopd/waymo_map_utils/wopd_map_utils.py
diff --git a/py123d/conversion/datasets/wopd/wopd_converter.py b/src/py123d/conversion/datasets/wopd/wopd_converter.py
similarity index 100%
rename from py123d/conversion/datasets/wopd/wopd_converter.py
rename to src/py123d/conversion/datasets/wopd/wopd_converter.py
diff --git a/py123d/conversion/datasets/wopd/wopd_utils.py b/src/py123d/conversion/datasets/wopd/wopd_utils.py
similarity index 100%
rename from py123d/conversion/datasets/wopd/wopd_utils.py
rename to src/py123d/conversion/datasets/wopd/wopd_utils.py
diff --git a/py123d/conversion/log_writer/__init__.py b/src/py123d/conversion/log_writer/__init__.py
similarity index 100%
rename from py123d/conversion/log_writer/__init__.py
rename to src/py123d/conversion/log_writer/__init__.py
diff --git a/py123d/conversion/log_writer/abstract_log_writer.py b/src/py123d/conversion/log_writer/abstract_log_writer.py
similarity index 100%
rename from py123d/conversion/log_writer/abstract_log_writer.py
rename to src/py123d/conversion/log_writer/abstract_log_writer.py
diff --git a/py123d/conversion/log_writer/arrow_log_writer.py b/src/py123d/conversion/log_writer/arrow_log_writer.py
similarity index 100%
rename from py123d/conversion/log_writer/arrow_log_writer.py
rename to src/py123d/conversion/log_writer/arrow_log_writer.py
diff --git a/py123d/conversion/map_writer/abstract_map_writer.py b/src/py123d/conversion/map_writer/abstract_map_writer.py
similarity index 100%
rename from py123d/conversion/map_writer/abstract_map_writer.py
rename to src/py123d/conversion/map_writer/abstract_map_writer.py
diff --git a/py123d/conversion/map_writer/gpkg_map_writer.py b/src/py123d/conversion/map_writer/gpkg_map_writer.py
similarity index 100%
rename from py123d/conversion/map_writer/gpkg_map_writer.py
rename to src/py123d/conversion/map_writer/gpkg_map_writer.py
diff --git a/py123d/conversion/utils/__init__.py b/src/py123d/conversion/utils/__init__.py
similarity index 100%
rename from py123d/conversion/utils/__init__.py
rename to src/py123d/conversion/utils/__init__.py
diff --git a/py123d/conversion/utils/map_utils/__init__.py b/src/py123d/conversion/utils/map_utils/__init__.py
similarity index 100%
rename from py123d/conversion/utils/map_utils/__init__.py
rename to src/py123d/conversion/utils/map_utils/__init__.py
diff --git a/py123d/conversion/utils/map_utils/opendrive/__init__ copy.py b/src/py123d/conversion/utils/map_utils/opendrive/__init__ copy.py
similarity index 100%
rename from py123d/conversion/utils/map_utils/opendrive/__init__ copy.py
rename to src/py123d/conversion/utils/map_utils/opendrive/__init__ copy.py
diff --git a/py123d/conversion/utils/map_utils/opendrive/__init__.py b/src/py123d/conversion/utils/map_utils/opendrive/__init__.py
similarity index 100%
rename from py123d/conversion/utils/map_utils/opendrive/__init__.py
rename to src/py123d/conversion/utils/map_utils/opendrive/__init__.py
diff --git a/py123d/conversion/utils/map_utils/opendrive/opendrive_map_conversion.py b/src/py123d/conversion/utils/map_utils/opendrive/opendrive_map_conversion.py
similarity index 100%
rename from py123d/conversion/utils/map_utils/opendrive/opendrive_map_conversion.py
rename to src/py123d/conversion/utils/map_utils/opendrive/opendrive_map_conversion.py
diff --git a/py123d/conversion/utils/map_utils/opendrive/parser/__init__.py b/src/py123d/conversion/utils/map_utils/opendrive/parser/__init__.py
similarity index 100%
rename from py123d/conversion/utils/map_utils/opendrive/parser/__init__.py
rename to src/py123d/conversion/utils/map_utils/opendrive/parser/__init__.py
diff --git a/py123d/conversion/utils/map_utils/opendrive/parser/elevation.py b/src/py123d/conversion/utils/map_utils/opendrive/parser/elevation.py
similarity index 100%
rename from py123d/conversion/utils/map_utils/opendrive/parser/elevation.py
rename to src/py123d/conversion/utils/map_utils/opendrive/parser/elevation.py
diff --git a/py123d/conversion/utils/map_utils/opendrive/parser/geometry.py b/src/py123d/conversion/utils/map_utils/opendrive/parser/geometry.py
similarity index 100%
rename from py123d/conversion/utils/map_utils/opendrive/parser/geometry.py
rename to src/py123d/conversion/utils/map_utils/opendrive/parser/geometry.py
diff --git a/py123d/conversion/utils/map_utils/opendrive/parser/lane.py b/src/py123d/conversion/utils/map_utils/opendrive/parser/lane.py
similarity index 100%
rename from py123d/conversion/utils/map_utils/opendrive/parser/lane.py
rename to src/py123d/conversion/utils/map_utils/opendrive/parser/lane.py
diff --git a/py123d/conversion/utils/map_utils/opendrive/parser/objects.py b/src/py123d/conversion/utils/map_utils/opendrive/parser/objects.py
similarity index 100%
rename from py123d/conversion/utils/map_utils/opendrive/parser/objects.py
rename to src/py123d/conversion/utils/map_utils/opendrive/parser/objects.py
diff --git a/py123d/conversion/utils/map_utils/opendrive/parser/opendrive.py b/src/py123d/conversion/utils/map_utils/opendrive/parser/opendrive.py
similarity index 100%
rename from py123d/conversion/utils/map_utils/opendrive/parser/opendrive.py
rename to src/py123d/conversion/utils/map_utils/opendrive/parser/opendrive.py
diff --git a/py123d/conversion/utils/map_utils/opendrive/parser/polynomial.py b/src/py123d/conversion/utils/map_utils/opendrive/parser/polynomial.py
similarity index 100%
rename from py123d/conversion/utils/map_utils/opendrive/parser/polynomial.py
rename to src/py123d/conversion/utils/map_utils/opendrive/parser/polynomial.py
diff --git a/py123d/conversion/utils/map_utils/opendrive/parser/reference.py b/src/py123d/conversion/utils/map_utils/opendrive/parser/reference.py
similarity index 100%
rename from py123d/conversion/utils/map_utils/opendrive/parser/reference.py
rename to src/py123d/conversion/utils/map_utils/opendrive/parser/reference.py
diff --git a/py123d/conversion/utils/map_utils/opendrive/parser/road.py b/src/py123d/conversion/utils/map_utils/opendrive/parser/road.py
similarity index 100%
rename from py123d/conversion/utils/map_utils/opendrive/parser/road.py
rename to src/py123d/conversion/utils/map_utils/opendrive/parser/road.py
diff --git a/py123d/conversion/utils/map_utils/opendrive/utils/__init__.py b/src/py123d/conversion/utils/map_utils/opendrive/utils/__init__.py
similarity index 100%
rename from py123d/conversion/utils/map_utils/opendrive/utils/__init__.py
rename to src/py123d/conversion/utils/map_utils/opendrive/utils/__init__.py
diff --git a/py123d/conversion/utils/map_utils/opendrive/utils/collection.py b/src/py123d/conversion/utils/map_utils/opendrive/utils/collection.py
similarity index 100%
rename from py123d/conversion/utils/map_utils/opendrive/utils/collection.py
rename to src/py123d/conversion/utils/map_utils/opendrive/utils/collection.py
diff --git a/py123d/conversion/utils/map_utils/opendrive/utils/id_mapping.py b/src/py123d/conversion/utils/map_utils/opendrive/utils/id_mapping.py
similarity index 100%
rename from py123d/conversion/utils/map_utils/opendrive/utils/id_mapping.py
rename to src/py123d/conversion/utils/map_utils/opendrive/utils/id_mapping.py
diff --git a/py123d/conversion/utils/map_utils/opendrive/utils/id_system.py b/src/py123d/conversion/utils/map_utils/opendrive/utils/id_system.py
similarity index 100%
rename from py123d/conversion/utils/map_utils/opendrive/utils/id_system.py
rename to src/py123d/conversion/utils/map_utils/opendrive/utils/id_system.py
diff --git a/py123d/conversion/utils/map_utils/opendrive/utils/lane_helper.py b/src/py123d/conversion/utils/map_utils/opendrive/utils/lane_helper.py
similarity index 100%
rename from py123d/conversion/utils/map_utils/opendrive/utils/lane_helper.py
rename to src/py123d/conversion/utils/map_utils/opendrive/utils/lane_helper.py
diff --git a/py123d/conversion/utils/map_utils/opendrive/utils/objects_helper.py b/src/py123d/conversion/utils/map_utils/opendrive/utils/objects_helper.py
similarity index 100%
rename from py123d/conversion/utils/map_utils/opendrive/utils/objects_helper.py
rename to src/py123d/conversion/utils/map_utils/opendrive/utils/objects_helper.py
diff --git a/py123d/conversion/utils/map_utils/road_edge/__init__.py b/src/py123d/conversion/utils/map_utils/road_edge/__init__.py
similarity index 100%
rename from py123d/conversion/utils/map_utils/road_edge/__init__.py
rename to src/py123d/conversion/utils/map_utils/road_edge/__init__.py
diff --git a/py123d/conversion/utils/map_utils/road_edge/road_edge_2d_utils.py b/src/py123d/conversion/utils/map_utils/road_edge/road_edge_2d_utils.py
similarity index 100%
rename from py123d/conversion/utils/map_utils/road_edge/road_edge_2d_utils.py
rename to src/py123d/conversion/utils/map_utils/road_edge/road_edge_2d_utils.py
diff --git a/py123d/conversion/utils/map_utils/road_edge/road_edge_3d_utils.py b/src/py123d/conversion/utils/map_utils/road_edge/road_edge_3d_utils.py
similarity index 100%
rename from py123d/conversion/utils/map_utils/road_edge/road_edge_3d_utils.py
rename to src/py123d/conversion/utils/map_utils/road_edge/road_edge_3d_utils.py
diff --git a/py123d/conversion/utils/sensor_utils/camera_conventions.py b/src/py123d/conversion/utils/sensor_utils/camera_conventions.py
similarity index 100%
rename from py123d/conversion/utils/sensor_utils/camera_conventions.py
rename to src/py123d/conversion/utils/sensor_utils/camera_conventions.py
diff --git a/py123d/conversion/utils/sensor_utils/lidar_index_registry.py b/src/py123d/conversion/utils/sensor_utils/lidar_index_registry.py
similarity index 100%
rename from py123d/conversion/utils/sensor_utils/lidar_index_registry.py
rename to src/py123d/conversion/utils/sensor_utils/lidar_index_registry.py
diff --git a/py123d/datatypes/__init__.py b/src/py123d/datatypes/__init__.py
similarity index 100%
rename from py123d/datatypes/__init__.py
rename to src/py123d/datatypes/__init__.py
diff --git a/py123d/datatypes/detections/__init__.py b/src/py123d/datatypes/detections/__init__.py
similarity index 100%
rename from py123d/datatypes/detections/__init__.py
rename to src/py123d/datatypes/detections/__init__.py
diff --git a/py123d/datatypes/detections/detection.py b/src/py123d/datatypes/detections/detection.py
similarity index 100%
rename from py123d/datatypes/detections/detection.py
rename to src/py123d/datatypes/detections/detection.py
diff --git a/py123d/datatypes/detections/detection_types.py b/src/py123d/datatypes/detections/detection_types.py
similarity index 100%
rename from py123d/datatypes/detections/detection_types.py
rename to src/py123d/datatypes/detections/detection_types.py
diff --git a/py123d/datatypes/maps/abstract_map.py b/src/py123d/datatypes/maps/abstract_map.py
similarity index 100%
rename from py123d/datatypes/maps/abstract_map.py
rename to src/py123d/datatypes/maps/abstract_map.py
diff --git a/py123d/datatypes/maps/abstract_map_objects.py b/src/py123d/datatypes/maps/abstract_map_objects.py
similarity index 100%
rename from py123d/datatypes/maps/abstract_map_objects.py
rename to src/py123d/datatypes/maps/abstract_map_objects.py
diff --git a/py123d/datatypes/maps/cache/__init__.py b/src/py123d/datatypes/maps/cache/__init__.py
similarity index 100%
rename from py123d/datatypes/maps/cache/__init__.py
rename to src/py123d/datatypes/maps/cache/__init__.py
diff --git a/py123d/datatypes/maps/cache/cache_map_objects.py b/src/py123d/datatypes/maps/cache/cache_map_objects.py
similarity index 100%
rename from py123d/datatypes/maps/cache/cache_map_objects.py
rename to src/py123d/datatypes/maps/cache/cache_map_objects.py
diff --git a/py123d/datatypes/maps/gpkg/__init__.py b/src/py123d/datatypes/maps/gpkg/__init__.py
similarity index 100%
rename from py123d/datatypes/maps/gpkg/__init__.py
rename to src/py123d/datatypes/maps/gpkg/__init__.py
diff --git a/py123d/datatypes/maps/gpkg/gpkg_map.py b/src/py123d/datatypes/maps/gpkg/gpkg_map.py
similarity index 98%
rename from py123d/datatypes/maps/gpkg/gpkg_map.py
rename to src/py123d/datatypes/maps/gpkg/gpkg_map.py
index 9c9b0b74..7868992f 100644
--- a/py123d/datatypes/maps/gpkg/gpkg_map.py
+++ b/src/py123d/datatypes/maps/gpkg/gpkg_map.py
@@ -377,8 +377,8 @@ def _get_road_line(self, id: str) -> Optional[GPKGRoadLine]:

 @lru_cache(maxsize=MAX_LRU_CACHED_TABLES)
 def get_global_map_api(dataset: str, location: str) -> GPKGMap:
-    PY123D_MAPS_ROOT = Path(os.environ.get("PY123D_MAPS_ROOT"))  # TODO: Remove env variable
-    gpkg_path = PY123D_MAPS_ROOT / dataset / f"{dataset}_{location}.gpkg"
+    PY123D_MAPS_ROOT = Path(os.environ.get("PY123D_DATA_ROOT"))  # TODO: Remove env variable
+    gpkg_path = PY123D_MAPS_ROOT / "maps" / dataset / f"{dataset}_{location}.gpkg"
     assert gpkg_path.is_file(), f"{dataset}_{location}.gpkg not found in {str(PY123D_MAPS_ROOT)}."
     map_api = GPKGMap(gpkg_path)
     map_api.initialize()
@@ -386,8 +386,8 @@ def get_global_map_api(dataset: str, location: str) -> GPKGMap:

 def get_local_map_api(split_name: str, log_name: str) -> GPKGMap:
-    PY123D_MAPS_ROOT = Path(os.environ.get("PY123D_MAPS_ROOT"))  # TODO: Remove env variable
-    gpkg_path = PY123D_MAPS_ROOT / split_name / f"{log_name}.gpkg"
+    PY123D_MAPS_ROOT = Path(os.environ.get("PY123D_DATA_ROOT"))  # TODO: Remove env variable
+    gpkg_path = PY123D_MAPS_ROOT / "maps" / split_name / f"{log_name}.gpkg"
     assert gpkg_path.is_file(), f"{log_name}.gpkg not found in {str(PY123D_MAPS_ROOT)}."
     map_api = GPKGMap(gpkg_path)
     map_api.initialize()
diff --git a/py123d/datatypes/maps/gpkg/gpkg_map_objects.py b/src/py123d/datatypes/maps/gpkg/gpkg_map_objects.py
similarity index 100%
rename from py123d/datatypes/maps/gpkg/gpkg_map_objects.py
rename to src/py123d/datatypes/maps/gpkg/gpkg_map_objects.py
diff --git a/py123d/datatypes/maps/gpkg/gpkg_utils.py b/src/py123d/datatypes/maps/gpkg/gpkg_utils.py
similarity index 100%
rename from py123d/datatypes/maps/gpkg/gpkg_utils.py
rename to src/py123d/datatypes/maps/gpkg/gpkg_utils.py
diff --git a/py123d/datatypes/maps/map_datatypes.py b/src/py123d/datatypes/maps/map_datatypes.py
similarity index 100%
rename from py123d/datatypes/maps/map_datatypes.py
rename to src/py123d/datatypes/maps/map_datatypes.py
diff --git a/py123d/datatypes/maps/map_metadata.py b/src/py123d/datatypes/maps/map_metadata.py
similarity index 100%
rename from py123d/datatypes/maps/map_metadata.py
rename to src/py123d/datatypes/maps/map_metadata.py
diff --git a/py123d/datatypes/scene/__init__.py b/src/py123d/datatypes/scene/__init__.py
similarity index 100%
rename from py123d/datatypes/scene/__init__.py
rename to src/py123d/datatypes/scene/__init__.py
diff --git a/py123d/datatypes/scene/abstract_scene.py b/src/py123d/datatypes/scene/abstract_scene.py
similarity index 100%
rename from py123d/datatypes/scene/abstract_scene.py
rename to src/py123d/datatypes/scene/abstract_scene.py
diff --git a/py123d/datatypes/scene/abstract_scene_builder.py b/src/py123d/datatypes/scene/abstract_scene_builder.py
similarity index 100%
rename from py123d/datatypes/scene/abstract_scene_builder.py
rename to src/py123d/datatypes/scene/abstract_scene_builder.py
diff --git a/py123d/datatypes/scene/arrow/__init__.py b/src/py123d/datatypes/scene/arrow/__init__.py
similarity index 100%
rename from py123d/datatypes/scene/arrow/__init__.py
rename to src/py123d/datatypes/scene/arrow/__init__.py
diff --git a/py123d/datatypes/scene/arrow/arrow_scene.py b/src/py123d/datatypes/scene/arrow/arrow_scene.py
similarity index 100%
rename from py123d/datatypes/scene/arrow/arrow_scene.py
rename to src/py123d/datatypes/scene/arrow/arrow_scene.py
diff --git a/py123d/datatypes/scene/arrow/arrow_scene_builder.py b/src/py123d/datatypes/scene/arrow/arrow_scene_builder.py
similarity index 100%
rename from py123d/datatypes/scene/arrow/arrow_scene_builder.py
rename to src/py123d/datatypes/scene/arrow/arrow_scene_builder.py
diff --git a/py123d/datatypes/scene/arrow/utils/__init__.py b/src/py123d/datatypes/scene/arrow/utils/__init__.py
similarity index 100%
rename from py123d/datatypes/scene/arrow/utils/__init__.py
rename to src/py123d/datatypes/scene/arrow/utils/__init__.py
diff --git a/py123d/datatypes/scene/arrow/utils/arrow_getters.py b/src/py123d/datatypes/scene/arrow/utils/arrow_getters.py
similarity index 100%
rename from py123d/datatypes/scene/arrow/utils/arrow_getters.py
rename to src/py123d/datatypes/scene/arrow/utils/arrow_getters.py
diff --git a/py123d/datatypes/scene/arrow/utils/arrow_metadata_utils.py b/src/py123d/datatypes/scene/arrow/utils/arrow_metadata_utils.py
similarity index 100%
rename from py123d/datatypes/scene/arrow/utils/arrow_metadata_utils.py
rename to src/py123d/datatypes/scene/arrow/utils/arrow_metadata_utils.py
diff --git a/py123d/datatypes/scene/scene_filter.py b/src/py123d/datatypes/scene/scene_filter.py
similarity index 100%
rename from py123d/datatypes/scene/scene_filter.py
rename to src/py123d/datatypes/scene/scene_filter.py
diff --git a/py123d/datatypes/scene/scene_metadata.py b/src/py123d/datatypes/scene/scene_metadata.py
similarity index 100%
rename from py123d/datatypes/scene/scene_metadata.py
rename to src/py123d/datatypes/scene/scene_metadata.py
diff --git a/py123d/datatypes/sensors/__init__.py b/src/py123d/datatypes/sensors/__init__.py
similarity index 100%
rename from py123d/datatypes/sensors/__init__.py
rename to src/py123d/datatypes/sensors/__init__.py
diff --git a/py123d/datatypes/sensors/camera/__init__.py b/src/py123d/datatypes/sensors/camera/__init__.py
similarity index 100%
rename from py123d/datatypes/sensors/camera/__init__.py
rename to src/py123d/datatypes/sensors/camera/__init__.py
diff --git a/py123d/datatypes/sensors/camera/pinhole_camera.py b/src/py123d/datatypes/sensors/camera/pinhole_camera.py
similarity index 100%
rename from py123d/datatypes/sensors/camera/pinhole_camera.py
rename to src/py123d/datatypes/sensors/camera/pinhole_camera.py
diff --git a/py123d/datatypes/sensors/lidar/__init__.py b/src/py123d/datatypes/sensors/lidar/__init__.py
similarity index 100%
rename from py123d/datatypes/sensors/lidar/__init__.py
rename to src/py123d/datatypes/sensors/lidar/__init__.py
diff --git a/py123d/datatypes/sensors/lidar/lidar.py b/src/py123d/datatypes/sensors/lidar/lidar.py
similarity index 100%
rename from py123d/datatypes/sensors/lidar/lidar.py
rename to src/py123d/datatypes/sensors/lidar/lidar.py
diff --git a/py123d/datatypes/time/__init__.py b/src/py123d/datatypes/time/__init__.py
similarity index 100%
rename from py123d/datatypes/time/__init__.py
rename to src/py123d/datatypes/time/__init__.py
diff --git a/py123d/datatypes/time/time_point.py b/src/py123d/datatypes/time/time_point.py
similarity index 100%
rename from py123d/datatypes/time/time_point.py
rename to src/py123d/datatypes/time/time_point.py
diff --git a/py123d/datatypes/vehicle_state/__init__.py b/src/py123d/datatypes/vehicle_state/__init__.py
similarity index 100%
rename from py123d/datatypes/vehicle_state/__init__.py
rename to src/py123d/datatypes/vehicle_state/__init__.py
diff --git a/py123d/datatypes/vehicle_state/ego_state.py b/src/py123d/datatypes/vehicle_state/ego_state.py
similarity index 100%
rename from py123d/datatypes/vehicle_state/ego_state.py
rename to src/py123d/datatypes/vehicle_state/ego_state.py
diff --git a/py123d/datatypes/vehicle_state/vehicle_parameters.py b/src/py123d/datatypes/vehicle_state/vehicle_parameters.py
similarity index 100%
rename from py123d/datatypes/vehicle_state/vehicle_parameters.py
rename to src/py123d/datatypes/vehicle_state/vehicle_parameters.py
diff --git a/py123d/geometry/__init__.py b/src/py123d/geometry/__init__.py
similarity index 100%
rename from py123d/geometry/__init__.py
rename to src/py123d/geometry/__init__.py
diff --git a/py123d/geometry/bounding_box.py b/src/py123d/geometry/bounding_box.py
similarity index 100%
rename from py123d/geometry/bounding_box.py
rename to src/py123d/geometry/bounding_box.py
diff --git a/py123d/geometry/geometry_index.py b/src/py123d/geometry/geometry_index.py
similarity index 100%
rename from py123d/geometry/geometry_index.py
rename to src/py123d/geometry/geometry_index.py
diff --git a/py123d/geometry/occupancy_map.py b/src/py123d/geometry/occupancy_map.py
similarity index 100%
rename from py123d/geometry/occupancy_map.py
rename to src/py123d/geometry/occupancy_map.py
diff --git a/py123d/geometry/point.py b/src/py123d/geometry/point.py
similarity index 100%
rename from py123d/geometry/point.py
rename to src/py123d/geometry/point.py
diff --git a/py123d/geometry/polyline.py b/src/py123d/geometry/polyline.py
similarity index 100%
rename from py123d/geometry/polyline.py
rename to src/py123d/geometry/polyline.py
diff --git a/py123d/geometry/rotation.py b/src/py123d/geometry/rotation.py
similarity index 100%
rename from py123d/geometry/rotation.py
rename to src/py123d/geometry/rotation.py
diff --git a/py123d/geometry/se.py b/src/py123d/geometry/se.py
similarity index 100%
rename from py123d/geometry/se.py
rename to src/py123d/geometry/se.py
diff --git a/py123d/geometry/test/__init__.py b/src/py123d/geometry/test/__init__.py
similarity index 100%
rename from py123d/geometry/test/__init__.py
rename to src/py123d/geometry/test/__init__.py
diff --git a/py123d/geometry/test/test_bounding_box.py b/src/py123d/geometry/test/test_bounding_box.py
similarity index 100%
rename from py123d/geometry/test/test_bounding_box.py
rename to src/py123d/geometry/test/test_bounding_box.py
diff --git a/py123d/geometry/test/test_occupancy_map.py b/src/py123d/geometry/test/test_occupancy_map.py
similarity index 100%
rename from py123d/geometry/test/test_occupancy_map.py
rename to src/py123d/geometry/test/test_occupancy_map.py
diff --git a/py123d/geometry/test/test_point.py b/src/py123d/geometry/test/test_point.py
similarity index 100%
rename from py123d/geometry/test/test_point.py
rename to src/py123d/geometry/test/test_point.py
diff --git a/py123d/geometry/test/test_polyline.py b/src/py123d/geometry/test/test_polyline.py
similarity index 100%
rename from py123d/geometry/test/test_polyline.py
rename to src/py123d/geometry/test/test_polyline.py
diff --git a/py123d/geometry/test/test_rotation.py b/src/py123d/geometry/test/test_rotation.py
similarity index 100%
rename from py123d/geometry/test/test_rotation.py
rename to src/py123d/geometry/test/test_rotation.py
diff --git a/py123d/geometry/test/test_vector.py b/src/py123d/geometry/test/test_vector.py
similarity index 100%
rename from py123d/geometry/test/test_vector.py
rename to src/py123d/geometry/test/test_vector.py
diff --git a/py123d/geometry/torch/.gitkeep b/src/py123d/geometry/torch/.gitkeep
similarity index 100%
rename from py123d/geometry/torch/.gitkeep
rename to src/py123d/geometry/torch/.gitkeep
diff --git a/py123d/geometry/transform/__init__.py b/src/py123d/geometry/transform/__init__.py
similarity index 100%
rename from py123d/geometry/transform/__init__.py
rename to src/py123d/geometry/transform/__init__.py
diff --git a/py123d/geometry/transform/test/__init__.py b/src/py123d/geometry/transform/test/__init__.py
similarity index 100%
rename from py123d/geometry/transform/test/__init__.py
rename to src/py123d/geometry/transform/test/__init__.py
diff --git a/py123d/geometry/transform/test/test_transform_consistency.py b/src/py123d/geometry/transform/test/test_transform_consistency.py
similarity index 100%
rename from py123d/geometry/transform/test/test_transform_consistency.py
rename to src/py123d/geometry/transform/test/test_transform_consistency.py
diff --git a/py123d/geometry/transform/test/test_transform_euler_se3.py b/src/py123d/geometry/transform/test/test_transform_euler_se3.py
similarity index 100%
rename from py123d/geometry/transform/test/test_transform_euler_se3.py
rename to src/py123d/geometry/transform/test/test_transform_euler_se3.py
diff --git a/py123d/geometry/transform/test/test_transform_se2.py b/src/py123d/geometry/transform/test/test_transform_se2.py
similarity index 100%
rename from py123d/geometry/transform/test/test_transform_se2.py
rename to src/py123d/geometry/transform/test/test_transform_se2.py
diff --git a/py123d/geometry/transform/test/test_transform_se3.py b/src/py123d/geometry/transform/test/test_transform_se3.py
similarity index 100%
rename from py123d/geometry/transform/test/test_transform_se3.py
rename to src/py123d/geometry/transform/test/test_transform_se3.py
diff --git a/py123d/geometry/transform/transform_euler_se3.py b/src/py123d/geometry/transform/transform_euler_se3.py
similarity index 100%
rename from py123d/geometry/transform/transform_euler_se3.py
rename to src/py123d/geometry/transform/transform_euler_se3.py
diff --git a/py123d/geometry/transform/transform_se2.py b/src/py123d/geometry/transform/transform_se2.py
similarity index 100%
rename from py123d/geometry/transform/transform_se2.py
rename to src/py123d/geometry/transform/transform_se2.py
diff --git a/py123d/geometry/transform/transform_se3.py b/src/py123d/geometry/transform/transform_se3.py
similarity index 100%
rename from py123d/geometry/transform/transform_se3.py
rename to src/py123d/geometry/transform/transform_se3.py
diff --git a/py123d/geometry/utils/__init__.py b/src/py123d/geometry/utils/__init__.py
similarity index 100%
rename from py123d/geometry/utils/__init__.py
rename to src/py123d/geometry/utils/__init__.py
diff --git a/py123d/geometry/utils/bounding_box_utils.py b/src/py123d/geometry/utils/bounding_box_utils.py
similarity index 100%
rename from py123d/geometry/utils/bounding_box_utils.py
rename to src/py123d/geometry/utils/bounding_box_utils.py
diff --git a/py123d/geometry/utils/constants.py b/src/py123d/geometry/utils/constants.py
similarity index 100%
rename from py123d/geometry/utils/constants.py
rename to src/py123d/geometry/utils/constants.py
diff --git a/py123d/geometry/utils/polyline_utils.py b/src/py123d/geometry/utils/polyline_utils.py
similarity index 100%
rename from py123d/geometry/utils/polyline_utils.py
rename to src/py123d/geometry/utils/polyline_utils.py
diff --git a/py123d/geometry/utils/rotation_utils.py b/src/py123d/geometry/utils/rotation_utils.py
similarity index 100%
rename from py123d/geometry/utils/rotation_utils.py
rename to src/py123d/geometry/utils/rotation_utils.py
diff --git a/py123d/geometry/utils/test/__init__.py b/src/py123d/geometry/utils/test/__init__.py
similarity index 100%
rename from py123d/geometry/utils/test/__init__.py
rename to src/py123d/geometry/utils/test/__init__.py
diff --git a/py123d/geometry/utils/test/test_bounding_box_utils.py b/src/py123d/geometry/utils/test/test_bounding_box_utils.py
similarity index 100%
rename from py123d/geometry/utils/test/test_bounding_box_utils.py
rename to src/py123d/geometry/utils/test/test_bounding_box_utils.py
diff --git a/py123d/geometry/utils/test/test_polyline_utils.py b/src/py123d/geometry/utils/test/test_polyline_utils.py
similarity index 100%
rename from py123d/geometry/utils/test/test_polyline_utils.py
rename to src/py123d/geometry/utils/test/test_polyline_utils.py
diff --git a/py123d/geometry/utils/test/test_rotation_utils.py b/src/py123d/geometry/utils/test/test_rotation_utils.py
similarity index 100%
rename from py123d/geometry/utils/test/test_rotation_utils.py
rename to src/py123d/geometry/utils/test/test_rotation_utils.py
diff --git a/py123d/geometry/utils/units.py b/src/py123d/geometry/utils/units.py
similarity index 100%
rename from py123d/geometry/utils/units.py
rename to src/py123d/geometry/utils/units.py
diff --git a/py123d/geometry/vector.py b/src/py123d/geometry/vector.py
similarity index 100%
rename from py123d/geometry/vector.py
rename to src/py123d/geometry/vector.py
diff --git a/py123d/script/__init__.py b/src/py123d/script/__init__.py
similarity index 100%
rename from py123d/script/__init__.py
rename to src/py123d/script/__init__.py
diff --git a/py123d/script/builders/__init__.py b/src/py123d/script/builders/__init__.py
similarity index 100%
rename from py123d/script/builders/__init__.py
rename to src/py123d/script/builders/__init__.py
diff --git a/py123d/script/builders/dataset_converter_builder.py b/src/py123d/script/builders/dataset_converter_builder.py
similarity index 100%
rename from py123d/script/builders/dataset_converter_builder.py
rename to src/py123d/script/builders/dataset_converter_builder.py
diff --git a/py123d/script/builders/scene_builder_builder.py b/src/py123d/script/builders/scene_builder_builder.py
similarity index 100%
rename from py123d/script/builders/scene_builder_builder.py
rename to src/py123d/script/builders/scene_builder_builder.py
diff --git a/py123d/script/builders/scene_filter_builder.py b/src/py123d/script/builders/scene_filter_builder.py
similarity index 100%
rename from py123d/script/builders/scene_filter_builder.py
rename to src/py123d/script/builders/scene_filter_builder.py
diff --git a/py123d/script/builders/utils/utils_type.py b/src/py123d/script/builders/utils/utils_type.py
similarity index 100%
rename from py123d/script/builders/utils/utils_type.py
rename to src/py123d/script/builders/utils/utils_type.py
diff --git a/py123d/script/builders/worker_pool_builder.py b/src/py123d/script/builders/worker_pool_builder.py
similarity index 100%
rename from py123d/script/builders/worker_pool_builder.py
rename to src/py123d/script/builders/worker_pool_builder.py
diff --git a/py123d/script/builders/writer_builder.py b/src/py123d/script/builders/writer_builder.py
similarity index 100%
rename from py123d/script/builders/writer_builder.py
rename to src/py123d/script/builders/writer_builder.py
diff --git a/py123d/script/config/__init__.py b/src/py123d/script/config/__init__.py
similarity index 100%
rename from py123d/script/config/__init__.py
rename to src/py123d/script/config/__init__.py
diff --git a/py123d/script/config/common/__init__.py b/src/py123d/script/config/common/__init__.py
similarity index 100%
rename from py123d/script/config/common/__init__.py
rename to src/py123d/script/config/common/__init__.py
diff --git a/py123d/script/config/common/default_common.yaml b/src/py123d/script/config/common/default_common.yaml
similarity index 100%
rename from py123d/script/config/common/default_common.yaml
rename to src/py123d/script/config/common/default_common.yaml
diff --git a/py123d/script/config/common/default_dataset_paths.yaml b/src/py123d/script/config/common/default_dataset_paths.yaml
similarity index 100%
rename from py123d/script/config/common/default_dataset_paths.yaml
rename to src/py123d/script/config/common/default_dataset_paths.yaml
diff --git a/py123d/script/config/common/default_experiment.yaml b/src/py123d/script/config/common/default_experiment.yaml
similarity index 100%
rename from py123d/script/config/common/default_experiment.yaml
rename to src/py123d/script/config/common/default_experiment.yaml
diff --git a/py123d/script/config/common/scene_builder/default_scene_builder.yaml b/src/py123d/script/config/common/scene_builder/default_scene_builder.yaml
similarity index 100%
rename from py123d/script/config/common/scene_builder/default_scene_builder.yaml
rename to src/py123d/script/config/common/scene_builder/default_scene_builder.yaml
diff --git a/py123d/script/config/common/scene_filter/all_scenes.yaml b/src/py123d/script/config/common/scene_filter/all_scenes.yaml
similarity index 100%
rename from py123d/script/config/common/scene_filter/all_scenes.yaml
rename to src/py123d/script/config/common/scene_filter/all_scenes.yaml
diff --git a/py123d/script/config/common/scene_filter/log_scenes.yaml b/src/py123d/script/config/common/scene_filter/log_scenes.yaml
similarity index 100%
rename from py123d/script/config/common/scene_filter/log_scenes.yaml
rename to src/py123d/script/config/common/scene_filter/log_scenes.yaml
diff --git a/py123d/script/config/common/scene_filter/nuplan_mini_train.yaml b/src/py123d/script/config/common/scene_filter/nuplan_mini_train.yaml
similarity index 100%
rename from py123d/script/config/common/scene_filter/nuplan_mini_train.yaml
rename to src/py123d/script/config/common/scene_filter/nuplan_mini_train.yaml
diff --git a/py123d/script/config/common/scene_filter/nuplan_mini_val.yaml b/src/py123d/script/config/common/scene_filter/nuplan_mini_val.yaml
similarity index 100%
rename from py123d/script/config/common/scene_filter/nuplan_mini_val.yaml
rename to src/py123d/script/config/common/scene_filter/nuplan_mini_val.yaml
diff --git a/py123d/script/config/common/scene_filter/nuplan_sim_agent.yaml b/src/py123d/script/config/common/scene_filter/nuplan_sim_agent.yaml
similarity index 100%
rename from py123d/script/config/common/scene_filter/nuplan_sim_agent.yaml
rename to src/py123d/script/config/common/scene_filter/nuplan_sim_agent.yaml
diff --git a/py123d/script/config/common/scene_filter/viser_scenes.yaml b/src/py123d/script/config/common/scene_filter/viser_scenes.yaml
similarity index 100%
rename from py123d/script/config/common/scene_filter/viser_scenes.yaml
rename to src/py123d/script/config/common/scene_filter/viser_scenes.yaml
diff --git a/py123d/script/config/common/worker/__init__.py b/src/py123d/script/config/common/worker/__init__.py
similarity index 100%
rename from py123d/script/config/common/worker/__init__.py
rename to src/py123d/script/config/common/worker/__init__.py
diff --git a/py123d/script/config/common/worker/ray_distributed.yaml b/src/py123d/script/config/common/worker/ray_distributed.yaml
similarity index 100%
rename from py123d/script/config/common/worker/ray_distributed.yaml
rename to src/py123d/script/config/common/worker/ray_distributed.yaml
diff --git a/py123d/script/config/common/worker/sequential.yaml b/src/py123d/script/config/common/worker/sequential.yaml
similarity index 100%
rename from py123d/script/config/common/worker/sequential.yaml
rename to src/py123d/script/config/common/worker/sequential.yaml
diff --git a/py123d/script/config/common/worker/single_machine_thread_pool.yaml b/src/py123d/script/config/common/worker/single_machine_thread_pool.yaml
similarity index 100%
rename from py123d/script/config/common/worker/single_machine_thread_pool.yaml
rename to src/py123d/script/config/common/worker/single_machine_thread_pool.yaml
diff --git a/py123d/script/config/conversion/__init__.py b/src/py123d/script/config/conversion/__init__.py
similarity index 100%
rename from py123d/script/config/conversion/__init__.py
rename to src/py123d/script/config/conversion/__init__.py
diff --git a/py123d/script/config/conversion/datasets/__init__.py b/src/py123d/script/config/conversion/datasets/__init__.py
similarity index 100%
rename from py123d/script/config/conversion/datasets/__init__.py
rename to src/py123d/script/config/conversion/datasets/__init__.py
diff --git a/py123d/script/config/conversion/datasets/av2_sensor_dataset.yaml b/src/py123d/script/config/conversion/datasets/av2_sensor_dataset.yaml
similarity index 100%
rename from py123d/script/config/conversion/datasets/av2_sensor_dataset.yaml
rename to src/py123d/script/config/conversion/datasets/av2_sensor_dataset.yaml
diff --git a/py123d/script/config/conversion/datasets/carla_dataset.yaml b/src/py123d/script/config/conversion/datasets/carla_dataset.yaml
similarity index 100%
rename from py123d/script/config/conversion/datasets/carla_dataset.yaml
rename to src/py123d/script/config/conversion/datasets/carla_dataset.yaml
diff --git a/py123d/script/config/conversion/datasets/nuplan_dataset.yaml b/src/py123d/script/config/conversion/datasets/nuplan_dataset.yaml
similarity index 100%
rename from py123d/script/config/conversion/datasets/nuplan_dataset.yaml
rename to src/py123d/script/config/conversion/datasets/nuplan_dataset.yaml
diff --git a/py123d/script/config/conversion/datasets/nuplan_mini_dataset.yaml b/src/py123d/script/config/conversion/datasets/nuplan_mini_dataset.yaml
similarity index 100%
rename from py123d/script/config/conversion/datasets/nuplan_mini_dataset.yaml
rename to src/py123d/script/config/conversion/datasets/nuplan_mini_dataset.yaml
diff --git a/py123d/script/config/conversion/datasets/pandaset_dataset.yaml b/src/py123d/script/config/conversion/datasets/pandaset_dataset.yaml
similarity index 100%
rename from py123d/script/config/conversion/datasets/pandaset_dataset.yaml
rename to src/py123d/script/config/conversion/datasets/pandaset_dataset.yaml
diff --git a/py123d/script/config/conversion/datasets/wopd_dataset.yaml b/src/py123d/script/config/conversion/datasets/wopd_dataset.yaml
similarity index 100%
rename from py123d/script/config/conversion/datasets/wopd_dataset.yaml
rename to src/py123d/script/config/conversion/datasets/wopd_dataset.yaml
diff --git a/py123d/script/config/conversion/default_conversion.yaml b/src/py123d/script/config/conversion/default_conversion.yaml
similarity index 76%
rename from py123d/script/config/conversion/default_conversion.yaml
rename to src/py123d/script/config/conversion/default_conversion.yaml
index 9ba01d42..daa55f12 100644
--- a/py123d/script/config/conversion/default_conversion.yaml
+++ b/src/py123d/script/config/conversion/default_conversion.yaml
@@ -16,11 +16,7 @@ defaults:
   - log_writer: arrow_log_writer
   - map_writer: gpkg_map_writer
   - datasets:
-      # - nuplan_mini_dataset
-      # - carla_dataset
-      # - wopd_dataset
-      # - av2_sensor_dataset
-      - pandaset_dataset
+      - ???
   - _self_
diff --git a/py123d/script/config/conversion/log_writer/__init__.py b/src/py123d/script/config/conversion/log_writer/__init__.py
similarity index 100%
rename from py123d/script/config/conversion/log_writer/__init__.py
rename to src/py123d/script/config/conversion/log_writer/__init__.py
diff --git a/py123d/script/config/conversion/log_writer/arrow_log_writer.yaml b/src/py123d/script/config/conversion/log_writer/arrow_log_writer.yaml
similarity index 100%
rename from py123d/script/config/conversion/log_writer/arrow_log_writer.yaml
rename to src/py123d/script/config/conversion/log_writer/arrow_log_writer.yaml
diff --git a/py123d/script/config/conversion/map_writer/__init__.py b/src/py123d/script/config/conversion/map_writer/__init__.py
similarity index 100%
rename from py123d/script/config/conversion/map_writer/__init__.py
rename to src/py123d/script/config/conversion/map_writer/__init__.py
diff --git a/py123d/script/config/conversion/map_writer/gpkg_map_writer.yaml b/src/py123d/script/config/conversion/map_writer/gpkg_map_writer.yaml
similarity index 100%
rename from py123d/script/config/conversion/map_writer/gpkg_map_writer.yaml
rename to src/py123d/script/config/conversion/map_writer/gpkg_map_writer.yaml
diff --git a/py123d/script/config/viser/__init__.py b/src/py123d/script/config/viser/__init__.py
similarity index 100%
rename from py123d/script/config/viser/__init__.py
rename to src/py123d/script/config/viser/__init__.py
diff --git a/py123d/script/config/viser/default_viser.yaml b/src/py123d/script/config/viser/default_viser.yaml
similarity index 63%
rename from py123d/script/config/viser/default_viser.yaml
rename to src/py123d/script/config/viser/default_viser.yaml
index 5b230727..0c100964 100644
--- a/py123d/script/config/viser/default_viser.yaml
+++ b/src/py123d/script/config/viser/default_viser.yaml
@@ -2,10 +2,9 @@ hydra:
   run:
     dir: .
   output_subdir: null
-  searchpath: # Only in these paths are discoverable
+  searchpath:
    - pkg://py123d.script.config
    - pkg://py123d.script.config.common
-   - pkg://py123d.script.config.preprocessing
  job:
    chdir: False
 #
@@ -13,5 +12,6 @@ defaults:
  - default_common
  - default_dataset_paths
  - override scene_filter: viser_scenes
+ - self

 port_number: 8080
diff --git a/py123d/script/run_conversion.py b/src/py123d/script/run_conversion.py
similarity index 97%
rename from py123d/script/run_conversion.py
rename to src/py123d/script/run_conversion.py
index 9ef94d75..4687d573 100644
--- a/py123d/script/run_conversion.py
+++ b/src/py123d/script/run_conversion.py
@@ -6,7 +6,6 @@
 import hydra
 from omegaconf import DictConfig

-from py123d import ascii_banner
 from py123d.common.multithreading.worker_utils import worker_map
 from py123d.script.builders.dataset_converter_builder import AbstractDatasetConverter, build_dataset_converters
 from py123d.script.builders.worker_pool_builder import build_worker
@@ -25,9 +24,6 @@ def main(cfg: DictConfig) -> None:
     """
     Main entrypoint for metric caching.

    :param cfg: omegaconf dictionary
    """
-    logger.info(ascii_banner)
-
-    # Build worker
     # Precompute and cache all features
     logger.info("Starting Dataset Caching...")
diff --git a/py123d/script/run_viser.py b/src/py123d/script/run_viser.py
similarity index 99%
rename from py123d/script/run_viser.py
rename to src/py123d/script/run_viser.py
index b7f11093..84164c52 100644
--- a/py123d/script/run_viser.py
+++ b/src/py123d/script/run_viser.py
@@ -18,8 +18,11 @@ def main(cfg: DictConfig) -> None:
     worker = build_worker(cfg)
+    scene_filter = build_scene_filter(cfg.scene_filter)
+    scene_builder = build_scene_builder(cfg.scene_builder)
+    scenes = scene_builder.get_scenes(scene_filter, worker=worker)
     ViserViewer(scenes=scenes)
diff --git a/test_viser.py b/test_viser.py
index 3bc83f76..58a94379 100644
--- a/test_viser.py
+++ b/test_viser.py
@@ -9,12 +9,12 @@
 if __name__ == "__main__":
-    # splits = ["nuplan-mini_test", "nuplan-mini_train", "nuplan-mini_val"]
+    splits = ["nuplan-mini_test", "nuplan-mini_train", "nuplan-mini_val"]
     # splits = ["nuplan_private_test"]
     # splits = ["carla"]
     # splits = ["wopd_val"]
     # splits = ["av2-sensor-mini_train"]
-    splits = ["pandaset_test", "pandaset_val", "pandaset_train"]
+    # splits = ["pandaset_test", "pandaset_val", "pandaset_train"]
     log_names = None
     scene_uuids = None

From c25436805d5417dae18500c9843482ff09ba8f39 Mon Sep 17 00:00:00 2001
From: Daniel Dauner
Date: Fri, 17 Oct 2025 15:33:18 +0200
Subject: [PATCH 088/145] Remove reliance on environment variables via global dataset paths config (#41)

---
 docs/installation.md                           |  8 ++--
 environment.yml                                |  7 ---
 .../common/utils/{uuid.py => uuid_utils.py}    |  0
 .../datasets/carla/carla_data_converter.py     |  1 +
 .../conversion/log_writer/arrow_log_writer.py  |  2 +-
 src/py123d/datatypes/maps/gpkg/gpkg_map.py     | 10 ++---
 .../scene/arrow/arrow_scene_builder.py         | 27 ++++++++----
 .../config/common/default_dataset_paths.yaml   | 20 +++++----
 .../scene_builder/default_scene_builder.yaml   |  2 +-
 .../datasets/av2_sensor_dataset.yaml           |  2 +-
 .../conversion/datasets/carla_dataset.yaml     |  2 +-
 .../conversion/datasets/nuplan_dataset.yaml    |  2 +-
 .../datasets/nuplan_mini_dataset.yaml          |  2 +-
 .../conversion/datasets/pandaset_dataset.yaml  |  2 +-
 .../conversion/datasets/wopd_dataset.yaml      |  2 +-
 .../map_writer/gpkg_map_writer.yaml            |  2 +-
 .../script/config/viser/default_viser.yaml     |  2 +-
 src/py123d/script/run_conversion.py            |  6 ++-
 src/py123d/script/run_viser.py                 |  3 ++
 src/py123d/script/utils/__init__.py            |  0
 src/py123d/script/utils/dataset_path_utils.py  | 44 +++++++++++++++++++
 test_viser.py                                  |  5 +--
 22 files changed, 102 insertions(+), 49 deletions(-)
 delete mode 100644 environment.yml
 rename src/py123d/common/utils/{uuid.py => uuid_utils.py} (100%)
 create mode 100644 src/py123d/script/utils/__init__.py
 create mode 100644 src/py123d/script/utils/dataset_path_utils.py

diff --git a/docs/installation.md b/docs/installation.md
index 37770165..5b96008c 100644
--- a/docs/installation.md
+++ b/docs/installation.md
@@ -26,7 +26,7 @@ Note, the following installation assumes the following folder structure: TODO UP
 First, you need to create a new conda environment and install `py123d` as an editable pip package.
 ```bash
-conda env create -f environment.yml
+conda create -n py123d python=3.12
 conda activate py123d
 pip install -e .
 ```

 Next, you need to add the following environment variables in your `.bashrc`:
 ```bash
 export PY123D_DATA_ROOT="$HOME/py123d_workspace/data"
+```
-# CARLA
+
\ No newline at end of file
diff --git a/environment.yml b/environment.yml
deleted file mode 100644
index 0f4d171c..00000000
--- a/environment.yml
+++ /dev/null
@@ -1,7 +0,0 @@
-name: py123d
-channels:
-  - conda-forge
-dependencies:
-  - python=3.12
-  - pip=25.1.1
-  - nb_conda_kernels
diff --git a/src/py123d/common/utils/uuid.py b/src/py123d/common/utils/uuid_utils.py
similarity index 100%
rename from src/py123d/common/utils/uuid.py
rename to src/py123d/common/utils/uuid_utils.py
diff --git a/src/py123d/conversion/datasets/carla/carla_data_converter.py b/src/py123d/conversion/datasets/carla/carla_data_converter.py
index 9caff6e7..548a99bc 100644
--- a/src/py123d/conversion/datasets/carla/carla_data_converter.py
+++ b/src/py123d/conversion/datasets/carla/carla_data_converter.py
@@ -51,6 +51,7 @@
 CARLA_CAMERA_TYPES = {PinholeCameraType.CAM_F0}

+# TODO: remove environment variable dependency
 CARLA_DATA_ROOT: Final[Path] = Path(os.environ["CARLA_DATA_ROOT"])

diff --git a/src/py123d/conversion/log_writer/arrow_log_writer.py b/src/py123d/conversion/log_writer/arrow_log_writer.py
index 684db049..62154cf6 100644
--- a/src/py123d/conversion/log_writer/arrow_log_writer.py
+++ b/src/py123d/conversion/log_writer/arrow_log_writer.py
@@ -3,7 +3,7 @@
 import pyarrow as pa

-from py123d.common.utils.uuid import create_deterministic_uuid
+from py123d.common.utils.uuid_utils import create_deterministic_uuid
 from py123d.conversion.abstract_dataset_converter import AbstractLogWriter, DatasetConverterConfig
 from py123d.datatypes.detections.detection import BoxDetectionWrapper, TrafficLightDetectionWrapper
 from py123d.datatypes.scene.arrow.utils.arrow_metadata_utils import add_log_metadata_to_arrow_schema
diff --git a/src/py123d/datatypes/maps/gpkg/gpkg_map.py b/src/py123d/datatypes/maps/gpkg/gpkg_map.py
index 7868992f..acc1cfbb 100644
--- a/src/py123d/datatypes/maps/gpkg/gpkg_map.py
+++ b/src/py123d/datatypes/maps/gpkg/gpkg_map.py
@@ -1,6 +1,5 @@
 from __future__ import annotations

-import os
 import warnings
 from collections import defaultdict
 from functools import lru_cache
@@ -28,6 +27,7 @@
 from py123d.datatypes.maps.map_datatypes import MapLayer
 from py123d.datatypes.maps.map_metadata import MapMetadata
 from py123d.geometry import Point2D
+from py123d.script.utils.dataset_path_utils import get_dataset_paths

 USE_ARROW: bool = True
 MAX_LRU_CACHED_TABLES: Final[int] = 128  # TODO: add to some configs
@@ -377,8 +377,8 @@ def _get_road_line(self, id: str) -> Optional[GPKGRoadLine]:

 @lru_cache(maxsize=MAX_LRU_CACHED_TABLES)
 def get_global_map_api(dataset: str, location: str) -> GPKGMap:
-    PY123D_MAPS_ROOT = Path(os.environ.get("PY123D_DATA_ROOT"))  # TODO: Remove env variable
-    gpkg_path = PY123D_MAPS_ROOT / "maps" / dataset / f"{dataset}_{location}.gpkg"
+    PY123D_MAPS_ROOT: Path = Path(get_dataset_paths().py123d_maps_root)
+    gpkg_path = PY123D_MAPS_ROOT / dataset / f"{dataset}_{location}.gpkg"
     assert gpkg_path.is_file(), f"{dataset}_{location}.gpkg not found in {str(PY123D_MAPS_ROOT)}."
map_api = GPKGMap(gpkg_path) map_api.initialize() @@ -386,8 +386,8 @@ def get_global_map_api(dataset: str, location: str) -> GPKGMap: def get_local_map_api(split_name: str, log_name: str) -> GPKGMap: - PY123D_MAPS_ROOT = Path(os.environ.get("PY123D_DATA_ROOT")) # TODO: Remove env variable - gpkg_path = PY123D_MAPS_ROOT / "maps" / split_name / f"{log_name}.gpkg" + PY123D_MAPS_ROOT: Path = Path(get_dataset_paths().py123d_maps_root) + gpkg_path = PY123D_MAPS_ROOT / split_name / f"{log_name}.gpkg" assert gpkg_path.is_file(), f"{log_name}.gpkg not found in {str(PY123D_MAPS_ROOT)}." map_api = GPKGMap(gpkg_path) map_api.initialize() diff --git a/src/py123d/datatypes/scene/arrow/arrow_scene_builder.py b/src/py123d/datatypes/scene/arrow/arrow_scene_builder.py index e381459b..451ff9d2 100644 --- a/src/py123d/datatypes/scene/arrow/arrow_scene_builder.py +++ b/src/py123d/datatypes/scene/arrow/arrow_scene_builder.py @@ -11,6 +11,7 @@ from py123d.datatypes.scene.arrow.utils.arrow_metadata_utils import get_log_metadata_from_arrow from py123d.datatypes.scene.scene_filter import SceneFilter from py123d.datatypes.scene.scene_metadata import SceneExtractionMetadata +from py123d.script.utils.dataset_path_utils import get_dataset_paths class ArrowSceneBuilder(SceneBuilder): @@ -18,18 +19,28 @@ class ArrowSceneBuilder(SceneBuilder): A class to build a scene from a dataset. """ - def __init__(self, dataset_path: Union[str, Path]): - self._dataset_path = Path(dataset_path) + def __init__( + self, + py123d_logs_root: Optional[Union[str, Path]] = None, + py123d_maps_root: Optional[Union[str, Path]] = None, + ): + if py123d_logs_root is None: + py123d_logs_root = get_dataset_paths().py123d_logs_root + if py123d_maps_root is None: + py123d_maps_root = get_dataset_paths().py123d_maps_root + + self._logs_root = Path(py123d_logs_root) + self._maps_root = Path(py123d_maps_root) def get_scenes(self, filter: SceneFilter, worker: WorkerPool) -> Iterator[AbstractScene]: """See superclass.""" split_types = set(filter.split_types) if filter.split_types else {"train", "val", "test"} split_names = ( - set(filter.split_names) if filter.split_names else _discover_split_names(self._dataset_path, split_types) + set(filter.split_names) if filter.split_names else _discover_split_names(self._logs_root, split_types) ) filter_log_names = set(filter.log_names) if filter.log_names else None - log_paths = _discover_log_paths(self._dataset_path, split_names, filter_log_names) + log_paths = _discover_log_paths(self._logs_root, split_names, filter_log_names) if len(log_paths) == 0: return [] scenes = worker_map(worker, partial(_extract_scenes_from_logs, filter=filter), log_paths) @@ -43,12 +54,12 @@ def get_scenes(self, filter: SceneFilter, worker: WorkerPool) -> Iterator[Abstra return scenes -def _discover_split_names(dataset_path: Path, split_types: Set[str]) -> Set[str]: +def _discover_split_names(logs_root: Path, split_types: Set[str]) -> Set[str]: assert set(split_types).issubset( {"train", "val", "test"} ), f"Invalid split types: {split_types}. Valid split types are 'train', 'val', 'test'." 
split_names: List[str] = [] - for split in dataset_path.iterdir(): + for split in logs_root.iterdir(): split_name = split.name if split.is_dir() and split.name != "maps": if any(split_type in split_name for split_type in split_types): @@ -57,10 +68,10 @@ def _discover_split_names(dataset_path: Path, split_types: Set[str]) -> Set[str] return split_names -def _discover_log_paths(dataset_path: Path, split_names: Set[str], log_names: Optional[List[str]]) -> List[Path]: +def _discover_log_paths(logs_root: Path, split_names: Set[str], log_names: Optional[List[str]]) -> List[Path]: log_paths: List[Path] = [] for split_name in split_names: - for log_path in (dataset_path / "logs" / split_name).iterdir(): + for log_path in (logs_root / split_name).iterdir(): if log_path.is_file() and log_path.name.endswith(".arrow"): if log_names is None or log_path.stem in log_names: log_paths.append(log_path) diff --git a/src/py123d/script/config/common/default_dataset_paths.yaml b/src/py123d/script/config/common/default_dataset_paths.yaml index 5f7af456..0f77918b 100644 --- a/src/py123d/script/config/common/default_dataset_paths.yaml +++ b/src/py123d/script/config/common/default_dataset_paths.yaml @@ -1,11 +1,13 @@ -# 123D Defaults -py123d_data_root: ${oc.env:PY123D_DATA_ROOT} -py123d_logs_root: ${oc.env:PY123D_DATA_ROOT}/logs -py123d_maps_root: ${oc.env:PY123D_DATA_ROOT}/maps -py123d_sensors_root: ${oc.env:PY123D_DATA_ROOT}/sensors +dataset_paths: + # 123D Defaults + py123d_data_root: ${oc.env:PY123D_DATA_ROOT} + py123d_logs_root: ${oc.env:PY123D_DATA_ROOT}/logs + py123d_maps_root: ${oc.env:PY123D_DATA_ROOT}/maps + py123d_sensors_root: ${oc.env:PY123D_DATA_ROOT}/sensors -# nuPlan defaults -nuplan_data_root: ${oc.env:NUPLAN_DATA_ROOT} -nuplan_maps_root: ${oc.env:NUPLAN_MAPS_ROOT} -nuplan_sensor_root: ${oc.env:NUPLAN_DATA_ROOT}/nuplan-v1.1/sensor_blobs + + # nuPlan defaults + nuplan_data_root: ${oc.env:NUPLAN_DATA_ROOT} + nuplan_maps_root: ${oc.env:NUPLAN_MAPS_ROOT} + nuplan_sensor_root: ${oc.env:NUPLAN_DATA_ROOT}/nuplan-v1.1/sensor_blobs diff --git a/src/py123d/script/config/common/scene_builder/default_scene_builder.yaml b/src/py123d/script/config/common/scene_builder/default_scene_builder.yaml index 3a4be1f3..77445192 100644 --- a/src/py123d/script/config/common/scene_builder/default_scene_builder.yaml +++ b/src/py123d/script/config/common/scene_builder/default_scene_builder.yaml @@ -1,4 +1,4 @@ _target_: py123d.datatypes.scene.arrow.arrow_scene_builder.ArrowSceneBuilder _convert_: 'all' -dataset_path: ${py123d_data_root} +dataset_path: ${dataset_paths.py123d_data_root} diff --git a/src/py123d/script/config/conversion/datasets/av2_sensor_dataset.yaml b/src/py123d/script/config/conversion/datasets/av2_sensor_dataset.yaml index ca5a6837..837b0c9b 100644 --- a/src/py123d/script/config/conversion/datasets/av2_sensor_dataset.yaml +++ b/src/py123d/script/config/conversion/datasets/av2_sensor_dataset.yaml @@ -9,7 +9,7 @@ av2_sensor_dataset: _target_: py123d.conversion.dataset_converter_config.DatasetConverterConfig _convert_: 'all' - output_path: ${py123d_data_root} + output_path: ${dataset_paths.py123d_data_root} force_log_conversion: ${force_log_conversion} force_map_conversion: ${force_map_conversion} diff --git a/src/py123d/script/config/conversion/datasets/carla_dataset.yaml b/src/py123d/script/config/conversion/datasets/carla_dataset.yaml index b3c85a3c..0a2b5220 100644 --- a/src/py123d/script/config/conversion/datasets/carla_dataset.yaml +++ b/src/py123d/script/config/conversion/datasets/carla_dataset.yaml @@ 
-9,7 +9,7 @@ carla_dataset: _target_: py123d.conversion.dataset_converter_config.DatasetConverterConfig _convert_: 'all' - output_path: ${py123d_data_root} + output_path: ${dataset_paths.py123d_data_root} force_log_conversion: ${force_log_conversion} force_map_conversion: ${force_map_conversion} diff --git a/src/py123d/script/config/conversion/datasets/nuplan_dataset.yaml b/src/py123d/script/config/conversion/datasets/nuplan_dataset.yaml index db89b1db..26e01ff4 100644 --- a/src/py123d/script/config/conversion/datasets/nuplan_dataset.yaml +++ b/src/py123d/script/config/conversion/datasets/nuplan_dataset.yaml @@ -11,7 +11,7 @@ nuplan_dataset: _target_: py123d.conversion.dataset_converter_config.DatasetConverterConfig _convert_: 'all' - output_path: ${py123d_data_root} + output_path: ${dataset_paths.py123d_data_root} force_log_conversion: ${force_log_conversion} force_map_conversion: ${force_map_conversion} diff --git a/src/py123d/script/config/conversion/datasets/nuplan_mini_dataset.yaml b/src/py123d/script/config/conversion/datasets/nuplan_mini_dataset.yaml index e3276efa..8c03e368 100644 --- a/src/py123d/script/config/conversion/datasets/nuplan_mini_dataset.yaml +++ b/src/py123d/script/config/conversion/datasets/nuplan_mini_dataset.yaml @@ -11,7 +11,7 @@ nuplan_mini_dataset: _target_: py123d.conversion.dataset_converter_config.DatasetConverterConfig _convert_: 'all' - output_path: ${py123d_data_root} + output_path: ${dataset_paths.py123d_data_root} force_log_conversion: ${force_log_conversion} force_map_conversion: ${force_map_conversion} diff --git a/src/py123d/script/config/conversion/datasets/pandaset_dataset.yaml b/src/py123d/script/config/conversion/datasets/pandaset_dataset.yaml index c4e8cfd0..683b1899 100644 --- a/src/py123d/script/config/conversion/datasets/pandaset_dataset.yaml +++ b/src/py123d/script/config/conversion/datasets/pandaset_dataset.yaml @@ -9,7 +9,7 @@ pandaset_dataset: _target_: py123d.conversion.dataset_converter_config.DatasetConverterConfig _convert_: 'all' - output_path: ${py123d_data_root} + output_path: ${dataset_paths.py123d_data_root} force_log_conversion: ${force_log_conversion} force_map_conversion: ${force_map_conversion} diff --git a/src/py123d/script/config/conversion/datasets/wopd_dataset.yaml b/src/py123d/script/config/conversion/datasets/wopd_dataset.yaml index 78cbc612..1e03f557 100644 --- a/src/py123d/script/config/conversion/datasets/wopd_dataset.yaml +++ b/src/py123d/script/config/conversion/datasets/wopd_dataset.yaml @@ -13,7 +13,7 @@ wopd_dataset: _target_: py123d.conversion.dataset_converter_config.DatasetConverterConfig _convert_: 'all' - output_path: ${py123d_data_root} + output_path: ${dataset_paths.py123d_data_root} force_log_conversion: ${force_log_conversion} force_map_conversion: ${force_map_conversion} diff --git a/src/py123d/script/config/conversion/map_writer/gpkg_map_writer.yaml b/src/py123d/script/config/conversion/map_writer/gpkg_map_writer.yaml index 9921ba8b..6bfb4877 100644 --- a/src/py123d/script/config/conversion/map_writer/gpkg_map_writer.yaml +++ b/src/py123d/script/config/conversion/map_writer/gpkg_map_writer.yaml @@ -1,4 +1,4 @@ _target_: py123d.conversion.map_writer.gpkg_map_writer.GPKGMapWriter _convert_: 'all' -maps_root: ${py123d_maps_root} +maps_root: ${dataset_paths.py123d_maps_root} diff --git a/src/py123d/script/config/viser/default_viser.yaml b/src/py123d/script/config/viser/default_viser.yaml index 0c100964..99059e83 100644 --- a/src/py123d/script/config/viser/default_viser.yaml +++ 
diff --git a/src/py123d/script/config/viser/default_viser.yaml b/src/py123d/script/config/viser/default_viser.yaml
index 0c100964..99059e83 100644
--- a/src/py123d/script/config/viser/default_viser.yaml
+++ b/src/py123d/script/config/viser/default_viser.yaml
@@ -12,6 +12,6 @@ defaults:
   - default_common
   - default_dataset_paths
   - override scene_filter: viser_scenes
-  - self
+  - _self_

 port_number: 8080
diff --git a/src/py123d/script/run_conversion.py b/src/py123d/script/run_conversion.py
index 4687d573..406a7e3f 100644
--- a/src/py123d/script/run_conversion.py
+++ b/src/py123d/script/run_conversion.py
@@ -10,6 +10,7 @@
 from py123d.script.builders.dataset_converter_builder import AbstractDatasetConverter, build_dataset_converters
 from py123d.script.builders.worker_pool_builder import build_worker
 from py123d.script.builders.writer_builder import build_log_writer, build_map_writer
+from py123d.script.utils.dataset_path_utils import setup_dataset_paths

 logging.basicConfig(level=logging.INFO)
 logger = logging.getLogger(__name__)
@@ -21,11 +22,12 @@
 @hydra.main(config_path=CONFIG_PATH, config_name=CONFIG_NAME, version_base=None)
 def main(cfg: DictConfig) -> None:
     """
-    Main entrypoint for metric caching.
+    Main entrypoint for dataset conversion.
     :param cfg: omegaconf dictionary
     """
-    # Precompute and cache all features
+    setup_dataset_paths(cfg.dataset_paths)
+
     logger.info("Starting Dataset Caching...")

     dataset_converters: List[AbstractDatasetConverter] = build_dataset_converters(cfg.datasets)
diff --git a/src/py123d/script/run_viser.py b/src/py123d/script/run_viser.py
index 84164c52..33be1b35 100644
--- a/src/py123d/script/run_viser.py
+++ b/src/py123d/script/run_viser.py
@@ -7,6 +7,7 @@
 from py123d.script.builders.scene_builder_builder import build_scene_builder
 from py123d.script.builders.scene_filter_builder import build_scene_filter
 from py123d.script.run_conversion import build_worker
+from py123d.script.utils.dataset_path_utils import setup_dataset_paths

 logger = logging.getLogger(__name__)
@@ -17,6 +18,8 @@
 @hydra.main(config_path=CONFIG_PATH, config_name=CONFIG_NAME, version_base=None)
 def main(cfg: DictConfig) -> None:
+    setup_dataset_paths(cfg.dataset_paths)
+
     worker = build_worker(cfg)
     scene_filter = build_scene_filter(cfg.scene_filter)
diff --git a/src/py123d/script/utils/__init__.py b/src/py123d/script/utils/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/src/py123d/script/utils/dataset_path_utils.py b/src/py123d/script/utils/dataset_path_utils.py
new file mode 100644
index 00000000..4cc71278
--- /dev/null
+++ b/src/py123d/script/utils/dataset_path_utils.py
@@ -0,0 +1,44 @@
+import logging
+from pathlib import Path
+from typing import Optional
+
+from omegaconf import DictConfig, OmegaConf
+
+
+logger = logging.getLogger(__name__)
+_global_dataset_paths: Optional[DictConfig] = None
+
+
+def setup_dataset_paths(cfg: DictConfig) -> None:
+    """Setup the global dataset paths.
+
+    :param cfg: The configuration containing dataset paths.
+    :return: None
+    """
+
+    global _global_dataset_paths
+
+    if _global_dataset_paths is None:
+        # Make it immutable
+        OmegaConf.set_struct(cfg, True)  # Prevents adding new keys
+        OmegaConf.set_readonly(cfg, True)  # Prevents any modifications
+        _global_dataset_paths = cfg
+
+    return None
+
+
+def get_dataset_paths() -> DictConfig:
+    """Get the global dataset paths from anywhere in your code.
+
+    :return: global dataset paths configuration
+    """
+    global _global_dataset_paths
+
+    if _global_dataset_paths is None:
+        dataset_paths_config_yaml = Path(__file__).parent.parent / "config" / "common" / "default_dataset_paths.yaml"
+        logger.warning(f"Dataset paths not set. Using default config: {dataset_paths_config_yaml}")
+
+        cfg = OmegaConf.load(dataset_paths_config_yaml)
+        setup_dataset_paths(cfg.dataset_paths)
+
+    return _global_dataset_paths
diff --git a/test_viser.py b/test_viser.py
index 58a94379..2df564d2 100644
--- a/test_viser.py
+++ b/test_viser.py
@@ -1,6 +1,3 @@
-import os
-from pathlib import Path
-
 from py123d.common.multithreading.worker_sequential import Sequential
 from py123d.common.visualization.viser.viser_viewer import ViserViewer
 from py123d.datatypes.scene.arrow.arrow_scene_builder import ArrowSceneBuilder
@@ -28,7 +25,7 @@
         shuffle=True,
         camera_types=[PinholeCameraType.CAM_F0],
     )
-    scene_builder = ArrowSceneBuilder(Path(os.environ["PY123D_DATA_ROOT"]))
+    scene_builder = ArrowSceneBuilder()
     worker = Sequential()
     scenes = scene_builder.get_scenes(scene_filter, worker)
     print(f"Found {len(scenes)} scenes")
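For context, a sketch of how a component can lean on the new module-level accessor instead of a required path argument; this is what lets test_viser.py call `ArrowSceneBuilder()` without the `PY123D_DATA_ROOT` lookup. The optional-argument signature shown here is assumed for illustration, not copied from the repo:

from pathlib import Path
from typing import Optional

from py123d.script.utils.dataset_path_utils import get_dataset_paths


class SceneBuilderSketch:
    """Illustrative stand-in for ArrowSceneBuilder's path handling (hypothetical)."""

    def __init__(self, dataset_path: Optional[Path] = None) -> None:
        if dataset_path is None:
            # Fall back to the process-wide config; get_dataset_paths() loads
            # default_dataset_paths.yaml (with a warning) if setup was never called.
            dataset_path = Path(str(get_dataset_paths().py123d_data_root))
        self.dataset_path = dataset_path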
From c501080b9087430edd291a65012072446b7b3f94 Mon Sep 17 00:00:00 2001
From: Daniel Dauner
Date: Fri, 17 Oct 2025 16:28:21 +0200
Subject: [PATCH 089/145] Requery map in viser when ego moves out of bounds.

---
 .../viser/elements/map_elements.py       | 60 ++++++++++++++++---
 .../visualization/viser/viser_config.py  |  3 +-
 .../visualization/viser/viser_viewer.py  | 19 +++++-
 3 files changed, 72 insertions(+), 10 deletions(-)

diff --git a/src/py123d/common/visualization/viser/elements/map_elements.py b/src/py123d/common/visualization/viser/elements/map_elements.py
index 78cc425f..11c07978 100644
--- a/src/py123d/common/visualization/viser/elements/map_elements.py
+++ b/src/py123d/common/visualization/viser/elements/map_elements.py
@@ -1,5 +1,6 @@
-from typing import Dict
+from typing import Dict, Optional

+import numpy as np
 import trimesh
 import viser

@@ -12,30 +13,73 @@
 from py123d.geometry import Point3D, Point3DIndex


+last_query_position: Optional[Point3D] = None
+
+
 def add_map_to_viser_server(
     scene: AbstractScene,
+    iteration: int,
     initial_ego_state: EgoStateSE3,
     viser_server: viser.ViserServer,
     viser_config: ViserConfig,
+    map_handles: Dict[MapLayer, viser.GlbHandle],
 ) -> None:
+    global last_query_position
     if viser_config.map_visible:
-        for name, mesh in _get_map_trimesh_dict(scene, initial_ego_state, viser_config).items():
-            viser_server.scene.add_mesh_trimesh(f"/map/{name}", mesh, visible=True)
+
+        map_trimesh_dict: Optional[Dict[MapLayer, trimesh.Trimesh]] = None
+
+        if len(map_handles) == 0:
+            current_ego_state = initial_ego_state
+            map_trimesh_dict = _get_map_trimesh_dict(
+                scene,
+                initial_ego_state,
+                current_ego_state,
+                viser_config,
+            )
+            last_query_position = current_ego_state.center_se3.point_3d
+
+        elif viser_config.map_requery:
+            current_ego_state = scene.get_ego_state_at_iteration(iteration)
+            current_position = current_ego_state.center_se3.point_3d
+
+            if np.linalg.norm(current_position.array - last_query_position.array) > viser_config.map_radius / 2:
+                last_query_position = current_position
+                map_trimesh_dict = _get_map_trimesh_dict(
+                    scene,
+                    initial_ego_state,
+                    current_ego_state,
+                    viser_config,
+                )
+                print("Requeried map objects for visualization.")
+
+        if map_trimesh_dict is not None:
+            for map_layer, mesh in map_trimesh_dict.items():
+                # if map_layer in map_handles:
+                #     map_handles[map_layer].mesh = mesh
+                # else:
+                map_handles[map_layer] = viser_server.scene.add_mesh_trimesh(
+                    f"/map/{map_layer.serialize()}",
+                    mesh,
+                    visible=True,
+                )


 def _get_map_trimesh_dict(
     scene: AbstractScene,
     initial_ego_state: EgoStateSE3,
+    current_ego_state: Optional[EgoStateSE3],
     viser_config: ViserConfig,
-) -> Dict[str, trimesh.Trimesh]:
+) -> Dict[MapLayer, trimesh.Trimesh]:

     # Dictionary to hold the output trimesh meshes.
-    output_trimesh_dict: Dict[str, trimesh.Trimesh] = {}
+    output_trimesh_dict: Dict[MapLayer, trimesh.Trimesh] = {}

     # Unpack scene center for translation of map objects.
     scene_center: Point3D = initial_ego_state.center.point_3d
     scene_center_array = scene_center.array
+    scene_query_position = current_ego_state.center.point_3d

     # Load map objects within a certain radius around the scene center.
     map_layers = [
@@ -49,7 +93,7 @@
     map_api = scene.get_map_api()
     if map_api is not None:
         map_objects_dict = map_api.get_proximal_map_objects(
-            scene_center.point_2d,
+            scene_query_position.point_2d,
             radius=viser_config.map_radius,
             layers=map_layers,
         )
@@ -74,13 +118,13 @@
                 # If the map does not have z-values, we place the surfaces on the ground level of the ego vehicle.
                 if not scene.log_metadata.map_metadata.map_has_z:
                     trimesh_mesh.vertices[..., Point3DIndex.Z] += (
-                        scene_center.z - initial_ego_state.vehicle_parameters.height / 2
+                        scene_query_position.z - initial_ego_state.vehicle_parameters.height / 2
                     )

                 # Color the mesh based on the map layer type.
                 trimesh_mesh.visual.face_colors = MAP_SURFACE_CONFIG[map_layer].fill_color.rgba
                 surface_meshes.append(trimesh_mesh)

-        output_trimesh_dict[f"{map_layer.serialize()}"] = trimesh.util.concatenate(surface_meshes)
+        output_trimesh_dict[map_layer] = trimesh.util.concatenate(surface_meshes)

     return output_trimesh_dict
diff --git a/src/py123d/common/visualization/viser/viser_config.py b/src/py123d/common/visualization/viser/viser_config.py
index af06dadc..40d08b5c 100644
--- a/src/py123d/common/visualization/viser/viser_config.py
+++ b/src/py123d/common/visualization/viser/viser_config.py
@@ -44,8 +44,9 @@ class ViserConfig:

     # Map
     map_visible: bool = True
-    map_radius: float = 200.0  # [m]
+    map_radius: float = 100.0  # [m]
     map_non_road_z_offset: float = 0.1  # small z-translation to place crosswalks, parking, etc. on top of the road
+    map_requery: bool = True  # Re-query map when ego vehicle moves out of current map bounds

     # Bounding boxes
     bounding_box_visible: bool = True
diff --git a/src/py123d/common/visualization/viser/viser_viewer.py b/src/py123d/common/visualization/viser/viser_viewer.py
index 9c84d057..b8b9ac78 100644
--- a/src/py123d/common/visualization/viser/viser_viewer.py
+++ b/src/py123d/common/visualization/viser/viser_viewer.py
@@ -13,6 +13,7 @@
     add_map_to_viser_server,
 )
 from py123d.common.visualization.viser.viser_config import ViserConfig
+from py123d.datatypes.maps.map_datatypes import MapLayer
 from py123d.datatypes.scene.abstract_scene import AbstractScene
 from py123d.datatypes.sensors.camera.pinhole_camera import PinholeCameraType
 from py123d.datatypes.sensors.lidar.lidar import LiDARType
@@ -195,6 +196,14 @@ def _(_) -> None:
                 self._viser_config,
                 lidar_pc_handle,
             )
+            add_map_to_viser_server(
+                scene,
+                gui_timestep.value,
+                initial_ego_state,
+                self._viser_server,
+                self._viser_config,
+                map_handles,
+            )
             rendering_time = time.perf_counter() - start
             sleep_time = 1.0 / gui_framerate.value - rendering_time
             if sleep_time > 0:
@@ -203,6 +212,7 @@ def _(_) -> None:
         camera_frustum_handles: Dict[PinholeCameraType, viser.CameraFrustumHandle] = {}
         camera_gui_handles: Dict[PinholeCameraType, viser.GuiImageHandle] = {}
         lidar_pc_handle: Optional[viser.PointCloudHandle] = None
+        map_handles: Dict[MapLayer, viser.MeshHandle] = {}

         add_box_detections_to_viser_server(
             scene,
@@ -234,7 +244,14 @@ def _(_) -> None:
             self._viser_config,
             lidar_pc_handle,
         )
-        add_map_to_viser_server(scene, initial_ego_state, self._viser_server, self._viser_config)
+        add_map_to_viser_server(
+            scene,
+            gui_timestep.value,
+            initial_ego_state,
+            self._viser_server,
+            self._viser_config,
+            map_handles,
+        )

         # Playback update loop.
         while server_playing:
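The requery trigger in `add_map_to_viser_server` above refetches map geometry only once the ego has drifted more than half of `map_radius` from the last query position, i.e. at most every ~50 m with the new 100 m default. A standalone numpy sketch of that check:

import numpy as np


def should_requery(current_xyz: np.ndarray, last_query_xyz: np.ndarray, map_radius: float) -> bool:
    """True once the ego moved more than map_radius / 2 from the last map query point."""
    return float(np.linalg.norm(current_xyz - last_query_xyz)) > map_radius / 2


# With map_radius = 100.0 m: 60 m of travel triggers a requery, 40 m does not.
assert should_requery(np.array([60.0, 0.0, 0.0]), np.zeros(3), 100.0)
assert not should_requery(np.array([40.0, 0.0, 0.0]), np.zeros(3), 100.0)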
From 280ce799cf48f19435c04a5fe0aa10b7f26b198c Mon Sep 17 00:00:00 2001
From: Daniel Dauner
Date: Fri, 17 Oct 2025 19:31:49 +0200
Subject: [PATCH 090/145] Update logo and add to `README.md` for testing (#57)

---
 README.md                                    |  24 +++-
 assets/123D_logo_v1.png                      | Bin 1219377 -> 0 bytes
 assets/logo/123D_logo_transparent_black.png  | Bin 0 -> 2486726 bytes
 assets/logo/123D_logo_transparent_white.png  | Bin 0 -> 2486726 bytes
 .../logo/svg/123D_logo_transparent_black.svg | 103 +++++++++++++++
 .../logo/svg/123D_logo_transparent_white.svg | 120 ++++++++++++++++++
 docs/conf.py                                 |   2 +-
 docs/contributing.md                         |   6 +-
 8 files changed, 250 insertions(+), 5 deletions(-)
 delete mode 100644 assets/123D_logo_v1.png
 create mode 100644 assets/logo/123D_logo_transparent_black.png
 create mode 100644 assets/logo/123D_logo_transparent_white.png
 create mode 100644 assets/logo/svg/123D_logo_transparent_black.svg
 create mode 100644 assets/logo/svg/123D_logo_transparent_white.svg

diff --git a/README.md b/README.md
index 95948bd8..87ba2466 100644
--- a/README.md
+++ b/README.md
@@ -1,5 +1,27 @@
 [README.md hunk: the HTML markup was lost in extraction; only stray carriage-return
 entities, the "Logo" image alt text, and the repeated tagline "123D: One Interface
 for 2D and 3D Driving Data" survive. Judging by the file list above, the hunk swaps
 the old assets/123D_logo_v1.png for the new assets/logo/123D_logo_transparent_black.png
 and assets/logo/123D_logo_transparent_white.png variants.]
+
\ No newline at end of file
diff --git a/assets/123D_logo_v1.png b/assets/123D_logo_v1.png
deleted file mode 100644
index 9a61360da44449644fd27580d3ca12b63993cabb..0000000000000000000000000000000000000000
GIT binary patch
literal 0
HcmV?d00001

literal 1219377
[1,219,377-byte base85-encoded payload of the deleted PNG omitted; unreadable binary-patch data]
zOW_rD?4P`vB{e)hWx~~zkKE|6!&6ov@8s3U*r!VF$*Wn2Y%s`b7OfapwalZppQ&|C z%lD0KZ2e$}Ui)E)u3y6UQrFapi)s16y!Q~xHA`+xOu8bJm>5?sz%*~f@M>13noQ9@d#lrB0#$)JD%&k!ec}-(yh&+DeiOz^Fu4*Ain|as8c|;i>WNZ=1_b-Gky!poKdL^Mg zw&i%t@12r4kF1JOQ&Kd?`PMh*QU+mcWk4oPc22WT`MBPr%d-PL;8sG)92Cqu=L>pz zx^oeYtUoRsFC^ZmjR!-^^*|V9{I><=KExm05cnVtltU`{9w~Hy9H|ddi)c3w?T0be z;qBwT;XdF#Vt@A~+xyYj7wvCqf6LMLbE*fE{}y;qYZraws!XbM8{r6(t=yNCG6$l5 zayOKO93jgx{;3|+;-gv-E;w{X6gzy^lLjlAoJD0s7geuq*^++v$cj#W5?&GB5tMaR zgQ>&Tezn|jmSDT7?X`@)pU(cK|F;<^gZ-kv?LGv8o;G;!*40C9Ej;m(*=eupi@W#_ z3Z8moZCbwMxjMUhr5K`=jt@hf6`gn6b5c5FUULl2$7mX*d(6(0F(zbct%C3f`bTff zcKyh($3BzOyfY4tMWr<>U(t%w+IEt|l2e&z(SBjFBYp0q*rPu4O)L^~lK0wwcYSGi zW~ZFBPj66JFXxygfBg%bmpJk@%I1ealpSt)U*^M4=1V&C`_I}l9d_ISow6kG(AFWs zeq%^!jNUloam3?HZAY$c_K}GeAA;xsY#(&^O;xbdfFD2kqBL>*#DUdYdn;|y`NL0_ zF6!jDzdp`%>JcY&bYm6Pyz)Z2?UB3dY#xb6Q zwt2!;9_H#zy4;ToEfcQm?zyS1@MbA|5zTFCAItqHZD>9I@NQS@qb~ZJH;w+o@h+pb zqaPD+fcc<7zm1H#*l_Uu(1DMQptkG)ul3mNW9oFAsF{aawyySfeXxVb)Ze_R{r*9= zXM`=V(zIpsMjgg=Y%sRghCvTBR6}p(^I+_8A=~gPgYF2 zVvO8`T_&fa&+D$9DNF8?IZNn4uT+YP^>oqZfzP^{I$g{M9X?JkLwE!_| z=*L^<_>R;(KA>YA-tiBOh>vCZ)NqJTwDn6<`|#9pC-C>0yi5An5wr5it0!4HcP+9) z$g5fOFZxst$jmDz`cwYHzmYPK}Cw7dw-??F>`OLO<#yGhkC4H5DQ=MZgC zCi@x2AR4?J56AnRjMY_guR?z4H8PL&{wEt?q4w zOis@E{PtbZy1CDUPZhhqshsw2yjRu)<~N*~b5lO$_sX&cTRWl(D&T-L{9`{5;RejP3FCAKj(+cvDg&;9|>FFqhq`R zaC^YQ`)^GzRgZ|AJ@2%9`zY zEo|PnK0S2HcXa6Rv7;PCkc`>ccGEV8UQ&+z`S_o+8kgpt`RO!KS1}^od+lGSLs6x{ z0AETTKe2OeLB_u2BMyST8cFS<9y!{)*`uYDSNo#-fWo-H_&@$2rFVTVz;@WGJ_izF zHG<`S6dmXoH*Xp0K2!6^M)wo@ZjHu%hi~l9mamY$v()mq!&m z_ip^WR!ZL1D)cL|PqqV9QRW}U9C?>}{Q`rrDud99H$RW?Q?LsO*85T#lNIi z9WJ}&i8{N-q*x)W+lop4dBPSZ?KUCJI(EP2*j(a>_{s64pK{#RPHHAlBrL~q+0Zrm zLW`|UqdmLd%stZK)hf`cWzVMPu~ z*&}~Fx!o`^&nx@ zK+#3n`r)X38I$aT?iAs%!WL++L4SaZ%<11hxP!aNd%&vZ8AqM0eOD$`ouX9L*1<;^y>(Gv^HFwQrphAc z58W=nLFR)7-uRKI8!b|J2Q_>h*yxPd@HJYF)VzJ5=vvqsi3lOV7q>kU= z^tKb*pyS+n%eJ|HSns}PYI~H;LnGqL5|4Gn)UOL$n7Yfv^r@p}=UWAN5e6Y3q9AN0 zRXM^6U)bEUlpf#w+X=9uE7`NU^mog+o23WSCQnMoAFyvvA#PAn_anXAef?&x*>vD0 zNkfz!kNyJt2QaPqY^;8wZ)JMdA}jq&URt>QhmWQWFO`d z(XO|7sYDm9>x!*(wz$rfts_sHp2q8Iz{FIB99+agi7O1|J*mpiXj!jBIo9weV6#4x zh98&Cyh1Bm5menJ*DcSJ7-lp~W}pzu`DBR+P-CZ4wv(Q$FLI8GoctZN_0h4E%s-&F zM3#w^{F^*r+Pu&;>5E^f59N}g*fQJl(x%h&-u?>c)0YU<0vVuS94FvGd~2KiO=CLi zB2^YS4|ImeXPx%l5S^XhV@f^#T`ZGT6K)$EH1HCP7ppG`Z;xBxn8>~H=8!|$_1Yf7 z6(EQV=1XAp;>5DkKy$c@GHyhBw3+uf1Vv|Rd!)8O$roz;4e>27x?Fb3#jQL@=C()g zPEW1v+%%O=JMzRddBUV-^3+VUJh5sgRlNxq{j=OQ$b_poPM`}DttVf6CO!OQ=a9{2 z+$UTsbi3Bn?G^Q;LmzeU(bs&W!}-+tAOn&1^RYepa*T}noYB^PIJmr@;0o82I-wg0 zy(1!ZqdUMk(TX}`2f9zfq72{9C;FO)l6RfnvZ?)IJ-uB@p7Y?on6D!kUVCLtdhGTe zSkz%c$IXbcv`2Iw+eP`ye%Kp_GH|0uw^N#T?ibR|)qTKAuK6>yws%9X*muh<_~>uy zmKVmi4zRKX#DUKP95nLO-hS}5n_34QEy{HVjMM_d97G%#{itIZ>KLzi`$W6@2Re{` zg3kTLvUT=HZ(B{7T!Hq5UM}RScMG4HHz(~raYA0DqHp90R;Gog-+MT#RxEukt-AmDXimqRdq~><(9U9I$39kpk5t`#&Es`8+$ZyZ z(A#4>JNp6)&9xwVcNKlA%`0wDL2z`wT8` zN!;+#>-q3rp0#KuH%y>Z4ilafUxZ;jYW_H}!mpL}As($R%{->ltYv?o7@4DV4I?|98 zlL_M|=yteHH+a`&IN|GCHm7UuxzVC2&!XNfWP1r!?+17whi^kY-eiM*kfYr^dX7W( zk7J5;_(q?R(!}&mH%sZy5w>vhoZ|-m0WxP^e^Gj8ouBcF|GZM=NmgF9^6FKYXqEok zybwcvZGUc7xR9d{w~{(0T(;=V4bQgIP5?XrjfmZTrsyK}(>o8Rw~zJ1QDxnh58n?+ zJ7(bE^PYls%xEfJ`wZ7mZrHdZYCZ-!JSS=Rn2&aB{jm12Y+q#Zbl#@c4_7DjY#a11 zgiO3zxaaCWQy|OK48W}crqTJxV%a|BSc_GdeG8ACRj1h}f6T6#u6^o}w0ha1j&3Z% z)O`+4hn;k3oduUCj}kT&%QOyv=sn3ok8FEPvo&7Ha$0B&r0qa6Fm-evW??IR)P2)^ zlBObl0oxqcj%{T31ylU3kNpKZvi+b@7iIh++GZ+q`VD>zlg5uvpE-KAE(SCcErm&W z5k`^eAHH4ClP+|0A^ql{YWWyRQof;YT75$kt_V{mPE4mx?>zS$%}tKO2WxbW#i76U z@h8$Nuk*Zd2Np^iqHH|8e-Ik+)R+jg3V|rKd~q0 
zgxQ==u+IMMyzK2C%nvSpN@>Zjmp`r2=GT;+PXih~l_{qMQ27dT8uMz0J}X<3<;7Bf ztQm6?4a3)CO7tbY+ zQwLfWFWcC#>D4;V8>MiEq|J$%e&GGlL{-~giEI#kKT6)U4Zo;|w+`xY4Bki{_&MG0 zh$rS+xLnV*Z4bEriHFjoPcQFe&1>DK?m8vh63C>ARjYtZq`bP-Q2HlWR<)RrHGNiF z_sXm5(w&bj>69a3rSJ(M+ix%r?T54(dQinM9XwF$tS{Cv{&sfu z81s6!$-P1Ss5jnuTaMHX>qfL(cL4Mb_IMI!-u^Lt^!Ldex|qg3PH)<;KG>*xEW`EU z*1^9Owy^$%)oJCD>sxsU(;Uxz6v_5HAWLIgV=RL?dQ9WeG5VPCxN6mDi6(V}gcB~+ z@uaCo$d{{Z)XIaSsdc8-L;LBh$JP&}?&vK;0n+S%(YF9`Yv{+vK55>4z_O|1-!jzw z1{yK;HTS`&b6)O8G402d*GyL$`ssPI)84w3&$kJdZ)hnP=r=-7u(B-wq9<6wi7tE- zJ()DZclE0E=U*$gx4F!7W=&5MtJiPaX4>YZ%-|`r13@fy9e_0WUHS0i9XAQD3vU*N z6#00AA8ibnnn&)3(GK4aoUWhEy3zR9H&Rw7|0pUpZ`_iu`RYCC@w=am>U5Oewm1EV zA{Dp_5DSI=o>HzN%pCrs%FX}M^xt>^gyfAed(|&qo)8>s|1gAq++oIol#w%v1 z$9cjtVxecvJ0%^s*Zxs8CcZ>MO5eZznod3xf7!{M(C^sajllC?KfIG`869Bde#paG z(kWmvKqETBF~ZljsPj04kM&R^dVCmz%-d!f#~|m2jn48ZVGDe3?C2Rsw(=mE`jz+u z$2muzRwh(Tr23$GWbUt?M7i(A+XRbEe)@yIY1yqBkJ!u(ERq!4xgSr0s7I!q(77#Z z+5X#Vce;L*a%;d3Fr{5uzL-Y&yZ-m5{2oV__Z<4v_?>So`t*NKsl69~I(e&1twXNS ztseE(nL7G?mWIC8A=?f$Z(rwC%cQeyG(5i4)#Y1`RPB-Pg%T-n&yW9DsYo^qTO_*&BFL}zegVW4o&$nx)&pdi(dhT(S=pXr!Z1@?5ZQ{NL@3^%d8Hm*W=xU682Algv^s`Uw zGqzg?b-%UE^tWVdf!WWO^nUj|vr>QHZ5Z?s&^m&o@N^k$1Rj6Lg?EPN42(WsTk@dA zOeg9+@@E}#P)9dfA(lxR>Js6G6;GzM8<}KvV3DNmw69~%ZH&hIrEBd;wcT<}oubww z1E!JRnlAeOl?3kBYvJyzA8&ax=Y46u+3iB__|aqOwRJ;Q3vWMU!?U>i(36QgjY&2q?VCnY|ER}u!;qI)rU zqlafZoIo(FAm_8FJR2%4=I;R zc9{`X8WNOpsSi3;9tzRVP7}tZQ$9W?Y*A*JTAnNP7!fHB9vh;b){e6IygOv8DfyrS zqOE8?c)$NKR*B=tCr%x5i%E2OSNs#tKdINwb&q$a&N(4XpLXE8%5S(tJP`K6y0x|W z7Af2z36YT?hmahgL>16PR|zl4*ceBm-aORkJe+4tW1oK1>As@8b`iF~>l(KQtkV8j zx}y7<*0~4IN&D@wPo8b%*9)?2mHT9wf#nZmNv>Z#*z1>NMwVq&n&So!(f#b9+v`nW z1%blZ?g(x-bmmRb(RRqD=G~^XY`=bX=Q|RmOrZ8D+5!D?fcc;S8w_~EBX6A>7ugNG zHA;tYr!XCELKRQi@I`k1PKyj*V?V!Ir({Fwg3mL;D=`{geDb06^uxF5P&xF^J{_7z zx`()WkL^b{U|=@YieKg!=X&A zt>!I50rJ=Z!?b`7ru46V5cU0dbVlMj^6m@J*q5SyI39oZS@*lTZ(4tmd^^_+FF*3| zwEynA=E+t5XpuUGCs#SJ8oBfr^&v$^)00|Wc{V(ppg5gxMz^m}VZmZ&VGG=ia+pzuP^TQ~^w6Ine{WooC7dM zY}5WDzxNcHGLKWC9QRr{arT0A*#0wHc~H!iw_TlMAm+C{CUf;`^Hd(kdmraqJW&I< z-E$Nb6_MKk+n{lrspa8p(a(9=H_GOrj7dzceqGoC6Q0wLJgJoj$$anUKOVAw&N=$@ zJb_{|MPH=f8KYeKX>*UDny7bdz`rbM;X@z&`6G7@dC$MLW1*032HXyjw=PoJu$HZh z6yAt`&Br`O;$wZ_BajX-rM+6dm`4B0M;G1~pwiXBh>dV3iFLAmv>R#v`oKw^=s5Tq zT%KvIk7b(gN6~@gW8Qww)6{x=2}J7SH2C~i;b(+xJ-M&%;G;UX1mLH_G(eJ+W4dC% zg=b^)!|}cs9u$LD!MzVUA{}(ZDe0LFGz)-qVkW*}Q9h`rOmg{)Zix z)~r|>^EgYwcZ6$%y$bbswjDi`PQ)Ne<>9S&n{_v;*yYyd3vVCu&>=uPj-@jV$?%sz z(Wjq!t43;5Y>sREEt`+~933(c>DGLre@ zSFTQ}b1y(i$^>A8uvD;6_Q7yqn<@Pa-bgz0_CW{K`Y8AFEvZU6^CMC6U^}2(0s2?M zzOTQSrMEQPzwil}w-2n;y>TzE={!eEK{pA#dPqHejN;vVGlebiUi7ukY)CIXt6d|{ z90O48*SxX?HLukucDFAENLjy?ZSPE_((L|$xXp0p_ooe7j6Vhn5E&cU|hN7iraXD8Aqf2zdK zE?OBp{-VRu&#!ux^-^Wul^ z>|{G*pMw&}Di)CYrK?Aco2c;^Wfk5#jbqZaQ|VN>ld@qtUdXoa{>A$30LMiK2iX9o zMe51uYb09`z4@9tjrI05je7e=s;oP8vJL&f;eDHMu_b*@=GfS}^VD*`MsPZOn{df) zQI7%}JAi{I5PhReI?Lvr7c}b3<8PaFrr09Ha;Xn~KCm-_TqkVTl%INNah|X+aTykJ z@HfT53p6I?0gF+t^bP67XIu6emyZ3&uXiR~*q2`W)pWsc{GZux5I+k(`jvFuAODXr zD~>E2{r^fxg=hm$s%qKo!bX|LAK7TVW%8)`vf(l`HmLog4gC%XEzqZ*+8X;3I=3gh z_YpepAKOjQ;|F+uL2ZwEc))(}(GS^hzL-vD`PiIU>5x6kLoCi9pWHSnN|)h;mjXQ0 zh;L-v3g1bPD|5>ePp1c-@7~^W+KdC!p885MR|H1`rTpTV%h8Yj&{9``!R@M{D_UD* zS|r42qyn*xgzSrv?4z+Mi%uSvq1ytK#rC`^{8Qn75DtOs|N7n`y6vo>t#1^%elgDf zNnQ4uRj(xQg96+JIq%FF%+J)gO}ETjB84cEpgh4sCj~p8-J6m)Idd#x0v23&Xj=H~ zWoi9dE&>#IL?1kxd&Z13Y4Z3y!1`7Wwx|Nd}_oeS5LGC>rH8+_w3>wWvN1k3qKVr zHgCoedGZr=gW!Lt4>a*0RfJD&huahD2aZ%8`(UAv?KPkS-1e5y!CS6*PnhB{58iTZ zw+(%?MSq#v@eI9zSo|#uP`)O8!LesY)xh}g-*L^5{d3MSXXLRc_dl8>|Cj!l3zl5M zNT#ZfvmravM@4gbPAmn>^wy0Rs`va$O+Vyr1OP|xtDL;iMc(a)t{-n5wC1B+r|&1n 
zawD&-GgyZJpF!K)^44t&l1C_XiMXT%*4b@1*ab%)s&*?CaD^R&)oyKXMu zG8A8@wchFeKiN94R9_kH2ffq5Vs%7E-_m)o$eWWlj>*|^TsmO(Non6%-Pd+KcKZ+0 zhP6+}EXKqicEZJJkNs!Wd0Z$3XT8+KgI}p+k*zc3KvmnZ*>0PuOs#+9u@4Jsn-T59 zCj2EgaG(MDY^Gz^CdY8}g|{9GSckkdjQt9p+efT}*7(n*Yk?j5kB^_110@HHb}_45 zIZ*WDK2f?w)DuAT7ko-E*5}Pulj539gpKy_+XD+j)&a32;?bsT-hOm8^9vq`D_I~KRumRDhXv(kk_KCI~{4g!hr@s=j05tOG9Q#nf_Na%C z=sps4@WbhAKVG=fx##x6!wyMDOxr6@vbrz3a39FMLBUsg5?ur?|20w65E%+*`MxzT zrrVzEo&y|r;C|_VDZ8o~!$Xse$}lu#H=^~W4vi$z=VMy5%R=W;B}a_=#WHQ?ePMe) zYCmj|@8=tJE~lRkTR*6M@j(Z?BwPja0f1|pI?x_~>)l<}EVd8i@5IS^=e%mo$eT~3 z6jy~0TLbGYYxs7g&U$_dkjF_Mo1J!@$|1c6y!V|KanM3};slj%L3H9HW?M<0%i;%J z&OrKx=T^l&*TpWEN3YxT#u9XK9EoFyc~gAj_<57?s;*?AJ?T_fP&3vB~E%LZtNShLTU;tZ_V zP07Q4k&>qKo?{+jy!ZOQ)FEc;$L)R|<#>-pr)|n3%h_>UI%NKNY0v!+rA{q8c=LDC zrVX6i3{`0d9|EWTqx@ptEdh+i-)W{h&2Um35}JBlm@7;OcAJp;W;3 z@2_qe$8nV5-4CDzav|dO`9N>(`0H}*2$ws$E7=;T9a0<;}NV5 z+%Cx{_-;c}J6eVUkw#nO-JV<1MLRrTTBP#ZPiNoS)=&PFoZpBUz7ISXzE%m{HjzFr z{Tks18s?rhJx$$b*AkZmCoP?^GS!2K2`6|Kd5f!1yUR?WUOGEfYPfXHE+9R{GxBo z+YSYyY`$2E&HQ^s|FsI57z<-X%g3>(f498$O1>TK>bq~~WUND)4;K zZI7^c+=0g=t!gyh4~RK@8akXD`zij&fccn5KVSP{GsZOV^|_q$KUK^r2nJqEF>T7; z76(Sl*PU7tJnuqWb9bzLakLyWelKeDjS>Y1vKROebId z&s*JCn4QN@O!F`Na=Psw{;!zLR0-z`uMx6AY(Eua5OiBa-a7l?yHmRTyZ@_}E_^Ql z`GDB^q10Iq#V-Q>PFvfe-wr;s0DTv+nVh=)f=9bwt&8o9&S|_~E!*GJdZ>BZ$P+L< zTKq*>)BK#N*Q_7C-##Vgv!h*miJC5>Op2{1HkrcCPK@C2<^MOf#l; zkU$_LiV!M@COQ&AUHHwy~7ZlM75E9VEkGR-Z7qQ~!fE*!adYW#5{rW>;{iGMw_1!n2Rg;thHJMNS9HsaB` z)iYbB^7Z6Z559oK(-o%tcMJ4UlA!$v=(IgtddczcZ7|nb6QDz}*hx%VtHS|%W{b2f z7tY+j=JT8~VRAU*=#xFv75UDU_lMUu*MEfN>dXk6=etQieelM(Kq%7WI7e4#tK;k! zyGiyz?iA<)1kM4yj6WD$I9z&}o_vLyYm6ry4%Mz`0d)HVWTXl4rRUAt>OaWwhC{lE zjv49ggY1-wm*U3Z9jn%M+3rH3lA$zjnWph?*ru(Un{M%BLHs;%GWiUsN0F}|$*N1$ zH#l-r@HWXxNB$))I~?-5G2;%YE)@KP=tMRwuR z6<4?hsPgih`os_NxZ;+$e3X~`!2cR&QM{$^mhUGu9$#f?=NcrH#r)GT86?S3kB&pa zk*8c9Mi*Z>Tyg6U!mb^yhn%al_EBI_H`0CuD~O4a36PQyB-1yANrw9%d&%W5+@i&_ z#D&8f$?B$~tuex{D9D(FZ1)Kz34@pY8(+n1;`nQviTiXUd%r)iIJI{Bos*Amzj|d( z`TRDXH*&fd|12i0P6AaTxf^&)UE}W_TG_Ukx@7K5i(RLtqngrJFZ1cQ%9h8eCHKh2 zSK^|so^t$x?rVlo7fMntU=Vz7L2V~HE510kjTU%Tid$3& zJfj8&+w64kGGHfgCgKBr!g&FQw;_}EHVf=8qHh#TCFpt{^+YT?~sQ~F8i%FvxC?<36meU4xV($k=4EIsP+`lyimV zg(mKOW!JDQxg}jcOk^D#D#v0%2WI<+mh2dx^e601gWY_aU9R=OZE2oH4j&ONJ?R1u z^;_ly{r}7m>I+|m^go6o}ES)%=f|(-C zvMn$Es%Y_?yqZf4d z;}5jGaKl_~FWgX7KVB9luDVvei({9~*LjoOV@kItPI*ege?W+X$$mxF=uJE8F;yPr zZjyz0uEqjuY7k*?8nle-<2Tu4_vv^{aV+U7ZDdiCFW9vf*2)R~#GU^fwr^q1H_{ne z((K5Sk3AkowouuSa7!Bm6Yb-z!-j_i=lohuDX|^zYzj}>Eri{o5u?Vm_Asi-0 zO>(hxP8VKs{7MX6@k?%|%7*L-Ur0P@gWF!$0OQt(K3fufU*u)=c`(Uv1&tYT`N-ev zV6!b;Wuc-)QzwU04x17iwY6}uMz&T?BO-6nBj!3l1PnAv+mx&pSTAqc8m@lonFQwT z5#z>&V~9=W5XtQz-YaR=H5Z4oL&>J^utcUt7-|)~a@q@TU zLLBpY*#n~=zCJKk(KTjuOaPPYeM(%mGEE&9JdP}XZV#fLRm5{`13`D$BB=bx^C|OA z2*ZaCw=tH+OKaRw8O0x^j#VUEy_9hWZL?$7&amW}N9x89xP_Vpadb1z{(BO>H?Dev zmw~v#BzwNMw6Q}?e#xQ=z==xh7H$5<*uUgRvfojMy;KdUyuK#9P4eEjTD8efEvix2 zkVg8vP zx1Ulcw9+chV@5Tbn#ch(Bbu`r!76smMMj1HvhAvaM|Dr*!mhQN?RPT zHp;VE8$Jkht^pgneq711X&hkF81S;NChpV5u5k+gQwz0O?dzv5%xADPc=2bwhM^FH zMWho&5@?D#7C7UiHFiSyhesX{yY`T6i!PlvGsa;KYmt{*Xe>?6RQaS5rZSOG#4X>I z_U4UMujOA_T1h-=A~T#afJ??Jr(V^u>JFQH#eF@3NiSvTo+A?=&ztb%)!NEmh}X;b z(mnU(^fZLEnca!T_tCbd=FZQdr0?U0& ztFhcEQlB$F?zOWVb^jqa1;`yP@z`h7>{a$jXn>o}C1RRIRIgTXIymw$*rE$&`zY~) zWG}9HGoA5S0-VHDm$z(H8g)xAar7+Pii(|#Iw=az^1PD|4~NV= z0bzUN_%;QDE4-AY!y)1iH2t<^P`kc&)oGW8;r0t^x5!!}CUwn&-0t6^1MFvjZr=yT zS^UqJ-QLP1NnEyOOo>#lsbH~J#6o%7&^fdeegFt zFmv+suyFPQ4|PSRjkxF1)3Bk#!pU<_$Q9FiP%A#1r2(bAGEN!^2CNg~60$>1ShD(& zuze@DQrGBq(-cu$^&uVZ<+2WC7v97@hFt1OqbFV~cve)dCR@KT4mSHGS;KbPagY1T 
zWIuY+y-l(jNqQ4kV|McGlrR1w$)pb{AK@O8P5fWX$o2DzR^0mij`#I(q85n};fV=d zHYChxgAgbC;Uhb&VM;~%TM0 zdm^P%`hbfhd#-pYugVYZG2#A%N_L%ai8rN*ueW>I+xSK5CKVXI{heK5-HMmv<*D>V zVmqh}wTqXwph8!O{lNSq&x2L*~ zu9us9f7jx1xv`?F?4x6Dq*i!;g@>P69yYw{pO3PyNzOjj-XG7~Z}L5Zj3cTg{wG=6 zdj5v8X9dxDS)kotd=1%P9xz#2xJ#v8Af*UbT_Jm2b*%btvdKqjNN-9@m^kHtq{AD@ zoxjxV!~c)PXN9190cxgREBO%KYybDuksXH`GF7E{Q;A7ee6M?1jpwmWvTtB7s~n2w z^-k%RIB|#%DII+H3E}SmMccQ$6`s21$5QR^-}zV`E#7W&>)4;RX?tiTW?@KWEHh-`|>QW6fj9cHQC3~)% zOU&z?zucd1xFObpNt*#LS{0I~gD!6@+oo8Y@CW_~G&=1B)(CJgCJz zV!a_IUAZbxFufL0cJ$S6ycM2&wf-c!<*~xTix#L4NIYq2E6j6bA9M`r#PP+}lijE1 zWlDh#d0c5FOUov`^cUWgmQNR1Wi7dEn#@aH`pXWEEZwJr>}{r2emvR216~z9aqn~a z9zyG9aWO^Tr0*pjF+1W#C@;3u|Eeol>No0UF>cNx#^sP%r<7hwx86TfAQhs(9um+!*Bm2Ue&S8IB~jHFex4K$th9N*{>opl1ZMa|K63Jh2jnA7 zSKoiVRW%LKqHf;@$A_G*d&vKW6!Y3{+xuF%Mo9%x-J<(|6FOrTf=h?{F^f%O8(BgA zii_TxU~ZAPZHFt46-^pDF`T{Nv`X)$P`($RUlmA~;)9xikmz z#o{;Y+_Rv&Oy?Yx`VpzF(0i&w*~AqOy(wUquejp$#3_gD$Rdv+4+wDp+3(3ZMW+f- zC3(BYgqN7t*?+Hld2crCzB4GU$MAu$LGnpjYb(5o|AV>l)OJOW-t_nNTl$zRGjWm@ z$zkof_d({wZ-_l{Ju%EX?V~Av-oCYQZCL;4tu?Ga3h$Mlz4)RsiDM0zZ$Xoq=aRLs zLKo)cv?FP9>m-wJ-g(3t2^XhMD!Jx4sO+@{dNFQEL;YQ2Mmk)Wxc8SI5Bz-zgJ`zB z`tOn>XT(OP@u~)-DSgPcnh%)ybZYjU_J)HW*doi*xRE2mnMX{^I})d}YOs61xOl2c zLmtX2d7-C;p%z>r{N#!HLt#0+`s3*dRDHlj)SK$j+r6y1^}ZhSc$t=LKD{Pc?*n>X&q!1Vt0JC|LjDXlRGV_<)!CdNk$@eLMPCe8qG22B@+& z`B%o8@XK)i=ZX@10l|frT)yH;qvWIm2618!A3(i>z)EM>1YDc;8e0 z&b<&89C1`Q>hL4GnN_&wi3bDkcrWNK)Ad>|T3v*!vF5SdGt@((Pl-tj_FzHZa%7uSX-o_o5MEbTLK%!GIq%5EA=4@Fw(;uneI0IIq7 zPO-s%?~`p8uQ&$Y?Go_y0gMg_FS)N*f851SZ6MrZvMIjwGJSN&bIHk4YuoUCLF#4=?8BJ#;{n-I)4`Odjcb=)f0x${a<)mI_rV-%9pUY}W*-_t+r0o< zWOZ6-DUE-7e06yBo!SM9aG`xnYnbKDp^Bqb>yVXnb|#KN1fx7GUAgjjj9$p8%xmA= z9QZj=h=5Z00 z`f$QhUtkrEjz677i>Znm#g zh}YY4t6khhsoV6k zB|mR_gX`aH6DC)7AggBmS`Po;V56`{!f-^*s z6_~#ep!$(tE0b2c$Va@Xj)lR!Oq^0zI!&B%cpM!nG4uf=4lExivJMfcheG=b>{NNB zPjB@y+cI8hM{n{0YogNE>)){mTuQHK*$tg<=}Xh6x%PNm&m$=Mq~jLzz~~86?W$A# z7qVB@Jr-VEwWP|szrvxj7KX#-w_Qp<)zUr8LZ^ikg_MoHi@q@K zOpdM8e)Xh2j@hz;{1_-@J{(ZHMPu67(c#MZbE;W5_3G5vWy8|f70OJdD?a)jo3XxY zFT(J2;ILugyje%IO;K@j^Nv|g!;yR$nu%kI=g&Da7`J&{{p2$_R*i8*i+T{LPS90c zVV-MpDltuDvP;LWl!f^;yi6I;MY2hUd#se@SK`v)kk?DOEVFdawK(#NCE2l+vi!ki z7Z>Jzz+SRYJ77Nd)N5h$t6O8c$Ay-)apKl+Ji8V91aj(^-pA64tsDD{b(Z9-g_9iu z?Z1VoKc()F@GMtxV{&N|P0BpuWCspqQxqK*ZEi!^Je~VlZd9mK&sVmrDQ)2Rt65&A z*%?=HY-Nr-?7*Snq|1)1XOuoPWzSaB<_AAlAXDHbE)2U5C2r8W$Y(U`}ioifM86$`8l zo8AtOy!49P#koghos`U#ls{I;#bcdhO-^tE6CJ+NC427EQhZ?+b+3~fzMZJFDP7@S zSKgkh+?}{4a@o;4VWmxa$yuxsgiHQOZ6~ede8u+wEuyNcSR8xp;Kl+-i=ce;GPSbN zFQT1(5`B}6xUmyNXRz#NX~cB7iWSq({xOdfDW#WjD{RW2@?ejtinR`i39jv0lQQRo zsrJz;?(A*RvnyW>Z@;lMbr74n%d1)MTB|UlGp$E%Q7ai6w-ZKNK4HvGr(JPW7|sX7 z>U5>40;$oaz~1EAuDCGo57rb6ZdDD6Bm5z8>a~)SsrTC)_hO52vX6D{ac6|d<0pBj zEAmZ`+|l-iIlGGW#Dt0VLsPyNpdY)_SjO`0dFI4;zs&;mrm=5gl;OTUu|sf;Z^iML zw@LQg`!>nK;AJ3xQSF5#Ty4Jmq_#&U`K8S_uE!tv)`is5{ctscik zuS6=CcINL*-VN|P(@>4LyMoJHYNMJlD?TID8Fk(yd(5|sWb93{PfuxhPEour85}~7 zWOZ4|!n(mZpnTwo$`SQ(XH?-m@jfM~a=;bQ%Z62WAy@7hZz|LOYc?kHik4mf?MjY< zJDxK@O3XW_gQfS0Ln{uh@%B8`^kdHO?qS1KnIrLGPaojoKkQocgSmzjU zu!U5Z#5M7OA^R&^-sa;-#xA*un>i9Tk(VQuCp*~(c|U5PX^iY8xu&te&xBw1@pud1 zr;j@-O%6;DMex2&vhN&C_P=|0Wew(b!3*cih$n^^81Zh{*L+dZ(4<_*)#yx}GiM%| z*?wwo9rOo^oY+lmiPnL#p0F|rLD)1&iOU~dc5#n+f61~djgl+RCR{c+gilF|=W$_8 zyey9|rxrqRaOqz5izeaXvS}gZxnz6^(&3L-SYx50RS&)pc5K_77B+d|!vc&26t})c znZAL|0+&0hMNnL*S>K6AwA6oyBj30n%V|}8Oz}yJbP1FO6&oU4d1hN=F-r7^alumL z#9|%>z(rmL<86eV7#~mF|3dxBm9-|g0t2bKnx*Hu@imWrW z(LnBCnOPt5@{L?}6p|@1^k*$xgHhC%Tl(VBD$a~?7*!LeCdN3hq8-zau<+t9E1QZh zw-<4AlTNtgx60(Wr~Yyani!inyQ0a_bNOQL6wFlFN?AG_B8@>Bz#8!5r;piiNwhY5 
zef7j+!-2!}BX;jn^PQxp7m}^^8V-3V#*dbx;?O3 z=vb#fjtef6>vlUJ(`g_re;2;0#y#wQJel}hQ=(#d6rg;w0? zoaPPk>J8y{%$Y}oLub`4JcqT9ZVX$u?6C1+&0_yeSGK^h zf2O_iokeOvyv>$*0m8@uLJKN!KZoF>d8Fjnnu5U*hnIwZjjUeUu$n`IlH+h|{dwMe z+EFKl8HXJ1p{~ext-L>8!SIQF^pp>G^*ddp{qOdC5$#yet)>eU_b$Rb*BAy9E-dz% zjAN7D6IWcNS8~due2}3(O02c*_V>n=^0iBE3cGC~?CGpyPPGE1V#cwNix&L*n_T8C ze{cvr66Yb3yUjU>{D&thaG&pM~h3Wz#3mJU$#cdr>W$XCAsSY%tp7SN0_KABw3FtB#V<0T3K(k|T@G z5s7imDXwho+5K)<^?+U3vQ})}7HF~oKF~9Cb*qxIPh4lwg80B}GjYn8xWG#}S6iN& z;lRLLpam8s)Z749&^#iW_Qdl}HcO^eJ5C0NNTa{>7i9t;JN>c}>X)t)^`9-NUqI&# zhnFlqzh7yWk$L0b{j2J4H#DDTEUdFYebKb4H)P0jWa+ZyUXi**w&MfNn3OC(jdkhb zO1spB!#&L~!wJGOYmkbvv zE;{lcy-zjZS6{gZjD;^*n346!zI?)hz&qE~a82C%_eJLQe-AuwKTJ3}aoXfCb4r1DauU+m!lKTUhM=AOnvlwB+~il@*%}^rj$aOI^t@^QCL5x}oH8$EbU!np`@q=+ zq4Ys-!jzuurH@bBlBmjtERqj8a!JCPxG>@%_Y_JW?WZrh_Fp!y`xO)NE(_j{46Yy@ zE?@Fe9NuJfwS|hdytO^7UA{4`nBppl2db?7<7z0ap6C~>jN&T=xku2BeE4WZm3$2| zpP?4a)LMa|6L;v)oMt^7bKm3Q0gV-1a2mRc}ss+Ci*!T6DqeFk#B* z8o2tQ7sHP2yW+}<)@-`GrOQ@~Dc!$>9LHD|SQ(_=kXC7Df10l+*Rn$4!7ZV1<^#(M55TaV!s0%Wp^QcZz9OFacn0xT)Wwkh_yltTk zG>rOGeVG^uy~KqHS34=LZx{KLT)yJ6ONWa*R^riza|}5T1m_?!NbgY%^w5}PBdcRj zGRdC*Ras;{u+@pu0P1A5u^Er|nnUoDJR}^k;6vfy!{^l^KXvcFhpn4xSvC5IX(KF9m~yg17bfl}OWDO+JJi}aOQPCQFXP*h@`TsXK4=*58lav3#OHF* z@L}N#CvXljF!b%E^-Edg!dHr``pgO{?`%d~pZDVw*WHxWKfBwQRHrRQZ~Dm$D>8{ErU z@W@Yk6IVRhq_-ATU-{quuLnZVy#Ot|bz%qR<0&opk1W)FA6|CT8e34YxU&8c7fg9k zL|x64#<;g#dHUB)6^Ld>g`e)MyBsVwoKu$YO( zAy@K?o2(dvfJ9yv1|K@?fWSh?)2Xz^JZBHk#brY;G5N_>a*L9x3n3FeA}##& zaR$?6_sD8ECU(t(H`Wqm(f`qtc`--#=67DrlD=stwC(!if5k)o42yQs?nXM@xzi&6Ve4Xil6G{`Jonv@R+$Da2aK9#U%*N(8_wjb88{wTvnjIyg|Unr9pZ@@Vg z{G^A>2{)WUjSQFWeZiZ8e6c~^Cn0GoTt415D8I)wV0}^T^;yfh_6xx079JfA9(CY= zlxXdno5Ia&Hq_ES=8%KK>2N9 zgn6$0f-5K;{Dg^X0VNJDSzNxdand0hytKb&SnbU9<@Ilcm)E_Su3XtYgj}tPZ@91{ z8pq^xrO6I#`5r$OJtjs!7D7Wap00A)RWJSwPw5!9{!}fXhMA4~6bbma&{|X_TkMw{ zA(b9|)cjL_HI5~9T3jNIcQizyd2{mRbHkVk^)b5Y!RNzn`yzPk2G$~6*-DEk>qI8U zR}u=t9v5FJ{n&HZfYIgbU-DU>YWwkp5`WZ!D|wXJQ!u!fORQ785|?htRe2#p*lJ{T z+J@B}Ovye+n8NHVxATaC zZZ33MRTNv;6k*aC!y50xJeQp@2`=9zE==)D-J++SR`9Qx&iP73SDkuUYiv|#|ApHg zySwc{y3^*JXvYIih0<7vQ{iyFEL*kOr%lZ=r zE?X(%(^Fe{e|*beP^~QJ9jRds>gc<#hIlyL*T{qMx>^Ht7Efj8@j4ytn>A;3AQ9SQ=y+aW`wt3!hb8e5xcP9j-u3$t5RYVUj&ZE(89;I>i-ku2q?kOTw==B)^`xxA}blS5Uc`uoXIC zvTK1P4$cA!PTWp@?NJ%)N)%2W2 zQ^R3%+wPJ$+su z$FV2nA8Kse^0)5g4dJCXxv*Fxe%RlVk(_euggNJ`o^TN+`KRAFe^dzRUI5=F@>83k z%kH^v7vYLq>fn&J)B*CoS!;1{6U%cob}`;+j_ zTdyb$Mpa9eL=$Slp=&*3?xxcOyBHwA3{vW^dUusx?l#!#yhhxt9 zOqm3$JYKy-KlKxyUs!pW9rk|GOANeIFjI~2gT#|IxP3or;3~`Xc%Sj7kDX`FOtmf4 zy~{q!8@Fr?*VsGPJsmNAY&d4}BwK_fl+j!t(k<^ejItU#n*qY$!dPr=+P)+3s>6a_ zHT_WwCwlh9{bU1KyZQT%_xbL~JXV1H73|VWnu58&Uq7Aazl`V7@S-CgF zvB$!TKo^Ovon-PVh>W?{j?xe`Coq3NU5Z*)#trdz1!@{=zLEQGpETxELcjrt+M9kQ}?7 z^*(xF3T82Z!28_`qAu&Y z63OC}QZk%Q_$@%u9(zU1id(-gwMWjVC@BuQJYF=|qH3FYr(YEg98-U1FE7U6Ho|Vv zqKm&84j5LSge`4OtTl+?<)R8q6Cd0MAJ5D37iQ7Yo@Oo_K1lC#4Om}PJMimnMMDk# zEqlNFK+$)Xt+WrzFw9zX`Mg=C*tEsTUi{00k`Wnkc%4-G)T!)b*MREJK=AAB>&QFz zaGqYHn@o!-I}~?+9PoCJYmx$Ml7)HhWu*nygv$3TsW~H51TU9LODlBXAd+t_<2TH-osc6O&EFQnyEM#q_~nJ*o)17GN0P7NS`?Uqpse2 zkwPzBu3h?zu&21H{;|_8>t*cTxp8IYx`%#II&U}NJ`1Wl?6D~kjD&-E9GftWbQsUDkcYa7dgwspMq()!Ux>xBtz~V)R~s_jLa+vfR$_IbJb; zPMA4?XOX*4n|ACB|FV(`5iOcLdQ>=Z+M#{Cq7_G398c;!aL!%nSuDP`?%Ex0TlZWm zqa?juS!|RsBROa zlc2{%mx(N}s>K#MZy&UH#?vi^>9~y^uWoT`VYIb)#7Pg_GEEDuyl|rohQ$S6EKp9{ zN{c`JHQFmKtlFKL;18E12c7+yV{KkxEz_dM@7a*9W^tlup}exiX;ItlgJ9XevPH+_ z%PesE$9TnijqE{RS zt2vm0#YS`uPGdM>X_%X=0czsCPc4aKs}?DQ+hTGpkh&46o>YI}VCbI9PrS4%zK`QE z#oHSv|B{IFz7PkDLE67+ps523mG`-@zz~xAfVft2b01dMQS0VoZ~8)T5() 
zjq>bYYkQvRksJTkCP0p3-J!*o{f>?IwrBv?U$Hns)vQ9XZhSK-CWc($|Pw9D`{J}d> zPqr#fhlQ7qE7@}|lNP!!n{@Wu zWn6Z+AJfEnF_AjHZ5bd& zakL~c>ea?-*hXrlCeg;cl^c{R=6tdEv2~BPe3JB>v1dxKgWjqBsB`Mx+rgxJyKr&& z?2S{TCLeGJTci&2jm%x;96Sc;eW-z=k=6L(Wr>Vz*O-Z(iXm%(k9Yv43X?|h%;#rB%@qb5!C zP}k%gw%}or<%#cfoiTk{udZUX;-#*_*wJuHC$#2aCaf&3Zdtpb6(vb;8GF!y8tZVP zrG(oj6ecAIKV$_TAFQ-{%-fpcdCZb(oYdrl9YT*(9QOqGWUJzulVqPhWrx5@8NDee zzD~w;;#KO3>laC0Ry_Hk_hiTK-z?C+6)nB~>D)h3)21Jptj}0yq2FkMW#>TF@?hD9 z-2w|JY?9-GiUm}Ro9tR-MV|30oxIqJ`QXCV`h$C`hgwFZw zv2lSXJDjUooFvAEf#*e3b(M4cz$l$!BeDmjjw;@x$xNfCu-^OJ+(vZ3McIs$3!fe6%Zv8wvseP zZ3I_;4C;tmtuJ}{k=P%Kc()e1RMm|#qW&Os$*NQNqU)Fo=8va}S90PgD4kB+T-ylW z_l-DE4ATBr15F)>QmV6f&dTb$eiydA{d(ZrSG^&BG>iTka7l@SPXmxSTElZ2)?J|EdjtlT%*wNIMN%es&s-21X{(&7nR@%F}*2E0s@xV*vZM;0o2I^68ZFU3(h z^xagBhLR0xg-p4e{OnTvxxD2`oN84e8$IZ z7T}(w!+pRIq9i^lKz`}$f3@@m8t(-ibe&!mczc0@bW%*hwOCThd|4A~T65bJjhu%zOi zpNh_&d3abj<&aA6rm)A((!RH>e*eU%VZ*{XGpG0YvKHfq87z(?D`f0^ag$^&YyD!) zvt{x5fPx=P8t3@s*tOY2#|vg3NdD;Mj3lfRC$4NtN0`@rK3*<;WP@`CjD26p$5lS0 zg-n?t;o{(uW!L^`#pHF#iYMKly(vH0#D(L_3Ju(^)X-Iyf5nVn6j?RS>o87OhkGY zLA8uLa?J2>;w48GxyO0rR0AoU?P26z1{3bNWPJM*EP9W`-1W!abItl*N0Cyw=3x@k)}&BW?NxlqXp`cmV#himE0m^?j9w+HCb7%`KTERGq*L{w_-mBQGE zY~;57Y=EaZYKUDjdptg@S4Bv|9mc3bh&q7OUg*@LC;50AK0O^n@%8PbxbWTt@|Pd7 z$YaRw3$^{u^S+eg1DokToqpoAA@uQHfPLNDvXl6ZuFeGCWZ{PvzOIZmzxI4sv-E~A z?=)QzlBJz1(aK^2Gh%V}Vq!(JpY`d}zZ?>dyWsQTnI$*aAa9K|9^9I~;H)Z%{tAmP z{aU#3uWJVyCcDcmeAfap`wb^h5MdP7W7vfGI#Yaj+QAkVW~Lf}L-qOL7DT^0L--_JOMnn^^O-=-kf^vWh*WiN-mn;s>L`D$ixtH6}s%j}8V;zz$Y3ZSyubHBkeyc8JR~q6aj$Yd2vCgW8Ju?H#4!J~>EWpdp0C_qTJvUj{l&M# zA=AfN{x;v_a{SUVi!vvaCZv-~<|8YoFnWR^$uEk*s?VSy;f$;1ho#rA4tsV-Cmf-V zS$Mz#Ui6`I{$LL^kFWqDvap!4>7HXN19mWYNo~06@0`rn&fYJvSKD#_U^mHXD?GIN z(eV81FV>P}vDNLo{{8;zEVs65xWzJ79Z?4FfUJNGlCFmIIvAk|^R_10V~SsL@)!Mq z2_vMyKXFFwH$iXem(0h`_V^#pEwwX`KG}9M6%XD}uMuOOiDXQ29INOG=8(p3;}?s$ zZR*~XNY#(YSVRV@C)GVR)hDv-9%nuRSDc=>(o#AlCta}pmIfDG?{m{|Lg5asbQX17 zN`~x7ovG|-=-;yNTNX4+2Npi@wSJlr;rWb^K^F(7!!&a;Ii-P{ixPd1)8sl~%4~b6 z>YVWGV|N&qh^tyJuYEL3nR9}me(_n0E(ix7HZQ#K;%cAHd8Yi>0@t(|d=yF(q;yS) z!Zmryo^~X~?Mb$z8wW3O)DHM?kQzceMrCl*%^En;68n@-aPhRs;q>YJs5mh6_r(>h zVfO3)g|la9*hJn_Q>USp#tD;G>R_3h0%jKu!Y;d4gL?-HddGB4YBH%`!4;ZpV6wyM z@1+jrgZzX^_wjm?ExB4A@&$*`BkjxhAP!?oX&5Ixba6~o9?k~-R~EjE@IDYv65efr z{wvaczzN^AP%W}>dh)*3f&+bjp@09s?+>HL9~kDH!c7IKZ^yFPUijvzt8mD;gTs+iX2rg( z{>^+814X2iMrp=La@6x+1W$il&B|A_3|aHiy70=T*IE&hbiXkcS~=AS(q5{&2;_}? z;6A9{#br|(!ac`FcKLfwmgtbjOF4P8e5ohJlWo8`2Zlk~ziXhWdrDz)sfzrsa`7M(R? 
zS{Q%eh!obTwGloY>3e_mx8p+S`@H}?Opv}zC^=hB&cqbA)I&~Z_M)?95^54{Nb*W zE1OR~gC277`M+_z)6y8@%2pg}aeoP?CV_K7r7#rrD z%vHG>aSO&A3lx%|IYSF7am^9p-dAyyMiaNBt30qn;-&mwqBU^wW&857g_XVBr_`vaDTyN8k1LOH{)3KPK25yIu5{p#c+%2&#n+|4$BY^sE;#-} z9_os`eb&s-88^EuRWZ?U%uQ$a}{i-lIUAJ&+>;frNQ%B%YX5`aNZZcA*f&c0J~*k;YD8z_x@xAgxY)y+~HdlppLs3ttroKXA;5Flm~7 z-7}t}bG;*xotW=#$y~ThXDo5Y-~A=^6v<%L#gaMv#WcWvX>T3czGc_w9ou$sOHoOm zG=PKQtJ&boOd8*}aEt|x*Sjpx0l~=)U-_4^nQHb@_wi(dL$Z}}sZ)>DB@L?Po_~Kb zeEdJ3j$cJpljKyZ7K$D|s}!Yf5B7}K;o|YpXZLZ(8l)1b&3KW){LcF1G2!W@^=FaH;|dG^j|JQb znl~ssK{{LkKiWw?;@GiC_uSxUgM;G-dHHP9YiI0d9(_vJ-x*E^=61dZpI#OVpv}Tr z3&NbKGo^NgU-!_>fdxZBx0tT_Q$65<>_drZY`|qh_CfLG>yLxjgv+ll{NomXHW`a9 zJMqE+c^%|0Y=QOmwk^uW_^A)SZNEq8UVt;~V_M85^!?P`*$t~abgD3^5sMq^Woc$I z&cNZn3XG=|?t5x!TU3jGTPvZO*;0t)>kt6FsZHc3oBYJpHl9mX+`aKwpkhOu{elJ; zT>C{c#DU^ZD152tEb7?c>O5dgo$tRJ&LmRB;hC+={?J|BVx%!jqf=Ty#nG3sWN7)M zq|86YBUuJ#2!|hiMwm4H=)moYqE+|(B%FN3AK2BXwqFqC(kH&)++PpNZuoZCzJaR@0T&r}E@xuo~ZX`Qs4L-BgSTnr`sYCPT z){NRo^NjIh!=>|P^**6KdVXsuw?@_JVtc>4h6FsHkJp_FEtcM})S>he45qp?X1EpV z`qk@-Sb_UP?#!V2S4txFH!$J2NvA(UaABSc_Z(RP-zM4n`}8DRw6^@f+Q{mRI9(y7 zmHSAiGPUs;QGjF=%II?s&KAs~(!jSP% z_%oBI14579ye?ey#YJ%;gr0^f0vdYaXjXrkzWi`E93eRZ0+xqPx zCfn^RT03^_3^y*jwJb6d`4S7N3+%pbR41wz+2Ihl0`=Y``$d*~;H4~GX~0EZ7WSVc zzq3uPUszVMO_t{@y8U-^s6Pj=c#vgh8fC)wxc{lFoxNSZsOzn7eWJxKds4XAU7 zQ^K+ndELw0|8xftMC%{BJ-qS4(^7ln@yM5FLk6K{PEC+ZtZ{OUCf-2!o3-@bwJ#v9 zxa|iutUt=|k)y(~=Y6hB;$Q>LvOxbOP;8xSNS93FtKHudr3tN30=_BeSdeK;ij)}*HG z?p=Gr13z6=)k!RA&XxLUQB7&V9i-#Wn`v(v99OeG^YBYy$2+@hoV89U8FSU`f=;Hi z`gNZ~;@FJNDQ?PES__GICAPN5A|E7M!21ik7#+~)!`@1InG1qTe!Z63 zDf1SG5w@s{JzAwT4~>slb3TUE9_Md5e$nF?qs&23z}LhFpME&(wsW?EZg;wJgIkOk zFne6G+DCR@*R+l7;>44WxG=9P{U%OcCE>T<5C?`q+D~eLGlvdEDRFf++$DSd&00E^ zELXE?kfF zaXI7;2dW?>)rs(7LV}tmCNO{Ufi6GEo+~|J(&1$=D3Zs8y{FvjQ~RWRKV-s5Qs5`q zH!Pg{EWv%6+8yTaSoQ~1J|tb*@dCQIK0$os%amwKt$CUG9-?S zO+M(5bj1^Aztz5A{$vInQd@ePmt|L4(&a-s4_l~R0(#(QtHO3$2>FuNkA8laC`O{X zkI=;VZr9nLSx^I6+^t&r0)r-9wX)Kr=dmIb#}Wl(*v7FbyIm1We)*X#+pDN&-=j`B zJWQTZU)-7L0t;0hVs`D)UzBO^%fvkyr1_xOj7h8t;4%4mUoiS9T>VyYN*ufVzRUWCzYVnKC{z1rQ=@S(%Q$#YLIODY;X$=<88Oj%RgCeEUZX8`x? 
z*%Ox9YyJy*!*qX(hwl_+-AMPGrw&Io_gxxzisCI{V@B0AM8ICc6^S!o-EDsmQtH}U&_Qooc%6g~#E@cdcj_&>!={3;|IPMJ%R-Q!>$FWZ)M z)W+DF?B1tS|4>}d@#XB;Kb{}%{qd9G-P)Kbi)b-2)#_Ye1Gm*__|T{7_xA1FwmWbi zVXP?I9Pqo}{4XJ-djV?ieK%V=^UQ;R)s?VnMV2cZ9L&nV#n46P&kExYsju+N#98$y z3+$MmGra8Q5Xq{Dy>Ux6rY%2o2%QJxhM|Z)b>8g3ko088fN#Y zb(8q=Zb9_1J1x)*o}|a-F@rchl1ok>1@rwHy&MPLPjZ=7lWs}Xh|5~EcSTp7$|Ak{ zboB!_)Dqb#^7fzwGKjH^i&RhG zl2s6|BU9JDz9oy(PVy^x8MjQ^qQHxP zm#yrK9OY_f1W(@mqp)?8-iW27nt2+Rw&i4m@s?-K$SOIVs03xJHo%Lu=_b3@!CPYlAHz;Q(78qT64t{IYZ-^sNyjW z7$U7MP+r!0=*5@A^P6kut;Bx70{3e`MAlf2K;8(`1O+S^nz22e7Mix%#U+upjun)lG@36$(&uvg_ZNHiSKOM9#-7-Yz>>e za7vguXJUKb?e?E3UG(1UDR7U=Px1R~{8cjU7?*c=G<|!=7Dsa%qbsY9Nh68RaU>GuVNI2EQ zwg}&@67_NY(pze2^0tMGPdKlJ^;cQG{_(J89tui-N$1#)B;tE5;srjc~c&6>4FiZ0u3;LB@nfP|{^+FNQRfi?VrsGT}KjJjS z^Z6h{o|ks%$+P7v{e9&e3oTt?f)n>aW@k(I3qdC zJExOKeDYw$7^4ia`};l>MvP*fY7t*FwpSsyfPP9RUiLfjKv5)frRk|qiTg!WIoWAK z6!-DGta!r65*ZREp3uSVKG%TtE9#OHWPUNMXza)l;d2YSKba|mA1r?|Y~8g>YUG#A zon;Nod!zG7Ts5I3z2phZ@af|>*gJScfZFS zj$_isp%z>$pv;hh$(Bzg%LhC8N5ABQeDWo8?cu%pg3i2{OE?a(V`>*a7f9}86Rx&x z;+AyP_SiY*(SIxY^jTNLK34%<5ZLo2tCxl6Uw^Td+Qo~{3!@KgyQ)|7>8tY72l#!r zMl`{#7GhImODUsw!eoQ{e(d{lY4f`JvrnfdITtZ9e!Lmwfsdd55ohdlNq^OzI61J`ntw*^E}pRYwdcc{rR@iin8-eCCLM9K zy|4GIGA6g@9ASZetzRA~D7rXush2kIgRKk(RT#1-d?9h_q>__qaJ!cp_>$RYc%LsW zIwp*>Mb|*kw%xnK4<3E8mNa>tcEr>?n9|8v99VATftHi4S~_)@WYzHC>eeIn;jVRW zwmp;ekY)awD}ctZAn7I7LkYJ=2WR&<-FhB(@?uDpW6nzN$X#E zE-YX7c$wr1175PAek{8`et`KtEgSt+xaY`Tr|(MFc#tmNy>Uw-rXfET!oLkDnl$!c zYai|k?k=$qet6CDTB38O%?eW{9vb_5ES8n3eri~AuZ|`<^xn!6wFMx#nUO)+6Qu#1<`50q6lF+QVDDrfYPS)3 zimyLpR_qcjzN~i4BUK8+uaQtHSehVvnh?EBaeB*rK8h1FAKdi42CNTJhn&RxsgKR= zf?ry^z(ZY?f3$LS+gF4?Y#-1XX7{eg6Eo|wtqF~jwf5}Out_5(jRl4hlbJeVn%J&u ze@2V3`3no|yHZAm&=sVM%hyj%^3~)eo4Ch?!6hsH-Z*7wB9*DhHsDz9KfmuY8a{GZ zIQi1H6KL;ZGS#h6H^ufdQz!n!y&D3z&v`oC-n1}mxLwE0@s2%vMwmFYewE++zi2^M zFv`r)A-JEHsbAXAliCUlxol5#zeqwSjWXa()tr2Uq@ul%5KBf}|| z&8tP<^5(X%fsbIZVB!yMd!4Rs*;6Z&3SBsAmJ=S$Egw=h|I@EHDhwaNp{GV4GYwGf zpmy>)?Sn2q*)*?+`?y}VsIiT?Vlo~@#7C`I{B)D3v(+!IwcFC3sU`cN1*e6{#WU75 zhka14e&Bk`jV@WETTEmBMcx5fq4dcuTiK=4Cy9eDOmeAvKlun#zMfl>6@9^^$(3fp zm8V|Z_4XnCpWJWtMyGak=6qYs(U18vlj=40ZFaxz9%JhKEl#Ueo3;7K1}T5{KFM92 zZFGA^$~FK7Baoxd6e^{C;7_XbIFRkH*QI{5pniQ@(1(%vVgy;g|dO%Pb}zWC!fI#WPfV(Rjnk zJC#zuIbTYiGUtRc9WHMjYJvVt!9ADE%q7`#WQ8&=x(}vC9bOWCkT`Wx$;mXh?N9@s zF}!wW_3I1ghl5Aye6&NXeT4VeS=o1&t*pgnQFr#t!}GvN(^(uuY%*z*lMYu4r%n<_ zNV*^13|z%}{FT?k)30+Opho;^SUxDJG3y&syXmt zhf|6xB)q@;)GKP;n>buG`G^G_d*J>Ufg5~m~<1Pj}}W!9N#9q6SmR( zYxf?leRQLJ4fxew)-gru9i8)Q3)e65SH3+pYcMQ9*eRD?{RUkXQtgvkd^Kb0L>alz#8WgoXm zwrDX8*=VC$`y6B+`n+h-x%hRJUVroR?y(y8ovhVsutIgHhCygtP`S zb4MJ!@oWaWebLxHgf*o2Rj%#BUah3|Nm_KtSIc-ze3xZVlbSfYfGk~HlO=Wq*`)V{ z=j;brZ)+f*BvZ$K?&AzQ;DGQIdlAM!(Y5wbtMcU(=gpoTU%Zh{*z9N>&*&%tCuyyc zXl$h6LebNB(D2bvz>l$U%~NYzk&{GfHJ-_giAH57K^G12(99=aDR4 zmKwZM_&c+`<0G)};nnXwl0PfSBUS^Mu@}v0W67kGO5P4QYvK8|WM6#RKCZE5SqVzJeACyoD|0^*$0H@aW%~n z|AfzA?n%?)oBurK<$M_H!-A_7o1^i4KdC>{hxLq)*UrVN2wY7&;iB1mQx>x#QxAp-&}V%>uAJY9qMX30Z9<3@%@fd0cilE2%%UP!T8Zmn=TN(z_{Kv*hQt-|TdYxJ9v>v}M~n z;nv6QE|YxO0G`capGEb8Bs8}Otr7#M`ZgTgbM;O9kbOUwtT-v&?~nUUyA-SU0?4NP zN=~}MkpGB{h+8`^KZ(EpcD#|s@q6*5hk@5U` zCd2vLX&+X5_?i0iVCK%RM~_<|Mc-z=PL&%R94fI?=dHHEE`OyZpOSlD$t91O<}_s! zR^#vGoK(xWgTet}kUqFIpw1@Fo=Enbv%Y`yyUEv#*Dm7+r@fOOdC{9en+J3p($b^j zkZ}Bmzi4nh)}uGF=Iag(u}k46v|ricA+d`skg=ci(1nR>B5ayC6<5BB@3|!z)ASg! 
z$jib8xvK%U5dAX;hoRPQzIGDlqys~Dta~mz@oIe$bk>Y%_Tr6^@uVc3lreN5o7^Ot z#zfVkBr^`w)#3a=4#ul>fA-XxmU~Om6)jR$g33i)-sr+4!*!D3Wj~Q2QB<~4puNga`beiAYj0huU(v| z*?GYOc@*>|(>eBV$xFu@$$0DdB-xBMLtF5nBdUB5US9uJcy+_3)}qO~e~wtD-Jy$~fTF<4KmUbjz+*26U*jCv-pCGT;YkKdu2A zTy!K+C)wN8@k;(}b7JDG=;7COf;XlrK(dE{0$8-p=Q+E}`2Ku%Os)-As^yzv4*igt(QpZSS!(D=z^VwW1gVym3gq{IEfih~H4>^iXYmRr;|(t)Wm z*a5B$tBk+e!iBc`XZkAm$o$!1)`W2$>Z<%V5Ah>wi&zZtA`Fd!YS0KUS58(usWn@j zr2UH4+BY_Z751D~L7a^~>o6tk`$tuT?5D{J92xRH!b`4rny{oRu5jsn;i(j<9H~I{ zooVL_riUJ~gj@+dCg%olpKc$DAr|H*PsOaGv*ZlwN zy$84+#hK?@$|xd4l0aEVD58V_ArK-OFwr<*Gq%Sb8{1<~+SzdL>^?j5?CyQ;-RGv~ z?(Eo}v1ja=@vhStlY_}gAS5J|Ba%P~5J*BfqWk`;``_Q&_4PSA()nad;=Je7_r|KO zu6`?ZbytTSTlN%uqll|*Wh;fnFRin)r$*WzeHXNyeMTb9=uD0-4;aKa(nmJN>o@!i zdlKWE^iXtQ{Vn+GK-ue>d8|oTs)tVl@&3d@0i8N_vfcl#EBHndkG}r2tzhggC3)v% zH`7y>WUXNi*b2$xbDv3**kOX%xsbvKvWt+>`jt#J-VVP;4pxw#dpR_6$-tr&Pco0q zEZTuR{JCeK8~rXDX;N;F+4b?jmiuGv`&oFh|HB4<=a2sa^2?|p2G=#awmD^u5Pw9O zwfyxWT2AaXOnpuvEhF@ji56D*f@LGRC0OwlH^n**_~eA)|9QoKpw_p+$yWk+qo66z zOywIa!mktw{K2|qVcn9Ky{JjLu3e@EGn9ys(UFfLThhAri{-Sem3YHGu+h1{b~#3$oGn#mQL8u!n_?cHcbUn6*_1DTY;w~Yr|Fo`KXz) zD7~mffqm=Zi;y1|c*FKwpS^*{TWT=*ymP|Y-MYmdja%Wh$pduFu1w+k@jN0%2Or=2 zh}d4#TK;A^O2+*LQwyXF27H~(B-yXzsm&o1sHgJ1nwMcie6JTTJMlV>$2ums3b6fM z!{;mmrRmJ&)i2+)I-mFfe(~yq<+sUu^gb<&yK2x8(aU4&$@2M3RO(g zs`3_rOG)A+ub@t%$t;v$ls%zM_v~_^LO<8u~fG1!yVy`O`Bsp_p`$2)sELu_}-*m=G=+Ds#0lM%ngj?LAhZ|nGAcAuDh(=ab;D7|#%`mlA= z&Qh9}+U^@{J9s1*eR==Tr(az=#WQ4GEvPLbp{Cj`wL$(+={54#8lMo)Ko=V9CsLkn zZbDV1d&bf#Z4L6uub1108Zm7w@ZweqOZdPROIG~Zm?DI%knB4qE1>evA{YYcp3Gu?+~bML4GdyRX^f|YIw#Df(u@N#@rIxxHs(s>{Ew2!ZNic^Ix z`4X_~SSp~~FXf4u#scN`;Or+#iFWDKIZPUMk)3@0XSsy}qMzqQ=C*tS=^MWe2O3ztk!>cxaVzQ|0URviCC4N>?_X*RhZ_1>fbAw ze{|?zSn%YJO0w24d?|VKRou`n0S^^UwLmV^v8huO4^C+>BW_c{RD~|3Q$Cgi!ZyfC zzzvfBA}y0|Ts$rnHH=@iWm|a0UeNL|`s_Yo!0A1U4y;zqvVoYl^>pU%HA(hjmhpDA zx!@1j%UM*m1RghDV(%BoBv%x8IwC-NupdlFj-5I!(ZWinO0W1S)-l0Vfc@_pF0~9W zNiSg14MRh>o;*6-SWwIR%Kghq3AgXiE?jCKWNHbtGY7fs_BvZe^fy~zKu1vfLtCXF znTeun3F{hwb!|n92g_d=P+hZW<1a$Uo&`XM%!gD)T=f3A!YZBt^{*@xuzSb8F#FM! 
z1>Y!w9Zz^E&chnp26D$X%YvPKHn9Rzf3rGVOuX{v2(#7d+%}d}7Lmbhtp70)>p<)S zX$f>P{w51)zoEL!HG}qgc*%4vg{PZtiiHBM9CKOdYhS5tG{lLJZ42muT{y0vaFrLe znx46AcHoP+2{75>IW+(uRe6aA1L&#(S1Dd8rksDE}5*L+Lmd+;DWaLpsrOzf`gT~DW zz0Vz@7!fad;TK`=&aEX`YnbuZf3@zb$(9~V|5Y6ynNx=+ELoQEYQ1#AWZBbIJhC>Z zE`iY|zQwDi4LmPQ>eu+ehK}+5*$a!uh$Opl_~4?2&Tow89b9ISc5<1^XEj1M8X`7V zV3Q|0w|92#4zo9wJ5Nh(jfGVf@EPi275F-e9-dAJ5cW27UVgx#4og_^g^~OCDUS~D z*xO6)ZDNGq!B1&rfa70w$A!l&8}GgKVR&=yrc(N2rwA?9FDP;0xVa1c3Ih{cj4lnb^>Q;S*})5B2M-;=%Bi3w`#52D{{P<2n=&on52qZz_p;vE zxhp)hVs&vW2zK7M@XRx^PRy!Jt$Jl0obS|pC#x%%UEoX;WhOutSF}o1H6HeH`=#y) zL<>s}Mn*>jh!(EuppZ!~EIZ*;4xa`cPjYx5JJp&W!xxaWONIP+5V`*_6aTbNf7nEO z>}zPB+~JTQp=J5|5G@yPfr#&#W9i- zZv2`ZtIm~K*~&6f?jVt=BlHt6Xz9@VpmdtTW?mc%n>J{=1csW5_f!g)(D&>xbwIy@ zZ#3}(`^+b$_VPI6s(!Oei(ljBVD27fPSGyBzk5%Zwf>zn;yM%1 z!C|I)>S@(Y;zV8;+h?JG7k;|9s85Bkl>~Kd`sd0kTdbZf zdv!y0EK0#3&Y_rGX`*HFIQcR~)MY=pzItVA;&nqq_tQ)6MbfueU_egU>za6+(&A;$ z_571OzM%~6yzG-A8-^LEFR{%JmTy>9N^|;{so|_%eM+*{GWXAVEKPzd@stJ5JHILs zi>|Uqh8`c39T;Era_PYe(t91UmIgB|;PXcHEjS}?nQ}v5i^xX9eXl%HO46FoF!jQV zE!X&D=Bu26ZXlG6OI&2=fz%5X{ZZ_f+J#q^&#rr#!y-Lnfie>K@@h(^GDk0al{(a9%BaxXWR+;OJmRmCrJq;Gj}&N!k5`*wX8mf9nPjRGD|95V5`G&y?Z z5f<=qb$Z}*GDYUSUNrI}1v4qxzzpwyQ)sf%RH@k9#^HU<4fnUbRMGzgi|^y^~3e{Ao;Ok(z3XWc)sS- z=zUK2Fv>QZXf)_DzW(^Cz(mW#(R{Sa$2(`x>4Eo+61d9v^DJZ>Gfmt*;Ww_NQ42^`f@2v}OtX`BRi9mOmyl=+RH60l6*z2J2 zJ;_9at7!RiQ+lP7VoR<#20Gw78OnayoZ^NFGrXuF^r}s3!os(>=T?J}=U))|*lv)~ z594p{lV%cc62z?5?4jbc%9_NiSv1~CJN8Jq3^^aqn)_z?+a?xulZEA^VW4yO*UHPK zmrUj8%bRPUYlB`oVaYsJI>J7_WE>L^4?Zb@#-a^QjS`SEayFuy!~fl%cD7l?kG&|X zxw#}8QW`^Tu7IqvqocOxIQB>F0w2HX(*5*OtogtB`|y!{P@+*V@#b$-CB+>`pBele zUY(?9;i^tk@qJv$z<|f8F0*L+r34raf5{icNZV!bnxO-{sG;;fzOgjyKH&ZA+x@=r zry=NBfSYVZOC41<=CTg1iRQeh1Nr499pB& zE*eY+1gIktMyC!=GU0UaA-OPd0e_s7uF{j=rULdY0xPcqS`?w$h}F>Zy^4=pwU7Jj zdud~6d?MYxylcdqO15Qxa9zNY}|@^(n}pziEMMSioIVkI#zM zh7aBeb5^}k90y|J&~ahNxdThG)-u0-^-(LfywGcjJ!K?91!*V;fg*V<`xJ8peL$aI z)k-W~m0g;)@&jfzTH2ZEy75rRa50p7%?b z(MQ?eoK^GdKCEWyr!6#>nlC4gqVo6hl$MP!dR>bYr*W%zjw?B|z{{W`SXmXcWG50X zRs0hvV{Lh=F}j8yP?M9UnY71^XZwShVz# z9SK|15@25fMw=zU$A|e>cB-BI>3(a<_*l?h)+l<CSVfE*-)Y8xS`d)Mc-n zy&-H~x4o47CAQ+hmh#66d|~;l{=L%Zx0;}v1rko%8+brvAR0N>LuKx-tNcKiNfNZM z+}1*}i3V&sr#8m2DofZt434-zegeTZV`3_Wd#sOJ7+Z4(&fg?ak`6 zRq#SCXKavBl(7U4htqm?3KOp%>JJ0W%HtFZDsNpU^((@X3)BBF zK`UVJIRlQ%PIesN!Pg#-=Ug1jh&wK2#rTHSGjBhgOmV^4 zSV(RV%A;!XFa32|^bIAsc~&JXN)Ps_%~YmT<0 zR5+z$5l7%@$vzg?{DN~o)56#M2@kX_)^8p@$cq|E|9p{sq;b#QQkvJ>x3fC7&ze03 zS=m6WWKzM{$G7A=gur_=b-38(tV;~PMwvMiir>}6iWK)744x_4>3Nvr5r z?$;AGSu(7cwRCIurVh-RUUvJ1sk)~L7}Ze@I41*k4cd`;x~i;n{kYc9*noBI!T63a zwC6S1LN8hvJ7~|RvLBkh1TX$_Nr`U+!{nBC+&t{vxj(F$XHToy*{Uni#L6nSJyK}a z=$K4JoYTd`E2?WYm|GljZyo_rG|5Xc}EEMqR%Wt&^RhO|aIVR%6 z-P^4d#Z_Dv-&xg}EoOIkAu&WOt{oP1pN-{(aw_Tcw$`N&JM5+P?$ z{NuSEU{#!64sCMUQyLmfGP+$@^!Cz#gCy{R@p|SXIY_jWOxTyF${8I&IwT=< z^dYgf$Al2YS|1S&K)Y~SkKSSUr99eKgKh7u4r}K>Qxmk38aQ@(IBQ_}852wLN(*OL zz}FSXp=ggKk7v+A8`+w9Z0Eo!U{eAslR8<+;M8O63BK|^x_@}dMWI7J18Vf?vdr}N zbIZ>$b?w+OTw#04(=KO!YOV7TS*`7k*@<4AUnN~+WLnaa{*e`H!~R3`H6{3sada{s zR7ZuS!%i}=;)xz?GDfz)vhY(2Keq7C7XGIN>M-)BEj(}GB?}C_W?7hHftlYz3lzpO z3rrL zlwfKy0xv0B$$YSJM_4|qJd++Zt#U8$qdwAWTG~=sX|{!)c1m)U zljn7<oWoy|FOuu94xi?xof7u;k^p z?YZ=ivO3L}i~5}*J2*6q{l{f3<4en1IiKsI$D4i6v%1>e?*r4!Utj_{7+>zb|Iqc) zwW8m-%O}Fev*Pn&Uf2NI<WI=c&5q#n^d+cB)aZbI-2u)chCHLI!~jQjmMIkI_kxP|J*{UfNsC!M(i32+jeaa&n=o+O7q;_ zXNO^Z2bq7$cT4I@ChK$T*P6=GpIg>K$A``mU~)y@z&ZQ#RXJjg4p8}IT#c_PYiyN< z$5ph_^>HMtI7jQ3jh}q6M`*YJcEWj*^K71L8BT=0Et6 z20NnVQw&-?T{U?^@8wLm6ucbzhZYL>$R;f>{Y&GQx0qn@9HEDuTla>wi{6W8oz-h2 zjflbW!SYry!Lm|_l?%P;U}78+jI_yfI_~GM60RNab|ZBi(rbmabl`g|bhA*vr|jh{ 
zo=^rbOJTdp#QnF|E>=UMqno^RrmU(pU(_!!a^5X)Q=2I3ZHeXtzV{ztsXdjF% zd{A9@T`8sa)+`7cm%mn$ zwT8K1%FUtI*#pxgZZH9RSmFB$oRp{4c}gyf4RlL_nMFHr3dAo#KA0@YPEm%*7K8i( z?!9Z1QDPf);WWp5(VOA}$A@G|GxH2w^>?eA2ykgO-qd1er-$MV` zinU?yLHd{yq$^rV-1~rJbV2|;L7kOoCR0qDe%rt#=UNLb@q5`QkK%=qdpflfU*M!} zeA5)C6-pzHbt^!hMpN{GeFwwbC*CZn*+OvWguW$NYnW``cZ#G0-!P6!L0hCCURdQQ zOak&H(Wy_#myo9c=OrNbIHhIR6n@6yue4CWoX6e_ySCL0&I|pq5?6k9v^|$j@zh|U zt)el>i}gEJXrt|nvLRU}r>+e*{+JsMAEDc^qSjrP<)xtv;a%mcQg)nl(eohHH3Wobn- zH839k1Hhu?hhB~hy?ji(Y^4KN>AW2JNa2sornJ0FnKc^r*aY>7dC!)T#TP~oywGxJ z{>NXQQ~a?6j#dAIzSaxbOIrNp<1g^cWp(2zQhC0VZpOIO3oSA)r?QNeMIxZE)(9K$R17mXdpVmoYy^AYjLe6|=ahePp1ANr8&4J}6dx_-NQJ zTy*_ctU)Nd-Mq*DE=}T?dA^!F{`%if<99WIW_QXQneeoDVS1-jk4}1F*$A6S5l=F( zz|)eQlGv_2=gQ#tkNgRqaaxz~*)b!$sG;;TtJj4UAC&GJ#772uc45&b_G|gz>d1$U zXJBT!XxVC3CfIY}V0g-ol>i6k4kF+?2534Ubwa9(O<~zyPeQ*V_)m7+ru>{DX-OdK zvP)@rK*gJJ`LDST$L?(5>t4bB?xUv-^rD8+bAPis?6$AtdYJa9QPysGN$Y5K#uzk~ zd29)W8h4Hb&Rc<_LaPitZQ`Qryssfo{Yw5MIXr_|@z9b9Yp^SAh;&X0$eyD)DYDv@ z$s?2iPM6oVN=rFqm6LzpVsIC=c+6qmW8b$u+V7}Mh^W!IUvuXqHV#gv5lA~r{V=&O z+o&c>QNZ06>vj4L3H?V+4eOS?Tx9a?h0llg-(FJnZdGA(G{nd&J{{&f^xwmt9b0^~ zn~izY0;4A?KeF}5rLkI~>S z0-t!6VCEaOY-IMtQIL)a##KtlV~+p(6Gw;64Sc`t@l|hywcED)!!Q!K)55R4FsC^e ze{K~fQLl6Mdg4j0G*Vvn3|9M#0kf+_`wlYqn%2?kw$nY`dDL&hbw9suh%L%w&jJi- z-GM4%SG=!<71y6ONI+Ss?1U)@AFzd2xxy=6Q@F||O#>SMIpB{8t+rD5K9l{4h4U;# zn0eo_F#YZ^RePt)2JtK+d>`pEh%%c&+;hhbR3 zif3Xv(UuH4{DF9s9`rz)6qDxqW$*iZPBZ3b7T&SIxvQ_BFCjKE`kn%MbnPB)m^h>0 z8%fNv?dIOu`d%r?8z){9m~=G?D8C0^dm>GaCCe9bA?FPUXI%9!Ha=iQ86p#wUOc!t zS~|rOwrEA3yzCE5emY74?`Xxzj06q9>>l#S>-Cf22}8zOuJXPp&sE(V0y%yf5&cod zFc#wvW&ATakERk>(W*u-Etuh_9ikDZz!U9K>EOkm%GUPrmGojP?s)Y#K-dPjY&U-2hnRJ6h zO=2bL4ainj*{vCfSu~zQT1Pha+Ao(TT*Q0hi64gRzW)FCB#xP8yF=a!oBhE5=hNU0 z<8%uQi0K4%{#1NU0U5M((CGwBafFFi70@FWcwRJ^vT4V4l;Np@IWIexP8R;mpHq*{ zox<;pAL&I6rRQzh9A4j89&k)P@0@T}x31A2DQSv3Epc5GGLv;&T$}77W)~P~CWBz2 z^_w@}PV>%eO;MuiB67R3y9^PHEHS`^7 zUxT%r5n`_cZD(MeWt>z|ubEI$huTLwtJ~2gn|u*ZrQFW!rrE|TKmXrfDvo0W{lX_L z{Kx{}r3-|-& z-~nDTVai8xPp2|;euV_yhtyttiY;1kJahuCU#VT;<5^dYylIy(?DCeoj`i8Q5}K=dJ#1T%}^Z6!OYF%3f-oSG$ov!>u3IHmvEj7tM4_F^vql{$xP$Z3p| zMcVj62St?Z2g1m9q3^H>;hZ5CGD<6asuSK;v_*^M0|q3Y zXmn}hGvT@+p-1o2qERD%;=tMxocZfzrDVDG6??OnY>tJQ{m9C|qjd@V)OeL|+778q zQjE_C&;ek4NLaomzpBEd(~`g-O9L(JMN9q@la-c6U$TAQ{fd+68v<9~GcM~|TxyG6 zd^^@_&OX-p_}&$C&YNw#*7lPEgihIZCW4kRPJYOSTew|C1fE zl$M?3!roRgFd!cKGZqFw2yUKyoo$xXMQlSbeG4Br^U$e7r*QGG@zLL+%aJYqGbWSA ztkz=D-^9t4i6Wi595j8BRD|A+F1?`PbRGLPj4Nl?`NE}YiC(M~aE9gwcd%WJ1_sanM z-r5CW!?IVss7YG2xiDaG%4j7*Mn^u1+J$k~eyK2pIBZMo3!eN@N!A*si@k6<>Pp(x z67)0fQVZll9b}576HfK;(t(=_rYdyMfNVu)!dgK^{$&5L)8RZBF#a2VE^M>@`!?Y^ zUa)G*HrovlH?F~$bIuI?d+}Jl`Ivb-?Q_~hQwapILnHSFx>!*LZvsy3p!w<(_Q+iV zyNu^9uLqwrbaOl%6aZ%MI>N$G3k7g49yaaR6@Ia3r5ClD{?p4QhRz+hbS3!r#&P$8 zBJuSh<(sP;(vdLnfGR8*`akB!!_+%2^lU@vZSU<0OJ}Ytr8(jf8*JNqRgDDv@3n{P zuG=;{#w-i|{GrpJ@|T^;RdznBslTC6(v{D_Dt)EyZV9TbRfyeq9 z4gH3l5x71cHm=+fwrt#9)bp&aXA>&uIJt_GFc>@OyfMghx!#zpoxsL|!PNQ>qA@!E zgaus>VOtHjhl0gvCSLUqCSD?#%q=$ ze@sjYe?y)XX69?~_zp5*Pou}jB$JJ>mwOrVsz7=GIv#H=pp&gI-8kv$f^QVTBc1Es z-&jg+(y(!%YvNE5U8X(a_-g;54YSOtH7~aXpO< z;B7r0l^>HIX1Gig3b=dRh;U}N#;-`|0N=A!9u1l#yRii;TC%NSZVK2%w4V|-114P0 zu2~!CL} zD*F6hJ?wGB3)48sDVHwY9s?-?CTN2Oe`vfJd6~0nVNBRGGpnP+rhg<%SFZ<4=J1HP3r}>UW-p zPJ{Q=R44ui7H=O16g>Ao7izvDQA6;IFN~{8XVHu6vSV3J<7CMuRg{EQuQ-7?sbVsf zPq?JRFwIJ=@K=ZbvIQ|&=6PuO znv&&Ud!cFgnP)^IlRNjbM;ID)(3$iTGrF-=JKJa2)0)K9N9}B8D zzLI!47MIyoVPsXo#8K|?*NrJn(J#Mq6#9k(zH#utq4;8vPyEdDdxjBH&hw&%&=p(r zF_L-Og33{41n+5hf!ARpz=u2!EuRAe&GG2)L4s%_3z?V8F2%grb03ieo8H_S-kiHh zY{M`!@xkNDOD=CO*&KGfzdM_8*%{R8emiCHI1chm(V=7e 
zaOrKOn@F4k_4H~Bx)xvsBje68e$*RS-Z~y|{e-LPcER}N%!l@J7L9`I#!nBt*p$FE z#?vNnARSCJ*!!ZFo2rO|Ts|Zl&o?#Fm+jHuKjpvzTK8L~Traj^c=6lI!txEPO6iRm zI5PC=W-ep?6#w#Ew`3v_#|u`H^iQ!z%KIbagjbfoUOJXB`z_4Og&Ya+*CLZ-ztVdQ z?XLkeGRcKyBfYR}g;P04mJYfK(4X``gU|-2b_t}8>K$9O$Nz6y9P=LMPNf5CdJfqT z)EFm=Dk^_br*r#g1D!PtLnq%Dm>GFk^vqAf{yp}LZ-ZdsP2aGr)w#J}-a?sWD#P$T zpz}6f=jA>PaxWelNcl{9YW_0So#apLuF)5S^Lp})nd1XLczuyQLqR)UgBynr$_84o zOUqhxe&b*&w(m7tDETXHnsr{8KJvN#`L{NNt$Ry%?J;`J0uwDjb&2Z@i1%3XwBvy% zTw|d#MuK~6ClM03_u_GN%Uj=lZ9zkqx6bH#n$3#G=+MHwjrg_&e7>oPR#S5MepB^D zdSPaG_gW~R*O}eI=qm;ke4~l^Pp=6(laD{ek6y94e*@uzjXT21*&EXoXPe+d3p#(w z$(I#0fL=QBic^(Q$#5NkXpf-@@;}c@?q#wAd%c%`FHg@<2G9R&vB-wvt3F@8gTo|g z(Mz=#w>X$4RNz?uE!P=_kRBbUV_!XvvPi}gM9f#G`GRr+w;JEkg073M2mJdMUbIla zd|QH8``)^OZxq2cgV&6^qLkd`58vcF)c-Gg6Yz!bNIx>t5Yp^vHVgI1t`c0 zGnQ~HM&zl_q&Ct^E^w~YyOrO z3w|eBvEm3Sj+wH)DeIryi^HblZ`GzXbz4tb8Z#~IwxBjZ$qM^&LnfYbCIHA(?xMwe z?CCTe=_CgO;yo>yY@`EIH@!T{PKF;f-X}v|+w#~<2_z0EXB1XUc*#7L9Lz0|R}}%g z`t>KmhwrZSq9*BBqw|I#8qpBM04Lh4?7w~cj$zDVTAA1Rng$p&7axbV;stegs3vXcqB{Qo)kvsz$@e%le1&hO@|8gEjNtwP(@y+DX`ishC)iwY)8eGSh-Bu$q;%6X>H+B#56) z&<=+#T{~6z1j}fwI{Ry)`eCwb9+>b4A>_{j{L1V~U@udZ_+w?rX8}guGUbN2TS6f- z;$LPyTz7ZK>yMUZ1*qZ!Vb@wZ-rtllWU*v(?0t_z!z;*6K9yqhlBKlbdQAGBhyGU< zN_FM7Ga2^WI#@@e3G!-q6sux4Dfun*5I3RkRC591$8KFj9y;RUBIxmE)Pq zxL;EO%ISFvoU;d&hnM*>^?Kyev8w3#R8Bm}yq(fJ688BKO`KB#N)C5QoKf3JRF#07 zTsrYpm`d{3G6b_@#T!rjy%$++ueGVM;-hu4Xk6lR`zV9X8j0bT-5yTs!42~o%zyG9 z;&X*HN$aSIH{Dy604pz9MlwuY5HDJTN92-$tAgTsY^E_yVd>ycRk+$tY??p0n@0=@ zL(b^sMGd9@@r@;6_kjbYG;iQVEtA9nQ^cr^GofTixF7&NjBCne3%L>5SBu+)m)C6! z@9);)RTj^FpSQ5H;7Gm<@YE?mmaWXr)7a3Nu^M=;jKRF<#`Bi~CPV3?eSA=CPyL6! z6y>OKw@#hHH>cE1lGzzU0~P9qznibR0v6p=ouPQEg6vs6D}epor`$Ze;2TNc4{t5r z9M<#oTL+VFs9eEt7DrFA?cm(U-%KNJFu@87Di4(byr;b$IZ$OIIXclfS&8sLK)&c{ zWca1WvhKr8$=67%rl&l|t8MV+1gxiS7Q;n2a4!jf0kTi<13atbo6309=xqdX%t zv5GckVDqpderA{Lj?u9*?p*?I5Fn(?g`o*c@?a=~=NHYiF9B~C+c132>1TyYM)(CL zJNUZgT^hNHc6zs7VaAxt(>xC}L0Um$k)?(?pX%V{GnnYA$?izJ?DFIzLgsT+r=A}y=om*|CTQSRL-WPUldrxUJi?jB?HxM)qSpyN9j7Vn<2m7&) zy5cjX^w=3=`78IAWUXQP4Zk?_yh1a~6)S)dN6V@zpyPDEIC$c`mgv=Wd8AWS(G zbf!8veqFx*SD#}}=3bi=94}yE@lW%Ym6AW*_L9Hk{Opm`Y%rBgu1ZFgOjh(<0=tGmZu7%QjPdmg`sz!C$;+R2X=A&or$snP8{|>I8v_7WD#J*wbF7IGjpVFmYhTxim>1f9+s< z#JKdT7IqDTSAM;;F0ISz-^Ho3IaeRNU@C#9a=xlOyo|Dg7l`*V{Kxa6y?;S_ zkWMo3vh#d%bd@i^Zi$v|v-1Hg9-nM9Ty*u|!25I_ctmsGuKiYb@|CT8B?}u?5K0rS zTt|D6>7x;bF}@~X-A?ZkE}Aho+PI%VM&q}>fAGG3pR)}I4%x@}<~{304Wn7jO5vf| zPaI5Tt>JIK^rqOKCuv-l^YS^V^Ae3LG=2g=A1PG%)RE#ypC02oCMY!`ci4F}8u)mY z?XzD>_WU#Zg+XWAB0aujxr;s)Fa@}@z{t7b2knFG!JbweVd+yFpO$D#st(Wr!Wnmx{?ZV%!O@k#{IB=7hO9;V z0vuh!zeQ{B;G%`ehG^mM`n(;oePb6q^&>B`>RhYOGyoBttZWSDZJ;fd*^j*PvvnWB zn)le>S%*lSZwySl@#|?q)C0DW$}*B+4Pty}sZLZfkDH3?9!i$TV2BqtuRk3Q)PD$xEVc?p^&cL1 zOwEIBN7%lRt7zw+**8qK4@4zE;d7TuK7_i;PrO|1p)mF>q0@AInmWEPIvUW@0Du)h z-koYR+&}9v`vQD<+;YXJOEbT+;_!b>9Xsofa3vhdF#ih3j3MmawA*fT7hmaAVM`{0*GYCtVH$xpIE6~UItiRsAX->*kG+$7 znq{Rm-f;1AKMnhKYfR}i^|ZAKMaz&i6j4$e%;pUR4ajM>&+3S&^ieg~zWMF2dLD~w zje)`AuL*t5OP_8CS6S+(Tc94KlPNNcTv)X1yj(hDM+#Q}V5Q~b0AB}#Il{b7(PAQ!EU%JZyf;Y$VFb5Z>SgDTDh>r2TyTU%_h`9B-HvUTUE z3vB1c@}s?$IVvklmDGi%0_QiTv)~&|%zAi5e9X+J zJN3@`GWYspq1P;UH*8wHHI2$7op=3I4#Jd&=oI@jQe72~e*y9{&u5zQZwHAib8 z=4~Fc-n$qcdrC4eBcqyG8A0CjM=uB81(?;3J^g{#C z1|OdikP~~%*~0^pQ*o%h-$y7oV4u2Q^z8o-n_)K}$cCdPb~10AH5&OiAllT%1Y>60 z6WZG+)IH37*lvPaBh(PEH|ZdBz>a@X2L z$vjyuI%ng1VSREOI}uhOwpk!rnk>9z!s?7t9dZJzKG#|(U|65i!(~>6MBpGPeARXa zs>SAJc$qQxbC-U8<>P&t4pAL&GU zz38gA(g|Y&T?O)0@1xjZ?`5f-bVtJYdAU!q2~fHj2?uN~@J`>fPnZ2=-|hop7)BTGDTuM;nsc>G3msymY0 z9X4KrKk|Yer*#i(S>I@Qs(4LXSS#iib;xgEFwAE;JR$I6C2!_VHu`_QBZ5 
zgHyn+1XeC}vXa4x$NYt5iE=35+xAVS;{`Y^>~zRuXpICOPh4!_EC)*3zyw?Vd&bUd=p~H%j?2N2M^haapT2;R`uowms$Sif-SbW4P$LFQ;krFu^ssu$GxxM~RF~2}3J1Wq-8t z{P0Dgh(F=6T~E$+sD;n@%#QDScKpb$mo<`pdiC33#RpqUNlqJhUfhi@9}eX$wcOja z+!l*W1DTsj{t%-7%m_`cJk~^udZG!Iu%f*G9;kxg*LU873JF^21;)huWQ$`<-p7O3+D9M2CJ zpVqnDF5LIZBZrPOz%Me^0)n0N`kqlY(VT8k{S1bpi^Z2?Xv#oX<&oN$Nwo2DWFEXu zwDjWtL;^663*J2WS}$q{{nY#y!uH)eN@-nc?|(5N!hzx+IkKR6L-fITidnQ`?4(rM zg*$e2w)@=qvjFr}-2BTstyp78@|9(C6hoN$#*8{YqQnt5Cqt*}0nV?}UmgC-8WR)E z712P3UZ#UIhsllf&{cr`lml9VHfSpWIlr9T^US(R!%lXr_^AFt;mT5^yNRuYwHC2e zq1DJmYCRa;5_+FII1Cs&J?izde#y-6?rN6A8v~=S`h4Bq@|H3qJ|1LDdr6HH$+V|s zCyc%&!OSX0H%@F;A%UAm-&~z?-Sxa@s+xhwf1o z`WqGsV77JXts@G)(Zojk$kW?PHkXo}aP5%Ltyg)ZS~55WCVNJ)Zg#Q7l45C#^Hxym z;=w6+T}sPVa`D2HbqZC!ncBd6JI{-jFN+tJ9Q&zHC2$QWDrTt{g@51ZV#YWI|@>|FX-&~@;> zztQB}u_@rSmGi2;K3nLIme7d4yKV0~W9+n2^o?8I=}@tYLdoW67-kf=Tzq{Rg*WD| z$c0u?h3}+x=m0QnD&Xx?+UpfpGH?~J^nR&VTs3xTIMZIDY9xGQQ~8JIlvXVRUwfW3 zbX-5SS{Hr?`0`6-zPvq6 z9C*cr7VojA6-TzxDGe|n-qVuFMmjKc(#xalWc;aWcu&UjXv=d_0*QmlS%tBN_Mu9K zo{sUKPzrW#*%VgI=9%+q$Qp-g9nx&>ko7^uJR&v&?W&fZ!u;QqR@a(NxCNR{UF?~p ztG@FxY>wIi>6EquvmczB=Q;hG@> zyr`k{><#aR*X<=L4;PT!tTk+OvIRk%c zEH^0pV3Iljui*KrdNRgrFSk&@_-lq7xBcDex?jGpeiE84i4>V>1+j={6hC0>Q-bG= z!w&>$8-QHxfOMXhUOLZ11JVg2Q`_Wa$UVr1g{2ekv1o6DtSV5Eej-EgmQ6dt;$+)L zuRm6riSoraT~Lb7bE0oAeLv=m;9fip=vcz^%Y}#G0bw^IJj>7RiURnCzrsr#&fP7 zs*dYrN20x+bW`3R3!3s~>AeKEPWeQMZv?~M?(b|Z_eYlupJWp)yJ2J3x9Tg!(YKmA z17^t^}$ax~gI*;`fbUO}c=&k9^NMC989>_sNqJEsV9Njg7yK4%*wZypVz7#svKcsz|rRzIJ8Yy@>NHDzbEX7L) z-&`1V&t{u=WX}Smb`~{4DlhR&YK{ez_-_IK8#{)7b|mnkR>i=)VOjZpvZ6)8*AE+H zZpQ!$_Y0h?DE5na*L9!vkwiq++aBp1Syxezn>%sdz7G9A)mbxdFYOz3K1 zx`hHJ+O8sF&pET;k3{e_7I=K*$;04r=Y;b|w7krH+~(++_4~UYFnjJm%WwR7i%v0z zkUew?vPUjjIJNb>()8H3ZAwpi@xm=RhCkj9jN$wXc>ZULita+)#*uIlxf$ZByT{Z$ zhsxylaD1AM_Q9$q?S$1pNUCxsGfFF1!q`PyF!ua_Lwup|blXWK0Y>!*RX!>|%66WG z)fNhP+MYvW-|j{O`NGJb(qt#Jk}5${yq9~r z6xlJ&*$gqePdeK$(N>uTpJV-TgCX7Bq@z8rIOT!~E&gJ0cAL+fCGUjbGZ`ycITdj` zo7lY@*1Y?6dIsDCEI(KuJv90szDGGJ5LYz)kLVOj=dq`;7vEIeRPK-Mb;wI#bqxI} z0aW@nI7Le!aa1|2FlX&)bp*aN&=a_2^9!%oy6MgE_QL1As7acH3&?t@toO-Nj|ZlK ziH7aMID2u+1Kak@f8y`Gs8w{=o@a&&FaK1U#`z|oV~E)?Ox+-O9$GSC$x?eCPqyAh zv?W;paRA|zj>?w&G&eD8N&F+(kiOZ_nSR~d^2OVVfw(tm6}|lZt>L*f>q=?x!L9*z zTpI#KXU&{C`z;yPF*|!9~#e~k~{iLvIvv` z*vsIVe8~oy73I4u6fkh~*`fc)vkJb^#Oy~_*v>w7v&g~a$6<@ZjjOhVw-%M}cH1Fe zvrxCpWwOi}Yna?2lN=t9%wy!bD3YhP@W%q)f7AF)=_CjHu)%mngM19D3hzGUn=XN40UTD~UQB|nT#e3Te1x5+&?9?~CHwAZUs zLUuzQKcX{iqIC1{!I4&8y}l{Bn|(;^_mlUum|*>5gIBW#_wE%wH+DptjeaJ$(}MSX zCQzJ$>OoWZ_soXXyaJBLghz=VvXzY29$qOn;<&}(6$4AMMlk8F8+n>9=Gb4})Tc0! z%;P+29r9@@ZffMyIa*q2>NqJ*kJTJcKZ5@k@WRg)8||xys*H&rzw+XdA?OIFhK16*0)x)(A1)d)8hu0IzE>YDrN>RXsTW=x{U!QX=4tWQ z8=?a=W}^QQpUHFs&}>Y{-;O;y!=ktA9)G4`dn+b|gVqBw1+>uAMUSONPGEB7Z9MJc zNv`*qTi*3*Yg!eS35Z zpBgpHiyBIA+OaDFk+mx-=bWl3shO5OUsi$>M0Z2fTV;_zXE zSGNA>((!Sd#9A&Q^#}HgPcYHaN)|M}>+6T-(SOlG0Ze|zT{WoS8%->+J=eFs!?xQs zI3C*_?%A60W>!*AG1)v<_|xx4`I{OK+~5&{~>x9c*$OE^H$~r~EBTlvVZu?zqhI7cW5} zYBjNH)0(=y`bYI25zgq*yYLzFnE0=QRkh*LSqB`wBty`aWA#ghUtRvXO~mO+O2CCo z%MrdxrpVHRkrBL1wAaZ-GVw}7ajLK-U!)H$kS#oP1S_M0mh5E4RQNVHMN2@VW;;DK zKIxgtnbCV2XjK4@|2mf!>CAid@66In6wu^!(WudC2#h|QvkVMl2HM$gv_X+IEJG$- z7kZsFK(QiT{M=8&zTMkPveqz@ZvJL|+)^s+)k{XG8-y%VHC}RcnBuGAc$w_L06IYF z3un@Lx%5?7ajNjq+S9NXP?caV`$NOrF)ZL~lgEb69VnLL1FUErOgfnxhYv0~v|=!X zJIc2zvLbg>!C;cDW?7+|@*(>&as~XRLMBmkV`IPslCA=D>ZG7ckJk`MD@IKvK%zwS zpf0Jat!#Z|V)<_E4x3>8cwS@Q(>kYnxA5iUIF_H@7CvV|^#bezexLHJ=#?rtI}IM! 
z{oRxIr8w3lw+y#zG!|w*y3#%v#aOxo&zHD*rn$@Rt+A7>+`-FaD;x1(*$I1@r=@QR zCLVz8k%0VuH#K_kmrK=c)Zx@N<4GnB^_DZfFs@Fx#C9Oex~O~t6(?1enk5(85K@jw zoBjx^Sy`MArkD?PN_IN5WNx>h^27f<{h;v&EEMq2Yt1z=g;}!%?N>DN}?}AxADE#x&{5ZZovo&R^^E`A_Aogh%z6K%S+^-mQiP(nWU(I~D zF0H8}lT|GI$iUAV@==eSKz2&HX7HKJeoYlm&7bga|4ohk|}G`bY1+|PR9TMM5H@4wx|fKI(pj@uJ(j7-)gqsFM_yjDe$Qk40-> z>Eof3UUFgSgpnT&C|+~erfP%ae%zG`s)z$#s9;0xnM)~#&)!@R{|o0VT9;hPta3%!$%dRh8k zuyB?I{GSyokD>8vg6J8?IM9*oyp3pK#r3>su!mX3yzT{TP_pfv zUFD%!Zjg2xp7&{#t6QcSVDJ15ozS;rwr<(`w%O9oXs)ziiW@ds)ygMZtZV_%)`pka z>X+odnXG5ZdZ&_U9TU6N{LHLZvVO^WBl@REw+nMuF9>_>oxTK~HC}B% zqKd4_3LAoeNfLkzS{T~%k_&sCXmrwxZVsDy1Z)HaZFR7qr99bjV?FO=FO3s8|5o96 z%!s>0b-j0zZB?v9Tr^{_Bf5{# ztMh=oIZ&Le&}){GBk?*fFRab~yv^54(XUV%@zqIVLbpylF?M`_m6_cK%BuvQxL|NR z7(P@Ckf1XUs?11s7Ia6Rv$7tI)KE7>G*eBk9@Evq$4`5`Wt+%Ifc@j*imxYoXw%NFJd zEHs`$HadR3YTjaX52HbZqtp$_6X^;PmRvI zC2KLPglOX81j^E;FH*b0)jDkb6X_%4(1+=#pe;}+{9I;RzVlV)kL-Ow2X`7h#{%b# zLYQmvYYVgk1@NBMpWXJ2f^QKKGt`7-p6TnxPY?IoyF_~r)NZ!iZ(F|47&px_s9k>!SA%J`a+dW&`evKy%tUrk7qf^DWo52z)$?33wCJKw8qH&a3G6nW zSu&aM7Z6^-$ozn{vPb93OSWDn8tg+FS9VInV~!0VYYLjvH5KQilebN>PJ&Bj6^+W& z__gl_o%_mJ4~Gdi-W&RycR?-1QUg@3OP=E~LK>LZ5HZ=Jy|BiGfpUkAox`XrKNIFY z^8F&+Jv%-K%U=Ff7(3%0tG;Aa5yr`ixldGRBSXN4tRTMs=s|i(>*{UpO(a3o8 zEwC|J2bN*p&tes8&DQN<&c=63$H|$ytAve|o!7H_T*+!G&Ar!uy-x^7|17}W<3_|S z-rwK7r%3wei&g}}vBF;+{>!6^`dPDn#=>$7jEb^rk<9|=fas}_e=*k?%HVkJjInde zo-oIr?@QsA|HsRz=y>t0p%dDmi4u7Af#qSw7sgiYFj0y@Wv&~`A#&|Ot&cbyhTE5h z`(JowV9P_n+J*0h30K<(+~TsPbwDw`CBQ#3Dwem9nYVI0mbcnPfn>QY31VY&sorOI z4`Xbnm4&vayaRekT1#H|WX6U6gSq3f#=;^C+)<72^3vDBm#+TIF@0q>cC2*;lcn3^ zwC>@`F_(oW-*`ShlD#>&d40P@Z!HVs1~slRzx9$E!vB5$_ljc}L7o5CUhvzbnQeAI zt+97w6xdUq-(#pFkI;^k*~5hC}(U`CR-nyky<-uw}>A zG?^Dous;`89mKph`WiZ~$NvCi(9(;S%wtbCW#`jT`p7(;+DMlfX?hyDQ|E`u-UcUd z30MW>e)kDGqeorwnb0}; zLL&RVf5eBq1RSRoFyV%;wj%cLH0o`3bb3e{)RqP^>}{)fFN5}A)7>!03z%{B>eAVZ z8cP3c;qtI`&)!m+*AE*QI+%ZCE@mENE)YZXrp&b3E$}!MKu+g^*33jonL7sN^W!dz z=R;On3#WDL7)bAT2n?#|TL>y{29*SO9+JGpLIHftYQU(o3ck_A6Mz4PwHd8zCmNln zw)Dy+!15MbHy&xM6_*ICatp^)xU>q8d3&HET=n^49uq|@KP{fQF4_?OV3P+y5@r+r;eodDk-GnD@vvVQIH2JcvrwQ?ccJn7ol zr~2bP_}UYmZ8goddwj_2M1b}-^F1D4EPmj%$3yzE+ZCf*xvXWLz|bcPxP?LF+l86+ zUo2+u^MVE34#*=EH6Xg9$)(pM$;*)`ke+C4WG}gwN$1l#5^YJ=0dYPSK*eu^6T1Zd z&5518EO+m$nj3a~_@343X8!VxbmPlfL~}hMtpp8Bta*9kf=Q}_U z^usO~ykz8o=M<+6JU{m|^(5u%k2~N`Kz0I5+A|rG_l(YQw8`#P#|XcJpSwm6kNHdw zou5J!*Y0?xR`vYu0|)AbzdZ>DC+keAxRb5P6*7G2E(={P6mZ3-8+SLoNYoJ@CA+CO zRU3H%mB6bH*={0x4;7OhO=vt$vso#w&)yEBK%sD35Zh=UE z^ZvruB+F|>VdLeV)u6NSPmKq=7cw&W}ad}-+jikXDJ^k~cL#HmKGMM++-xjuoLg^HO zmJy3z_<73=PfRN&ToevEUcu-zlNbZDOse$121Sz?G?o3~79x-gw^Mr)sUwjtz&Dfr(uTs@?JIM;UgXcX++ ze<1vF@ybRecP#0A-y$*5l23v)@#0z3&sZp+NAJ^)>;CQ>=o4R?WH)cfLK~dOC9wO$ zz2S|g?4?Q7X&crt5%NRCY}JW+p9OzDlEpEbI+1A5E?n`MQqvU<9{4CMeRV@TR!pD6 za6DV_s#@8KdCDhN%0p!H{KdoB4=;ixK%;W21=WYNk$K45@7%L1ypTMa=Jlep7ED|{ z)&RcHSlf2<H_+|GzDNB}?jbMNuV|6Xvw2$OM6+6=+M>hB1}q(z z6)P|Ebd}!6g;pS5bHUVgfIT3bai{2STd#9sU)X2F+p*&P;_&Rne$jPbiI4iJtx!XQ5l;k0u-+8TJh=wc>*DPuzq=XYw-(V(AcXASwX}EltLk`B(`r}7d9+?rKCH5 z)YLHPrf-&HCknH2)`RhdHJ6`$MzieqD+@GQ{>0UYA*-TITn9c4(TZDIVeM#ND*gl8 z4MRqCDYa5Sx<8n^sFX@4JLU|Vlw{qRPDl{FZdrHygMv> zZmsR8(>`>yt>Ut?u%pIYW*n!M1qgCP86h*q$_i0@1d(!sH+i(TfjKK@kuG!=j{&>2 zahW_zC#FTpW+xC;_z3=eHHjsF^! 
ztzQ{-?A;kU+l$w~4cNMKTlnjT?X`k@Ou;dK{?Y?s%J7NDRE}w$?L%aNT}wRh?$?AN zV`F(|K1v&r{b8r;;Nysn^)Kcua|f<9eT!DHKD6)JEVXyD5?E&ZdJA$4Tu&ewO}FG= z@z6fRCf;P}BZ9JT3i~vgqAl47UN70l3b#$NPSDG36_CO6SDVV-&JQ+*gLX%S4^Ooe zsQQ9qX}n`x*NIL3p-E3pGA6tVT8l}BPfkS5C}UA$bJ z$)hYeF450R=)O-b$YY`8NBh>55mB)(=A>RQz{N-$E=Xo zEF!h!`Jb9;Elq?|Zu{fVZ}`QD<%z+vKmE$r9u9}xc2btJZ(F#}fdPI>=*xR&*e)7$?!^#h~m6Dh?@VwB|cI3z=2` zb;kLWU!<8F7@x^a7I|b@IAoaejpc3WW2-^L_w8e66~J8)W>2bAlC3g(2Cw>VqxymL}<~K%1f9;UT+8dU>y{+<=cx!doBt8V@3-nuGE zuXB}`$#jV+gZYe^i}}prR+&?m97*^u|CQe!3eYERmit>d&xHHnyok!3e0g4;v&5fV zI=APsKk4)bMO&oTHT`_{jP&^{*?8N1z~gj#8fL4^ z>K(zQYgeSFmd&$8(YsB{y>lK+Eb3~wREs%YL$DuJG~_k&QdBV|-r!ybl?7D^)nj}R zAbY%{g}n~U$QmI)8$9UR4;&Mr!?=ut4!eO_3$)**Ek_|>w8CvM{jonD|*_xr0g`^n+$6zC7RPjE!wAxE^If1Y#q-`Ya3 zJU-|@2`h~Hshq`|ZH&-F#)uNJ+5${6zg={$jXN4fj?^yR*y-1`lK1?gN7LG8XP3OT zgb+EMJ^ajgTVDly=0Vmb4IFdI#B|D}iyFpWhEd*=Fa2aURUDXdi5v|C$H3dggNOf4 z+<#KI0xU4Tb@B8I(*@(Pg$R+^YM=l{J19H5l;L0 z@gujl!m@jJ(ZuuA24Zm~ac*}gTAPE>ff)O}hgygAALFEF{aO8Xn-;!LJlpdl76Wek z7c{r+MjR)?h%^>Q;iD1q@Iw|peDLrC??;D?+K)K!fsi@1A71Ouz8_|P@656~+DS^; zH|o?Kaom_R>5@;>tn}itr_-wZ*qbe0)Smf!H!x&78gxb-bVxe)#@}c~$64%)^mv;; zF2f@;v9NNSpTcbcMoN*zY-y93n_`(2*Em}HI6q&qB5l*oO$UCKY1Ha=7wz#^#19judevujO4~jH)ljrc zDK5omIaj&bj+C3I%R0*2^mF-_ou*7vkEUn7S1a}kKiV8|$=&mX`~Enk{EYxtLj(1k z0K6bFZxFA98${>bh>kizUn0L@e9Oe@#p~1ZIWPHcq2`IQn`mPIBtk0i1Qi``xv&vZnGUT8F-$*EN&U3MExckd_=mk z6OY8B<#;3|`Uye6@n4eTpo{|SYVFR0E6lJelLIVn)bZyvtk-VM13F@UUx(B5!|df# zTEFS&amFP&Dt*zr%G%6DkEiF?uWi}5XyWvAu-+l3{;l0rvl8uKF!27YidW6kR-!M* z*zoA$C)1YplPj2;A=e#%i3lEkL|Wrw7e0F7B3y(GzkY`+cy*Yt9kQSY@`)cn9#C+V zQ^7;{LG$V0!?5?20Pn;cS|{tGPyTMY=##&vzSyp^CvN)_wcY{Rsg3@U4aKh`%Q$ui zw2hAJPP_is(}DUu`v$4zd~wNJ4I9XS3#?thl71>d}An$i8n$N^eoEBwxgKkMvhq5okW?G9ky<7mg! zud3q)F)~JYcz%z{ji*g*10)E zCI;}jrUp<;KbXHHz3|4HE%CpeepWexpL|4K49}V+y~xGG0flck`E{J{cE2^!H^Ov(4h zGIf`jAKNq;@~p$45Wn9sLbzNO;b%SUU;ddh)96Dvm!*OE4?L&$>U=KZU7y&wQZRw1 z!RfeV_FZUVC*J2US6V_YDZsTXRCl>N>%d0QjIU&@vTAf>7JkSAA80YueLoBR z(8aIu+E(y`7rufRKo10i$p#9u&-3Zn!>|{XfI8vvO3t_HBprV2sp-rsKAU)~ZQS`c z)}plfnVE?r)(iN^udcR>?Rk)JF_4rvDrEGbN9k0+FIAkBHm+NpmOpuyCdIeR`NB$8 z?3c&4%d_l0Ir`34>~5JK?V`D2AFY^8#lN?haY?f<{F0%UX7nGeyh=4KmPP|&WXo~ z?La+tp_kLu7?^(M<@M>l<*Ql>=bm(2I!*6_HP64h*xP2KTHi>-L}rtiF3lpzCo}lL zGhf+vUj-qdJJ)IW5hm*>>d>=ztOzMc!s8DNgUJR8MO;8PEDN8%d}3ZJ*dcmc@Yl4+ z3jGKZ7MgVD^1!Zy#y1>3apk;-pd9XBu{v$+9JnDho~xnSJ&06=1GGDM++4QqIInt- zk9Jpl?-2U8UNW&OV0ESPvMRHS3L-D8tW;&NEUMs}41I9P%y*in#xYU!!CSP5s3%)! 
[base85-encoded binary patch data omitted]

diff --git a/assets/logo/123D_logo_transparent_black.png b/assets/logo/123D_logo_transparent_black.png
new file mode 100644
index 0000000000000000000000000000000000000000..46614e00ee7886bf24885f8c1899c2cf6a38adab
GIT binary patch
literal 2486726
[base85-encoded PNG data (2,486,726 bytes decoded) omitted]
z`0?YlVPnGEw{Ks>(uzaaVR^-h6)}}(BXi}-l}*EjR_>qmV6SU>w~Q$4frCpjIuXmU z{<9XCH9M^AG8>Co%u5F1%0O#KD{9z~R(xe`7w5S0<5OY3?_$2lhRO2fOnv=58-@~- z43GgbfDaGy?kgKn{LMo)M8b9_-qqgs&~}0-Mcex zISD@6v17-uODohD72bF7pm~2Xr4YkIO13l z+^b4CVhhb617TypwSkK9VX6SvKbj|`F7SX7C58GX4Zu7T(X|E6PU1M-fAvi|L`gAj z+_;m6HfRdtmG5t*{20T2?!Qz}#L6P`lL0bdl>u~PbE5$Hu;0OIL3Y4*0rqO#Ylqo^PcqcA-{{>oadQEZ zjLcBGt2S!93I2vUvg>LfM~TkcBctRdKBa zTvbT;FA@TERmJ-C>&@3+f30e(1{nN3vhT#?$&-)lpCJPmGiS~;+qP}ffVYlOrAigk zpg{v0<%p`{G8@3knB9Q4R2ef}RUz+)3}hg@3|v{gk`-0Oz%i@M!lhwURm86?iGreY z_k}hJl!+YA+W;me86X2>K#KtkQsf2JU_|E??HTwiFCmxX9c63==Xi(=kO5r=eg&K; zmRtyDxL^crAOm4x0DXmg@bxyw{B>?mij`Y?#BDA>f(I&656iTD*F!1{v>~l{3Ri^F zQ`OTeM*Nq>8dXF8{{5YkR;W6dAg~*6ywTPs70zF)c$3=)+F#YCe(!;Urt4HnE3|nf zo?i@zwBpa_*^*W)@{0`5tQ`SK#jFKZBo&|IzRR1VXYGOJl7TQWfLE>4;O}t6D?Idj zGi<&D_t}Xa+D?*geQgu=Ri zF%^J^)NBCA^c(GP4gjyw2Gz!c|01XOOdD6^6kmPyl{ReX@kCBhzkYoiJT%4&-laBx zl`%z5u|gTs^qDyin^aIvq0MW;@ro7g@)Fkth^iW{tWn8SDOxD1yjf-XeDo?xE3zUe zhI1mRu(ZY&kSVE%p=y|w43GiK44@BN07Z;Kyx4M4HsIF*v<2SDezpNlOfor{D@uy}`(;l7 z22BPmDk)Mm?X-cFKKtbrZER6ebekHgwE(p#Bwi9QkOPma2OHK^_e3_Sc>QB4Db$Gt z;HYocnqTHxS+{T??zgVYG_*k-G06ZKh>?MJ_fM(q!dw8nY88envS!kSF}jq9?5j{{ zbKGx#NkrjcC=>0Y5Mt#x#BwM@c5i3!hk?b8m`X*3BNah2lAi(Gr)Ab}L&rRwf_}hi z$85504hJf-p%{%qR25%jlPsFkvW5Ir(PWHg9{9fy<@M1nbHj!WR8?q`3cRXStD1Aq zm8PX0n&8HLi8`u2;D>-*sSW$-%(-UmPO2)jc_p4-3|w8KvZ-7|Ur8}w)GCvjnIAWq}%g#7L3(ZJp;N6K&;4znCTDNM^hNS2+ z@SrZ73feFg6wg37P!U;$J%eQOa;_ax(Q)xXRq-qwS65XG7%;$4RiWLajOp01qdw^G zh@!4>I^g9_))b|x!s%L~alSHOQ&sT>RTZkuV)VpyX2zVYs+j3ugK(ey#Rf1j$p9H3 z1F(1zc4~ z_%BL|PgOwyCQ6F2W2K+Spvgdu8Z}J4dRF@0qNK31rtgL|i-y>l3t&Z6(filAW?d|* ziqylMH0zvLWFTP-TvOAIw4&e0Rc6t0N|&@M_O%{XK6uHj4Vk^bKnBPF8E}dLbXd#6 zpb3a)Ib~R6#)yPpMk-_)K7$U&5S9)`Bg{ zDMVJ}bimDg4-D+AZKP~c(S4e{KQfSk@G>BBiU-cCqfUTqQb9R|4y_`inEu;V9kAn3 zhJ>}Vd^`SNhp-n3AL|SpqIpZ1 z_excfH*a23vt~`xv}sdw@x>P(`Cop^ahJl4H{NK=9BBtm2Xr*8s48|JIAFR?o#nLe zG>#0|X26!JV*dfEDq^j>G2gAXrKb1-_thz}+QPHP3C~+WB`6sn17siy z2G9?!gKXmw%S4eq(OE{rfGhA`<-!NiL~{|Z@;OiuFQ)x3#|3F`PXB472V}r$2A+g< z1st?c{adSpK4xkX2P!h9CestL`>hQnh2Z)SWCp!@2XDZD0nRHaiWV(u>esJta^}pq zzmO|et|RAV(885)(@i(oGR67!ikCi7QgqQKVAuWw*q?$*3Tw>*^N zZtLp;h-G6ZtvAzVTiKgpDDJI+)D)?Qoqt?JRhi602FQRT3}B?90ZjP@v7{p=1u(r@ZP?vGDE31M;P9gSK2+iSRGYh!dkRqp8s}0)g7s@HLIVGNd z4E(9K9XUnc;VUSoh@v^X+Re(Q6k^G9QJBLlWPl8i0d)q@_dFklZn`*d{5|># zOTtkF8GD)aSJ#a5@Tf893t5p?qAfXIkkfJ#ZkXcQI5^6|vg?C4`TW~);5M6fWije^q z*4Of}-Ns0}m)Tv|?OgpkiguMMum-X`FP>N3=UgA>tL|I^WD%`&Gg+$;-?;-oD9 zp#I<-IB=k|N{S#Hop#!3rfk`=L9$p)d-KgV+rsxjNwE{ozot!qC@DUol0uu-IN@#k zd4GKN2#AwEZWNOf%iTTDP=9$jswyI{-*G=~FhBib*lH*<@OFiXwz~5~S*gzA= zK(aCLXE@Z*QKvpaHzD*h^f*uvW$Q%a`4L%dC@GrY!FX|$X2mTV{1+vKx+F=I6r)Cs ziklyi7PNi)_DGZdFJ1NO)typOoORY&|Fcc;kBiY>FI2>{1@#Vq)s+-Ir_Z61!d8tD z*Bb`fV1Ei5N{Ze?SD58%Lf=ozwjI3ok+uCOp2@awCLjZ3fDD|-!0KBs$L?$?rgf_p zZB9HF!|(J#iUELdVXgR#JV`y=DWk)G9FviXNB{WAzY@rR3!@e8EDoH&ig3JW7bF!z z(FVtPqIqP%D+bV^C=G*M-FWX1Y9ZB|912z|0e^XWI*H;wl=AF>yyQyY}=CtNxP|3yjBUE3y6Qj8oqQX4iVJoswn&z~O{!zCp}ojP^Q znP;9E*oO!*T#EMEG=g-APGUt#(F40_Q%RBNs%80!W}y99)vYKgWRr?j>)cubFwtB~ z`JL@;N*9`SL-sSWRnD zRq=#X!xg}B@0ZO*{;L3W9E2z-dUAh?<2d1u@G)RZNwIg|A@e5pr#P-bO`NjPOqykO49f2L|vRLDadgBKkHkA7~uqekF5Q*m14%;Y;%lCKKzPOvl(Fz61 z(L0?4m~RxIPK0QVo_n(s54!dn{;!5jp^;WZvq?p@YSo)Pp(>$(BsxxIz{z zEHOcGE`V%Up{{{QCTT^53KfpyKaoG$wQFaq_|Qr#wquZj(h6J6inzWokSoWj=HB{e zshdk_#UGS@H+7Twewr0Y#aMKhC;m|q{~`lqfDG7W03Q=1$d1#__a8H0LUh)xao-%9 z!8smshym1*%ye*JdiuEz(Lz&>VF34`0{H2I>_t9Yy0S*uF%0ofGT;XTxRy_YrF{|e zIc%{EMtcTs@L!Y^vf!I)*uH(c88Kpn zDy9ZlG@De2k|IZr92&*PjB)O{=bGBJYsZX}0YGcC*M=D!ckR%dFi8FD-bdJ^VjGnd 
zuGKZ|i8BMTNkysCrC+EUJ|66?lH&TZ6+0`=1^9#4SGq0ynSCP@!8xPwT?{56zZZ<9`Jx z{qZ7j)Rh#22M=~`lZw@=SDP(cwgjRrZ{ED7V#SJq^4LslQArUO6;LeP;9(#$-_$p$ zIHIJ$vQG?TAj}Np#(?bo=jrzeMM?2??XXJFLGwao+11#n@YLI&rks))YgZRWhtCU^ks z*#XirDH$LG$20I4ep*{P3z)cMfDDj< zV;MjnQD!RlLcH_XxiBL5= zI;+J1`W9CK%LhgHqv%8Uvzh7l*FQEY@LYh{Yt^ZNiqylMV)w`q1Y2+XPdegXc4oYU z1iP|J6!iR%Aw$frUFxtw%{VUonl) z29Z|u`o)zOB30K2Mg}5gK%^D-Hn1hFSRAn($&^qe6|ymfg)k2H!*4C%5|Rv%0WxqL z1NfSd8wWl^yz#gbe*9PpGcUsZlASz1>gz-+UHJ;^%2)9d`3dcB+?Vpgc~@rI%?fQr z&~Y*lB?GvIWeRR@M5jJ-XW-5x92=OyaUKE#u2-ISm4puKI2di?CI1p!bWL3+1pcpq zjiHfK%$qkaaFvLhB4T-&1Q)h9L zibU6JhUK5~X`M$CFc%<0Tf%j)<5@MZVTHc5;-i7#lvcd>p|wpZo(WgWq_<>%3?zbq z^pAn^f__6H%8#*3(X`1RMOutF%$_&~W}xT;i7WnpB{#vfAOm&`?l2&OM|m7EXPP5+ z&4)I%UHK=o!PuO*mv&SxpO|GLFYoU+kIgBg$eppCUEZP-doGC&4gW?Vp_Lq(IyK!@gYG*t-nB&^%;B%qM(p9pO*P%IPLv|0o0$or@El3kZYmYAC~bi z2NGWA!=rf=#Oil)zX8?T*8Jv)X;ww0BtTFxZ9QwfnI}>Ii2P!O3k4E3?wfDH=b=rNzrYBn^e5i)!Mu2)5+WV=ouLx z11>OtzF+~^F%t1o7widWgUo0a^~9=x`Iy{I1|~aV)|HOf5qdLle-cH@b&e466Ye#f zVeLp|xMoI+6%W4%#MpaxQ=De7kem}x^XEmwnBCz`{S`eB!6xWRM@LyTdImD z?3t{J{36hHs3HQ)MODT6_3O>XjlpKRimIYgrApfLM2*+Jefy~MW|PtR_KN&r)0}adg-t33Ei)T7?{m~^J+of=(3+&; zDLpHRLO71LN`IQoP`(%WjV?gn!4K0UIyt(Kt~B*M|=q z`bFcLZ+Wau*^7b*SK4P3rZWo}hz|p}cjiG!0}-!?Pl+*?G7AWuV@3%907onAR`PR} z;C=a^@Dlu2ms1QGGQ{lIp$;2URc+X?A+Vf6Mk+!er#J<~IS$c1Bv!uwg@6(u(WQUVT_7P~;TbGi-D*9PBz} zW@d7V7*Ls&4EV*s&2{W3DY_3#ZY9ObT^HJ_XkR8g*z%vaWPl7Qxs0cV&>i1FjV}^!16U4i`$j4@R6G(2%RP=l|IDT!0B^ zs(S<4#pGllGz{S0Dc9`?M4vXj`ViOkq|j7SjE-`kB1Qru$o_l;X`)H;WHgx)kmnVY zss8czi!Z)#PF1mK(7rV9%5Y3H|~1yRRcm%VcDL43L3{8NkPb3P>!2 z6g4Bx z0C)l}?sa8uoyrda;G>h^XWurw+6r+B5CH8lmd#!+vD1SA7w0R6lwVBuRt zD;-J841WatTkzg;z;Zc+Pdo#%SBfj)0apwOp&hc^R%wU)mXELuWPl8uzySIdGOJgV z2p61i%H^M;-n$ch3hr$cpq4mL5rC{1l0Fkd-mJ3zC##bVoqH9ws4FRCaANy*b=a6_ z6D7s^_3Hy+aI3#R#qK@k(_e!RT-mB;;*x*TPaP`{T`^PdD;Fr1EY^o{ZJZmrYFh@qTS{8B0fWr)+ zLn$%}kyBK3*v!mKdmAV2MyK##W(jyac^E)FFM~HT9Py|*t{op{b2&nnjv3(m6d?Um z2TcX#H?+&(3rVXKO%O8RE(5q!8^gLE5Pg$szKLs^Bai``7Dp=r5*AHzG)!4I&4#q% z8eFgqqOmESdEmd5FTlwprzlmbR6MIB%d#3ZYGg}JaRb_`4+|A{20`k>VDDp-3d$+6 zYP6>5%{ZmDoSzgE`j+iGLl4q@s5Gi_NseftjA+2{I5; z2GGyA0j|q>eI-4k;8)oDsAo1iVJ-(M;#UpNS}N)ZOSsywRzCj9CKX$^Zq>FmCcMp? 
zH=DI<*9OAKpFe+~^fnXYL15apZEK?l>3}=mUegEG)YmE73~5ClN-J!%D`I-YK>l1g z&E56vNGsm|Y>7uqWNxa+D8Bk3m^?3YwByeFi2K`@#~tAhGC&5%Knx6^&sY!Hzd@9R z_I(STa8G>1H=}8<3InbTPKYyYa2-`w@roeXG61emL!y_mO&H$<9Gr+R-z?2x25^n$ zK$K-^AWJv-95yv0({|(FP3T+f$>^k4$1a;U8#CflOzT!H+9bem4phX6bTroCC~C5W z;;zMo?2D@6Ep7kz?Ac?64I8En8#CVg`SSx|96EF;Pm8#iujDpjgvqnNg6uQKz_ z_3$bTR998VCKbC6aNT3qE1&k+W#E>&mQ)q}l0#MT(uda66i?f&S)!8xG7u&P(4T7z zf4@d7=9%B3o_HAVX7V9~A%qN|?tg*Xiyd)22kdyx5nIAwhNxCtQh&`)Z;xT%o<5R+ zEEzz*LN<(%4TPRV^zmEuzi~a!%F;=m%#;BiD?C$YJUC&a;~_KCANRm_F|iBcHf>vF zlM2_A6kE1zF>BVWiP%+9UbqzSU@-05x3^)Ev{zr06z^*juwmD3)0avLZCWG6dwpQ* z_Yyc4AW}2Kl}=O@cb$8ty3;$cVZ{d+tnfMfI&Yg9J88WQ7p4Mqj16F7k^wRhJ_gWd zya-;4LaVT6-pe4xgZPjjN^gd6GtkE&_eE9F$RT@@(iCu<0>}R8m~*`xvxnx8frK!C z>$4RKo{rc&AqK===m0>dZ+IHJS^gk!pdyZSg^uu06gR~}k=Nm3c7DSx_`eapzoqTJ zC@EywC)dKfdGoNYxV!^pAC@k4?}J*CCQVFAN=o*|2TvhN3jOt7g4>Z@j#rRiM|O#V zp8sI#Oe!gYlE(B&$H1-U)HX%)4t`v(5k?~O)pv#hJ&Qag;^6XqlXQ8Ix0UVcQmHf2`T8BxL{I-6`raR*<% zVygx|0Au4Xn%vJvlPQ}#(xDMuln+6sE^W~^bolV$&Pgk_ZQEudS^H`E^5tgTI=tdJ z7Vwa;C9RPAj`LC=JX|)w&JVP!W&KV_D`w8qh9w>u@QQ&Wa*BHTa*FLc4;WuJsSp{( znD4AS3q(dS+N%aaW61y+aFPM^4X=b@!w~a3X<+6?$uPDo7~M1TBs>!)2GADf!`~kq z@>$kUyVoImlFAg^yY7KQ*E;6ZS7?XJ9kYq%IL5%Zr>;KRtGNJC@`r1*DYD9Xe1Ajq z^*i+@*nT~(>z(+YAqEC~Z2&R2obaqN|KEnR;!n7^>m=N}Yz)9fD}xlFkygk+#hf{F zOh%hefD5x|(IWH9FLFP2Gn{|^`KDsUiZ+aEkM`<=v_f`S)0b9EnQ8VMkQsdpWFWi@ z+;&baE7FP&KJ!yr@k%#ql8R1v-cV8z-b;<1lK~Y5&~N(_fCeGDGz&Qr-}WL=8{7u_ zMUt3lLd8HY$Gm+J*O^NjCpbn=bmoB58p!#2bh)kO>TVUC;AFs225?OlK$L580%BE1 z&5Xn>`JmGQ{fkABBxahd8Q|bV*8HfSP5(gff;zA#x5)GDxv<9e!Q7Idv?U@LfNQ@>@e}rFzwp4%Pv{K^S2Ifhj~jWrgf_p zZH{D>`_N+nLE@eq*RH54KGZH)R284iqN+lhSK|4_K>l30%pK>}RySHy6*5@iQ^11m$r`*2WZ*;w`ryF! z6AvcvcVU#>1J|3p<1;8Q04n94CG~uo0-Pqq3V;pP1SA$2aGe3Uw;|jLJLN;j-w}QP zHvK627h_%L-52|OZw$Whe4?F0aKOgYdD(Rfaxk|8F5s=O@J($iWs{1*gWY__i)}Js zF>TtkBT>{9J9el~&ex{&2wu~sO>HVEYFaT`7xWdt$;+*3cz-gL6sm+0%u5DjaH5EX zO)5UJaeg_@bc=2(&#;HcbExo0QwU3;rDllA0w7d0uH+38t$9~emLU-2PzU!6==lr(coDNMaztfCoJIF zfb0U$&Dy{tM~-xEe+q3Mf?)s}5+8;n| zaq;^it&rZKY7qItpg}?{gC+wOr4=2~US+{h*Tc&&P+eN_-sE4oNrme*Py4JhaQiv6 zP2s$G)XRCVuOHHim%1*%21Zssl%%H5j!@^8fMkFSBtHY_pUGFg_Yr-2UtABHTj2ip zWAfJ$J&ctBw37wE7#k~&a{gSv`wZ7$S!LYNz=6BgaP~b%9qxg4x6x7CXciezVF1@) zMMT+vMeeaO^TD@S51H{W5hvQ?8a|L&0v?Z!0pB{C(Rt}$E;MHO67ie`)1*!xj|jK> z-QQpfZ`HW~5tW;9qBn5ZinLoY)MjP<%7Ia?MDr87$p123%aXL)JtYiEFWtXIv)STyS*~^W9*I=fcY|KfCCkoa}$po&GZWI z)KgEjxk-id?G=}+C@ExtX4N1{iqC%&6(j>02rmN#Fi3Ixxo4^qAWDjE1D2`dY5y(2P*$@tn&OwrI5zAzgG*PGbqu>`4RXEEf1KJGWnk$C*H{i;W_Fp1i zj(7@UvIQBWkR>HoCYxg}`6C~+84}CD02FUSRnZTDUf=%R~E*|KG`O&3Lio6%mYN0BeyS)@a$uBvz!`%~;YNL59=E0g89 z#lW3d^2vs(;+;N;Q&nWe@GIRG+S-re89ZC5pva1Ro+ATf;5Y`*2b1s?4nBO`2|s?! 
zhMCQ9+jDbN+7AvxZzbB#NRTkaQ6l7n-w<4*r#Wh0!p#CV7r=;b5KAW9n9L3M0PRdZ zax#zsZyCU~mILu}L|IZzKGw^`W9L_MiGlFN$lDy(uKXX8@NuJQ_>>y?x&ZpLI6P31 zdbpFmziuL;)gOXoHgwfqEc%~bD{_aE9q&}bs8OTL#*N&dN|RK}(uzklZMA`g>vc77 zyK7^w-o6u?RLG1#1~L#{21HtM8`go+4zm7J_s`w>h&6BV4m0eVwc4%umiI0_0ta9N7KkarVQX3lM#v*h;Jj2tXU>&JGDj3gP6=gq?*#lSdz?6y5gI| zBNVPs6c;;W7*cj&(GDGP1F4}6s*Q(>eFrfAtBvcsoC0;HO^FI#vu4d~$tiAy&(&0n zv;kO8+DKp#vdHhqjJ3rYJHeuTj-anIY}o8h$AO$N}G#=^4EZd!N5s)qRYM_jjO zJ8WazOal)R9zlV_5c9{a;OGi^f%mHDh-0qAFo0_-KjIaLGLbPm{yqepwCq{y2$uQ}_ivrMa2t;|(d zU1hGk@=A05`RAK5Wy)A+9??OHlA>)}88lZ9(q4U0QuNUFUX&Evq(Ylj;`zpaEhWX~ zZTp?sq+ zyiaY3-Vf#^0~s>#Fpj#sC~IZdHwo8rhYYO`$wBxp>WLwUFCpf1+-+Gm=1s?KqdAQ3tmJ_*b;6J$9gpn$!%8f8-*p5oUcZ0IsRLh)oe6 zMwCfUYY?X+zKnP=qVyTU7-W#PnXU1>1#@JU`ePPw zZ9vNGf_2)!W5$dz8#cJD1kay8ziHU8p~;m?CbwlAL{(9%R;`Rr#`&lWn6zor#zv_^ zfb;DY*XwfFss0kP3aW~Ma~9g*m6&9}8wO;#r(5dk-|IGQO}wf?)D%O$mcGAw_zj4D zjLNIKPCzn127)kvj}S5&<_kn$KJp#wp3IuQ7X2{wB{+{Qgnx&g0ok9z?U%>jtEfHx zn}uuo>M+!p+`ACvd&!T8K1^(rY8iz4)o}TPfea*y0bDmS5YYhfF2pwxe}FAp5ho(P zf_NRG%RA7+yto8e{!%7F2saFIW`KheaV9*Q;`-wJylm1$bNXi7fT~B6F`jvF6WEFJ zx@ec_gOZ{~jT$Cr&g|8xqOd4fu%I?YQQ@^{(IOfpMdvzYX9SrGa2wigrKk*yZ>ASf z++LL;QxA7K4(IKkW>8vj93%V@Mg~M$aeD5|M{lF$B+=4}&fTp^DxSteRo0SZAOmE8 z3}ngxI$1IuTh?&6EYn2po{$%(wz${nbimy8la?iz0kkn0srYAt&4{%4Qb;%y*LH8j zijijWEggJo4Vy*sA|pw;J+WV;Uw`w&R+^av4B$E`fLIT)9pV#+BEOOuP0|)8BX&c4 z2(dAuFV_)FjZ1h2rQCu0|Ngj?#zIbFfTI;BQIfa=xDag0DIQInXMsw78>vGjrw~~} zsZ!Ff%s$}ZVq|2)g&=m{aKjBYOz}xh(NmiMky8wwOF4x$uf+3z7m|M@WBd7Ri zpeUi@9~Ld&WrloZO-Au~{5@icPBE~wz<1EnSYH>wDW7Rf)C{0MSqPa&BesYdBykx< z%6bdln+_z-1eP2!20nqm<2>^DCfGU;*ZJ#+#XYh+OH<)X6~qBZG!oGlDaBEr9>qP2 zi`yS%ga60O0Ir2Xh-V_UM!Xv_74aj)35csvz&1o#8?7JWbBG-f<>e~-DY8fu(y$MH z--&Cr6XM~Ja^Qs99H?-j8jdrf4}RH@R@{mkR<+|wa_|T42O_O_P205n`}doH0|#ou zs^I0xlP4lc#nHlKP@_;GdB042XxXx*tsTB^M|<@_PVoY2K^|zT;jN#zNrft*1oM)C zyXu`~PP3q-c)NFEHmP`}+d^~jkd;2~GkA7VLE&X9r_p2}J`A8wRTL$SMZ6$BC90P4 z4$$t#ea_|Gj8z>bSmGH#dy@zD{fP2z>PyH2L;sGLhHL(*^y}Ui+mEvow%0`Lg@bbu z+Z=Z?0YC0s`|yV`%mo+&liy2#*(}ln2GGv)AQnfgj3^3-W{6iI%J;i}AijXu4RHwK zcZhQlMHR6HQIth8aMu;F6XG3+7a>MA3l<3({>Oj}BsRmXzsLWM@Q;KsaLVt`;DnXW zf-_(MH=f&YxMv2ZVm;Iy^?i4&Ahz?v|I<*$B1F{%@??Fb_uqftlq{(V8#5g;P;u#{ zmj=RFyLPSl`RAVl<*}7`?AWp9t+(E?0sjHotGpmM9nfCUaW(g78~O4jO-Z}P9l9n<)c`cMt?(9Q813thZomucK?qB(F-zI&*KIY2uL_eu^_ zsM1QX%oxBkPx_JaLY(MO4DEhXqK)vOeAHP9*eQb)b$zh+1RG_6XqgSlkem$cf?MAs z4#xHD5MQn%Y>!jL^u)PjAUxojZBA{Nr zfg{ZwaHc10P@g0igI`55$SdEnawFzN%!il@QI=Xf1+g&V(ewC|A%P4?o9=+?RDCs< z1p4H4(Hxv`u=;TG&jGW3L#(cB-hSZK#07{C|HX>_>*^Th0w^1n6&(2Z7tX($Rhnqd zwQk+oJn+B+(PXun=aNeXSF7#6)DvVTre zEy;;Ga%G?*`dVR5GVnVChw)!$#K&=+atTR?S_Qv)IaC=(O~FMXvq)Y#`djgT8Nq!_ z5zi*ny8u`J+WqC1Uz&C6+#4uezC8Fk>8|Wgkt0VA)259T6|S_`M{G7S4QyPm%Yf5M z1M66fWm~sne~PqY5Akm@5IP2mV1J5R>eN*Cd*haUrpF+;bS;I0-&}*Y>y!2f5H-c1 zFRgqNkO9*Emb|eBn|ipDHEaTt0Wy#=18?FmM=CPrhDY6D;Cz@OUyqy%==jT*t-BD_ zUsU7ltc(K48bmAb<@qPX{~#tzYK5{(Bi@8KRBS?g15s@BJ-mY^lT;%WzE=qCCj-YY zunm8=!8Q7CM2=J(LyQZ5aVxzoXrS_FB;furlUzz*EC*tV^ptRJ#Zcy87xaH{%yl%O-x*`OMxdHY}FvriXn5cTpI%!2r&a9t+?5e zwBqBz!6j`$szPKG1IMU8#>50KWfVcUV_Gsm24ZHQ6cDoe@Eda>1Rw)S4CDh&F(oW@ z81(hO0qEoEz=;Pe0~4-8lu1{z+w&$29xOr}hu9M_74c5Q%Mj}$)p4pd}9j3bA8!44Z=DepjsA;l3NGc^M@l07)7F8vi% z#pj>9r@~B2OEdfT@6VKvj82p&QNk1}=0=}hR27zXD8C!+)dy9@GpHkVRmEFB&7i6x zqsGr;p<>|P`e&K^x%9ucz4^%^vwM#{QNn9Itf?uU!80-$W^;zB9n)Dd5N8JXo)>4r zSzOEvxZY;vy>wvAUb+uZ2`08cybZAvVt2&Rh*J=MK}=*kDaZ@F3{otygily9Kn9{?K<-!P;o9ts$Ym;`<1lWyT<-wHjR+SD z!UZG`I(e{b2l>L|{G$KqPRp8^E42yu>Z`AuQ&Pylgzrj<8*aEkyQ%~)H=>MuI~{Pn zE`@=gY8xp^ilOrsYr}>H@98`}uXJNBKxinp?r4#GdCg6AYN(g9e$zhF(~_z})D#2k 
z?3}$2M0}=B1_8+c86X2>fDDj7|$2R8o|4tF|(<=6M`dpV|B-l@uA0&4b}$;C}2+VMj@^*ou;(3l$XMYM}I% z41|w?)c^Y!@BS{V3*bKYsfT4QIz#d?fS1+n@aif=y9-DrA9cySdb!o-dUw>`ZtXZSY zYfN}DQXwN2K|{@&HG^icl3q5cuq3U>1B0D!uee@C1FbHtSh{7K89v{}prw^&Ku|JZ zg#nRM+-N~gv2NpD)9Z^Bsx~iQv)g<=X05(rS?FEB|$Sqklu?$|8a8>X~ioM_!B6JY*OK>v_f{(4ibtMEowww;X=6Nl1pq! zEAGK26=hxUF&i76#d&pU#eaX~CKcK6&eI`aKsKq!mouljr*C|`$n4pBPz|?p_cXI_ zzm+vmpT#p#{r;lnIDyCj86X2>fDD9y0X&Ok9g*u13Sy1eqGHsTHzcS*Y}li3OpFyJ_GpYI_X5^eDQcn^8+Mu~SYp(OA}W>$Cb0r? zsS1LkA_4;U{Xcq9$}OkNoHMig+s|h{x18N)XTGz0+RU?ScO3tfjsq`m_Oi!GixDVT zpjunnrD(R-`FNX({ml}g0D}_|behgD%cQu*Lsg@==zmSS|E8aAr@1QNGtWE|Tqebm zB})p*q}Xn|?K~VJ`G;IXUmSMpX7ANkS`PT#?M+?I1z1bbW67$xe*(_I`v) zelyIM!+s7bENv8es|+Z6a)$t7^@9`qI2;#DjyXZgzdr*V4xz5e#oU3w`--U zTvp6;9m}K${-XbSvhkKlF=U8kYKFDgC7%{8%J0&7mPxVEMjHju5W7i*r>u_CvYbiL zf9$wqDH8*NIL4_&`2pw+xEQ^;$>hvP2p|xsfL^2juBsiQVv`J6(l20NjCNJOf06#C(n17+6?j)QJ5psIor`0FttA!9 zOMrojytvA@vQ9dC!{hvtM~bOw?%~f&uaWnbRWW0R^V?q3RJpJpb}6cu?@zJjnrr&H zKZRvdTv(WD>9*4OI#GKYmN%1(D6s}o`7n#6D&6P>9llu&^S#uU8_l_*?wP5;( z3zNZ*&-5W`H!Ql>M_^(ifB*srAb>#O0(##+LR%lFlHV+|!gtA*r4;x>i)VBhind9Y z&Yh0>Zoi}T>jCQfdEfnu-@Xy3tge6pff5OPsQq+T>7bKwxC+;LDiM4scEe~zDU{vq z!%{1zxC{4fq$n=>ujd|n2YB_jwRZH&vSrJXVZ(+wismlNfH1UrB<}5^U-h~=%uPSp8|d3%j1(}ybg!c%haSS0;lh>Q?k|? zjh*ki@4!h#-K3)Lo!(L^uFz|s^XC)ifCNMU0R#|0AcF#WjUKLMAELq(`*imI(`wd1 z>gUhX**sc>K+pnqSL{ElY^`%}0H2A1?yG_;$KXW4bfw==C!t;R@Y>?`eEEVaUt4`H zzzQ*Ks`~F&_&elZZ&l@-57vwtH7W@)wPNB#o1LHE63PuLEVbf@BkX#u&KEmgFK`~! zaX_8B6BKf=+&Wt3JqNjM5x+cDHESVIpO^Fwuz!`h`$)xCi zmu?QUcv;>c{gi>R`0!>wK6d$BfUok8k;6{y1{L-?$btX@2q1s}0s#x?^?IzTx1Wk# zw~-}&fj(-&@#ydCabDvs%)im?>d$F@l>6f7An9%MWJ$K+OFn>(Z@|H zY-W1!7yYkcx51jKn%|xMc=p+6gUh6_0hCFTCgt5m%cRg3`FU^WdV?lSn)uA5INx=B zUB}b;YMB&6T*sZDOo}HaI!`8YT}|R4;8kGb=FO60J8bKGA2Vk!OzwLmzxe<&W_in? zn5qWe?Yxl$L;wK<5I`U@fwn!LQhN5P_Xr5P5&A6fmTI_{%IL6bO6}?qut{g>0~dpE0UDQPzY6>)Z?UGm7sH6^0B-oKutzEbK#@U_2NtU$fB*srAP|CpKCjMEJ@!|*J%svDhr9&l zYVBXB4{yk8D^^ATfkFf_1`cVCDiuGf+`kT~U;J z>NeKdYeR}f-ms!pmMAXzUsH2?=V!TKPM7DNdoGziJ-_L+PAgDLFiWkNFu`W$=eLBE zTG6drxBM!f+uF^vTJWhrI$z&bJ)K`%Y{J}m$v;S~$n8jBt<(xwYQ@nzx=*dJG>Q8k z&23VEt($KeMvfgaRqNfAQqL(_) zc{)XZ(kbdZCz{Pa;qhSR0@PVc9&jyiR>z%_rfW2I9X9#;#uGW*Y~uR63%jvJ4i&yq zuY25-A_pi|K>z^+5I`VQ0lk*rp}N^Mpxz0!j@0XG0lRd?zUr?kJP<&@i-3Ku8L48k zSlj7byFz7_mpu_QIspbMqU*A6Dy);(GAXX}P<_j!INw8DA0h8lQ0JKxqeqVpGLvHB z#EE$uZoBQa!Q7^Hok&6Ax1HGRq;we7>6YoC4bg@tsd7^HpKu zrd?pk)6>JbM}RBr_Y|l?WgDGqmZ@};iroZ;1%X-x7^tX~C60gUhPlQ(T8FDqf-Yr)b~4eNp>$wQ-XsO%jh;73aCCr>{6VUoER*xa;`u z&HaR|3fHK_i%y_*i{`pNh5M|E=^rjk20d2$cL|nFaqr05A1e}&Y>FZQ*&G1`5I_Kd zPzCgv;0e{OgNkKNl2K8fj`~cy?XJ>E{Xu$qdHR68rc1z{%U)Hndrj=3^J+VlUMiE) z)l6FBOMt-%U$Ui(dy{tcfrs5iF*99Hc9uzTrR(-9Zjkq*C zm%y2O?wqXI*!}yQK6Rv4^w&)(Jf&1ztrw+5>FOO?g8%{uAb?EZEHhWq(hfGda*<7bnM-{u_1kY(H?QoyXa{npb zXKjhSnt)DxdqU{D+5^S^!_p~Yxk<&{s;l#KiZNrx1eZ>s{!S)Ln2>kxuD!Mr2Lo8v zT5GMOTQ|E=lJmum*BH_%-c&swa^3p3KKeL$nwwO(W+h%^0$-<7{J>8-#gM*hOVrwW9JC?7qEdn8-(buqB~Y48|qx@s`59Ll|MM?K^6q^5n!Mq zAFXiLy;)V8?V)--Vwj$;CqhfDxWaY&6*tJ#is{p*=bc*d)mKIo#$q?A$ap%1B}z1u zPLX$K-9RQq-ezo&K7lhk?&2kr;=0>czDY&@yS!yk*vupD+L*pWlNKR>00IagP^*Am z_m`;Hmwj8S*!7%kA|i`BfzK7{4=NqxJTm97ou{=rm$?8rbFpG31nedg15}Px*+l1) zUEAqAl}A+ayY@#WxM*ad0t{3XDi_dpHBSoHd8kqpv(#(aeV2aP6Q|R~vMQ>os+`6w zHkkT5dH3CSa}Kj`;ljKJHgXPLz>1bt;prw7>!>>C6i}s-ZleCPtcnq?clze+xydu{ z+coW35Xi1T8{MSh$Q|r+zT0KmhhHW`pZI2S(AU`%BfLz$vuuhx+}50E2q1s}0tg@w zgMdCG%vHHib=^+IvMa-xX%++E>UFeqofn1n8^pEcgw9Xaw|LV5-V*RpD(_{;I-zgZz5W1|;lz zlC9-)di@F>?XFHMVSu+rIrPGrv?;S7fHG>1t>Q-}=}$QMpaoNSA%St(mJ07_rD# zjrGeVVAB9TF4xXO+^;@BRH^h(-P);KsnXCa#SoiO$~w){&aPHzC%3ayxLCeB{SaRJ 
z-L)oLk(Q{rZ>tPb>8Wyz%8n}Q=p5Ni7jb{)3pcr9AANOs$8x7E@T z?7W^bWlHkaTW=*xmxlQoW7ncO_0&^yP6^2a>ocuu$7|3ZaCEHhs(QYz(kPF%HMnYr zjWvfR4RiD&+l+!xGM&fTdp9 zcY;-`{Iql99i11JLcxMS&;krp1l>LPS57Y+muM@SW#M|cTu*9$avjulJhi{EV!o-e zoy*vYoAmGBKWW#_Cb5LKv})BVv9iV*Yb0NO`DHSD_Ux~JhF8;-YczD|(Bz(bEE~r8 z(o4r{h4ZM61M0Z4)QUqLhfVG{Z2#n^?KXEEmUsw6C2;XmW0Ik7+I#Zgmk(9Lt@NB_ zA1qi92wp(1T}P^tgH+ZiLuIFEpZn>2sVl#!3{?tum+C8fKm44^CgIkSnjw(bbw%vF zwlCLMauCqDy|Y&Pm5P07yjc#Lq-_W0bi6fEee6QmOcE$< z$JlXh87v>DSQgcazt7U&RaSKP+h++D1PT!tb<%131UDCefr>)9GT?3W8e-SKvAYqt z&Z>xFmb!I~4D3zY-S)f7W$(TBPA<8`#(`K|30PLeDW{y0eD;|gUQQRg41{G>eBv~! z<6t^gcT>oJso0x8XA4;s&L{~Kv4CY&bQ<_zvSgXttO^e#=jpkszlXSlM8KPXUcW3W z&}Q=fK&7d-9eo$rUf0K{^bh^D{ktlM@tgX}1}V-}`H_lc#IPU`ihz9pcu}RF&g;3M zR4S%===`?pdH+a2+!r>nBTz1Zs}86SO;vGj6465L~&UV2vUH- zi6FbBxJv1T<5ESqytpW}ZXL_abjzgpugdndwxYm)$~EcNuV1p&R$CPqfz4_Yusf6A zci(-rJyh)fdg=weve+QLx7TqsQv0wE6Hb>q4p$~c+l*#X>^eI6S#Wa!oOUn43J|zR zSrx;=&Z@B4P?k-xpn&GF2?7WpfB*srAn>(7%@@fX^@rUJ@L-i4Rbu}#+1?*LtMZ77 z4Nh=x4neRWfB*t%6R=Ttmh=f+uO|7phr~I0)3bi)Tzb=DGc24B)F^H;XpmjqfW@W2 zA%`52*!?v;Ea%2>ysF)mD$Y}sZx}n?11TX9h)bYH$6b=f-wXP!&i^qm5!W$AZ4t=2 zz)J%w?`1APkw-yg_?ILtvW09Of`Hy&EK?b)a*4`Ls#9x~<5UKyyrbg$1Bha}UhISY zIF)`X2dZqKwsch)pu$LnKv)n!0D&|L1pN^{jo_tdlU}$jU(7}_e>$29$HD(#{`M>8Wyn%KED74^_@rc~WJe{>Oqq1_U0SeTlA!&^GDPxzkY@ zXf91epdJCP|5Oi7%nxZg`MRqAWEFcu;&QQz1Gm+i2D@Nh(2ER?R|TD4SZJ@k_DU|j z)Go2b;!0q_f(2jSq+6J%5Xi2;)K5Q8 zety^RWNCOeskl(DrG2v7GqfB51Q0*~0R+k^pwA$7bD?ci?7GSJ(=HzS{hHE7KkbH> z#e6=ofrhy%RVx(xj>0As{#WmV?6VpR0tg_0K*j|aoJjEjpf?+TR`YB|g6pM1ZzOuT z4(d9d+HYAxZ>V%|9lKATKFK!Q*!wq@`ULvV+TrJg%msMhfd>*R9+p4r*zN5huG5e@ zb}g%-i_@SAc2~CplKr-{8=bKrkX?Zb|M7e>{OzE!;p`fX?eyH_d~`Ouy-dpyKmY** z5J14SfIg#GmiqcDl;v0Du>UPz?9RVSYKjd$*xvX1Y^oe6a#u|JnQcLgoS# zyI;0P009ILKmY**5I_I{1iT0=9|89goS@|u(3c=Kpm2kSTG&jA9vnY}09^yI;sq^(;3iuDFK^5%v4`wGXOr7aEEb$PC zN?^?Z#Yy6-5gDy|TKZ&`sG6 z0R*xvz-UGKd%vHW_=$&RUZ82<_APMGi@pI~rV3u+qmqLL*^RncTnZd^*kMWY=FMG( zTpsIu9j`5x{}J#{AJ=P3`n@zR;3iWw1WF@t_K$W=8h{VDwTWvzm8RR-3jzor zfB*srAbB=gdD1%|8^bt z)mL9l-h1yo*I_dq&r&N6KKNiC26i|3;8THg?v7PGpL7jisTD6y`@nTr;vo=~KwG6& z99;g?3cFY{DHTy2IaC$_1Q0*~0R#|0009ILK){y(H>dC=8&KSun^aV(O0KgW>`NBA z=)c`5E9gZAN2`K%{U@i(W}9u6v~AngY0Rt#vzt_m8|P)#e-y{7j@5m147}j_IM||P zi=@NG8@LWjJOrW=_~NUtlF?N&l4_^5-hOHyJ!Q2`x^(VzR9=BE(d*~cd2PYU2q1s} z0tg_000IagfB*srq*Q=`ij;Qjs?E}upS|@zAGdvu;)@owzmeK+-+u41sttc* zD`2GgSA9=!*%XsivYb_6b6xGyPb}FLh~kSvov(YVi(brbH_>tgQYr99Wl&s|%9hb6 z1Q0*~0R#|0009ILKmdVg1b%bPwG%>|3lQl&Dk`fhqUrj8DynZt=BYY2d8m?IP$rg} zR6MAgs%bNp<{Z==G$22q2KNz{gtiFFCKxiU=Tp00IagfB*sr zAbu`2`+K)|^`^+i+9V=jPmRste`00IagfB*srtRO%}#R?c%_|;=p zh0RiLr!QCMhwdnY!&OHcuwcoqK$V)_UdQXQkO_3G?yLHZ3Av8c4FLoi6!?>#Yc6ka zEmlAP0R#|0009ILKmdWP3EX?@HJV4+HtEv2(@|M#JMBeaH32dzRwGIGKlEjZWmWjN zKSdO?)N6Wogno`ocTdy01_avb@%I$o5&+uzX4G}<~4uQUU4VqWSWBeNd1Q0*~0R#|0009ILKmdVE z3k3hoa;AI7Q{(kT$wvz9Bc0;=ifecN4StcqNLBLH;44Z6eF|*hGq|hRI#!=mtmjGqYzj#idueCNir!S>MW{?K#f3u4K#j&1_B5mfB*srAb>y& z0*iWm|1RbN#L!dJ5dj3!E5JZSdi%ECmg&or*;-*hy(<;=pk-CWvOKDaz7|Vmsi$)R zyYqV#i_X_ks_5t_DoZ61K)?jPRJk4s1Q0*~0R#|0009ILKmY**5U5*#!HK$UQuD~B zLr=Ichq(aPs=*5!H#(YG>iY7R8&){({K&$MkF37EBC9XeMIg%p_vF%&0kjJ0R#|0009IL zKmY**5J13@0HYO-AR`XrI-O!%gcg+%)|o07Ct%Z@=N1>DEL(Gvin6e?Qv~u7cuvnR zQH|W^)jC#2009ILK)|=a?vI?dXMJ-4eD9z52q1s}0tg^b76Aq-%EBJbPW5HVhpNrr z9M#CKAN7=@SS1T1gTHHMmR`Y<4S~R7eZiifh%O|%k$CesF3FZFi?@|!!PH?U9C0e=e%~^D-P5bsV?Hwy$64f zoUG7Q{=ySE5P@Z?$0<5arv_4=LaK4PHkwq(hHQfX0^S9B==tRX?|Wef2q1s}0tg_0 z00IagfB*srq(Fedi4=5Vu}#nyDRz^JD~gOWN89%qcz5z4WehEAmoF$;X6%Kt*=<&`lvg1RGs00MOi zY_$2iYcm(1PM#tksH~3iBBVvOn#~bF009ILKmdWd1RCknbzP6~|BMMVYSgHinmk0M 
zYaUJiQtO_m<7ikO+XTO=EVfo9?K)64eSHTNvtfxxV5zD)P33u&K`Kw^7)|B3`Z`wI zt9`#!IY7mZ)#fTJ2q55Bz|yqr+G~H&bIVPBcSUpr5I_I{1Q0*~0R#|8nZSuX);*EA z04eKKnp3<00~N&^rYeHI^t4o)b5$x;TBy{wyr>7y(3dp-sc)q;JXSM^(=-ij$v_)) zNLJ6wKs#$~BFAd2Ec_DzR{~4*{PLNrV8lTH0R#|0009ILKmY**5I`Wy0t`-Mx%ca7 zyT0ITsfRVBn;fLFi%M&iMJhIH^y``F`hw~Zqu z%mwh;ca_x@UgHxS0R&Pbz-UEkx{`(>mLH5I_I{ z1Q0*~0R#|0009Ifa6peohcFj_LjeH<5I_KdEDA7Ck;Pu8%?KcX00IagfB*srAb}_6dA0Kq>|*Dyu89aX`^Z1Q0*~0R#|0009ILKmY**5I_I{ z1Q0*~0R++_kcttCv~&jTKmY**5Xe{H?5?A=N!z4L=T1lEyDjS@fB*srAb+xV7^n!T zH>e8&2q1s}0tg_000IagfB*srAboYcYBNa{(ggD5{760tg_000IagfB*srAbXp?MMQzW<2q1s}0tg_000IagfB*tf2rya^MK@7N z1Q0*~0R#|0009ILKmY**(kd|i;~Bd$7a*;@OS=$2009IL$VGsGid>Xf2LS{SKmY** z5I_I{1Q0*~0R#|0009ILKmdW51bU7CA@v4zK>z^+5I_I{1Q0*~0R#|0009ILKmdW%34A!>$nBX6kh*TAK@J5N zsBnl$C0|5jY6u72EljoTW z&>#~lAb5I_I{ z1Q0*~0R#|0009ILD3`zMrT!7RKR5ZHhr2oCcO-~X90R#|0009ILKmY** z5I_I{1Q0*~0R#|0009Ji38Zd>!WR^A5kLR|1cDLR;qq&5iDxcAFlfEQmGI76GYS?*IL8fo0j*o!ig3_dU<^u`_c|dB5M8U1sigzH@&5 zqtgaAD%h()Db;AmNrTQ*s$O@ctY75U!z&}d`0aZ9s6YOspI)I|~D7|UpdD|Bo`0wh2JBtQZrKmsH{ z0tqIt{trDa=30ORzX9kv36KB@kN^pg011!)3HVEZfeL?lvNsYS0TLhq5+DH*AOR8} z0TLhq5+DH*AOR9^MPS65ITN`Sz!f?+A^{RGoB#tAhEE{nlK=^j011!)36KB@kN^pg z011!)36KB@kN^pg011$QX#^OYFm1++)_Y^ePoLph0Hddn;z@u6NPq-LfCNZ@1W14c zNPq-LfCNZ@1W14cNWfq3b3;`mKmsH{0wh2JBtQZrKmsH{0wh2JBtQZrKmsH{ z0wfSK0(k>2-*3;g01Q;bY?|o~36KB@kN^pg011!)36KB@kN^pg011!)36KB@kN^qj zPJqD)-DimUNPq-LfCP*p@Z@cc7Vfzgz$h0emIO$E1W14cNPq-LfCNZ@1W14cNPq-F zCBQ&MsJJ;836KB@kN^pg011!)36KB@kbnyUk2jq1FxLXO0LLaIKmsH{0wh2JBwzvo z1}aRLC2AxA5+DH*AOR8}0TLhq5+DH*AOR8}0TLhq5+DKJ2^3FTJcDZie8gcfeKA#h%!ik1W14cNPq-LfCNZ@1W14cNPq-L zfCNZ@1W14cNFXT$7@SDTOw&&iAOR8}f#3;@=<>oLHLe8+erS$B0wh2JBtQZrKmsH{ z0wh2JBtQZrKmw)`V4%X(nWJtJAOR8}0TLhq5+DH*AOR9c2!WyPuj|gW0126Fx=8{g zKmsH{0wj<~0t{3na@y%M36KB@kN^pg011!)36KB@kN^pg011!)36Ma-3A}RKV^XL?Bji6c<_^pM-R79ercPKQZ=1W14cNPq-LfCNZ@1W14cNPq-LfCLOAz(9q8 zlSauTKmsH{0wh2JBtQZr5D|e>zgt_%wEz*p=ddI|0wh2JBtQZrKmsHXR{{)F#C2xr z7YUF636KB@kN^pg011!)36KB@kN^pgfFT6>oc#lfYXJJzuaLPqpDAxI(3p|S*I8!I=N#1URYR&TX)GUqh>C-3Gda@lj%LZCV^lH3~Be& zoKsJ}YCQg%bo&2fu)^3o36KB@kN^pgfWHKq{JZBAt_ARyDSIOU5+DH*Ab~mvFi=qk zpafiQ0UvLKHFD|e%V@XUWm_YfO`kr!9d7(eFel##!O0dWHxDmJDuPR9e%dD2vaW^y z1xaNrYzH10B-#h*ICcruPad1ZID7nxh4Cd%&ckK_6<O*(-ewtZs0Whn&r(?v?*5eo^B011!)36KB@kN^pg011!)36Ov% z0t`-g!e@Nr=I~&w2QOYi+t)m_*S@J_P#`#Qk@i9AEY%M^M&A-$^ z&kFk|0TLhq5+DH*AOZggFk0b1Q&W0u3ZY{>h@FMDu}`VM9q{2E7_n)H1Q1!eBgEmf z8fy3hA9_uZ(#ju-ApxBT=IeQ!3oPLqcqafYLLg zM~|Kt;*eSmm1pIRhafL0Qd;>#F(jZ9fy0`=p&I3^+f$OQZrbxjzCr>dKmsH{0wh2J zBtQZrKmsH{0wjL_q$V;>rN`c!6-~f?3 zcSCV-(F1J~xE5gK^kq%J!png(yJ6%L5+H$H1ahqMnzAXjSHLma{lA@e%5l4D^AQP< z011!)36KB@kN^pg011!)36Ovj0t{3*p%tIH6W|InC~BF{bUJ+vE;ECo7DgIfdZDne zaE?ZLLY0zAm5;-K%|acNgONZ&2=s3DrfQm7lF?ga+cA?OBOP9dD}feoE?k_zwE%IQ zPWnXxBtQZrKmsH{0wh2J+7Mu%LYt`wO^9Su$c;_6fv z0+_4Y!yQqd(Z)>qupzd(^78wvk26$40-6y>BZK1DRxj1A%2I>Y&K+`0?Ui_y1W14c zNPq-LfCNZ@1nMMk(tcmzxWP_p$e@8k>#V%1LXkaj*AqS>0TLhq^9e9oVg4LC_24oz z@UjH^Hm3ucR=>2g^lE4>Fim$t^}aG{)TmfT&uW`jJFlMXHSYYXM;A9xmXg!6SxA5c zd?t`m6-cRgIlEphYsi{;L;7Y{nYBoO1W14cNPq-LfCNZ@1W14cNPq-(5MZEU2Z?0- zl)Yh)MCiK2K7*pXyj%{YXy}>{8-_{X4ID}_JIn!f8f(+G2BVa%TI-~VT1X%+1P*C7 zPqnDGGHZA6Vd}=LEBjaRu>6Vln7S6gKML%L1W14cNPq-LfCNZ@1W14cNFWvj7^sNF zG-l<5+?6WCw8nc{7!EA~>-#gm9g1MSIa^%Qa+6fCuP(gX=NDQ@CNW*a$ z5=az*gPXpiTIOY+O|b(`%C5L>$7B8^0TLhq5+DH*AOR8}0TLhq5+DH*s7BzBt6%?f zEo%X44OA4_6RY*crzR6<0P$Wnv40-zW1UiQ-+lMBz#VX=R2cFuPo+|~7$Vhond2rG z=h?P(mG2$1LlPhXWm&d5u9eF`sHM(WJ9kJ=Vk7|)AOR8}0TLhq5+DH*AOR8}0TRfL zK&=sq?7Cnr0|;CSjjcTByxl`vBbw&qe%W#KtuCKNPG69l 
zPB%qREs^fmg@@Nkz!L!rzgPWRzu>74q+twOJAd$P%?^9veQzDHH3^Ua36Q{c0;jzD z%f8{R1xRDuHn4qQvM{7QtZAh+tNgiRkJu)7UVY);R=l%`@jD;Q{=X&aejY)t6NPq-_ARwRj4~6Xy>kd1h@}~qP z!unn;ryqR;`?&I_JbnkZ8pc8bVG$_6K)qmy1FC~z^1ILjCPBHN%6fGCU)YDR_h4_s zvweQ_(@A|UH&vPlS zP^kl29{$WHo_k^kBtQZrKmu+FNM?lGi0ubEtkU+w&*~Wvcu9-094h8j+8q3po492# z783A-fMh-#2Ri{a0CpfOtl%7m%2>*A_{qS8%(njJ!xbx5{Jn0OH+--X$@QJokU;~7)?K@X8b$WR z8eY}yrF5$C|3MQY6}n9f^~8cekH+u&6{!#&>6B$(i=P+80uG%Z0TLhqa|lSP#9-J! zSbv!8QDaK!1fgQXU{bz=y#|xJxG%tD89ECII3ds#l~1oUiP}j7Pq!kPdRE$H_$d)O ziPrrC_7v<>7z+uI011#l%n2}+BEA!x57#7(qMb_@pGCV(?;dy=Bf=FHmEPZ{8DY*x;5}|BN*VPNTc|31mi|NNg09i16>Ncog;+Y?bNG#6Z6V9}a@GiGeR}U68b&s(*iF|FUgWI@^NBo2xu8#bXIP zvycD@)Ixx16}7-I{L*OXcN?S-*KaZ0|2pL(NO6V9L5e0#ny3K-2B>!JcAw_5vNE-3 z(IT~A!2(rLQBfxcLn<3-5P!5caTE3=9XJ2 z$)p%5PgqEx4gzvYuiShYP)EI_U)}%!H^3$*9R&T~esfH2zWxlhS3-Dnfu9p9?Hl}* z#XEn1$<14qWD}4Z#)DvEV6tDcoNgVL(g%)+-3@yNHU;()%!b9Ood4!CXKJz*ASVO4`5c*;To z@gTsAig?UpbuT2FVkyk+2Y2}p@Wbkz#_9R=>C>AaNU_}1Kt;1=&D80qpRV%rWvXi~ zOP4NHvu4ez`J#ERo`-A-xrvjwGHH@Zt!Q7o2#(8&6&4a80iOvB?{cr|)Ik2r^u3g( zD%5^Wk9=;q?>)1_I1$K$GjfXP0daCFU|%jm{|^N`2D{gffb7>Q!3mlF-ulUlOwmUe z`N2w)n^`Q$As}A_FNBSQ^+=A}+fShXE&P@L2jr$MOYj6bV2H)ACczKI5$Yfy0gbO< z60G>9(pKQ-XRzh4&tY<1woYRq0b>X-i^7lpFjz9kRT7wiaJzcwY-M$UfM<2=e7o*Ek98mEmqdo8-e&yavWq)D@ zDLgP_QxeETpi9F~f(lZ|kjSb~SK+6uaAYBYOay*}3k*_ZqEqwhl2yS#Ma^vD_^NDs zDSt;VfHj7(>`q{RJiQxsE9}oOS#drO#u8rw4dB`cn0&_AJ+(f*K3DZjf->?xzX>Mq z{b{gGFqYs5j78TBQUu4>NzV;YSu$C~G8G9@d<~PJMKSD?O2e%>wFI^m#zF#G5lCsJ zhGP66kdIET^w5!HP`vG-z4lEdg911BFV$XX27&wTyRT~1s#S)oyT5$MA&2b#f|<|m zm^g7_B3A(*ciXb9itEjsHmW88{Rs?g{d}+@Ep_plH&5L=h(QLQGwvte*8&vT6N3=P zK0OhL#sA!%2sm%t6;0%B9ZS3k^o4s*!#;+|vT;T#0HKy5UkWdP$rrm}!9IemgS`opzcClUBzVF9M*x8(90Ckfgu^+kQRGY8)gA`>7uv=?r2;;V z^S~(5rro-AQxbe|Qu6Z5{VtEgLv9~7Y}hs@{>G}VWnC95f9MPeXhh)n*3X5b#dHNPq-tARwz-kAxM%WM54QSgeN0@6?U3fw1~877}nzpx68L zFUn#q00R~75$o9U5{UAWO>wJ^PWh<`H@+`Jf6e^tGpxQOo1%B`-j0Xcw8_}LI=?6@ zEBn1;hhx%U^n=Bx*-9N26L;ta31~%NpMvFT--aKDr71sEFFhfw!8i^HkN^o7MxZ71 zPKL<}=+g|-Zi-ylI_qVaWLYp%*OXaPYB;2R2pejSWG$4Q0TptSbu5glz_pkXiZGNw zbLf`LB)L!c3akV+3w8}mZcOohG9a+{Lx684e>m#Ui=^FM=|RXp(Kgl@6q6@UZd@6h z@PH}0rfxDQR;^m4wrtrFooEjuW3GOF>7|!$_0WakO>J4Nz@k5a(@yA!?fpBcA%h0; zyQBWowWq>?VP#Y77}{2&R=hE|^^T|fNdhE50wl2g2I9rwAg6oE4aHnC37o)SjIaQ9 z53F(Eqj3PY1mr}*r(iQ+&Qj^!ju^jo4dIdmKqWZmWg%z$$j~Daa8Do~O$Wj5g313S zD`0%iqA{(m;SZX4WgBRsLsE=Vkt*HWEius9>YXoWLgy4IGZM_vx4 zkd%sfx;p2Ft|?QdG{l}svfq=DMN%r7G;tQ4Ub}X!TDo+pk^V|b#e@kHHXAA457BnT z^?r22K1hHB_9SpZ+vl);7dxtjTx?rix?#a-@*$pu1W14cl1f1KUy(iVWI4^iq(acd zZ#nefF_x!>B+HQBjVoYsbC2`k?>j*vqVhh!1D5u;YxYJ0W)k>&->)|Kv=*Qy z0J7za1O}(VzJ$FBll|#yS)G8ZEF@4#fPsp5ycHy0nkf~v%q7wiBuHW84I(*|qF1k8 zwRl(arTO#cE8Os}`J&#hu1KfT)Ag1ZoVw8uEkPcobxd$0uxAoTE`fIWU#aem-j51E z!_u-cN$+DJ0TLjABodIDug}4L4dY3i`n&^VwZ8nvC@W!Eq9Gvrs7TQ0moUk4Vey}U z1cI)EJr8T@fA{Q>1W2F?0r`D94kq_>{s;RT>_p6H*WA^}G_63N} z{oO{kE3BJ#dx1|#fCPLaFrdxzkz`ZsbilI5t$Y24TurnS2p^CD2_%HT$?#LQuo;{X ze@%0<1q91>9lwV)FipJSdS!27IhB@yqHrb>3&PYKSm$tt<0vGMcmfifI1~0H>uE?Pjy?gg|tlQuWiZyw8 zdG|YZI3^87KUCb``hLYhOgy3+B%lw0)_LEk0~#%gT2PbnyfQfXW+!7 zIIRURi+?=S;v>lUo&g@EjcAUDmKN$rKA^R@?| z$y+ezU$~u*#TFz$0=o#v=Kxu7y#)3w>@*m+V*>;hr5LC%`&}T}6xVui;U8!l>uicg zAAPg|R_IUgz$ntD`|Y=%YTUT7BMl`bCB|k`U~8*iU3Ae!*dsBS#jdcKL6OXicdfN zG-~N?$G>O%`0?MmZESoyYx{r9#=AiWNI*vd1KYfSJ1rI3NV6<;#j4o@8)_qnLP&rF zNWeb=vMgVM6bJj)C3}j9z||P{DHu}${hI&@;>mLU4*qq?o;(nc&tkGC-H{&pU{ewx z0q+DPKv4u+4!Z}|2F8*g0t{4`JK2(lD=RSE6dR3Ve<;PqjT^_nPs!&ruDH#hm^W{p z!X8(~`3u48D=I1^dY;HKdgjtTaGa+}CUW-abkGD^P_D1s7k3@g{71=K55)G8<;(_bbh&p*i$mZ6E$aXVHm( z1S_6}9io#cY9WDy5@-Ow$HSJw?tsbPO_l@@V4%XxNtV-PydFxS-CY|$r$RC)(BBn) 
z_8DehJ$m#|jT$*yuq204EMLAnOqqU;^_x+nMy>U;L!S{aYb} z!qO*$Vkb6BUHQKSr*Yraoj~}21W3Se0{x+X7K}F%44=pv z4~MZ9AP>-F<^Dl&f)^uSn!%UnU<|UxXj18l(FEk)p6n*G40aRD*^M$ZU@?mT0~Kb? z*ABIkP0?+~6Nf)#xqa-FmBYgg?V~1nX zV9bmqhbmj05)*gm1_@|IplQ7l)wAh5tu*ZtW4FmG+jhyW=X^v0BtQbj5;zvh{{@qq z2`up>upiu-1C#B%Sez4>h9+8^*z8<-Vp_C=rxHNoP0++lIUO~CKoe-Z1@;N-Vi@mX z1A>`l?dOM#%Wf@zgGny3Cz?qkmZ~MCLPCmei)=6&>y(OzAAUF=cfc=q3n=P#Zc-{% zuU@T8J(L0mh)frEbR9OsR-$0yuD-3G{FEqRO$#^>xv*uKDuKv+CWEt9Vj#{#^5d`>0*NAD_P2|t;OqQ~^DUwDZo4UoiY~8wbG#r$r z5XKd^X%zG48yj!hgh`TAd+)clhFZthC}80Y=PZliancSJSz= zYi-vcXTycMi=;jhAORA{K;U4!ELZzyJP@ z52cU{3WE=&m|9p^xYDd&L;uv;-)B(p@34PJ*%JxuBye1-msC!wZ0B=bAJ{eyoBHsq zdb;YPE)pOC639S6ZVJc-?y?_)&*jR?55zJSt_AQJYcSpIg>EHS(J`3rW7BJMc=AVB zPHa3%jw3~I^@QZOOHWC_I08eV_yZVr6k-OHakE#0VBR3B0gA>i{S?~Vrq@a~#Uzb= z^F`X`&6^9+k$hk_u1Ge;p@$yoShpNXVQAS>R#x_FL$!OBJ!a;T{Sl~;|5RB>fCPLd z&@gAc>eKv9pL-3gyM5Mf`0j$hdS}NZKmsJ-KY<;m%*ysZEXgF$9&SsZN^Zcj)J)(( zyv$X;HM7zERaqb^fu&Zu3#C31a7^I37k<|{ero|FoAWoYH(-+C$zm`81}e;%Jo(S^ z1`l%oj<$Ll5`z%9`+P^$~?AWniYp*geftLI} z^KWoqeA#i52^`mY7S5w6H3k8KmNzh~!kFozP!dQw0r^hyIPAcrgP{N0Pt^VgIOLPX z9t6(AE8O074=hH#&<1MnH%c+Zl0Y&E909i%z;1`}|06@+1H3V2&cyu$QhT_RD;X5G zxNK`oGn_{;5@L;9&6Q54Rj*#X9J4k!gJNq=PR`wq9nRdK;OcG&oCmQlK#@H$^RPZ$ z89BYUEACFpFF6YdkbutwB&DKnGg(n%Qn5RfY*_y@lN3`c36MZy3ETNUhP?kz;3+Yv}2lDSimb_RkBm!lvzBnawjd&5%-Y3B)p`!i*V{jad9lsW8LqN|mD@TGGW**8dqLnqo=7 z5CR41jq32`uN$IvcbQmz+I4M%{Ii=W^@rWB@EHk^010S8Kn}=z0G6cH@*Cjqa@e=9 zt+2H)iG**2ZGsh4n(Q&r6ec%9`T~= z-vRmE*zr3c?|ZQpunw?}upC%iium<&*u{Q3^jO1%Gq@JOZ>sE%1a>1JpIJu1B;)UF z*oQC{%?ZTmKM>7PyOt-}6q2^&w!}W0V)Eq4dAYf{SGxribvrlN6f0M*RG)wTdDPO~ zj{k6BVc~MOjg4BI$sR1dc&5qoC7D_53OR&o(ql?ZtT599s&+wO zvx_!tLIPnEsD}an2x}Pjz#)zIHO6@ZCO6ub!9K10xe7ns6uB|77pyC+3v6H5;jklN za?)oUi+pF5_sC%|VVJ~{3#Wf$xZ~@fUw#+nz}|u_fqe;c^0`Dfbb=iSI~H~v>`<7= z-+wNIehJhH8x|5E0W%25Y1;E(<6#I2vgk~}mB zTMK{#4~7F&dpEFjXd{6Nr)B>9`A(bYQokH0ohLS%V$4HJ_rXM*r^{qe2MNT9Kz@2N zvMK&!wo8`kQ8I7tu;zW$U(9xd>Kzbx(?MNUC4s02+=7AoL^XH-;})aOS+E^9WF;`+ zXOZt?Usc*`RXR@Nv1G~1%FceUlVC`si%k(<9)rov>C)JEmAxafYWINbt7}tJ-m8zp z{tS~ZnNETdsJ=&$xnELgPgP!RjGw2$&Vl^^X4*X|2|oS`b~H?Wzgb9t1k4~HnUB+9 zgJ2_I5{zZhkbtaqV9}R={3m#W2YG)(+eIGQYv1&VC!VN>o888qM}feKYSBV6NV6|1 zR;(~NgW}PVBS*@I)kGG%!j=q*dWm$NPLn{u1p2jnMM;3d_+r~PBa@=$&u5+Q>-Bbk zYXOSviN@oLg9FdQiC5y_2z`hxfdTN$*bm=TX5|Y!{v}M7{da?nhW!ENcccPcW-R5n zB45_-gUManHn5Xn55Y=caVST?iJ~|-;?Rc%sDHh~Hc?iWJMRC49S`dOyBvnk9G)W; z=%U859M;A%o8TH&;qoJN6@&hK*SZ(Qn+oc>We_#*kj zEF?ezJ`>1GZBa+Ic+KZtBkS&fHS^y3QDmcY;IIkEl2w`SX|PRU56Lh`96Og|- z|AEQh9Tv?9@DHnI6H-Nvq*UBk<+;;`e?^1EP8*q2fBN+4xw!FtwMm+*YsKl`N-Qrg zull7LpG!)`7hjn4`E55h{yb{bsQ>QvN`ju`%3PSPUVp&p;Ptm zwY--bRtlACjbZY)qFLo*d0y3;;Ca)^*La^6AOb2fFOtb9|IbSHSQ(5ZPy&B>bR~W> zI;kOp24--Nk;4 zr-iT^U=v_+GevHSnN+$!>@6_U-a~E}UxG$0+%ycSjogTv1p5VSTS#L%8Ag6@uYvu( z(hhUdID)#epYI=FVP~I2;Hq{*E{CFTwNvH4APIQL-xS#ztu;(Cv}AV%c`Wz8I#m9Y zzy^yu0+*s$H`v)QlRq!G^IpSN3{Ge`DUc+$Ft7I@?FqD9?4iB(O)E1fGF03Y!QS=ko zzrz~P!#E$J?J=-l!Nz##C+Ma{4NS81{e zwk?dMb^<5ivSgT@3R_ltRbCC6Kq}|~*uN_Rr=wLLS1rHa$MklWZH;MGUS577#C9@9 za4kaDuV1fTeDOt9R+c%_Y~{+8>eW|Yt)&;OFCkgPj{SQw(+rH9{z*#&DMn_x&eutR z1l$vl42q*$nDBkuxsUj}(R=OuA@ZS_g#<`|1i~gT3^#_4yI$VoQIU=CugT?eh(8B*H0*7I97#j#G+3N(LdY+V+%43xybDo%VJ3ZY z+6uj6VLyY(V_nNQU4>Csq6n;qx3W6)AFyA-WPi5-uy!y>xi}6c=>s!h^I)4{EV~mp z08ih7Nu-n|LIQFApb#Ne&qGUouiKJ{lnTJP)-50%+sT0wPdxF2TC!w`DlIMDDRIMw z4eG78-cnCJ_0;x5EOuTn=m8=Xf5)K|u}<@;Z93BFl;pZMtgQ~OkpKy}BXC5E{~(QG zgS*BCw6ZJgn+=dji6lS*@gk51ckVL8rAMLjVAw1}^m>&kH&7%%A^$gIUgqR=6o1?I zMt2wb+oe{$jn+!dE-~-m;qfqu;F(w+gXq&>y5`Cr3vF)q=QeRZsM$OMvX9Y!V84S& z(Bg3QoAWy17i-5_07=7;zt57kB>$Iw0%K9C6(GC>lLY}R5fR}2PZ5EyWmx$S`LJ59 
z*LJA_7jLVrLL9DDR8(AGSytyb_)^t}b?ertmtTHarBW$XzkYp%)Dg91%a*EDjC>4# z?le-k52A-Z@L}_Ms-CW2V0`e9U64R@1ahoW)vu*V3j_wRDbG0u__afgEK9AI|jC&>`$m zs!oKh)li!gN%9_-G^uA{a#K~eA~$&D1~3Z=m_k4T+zTshN~P%5_&E?JflVF$Q{{=Kj zkYbI7+T2K*gGNJPyS5x~(@QXILTP_8K06l^E(Tu#vECuuia1u;*arubPDu z4vO3>p9YgHvRFbUz(7UF!0Q<9hqx~LMY<`HL2Q<(*4Bzr-(B3T}%>n4hNNWde3&(QvU*vT;22I9z=l^^l&zJHG^dc((SVAJeXhle%vl!-N)HythdNoz4 zz>9a(bRkZ!tzNzQJoqDdXeMse(%|6+%mPHM=YfkQobl0{QVsX(=Bv#81{ zdJuSd#E22E>men46=hqM&R_P!2f`6az()eTn$1AEU_4Ez?U595_=WC*%gAq*TPcY=MLS z2h(W8fPQ>Iu*fCPLda7eRxsztq(KKE))ceb78|AppE7nPELNd$DvppcvEa$@Ha zlR#>s^%>}pZE|Hnn{K5s1P#|o)EzIM_R#KY0UW85Ew*$iUqOg&H|~RGBU`cPv<;kO zd`XrK3kf8dz^8CtGP^p$WXmL-w;_Z)$K@Wh&~3{<$4= z>>sXv`;^0C;}9Jo0TR%GfSh^LTL&T4G|0BNv#Y(vr`Z$uGpb&bef3al$_*@&Z|3S0 zocJYJZVh$zq906ddKN`5W2XWMzVWl!P8@uYZ~|-Ke-W%R>@HYbvMS`>`R`%J!&tH> zaP9?f9qf25fa^d-k)7Ckk=aj)gPJ!ws9U|NdqDb=xKN7c7)UvdZ>be0%QHDL-<=GM~+P3<}BY+sb5i6sFdBV4o1980wmyqK+mS{sFr!jO{v&{ z3{e}S|4-cE2Y-?P31~xLq&7-+3HcWu{%Y4#K5|Jw82-a$+aR0CQrnY*93a5HC3X6c z0Q+~+wWJeFchVq6b!WhrFEsLpcQ7@Y3`)_0KnYYxI(Qe@R9GAiugHTRe~0ygvG`2D zfgV=YSgGv)t6Oy zdAWM`-FMae`STSHa@g&f$xkp>lGB&SGH!Blo~n=20Ifv2pBSf){=x7mj@JSthE6z+ zT9%CiDrSWuXzkn~J)0fz++zKuQXvVDfaU~bIq(^pX_V7Bhr{CYVL3X$!tesv zJ1|+|s#B3eDxTI!P?lQyp`I?E{C@{6a)NdeODUYj=W}(ZCe9Clj@WN^p$TisFpGe! zG8+w(V8y*K(}Q~Qx#?M${9mvP#^MtJ*Qpdfji2;xZt?&?4yE|WLwoI;%Apjv@qL5# zLNf^L(4m7m`Q(%L2vSJ(bULm2^y#Ap3>c81wUIBsgA9rnj8yJ}=(76wB5kR?edvl^ zkN^o(BXCgTg{n>d*VSH4>{H9Ol(GLeCU(~7I0@)U;4H{0&_vqw{TKYL`dWao(6&Mo zeQ}WSEu0tz(`}C+$*5>+AYiu(4*hmjf>4k6-Kk;y&4Av`nrJ>8I(VN=lgXeAEeX5_ zRVTpY04meJ;K~18&%v6&SbQSDKcGH=bkWW6Xmgy4cG5($DQF^2(kbqkP zIFLd0Z~da%=5cK|bnX1X2gTJn`b7dH5E_B=LmMf(f#l1+EXHE-m%svar&}g|4m6$X zubZLt=JrrYzs3Cr9WM8~Q^WdO1HHQZy^x!f`x(ZMauY@1oaXjp)z<<<4&X^BI1n}y zX7YU``Q86LOqUEhNHM$Aeizkj-*i`hvj>549>vEV+H2o*(xgc#-1wG#?2Ifud-lxw z?1@7T(Pe8>_oy*f?;@K*_7F~FNu?WK1jjoj(s?>f0s#})zsX|NHvg-DM=`GZR7FMV zTH{nxFbR-=P6RqZOh1hT$twTLG}03{DR;x6Pc(DtY|Zr4B_|gZbot`;XkB6xa{aFw z`Po+^EtEn6IunrWzDr<(U`urtT8FmNaoyN^mUZ|R`egQ_*AlbLXUhuKjYA;A+H4} zvM1JoUs5V8WnEK8HC`qG5^zbNe=DX`)PxS}cFtZse~7UMIo0G7Un2n$2$jH@7)_@o zfU?p4l28dGYcPoh$$nZomEI83RVP7pXpwaPygKTHak&D6{5_0;3>f1jXk4#}&OKu~))|z*d?hyi#i# zKe4lx>R_ODtt2y05&m>YZuJ0{!%9lU?JnCI(+nvUa=Ke0QYvIpwr$(C-He%_YC;iI zx@F?TiKa%qt9j$>=~+*xG?pJSgTfg$)+T|h2@GiUeAaanTJir@%{{SOLVc&(BtQc0 z3G9ugN4jtBd&_&!<7(e~WrsBpxEe2()^s7F*Gvjd$ZB65iUgThb`f}a*TYC3Wp}9N zxNHCV8P@_tI#C>4djgU|FTvo~v{$;Pz?OLFuP_}l?)Fq?$SVv~gg;TYco;JF*%V-7 z;l}q39vDU1R1TVOl1(9EKmYvmNHu#L{G$~sR>&XXM3xroi_d}Ma%vI_36Ox#1iCf) zP_-|>S2>p22&B_iT5dYAkN^pgfD;0;p|URD12;m;WGCWT*AoHxkM)n9dhoaLVd$=v zzg-5_n=YUEWs4)0T?F3R^)S*$9Wi?SNEL8!5-^*<*N}e#><*Y|3t{_0^KW1*E(z%J zq12^(RXqO$M?*<-sM`Ae!ap=Q}!}Vuyysix=;B zXyVV?lblVFvXrq~l$rQuqvm807|>c4)39Vg;QVjj8qz6?daObMBtQZ?2n^owK!cwT zKm)g~33noXj^T8<2_*L>Oxe?0hYZFt@Om@cw~O|peFTMaDsR+9-pl($ErR2dfZ+t> zZ=5V#J_WYUFkePO?+*>rtc_d-D#D$LNgjrK3~m4Gp}h%Bam0ajqX|N5(kK}exw*MD zUCQ{{^5x6b%9SQ=xwqY=rO2T8OGf8nexb$srNeAnWtY|)a~5=u1pFq@rQv^7$AYDP zcWY>Ww_Gp?A9b@lBOtdtW&4y>QDl2LM9WavluZJOBp{z&2WsNE+%e}e#PBCW4n2G_ z{9!YWcw)vY`dp9=$h&ADX9+ZH)aiBr2ui(@#G& zIi=#WR4OI!%S4vK2bY{_S!V7{mdFXC(?${K&|sPB+@RPf%^HZUXP4z%p@9xcAORB4 zpFkuxg)?B8mf>~296SM8b^c-S!(=tWUZ|x*hC*#(MWYVI=*@xy1o&xyeLLw|m-mSi z&8$lTIurOG#Px^$Q)iLYw4H;EPt7KE$5qChy zrjTFcLrnBDq1hC-j~Fqc)WrGns#&rrEZe%yYd>sH0who~ffHIkTk{3JS~~$tjaWVB zM1Dc8of5B-0115mgYHcTP$M#r5G#h5w@*dg!5!52cU{idCyt z`7b7%9=~YOqQ&pS8A`uV#>^}}4XXO+r;LgMC-B&eQv5;A@{EAM)x>^)eF~PVt~jKE z#T9}4RBoDC6|T^+5eX!QKrTG(tBJRNgN|*QXi2J!$KbfFiStKkq9c=$fW)hh2 z*FV$5e+DWvnFz{=9RXRuJqk9(5YJje=Y57~)j}o%6~Rw~+|V5Ca+pM9P)waV^>pwt 
z^2V=ZP#k#RfsUtl{`~ol+vw5&C!OClY}l~GewnOD**EAi7t}!laU*bi>*wO;QrP|! zu6Sdxd>CdS0TLjAnh5m5i}h=|5W#DIsXAu!_q!ueHKMfuMYfkkWmVlU{_%2nFh>(F zbWf|$+%*n z$ER;se9(JrheO~M+emT_diU1Rg|Yw^pEOB?;@#D_i__|ONtAORl`HinLilL4o583It`;JMX*`scw&h-#uc) zh)o{*Ft$0iCA-mB@f1!1niCk<>N#dm_&G>Qneik&DTe<0sMb=ihXwtv#K=&+^F7G2?Jpes)U4I+Vo3QZ=0G7>>R zGR)40>GFSACg=sY@bgR;`TF++7^n#Dolu0Xy<}5-5}Y(Wdd7|WoAglPT*aY>9;$M3 zavU`@{5*;^6%`fJ9621L%A;l$55O<;aWQg1X|~PqxNsSSQ|Yt5z@H4UO6)< zVltU@g9LmcAS=|ku(OVV~(d?qmA+jq~QpnZVngd+?`& zj{bBLNH5NosRbfIF_j`SR~=GaL>&*^$vj8ALIOb%C_%?3!&U?}fDir3O7?qTya%5F zCc$I?3Oi81dGwN?h7x%ap0DI}&>;{w$ZF=?UfL9gavww&K3IGd4)#6XhpyNK36MZF0$d zkN^poPe9TtbSMjS5J3&Q5h&j6S#(cy4piuHIx+(lncthbq*ZLqERk18fCK_0@G1H} z9j5c|U?1o^H$VnPcg;YB|I;TMX`be?Tgjlf%Vk?*njwQ?2*e(4jNmFlgI1R7-=7njV*8=S9-BQ-& ztLL2FXy*%jKmsI?WCC3^ab8YsX3>kl{~$+)%;~+MLx(-eYMLD#R?4rdNf=+-PN2Oe z9CW@(stF>>Ac4RL%tg;v1lGGh9p8v_1O`7bG65Wz1Y%5}Mc!)F zv&lO#_D~DgTBLKg6>6b`B1nJ)q9&k2aKZ+J_o4>D@%JDg^P)p0g=8wU-a}mwFQkty z!QCOZ`vT~&J(dm{f|waU`qbfXo%q3Ek{`3lK1jel0oe)U&+eNC-f~}zaCYFM7&w62 z8|ntS8j55|qm~0(+>}IQQ%svS?IghI=LS&JZ6uqbM~@zkWJorJ$>&i(|8K^RAOEc* zr(;yPq9VofC}K39bVV-$16n<=QdWgtqGG59cb~3aHG7~g+aSb{FkK=6BM4Yfsl&~? zWzdn>EM3jtDN8ib*FqB=)n)t=FRZTqVu;T_!dQ=lIG$-k>G1bWm__*>F>TJMmjr?$ zFajOxe2BIz+>Z6V_+DqeQ|f*#fNyY$?1@3W#SHAX$iur%QYv)$lo%NLii!&P%`~#O zNvT+}WQqFrTVrqbf0s_D6Pr>o>Ve{3O4)K66AKBDfX@V)=9Z{~o6h&SS9X_yK+AMN z{v{c%@nsSqfn*S92R92eaFzQ#X)p;HBz&IX^{`)V9`=A)j`tPn^@qI%lQfjDimYgS z5_SS?b6A5KIF2k&)}gG@K?F6BK*9;gs)(Vmjhb?hCD6k()fSm7ZiM9@`UB9_nJ&9S zm`UC4vaK=Arc9Y~Vk(t7#u&j>gf?iJMWmgp9#_^CXW~~BC*Rsu;x%scbBDy zEJ-JD^`+-L?`17O()rcl|A5vns2oead$WW@;OZ~mJnR1UM?KD)93cVcFeIQEfsUH! zVW2{jiKsz_F6l11rd0^C4`C9lXjWBydMP3?Sd#`g&;ZXVb)fkMkB=QPz4E)00>NnN#=rNr0G@;hM+TGk}<~u0&Z`t6W;Cw_Q%tqyT6q6 zGs&j7BkS6RR-8I@>L5rz)siXWMirl&=VO=biUdf&90IxoD!w)cS>lxH5}e3S9C=&Q<$ckMBPvGh%kN=Tt0lw!f*$Ts9y8K^PefEuA)Lq>XKb|vC;RnUIX3);VgUQROX|uX{Ao6RMOkGnYu<@yDSK z%E3q=Ap{!bu2+4Uzm*VgwR5xm+7kN$?Nm_^36MY-1at{h$lm@edJ@p(<_!btdcHq& zR&~54;$$Dw<(g=r3=$xL?+Gl&k83oA(F?j{3qckTaC<|Y@OBaChKFtsJdq5FJ9l*z z&qvdyO_P)=omMwE0*!;C9o5Id;A+8w1&-S2R2hMZ`*9w{|8&x{o0iv3d0^j=)&kg; zCDH86e<%RxzWcuf1EA}f2|U%|sCgl+1;{LdS3)7s zDwI)bAEX4AH`iW;S0f~#OF9Ds6%oD(3>^AL&|IL2?!}tupbQcqf!ztn-cKW7ItCKY zgs#@R%T4Ap1}ePGyQEaOy{VaqLn)?AnKBTJj7_PKw?}DdsrurJFXUmJB?c)KGjS+I zV)OA4q>;VJa_XFDRv-Zqa7^HsmanMXl#ZVX9lK(51KYB%SF;E5@0`)INbw}#m_TF4 zE&Okg*w2^#qYR|SMuGIoPDvnG0zVD5dq=&$lezY7CzVX&T7b-ac!dN)C9nje{WjDQ zvl~o;^A}}THQu!tsPHzwk}2T!%l^Y?`>D6#^>2;Sm}9X&&fK|kRe8DWH(7gGxNxD8 z3=Bg{X<1p>-G*xSEPM1rOS&jao#VM9c0d9oum^#H^hR}fi`VyfoiBt>z*4H~n)-P< zeX$Q80!Ji)tB??IQjw5-%k>WQ=5Zd<8~8`MDpB#=FUCfQdFtmc})dS=Id5MZFf4~py~ zOafP9Fr7Zjn0q52%vs}DBoG~ebr@R5ZRtBhmrg;~NM?wE3isH(Yzy-chK+p&1#WiV z>K?CtEtfA}t{#86zUK!=Ey=;6|asBP*?VInmv2= zcBwfzIkl!%nBY$pjgJl=K0Njjv?~7Y_As5Yufq%Vc6*UeNPq-%!N~+XKz?y1D;H)aQBY95zX8Nc^*RBB-{{| z4-By`weA08nBzEiqx|QFY4%O-$mt)oQxi#$}+QHaaz5>w=g6ujWg2zM6<^ ziYZg391Rl3*GYpFbRqB<4yE`+7cpV$NT+fnn<78#aXB6dB!R$DEnZa;tYFcUfMpjU ztAd{eHJu*HA^{%>xLsi7qXfl_3O{N3-n6Zfms^)v<+Hb(MS)a zkN^qPP2l#rtAt*|_&|jhV*EntoZby+;`RgngJ>K342o1LHOV0U$QQJsp^m80V2oBcn~WP#-R%iY51?)AQ!3zN5sbwQ0ya`$?l42D|0+jL|D+{) z9pQh^?2!aWAUCy5^=mnsAPgXYPr<#`&OK>}0a7WE1W3Rg0bQbz3}3sOzc5-I6UHD} zj8TC)YqF@vD#;_z4KDv4rpw>69Wx;NJw3JK34fA+CIs$YG2k2h)&gh(CxeWC;Pv-1 zT#M@Ev!WU}>TwyU$bJSopz3+qSKqCs97+-Ul!~cSrygnB_KCZB&L=t(cyh#u5xV^` z4vkkToxTc#H4JS)4nzV8B5*|W{~)DeLxMau+g01LZ#LTlswV*wsD;2*T>HG1>tS6= z>_KQ@0roJCE`bU;n4-*sA8Z;=0{g(N7hp{_bWd(>uGLT%C6NFL)J?!fm0#9fEz}xE zVl?@im$<^3IkXRF!qs;TUg2- zWfTjFB>_VSGWTRIG~nBT1ma8JcM-cL3u9i5SObTznSksM zCj0m5P>OXBK@D0Fz@dWbC0J*zRP7RT|E`C8L;@sWAc4oBS*J`s$r?-KmQM^+WcS-6 
zQLGEHtGY+6q-0R^u`Fxw9^Uf>T?srrYSbv*P6`T*Re5Q80tRao+JGF01QJA`Z}T~- zQO>#qc^ng0ExRZtp3n^!1pZ#JadH4_0bD3ylMo2}3S&sHLRJrkUEarlr^Cv_9+=~0 zMW7dI>5^g0tedQ+EHRb3z|&V?`)TU&$I!)#eOZ%q+8X0$pX`eSNZ@+{IjJ&rY-?HU#$q6W zqgKy7X<*AEp63^B11FD?NuUk_6}WyWOac`BV69+5m(3X9L)dIs8H`0w0;gys>%SUl zp_FI|48ZVm70E**H_f$FHEj#4=}yZ<^I zdE}95;lhP#{`~n0dnK9bUQN269X)#VyqYc~;kBGp#RSZzj+>h%0Y80=Ie|XS-cXI1 zQeo^Yr!DIyi2t{-4p2A=kU(7oB_5^=0?QZl{Rvu96r?6Jr05u}j%l3vlLPaoBC>n8Mux%387XLdp=r~m>WCvGd#d(w&_M^Oz4u0JCLU#0VPPSX zPLo;6H(RnQr&%(+r`IG9EP>w5-cn6-OM)FF{=NV9wPD|6S_=@m_qMWb41G`zNCG54 z0wfRx0iCibWLpacDx!E7L_el15S;*93>z4|oH`Y(L=D*I_`@EaTOfTqrLuFM=zUiS8VB*kLN2PG==uf)#Nn*p})n18LO(AgN+W9BPE`87x_|!=PBtQZo z5s>e1KMiTDY=(IomE;hW0L$QiK0id8wEzM7X=K;(rTjwJe__9dHH4W^WI@M96NFMD z36Mb61j0Cflky*PA_Z491iFs=g5M69b*GFguq6DwtAH zJMki1HZr9`4xK=xrFQSCT&;e>c9lyeJ-pQ1CFrzVyUByHjt*i6`0u*tCQFp~<- z@KTbZA8`7QNl2)b1Y$*CHoRG-kw=Nz_bHGFg+`ORqm;5GJ&J_{vLtZ8 zc~8#vX)QpOBgUax&t?l$i#*wv!`}Eaw2lr`t#0v|I5lx{aKmukE_-5$U z&u}e(E^{T>6gqvUn+rYML^7OrM=pkujL8^a493c-jyU(V_k$)@;u zRy7T-ghMIhNa@o|biqwFMOj&ydhfmWOmugL<^{;6cyY&*r2VX4QGN-&e#swo77`!< zp9vh?_#M?I|7)LnWp|nouu@h^w!R#ui5|)z0TLhq67Wu7A9Qh%_ilXc;a|S?#?CSk zI1v{fhSip3z8=?0V5?ygFp}qzmIONDTqIyK940JSNI-i6 zW7oe}%(Vc%Ow+6AMS`|Xed*16CweR_mE6JdE_uY3Lwb88d!cn6}J*$}} z7jm9_e*kj6I;kOp1`c)698J~zI-RbSv^5+DH*AOT|u><8r+Xr_KS^e`eG{G`a~&vivAf>&YQ2(Lr3>M+)V&wbAO&Z1f@4s()8pSP` zuf(QNDBF_MC>9bRfdB~fZ>`hkwg4PtmpcfYwR--L?mM3FXS@iM+`MqDx3vKA;z4gn zAVCD=I~~>nX;l)jTvP+%vVSv+GXg76{U@+eXU$lf1W3Se0y=%+WuU_FH++N=KOadC%!)J`CsuCTAKy$Y|A011!)3208BAN0wMR^7@Yx(SM@9ywHk zC3^yLe@Bw}y`&2tbZ%S31HJ4EkiA~kBmoi#h5!!wP@e|Vwc}ppV4=Mnw=%GSkt^c^ z;(QhQ^^#2y`_oKva&jhuk&*A2!w)~)@u3uQa<<8bQdCq_Oqw`xqMUk`xN_DLDs9_L z%}w0I)8Wtv3~c>EXk&4p=n2@$`q`?vCw7Zo2^Ek436KB@xF8^z6H{T9i?%^Fk!_w9 z1UUfvCIJ@& z{A0D@qtT}rycVEk%a*PVs93ysvCFrI7R^3FHpSnxP!fTNRvVW748!uKS_JGlWc&&2 z*XTpFcfn`zca$DhC6G>AY1tl=g#<`|1W3R<0^=e1U=3wItf3}KA_4il^Efe(00|_T zz-*1YPsEl_3{-soj=K^+8o7M)O+&lbXHZ~8{>|{y$oGq!N8z-XoLqENilwQv#jZymo{cYXORENvk|>-P?mZJ9U!vNq_`MfCM5Xun$I; zzu~%-QV9BkZlb6s@dWMyfnO$`0s)W!3Cz<7fsU&Ue8*$t-DjUENJ_;Rmwie~#oaF3 z8q;jblqvh;po3w?2(BVj(kME1>{#WMx*wL7mMX*3D2kCrk=W%Qt=26$3kQeu#Mipt z>a0NmjtO*a_+QnrV5#GFY!C|qx#cRwbc$F^C7mDv5=an%De%?pzQ#df`X@SH8RP)F z`u^y$8C}g-3$RNwA4N<+w#~RMVhtRg1V}&+0`ldzL=P!yUrlrbDuaOvr7j0yrc~7O zHcO>aOsS}ak8YO`RJs)@6=k~V3PTTe+_fr{#SfKXz~&fX%phPx<^oNm zc7YDv4wu@6ru(@)3w&M+kn})BQ_vl~i?E|diO8nF0Tr^rzmeG#ty;BmoK3N9+cvdm z(IQ801kEZh`{tW(9@9)yXmUn9P(0MKwA_3*GzJ`q1cD;axxo_EsR6Sog5qmPzqxjf zEqi9NkN^pg014<#067HOZd>yjw7sb}KB`Ly0l67D33hct{H2>DKmyqln4f*sKx;}? 
zWSc-o(4h0A2Pdw;97uq|O}QV9Vt*(F4ml`-pJ_LMqHg2%P>QWvw?-}9_4vr3xFg9K z6t=Cdbq$XVNnjU&{mR;3?BQ$PJauo6-LN?ckN^pg zKyUvDo6gCh1Hswy)mu*bi7MKUNlbm-tnxMWZmejdf=&6_uW)REIMsyym}rKcl< zqIZm3qAMhz4S@~?pQF)0AAY!F&F{VUo-#b8;?7~ihHZ1~cw8D-71zhb75YH}x)I=^ z6uQkw4P36KB@kU(_;gYm*m)nC-;`5n+A`|PlgKvV=|1Jk2n43b4P z0~|LH0_V5=0)PKIsUd>~4h>|0WOw=n-2YA!1Wc>YWFq{QAt@D?y6iL&DHT(uOxYKE zKb`H8QKZczrDC6b_Hm>k1}PO^!iR?(IUS?QqaRpuin6V~F>;BnkbpJ>_NxE6+PBfi z+UTJWEePaW<*D&nD4_@vAOR8}fgJ=o;lUqax-Le17ux;=V{J&BSH z$0h+1*hN5s>czXBM*4_Z6p`W&ZgBZR-OOe8l0osc%eKZe!yyN<`X(_M6tdTo;TaUa zxb)IX%{^5k<8Q`&kNyauL}zOOtm`s%#|tDt0-gvQ-}*Uaaq*)khH-3+_r#d*7M|QH zj=s?+5+DH*Fpxk#H2)RWLSyaMLR*E#`Y0_K1Z2_beXuuRha|&gdPxE#;F7=+m+ky+ z#w-fIiE7YaV`#X{rH({oP)wgby( z^I-9b=%AmcPS}_PNT5an?dpG}x;Ory#%p{r@dO%_mMUganEW<){g_2FoUR2h*3ckJSOhY4H6&$5-^p3e80U|WAUl!{-) z{nt=ZD&XVKO!TvD+qSBG`}U5c%bri~zb_w7j4i7XUBar!WR@`xE**%2_>N7c_wtl> zH(5x41V~_+MhIGJq{VkBNeWan1U)Z@QvBL?`kHlk-+lLW#LZqKGbrlUukZL!3dx`_ z{7{ND_3G7=Q{59;#>`lHG#p131q%s~fZqgK<*iT$HD2g5+DH*2!=pgbo)==xMWSa@A=bWzl4YC-JrVAuM0wl1V zfGog!8rD&B9)LlJAdY3bA5W3m{y2iNeNUm0PG1afp0>)yIr<5rkP|@SeA9JF@mcJh4yN%)c$nKmMx}d zQ>=pz5^a6-}}ltPrNl>eDLH_>cs$CU6C6{tL!!Ewe_(iX;#_0=i@ZY8aeY z_{{mQ_`en)gg`}+JyDbQsq97Ic4nub?Fvo&2uwygoxTyB8<|oeP}Z(p>xhk{RFsyM zI%=a?<$K4EAOEdpnnIIfSE!pq8;k>yK*9(NZ2f{tSrrNMn(lffU@QB|RkH`mQePGl zAOR8}fnW&87x;%^CkE5Kuf5)i-ahiRM|MU6(GWNZ!@dPWehv!>kU#eG`z%XC5h2tB1y6$y|43B-cHJ@DeE26*x&G~Q)^PD&(!Yzat^LV^{?X4`=ENPq-l zLqL~6h1u0=&)Q%spMWgi48E~t@!VK1&+xl+CK(o3qWtmehja|b(}tZJsKOI zO|b*OvSrKEs#UAhA%`5II&|ou3JMC;)~#FBS6_Xl7B60`RE8u0Heo;qS<5+DH* zAORsD!QXogL4lkZHNz0CNt3xEX$}YIuiQKN6Ra`pfdI#0*Cap!dJx#EhYZEwgeDW= zR7O4;@H~oIs7VGzDwVphmMTVGk~1n^fBkhMwbvnf6R!WNjyiF=ysXJhlWJTGQ20>s z0V!LZskO63wroqqLIP$IXq3BN^=ZzO3NvRm;U(IuOs>Gcz;&jk`#1k-bS05F`Q4o9U$^Xu1V}(b0*qFKFe!0fk}PQy zd%0wIAKJw}jRKL1>)>as!*MR>U(06L@9F-+!ot|UFEhbURoFM-+W)h69&mD1)gOPe zn?f4t5NaAlAV^IB0VN1XKve#86a~c46+{rFWQi0(nzSVJCQ6YGA%HX~Di8vM8X%1t z(tF#T|L=qZQnt*zHut{wy`Rs?&b+zhcfNOaGxN?p_h$KTRzt3H9Ri6H_}-E-i83CD_SRBeQfuG2?TVy_b4z>>y2g-8$okhO8 zUha^<+g%swnLZ0r*rE)#SJQb06}b>_kboXXbCZG)g;yMH(5NO+MX0UCi7bEmQQwXd~ zpt%0j*)xCMiIs&bgW)*@AOL|<2>eKuY}LCj-_=i(Rz66SGP9MdV~0~5Ci|!C$Fg-~ zc5}V8Y}pO34ok1uLW4FkD|S+A^Cge?r#=FfO<{qby<|^Wg*Pv5H-)tTMuGx?OcHP< zqr(19XSVqA;iudQI9d(DW>Z|HwwYf6qSg6l$WIICP&^=@`MU0e6HfTl1F4Bftg5ZE zYzq4zNPz$ZVkWS2*MI3y3bH9;#!7cV;Sa$x{cjF-d3rH0AOHafK%is-`>N(YWWL;l zxJz1nIXt7}Nyu=;`{kQixuv5TSWv^v4rMU2#}>%2!$8-RS@6Ri+kF^Y6lvG;P%O}6 zOm~&Nk_U(J5dt|Q;7V{}Sx`BX__G+Cup92DhMcWAYIkYKw%#gwupn@w@_wIB4Zt_}`K7BRg;AfE)3G|xMdbOW+ z{%X6Dq{pIv2tWV=Ss<{VocO!Umlf>qO6yTs0FMt%5}2kMX5|zmRtB5NHeG64=;sbH zUxILwT5@Tpnvcr*%Ep)6ii!|`fDZ%+RHQIVSqxOze+U-A4^y63qe)>J`qV}@x_e1) zE1xvyTaLCwOdak{2@|flQuPszHpV^{)?_yA}G-V0|AP|zk!md?8iz-(> zZ-sQi_h!%i?$(1n-xr{!IH=TFC~i_}P3l1a0uabKfdl0K9Wr0Ou+NlMi>}O)Q6Nws zfvLLq@1?fvb=y~}zw8G~ZNp_RWZ-94`EajnC)vCV_=5`&fWTJ->Kp-)>9mT1me)JN zIW`$~_ws+TZA0`}t2Ud<-U-pxt0u#S4SPy+ZyyXOeh@fLCuk4xLu&jg$DVwo{Z3mX zk^%t;#7tn;fEK~J9mdDpE8RgLm|s_2?A2wP$3BBarU=-w>FXTuNLf08-T0zFAPWQ( zv<_~Q`4X70@62|6>`DO>ae_a~3bU;TU5~>pEwX&UB*AS#=gbpu3 z00I#Bih$jey+x9Dp{m49&C#YZK#79fj(3Y#)J#VG6vMhB|xx2=;b7p=~>-N>KHvX2?_ZN28r zn;)JrpGtP8=jAs%sLC&w+<=M@fI!IvI(|Am_+&=+lIv3u0woZbU$>|>w}&c~z=tXj zfB*!Hz+p05aczs`d?`0b=%v1hMW;6e7E0+qWwzskZI?7$=5w*%k?;8I0NNn{fv*X; zvS8SyK*iT^;(CzP;6#*63S)9+T!3BfM+Y8cHj|=Nt5(4lTWk^RvBw_4UVH5o?6lKP z!8+@#u-a;?dE&_`GS{CnWr}@~$z|zr*-e&7 z5jHz2m&|Y)0;Lk@^zn?~lj+?{txi1%G)BNOC@yZS0;0=Xlw`k4EFkcG7XHN`>6 z%-xad_C}dck&2I`cR!gef2Tko4FV%Ii2jl+*%xX;8Uw~WMsM}^mzevcI|x7^eFCIa zq`BBn9DVfBFG=qcK51@9Ys;1`gPnKYxuKW8 zzTUNK*I=uyw)*z|@6X^Ky^x)GO>wW#4 
z2M`EBV199NH7f=~fTIZnAdm|Jr^w4|WIlZdpCi3Ll#P*5AW$g+b2YH-(Y>SWgGz}A zF|I9u)qUikLx{FCfdB;DAkf(j8QukI%fjZ<2cxnaTUt44CPgj|sZb_GLfOklqnG~N6?x^5&Q)>Z~RZ%P!{^a8vq8S1{5LonPTOVYi z(NzM);M&eRJoJgH^3VkV2tc5M1T2gFVwv5z^rhJ9^#f%8^F=T^Gf&`UdAPOg7LPn# zS6bN>z@rI6Dg;&{;7Xuk{z|$s^)%~&3Y)AbnG`=(`@Wf)@p_zDnn^LDo|nA3WZ4m= z77J)tu)@>QU58Zs%v0@=W%v5)jrXb(GASaHqyq@7N}$6;Wm3#sf7KVb0)Zw7Xl2NP zVqLR~nyALh6(?}q@9xn5piQj>sJKi7Kp?vWE|hEMc;=oh{u?M8?wM}nW}m<;xp=tD zb{SY8^P$-9(&j!0MI!_tunGZJ0u|F&QI??#c?eY42di@<%$cot$$YU@`f}1(d0VQh z%R6^&^UXK+`9$rr^L##q-lS1}r@eiTBOC&05LoyInG|Vo&e|ANLE$q{w+!3*nWonQ*iDPek{1(o zgqq@@s9n$=0vRRXO0;4|M)@vrD-Y2MbJS8REI%+zvCTNQ3)9f2HcG8{MVsk(omw$t zMtQ%hcG9-bsTDe;;-@~n$Kur5^YWW?np*LVIQyh82teQ~0v#t#Q)-2!r&AzM8UZbJ zSWqkmwp5q`0SG_<0$&lZJCe33{qY_Gxk8#R@K86B^GD!$xx9mHj0diNUm9)O4hjSy z;1>Z`f)k`t`1QVNQ0;jQzS8syd5I#gluq&Nh=U$%dM!ZH_|mU2cI?=)2es6SR;^l< z-OhyuN`*P&oO8};=7N^gH7KeU6sn^9ey1*(0U_WMf%&7``6L&u6(&$s2(It6-J>5@ zSO&vG00Izjg+Ox&`J3zrR|J)z<0jQTsf_wmg+QeVSdhYk6{9LGq=fP9fSS@y)-0hj zsz3k&i4(BJ{c9vXN@)WSs3?t-^R=R6QVfuHJ2~%RM1jX1d+e*~!rM(pr%s(3j2}O~ zEDFtc1Xo~neoR-N1hvd z)$V?-1!xTC!os4ezcf~X*B}4^2;_)>-GsbTc94fY+hK6FiM|r$C=l?9fb9Xbt8Ap> zsyC3nUpTH2c@TiWHw4zw!_~H6&1xF@8D5ZU7r=N6gPX*;&eNV!`ly}FTY&= zu3mlm^aN3Wi50C-;#fnrd+p~H6` zDQ}fPNj*~u0uX?JcLeMU#ly0Fy;B|pk4W<&vc(>1Uq$lt>so77eUD(kZvFRP5lDVDK_l2x&XI{EIhjsR6&=@?W3kal7p!4{t zoKKNHcHc%^R9#oN$T!jGg#ZK~kN^RT?ptQMlI6TBPfPIuvW4C$M=%6dB=Du4+W8M3 zt$5j~XO>BEpi_zv0|5v$M4)FwFI3>VWsA8`ZkpTs^+eYKq#mfKDGqW0Y;bM_6_#?4 z`BGI?WiwUu)lo~Wu(J!wDVuG!nYXDG@-@Gy6@v!VX{OIFXRfIW0SJ^%V8Pp@R+LUF ze>HVV&|2Fa#0C^CJ9VZjUUNo zHd(WB6;9XCg~|=j$Pj=)i3EC-STBugpQJHJrN&4-LQyG4mm62-LI*D|_q?2k7MA~B zISqWQJ8lC7ZRMrE61{;AsW{kI@lkd6>d@Ub{Ux0uD##qS z-}|;7)g-zWAR}x$jhz;JHf!CCc!L`;6HrP;@xqw`aPHH{CR7*REZnx{~T#^L2J^ z52>(BibC;%q}b9g1oBQ`9*0!q{Vh*G0hDy;TyhFroG2slHa^EfgooS6)Fge?R{f5eXxWJt1KCGLDql>DsOp``&(pYogEt0SJ6eU>!Z`^!2mKKKQh?9nL{cn!rjq97gbkPv*(_p zlY=Y>Kp=4f7NK}fW)TbD%IgwrUuviLCLFyGh(N&hO8HfU_DMFiXhoiH(k023z9A5m zz_+7z5Mz5%DPn}_Vpma8A_lwYP~4hyn)X@hv%GwSxJ5+h@6}gdEjyiJA8?kWQxr+3i17PDQwTIhVBXv98+)18@}cgb0b7lEi-T_TkfvQ7)P*Dn zKp+TQ+3(5bVqDfIB%e1lznRr2sTDh@n;(4BRW=_fwPLhxe&6q_ca~Z)X3Utf$Nu9w z1R#)K0(OGk)3R`xgAT9!@siPTjb+$r3cfWYo$MLKMICzDAc>)0l zxI)0CKt<;7cjY}$VQ=~AbBKWA>Ipt6n^3b%IM?(y=X|CLqD=n#Ma1mYmDjXHT! 
[GIT binary patch: base85-encoded PNG data omitted]
literal 0
HcmV?d00001

diff --git a/assets/logo/svg/123D_logo_transparent_black.svg b/assets/logo/svg/123D_logo_transparent_black.svg
new file mode 100644
index 00000000..f2dacef0
--- /dev/null
+++ b/assets/logo/svg/123D_logo_transparent_black.svg
@@ -0,0 +1,103 @@
+[103 lines of SVG markup lost in extraction; only the embedded text "123D" survives]

diff --git a/assets/logo/svg/123D_logo_transparent_white.svg b/assets/logo/svg/123D_logo_transparent_white.svg
new file mode 100644
index 00000000..dbcd205a
--- /dev/null
+++ b/assets/logo/svg/123D_logo_transparent_white.svg
@@ -0,0 +1,120 @@
+[120 lines of SVG markup lost in extraction; only the embedded text "123D" survives]

diff --git a/docs/conf.py b/docs/conf.py
index e1288398..14e6924c 100644
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -9,7 +9,7 @@
 project = "py123d"
 copyright = "2025, 123D Contributors"
 author = "123D Contributors"
-release = "v0.0.6"
+release = "v0.0.7"

 # -- General configuration ---------------------------------------------------
 # https://www.sphinx-doc.org/en/master/usage/configuration.html#general-configuration

diff --git a/docs/contributing.md b/docs/contributing.md
index 8ce0f775..e7ac92fe 100644
--- a/docs/contributing.md
+++ b/docs/contributing.md
@@ -1,7 +1,7 @@
 # Contributing to 123D

-Thank you for your interest in contributing to 123D! This guide will help you get started with the development process.
+Contributions to 123D are highly encouraged! This guide will help you get started with the development process.

 ## Getting Started

@@ -27,7 +27,7 @@
 pre-commit install

 ### 3. Managing dependencies

-One principal of 123D is to keep *minimal dependencies*. However, various datasets require problematic (or even outdated) dependencies in order to load or preprocess the dataset. In this case, you can add optional dependencies to the `pyproject.toml` install file. You can follow examples of Waymo or nuPlan. These optional dependencies can be install with
+One principle of 123D is to keep *minimal dependencies*. However, various datasets require dependencies in order to load or preprocess the dataset. In this case, you can add optional dependencies to the `pyproject.toml` install file. You can follow the examples of Waymo/nuPlan. These optional dependencies can be installed with

 ```sh
 pip install -e .[dev,waymo,nuplan]
 ```

@@ -42,7 +42,7 @@
 import numpy.typing as npt

 def load_camera_from_outdated_dataset(file_path: str) -> npt.NDArray[np.uint8]:
     try:
-        from outdated_dataset import load_camera_image
+        from optional_dataset import load_camera_image
     except ImportError:
         raise ImportError(
             "Optional dependency 'outdated_dataset' is required to load camera images from this dataset. "

From 8f38064e43da3735ae5a71cac0601a2e6af8f5ad Mon Sep 17 00:00:00 2001
From: Daniel Dauner
Date: Fri, 17 Oct 2025 19:33:15 +0200
Subject: [PATCH 091/145] Use white png as logo (#57)

---
 README.md | 21 ---------------------
 1 file changed, 21 deletions(-)

diff --git a/README.md b/README.md
index 87ba2466..c72f55cf 100644
--- a/README.md
+++ b/README.md
@@ -1,27 +1,6 @@

-[21 deleted README header lines: duplicated logo blocks ("Logo" image markup) and the repeated tagline "123D: One Interface for 2D and 3D Driving Data"; the HTML tags were lost in extraction]

      - \ No newline at end of file From 3400c2316a4a348a252a72f60aba49aaec6157b8 Mon Sep 17 00:00:00 2001 From: Daniel Dauner Date: Fri, 17 Oct 2025 19:39:02 +0200 Subject: [PATCH 092/145] Move `visualization` folder out of `common` (#39) --- README.md | 2 +- notebooks/scene_rendering.ipynb | 2 +- notebooks/viz/bev_matplotlib.ipynb | 10 +++++----- notebooks/viz/bev_matplotlib_prediction.ipynb | 10 +++++----- notebooks/viz/camera_matplotlib.ipynb | 4 ++-- notebooks/viz/log_rendering.ipynb | 6 +++--- notebooks/viz/video_example.ipynb | 12 ++++++------ notebooks/viz/viser_testing_v2_scene.ipynb | 2 +- notebooks/waymo_perception/map_testing.ipynb | 2 +- .../common/visualization/viser/elements/__init__.py | 7 ------- src/py123d/script/run_viser.py | 2 +- src/py123d/{common => }/visualization/__init__.py | 0 src/py123d/{common => }/visualization/bokeh/.gitkeep | 0 .../{common => }/visualization/color/__init__.py | 0 src/py123d/{common => }/visualization/color/color.py | 0 .../{common => }/visualization/color/config.py | 2 +- .../{common => }/visualization/color/default.py | 4 ++-- .../visualization/matplotlib/__init__.py | 0 .../{common => }/visualization/matplotlib/camera.py | 2 +- .../{common => }/visualization/matplotlib/lidar.py | 0 .../visualization/matplotlib/observation.py | 6 +++--- .../{common => }/visualization/matplotlib/plots.py | 2 +- .../{common => }/visualization/matplotlib/utils.py | 2 +- src/py123d/{common => }/visualization/utils/.gitkeep | 0 .../{common => }/visualization/viser/__init__.py | 0 src/py123d/visualization/viser/elements/__init__.py | 7 +++++++ .../viser/elements/detection_elements.py | 4 ++-- .../visualization/viser/elements/map_elements.py | 4 ++-- .../visualization/viser/elements/sensor_elements.py | 2 +- .../{common => }/visualization/viser/viser_config.py | 0 .../{common => }/visualization/viser/viser_viewer.py | 4 ++-- test_viser.py | 2 +- 32 files changed, 50 insertions(+), 50 deletions(-) delete mode 100644 src/py123d/common/visualization/viser/elements/__init__.py rename src/py123d/{common => }/visualization/__init__.py (100%) rename src/py123d/{common => }/visualization/bokeh/.gitkeep (100%) rename src/py123d/{common => }/visualization/color/__init__.py (100%) rename src/py123d/{common => }/visualization/color/color.py (100%) rename src/py123d/{common => }/visualization/color/config.py (88%) rename src/py123d/{common => }/visualization/color/default.py (98%) rename src/py123d/{common => }/visualization/matplotlib/__init__.py (100%) rename src/py123d/{common => }/visualization/matplotlib/camera.py (99%) rename src/py123d/{common => }/visualization/matplotlib/lidar.py (100%) rename src/py123d/{common => }/visualization/matplotlib/observation.py (97%) rename src/py123d/{common => }/visualization/matplotlib/plots.py (97%) rename src/py123d/{common => }/visualization/matplotlib/utils.py (98%) rename src/py123d/{common => }/visualization/utils/.gitkeep (100%) rename src/py123d/{common => }/visualization/viser/__init__.py (100%) create mode 100644 src/py123d/visualization/viser/elements/__init__.py rename src/py123d/{common => }/visualization/viser/elements/detection_elements.py (96%) rename src/py123d/{common => }/visualization/viser/elements/map_elements.py (97%) rename src/py123d/{common => }/visualization/viser/elements/sensor_elements.py (99%) rename src/py123d/{common => }/visualization/viser/viser_config.py (100%) rename src/py123d/{common => }/visualization/viser/viser_viewer.py (98%) diff --git a/README.md b/README.md index c72f55cf..192d794a 100644 
--- a/README.md +++ b/README.md @@ -1,6 +1,6 @@

 Logo
-123D: One Interface for 2D and 3D Driving Data
+123D: One Library for 2D and 3D Driving Datasets
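For downstream code, the practical effect of this refactor (see the rename list above) is that visualization imports simply drop the `common.` prefix. A minimal before/after sketch, assuming an installed `py123d` package:

```python
# Old layout (pre-patch), shown for contrast:
# from py123d.common.visualization.matplotlib.plots import plot_scene_at_iteration
# from py123d.common.visualization.viser.viser_viewer import ViserViewer

# New layout (post-patch):
from py123d.visualization.matplotlib.plots import plot_scene_at_iteration
from py123d.visualization.viser.viser_viewer import ViserViewer
```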

      diff --git a/notebooks/scene_rendering.ipynb b/notebooks/scene_rendering.ipynb index 12f8a864..53d95c86 100644 --- a/notebooks/scene_rendering.ipynb +++ b/notebooks/scene_rendering.ipynb @@ -66,7 +66,7 @@ "from py123d.geometry import StateSE2\n", "from py123d.geometry.transform.tranform_2d import translate_along_yaw\n", "from py123d.geometry.vector import Vector2D\n", - "from py123d.common.visualization.matplotlib.observation import (\n", + "from py123d.visualization.matplotlib.observation import (\n", " add_box_detections_to_ax,\n", " add_default_map_on_ax,\n", " add_ego_vehicle_to_ax,\n", diff --git a/notebooks/viz/bev_matplotlib.ipynb b/notebooks/viz/bev_matplotlib.ipynb index c63afdb4..7e23f782 100644 --- a/notebooks/viz/bev_matplotlib.ipynb +++ b/notebooks/viz/bev_matplotlib.ipynb @@ -85,16 +85,16 @@ "import numpy as np\n", "\n", "from py123d.geometry import Point2D\n", - "from py123d.common.visualization.color.color import BLACK, DARK_GREY, DARKER_GREY, LIGHT_GREY, NEW_TAB_10, TAB_10\n", - "from py123d.common.visualization.color.config import PlotConfig\n", - "from py123d.common.visualization.color.default import CENTERLINE_CONFIG, MAP_SURFACE_CONFIG, ROUTE_CONFIG\n", - "from py123d.common.visualization.matplotlib.observation import (\n", + "from py123d.visualization.color.color import BLACK, DARK_GREY, DARKER_GREY, LIGHT_GREY, NEW_TAB_10, TAB_10\n", + "from py123d.visualization.color.config import PlotConfig\n", + "from py123d.visualization.color.default import CENTERLINE_CONFIG, MAP_SURFACE_CONFIG, ROUTE_CONFIG\n", + "from py123d.visualization.matplotlib.observation import (\n", " add_box_detections_to_ax,\n", " add_default_map_on_ax,\n", " add_ego_vehicle_to_ax,\n", " add_traffic_lights_to_ax,\n", ")\n", - "from py123d.common.visualization.matplotlib.utils import add_shapely_linestring_to_ax, add_shapely_polygon_to_ax\n", + "from py123d.visualization.matplotlib.utils import add_shapely_linestring_to_ax, add_shapely_polygon_to_ax\n", "from py123d.datatypes.maps.abstract_map import AbstractMap\n", "from py123d.datatypes.maps.abstract_map_objects import AbstractLane, AbstractLaneGroup\n", "from py123d.datatypes.maps.gpkg.gpkg_map_objects import GPKGIntersection\n", diff --git a/notebooks/viz/bev_matplotlib_prediction.ipynb b/notebooks/viz/bev_matplotlib_prediction.ipynb index f8ba4b43..3dd4b920 100644 --- a/notebooks/viz/bev_matplotlib_prediction.ipynb +++ b/notebooks/viz/bev_matplotlib_prediction.ipynb @@ -65,17 +65,17 @@ "import matplotlib.pyplot as plt\n", "import numpy as np\n", "from py123d.geometry.base import Point2D\n", - "from py123d.common.visualization.color.color import BLACK, DARK_GREY, DARKER_GREY, LIGHT_GREY, NEW_TAB_10, TAB_10\n", - "from py123d.common.visualization.color.config import PlotConfig\n", - "from py123d.common.visualization.color.default import CENTERLINE_CONFIG, MAP_SURFACE_CONFIG, ROUTE_CONFIG\n", - "from py123d.common.visualization.matplotlib.observation import (\n", + "from py123d.visualization.color.color import BLACK, DARK_GREY, DARKER_GREY, LIGHT_GREY, NEW_TAB_10, TAB_10\n", + "from py123d.visualization.color.config import PlotConfig\n", + "from py123d.visualization.color.default import CENTERLINE_CONFIG, MAP_SURFACE_CONFIG, ROUTE_CONFIG\n", + "from py123d.visualization.matplotlib.observation import (\n", " add_box_detections_to_ax,\n", " add_box_future_detections_to_ax,\n", " add_default_map_on_ax,\n", " add_ego_vehicle_to_ax,\n", " add_traffic_lights_to_ax,\n", ")\n", - "from py123d.common.visualization.matplotlib.utils import 
add_shapely_linestring_to_ax, add_shapely_polygon_to_ax\n", + "from py123d.visualization.matplotlib.utils import add_shapely_linestring_to_ax, add_shapely_polygon_to_ax\n", "from py123d.dataset.maps.abstract_map import AbstractMap\n", "from py123d.dataset.maps.abstract_map_objects import AbstractLane, AbstractLaneGroup\n", "from py123d.dataset.maps.map_datatypes import MapLayer\n", diff --git a/notebooks/viz/camera_matplotlib.ipynb b/notebooks/viz/camera_matplotlib.ipynb index 85a2cc4a..97b38a12 100644 --- a/notebooks/viz/camera_matplotlib.ipynb +++ b/notebooks/viz/camera_matplotlib.ipynb @@ -19,8 +19,8 @@ "\n", "from typing import Dict\n", "from py123d.common.datatypes.sensor.camera import CameraType\n", - "from py123d.common.visualization.matplotlib.camera import add_camera_ax\n", - "from py123d.common.visualization.matplotlib.camera import add_box_detections_to_camera_ax" + "from py123d.visualization.matplotlib.camera import add_camera_ax\n", + "from py123d.visualization.matplotlib.camera import add_box_detections_to_camera_ax" ] }, { diff --git a/notebooks/viz/log_rendering.ipynb b/notebooks/viz/log_rendering.ipynb index 611fe58f..4048c20a 100644 --- a/notebooks/viz/log_rendering.ipynb +++ b/notebooks/viz/log_rendering.ipynb @@ -9,7 +9,7 @@ "source": [ "from pathlib import Path\n", "from py123d.dataset.scene.arrow_scene import ArrowScene\n", - "from py123d.common.visualization.matplotlib.plots import plot_scene_at_iteration\n", + "from py123d.visualization.matplotlib.plots import plot_scene_at_iteration\n", "\n", "\n", "\n", @@ -30,7 +30,7 @@ "outputs": [], "source": [ "import traceback\n", - "from py123d.common.visualization.matplotlib.plots import render_scene_animation\n", + "from py123d.visualization.matplotlib.plots import render_scene_animation\n", "\n", "output_path = Path(\"/home/daniel/py123d_logs_videos\")\n", "# render_scene_as_mp4(scene, output_path, fps=30, end_idx=10000, step=5, dpi=100)" @@ -45,7 +45,7 @@ "source": [ "# Create an mp4 animation with a specific FPS\n", "import traceback\n", - "from py123d.common.visualization.matplotlib.plots import render_scene_animation\n", + "from py123d.visualization.matplotlib.plots import render_scene_animation\n", "\n", "split = \"av2-sensor-mini_train\"\n", "output_path = Path(f\"/home/daniel/py123d_logs_videos/{split}\")\n", diff --git a/notebooks/viz/video_example.ipynb b/notebooks/viz/video_example.ipynb index 3868681a..28239dfe 100644 --- a/notebooks/viz/video_example.ipynb +++ b/notebooks/viz/video_example.ipynb @@ -78,18 +78,18 @@ "from typing import List, Optional, Tuple\n", "import matplotlib.pyplot as plt\n", "import numpy as np\n", - "from py123d.common.visualization.matplotlib.camera import add_camera_ax\n", + "from py123d.visualization.matplotlib.camera import add_camera_ax\n", "from py123d.geometry import Point2D\n", - "from py123d.common.visualization.color.color import BLACK, DARK_GREY, DARKER_GREY, LIGHT_GREY, NEW_TAB_10, TAB_10\n", - "from py123d.common.visualization.color.config import PlotConfig\n", - "from py123d.common.visualization.color.default import CENTERLINE_CONFIG, MAP_SURFACE_CONFIG, ROUTE_CONFIG\n", - "from py123d.common.visualization.matplotlib.observation import (\n", + "from py123d.visualization.color.color import BLACK, DARK_GREY, DARKER_GREY, LIGHT_GREY, NEW_TAB_10, TAB_10\n", + "from py123d.visualization.color.config import PlotConfig\n", + "from py123d.visualization.color.default import CENTERLINE_CONFIG, MAP_SURFACE_CONFIG, ROUTE_CONFIG\n", + "from py123d.visualization.matplotlib.observation 
import (\n", " add_box_detections_to_ax,\n", " add_default_map_on_ax,\n", " add_ego_vehicle_to_ax,\n", " add_traffic_lights_to_ax,\n", ")\n", - "from py123d.common.visualization.matplotlib.utils import add_shapely_linestring_to_ax, add_shapely_polygon_to_ax\n", + "from py123d.visualization.matplotlib.utils import add_shapely_linestring_to_ax, add_shapely_polygon_to_ax\n", "from py123d.dataset.maps.abstract_map import AbstractMap\n", "from py123d.dataset.maps.abstract_map_objects import AbstractLane, AbstractLaneGroup\n", "from py123d.dataset.maps.gpkg.gpkg_map_objects import GPKGIntersection\n", diff --git a/notebooks/viz/viser_testing_v2_scene.ipynb b/notebooks/viz/viser_testing_v2_scene.ipynb index 3cb9eead..4cf601fc 100644 --- a/notebooks/viz/viser_testing_v2_scene.ipynb +++ b/notebooks/viz/viser_testing_v2_scene.ipynb @@ -55,7 +55,7 @@ "metadata": {}, "outputs": [], "source": [ - "from py123d.common.visualization.viser.server import ViserVisualizationServer\n", + "from py123d.visualization.viser.server import ViserVisualizationServer\n", "\n", "\n", "visualization_server = ViserVisualizationServer(scenes, scene_index=0)" diff --git a/notebooks/waymo_perception/map_testing.ipynb b/notebooks/waymo_perception/map_testing.ipynb index abf08b80..9930025d 100644 --- a/notebooks/waymo_perception/map_testing.ipynb +++ b/notebooks/waymo_perception/map_testing.ipynb @@ -201,7 +201,7 @@ "metadata": {}, "outputs": [], "source": [ - "from py123d.common.visualization.matplotlib.utils import add_non_repeating_legend_to_ax\n", + "from py123d.visualization.matplotlib.utils import add_non_repeating_legend_to_ax\n", "\n", "\n", "fig, ax = plt.subplots(figsize=(30, 30))\n", diff --git a/src/py123d/common/visualization/viser/elements/__init__.py b/src/py123d/common/visualization/viser/elements/__init__.py deleted file mode 100644 index 45d9d60f..00000000 --- a/src/py123d/common/visualization/viser/elements/__init__.py +++ /dev/null @@ -1,7 +0,0 @@ -from py123d.common.visualization.viser.elements.detection_elements import add_box_detections_to_viser_server -from py123d.common.visualization.viser.elements.map_elements import add_map_to_viser_server -from py123d.common.visualization.viser.elements.sensor_elements import ( - add_camera_frustums_to_viser_server, - add_camera_gui_to_viser_server, - add_lidar_pc_to_viser_server, -) diff --git a/src/py123d/script/run_viser.py b/src/py123d/script/run_viser.py index 33be1b35..32206141 100644 --- a/src/py123d/script/run_viser.py +++ b/src/py123d/script/run_viser.py @@ -3,7 +3,7 @@ import hydra from omegaconf import DictConfig -from py123d.common.visualization.viser.viser_viewer import ViserViewer +from py123d.visualization.viser.viser_viewer import ViserViewer from py123d.script.builders.scene_builder_builder import build_scene_builder from py123d.script.builders.scene_filter_builder import build_scene_filter from py123d.script.run_conversion import build_worker diff --git a/src/py123d/common/visualization/__init__.py b/src/py123d/visualization/__init__.py similarity index 100% rename from src/py123d/common/visualization/__init__.py rename to src/py123d/visualization/__init__.py diff --git a/src/py123d/common/visualization/bokeh/.gitkeep b/src/py123d/visualization/bokeh/.gitkeep similarity index 100% rename from src/py123d/common/visualization/bokeh/.gitkeep rename to src/py123d/visualization/bokeh/.gitkeep diff --git a/src/py123d/common/visualization/color/__init__.py b/src/py123d/visualization/color/__init__.py similarity index 100% rename from 
src/py123d/common/visualization/color/__init__.py rename to src/py123d/visualization/color/__init__.py diff --git a/src/py123d/common/visualization/color/color.py b/src/py123d/visualization/color/color.py similarity index 100% rename from src/py123d/common/visualization/color/color.py rename to src/py123d/visualization/color/color.py diff --git a/src/py123d/common/visualization/color/config.py b/src/py123d/visualization/color/config.py similarity index 88% rename from src/py123d/common/visualization/color/config.py rename to src/py123d/visualization/color/config.py index db9007e3..1e522f32 100644 --- a/src/py123d/common/visualization/color/config.py +++ b/src/py123d/visualization/color/config.py @@ -1,7 +1,7 @@ from dataclasses import dataclass, field from typing import Optional -from py123d.common.visualization.color.color import BLACK, Color +from py123d.visualization.color.color import BLACK, Color @dataclass diff --git a/src/py123d/common/visualization/color/default.py b/src/py123d/visualization/color/default.py similarity index 98% rename from src/py123d/common/visualization/color/default.py rename to src/py123d/visualization/color/default.py index 9ea270e8..6a8f2f6c 100644 --- a/src/py123d/common/visualization/color/default.py +++ b/src/py123d/visualization/color/default.py @@ -1,6 +1,6 @@ from typing import Dict -from py123d.common.visualization.color.color import ( +from py123d.visualization.color.color import ( BLACK, DARKER_GREY, ELLIS_5, @@ -10,7 +10,7 @@ WHITE, Color, ) -from py123d.common.visualization.color.config import PlotConfig +from py123d.visualization.color.config import PlotConfig from py123d.datatypes.detections.detection import TrafficLightStatus from py123d.datatypes.detections.detection_types import DetectionType from py123d.datatypes.maps.map_datatypes import MapLayer diff --git a/src/py123d/common/visualization/matplotlib/__init__.py b/src/py123d/visualization/matplotlib/__init__.py similarity index 100% rename from src/py123d/common/visualization/matplotlib/__init__.py rename to src/py123d/visualization/matplotlib/__init__.py diff --git a/src/py123d/common/visualization/matplotlib/camera.py b/src/py123d/visualization/matplotlib/camera.py similarity index 99% rename from src/py123d/common/visualization/matplotlib/camera.py rename to src/py123d/visualization/matplotlib/camera.py index f2de42d5..344a2b1c 100644 --- a/src/py123d/common/visualization/matplotlib/camera.py +++ b/src/py123d/visualization/matplotlib/camera.py @@ -10,7 +10,7 @@ # from PIL import ImageColor from pyquaternion import Quaternion -from py123d.common.visualization.color.default import BOX_DETECTION_CONFIG +from py123d.visualization.color.default import BOX_DETECTION_CONFIG from py123d.datatypes.detections.detection import BoxDetectionSE3, BoxDetectionWrapper from py123d.datatypes.detections.detection_types import DetectionType from py123d.datatypes.sensors.camera.pinhole_camera import PinholeCamera diff --git a/src/py123d/common/visualization/matplotlib/lidar.py b/src/py123d/visualization/matplotlib/lidar.py similarity index 100% rename from src/py123d/common/visualization/matplotlib/lidar.py rename to src/py123d/visualization/matplotlib/lidar.py diff --git a/src/py123d/common/visualization/matplotlib/observation.py b/src/py123d/visualization/matplotlib/observation.py similarity index 97% rename from src/py123d/common/visualization/matplotlib/observation.py rename to src/py123d/visualization/matplotlib/observation.py index 4dba948c..06173a88 100644 --- 
a/src/py123d/common/visualization/matplotlib/observation.py +++ b/src/py123d/visualization/matplotlib/observation.py @@ -4,8 +4,8 @@ import numpy as np import shapely.geometry as geom -from py123d.common.visualization.color.config import PlotConfig -from py123d.common.visualization.color.default import ( +from py123d.visualization.color.config import PlotConfig +from py123d.visualization.color.default import ( BOX_DETECTION_CONFIG, CENTERLINE_CONFIG, EGO_VEHICLE_CONFIG, @@ -13,7 +13,7 @@ ROUTE_CONFIG, TRAFFIC_LIGHT_CONFIG, ) -from py123d.common.visualization.matplotlib.utils import ( +from py123d.visualization.matplotlib.utils import ( add_shapely_linestring_to_ax, add_shapely_polygon_to_ax, get_pose_triangle, diff --git a/src/py123d/common/visualization/matplotlib/plots.py b/src/py123d/visualization/matplotlib/plots.py similarity index 97% rename from src/py123d/common/visualization/matplotlib/plots.py rename to src/py123d/visualization/matplotlib/plots.py index c7d55ffc..41228303 100644 --- a/src/py123d/common/visualization/matplotlib/plots.py +++ b/src/py123d/visualization/matplotlib/plots.py @@ -5,7 +5,7 @@ import matplotlib.pyplot as plt from tqdm import tqdm -from py123d.common.visualization.matplotlib.observation import ( +from py123d.visualization.matplotlib.observation import ( add_box_detections_to_ax, add_default_map_on_ax, add_ego_vehicle_to_ax, diff --git a/src/py123d/common/visualization/matplotlib/utils.py b/src/py123d/visualization/matplotlib/utils.py similarity index 98% rename from src/py123d/common/visualization/matplotlib/utils.py rename to src/py123d/visualization/matplotlib/utils.py index f4b2aa52..1ab1ff3d 100644 --- a/src/py123d/common/visualization/matplotlib/utils.py +++ b/src/py123d/visualization/matplotlib/utils.py @@ -7,7 +7,7 @@ import shapely.geometry as geom from matplotlib.path import Path -from py123d.common.visualization.color.config import PlotConfig +from py123d.visualization.color.config import PlotConfig from py123d.geometry import StateSE2, StateSE3 diff --git a/src/py123d/common/visualization/utils/.gitkeep b/src/py123d/visualization/utils/.gitkeep similarity index 100% rename from src/py123d/common/visualization/utils/.gitkeep rename to src/py123d/visualization/utils/.gitkeep diff --git a/src/py123d/common/visualization/viser/__init__.py b/src/py123d/visualization/viser/__init__.py similarity index 100% rename from src/py123d/common/visualization/viser/__init__.py rename to src/py123d/visualization/viser/__init__.py diff --git a/src/py123d/visualization/viser/elements/__init__.py b/src/py123d/visualization/viser/elements/__init__.py new file mode 100644 index 00000000..542ec2dc --- /dev/null +++ b/src/py123d/visualization/viser/elements/__init__.py @@ -0,0 +1,7 @@ +from py123d.visualization.viser.elements.detection_elements import add_box_detections_to_viser_server +from py123d.visualization.viser.elements.map_elements import add_map_to_viser_server +from py123d.visualization.viser.elements.sensor_elements import ( + add_camera_frustums_to_viser_server, + add_camera_gui_to_viser_server, + add_lidar_pc_to_viser_server, +) diff --git a/src/py123d/common/visualization/viser/elements/detection_elements.py b/src/py123d/visualization/viser/elements/detection_elements.py similarity index 96% rename from src/py123d/common/visualization/viser/elements/detection_elements.py rename to src/py123d/visualization/viser/elements/detection_elements.py index 62e5da4e..639c1632 100644 --- a/src/py123d/common/visualization/viser/elements/detection_elements.py +++ 
b/src/py123d/visualization/viser/elements/detection_elements.py @@ -3,8 +3,8 @@ import trimesh import viser -from py123d.common.visualization.color.default import BOX_DETECTION_CONFIG -from py123d.common.visualization.viser.viser_config import ViserConfig +from py123d.visualization.color.default import BOX_DETECTION_CONFIG +from py123d.visualization.viser.viser_config import ViserConfig from py123d.datatypes.detections.detection_types import DetectionType from py123d.datatypes.scene.abstract_scene import AbstractScene from py123d.datatypes.vehicle_state.ego_state import EgoStateSE3 diff --git a/src/py123d/common/visualization/viser/elements/map_elements.py b/src/py123d/visualization/viser/elements/map_elements.py similarity index 97% rename from src/py123d/common/visualization/viser/elements/map_elements.py rename to src/py123d/visualization/viser/elements/map_elements.py index 11c07978..c54e5e07 100644 --- a/src/py123d/common/visualization/viser/elements/map_elements.py +++ b/src/py123d/visualization/viser/elements/map_elements.py @@ -4,8 +4,8 @@ import trimesh import viser -from py123d.common.visualization.color.default import MAP_SURFACE_CONFIG -from py123d.common.visualization.viser.viser_config import ViserConfig +from py123d.visualization.color.default import MAP_SURFACE_CONFIG +from py123d.visualization.viser.viser_config import ViserConfig from py123d.datatypes.maps.abstract_map import MapLayer from py123d.datatypes.maps.abstract_map_objects import AbstractSurfaceMapObject from py123d.datatypes.scene.abstract_scene import AbstractScene diff --git a/src/py123d/common/visualization/viser/elements/sensor_elements.py b/src/py123d/visualization/viser/elements/sensor_elements.py similarity index 99% rename from src/py123d/common/visualization/viser/elements/sensor_elements.py rename to src/py123d/visualization/viser/elements/sensor_elements.py index a467af0e..758140bd 100644 --- a/src/py123d/common/visualization/viser/elements/sensor_elements.py +++ b/src/py123d/visualization/viser/elements/sensor_elements.py @@ -6,7 +6,7 @@ import numpy.typing as npt import viser -from py123d.common.visualization.viser.viser_config import ViserConfig +from py123d.visualization.viser.viser_config import ViserConfig from py123d.datatypes.scene.abstract_scene import AbstractScene from py123d.datatypes.sensors.camera.pinhole_camera import PinholeCamera, PinholeCameraType from py123d.datatypes.sensors.lidar.lidar import LiDARType diff --git a/src/py123d/common/visualization/viser/viser_config.py b/src/py123d/visualization/viser/viser_config.py similarity index 100% rename from src/py123d/common/visualization/viser/viser_config.py rename to src/py123d/visualization/viser/viser_config.py diff --git a/src/py123d/common/visualization/viser/viser_viewer.py b/src/py123d/visualization/viser/viser_viewer.py similarity index 98% rename from src/py123d/common/visualization/viser/viser_viewer.py rename to src/py123d/visualization/viser/viser_viewer.py index b8b9ac78..d42952b2 100644 --- a/src/py123d/common/visualization/viser/viser_viewer.py +++ b/src/py123d/visualization/viser/viser_viewer.py @@ -5,14 +5,14 @@ import viser from viser.theme import TitlebarButton, TitlebarConfig, TitlebarImage -from py123d.common.visualization.viser.elements import ( +from py123d.visualization.viser.elements import ( add_box_detections_to_viser_server, add_camera_frustums_to_viser_server, add_camera_gui_to_viser_server, add_lidar_pc_to_viser_server, add_map_to_viser_server, ) -from py123d.common.visualization.viser.viser_config import 
ViserConfig +from py123d.visualization.viser.viser_config import ViserConfig from py123d.datatypes.maps.map_datatypes import MapLayer from py123d.datatypes.scene.abstract_scene import AbstractScene from py123d.datatypes.sensors.camera.pinhole_camera import PinholeCameraType diff --git a/test_viser.py b/test_viser.py index 2df564d2..65078c45 100644 --- a/test_viser.py +++ b/test_viser.py @@ -1,5 +1,5 @@ from py123d.common.multithreading.worker_sequential import Sequential -from py123d.common.visualization.viser.viser_viewer import ViserViewer +from py123d.visualization.viser.viser_viewer import ViserViewer from py123d.datatypes.scene.arrow.arrow_scene_builder import ArrowSceneBuilder from py123d.datatypes.scene.scene_filter import SceneFilter from py123d.datatypes.sensors.camera.pinhole_camera import PinholeCameraType From 2e9b0932094c40f134daf71d58a12de88f27575d Mon Sep 17 00:00:00 2001 From: Daniel Dauner Date: Fri, 17 Oct 2025 23:32:30 +0200 Subject: [PATCH 093/145] Add laspy compression for lidar point clouds (#58), Include lidar in av2 sensor for both `binary` and `path` options. --- pyproject.toml | 1 + scripts/conversion/av2_sensor_conversion.sh | 3 +- .../datasets/av2/av2_sensor_converter.py | 255 +++++++++++------- .../datasets/av2/{ => utils}/av2_constants.py | 11 +- .../datasets/av2/{ => utils}/av2_helper.py | 2 +- .../av2/{ => utils}/av2_map_conversion.py | 2 +- .../datasets/av2/utils/av2_sensor_loading.py | 22 ++ .../conversion/log_writer/arrow_log_writer.py | 19 +- .../conversion/log_writer/utils/__init__.py | 0 .../log_writer/utils/lidar_compression.py | 69 +++++ .../sensor_utils/lidar_index_registry.py | 13 + .../scene/arrow/arrow_scene_builder.py | 16 +- .../scene/arrow/utils/arrow_getters.py | 102 ++++--- src/py123d/datatypes/sensors/lidar/lidar.py | 11 +- .../config/common/default_dataset_paths.yaml | 24 +- .../common/scene_filter/log_scenes.yaml | 2 +- .../datasets/av2_sensor_dataset.yaml | 8 +- .../log_writer/arrow_log_writer.yaml | 6 +- src/py123d/script/run_viser.py | 6 +- .../viser/elements/map_elements.py | 1 - .../visualization/viser/viser_config.py | 3 +- test_viser.py | 5 +- 22 files changed, 391 insertions(+), 190 deletions(-) rename src/py123d/conversion/datasets/av2/{ => utils}/av2_constants.py (96%) rename src/py123d/conversion/datasets/av2/{ => utils}/av2_helper.py (99%) rename src/py123d/conversion/datasets/av2/{ => utils}/av2_map_conversion.py (99%) create mode 100644 src/py123d/conversion/datasets/av2/utils/av2_sensor_loading.py create mode 100644 src/py123d/conversion/log_writer/utils/__init__.py create mode 100644 src/py123d/conversion/log_writer/utils/lidar_compression.py diff --git a/pyproject.toml b/pyproject.toml index 9e46379c..a052cda8 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -47,6 +47,7 @@ dependencies = [ "lxml", "trimesh", "viser", + "laspy[laszip]", ] [project.scripts] diff --git a/scripts/conversion/av2_sensor_conversion.sh b/scripts/conversion/av2_sensor_conversion.sh index 8f4ce53f..9e493387 100644 --- a/scripts/conversion/av2_sensor_conversion.sh +++ b/scripts/conversion/av2_sensor_conversion.sh @@ -1 +1,2 @@ -py123d-conversion datasets=["av2_sensor_dataset"] \ No newline at end of file +py123d-conversion datasets=["av2_sensor_dataset"] \ +dataset_paths.av2_data_root="/media/nvme1/argoverse" \ No newline at end of file diff --git a/src/py123d/conversion/datasets/av2/av2_sensor_converter.py b/src/py123d/conversion/datasets/av2/av2_sensor_converter.py index 26c9a618..a6495839 100644 --- 
a/src/py123d/conversion/datasets/av2/av2_sensor_converter.py +++ b/src/py123d/conversion/datasets/av2/av2_sensor_converter.py @@ -2,25 +2,28 @@ from typing import Dict, List, Optional, Tuple, Union import numpy as np +import numpy.typing as npt import pandas as pd from py123d.conversion.abstract_dataset_converter import AbstractDatasetConverter from py123d.conversion.dataset_converter_config import DatasetConverterConfig -from py123d.conversion.datasets.av2.av2_constants import ( +from py123d.conversion.datasets.av2.utils.av2_constants import ( AV2_CAMERA_TYPE_MAPPING, AV2_SENSOR_SPLITS, AV2_TO_DETECTION_TYPE, AV2SensorBoxDetectionType, ) -from py123d.conversion.datasets.av2.av2_helper import ( +from py123d.conversion.datasets.av2.utils.av2_helper import ( build_sensor_dataframe, build_synchronization_dataframe, find_closest_target_fpath, get_slice_with_timestamp_ns, ) -from py123d.conversion.datasets.av2.av2_map_conversion import convert_av2_map +from py123d.conversion.datasets.av2.utils.av2_map_conversion import convert_av2_map +from py123d.conversion.datasets.av2.utils.av2_sensor_loading import load_av2_sensor_lidar_pc_from_path from py123d.conversion.log_writer.abstract_log_writer import AbstractLogWriter from py123d.conversion.map_writer.abstract_map_writer import AbstractMapWriter +from py123d.conversion.utils.sensor_utils.lidar_index_registry import AVSensorLidarIndex from py123d.datatypes.detections.detection import BoxDetectionMetadata, BoxDetectionSE3, BoxDetectionWrapper from py123d.datatypes.detections.detection_types import DetectionType from py123d.datatypes.maps.map_metadata import MapMetadata @@ -69,9 +72,7 @@ def _collect_log_paths(self) -> Dict[str, List[Path]]: assert split_type in ["train", "val", "test"] if "av2-sensor" == dataset_name: - log_folder = self._av2_data_root / dataset_name / split_type - elif "av2-sensor-mini" == dataset_name: - log_folder = self._av2_data_root / "sensor-mini" / split_type + log_folder = self._av2_data_root / "sensor" / split_type else: raise ValueError(f"Unknown dataset name {dataset_name} in split {split}.") @@ -93,7 +94,7 @@ def convert_map(self, map_index: int, map_writer: AbstractMapWriter) -> None: source_log_path, split = self._log_paths_and_split[map_index] # 1. Initialize map metadata - map_metadata = _get_av2_sensor_map_metadata(split, source_log_path.name) + map_metadata = _get_av2_sensor_map_metadata(split, source_log_path) # 2. Prepare map writer map_needs_writing = map_writer.reset(self.dataset_converter_config, map_metadata) @@ -111,16 +112,17 @@ def convert_log(self, log_index: int, log_writer: AbstractLogWriter) -> None: source_log_path, split = self._log_paths_and_split[log_index] # 1. Initialize Metadata + map_metadata = _get_av2_sensor_map_metadata(split, source_log_path) log_metadata = LogMetadata( dataset="av2-sensor", split=split, log_name=source_log_path.name, - location=None, # TODO: Add location information. + location=map_metadata.location, timestep_seconds=0.1, vehicle_parameters=get_av2_ford_fusion_hybrid_parameters(), - camera_metadata=get_av2_camera_metadata(source_log_path), - lidar_metadata=get_av2_lidar_metadata(source_log_path), - map_metadata=_get_av2_sensor_map_metadata(split, source_log_path.name), + camera_metadata=_get_av2_camera_metadata(source_log_path, self.dataset_converter_config), + lidar_metadata=_get_av2_lidar_metadata(source_log_path, self.dataset_converter_config), + map_metadata=map_metadata, ) # 2. 
Prepare log writer @@ -141,10 +143,8 @@ def convert_log(self, log_index: int, log_writer: AbstractLogWriter) -> None: else None ) city_se3_egovehicle_df = pd.read_feather(source_log_path / "city_SE3_egovehicle.feather") - egovehicle_se3_sensor_df = ( - pd.read_feather(source_log_path / "calibration" / "egovehicle_SE3_sensor.feather") - if self.dataset_converter_config.camera_store_option is not None - else None + egovehicle_se3_sensor_df = pd.read_feather( + source_log_path / "calibration" / "egovehicle_SE3_sensor.feather" ) for lidar_timestamp_ns in lidar_timestamps_ns: @@ -160,63 +160,99 @@ def convert_log(self, log_index: int, log_writer: AbstractLogWriter) -> None: source_log_path, self.dataset_converter_config, ), + lidars=_extract_av2_sensor_lidars( + source_log_path, + lidar_timestamp_ns, + self.dataset_converter_config, + ), ) # 4. Finalize log writing log_writer.close() -def _get_av2_sensor_map_metadata(split: str, log_name: str) -> MapMetadata: +def _get_av2_sensor_map_metadata(split: str, source_log_path: Path) -> MapMetadata: + # NOTE: We need to get the city name from the map folder. + # see: https://github.com/argoverse/av2-api/blob/main/src/av2/datasets/sensor/av2_sensor_dataloader.py#L163 + map_folder = source_log_path / "map" + log_map_archive_path = next(map_folder.glob("log_map_archive_*.json")) + location = log_map_archive_path.name.split("____")[1].split("_")[0] return MapMetadata( dataset="av2-sensor", split=split, - log_name=log_name, - location=None, # TODO: Add location information, e.g. city name. + log_name=source_log_path.name, + location=location, # City name, parsed from the log map archive filename. map_has_z=True, map_is_local=True, ) -def get_av2_camera_metadata(source_log_path: Path) -> Dict[PinholeCameraType, PinholeCameraMetadata]: - - intrinsics_file = source_log_path / "calibration" / "intrinsics.feather" - intrinsics_df = pd.read_feather(intrinsics_file) +def _get_av2_camera_metadata( + source_log_path: Path, dataset_converter_config: DatasetConverterConfig +) -> Dict[PinholeCameraType, PinholeCameraMetadata]: camera_metadata: Dict[PinholeCameraType, PinholeCameraMetadata] = {} - for _, row in intrinsics_df.iterrows(): - row = row.to_dict() - camera_type = AV2_CAMERA_TYPE_MAPPING[row["sensor_name"]] - camera_metadata[camera_type] = PinholeCameraMetadata( - camera_type=camera_type, - width=row["width_px"], - height=row["height_px"], - intrinsics=PinholeIntrinsics( - fx=row["fx_px"], - fy=row["fy_px"], - cx=row["cx_px"], - cy=row["cy_px"], - ), - distortion=PinholeDistortion( - k1=row["k1"], - k2=row["k2"], - p1=0.0, - p2=0.0, - k3=row["k3"], - ), - ) + + if dataset_converter_config.include_cameras: + intrinsics_file = source_log_path / "calibration" / "intrinsics.feather" + intrinsics_df = pd.read_feather(intrinsics_file) + for _, row in intrinsics_df.iterrows(): + row = row.to_dict() + camera_type = AV2_CAMERA_TYPE_MAPPING[row["sensor_name"]] + camera_metadata[camera_type] = PinholeCameraMetadata( + camera_type=camera_type, + width=row["width_px"], + height=row["height_px"], + intrinsics=PinholeIntrinsics( + fx=row["fx_px"], + fy=row["fy_px"], + cx=row["cx_px"], + cy=row["cy_px"], + ), + distortion=PinholeDistortion( + k1=row["k1"], + k2=row["k2"], + p1=0.0, + p2=0.0, + k3=row["k3"], + ), + ) return camera_metadata -def get_av2_lidar_metadata(log_path: Path) -> Dict[LiDARType, LiDARMetadata]: - # metadata: Dict[LiDARType, LiDARMetadata] = {} - # metadata[LiDARType.LIDAR_MERGED] = LiDARMetadata( - # lidar_index=NuplanLidarIndex, - # extrinsic=None, # NOTE: LiDAR extrinsic are unknown - # ) - # return metadata - return {} +def _get_av2_lidar_metadata( + source_log_path: Path, dataset_converter_config: DatasetConverterConfig +) -> Dict[LiDARType, LiDARMetadata]: + + metadata: Dict[LiDARType, LiDARMetadata] = {} + + if dataset_converter_config.include_lidars: + + # Load calibration feather file + calibration_file = source_log_path / "calibration" / "egovehicle_SE3_sensor.feather" + calibration_df = pd.read_feather(calibration_file) + + # NOTE: AV2 has two stacked lidars: up_lidar and down_lidar. + # We store these as separate LiDARType entries. + + # top lidar: + metadata[LiDARType.LIDAR_TOP] = LiDARMetadata( + lidar_type=LiDARType.LIDAR_TOP, + lidar_index=AVSensorLidarIndex, + extrinsic=_row_dict_to_state_se3( + calibration_df[calibration_df["sensor_name"] == "up_lidar"].iloc[0].to_dict() + ), + ) + # down lidar: + metadata[LiDARType.LIDAR_DOWN] = LiDARMetadata( + lidar_type=LiDARType.LIDAR_DOWN, + lidar_index=AVSensorLidarIndex, + extrinsic=_row_dict_to_state_se3( + calibration_df[calibration_df["sensor_name"] == "down_lidar"].iloc[0].to_dict() + ), + ) + return metadata def _extract_av2_sensor_box_detections( @@ -278,15 +314,7 @@ ), f"Expected exactly one ego state for timestamp {lidar_timestamp_ns}, got {len(ego_state_slice)}." ego_pose_dict = ego_state_slice.iloc[0].to_dict() - rear_axle_pose = StateSE3( - x=ego_pose_dict["tx_m"], - y=ego_pose_dict["ty_m"], - z=ego_pose_dict["tz_m"], - qw=ego_pose_dict["qw"], - qx=ego_pose_dict["qx"], - qy=ego_pose_dict["qy"], - qz=ego_pose_dict["qz"], - ) + rear_axle_pose = _row_dict_to_state_se3(ego_pose_dict) vehicle_parameters = get_av2_ford_fusion_hybrid_parameters() center = rear_axle_se3_to_center_se3(rear_axle_se3=rear_axle_pose, vehicle_parameters=vehicle_parameters) @@ -318,49 +346,78 @@ split = source_log_path.parent.name log_id = source_log_path.name - source_dataset_dir = source_log_path.parent.parent + if dataset_converter_config.include_cameras: + av2_sensor_data_root = source_log_path.parent.parent - for _, row in egovehicle_se3_sensor_df.iterrows(): - row = row.to_dict() - if row["sensor_name"] not in AV2_CAMERA_TYPE_MAPPING: - continue + for _, row in egovehicle_se3_sensor_df.iterrows(): + row = row.to_dict() + if row["sensor_name"] not in AV2_CAMERA_TYPE_MAPPING: + continue - camera_name = row["sensor_name"] - camera_type = AV2_CAMERA_TYPE_MAPPING[camera_name] + camera_name = row["sensor_name"] + camera_type = AV2_CAMERA_TYPE_MAPPING[camera_name] - relative_image_path = find_closest_target_fpath( - split=split, - log_id=log_id, - src_sensor_name="lidar", - src_timestamp_ns=lidar_timestamp_ns, - target_sensor_name=camera_name, - synchronization_df=synchronization_df, - ) - if relative_image_path is not None: - absolute_image_path = source_dataset_dir / relative_image_path - assert absolute_image_path.exists() - - # TODO: Adjust for finer IMU timestamps to correct the camera extrinsic.
- camera_extrinsic = StateSE3( - x=row["tx_m"], - y=row["ty_m"], - z=row["tz_m"], - qw=row["qw"], - qx=row["qx"], - qy=row["qy"], - qz=row["qz"], + relative_image_path = find_closest_target_fpath( + split=split, + log_id=log_id, + src_sensor_name="lidar", + src_timestamp_ns=lidar_timestamp_ns, + target_sensor_name=camera_name, + synchronization_df=synchronization_df, ) - camera_data = None - if dataset_converter_config.camera_store_option == "path": - camera_data = str(relative_image_path) - elif dataset_converter_config.camera_store_option == "binary": - with open(absolute_image_path, "rb") as f: - camera_data = f.read() - camera_dict[camera_type] = camera_data, camera_extrinsic + if relative_image_path is not None: + absolute_image_path = av2_sensor_data_root / relative_image_path + assert absolute_image_path.exists() + + # TODO: Adjust for finer IMU timestamps to correct the camera extrinsic. + camera_extrinsic = _row_dict_to_state_se3(row) + camera_data = None + if dataset_converter_config.camera_store_option == "path": + camera_data = str(relative_image_path) + elif dataset_converter_config.camera_store_option == "binary": + with open(absolute_image_path, "rb") as f: + camera_data = f.read() + camera_dict[camera_type] = camera_data, camera_extrinsic return camera_dict -def _extract_lidar(lidar_pc, dataset_converter_config: DatasetConverterConfig) -> Dict[LiDARType, Optional[str]]: - # TODO: Implement this function to extract lidar data. - return {} +def _extract_av2_sensor_lidars( + source_log_path: Path, lidar_timestamp_ns: int, dataset_converter_config: DatasetConverterConfig +) -> Optional[Dict[LiDARType, Union[str, npt.NDArray[np.float32]]]]: + lidar_dict: Dict[LiDARType, Union[str, npt.NDArray[np.float32]]] = {} + if dataset_converter_config.include_lidars: + av2_sensor_data_root = source_log_path.parent.parent + split_type = source_log_path.parent.name + log_name = source_log_path.name + + relative_feather_path = f"{split_type}/{log_name}/sensors/lidar/{lidar_timestamp_ns}.feather" + lidar_feather_path = av2_sensor_data_root / relative_feather_path + # if lidar_feather_path.exists(): + + assert lidar_feather_path.exists(), f"LiDAR feather file not found: {lidar_feather_path}" + if dataset_converter_config.lidar_store_option == "path": + # NOTE: It is somewhat inefficient to store the same path for both lidars, + # but this keeps the interface simple for now. + lidar_dict = { + LiDARType.LIDAR_TOP: str(relative_feather_path), + LiDARType.LIDAR_DOWN: str(relative_feather_path), + } + elif dataset_converter_config.lidar_store_option == "binary": + # NOTE: For binary storage, we pass the point cloud to the log writer. + # Compression is handled internally in the log writer. 
+ lidar_dict: Dict[LiDARType, np.ndarray] = load_av2_sensor_lidar_pc_from_path(lidar_feather_path) + return lidar_dict + + +def _row_dict_to_state_se3(row_dict: Dict[str, float]) -> StateSE3: + """Helper function to convert a row dictionary to a StateSE3 object.""" + return StateSE3( + x=row_dict["tx_m"], + y=row_dict["ty_m"], + z=row_dict["tz_m"], + qw=row_dict["qw"], + qx=row_dict["qx"], + qy=row_dict["qy"], + qz=row_dict["qz"], + ) diff --git a/src/py123d/conversion/datasets/av2/av2_constants.py b/src/py123d/conversion/datasets/av2/utils/av2_constants.py similarity index 96% rename from src/py123d/conversion/datasets/av2/av2_constants.py rename to src/py123d/conversion/datasets/av2/utils/av2_constants.py index 8e682b74..56a6285f 100644 --- a/src/py123d/conversion/datasets/av2/av2_constants.py +++ b/src/py123d/conversion/datasets/av2/utils/av2_constants.py @@ -5,14 +5,7 @@ from py123d.datatypes.maps.map_datatypes import RoadLineType from py123d.datatypes.sensors.camera.pinhole_camera import PinholeCameraType -AV2_SENSOR_SPLITS: Set[str] = { - "av2-sensor_train", - "av2-sensor_val", - "av2-sensor_test", - "av2-sensor-mini_train", - "av2-sensor-mini_val", - "av2-sensor-mini_test", -} +AV2_SENSOR_SPLITS: Set[str] = {"av2-sensor_train", "av2-sensor_val", "av2-sensor_test"} class AV2SensorBoxDetectionType(SerialIntEnum): @@ -98,6 +91,8 @@ class AV2SensorBoxDetectionType(SerialIntEnum): "stereo_front_right": PinholeCameraType.CAM_STEREO_R, } +# AV2_LIDAR_TYPES: Dict[str, str] = { + AV2_ROAD_LINE_TYPE_MAPPING: Dict[str, RoadLineType] = { "NONE": RoadLineType.NONE, diff --git a/src/py123d/conversion/datasets/av2/av2_helper.py b/src/py123d/conversion/datasets/av2/utils/av2_helper.py similarity index 99% rename from src/py123d/conversion/datasets/av2/av2_helper.py rename to src/py123d/conversion/datasets/av2/utils/av2_helper.py index 3dbb5d0b..cd0c1f62 100644 --- a/src/py123d/conversion/datasets/av2/av2_helper.py +++ b/src/py123d/conversion/datasets/av2/utils/av2_helper.py @@ -3,7 +3,7 @@ import pandas as pd -from py123d.conversion.datasets.av2.av2_constants import ( +from py123d.conversion.datasets.av2.utils.av2_constants import ( AV2_CAMERA_TYPE_MAPPING, AV2_SENSOR_CAM_SHUTTER_INTERVAL_MS, AV2_SENSOR_LIDAR_SWEEP_INTERVAL_W_BUFFER_NS, diff --git a/src/py123d/conversion/datasets/av2/av2_map_conversion.py b/src/py123d/conversion/datasets/av2/utils/av2_map_conversion.py similarity index 99% rename from src/py123d/conversion/datasets/av2/av2_map_conversion.py rename to src/py123d/conversion/datasets/av2/utils/av2_map_conversion.py index ea5abf4e..18766ba6 100644 --- a/src/py123d/conversion/datasets/av2/av2_map_conversion.py +++ b/src/py123d/conversion/datasets/av2/utils/av2_map_conversion.py @@ -8,7 +8,7 @@ import shapely import shapely.geometry as geom -from py123d.conversion.datasets.av2.av2_constants import AV2_ROAD_LINE_TYPE_MAPPING +from py123d.conversion.datasets.av2.utils.av2_constants import AV2_ROAD_LINE_TYPE_MAPPING from py123d.conversion.map_writer.abstract_map_writer import AbstractMapWriter from py123d.conversion.utils.map_utils.road_edge.road_edge_2d_utils import ( get_road_edge_linear_rings, diff --git a/src/py123d/conversion/datasets/av2/utils/av2_sensor_loading.py b/src/py123d/conversion/datasets/av2/utils/av2_sensor_loading.py new file mode 100644 index 00000000..1e89b551 --- /dev/null +++ b/src/py123d/conversion/datasets/av2/utils/av2_sensor_loading.py @@ -0,0 +1,22 @@ +from pathlib import Path +from typing import Dict, Union + +import numpy as np +import pandas as pd + +from 
py123d.datatypes.sensors.lidar.lidar import LiDARType + + +def load_av2_sensor_lidar_pc_from_path(feather_path: Union[Path, str]) -> Dict[LiDARType, np.ndarray]: + # NOTE: The AV2 dataset stores both top and down LiDAR data in the same feather file. + # We need to separate them based on the laser_number field. + # See here: https://github.com/argoverse/av2-api/issues/77#issuecomment-1178040867 + all_lidar_df = pd.read_feather(feather_path, columns=["x", "y", "z", "intensity", "laser_number"]) + + lidar_down = all_lidar_df[all_lidar_df["laser_number"] >= 32] + lidar_top = all_lidar_df[all_lidar_df["laser_number"] < 32] + + lidar_down = lidar_down.drop(columns=["laser_number"]) + lidar_top = lidar_top.drop(columns=["laser_number"]) + + return {LiDARType.LIDAR_DOWN: lidar_down.to_numpy(), LiDARType.LIDAR_TOP: lidar_top.to_numpy()} diff --git a/src/py123d/conversion/log_writer/arrow_log_writer.py b/src/py123d/conversion/log_writer/arrow_log_writer.py index 62154cf6..7b623e61 100644 --- a/src/py123d/conversion/log_writer/arrow_log_writer.py +++ b/src/py123d/conversion/log_writer/arrow_log_writer.py @@ -5,6 +5,7 @@ from py123d.common.utils.uuid_utils import create_deterministic_uuid from py123d.conversion.abstract_dataset_converter import AbstractLogWriter, DatasetConverterConfig +from py123d.conversion.log_writer.utils.lidar_compression import compress_lidar_with_laz from py123d.datatypes.detections.detection import BoxDetectionWrapper, TrafficLightDetectionWrapper from py123d.datatypes.scene.arrow.utils.arrow_metadata_utils import add_log_metadata_to_arrow_schema from py123d.datatypes.scene.scene_metadata import LogMetadata @@ -20,13 +21,13 @@ class ArrowLogWriter(AbstractLogWriter): def __init__( self, logs_root: Union[str, Path], - compression: Optional[Literal["lz4", "zstd"]] = None, - compression_level: Optional[int] = None, + ipc_compression: Optional[Literal["lz4", "zstd"]] = None, + ipc_compression_level: Optional[int] = None, ) -> None: self._logs_root = Path(logs_root) - self._compression = compression - self._compression_level = compression_level + self._ipc_compression = ipc_compression + self._ipc_compression_level = ipc_compression_level # Loaded during .reset() and cleared during .close() self._dataset_converter_config: Optional[DatasetConverterConfig] = None @@ -57,8 +58,8 @@ def reset(self, dataset_converter_config: DatasetConverterConfig, log_metadata: # Initialize Arrow IPC writer, optionally with compression # NOTE @DanielDauner: I tried some compression settings, which did not lead to significant reductions. 
         compression = (
-            pa.Codec(self._compression, compression_level=self._compression_level)
-            if self._compression is not None
+            pa.Codec(self._ipc_compression, compression_level=self._ipc_compression_level)
+            if self._ipc_compression is not None
             else None
         )
 
@@ -183,6 +184,12 @@ def write(
             lidar_data: Optional[Any] = None
             if lidar_type in provided_lidars:
                 lidar_data = lidars[lidar_type]
+
+                # Compress the point cloud before writing when LiDARs are stored as binary.
+                if self._dataset_converter_config.lidar_store_option == "binary":
+                    lidar_metadata = self._log_metadata.lidar_metadata[lidar_type]
+                    lidar_data = compress_lidar_with_laz(lidar_data, lidar_metadata)
+
             record_batch_data[f"{lidar_name}_data"] = [lidar_data]
 
 # --------------------------------------------------------------------------------------------------------------
diff --git a/src/py123d/conversion/log_writer/utils/__init__.py b/src/py123d/conversion/log_writer/utils/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/src/py123d/conversion/log_writer/utils/lidar_compression.py b/src/py123d/conversion/log_writer/utils/lidar_compression.py
new file mode 100644
index 00000000..db7749c2
--- /dev/null
+++ b/src/py123d/conversion/log_writer/utils/lidar_compression.py
@@ -0,0 +1,72 @@
+import io
+
+import laspy
+import numpy as np
+import numpy.typing as npt
+
+from py123d.datatypes.sensors.lidar.lidar import LiDAR, LiDARMetadata
+
+
+def compress_lidar_with_laz(point_cloud: npt.NDArray[np.float32], lidar_metadata: LiDARMetadata) -> bytes:
+    """Compress LiDAR point cloud data using the LAZ format.
+
+    :param point_cloud: The LiDAR point cloud data to compress, as a numpy array.
+    :param lidar_metadata: Metadata associated with the LiDAR data.
+    :return: The compressed LAZ binary data.
+    """
+
+    lidar_index = lidar_metadata.lidar_index
+
+    # Create a LAS file in memory, and populate it with point cloud data
+    las = laspy.create(point_format=3, file_version="1.4")
+    las.x = point_cloud[:, lidar_index.X]
+    las.y = point_cloud[:, lidar_index.Y]
+    las.z = point_cloud[:, lidar_index.Z]
+
+    # Add additional LiDAR features if present
+    for feature in lidar_index:
+        if feature.name in ["X", "Y", "Z"]:
+            continue  # Already saved above
+        las.add_extra_dim(laspy.ExtraBytesParams(name=feature.name, type="float32"))
+        las[feature.name] = point_cloud[:, feature.value]
+
+    # Write to memory buffer and return compressed binary data
+    buffer = io.BytesIO()
+    las.write(buffer, do_compress=True)
+    laz_binary = buffer.getvalue()
+
+    return laz_binary
+
+
+def decompress_lidar_from_laz(laz_binary: bytes, lidar_metadata: LiDARMetadata) -> LiDAR:
+    """Decompress LiDAR point cloud data from the LAZ format.
+
+    :param laz_binary: The compressed LAZ binary data.
+    :param lidar_metadata: Metadata associated with the LiDAR data.
+    :raises ValueError: If the LiDAR features are not found in the LAS file.
+    :return: The decompressed LiDAR point cloud, wrapped in a LiDAR object.
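+
+    NOTE: This is the inverse of compress_lidar_with_laz above; it expects the float32
+    extra dimensions that compress_lidar_with_laz writes for every non-XYZ feature.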
+ """ + + lidar_index = lidar_metadata.lidar_index + + # Read the LAS file from memory buffer + buffer = io.BytesIO(laz_binary) + las = laspy.read(buffer) + + # Extract the point cloud data + num_points = len(las.x) + point_cloud = np.zeros((num_points, len(lidar_metadata.lidar_index)), dtype=np.float32) + point_cloud[:, lidar_index.XYZ] = np.vstack((las.x, las.y, las.z)).T + + for feature in lidar_index: + if feature.name in ["X", "Y", "Z"]: + continue # Already loaded above + if hasattr(las, feature.name): + point_cloud[:, feature.value] = las[feature.name] + else: + raise ValueError(f"LiDAR feature {feature.name} not found in LAS file.") + + return LiDAR(point_cloud=point_cloud, metadata=lidar_metadata) diff --git a/src/py123d/conversion/utils/sensor_utils/lidar_index_registry.py b/src/py123d/conversion/utils/sensor_utils/lidar_index_registry.py index 9242366b..35abe287 100644 --- a/src/py123d/conversion/utils/sensor_utils/lidar_index_registry.py +++ b/src/py123d/conversion/utils/sensor_utils/lidar_index_registry.py @@ -60,3 +60,16 @@ class WOPDLidarIndex(LiDARIndex): X = 3 Y = 4 Z = 5 + + +@register_lidar_index +class AVSensorLidarIndex(LiDARIndex): + """Argoverse Sensor LiDAR Indexing Scheme. + + NOTE: The LiDAR files also include 'laser_number', 'offset_ns', which we do not currently include. + """ + + X = 0 + Y = 1 + Z = 2 + INTENSITY = 3 diff --git a/src/py123d/datatypes/scene/arrow/arrow_scene_builder.py b/src/py123d/datatypes/scene/arrow/arrow_scene_builder.py index 451ff9d2..e5840f76 100644 --- a/src/py123d/datatypes/scene/arrow/arrow_scene_builder.py +++ b/src/py123d/datatypes/scene/arrow/arrow_scene_builder.py @@ -21,16 +21,16 @@ class ArrowSceneBuilder(SceneBuilder): def __init__( self, - py123d_logs_root: Optional[Union[str, Path]] = None, - py123d_maps_root: Optional[Union[str, Path]] = None, + logs_root: Optional[Union[str, Path]] = None, + maps_root: Optional[Union[str, Path]] = None, ): - if py123d_logs_root is None: - py123d_logs_root = get_dataset_paths().py123d_logs_root - if py123d_maps_root is None: - py123d_maps_root = get_dataset_paths().py123d_maps_root + if logs_root is None: + logs_root = get_dataset_paths().py123d_logs_root + if maps_root is None: + maps_root = get_dataset_paths().py123d_maps_root - self._logs_root = Path(py123d_logs_root) - self._maps_root = Path(py123d_maps_root) + self._logs_root = Path(logs_root) + self._maps_root = Path(maps_root) def get_scenes(self, filter: SceneFilter, worker: WorkerPool) -> Iterator[AbstractScene]: """See superclass.""" diff --git a/src/py123d/datatypes/scene/arrow/utils/arrow_getters.py b/src/py123d/datatypes/scene/arrow/utils/arrow_getters.py index 6fdf842a..8996b71c 100644 --- a/src/py123d/datatypes/scene/arrow/utils/arrow_getters.py +++ b/src/py123d/datatypes/scene/arrow/utils/arrow_getters.py @@ -1,12 +1,10 @@ -# TODO: rename this file and potentially move somewhere more appropriate. 
-
-import os
 from pathlib import Path
 from typing import Dict, List, Optional
 
 import cv2
 import numpy as np
 import numpy.typing as npt
+from omegaconf import DictConfig
 import pyarrow as pa
 
 from py123d.datatypes.detections.detection import (
@@ -26,11 +24,14 @@
 from py123d.datatypes.vehicle_state.ego_state import EgoStateSE3
 from py123d.datatypes.vehicle_state.vehicle_parameters import VehicleParameters
 from py123d.geometry import BoundingBoxSE3, StateSE3, Vector3D
+from py123d.script.utils.dataset_path_utils import get_dataset_paths
+
+DATASET_PATHS: DictConfig = get_dataset_paths()
 
 DATASET_SENSOR_ROOT: Dict[str, Path] = {
-    "nuplan": Path(os.environ["NUPLAN_DATA_ROOT"]) / "nuplan-v1.1" / "sensor_blobs",
-    "carla": Path(os.environ["CARLA_DATA_ROOT"]) / "sensor_blobs",
-    # "av2-sensor": Path(os.environ["AV2_SENSOR_DATA_ROOT"]) / "sensor_mini",
+    "nuplan": DATASET_PATHS.nuplan_sensor_root,
+    "av2-sensor": DATASET_PATHS.av2_sensor_data_root,
+    "wopd": DATASET_PATHS.wopd_data_root,
 }
 
 
@@ -110,10 +111,13 @@ def get_camera_from_arrow_table(
 
     if isinstance(table_data, str):
         sensor_root = DATASET_SENSOR_ROOT[log_metadata.dataset]
-        full_image_path = sensor_root / table_data
+        assert sensor_root is not None, f"Dataset path for sensor loading not found for dataset: {log_metadata.dataset}"
+        full_image_path = Path(sensor_root) / table_data
         assert full_image_path.exists(), f"Camera file not found: {full_image_path}"
+
         image = cv2.imread(str(full_image_path), cv2.IMREAD_COLOR)
         image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
+
     elif isinstance(table_data, bytes):
         image = cv2.imdecode(np.frombuffer(table_data, np.uint8), cv2.IMREAD_UNCHANGED)
         image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
@@ -133,38 +137,56 @@ def get_lidar_from_arrow_table(
     lidar_type: LiDARType,
     log_metadata: LogMetadata,
 ) -> LiDAR:
-    assert (
-        lidar_type.serialize() in arrow_table.schema.names
-    ), f'"{lidar_type.serialize()}" field not found in Arrow table schema.'
-    lidar_data = arrow_table[lidar_type.serialize()][index].as_py()
-    lidar_metadata = log_metadata.lidar_metadata[lidar_type]
-
-    if isinstance(lidar_data, str):
-        sensor_root = DATASET_SENSOR_ROOT[log_metadata.dataset]
-        full_lidar_path = sensor_root / lidar_data
-        assert full_lidar_path.exists(), f"LiDAR file not found: {full_lidar_path}"
-
-        # NOTE: We move data specific import into if-else block, to avoid data specific import errors
-        if log_metadata.dataset == "nuplan":
-            from py123d.conversion.datasets.nuplan.nuplan_load_sensor import load_nuplan_lidar_from_path
-
-            lidar = load_nuplan_lidar_from_path(full_lidar_path, lidar_metadata)
-        elif log_metadata.dataset == "carla":
-            from py123d.conversion.datasets.carla.carla_load_sensor import load_carla_lidar_from_path
-
-            lidar = load_carla_lidar_from_path(full_lidar_path, lidar_metadata)
-        elif log_metadata.dataset == "wopd":
-            raise NotImplementedError
+    lidar: Optional[LiDAR] = None
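+    # NOTE: LiDAR point clouds are stored in "<lidar_type>_data" columns; logs without
+    # LiDAR data omit these columns, in which case None is returned.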
+ + lidar_column_name = f"{lidar_type.serialize()}_data" + if lidar_column_name in arrow_table.schema.names: + + lidar_data = arrow_table[lidar_column_name][index].as_py() + lidar_metadata = log_metadata.lidar_metadata[lidar_type] + + if isinstance(lidar_data, str): + sensor_root = DATASET_SENSOR_ROOT[log_metadata.dataset] + assert ( + sensor_root is not None + ), f"Dataset path for sensor loading not found for dataset: {log_metadata.dataset}" + full_lidar_path = Path(sensor_root) / lidar_data + assert full_lidar_path.exists(), f"LiDAR file not found: {full_lidar_path}" + + # NOTE: We move data specific import into if-else block, to avoid data specific import errors + if log_metadata.dataset == "nuplan": + from py123d.conversion.datasets.nuplan.nuplan_load_sensor import load_nuplan_lidar_from_path + + lidar = load_nuplan_lidar_from_path(full_lidar_path, lidar_metadata) + elif log_metadata.dataset == "carla": + from py123d.conversion.datasets.carla.carla_load_sensor import load_carla_lidar_from_path + + lidar = load_carla_lidar_from_path(full_lidar_path, lidar_metadata) + elif log_metadata.dataset == "av2-sensor": + from py123d.conversion.datasets.av2.utils.av2_sensor_loading import load_av2_sensor_lidar_pc_from_path + + lidar_pc_dict = load_av2_sensor_lidar_pc_from_path(full_lidar_path) + assert ( + lidar_type in lidar_pc_dict + ), f"LiDAR type {lidar_type} not found in AV2 sensor data at {full_lidar_path}." + lidar = LiDAR(metadata=lidar_metadata, point_cloud=lidar_pc_dict[lidar_type]) + elif log_metadata.dataset == "wopd": + + raise NotImplementedError + else: + raise NotImplementedError(f"Loading LiDAR data for dataset {log_metadata.dataset} is not implemented.") + + elif isinstance(lidar_data, bytes): + from py123d.conversion.log_writer.utils.lidar_compression import decompress_lidar_from_laz + + lidar = decompress_lidar_from_laz(lidar_data, lidar_metadata) + elif lidar_data is None: + lidar = None else: - raise NotImplementedError(f"Loading LiDAR data for dataset {log_metadata.dataset} is not implemented.") + raise NotImplementedError( + f"Only string file paths or bytes for LiDAR data are supported, got {type(lidar_data)}" + ) - else: - # FIXME: This is a temporary fix for WOPD dataset. The lidar data is stored as a flattened array of float32. - # Ideally the lidar index should handle the dimension. But for now we hardcode it here. - lidar_data = np.array(lidar_data, dtype=np.float32).reshape(-1, 3) - lidar_data = np.concatenate([np.zeros_like(lidar_data), lidar_data], axis=-1) - if log_metadata.dataset == "wopd": - lidar = LiDAR(metadata=lidar_metadata, point_cloud=lidar_data.T) - else: - raise NotImplementedError("Only string file paths for lidar data are supported.") return lidar diff --git a/src/py123d/datatypes/sensors/lidar/lidar.py b/src/py123d/datatypes/sensors/lidar/lidar.py index 5f8385a1..3e72e6fb 100644 --- a/src/py123d/datatypes/sensors/lidar/lidar.py +++ b/src/py123d/datatypes/sensors/lidar/lidar.py @@ -20,6 +20,7 @@ class LiDARType(SerialIntEnum): LIDAR_SIDE_LEFT = 4 LIDAR_SIDE_RIGHT = 5 LIDAR_BACK = 6 + LIDAR_DOWN = 7 @dataclass @@ -58,14 +59,14 @@ def xyz(self) -> npt.NDArray[np.float32]: """ Returns the point cloud as an Nx3 array of x, y, z coordinates. """ - return self.point_cloud[self.metadata.lidar_index.XYZ].T + return self.point_cloud[:, self.metadata.lidar_index.XYZ] @property def xy(self) -> npt.NDArray[np.float32]: """ Returns the point cloud as an Nx2 array of x, y coordinates. 
""" - return self.point_cloud[self.metadata.lidar_index.XY].T + return self.point_cloud[:, self.metadata.lidar_index.XY] @property def intensity(self) -> Optional[npt.NDArray[np.float32]]: @@ -74,7 +75,7 @@ def intensity(self) -> Optional[npt.NDArray[np.float32]]: Returns None if intensity is not part of the point cloud. """ if hasattr(self.metadata.lidar_index, "INTENSITY"): - return self.point_cloud[self.metadata.lidar_index.INTENSITY] + return self.point_cloud[:, self.metadata.lidar_index.INTENSITY] return None @property @@ -84,7 +85,7 @@ def range(self) -> Optional[npt.NDArray[np.float32]]: Returns None if range is not part of the point cloud. """ if hasattr(self.metadata.lidar_index, "RANGE"): - return self.point_cloud[self.metadata.lidar_index.RANGE] + return self.point_cloud[:, self.metadata.lidar_index.RANGE] return None @property @@ -94,5 +95,5 @@ def elongation(self) -> Optional[npt.NDArray[np.float32]]: Returns None if elongation is not part of the point cloud. """ if hasattr(self.metadata.lidar_index, "ELONGATION"): - return self.point_cloud[self.metadata.lidar_index.ELONGATION] + return self.point_cloud[:, self.metadata.lidar_index.ELONGATION] return None diff --git a/src/py123d/script/config/common/default_dataset_paths.yaml b/src/py123d/script/config/common/default_dataset_paths.yaml index 0f77918b..acd2e59b 100644 --- a/src/py123d/script/config/common/default_dataset_paths.yaml +++ b/src/py123d/script/config/common/default_dataset_paths.yaml @@ -1,13 +1,19 @@ dataset_paths: - # 123D Defaults - py123d_data_root: ${oc.env:PY123D_DATA_ROOT} - py123d_logs_root: ${oc.env:PY123D_DATA_ROOT}/logs - py123d_maps_root: ${oc.env:PY123D_DATA_ROOT}/maps - py123d_sensors_root: ${oc.env:PY123D_DATA_ROOT}/sensors - + # 123D defaults + py123d_data_root: ${oc.env:PY123D_DATA_ROOT,null} + py123d_logs_root: ${dataset_paths.py123d_data_root}/logs + py123d_maps_root: ${dataset_paths.py123d_data_root}/maps + py123d_sensors_root: ${dataset_paths.py123d_data_root}/sensors # nuPlan defaults - nuplan_data_root: ${oc.env:NUPLAN_DATA_ROOT} - nuplan_maps_root: ${oc.env:NUPLAN_MAPS_ROOT} - nuplan_sensor_root: ${oc.env:NUPLAN_DATA_ROOT}/nuplan-v1.1/sensor_blobs + nuplan_data_root: ${oc.env:NUPLAN_DATA_ROOT,null} + nuplan_maps_root: ${oc.env:NUPLAN_MAPS_ROOT,null} + nuplan_sensor_root: ${oc.env:NUPLAN_SENSOR_ROOT,null} + + # AV2 defaults + av2_data_root: ${oc.env:AV2_DATA_ROOT,null} + av2_sensor_data_root: ${dataset_paths.av2_data_root}/sensor + + # WOPD defaults + wopd_data_root: ${oc.env:WOPD_DATA_ROOT,null} \ No newline at end of file diff --git a/src/py123d/script/config/common/scene_filter/log_scenes.yaml b/src/py123d/script/config/common/scene_filter/log_scenes.yaml index 750bf0f8..726b1d73 100644 --- a/src/py123d/script/config/common/scene_filter/log_scenes.yaml +++ b/src/py123d/script/config/common/scene_filter/log_scenes.yaml @@ -2,7 +2,7 @@ _target_: py123d.datatypes.scene.scene_filter.SceneFilter _convert_: 'all' split_types: null -split_names: ["av2-sensor-mini_train"] +split_names: null log_names: null diff --git a/src/py123d/script/config/conversion/datasets/av2_sensor_dataset.yaml b/src/py123d/script/config/conversion/datasets/av2_sensor_dataset.yaml index 837b0c9b..933862fe 100644 --- a/src/py123d/script/config/conversion/datasets/av2_sensor_dataset.yaml +++ b/src/py123d/script/config/conversion/datasets/av2_sensor_dataset.yaml @@ -2,8 +2,8 @@ av2_sensor_dataset: _target_: py123d.conversion.datasets.av2.av2_sensor_converter.AV2SensorConverter _convert_: 'all' - splits: 
["av2-sensor-mini_train"] - av2_data_root: "/media/nvme1/argoverse" + splits: ["av2-sensor_train"] + av2_data_root: ${dataset_paths.av2_data_root} dataset_converter_config: _target_: py123d.conversion.dataset_converter_config.DatasetConverterConfig @@ -30,8 +30,8 @@ av2_sensor_dataset: camera_store_option: "binary" # "path", "binary", "mp4" # LiDARs - include_lidars: false - lidar_store_option: "path" # "path", "binary" + include_lidars: true + lidar_store_option: "binary" # "path", "binary" # Scenario tag / Route # NOTE: These are only supported for nuPlan. Consider removing or expanding support. diff --git a/src/py123d/script/config/conversion/log_writer/arrow_log_writer.yaml b/src/py123d/script/config/conversion/log_writer/arrow_log_writer.yaml index 9707e057..61a4ead6 100644 --- a/src/py123d/script/config/conversion/log_writer/arrow_log_writer.yaml +++ b/src/py123d/script/config/conversion/log_writer/arrow_log_writer.yaml @@ -2,6 +2,6 @@ _target_: py123d.conversion.log_writer.arrow_log_writer.ArrowLogWriter _convert_: 'all' -logs_root: ${py123d_logs_root} -compression: null # Compression method for ipc files. Options: None, 'lz4', 'zstd' -compression_level: null # Compression level for ipc files. Options: None, or depending on compression method +logs_root: ${dataset_paths.py123d_logs_root} +ipc_compression: null # Compression method for ipc files. Options: None, 'lz4', 'zstd' +ipc_compression_level: null # Compression level for ipc files. Options: None, or depending on compression method diff --git a/src/py123d/script/run_viser.py b/src/py123d/script/run_viser.py index 32206141..d1809fcd 100644 --- a/src/py123d/script/run_viser.py +++ b/src/py123d/script/run_viser.py @@ -18,16 +18,20 @@ @hydra.main(config_path=CONFIG_PATH, config_name=CONFIG_NAME, version_base=None) def main(cfg: DictConfig) -> None: + # Initialize dataset paths setup_dataset_paths(cfg.dataset_paths) + # Build worker worker = build_worker(cfg) + # Build scene filter and scene builder scene_filter = build_scene_filter(cfg.scene_filter) - scene_builder = build_scene_builder(cfg.scene_builder) + # Get scenes from scene builder scenes = scene_builder.get_scenes(scene_filter, worker=worker) + # Launch Viser viewer with the scenes ViserViewer(scenes=scenes) diff --git a/src/py123d/visualization/viser/elements/map_elements.py b/src/py123d/visualization/viser/elements/map_elements.py index c54e5e07..b1e0e752 100644 --- a/src/py123d/visualization/viser/elements/map_elements.py +++ b/src/py123d/visualization/viser/elements/map_elements.py @@ -52,7 +52,6 @@ def add_map_to_viser_server( current_ego_state, viser_config, ) - print("Requeried map objects for visualization.") if map_trimesh_dict is not None: for map_layer, mesh in map_trimesh_dict.items(): diff --git a/src/py123d/visualization/viser/viser_config.py b/src/py123d/visualization/viser/viser_config.py index 40d08b5c..843a008d 100644 --- a/src/py123d/visualization/viser/viser_config.py +++ b/src/py123d/visualization/viser/viser_config.py @@ -22,6 +22,7 @@ LiDARType.LIDAR_SIDE_LEFT, LiDARType.LIDAR_SIDE_RIGHT, LiDARType.LIDAR_BACK, + LiDARType.LIDAR_DOWN, ] @@ -66,7 +67,7 @@ class ViserConfig: camera_gui_image_scale: float = 0.25 # Resize factor for the camera image shown in the GUI (<1.0 for speed) # LiDAR - lidar_visible: bool = False + lidar_visible: bool = True lidar_types: List[LiDARType] = field(default_factory=lambda: all_lidar_types.copy()) lidar_point_size: float = 0.05 lidar_point_shape: Literal["square", "diamond", "circle", "rounded", "sparkle"] = "circle" diff 
--git a/test_viser.py b/test_viser.py index 65078c45..d395dfd9 100644 --- a/test_viser.py +++ b/test_viser.py @@ -4,13 +4,14 @@ from py123d.datatypes.scene.scene_filter import SceneFilter from py123d.datatypes.sensors.camera.pinhole_camera import PinholeCameraType + if __name__ == "__main__": - splits = ["nuplan-mini_test", "nuplan-mini_train", "nuplan-mini_val"] + # splits = ["nuplan-mini_test", "nuplan-mini_train", "nuplan-mini_val"] # splits = ["nuplan_private_test"] # splits = ["carla"] # splits = ["wopd_val"] - # splits = ["av2-sensor-mini_train"] + splits = ["av2-sensor_train"] # splits = ["pandaset_test", "pandaset_val", "pandaset_train"] log_names = None scene_uuids = None From 81f9609de5f244d09ef1d82d3b4a761b0843cd17 Mon Sep 17 00:00:00 2001 From: Daniel Dauner Date: Fri, 17 Oct 2025 23:35:48 +0200 Subject: [PATCH 094/145] Remove most notebooks, except bev visualization (#42) --- notebooks/{viz => }/bev_matplotlib.ipynb | 3 +- notebooks/scene_rendering.ipynb | 179 ----- notebooks/scene_sensor_loading.ipynb | 123 ---- notebooks/tools/merge_videos.ipynb | 137 ---- notebooks/tools/plot_map_sizes.ipynb | 67 -- notebooks/viz/bev_matplotlib_prediction.ipynb | 246 ------- notebooks/viz/camera_matplotlib.ipynb | 397 ----------- notebooks/viz/log_rendering.ipynb | 105 --- notebooks/viz/video_example.ipynb | 319 --------- notebooks/viz/viser_testing_v2_scene.ipynb | 94 --- .../waymo_perception/lidar_testing.ipynb | 301 -------- notebooks/waymo_perception/map_testing.ipynb | 653 ------------------ notebooks/waymo_perception/testing.ipynb | 280 -------- 13 files changed, 1 insertion(+), 2903 deletions(-) rename notebooks/{viz => }/bev_matplotlib.ipynb (98%) delete mode 100644 notebooks/scene_rendering.ipynb delete mode 100644 notebooks/scene_sensor_loading.ipynb delete mode 100644 notebooks/tools/merge_videos.ipynb delete mode 100644 notebooks/tools/plot_map_sizes.ipynb delete mode 100644 notebooks/viz/bev_matplotlib_prediction.ipynb delete mode 100644 notebooks/viz/camera_matplotlib.ipynb delete mode 100644 notebooks/viz/log_rendering.ipynb delete mode 100644 notebooks/viz/video_example.ipynb delete mode 100644 notebooks/viz/viser_testing_v2_scene.ipynb delete mode 100644 notebooks/waymo_perception/lidar_testing.ipynb delete mode 100644 notebooks/waymo_perception/map_testing.ipynb delete mode 100644 notebooks/waymo_perception/testing.ipynb diff --git a/notebooks/viz/bev_matplotlib.ipynb b/notebooks/bev_matplotlib.ipynb similarity index 98% rename from notebooks/viz/bev_matplotlib.ipynb rename to notebooks/bev_matplotlib.ipynb index 7e23f782..18c19e49 100644 --- a/notebooks/viz/bev_matplotlib.ipynb +++ b/notebooks/bev_matplotlib.ipynb @@ -49,7 +49,6 @@ "# log_names = None\n", "\n", "\n", - "# generator = Path(\"/home/daniel/py123d_workspace/data/logs\").iterdir()\n", "\n", "log_names = None\n", "scene_uuids = None\n", @@ -64,7 +63,7 @@ " shuffle=True,\n", " # camera_types=[CameraType.CAM_F0],\n", ")\n", - "scene_builder = ArrowSceneBuilder(\"/home/daniel/py123d_workspace/data\")\n", + "scene_builder = ArrowSceneBuilder()\n", "worker = Sequential()\n", "# worker = RayDistributed()\n", "scenes = scene_builder.get_scenes(scene_filter, worker)\n", diff --git a/notebooks/scene_rendering.ipynb b/notebooks/scene_rendering.ipynb deleted file mode 100644 index 53d95c86..00000000 --- a/notebooks/scene_rendering.ipynb +++ /dev/null @@ -1,179 +0,0 @@ -{ - "cells": [ - { - "cell_type": "code", - "execution_count": null, - "id": "0", - "metadata": {}, - "outputs": [], - "source": [ - "from 
py123d.dataset.scene.scene_builder import ArrowSceneBuilder\n", - "from py123d.dataset.scene.scene_filter import SceneFilter\n", - "\n", - "from py123d.common.multithreading.worker_sequential import Sequential\n", - "\n", - "# from py123d.common.multithreading.worker_ray import RayDistributed" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "1", - "metadata": {}, - "outputs": [], - "source": [ - "import os, psutil\n", - "\n", - "\n", - "def print_memory_usage():\n", - " process = psutil.Process(os.getpid())\n", - " memory_info = process.memory_info()\n", - " print(f\"Memory usage: {memory_info.rss / 1024 ** 2:.2f} MB\")\n", - "\n", - "split = \"nuplan_mini_val\"\n", - "# split = \"carla\"\n", - "\n", - "# log_names = [\"2021.06.07.12.54.00_veh-35_01843_02314\"]\n", - "scene_uuids = None\n", - "# scene_uuids = [\"2283aea39bc1505e\"]\n", - "log_names = None\n", - "\n", - "scene_filter = SceneFilter(\n", - " split_names=[split],\n", - " log_names=log_names,\n", - " scene_uuids=scene_uuids,\n", - " duration_s=15.1,\n", - " history_s=1.0,\n", - ")\n", - "scene_builder = ArrowSceneBuilder(\"/home/daniel/py123d_workspace/data\")\n", - "worker = Sequential()\n", - "# worker = RayDistributed()\n", - "scenes = scene_builder.get_scenes(scene_filter, worker)\n", - "\n", - "print(\"num scenes found\", len(scenes))" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "2", - "metadata": {}, - "outputs": [], - "source": [ - "from typing import Tuple\n", - "py123d.datatypes.detections.detection import BoxDetection\n", - "py123d.datatypes.detections.detection_types import DYNAMIC_DETECTION_TYPES, STATIC_DETECTION_TYPES\n", - "from py123d.geometry import StateSE2\n", - "from py123d.geometry.transform.tranform_2d import translate_along_yaw\n", - "from py123d.geometry.vector import Vector2D\n", - "from py123d.visualization.matplotlib.observation import (\n", - " add_box_detections_to_ax,\n", - " add_default_map_on_ax,\n", - " add_ego_vehicle_to_ax,\n", - " add_traffic_lights_to_ax,\n", - ")\n", - "from py123d.dataset.scene.abstract_scene import AbstractScene\n", - "\n", - "import matplotlib.pyplot as plt\n", - "\n", - "\n", - "def _plot_scene_on_ax(ax: plt.Axes, scene: AbstractScene, iteration: int = 0, radius: float = 80) -> plt.Axes:\n", - "\n", - " ego_vehicle_state = scene.get_ego_state_at_iteration(iteration)\n", - " box_detections = scene.get_box_detections_at_iteration(iteration)\n", - " traffic_light_detections = scene.get_traffic_light_detections_at_iteration(iteration)\n", - " map_api = scene.get_map_api()\n", - "\n", - " point_2d = ego_vehicle_state.bounding_box.center.state_se2.point_2d\n", - " add_default_map_on_ax(ax, map_api, point_2d, radius=radius, route_lane_group_ids=None)\n", - " # add_traffic_lights_to_ax(ax, traffic_light_detections, map_api)\n", - "\n", - " add_box_detections_to_ax(ax, box_detections)\n", - " add_ego_vehicle_to_ax(ax, ego_vehicle_state)\n", - "\n", - " ax.set_xlim(point_2d.x - radius, point_2d.x + radius)\n", - " ax.set_ylim(point_2d.y - radius, point_2d.y + radius)\n", - "\n", - " ax.set_aspect(\"equal\", adjustable=\"box\")\n", - " return ax\n", - "\n", - "\n", - "iteration = 20\n", - "scene = scenes[20]\n", - "\n", - "fig, ax = plt.subplots(figsize=(10, 10))\n", - "_plot_scene_on_ax(ax, scene, iteration, 80)\n", - "\n", - "box_detections = scene.get_box_detections_at_iteration(iteration)\n", - "\n", - "\n", - "def _get_dxy(box_detection: BoxDetection) -> Tuple[float, float]:\n", - " \"\"\"Get the change in x and y coordinates 
from the box detection.\"\"\"\n", - " center = box_detection.center.state_se2\n", - " # endpoint_x = translate_along_yaw(center, Vector2D(abs(box_detection.velocity.x), 0.0))\n", - " # endpoint_y = translate_along_yaw(center, Vector2D(0.0, abs(box_detection.velocity.y)))\n", - " # print(box_detection.velocity.x, box_detection.velocity.y)\n", - "\n", - " endpoint_x = StateSE2(center.x + box_detection.velocity.x, center.y, center.yaw)\n", - " endpoint_y = StateSE2(center.x, center.y + box_detection.velocity.y, center.yaw)\n", - "\n", - " return endpoint_x, endpoint_y\n", - "\n", - "\n", - "for box_detection in box_detections:\n", - " if box_detection.metadata.detection_type in STATIC_DETECTION_TYPES:\n", - " continue\n", - " endpoint_x, endpoint_y = _get_dxy(box_detection)\n", - " ax.annotate(\n", - " \"\",\n", - " xytext=(box_detection.center.state_se2.point_2d.x, box_detection.center.state_se2.point_2d.y),\n", - " xy=(endpoint_x.x, endpoint_x.y),\n", - " arrowprops=dict(arrowstyle=\"->\"),\n", - " )\n", - " ax.annotate(\n", - " \"\",\n", - " xytext=(box_detection.center.state_se2.point_2d.x, box_detection.center.state_se2.point_2d.y),\n", - " xy=(endpoint_y.x, endpoint_y.y),\n", - " arrowprops=dict(arrowstyle=\"->\"),\n", - " )" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "3", - "metadata": {}, - "outputs": [], - "source": [] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "4", - "metadata": {}, - "outputs": [], - "source": [] - } - ], - "metadata": { - "kernelspec": { - "display_name": "py123d", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.12.11" - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} diff --git a/notebooks/scene_sensor_loading.ipynb b/notebooks/scene_sensor_loading.ipynb deleted file mode 100644 index 49d7e34e..00000000 --- a/notebooks/scene_sensor_loading.ipynb +++ /dev/null @@ -1,123 +0,0 @@ -{ - "cells": [ - { - "cell_type": "code", - "execution_count": null, - "id": "0", - "metadata": {}, - "outputs": [], - "source": [ - "import matplotlib.pyplot as plt\n", - "from py123d.dataset.scene.scene_builder import ArrowSceneBuilder\n", - "from py123d.dataset.scene.scene_filter import SceneFilter\n", - "\n", - "from py123d.common.multithreading.worker_sequential import Sequential\n", - "# from py123d.common.multithreading.worker_ray impo\n", - "# rt RayDistribute\n", - "\n", - "\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "1", - "metadata": {}, - "outputs": [], - "source": [ - "import os, psutil\n", - "\n", - "\n", - "def print_memory_usage():\n", - " process = psutil.Process(os.getpid())\n", - " memory_info = process.memory_info()\n", - " print(f\"Memory usage: {memory_info.rss / 1024 ** 2:.2f} MB\")\n", - "\n", - "\n", - "split = \"nuplan_private_test\"\n", - "\n", - "log_names = [\"2021.07.01.21.22.09_veh-14_00016_00656\"]\n", - "scene_uuids = None\n", - "log_names = None\n", - "\n", - "scene_filter = SceneFilter(\n", - " split_names=[split],\n", - " log_names=log_names,\n", - " scene_uuids=scene_uuids,\n", - " duration_s=15.1,\n", - " history_s=1.0,\n", - " timestamp_threshold_s=15.0,\n", - ")\n", - "scene_builder = ArrowSceneBuilder(\"/home/daniel/py123d_workspace/data\")\n", - "worker = Sequential()\n", - "# worker = RayDistributed()\n", - "scenes = 
scene_builder.get_scenes(scene_filter, worker)\n", - "\n", - "print(\"num scenes found\", len(scenes))" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "2", - "metadata": {}, - "outputs": [], - "source": [ - "from py123d.common.datatypes.sensor.camera import CameraType\n", - "from py123d.dataset.scene.arrow_scene import ArrowScene\n", - "\n", - "scene: ArrowScene = scenes[12]\n", - "scene.open()\n", - "\n", - "camera = scene.get_camera_at_iteration(150, CameraType.CAM_B0)\n", - "\n", - "plt.imshow(camera.image)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "3", - "metadata": {}, - "outputs": [], - "source": [] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "4", - "metadata": {}, - "outputs": [], - "source": [] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "5", - "metadata": {}, - "outputs": [], - "source": [] - } - ], - "metadata": { - "kernelspec": { - "display_name": "py123d", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.12.11" - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} diff --git a/notebooks/tools/merge_videos.ipynb b/notebooks/tools/merge_videos.ipynb deleted file mode 100644 index 92629b3a..00000000 --- a/notebooks/tools/merge_videos.ipynb +++ /dev/null @@ -1,137 +0,0 @@ -{ - "cells": [ - { - "cell_type": "code", - "execution_count": null, - "id": "0", - "metadata": {}, - "outputs": [], - "source": [ - "\n", - "import subprocess\n", - "import os\n", - "import tempfile" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "1", - "metadata": {}, - "outputs": [], - "source": [ - "from pathlib import Path\n", - "\n", - "\n", - "def merge_mp4_with_ffmpeg(input_files, output_file):\n", - " \"\"\"\n", - " Merge MP4 files using FFmpeg directly.\n", - " Requires FFmpeg to be installed on the system.\n", - " \n", - " Args:\n", - " input_files (list): List of paths to input MP4 files\n", - " output_file (str): Path for the output merged MP4 file\n", - " \"\"\"\n", - " try:\n", - " # Create a temporary file list for FFmpeg\n", - " with tempfile.NamedTemporaryFile(mode='w', suffix='.txt', delete=False) as f:\n", - " for file in input_files:\n", - " if os.path.exists(file):\n", - " # Escape single quotes and write to file list\n", - " escaped_file = file.replace(\"'\", \"'\\\"'\\\"'\")\n", - " f.write(f\"file '{escaped_file}'\\n\")\n", - " else:\n", - " print(f\"Warning: File not found: {file}\")\n", - " \n", - " filelist_path = f.name\n", - " \n", - " # FFmpeg command to concatenate videos\n", - " cmd = [\n", - " 'ffmpeg',\n", - " '-f', 'concat',\n", - " '-safe', '0',\n", - " '-i', filelist_path,\n", - " '-c', 'copy', # Copy streams without re-encoding (faster)\n", - " '-y', # Overwrite output file if it exists\n", - " output_file\n", - " ]\n", - " \n", - " print(\"Merging videos with FFmpeg...\")\n", - " print(f\"Command: {' '.join(cmd)}\")\n", - " \n", - " # Run FFmpeg\n", - " result = subprocess.run(cmd, capture_output=True, text=True)\n", - " \n", - " # Clean up temporary file\n", - " os.unlink(filelist_path)\n", - " \n", - " if result.returncode == 0:\n", - " print(f\"Successfully merged videos into {output_file}\")\n", - " return True\n", - " else:\n", - " print(f\"FFmpeg error: {result.stderr}\")\n", - " return False\n", - " \n", - 
" except FileNotFoundError:\n", - " print(\"Error: FFmpeg not found. Please install FFmpeg first.\")\n", - " print(\"Download from: https://ffmpeg.org/download.html\")\n", - " return False\n", - " except Exception as e:\n", - " print(f\"Error during merging: {str(e)}\")\n", - " return False\n", - "\n", - "def check_ffmpeg():\n", - " \"\"\"Check if FFmpeg is available on the system.\"\"\"\n", - " try:\n", - " result = subprocess.run(['ffmpeg', '-version'], capture_output=True)\n", - " return result.returncode == 0\n", - " except FileNotFoundError:\n", - " return False\n", - "\n", - "# List your MP4 files in the order you want them merged\n", - "video_folder = Path(\"/home/daniel/py123d_logs_videos/wopd_train\")\n", - "video_files = [str(file) for file in video_folder.glob(\"*.mp4\") if file.is_file()]\n", - "\n", - "\n", - "output_filename = str(video_folder / \"merged_video.mp4\")\n", - "\n", - "# Merge the videos\n", - "success = merge_mp4_with_ffmpeg(video_files, output_filename)\n", - "\n", - "if success:\n", - " print(\"Video merging completed successfully!\")\n", - "else:\n", - " print(\"Video merging failed!\")" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "2", - "metadata": {}, - "outputs": [], - "source": [] - } - ], - "metadata": { - "kernelspec": { - "display_name": "py123d", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.12.11" - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} diff --git a/notebooks/tools/plot_map_sizes.ipynb b/notebooks/tools/plot_map_sizes.ipynb deleted file mode 100644 index c37c0581..00000000 --- a/notebooks/tools/plot_map_sizes.ipynb +++ /dev/null @@ -1,67 +0,0 @@ -{ - "cells": [ - { - "cell_type": "code", - "execution_count": null, - "id": "0", - "metadata": {}, - "outputs": [], - "source": [ - "import os\n", - "\n", - "import matplotlib.pyplot as plt\n", - "\n", - "folder_path = \"/home/daniel/py123d_workspace/data/maps\" # Replace with your folder path\n", - "\n", - "# Get all files in the folder (not subfolders)\n", - "files = sorted([f for f in os.listdir(folder_path) if os.path.isfile(os.path.join(folder_path, f))])\n", - "sizes = [os.path.getsize(os.path.join(folder_path, f)) / 1024**2 for f in files]\n", - "\n", - "plt.figure(figsize=(10, 6))\n", - "plt.bar(files, sizes)\n", - "plt.ylabel(\"File Size (MB)\")\n", - "plt.title(\"Map Sizes (CARLA/nuPlan)\")\n", - "plt.xticks(rotation=45, ha=\"right\")\n", - "plt.tight_layout()\n", - "plt.show()" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "1", - "metadata": {}, - "outputs": [], - "source": [] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "2", - "metadata": {}, - "outputs": [], - "source": [] - } - ], - "metadata": { - "kernelspec": { - "display_name": "py123d", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.12.11" - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} diff --git a/notebooks/viz/bev_matplotlib_prediction.ipynb b/notebooks/viz/bev_matplotlib_prediction.ipynb deleted file mode 100644 index 3dd4b920..00000000 --- 
a/notebooks/viz/bev_matplotlib_prediction.ipynb +++ /dev/null @@ -1,246 +0,0 @@ -{ - "cells": [ - { - "cell_type": "code", - "execution_count": null, - "id": "0", - "metadata": {}, - "outputs": [], - "source": [ - "from py123d.dataset.scene.scene_builder import ArrowSceneBuilder\n", - "from py123d.dataset.scene.scene_filter import SceneFilter\n", - "\n", - "from py123d.common.multithreading.worker_sequential import Sequential\n", - "from py123d.common.datatypes.sensor.camera import CameraType" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "1", - "metadata": {}, - "outputs": [], - "source": [ - "# split = \"nuplan_private_test\"\n", - "# log_names = [\"2021.09.29.17.35.58_veh-44_00066_00432\"]\n", - "\n", - "\n", - "# splits = [\"wopd_train\"]\n", - "# splits = [\"carla\"]\n", - "# splits = [\"nuplan_private_test\"]\n", - "splits = [\"av2-sensor-mini_train\"]\n", - "# log_names = None\n", - "\n", - "\n", - "log_names = None\n", - "scene_uuids = None\n", - "\n", - "scene_filter = SceneFilter(\n", - " split_names=splits,\n", - " log_names=log_names,\n", - " scene_uuids=scene_uuids,\n", - " duration_s=8.0,\n", - " history_s=0.0,\n", - " timestamp_threshold_s=4.0,\n", - " shuffle=True,\n", - " # camera_types=[CameraType.CAM_F0],\n", - ")\n", - "scene_builder = ArrowSceneBuilder(\"/home/daniel/py123d_workspace/data\")\n", - "worker = Sequential()\n", - "# worker = RayDistributed()\n", - "scenes = scene_builder.get_scenes(scene_filter, worker)\n", - "\n", - "print(f\"Found {len(scenes)} scenes\")" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "2", - "metadata": {}, - "outputs": [], - "source": [ - "from pathlib import Path\n", - "from typing import List, Optional, Tuple\n", - "import matplotlib.pyplot as plt\n", - "import numpy as np\n", - "from py123d.geometry.base import Point2D\n", - "from py123d.visualization.color.color import BLACK, DARK_GREY, DARKER_GREY, LIGHT_GREY, NEW_TAB_10, TAB_10\n", - "from py123d.visualization.color.config import PlotConfig\n", - "from py123d.visualization.color.default import CENTERLINE_CONFIG, MAP_SURFACE_CONFIG, ROUTE_CONFIG\n", - "from py123d.visualization.matplotlib.observation import (\n", - " add_box_detections_to_ax,\n", - " add_box_future_detections_to_ax,\n", - " add_default_map_on_ax,\n", - " add_ego_vehicle_to_ax,\n", - " add_traffic_lights_to_ax,\n", - ")\n", - "from py123d.visualization.matplotlib.utils import add_shapely_linestring_to_ax, add_shapely_polygon_to_ax\n", - "from py123d.dataset.maps.abstract_map import AbstractMap\n", - "from py123d.dataset.maps.abstract_map_objects import AbstractLane, AbstractLaneGroup\n", - "from py123d.dataset.maps.map_datatypes import MapLayer\n", - "from py123d.dataset.scene.abstract_scene import AbstractScene\n", - "\n", - "\n", - "\n", - "def _plot_scene_on_ax(ax: plt.Axes, scene: AbstractScene, iteration: int = 0, radius: float = 80) -> plt.Axes:\n", - "\n", - " ego_vehicle_state = scene.get_ego_state_at_iteration(iteration)\n", - " box_detections = scene.get_box_detections_at_iteration(iteration)\n", - "\n", - " point_2d = ego_vehicle_state.bounding_box.center.state_se2.point_2d\n", - " # add_debug_map_on_ax(ax, scene.get_map_api(), point_2d, radius=radiuss, route_lane_group_ids=None)\n", - " add_default_map_on_ax(ax, scene.get_map_api(), point_2d, radius=radius, route_lane_group_ids=None)\n", - " # add_traffic_lights_to_ax(ax, traffic_light_detections, scene.get_map_api())\n", - "\n", - " add_box_future_detections_to_ax(ax, scene, iteration=iteration)\n", - " 
add_box_detections_to_ax(ax, box_detections)\n", - " add_ego_vehicle_to_ax(ax, ego_vehicle_state)\n", - "\n", - " zoom = 1.0\n", - " ax.set_xlim(point_2d.x - radius * zoom, point_2d.x + radius * zoom)\n", - " ax.set_ylim(point_2d.y - radius * zoom, point_2d.y + radius * zoom)\n", - "\n", - " ax.set_aspect(\"equal\", adjustable=\"box\")\n", - " return ax\n", - "\n", - "\n", - "def plot_scene_at_iteration(\n", - " scene: AbstractScene, iteration: int = 0, radius: float = 80\n", - ") -> Tuple[plt.Figure, plt.Axes]:\n", - "\n", - " size = 10\n", - "\n", - " fig, ax = plt.subplots(figsize=(size, size))\n", - " _plot_scene_on_ax(ax, scene, iteration, radius)\n", - " return fig, ax\n", - "\n", - "\n", - "scene_index = 0\n", - "\n", - "for i, scene in enumerate(scenes):\n", - "\n", - " iteration = 0\n", - " fig, ax = plot_scene_at_iteration(scenes[i], iteration=iteration, radius=60)\n", - " Path(f\"/home/daniel/examples/{splits[0]}/\").mkdir(exist_ok=True, parents=True)\n", - " ax.set_xticks([])\n", - " ax.set_yticks([])\n", - " fig.tight_layout()\n", - " fig.savefig(\n", - " f\"/home/daniel/examples/{splits[0]}/{scene.log_name}_{i}.pdf\",\n", - " dpi=300,\n", - " bbox_inches=\"tight\",\n", - " )\n", - "\n", - "# camera = scenes[scene_index].get_camera_at_iteration(\n", - "# iteration=iteration, camera_type=CameraType.CAM_F0\n", - "# )\n", - "\n", - "# plt.imshow(camera.image, cmap=\"gray\", vmin=0, vmax=255)\n", - "# # fig.savefig(f\"/home/daniel/scene_{scene_index}_iteration_1.pdf\", dpi=300, bbox_inches=\"tight\")\n", - "\n", - "# scenes[scene_index].log_name" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "3", - "metadata": {}, - "outputs": [], - "source": [ - "scene = scenes[scene_index]\n", - "\n", - "\n", - "scene.get_camera_at_iteration(camera_type=CameraType.CAM_F0, iteration=0)\n", - "\n", - "\n", - "\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "4", - "metadata": {}, - "outputs": [], - "source": [ - "from py123d.dataset.conversion.map.road_edge.road_edge_2d_utils import get_road_edge_linear_rings\n", - "from py123d.dataset.maps.gpkg.gpkg_map import GPKGMap\n", - "\n", - "\n", - "map_api: GPKGMap = scenes[scene_index].map_api\n", - "\n", - "drivable_polygons = map_api._gpd_dataframes[MapLayer.LANE]\n", - "\n", - "\n", - "\n", - "linear_rings = get_road_edge_linear_rings(drivable_polygons.geometry.tolist())\n", - "rings_lengths = [ring.length for ring in linear_rings]\n", - "max_length_idx = np.argmax(rings_lengths)\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "size = 16\n", - "fig, ax = plt.subplots(figsize=(size, size))\n", - "\n", - "for idx, ring in enumerate(linear_rings):\n", - " if idx == max_length_idx:\n", - " ax.plot(*ring.xy, color=\"black\", linewidth=2, label=\"Longest Road Edge\")\n", - " else:\n", - " ax.plot(*ring.xy)\n", - "\n", - "\n", - "ax.set_aspect(\"equal\", adjustable=\"box\")" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "5", - "metadata": {}, - "outputs": [], - "source": [] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "6", - "metadata": {}, - "outputs": [], - "source": [] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "7", - "metadata": {}, - "outputs": [], - "source": [] - } - ], - "metadata": { - "kernelspec": { - "display_name": "py123d", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - 
"name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.12.11" - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} diff --git a/notebooks/viz/camera_matplotlib.ipynb b/notebooks/viz/camera_matplotlib.ipynb deleted file mode 100644 index 97b38a12..00000000 --- a/notebooks/viz/camera_matplotlib.ipynb +++ /dev/null @@ -1,397 +0,0 @@ -{ - "cells": [ - { - "cell_type": "code", - "execution_count": null, - "id": "0", - "metadata": {}, - "outputs": [], - "source": [ - "from typing import Tuple\n", - "\n", - "import matplotlib.pyplot as plt\n", - "\n", - "from py123d.common.multithreading.worker_sequential import Sequential\n", - "\n", - "from py123d.dataset.scene.scene_builder import ArrowSceneBuilder\n", - "from py123d.dataset.scene.scene_filter import SceneFilter\n", - "from py123d.dataset.scene.abstract_scene import AbstractScene\n", - "\n", - "from typing import Dict\n", - "from py123d.common.datatypes.sensor.camera import CameraType\n", - "from py123d.visualization.matplotlib.camera import add_camera_ax\n", - "from py123d.visualization.matplotlib.camera import add_box_detections_to_camera_ax" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "1", - "metadata": {}, - "outputs": [], - "source": [ - "# split = \"nuplan_private_test\"\n", - "# log_names = [\"2021.09.29.17.35.58_veh-44_00066_00432\"]\n", - "\n", - "\n", - "\n", - "\n", - "# splits = [\"carla\"]\n", - "# splits = [\"nuplan_private_test\"]\n", - "# splits = [\"wopd_train\"]\n", - "splits = [\"av2-sensor-mini_train\"]\n", - "# log_names = None\n", - "\n", - "\n", - "\n", - "# splits = [\"nuplan_private_test\"]\n", - "log_names = None\n", - "\n", - "scene_uuids = None\n", - "\n", - "scene_filter = SceneFilter(\n", - " split_names=splits,\n", - " log_names=log_names,\n", - " scene_uuids=scene_uuids,\n", - " duration_s=15,\n", - " history_s=0.0,\n", - " timestamp_threshold_s=15,\n", - " shuffle=False,\n", - " camera_types=[CameraType.CAM_F0],\n", - ")\n", - "scene_builder = ArrowSceneBuilder(\"/home/daniel/py123d_workspace/data\")\n", - "worker = Sequential()\n", - "# worker = RayDistributed()\n", - "scenes = scene_builder.get_scenes(scene_filter, worker)\n", - "\n", - "print(f\"Found {len(scenes)} scenes\")" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "2", - "metadata": {}, - "outputs": [], - "source": [] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "3", - "metadata": {}, - "outputs": [], - "source": [ - "\n", - "\n", - "\n", - "def plot_cameras_frame(scene: AbstractScene, iteration: int) -> Tuple[plt.Figure, plt.Axes]:\n", - " \"\"\"\n", - " Plots 8x cameras and birds-eye-view visualization in 3x3 grid\n", - " :param scene: navsim scene dataclass\n", - " :param frame_idx: index of selected frame\n", - " :return: figure and ax object of matplotlib\n", - " \"\"\"\n", - " scale = 2\n", - " fig, ax = plt.subplots(2, 3, figsize=(scale * 11.5, scale * 4.3))\n", - "\n", - " camera_ax_assignment: Dict[CameraType, plt.Axes] = {\n", - " CameraType.CAM_L0: ax[0, 0],\n", - " CameraType.CAM_F0: ax[0, 1],\n", - " CameraType.CAM_R0: ax[0, 2],\n", - " CameraType.CAM_L2: ax[1, 0],\n", - " CameraType.CAM_B0: ax[1, 1],\n", - " CameraType.CAM_R2: ax[1, 2],\n", - " }\n", - "\n", - " for camera_type, camera_ax in camera_ax_assignment.items():\n", - " assert camera_type in scene.available_camera_types\n", - " camera = scene.get_camera_at_iteration(iteration, camera_type)\n", - " if camera is not None:\n", - " add_camera_ax(camera_ax, camera)\n", 
- "\n", - " fig.tight_layout()\n", - " fig.subplots_adjust(wspace=0.01, hspace=0.01, left=0.01, right=0.99, top=0.99, bottom=0.01)\n", - "\n", - " return fig, ax\n", - "\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "4", - "metadata": {}, - "outputs": [], - "source": [ - "def _add_camera_with_detections(ax: plt.Axes, scene: AbstractScene, iteration: int) -> plt.Axes:\n", - "\n", - " camera_ax_assignment: Dict[CameraType, plt.Axes] = {\n", - " CameraType.CAM_L0: ax[0, 0],\n", - " CameraType.CAM_F0: ax[0, 1],\n", - " CameraType.CAM_R0: ax[0, 2],\n", - " CameraType.CAM_L2: ax[1, 0],\n", - " CameraType.CAM_B0: ax[1, 1],\n", - " CameraType.CAM_R2: ax[1, 2],\n", - " }\n", - "\n", - " box_detections = scene.get_box_detections_at_iteration(iteration)\n", - " ego_state_se3 = scene.get_ego_state_at_iteration(iteration)\n", - " for camera_type, camera_ax in camera_ax_assignment.items():\n", - " if camera_type not in scene.available_camera_types:\n", - " continue\n", - " # assert camera_type in scene.available_camera_types\n", - " camera = scene.get_camera_at_iteration(iteration, camera_type)\n", - " if camera is not None:\n", - " add_box_detections_to_camera_ax(camera_ax, camera, box_detections, ego_state_se3)\n", - "\n", - " return ax\n", - "\n", - "\n", - "def plot_cameras_with_detections(scene: AbstractScene, iteration: int) -> Tuple[plt.Figure, plt.Axes]:\n", - " \"\"\"\n", - " Plots 8x cameras and birds-eye-view visualization in 3x3 grid\n", - " :param scene: navsim scene dataclass\n", - " :param frame_idx: index of selected frame\n", - " :return: figure and ax object of matplotlib\n", - " \"\"\"\n", - " scale = 1\n", - " fig, ax = plt.subplots(2, 3, figsize=(scale * 11.5, scale * 4.3))\n", - " _add_camera_with_detections(ax, scene, iteration)\n", - "\n", - " fig.tight_layout()\n", - " fig.subplots_adjust(wspace=0.01, hspace=0.01, left=0.01, right=0.99, top=0.99, bottom=0.01)\n", - "\n", - "\n", - "plot_cameras_with_detections(scenes[9], iteration=20)\n", - "\n", - "\n", - "# def _add_camera_with_detections(ax: plt.Axes, scene: AbstractScene, iteration: int) -> plt.Axes:\n", - "\n", - "# camera_ax_assignment: Dict[CameraType, plt.Axes] = {CameraType.CAM_F0: ax}\n", - "\n", - "# box_detections = scene.get_box_detections_at_iteration(iteration)\n", - "# ego_state_se3 = scene.get_ego_state_at_iteration(iteration)\n", - "# for camera_type, camera_ax in camera_ax_assignment.items():\n", - "# assert camera_type in scene.available_camera_types\n", - "# camera = scene.get_camera_at_iteration(iteration, camera_type)\n", - "# if camera is not None:\n", - "# add_box_detections_to_camera_ax(camera_ax, camera, box_detections, ego_state_se3)\n", - "\n", - "# return ax\n", - "\n", - "\n", - "# def plot_cameras_with_detections(scene: AbstractScene, iteration: int) -> Tuple[plt.Figure, plt.Axes]:\n", - "# \"\"\"\n", - "# Plots 8x cameras and birds-eye-view visualization in 3x3 grid\n", - "# :param scene: navsim scene dataclass\n", - "# :param frame_idx: index of selected frame\n", - "# :return: figure and ax object of matplotlib\n", - "# \"\"\"\n", - "# scale = 2\n", - "# fig, ax = plt.subplots(1, 1, figsize=(scale * 11.5, scale * 4.3))\n", - "# _add_camera_with_detections(ax, scene, iteration)\n", - "\n", - "# fig.tight_layout()\n", - "# fig.subplots_adjust(wspace=0.01, hspace=0.01, left=0.01, right=0.99, top=0.99, bottom=0.01)\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "# def _add_camera_with_detections(ax: plt.Axes, scene: AbstractScene, iteration: int) -> plt.Axes:\n", - "\n", 
- "# camera_ax_assignment: Dict[CameraType, plt.Axes] = {\n", - "# CameraType.CAM_L1: ax[0],\n", - "# CameraType.CAM_L0: ax[1],\n", - "# CameraType.F0: ax[2],\n", - "# CameraType.CAM_R0: ax[3],\n", - "# CameraType.CAM_R1: ax[4],\n", - "# }\n", - "\n", - "# box_detections = scene.get_box_detections_at_iteration(iteration)\n", - "# ego_state_se3 = scene.get_ego_state_at_iteration(iteration)\n", - "# for camera_type, camera_ax in camera_ax_assignment.items():\n", - "# assert camera_type in scene.available_camera_types\n", - "# camera = scene.get_camera_at_iteration(iteration, camera_type)\n", - "# if camera is not None:\n", - "# add_box_detections_to_camera_ax(camera_ax, camera, box_detections, ego_state_se3)\n", - "\n", - "# return ax\n", - "\n", - "\n", - "# def plot_cameras_with_detections(scene: AbstractScene, iteration: int) -> Tuple[plt.Figure, plt.Axes]:\n", - "# \"\"\"\n", - "# Plots 8x cameras and birds-eye-view visualization in 3x3 grid\n", - "# :param scene: navsim scene dataclass\n", - "# :param frame_idx: index of selected frame\n", - "# :return: figure and ax object of matplotlib\n", - "# \"\"\"\n", - "# scale = 2\n", - "# fig, ax = plt.subplots(1, 5, figsize=(scale * 11.5, scale * 4.3))\n", - "# _add_camera_with_detections(ax, scene, iteration)\n", - "\n", - "# fig.tight_layout()\n", - "# fig.subplots_adjust(wspace=0.01, hspace=0.01, left=0.01, right=0.99, top=0.99, bottom=0.01)\n", - "\n", - "# plot_cameras_with_detections(scenes[0], iteration=0)\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "5", - "metadata": {}, - "outputs": [], - "source": [ - "def _add_camera_with_detections(ax: plt.Axes, scene: AbstractScene, iteration: int) -> plt.Axes:\n", - "\n", - " camera_ax_assignment: Dict[CameraType, plt.Axes] = {\n", - " CameraType.CAM_L0: ax[0, 0],\n", - " CameraType.CAM_F0: ax[0, 1],\n", - " CameraType.CAM_R0: ax[0, 2],\n", - " CameraType.CAM_L1: ax[1, 0],\n", - " # CameraType.CAM_B0: ax[1, 1],\n", - " CameraType.CAM_R1: ax[1, 2],\n", - " CameraType.CAM_L2: ax[2, 0],\n", - " CameraType.CAM_R2: ax[2, 2],\n", - " }\n", - "\n", - " box_detections = scene.get_box_detections_at_iteration(iteration)\n", - " ego_state_se3 = scene.get_ego_state_at_iteration(iteration)\n", - " for camera_type, camera_ax in camera_ax_assignment.items():\n", - " if camera_type not in scene.available_camera_types:\n", - " continue\n", - " # assert camera_type in scene.available_camera_types\n", - " camera = scene.get_camera_at_iteration(iteration, camera_type)\n", - " if camera is not None:\n", - " add_box_detections_to_camera_ax(camera_ax, camera, box_detections, ego_state_se3)\n", - "\n", - " return ax\n", - "\n", - "\n", - "def plot_cameras_with_detections(scene: AbstractScene, iteration: int) -> Tuple[plt.Figure, plt.Axes]:\n", - " \"\"\"\n", - " Plots 8x cameras and birds-eye-view visualization in 3x3 grid\n", - " :param scene: navsim scene dataclass\n", - " :param frame_idx: index of selected frame\n", - " :return: figure and ax object of matplotlib\n", - " \"\"\"\n", - " scale = 1\n", - " fig, ax = plt.subplots(3, 3, figsize=(scale * 10, scale * 10))\n", - " _add_camera_with_detections(ax, scene, iteration)\n", - "\n", - " fig.tight_layout()\n", - " fig.subplots_adjust(wspace=0.01, hspace=0.01, left=0.01, right=0.99, top=0.99, bottom=0.01)\n", - "\n", - "\n", - "plot_cameras_with_detections(scenes[9], iteration=20)\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "6", - "metadata": {}, - "outputs": [], - "source": [ - "scenes[3].log_name" - ] - }, 
- { - "cell_type": "code", - "execution_count": null, - "id": "7", - "metadata": {}, - "outputs": [], - "source": [ - "from pathlib import Path\n", - "from typing import Optional, Union\n", - "\n", - "from matplotlib import animation\n", - "from tqdm import tqdm\n", - "\n", - "\n", - "def render_scene_animation(\n", - " scene: AbstractScene,\n", - " output_path: Union[str, Path],\n", - " start_idx: int = 0,\n", - " end_idx: Optional[int] = None,\n", - " step: int = 10,\n", - " fps: float = 20.0,\n", - " dpi: int = 300,\n", - " format: str = \"mp4\",\n", - ") -> None:\n", - " assert format in [\"mp4\", \"gif\"], \"Format must be either 'mp4' or 'gif'.\"\n", - " output_path = Path(output_path)\n", - " output_path.mkdir(parents=True, exist_ok=True)\n", - "\n", - " scene.open()\n", - "\n", - " if end_idx is None:\n", - " end_idx = scene.number_of_iterations\n", - " end_idx = min(end_idx, scene.number_of_iterations)\n", - "\n", - " scale = 1\n", - " fig, ax = plt.subplots(1, 1, figsize=(scale * 16, scale * 9))\n", - "\n", - " def update(i):\n", - " ax.clear()\n", - " # for a in ax.flatten():\n", - " # a.clear()\n", - " _add_camera_with_detections(ax, scene, i)\n", - " plt.subplots_adjust(left=0.05, right=0.95, top=0.95, bottom=0.05)\n", - " pbar.update(1)\n", - "\n", - " frames = list(range(start_idx, end_idx, step))\n", - " pbar = tqdm(total=len(frames), desc=f\"Rendering {scene.log_name} as {format}\")\n", - " ani = animation.FuncAnimation(fig, update, frames=frames, repeat=False)\n", - "\n", - " ani.save(output_path / f\"{scene.log_name}_{scene.token}.{format}\", writer=\"ffmpeg\", fps=fps, dpi=dpi)\n", - " plt.close(fig)\n", - " scene.close()\n", - "\n", - "\n", - "# for i in range(10):\n", - " # render_scene_animation(scenes[i], output_path=\"output\", start_idx=0, end_idx=None, step=1, fps=20, dpi=300, format=\"mp4\")" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "8", - "metadata": {}, - "outputs": [], - "source": [] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "9", - "metadata": {}, - "outputs": [], - "source": [] - } - ], - "metadata": { - "kernelspec": { - "display_name": "py123d", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.12.11" - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} diff --git a/notebooks/viz/log_rendering.ipynb b/notebooks/viz/log_rendering.ipynb deleted file mode 100644 index 4048c20a..00000000 --- a/notebooks/viz/log_rendering.ipynb +++ /dev/null @@ -1,105 +0,0 @@ -{ - "cells": [ - { - "cell_type": "code", - "execution_count": null, - "id": "0", - "metadata": {}, - "outputs": [], - "source": [ - "from pathlib import Path\n", - "from py123d.dataset.scene.arrow_scene import ArrowScene\n", - "from py123d.visualization.matplotlib.plots import plot_scene_at_iteration\n", - "\n", - "\n", - "\n", - "log_name = \"1005081002024129653_5313_150_5333_150\"\n", - "log_file = Path(f\"/home/daniel/py123d_workspace/data/wopd_train/{log_name}.arrow\")\n", - "scene = ArrowScene(log_file)\n", - "\n", - "fig, ax = plot_scene_at_iteration(scene, iteration=10)\n", - "\n", - "ax.get_xlim()" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "1", - "metadata": {}, - "outputs": [], - "source": [ - "import traceback\n", - "from py123d.visualization.matplotlib.plots import 
render_scene_animation\n", - "\n", - "output_path = Path(\"/home/daniel/py123d_logs_videos\")\n", - "# render_scene_as_mp4(scene, output_path, fps=30, end_idx=10000, step=5, dpi=100)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "2", - "metadata": {}, - "outputs": [], - "source": [ - "# Create an mp4 animation with a specific FPS\n", - "import traceback\n", - "from py123d.visualization.matplotlib.plots import render_scene_animation\n", - "\n", - "split = \"av2-sensor-mini_train\"\n", - "output_path = Path(f\"/home/daniel/py123d_logs_videos/{split}\")\n", - "log_path = Path(f\"/home/daniel/py123d_workspace/data/{split}\")\n", - "for log_file in log_path.iterdir():\n", - " try:\n", - " scene = ArrowScene(log_file)\n", - " render_scene_animation(scene, output_path, fps=20, end_idx=None, step=1)\n", - " del scene\n", - " except Exception as e:\n", - " traceback.print_exc()\n", - " erroneous_file = output_path / f\"{log_name}.mp4\"\n", - " if erroneous_file.exists():\n", - " erroneous_file.unlink()\n", - " # break\n", - " # break" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "3", - "metadata": {}, - "outputs": [], - "source": [] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "4", - "metadata": {}, - "outputs": [], - "source": [] - } - ], - "metadata": { - "kernelspec": { - "display_name": "py123d_dev", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.12.11" - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} diff --git a/notebooks/viz/video_example.ipynb b/notebooks/viz/video_example.ipynb deleted file mode 100644 index 28239dfe..00000000 --- a/notebooks/viz/video_example.ipynb +++ /dev/null @@ -1,319 +0,0 @@ -{ - "cells": [ - { - "cell_type": "code", - "execution_count": null, - "id": "0", - "metadata": {}, - "outputs": [], - "source": [ - "from py123d.dataset.scene.scene_builder import ArrowSceneBuilder\n", - "from py123d.dataset.scene.scene_filter import SceneFilter\n", - "\n", - "from py123d.common.multithreading.worker_sequential import Sequential\n", - "from py123d.common.datatypes.sensor.camera import CameraType" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "1", - "metadata": {}, - "outputs": [], - "source": [ - "from py123d.geometry import Point2D\n", - "import numpy as np\n", - "\n", - "import torch\n", - "\n", - "from py123d.geometry.polyline import Polyline2D" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "2", - "metadata": {}, - "outputs": [], - "source": [ - "# split = \"nuplan_private_test\"\n", - "# log_names = [\"2021.09.29.17.35.58_veh-44_00066_00432\"]\n", - "\n", - "\n", - "# splits = [\"wopd_train\"]\n", - "# splits = [\"carla\"]\n", - "splits = [\"nuplan_private_test\"]\n", - "# splits = [\"av2-sensor-mini_train\"]\n", - "# log_names = None\n", - "\n", - "\n", - "log_names = None\n", - "scene_uuids = None\n", - "\n", - "scene_filter = SceneFilter(\n", - " split_names=splits,\n", - " log_names=log_names,\n", - " scene_uuids=scene_uuids,\n", - " duration_s=20,\n", - " history_s=0.0,\n", - " timestamp_threshold_s=20,\n", - " shuffle=True,\n", - " camera_types=[CameraType.CAM_F0],\n", - ")\n", - "scene_builder = ArrowSceneBuilder(\"/home/daniel/py123d_workspace/data\")\n", - "worker = Sequential()\n", - "# worker = 
RayDistributed()\n", - "scenes = scene_builder.get_scenes(scene_filter, worker)\n", - "\n", - "print(f\"Found {len(scenes)} scenes\")" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "3", - "metadata": {}, - "outputs": [], - "source": [ - "from typing import List, Optional, Tuple\n", - "import matplotlib.pyplot as plt\n", - "import numpy as np\n", - "from py123d.visualization.matplotlib.camera import add_camera_ax\n", - "from py123d.geometry import Point2D\n", - "from py123d.visualization.color.color import BLACK, DARK_GREY, DARKER_GREY, LIGHT_GREY, NEW_TAB_10, TAB_10\n", - "from py123d.visualization.color.config import PlotConfig\n", - "from py123d.visualization.color.default import CENTERLINE_CONFIG, MAP_SURFACE_CONFIG, ROUTE_CONFIG\n", - "from py123d.visualization.matplotlib.observation import (\n", - " add_box_detections_to_ax,\n", - " add_default_map_on_ax,\n", - " add_ego_vehicle_to_ax,\n", - " add_traffic_lights_to_ax,\n", - ")\n", - "from py123d.visualization.matplotlib.utils import add_shapely_linestring_to_ax, add_shapely_polygon_to_ax\n", - "from py123d.dataset.maps.abstract_map import AbstractMap\n", - "from py123d.dataset.maps.abstract_map_objects import AbstractLane, AbstractLaneGroup\n", - "from py123d.dataset.maps.gpkg.gpkg_map_objects import GPKGIntersection\n", - "from py123d.dataset.maps.map_datatypes import MapLayer\n", - "from py123d.dataset.scene.abstract_scene import AbstractScene\n", - "\n", - "\n", - "import shapely.geometry as geom\n", - "\n", - "\n", - "def _plot_scene_on_ax(ax: plt.Axes, scene: AbstractScene, iteration: int = 0, radius: float = 80) -> plt.Axes:\n", - "\n", - " ego_vehicle_state = scene.get_ego_state_at_iteration(iteration)\n", - " box_detections = scene.get_box_detections_at_iteration(iteration)\n", - "\n", - " point_2d = ego_vehicle_state.bounding_box.center.state_se2.point_2d\n", - " # add_debug_map_on_ax(ax, scene.get_map_api(), point_2d, radius=radius, route_lane_group_ids=None)\n", - " add_default_map_on_ax(ax, scene.get_map_api(), point_2d, radius=radius, route_lane_group_ids=None)\n", - " # add_traffic_lights_to_ax(ax, traffic_light_detections, scene.get_map_api())\n", - "\n", - " add_box_detections_to_ax(ax, box_detections)\n", - " add_ego_vehicle_to_ax(ax, ego_vehicle_state)\n", - "\n", - " zoom = 1.0\n", - " ax.set_xlim(point_2d.x - radius * zoom, point_2d.x + radius * zoom)\n", - " ax.set_ylim(point_2d.y - radius * zoom, point_2d.y + radius * zoom)\n", - "\n", - " ax.set_aspect(\"equal\", adjustable=\"box\")\n", - " return ax\n", - "\n", - "\n", - "def plot_scene_at_iteration(\n", - " scene: AbstractScene, iteration: int = 0, radius: float = 80\n", - ") -> Tuple[plt.Figure, plt.Axes]:\n", - "\n", - " fig, ax = plt.subplots(1, 2, figsize=(18, 5))\n", - "\n", - " camera = scene.get_camera_at_iteration(iteration=iteration, camera_type=CameraType.CAM_F0)\n", - " add_camera_ax(ax[0], camera)\n", - "\n", - " _plot_scene_on_ax(ax[1], scene, iteration, radius)\n", - " for ax_ in ax:\n", - " ax_.set_xticks([])\n", - " ax_.set_yticks([])\n", - " fig.subplots_adjust(wspace=-0.5) # Make the border between axes super small\n", - " fig.tight_layout()\n", - " return fig, ax\n", - "\n", - "\n", - "scene_index = 6\n", - "\n", - "iteration = 99\n", - "fig, ax = plot_scene_at_iteration(scenes[scene_index], iteration=iteration, radius=35)\n", - "plt.show()\n", - "\n", - "camera = scenes[scene_index].get_camera_at_iteration(\n", - " iteration=iteration, camera_type=CameraType.CAM_F0\n", - ")\n", - "\n", - "plt.imshow(camera.image, 
cmap=\"gray\", vmin=0, vmax=255)\n", - "# # fig.savefig(f\"/home/daniel/scene_{scene_index}_iteration_1.pdf\", dpi=300, bbox_inches=\"tight\")\n", - "\n", - "scenes[scene_index].log_name" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "4", - "metadata": {}, - "outputs": [], - "source": [ - "from pathlib import Path\n", - "from typing import Union\n", - "\n", - "from matplotlib import animation\n", - "from tqdm import tqdm\n", - "\n", - "\n", - "def render_scene_animation(\n", - " scene: AbstractScene,\n", - " output_path: Union[str, Path],\n", - " start_idx: int = 0,\n", - " end_idx: Optional[int] = None,\n", - " step: int = 5,\n", - " fps: float = 2.0,\n", - " dpi: int = 300,\n", - " format: str = \"gif\",\n", - " radius: float = 35,\n", - ") -> None:\n", - " assert format in [\"mp4\", \"gif\"], \"Format must be either 'mp4' or 'gif'.\"\n", - " output_path = Path(output_path)\n", - " output_path.mkdir(parents=True, exist_ok=True)\n", - "\n", - " scene.open()\n", - "\n", - " if end_idx is None:\n", - " end_idx = scene.number_of_iterations\n", - " end_idx = min(end_idx, scene.number_of_iterations)\n", - " fig, ax = plt.subplots(1, 2, figsize=(18, 5))\n", - " gs = fig.add_gridspec(1, 2, width_ratios=[6, 1])\n", - " ax[0].set_position(gs[0].get_position(fig))\n", - " ax[1].set_position(gs[1].get_position(fig))\n", - "\n", - " def update(i):\n", - " ax[0].clear()\n", - " ax[1].clear()\n", - " for ax_ in ax:\n", - " ax_.set_xticks([])\n", - " ax_.set_yticks([])\n", - " _plot_scene_on_ax(ax[1], scene, i, radius)\n", - " camera = scene.get_camera_at_iteration(iteration=i, camera_type=CameraType.CAM_F0)\n", - " add_camera_ax(ax[0], camera)\n", - " fig.subplots_adjust(wspace=-0.33, hspace=0.0)\n", - " plt.subplots_adjust(left=0.01, right=0.99, top=0.99, bottom=0.01) # Remove all margins\n", - " pbar.update(1)\n", - "\n", - " frames = list(range(start_idx, end_idx, step))\n", - " pbar = tqdm(total=len(frames), desc=f\"Rendering {scene.log_name} as {format}\")\n", - " ani = animation.FuncAnimation(fig, update, frames=frames, repeat=False)\n", - "\n", - " ani.save(output_path / f\"{scene.log_name}_{scene.token}.{format}\", writer=\"ffmpeg\", fps=fps, dpi=dpi)\n", - " plt.close(fig)\n", - " scene.close()\n", - "\n", - "\n", - "render_scene_animation(scenes[scene_index], output_path=\"/home/daniel/scene_renders\", format=\"gif\", dpi=100, end_idx=200)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "5", - "metadata": {}, - "outputs": [], - "source": [ - "\n", - "\n", - "\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "6", - "metadata": {}, - "outputs": [], - "source": [ - "from py123d.dataset.conversion.map.road_edge.road_edge_2d_utils import get_road_edge_linear_rings\n", - "from py123d.dataset.maps.gpkg.gpkg_map import GPKGMap\n", - "\n", - "\n", - "map_api: GPKGMap = scenes[scene_index].map_api\n", - "\n", - "drivable_polygons = map_api._gpd_dataframes[MapLayer.LANE]\n", - "\n", - "\n", - "\n", - "linear_rings = get_road_edge_linear_rings(drivable_polygons.geometry.tolist())\n", - "rings_lengths = [ring.length for ring in linear_rings]\n", - "max_length_idx = np.argmax(rings_lengths)\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "size = 16\n", - "fig, ax = plt.subplots(figsize=(size, size))\n", - "\n", - "for idx, ring in enumerate(linear_rings):\n", - " if idx == max_length_idx:\n", - " ax.plot(*ring.xy, color=\"black\", linewidth=2, label=\"Longest Road Edge\")\n", - " else:\n", - " ax.plot(*ring.xy)\n", - "\n", - 
"\n", - "ax.set_aspect(\"equal\", adjustable=\"box\")" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "7", - "metadata": {}, - "outputs": [], - "source": [] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "8", - "metadata": {}, - "outputs": [], - "source": [] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "9", - "metadata": {}, - "outputs": [], - "source": [] - } - ], - "metadata": { - "kernelspec": { - "display_name": "py123d", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.12.11" - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} diff --git a/notebooks/viz/viser_testing_v2_scene.ipynb b/notebooks/viz/viser_testing_v2_scene.ipynb deleted file mode 100644 index 4cf601fc..00000000 --- a/notebooks/viz/viser_testing_v2_scene.ipynb +++ /dev/null @@ -1,94 +0,0 @@ -{ - "cells": [ - { - "cell_type": "code", - "execution_count": null, - "id": "0", - "metadata": {}, - "outputs": [], - "source": [ - "from py123d.datatypes.scene.arrow.arrow_scene_builder import ArrowSceneBuilder\n", - "from py123d.datatypes.scene.scene_filter import SceneFilter\n", - "\n", - "from py123d.common.multithreading.worker_sequential import Sequential\n", - "from py123d.datatypes.sensors.camera import CameraType" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "1", - "metadata": {}, - "outputs": [], - "source": [ - "\n", - "splits = [\"nuplan_private_test\"]\n", - "# splits = [\"carla\"]\n", - "# splits = [\"wopd_train\"]\n", - "# splits = [\"av2-sensor-mini_train\"]\n", - "log_names = None\n", - "\n", - "scene_uuids = None\n", - "\n", - "scene_filter = SceneFilter(\n", - " split_names=splits,\n", - " log_names=log_names,\n", - " scene_uuids=scene_uuids,\n", - " duration_s=10,\n", - " history_s=0.0,\n", - " timestamp_threshold_s=10,\n", - " shuffle=True,\n", - " camera_types=[CameraType.CAM_F0],\n", - ")\n", - "scene_builder = ArrowSceneBuilder(\"/home/daniel/py123d_workspace/data\")\n", - "worker = Sequential()\n", - "# worker = RayDistributed()\n", - "scenes = scene_builder.get_scenes(scene_filter, worker)\n", - "\n", - "print(f\"Found {len(scenes)} scenes\")" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "2", - "metadata": {}, - "outputs": [], - "source": [ - "from py123d.visualization.viser.server import ViserVisualizationServer\n", - "\n", - "\n", - "visualization_server = ViserVisualizationServer(scenes, scene_index=0)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "3", - "metadata": {}, - "outputs": [], - "source": [] - } - ], - "metadata": { - "kernelspec": { - "display_name": "py123d", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.12.11" - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} diff --git a/notebooks/waymo_perception/lidar_testing.ipynb b/notebooks/waymo_perception/lidar_testing.ipynb deleted file mode 100644 index 82a5ac27..00000000 --- a/notebooks/waymo_perception/lidar_testing.ipynb +++ /dev/null @@ -1,301 +0,0 @@ -{ - "cells": [ - { - "cell_type": "code", - "execution_count": null, 
- "id": "0", - "metadata": {}, - "outputs": [], - "source": [ - "from waymo_open_dataset import dataset_pb2\n", - "\n", - "import json\n", - "import os\n", - "\n", - "import numpy as np\n", - "import tensorflow as tf\n", - "from PIL import Image\n", - "from tqdm import tqdm\n", - "from waymo_open_dataset import label_pb2\n", - "from waymo_open_dataset.protos import camera_segmentation_pb2 as cs_pb2\n", - "from waymo_open_dataset.utils import box_utils\n", - "\n", - "\n", - "import matplotlib.pyplot as plt\n", - "\n", - "\n", - "import os\n", - "os.environ[\"CUDA_VISIBLE_DEVICES\"] = \"-1\"" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "1", - "metadata": {}, - "outputs": [], - "source": [ - "from pathlib import Path\n", - "\n", - "\n", - "WOPD_DATA_ROOT = Path(\"/media/nvme1/waymo_perception/training\")\n", - "\n", - "\n", - "tfrecords_file_list = list(WOPD_DATA_ROOT.glob(\"*.tfrecord\"))" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "2", - "metadata": {}, - "outputs": [], - "source": [ - "import io\n", - "from pyquaternion import Quaternion\n", - "\n", - "from py123d.geometry import StateSE3\n", - "from py123d.geometry.bounding_box import BoundingBoxSE3\n", - "\n", - "from waymo_open_dataset.utils import frame_utils\n", - "\n", - "\n", - "# Frame attributes:\n", - "# context: \n", - "# timestamp_micros: \n", - "# pose: \n", - "# images: List with 5 images\n", - "# lasers: \n", - "# Length: 5\n", - "# laser_labels: \n", - "# Length: 0\n", - "# projected_lidar_labels: \n", - "# Length: 0\n", - "# camera_labels: \n", - "# Length: 0\n", - "# no_label_zones: \n", - "# Length: 0\n", - "# map_features: \n", - "# Length: 0\n", - "# map_pose_offset: \n", - "\n", - "file_idx = 0\n", - "pathname = tfrecords_file_list[file_idx]\n", - "dataset = tf.data.TFRecordDataset(pathname, compression_type=\"\")\n", - "num_frames = sum(1 for _ in dataset)\n", - "\n", - "\n", - "def read_jpg_image(data: bytes) -> np.ndarray:\n", - " \"\"\"Read a JPEG image from bytes and return it as a numpy array.\"\"\"\n", - " image = Image.open(io.BytesIO(data))\n", - " return np.array(image)\n", - "\n", - "\n", - "ego_state_se3s = []\n", - "front_images = []\n", - "dataset = tf.data.TFRecordDataset(pathname, compression_type=\"\")\n", - "\n", - "boxes = []\n", - "\n", - "for frame_idx, data in enumerate(dataset):\n", - "\n", - " frame = dataset_pb2.Frame()\n", - " frame.ParseFromString(data.numpy())\n", - " # print(frame.camera_labels)\n", - " print(\"Frame attributes:\")\n", - " for field in frame.DESCRIPTOR.fields:\n", - " field_name = field.name\n", - " if hasattr(frame, field_name):\n", - " value = getattr(frame, field_name)\n", - " if field_name != \"images\": # Don't print the whole image data\n", - " print(f\" {field_name}: {type(value)}\")\n", - " if hasattr(value, \"__len__\") and not isinstance(value, (str, bytes)):\n", - " print(f\" Length: {len(value)}\")\n", - " else:\n", - " print(f\" {field_name}: List with {len(value)} images\")\n", - "\n", - "\n", - " # # 1. 
pose\n", - " pose = np.array(frame.pose.transform).reshape(4, 4)\n", - " yaw_pitch_roll = Quaternion(matrix=pose[:3, :3]).yaw_pitch_roll\n", - " ego_state_se3s.append(\n", - " np.array(\n", - " [\n", - " pose[0, 3], # x\n", - " pose[1, 3], # y\n", - " pose[2, 3], # z\n", - " yaw_pitch_roll[2], # yaw\n", - " yaw_pitch_roll[1], # pitch\n", - " yaw_pitch_roll[0], # roll\n", - " ],\n", - " dtype=np.float64,\n", - " )\n", - " )\n", - "\n", - " # # plt.show()\n", - " if frame_idx == 0:\n", - " break\n", - "\n", - "ego_state_se3s = np.array(ego_state_se3s, dtype=np.float64)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "3", - "metadata": {}, - "outputs": [], - "source": [ - "from typing import Dict, List, Tuple\n", - "\n", - "import numpy as np\n", - "import tensorflow as tf\n", - "\n", - "from waymo_open_dataset import dataset_pb2\n", - "from waymo_open_dataset.utils import range_image_utils\n", - "from waymo_open_dataset.utils import transform_utils\n", - "\n", - "RangeImages = Dict[\"dataset_pb2.LaserName.Name\", List[dataset_pb2.MatrixFloat]]\n", - "CameraProjections = Dict[\"dataset_pb2.LaserName.Name\", List[dataset_pb2.MatrixInt32]]\n", - "SegmentationLabels = Dict[\"dataset_pb2.LaserName.Name\", List[dataset_pb2.MatrixInt32]]\n", - "ParsedFrame = Tuple[RangeImages, CameraProjections, SegmentationLabels, dataset_pb2.MatrixFloat]\n", - "\n", - "\n", - "def parse_range_image_and_camera_projection(frame: dataset_pb2.Frame) -> ParsedFrame:\n", - " \"\"\"Parse range images and camera projections given a frame.\n", - "\n", - " Args:\n", - " frame: open dataset frame proto\n", - "\n", - " Returns:\n", - " range_images: A dict of {laser_name,\n", - " [range_image_first_return, range_image_second_return]}.\n", - " camera_projections: A dict of {laser_name,\n", - " [camera_projection_from_first_return,\n", - " camera_projection_from_second_return]}.\n", - " seg_labels: segmentation labels, a dict of {laser_name,\n", - " [seg_label_first_return, seg_label_second_return]}\n", - " range_image_top_pose: range image pixel pose for top lidar.\n", - " \"\"\"\n", - " range_images = {}\n", - " camera_projections = {}\n", - " seg_labels = {}\n", - " range_image_top_pose: dataset_pb2.MatrixFloat = dataset_pb2.MatrixFloat()\n", - " for laser in frame.lasers:\n", - " if len(laser.ri_return1.range_image_compressed) > 0: # pylint: disable=g-explicit-length-test\n", - " range_image_str_tensor = tf.io.decode_compressed(laser.ri_return1.range_image_compressed, \"ZLIB\")\n", - " ri = dataset_pb2.MatrixFloat()\n", - " ri.ParseFromString(range_image_str_tensor.numpy())\n", - " range_images[laser.name] = [ri]\n", - "\n", - " if laser.name == dataset_pb2.LaserName.TOP:\n", - " range_image_top_pose_str_tensor = tf.io.decode_compressed(\n", - " laser.ri_return1.range_image_pose_compressed, \"ZLIB\"\n", - " )\n", - " range_image_top_pose = dataset_pb2.MatrixFloat()\n", - " range_image_top_pose.ParseFromString(range_image_top_pose_str_tensor.numpy())\n", - "\n", - " camera_projection_str_tensor = tf.io.decode_compressed(\n", - " laser.ri_return1.camera_projection_compressed, \"ZLIB\"\n", - " )\n", - " cp = dataset_pb2.MatrixInt32()\n", - " cp.ParseFromString(camera_projection_str_tensor.numpy())\n", - " camera_projections[laser.name] = [cp]\n", - "\n", - " if len(laser.ri_return1.segmentation_label_compressed) > 0: # pylint: disable=g-explicit-length-test\n", - " seg_label_str_tensor = tf.io.decode_compressed(laser.ri_return1.segmentation_label_compressed, \"ZLIB\")\n", - " seg_label = 
dataset_pb2.MatrixInt32()\n", - " seg_label.ParseFromString(seg_label_str_tensor.numpy())\n", - " seg_labels[laser.name] = [seg_label]\n", - " if len(laser.ri_return2.range_image_compressed) > 0: # pylint: disable=g-explicit-length-test\n", - " range_image_str_tensor = tf.io.decode_compressed(laser.ri_return2.range_image_compressed, \"ZLIB\")\n", - " ri = dataset_pb2.MatrixFloat()\n", - " ri.ParseFromString(range_image_str_tensor.numpy())\n", - " range_images[laser.name].append(ri)\n", - "\n", - " camera_projection_str_tensor = tf.io.decode_compressed(\n", - " laser.ri_return2.camera_projection_compressed, \"ZLIB\"\n", - " )\n", - " cp = dataset_pb2.MatrixInt32()\n", - " cp.ParseFromString(camera_projection_str_tensor.numpy())\n", - " camera_projections[laser.name].append(cp)\n", - "\n", - " if len(laser.ri_return2.segmentation_label_compressed) > 0: # pylint: disable=g-explicit-length-test\n", - " seg_label_str_tensor = tf.io.decode_compressed(laser.ri_return2.segmentation_label_compressed, \"ZLIB\")\n", - " seg_label = dataset_pb2.MatrixInt32()\n", - " seg_label.ParseFromString(seg_label_str_tensor.numpy())\n", - " seg_labels[laser.name].append(seg_label)\n", - " return range_images, camera_projections, seg_labels, range_image_top_pose\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "4", - "metadata": {}, - "outputs": [], - "source": [ - "from waymo_open_dataset.utils import frame_utils\n", - "\n", - "dataset = tf.data.TFRecordDataset(tfrecords_file_list[file_idx], compression_type=\"\")\n", - "for data in dataset:\n", - " frame = dataset_pb2.Frame()\n", - " frame.ParseFromString(data.numpy()) # No need for bytearray conversion\n", - " break\n", - "\n", - "(range_images, camera_projections, _, range_image_top_pose) = parse_range_image_and_camera_projection(frame)\n", - "points, cp_points = frame_utils.convert_range_image_to_point_cloud(\n", - " frame=frame,\n", - " range_images=range_images,\n", - " camera_projections=camera_projections,\n", - " range_image_top_pose=range_image_top_pose,\n", - " keep_polar_features=True,\n", - ")\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "5", - "metadata": {}, - "outputs": [], - "source": [ - "pc = points[0]\n", - "\n", - "\n", - "plt.scatter(pc[:, 3], pc[:, 4], s=0.1, c=pc[:, 5], cmap='viridis')" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "6", - "metadata": {}, - "outputs": [], - "source": [ - "cp_points[0].shape, points[0].shape" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "py123d", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.12.11" - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} diff --git a/notebooks/waymo_perception/map_testing.ipynb b/notebooks/waymo_perception/map_testing.ipynb deleted file mode 100644 index 9930025d..00000000 --- a/notebooks/waymo_perception/map_testing.ipynb +++ /dev/null @@ -1,653 +0,0 @@ -{ - "cells": [ - { - "cell_type": "code", - "execution_count": null, - "id": "0", - "metadata": {}, - "outputs": [], - "source": [ - "from waymo_open_dataset import dataset_pb2\n", - "\n", - "import json\n", - "import os\n", - "\n", - "import numpy as np\n", - "import tensorflow as tf\n", - "from PIL import Image\n", - "from tqdm import tqdm\n", - "from waymo_open_dataset import 
label_pb2\n", - "from waymo_open_dataset.protos import camera_segmentation_pb2 as cs_pb2\n", - "from waymo_open_dataset.utils import box_utils\n", - "\n", - "\n", - "import matplotlib.pyplot as plt\n", - "\n", - "\n", - "import os\n", - "os.environ[\"CUDA_VISIBLE_DEVICES\"] = \"-1\"" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "1", - "metadata": {}, - "outputs": [], - "source": [ - "from pathlib import Path\n", - "\n", - "\n", - "WOPD_DATA_ROOT = Path(\"/media/nvme1/waymo_perception/training\")\n", - "\n", - "\n", - "tfrecords_file_list = list(WOPD_DATA_ROOT.glob(\"*.tfrecord\"))" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "2", - "metadata": {}, - "outputs": [], - "source": [ - "import io\n", - "from pyquaternion import Quaternion\n", - "\n", - "from py123d.geometry import StateSE3\n", - "from py123d.geometry.bounding_box import BoundingBoxSE3\n", - "\n", - "from waymo_open_dataset.utils import frame_utils\n", - "\n", - "\n", - "# Frame attributes:\n", - "# context: \n", - "# timestamp_micros: \n", - "# pose: \n", - "# images: List with 5 images\n", - "# lasers: \n", - "# Length: 5\n", - "# laser_labels: \n", - "# Length: 0\n", - "# projected_lidar_labels: \n", - "# Length: 0\n", - "# camera_labels: \n", - "# Length: 0\n", - "# no_label_zones: \n", - "# Length: 0\n", - "# map_features: \n", - "# Length: 0\n", - "# map_pose_offset: \n", - "\n", - "file_idx = 0\n", - "pathname = tfrecords_file_list[file_idx]\n", - "dataset = tf.data.TFRecordDataset(pathname, compression_type=\"\")\n", - "num_frames = sum(1 for _ in dataset)\n", - "\n", - "\n", - "def read_jpg_image(data: bytes) -> np.ndarray:\n", - " \"\"\"Read a JPEG image from bytes and return it as a numpy array.\"\"\"\n", - " image = Image.open(io.BytesIO(data))\n", - " return np.array(image)\n", - "\n", - "\n", - "ego_state_se3s = []\n", - "front_images = []\n", - "dataset = tf.data.TFRecordDataset(pathname, compression_type=\"\")\n", - "\n", - "boxes = []\n", - "\n", - "for frame_idx, data in enumerate(dataset):\n", - "\n", - " frame = dataset_pb2.Frame()\n", - " frame.ParseFromString(data.numpy())\n", - " print(\"Frame attributes:\")\n", - " for field in frame.DESCRIPTOR.fields:\n", - " field_name = field.name\n", - " if hasattr(frame, field_name):\n", - " value = getattr(frame, field_name)\n", - " if field_name != \"images\": # Don't print the whole image data\n", - " print(f\" {field_name}: {type(value)}\")\n", - " if hasattr(value, \"__len__\") and not isinstance(value, (str, bytes)):\n", - " print(f\" Length: {len(value)}\")\n", - " else:\n", - " print(f\" {field_name}: List with {len(value)} images\")\n", - "\n", - " # # plt.show()\n", - " if frame_idx == 0:\n", - " break\n", - "\n", - "ego_state_se3s = np.array(ego_state_se3s, dtype=np.float64)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "3", - "metadata": {}, - "outputs": [], - "source": [ - "frame.context.stats.location" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "4", - "metadata": {}, - "outputs": [], - "source": [ - "from collections import defaultdict\n", - "\n", - "from py123d.geometry.utils.units import mph_to_mps\n", - "\n", - "\n", - "dataset = tf.data.TFRecordDataset(pathname, compression_type=\"\")\n", - "for frame_idx, data in enumerate(dataset):\n", - " frame = dataset_pb2.Frame()\n", - " frame.ParseFromString(data.numpy())\n", - " break\n", - "\n", - "driveways = {}\n", - "stop_signs = {}\n", - "crosswalks = {}\n", - "road_edges = {}\n", - "road_lines = 
{}\n", - "\n", - "lanes = {}\n", - "lanes_type = {}\n", - "lanes_speed_limit_mps = {}\n", - "lanes_interpolate = {}\n", - "lanes_successors = defaultdict(list)\n", - "lanes_predecessors = defaultdict(list)\n", - "\n", - "lanes_map_features = {}\n", - "\n", - "for map_feature in frame.map_features:\n", - "\n", - " if map_feature.HasField(\"lane\"):\n", - " lanes[map_feature.id] = np.array([[p.x, p.y, p.z] for p in map_feature.lane.polyline])\n", - " lanes_map_features[map_feature.id] = map_feature\n", - " lanes_type[map_feature.id] = map_feature.lane.type\n", - " lanes_speed_limit_mps[map_feature.id] = mph_to_mps(map_feature.lane.speed_limit_mph)\n", - " for lane_id_ in map_feature.lane.exit_lanes:\n", - " lanes_successors[map_feature.id].append(lane_id_)\n", - " for lane_id_ in map_feature.lane.exit_lanes:\n", - " lanes_predecessors[map_feature.id].append(lane_id_)\n", - "\n", - " elif map_feature.HasField(\"road_line\"):\n", - " road_lines[map_feature.id] = np.array([[p.x, p.y, p.z] for p in map_feature.road_line.polyline])\n", - " elif map_feature.HasField(\"road_edge\"):\n", - " road_edges[map_feature.id] = np.array([[p.x, p.y, p.z] for p in map_feature.road_edge.polyline])\n", - " elif map_feature.HasField(\"stop_sign\"):\n", - " pass\n", - " elif map_feature.HasField(\"crosswalk\"):\n", - " outline = np.array([[p.x, p.y, p.z] for p in map_feature.crosswalk.polygon])\n", - " crosswalks[map_feature.id] = outline\n", - " elif map_feature.HasField(\"speed_bump\"):\n", - " pass\n", - " elif map_feature.HasField(\"driveway\"):\n", - " # print(map_feature.driveway)\n", - " outline = np.array([[p.x, p.y, p.z] for p in map_feature.driveway.polygon])\n", - " driveways[map_feature.id] = outline\n", - "\n", - " # print(f\"Roadline: {map_feature.road_line}\")" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "5", - "metadata": {}, - "outputs": [], - "source": [ - "from py123d.visualization.matplotlib.utils import add_non_repeating_legend_to_ax\n", - "\n", - "\n", - "fig, ax = plt.subplots(figsize=(30, 30))\n", - "\n", - "for road_edge in road_edges.values():\n", - " # print(len(driveway))\n", - " ax.plot(road_edge[:, 0], road_edge[:, 1], color=\"blue\", label=\"road_edge\")\n", - "\n", - "for lane in lanes.values():\n", - " # print(len(driveway))\n", - " ax.plot(lane[:, 0], lane[:, 1], color=\"gray\", label=\"lane\")\n", - "\n", - "\n", - "for road_line in road_lines.values():\n", - " # print(len(driveway))\n", - " ax.plot(road_line[:, 0], road_line[:, 1], color=\"orange\", label=\"road_line\")\n", - "\n", - "for driveway in driveways.values():\n", - " # print(len(driveway))\n", - " ax.plot(driveway[:, 0], driveway[:, 1], color=\"green\", label=\"driveway\")\n", - "\n", - "\n", - "for crosswalk in crosswalks.values():\n", - " # print(len(driveway))\n", - " ax.plot(crosswalk[:, 0], crosswalk[:, 1], color=\"violet\", label=\"crosswalk\")\n", - "\n", - "\n", - "add_non_repeating_legend_to_ax(ax)\n", - "\n", - "ax.set_aspect(\"equal\")" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "6", - "metadata": {}, - "outputs": [], - "source": [ - "from py123d.conversion.datasets.wopd.wopd_map_utils import extract_lane_boundaries\n", - "\n", - "\n", - "left_boundaries, right_boundaries = extract_lane_boundaries(\n", - " lanes=lanes,\n", - " lanes_successors=lanes_successors,\n", - " lanes_predecessors=lanes_predecessors,\n", - " road_lines=road_lines,\n", - " road_edges=road_edges,\n", - ")" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "7", 
- "metadata": {}, - "outputs": [], - "source": [ - "size = 20\n", - "fig, ax = plt.subplots(figsize=(size, size))\n", - "\n", - "for lane_id, lane_ in lanes.items():\n", - " if lane_id not in left_boundaries or lane_id not in right_boundaries:\n", - " continue\n", - " left_boundary = left_boundaries[lane_id].array\n", - " right_boundary = right_boundaries[lane_id].array\n", - "\n", - " assert len(left_boundary) > 0 and len(right_boundary) > 0\n", - " ax.plot(left_boundary[:, 0], left_boundary[:, 1], color=\"lime\")\n", - " ax.plot(right_boundary[:, 0], right_boundary[:, 1], color=\"red\")\n", - "\n", - " ax.plot(lane_[:, 0], lane_[:, 1], color=\"black\")\n", - "\n", - "\n", - "\n", - "ax.set_aspect(\"equal\")" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "8", - "metadata": {}, - "outputs": [], - "source": [ - "# " - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "9", - "metadata": {}, - "outputs": [], - "source": [ - "lane_idx = 158\n", - "\n", - "lane = lanes_map_features[lane_idx].lane\n", - "lane_id = lanes_map_features[lane_idx].id\n", - "\n", - "\n", - "import matplotlib.pyplot as plt\n", - "\n", - "fig, ax = plt.subplots(figsize=(10, 10))\n", - "\n", - "ax.plot(lanes[lane_id][:, 0], lanes[lane_id][:, 1], color=\"black\", label=\"Lane Centerline\", alpha=0.5)\n", - "\n", - "for left_boundary in lane.left_boundaries:\n", - " print(\"left\", left_boundary)\n", - " try:\n", - " boundary = road_lines[left_boundary.boundary_feature_id]\n", - "\n", - " except KeyError:\n", - " print(f\"Boundary feature ID {left_boundary.boundary_feature_id} not found.\")\n", - " boundary = road_edges[left_boundary.boundary_feature_id]\n", - " ax.plot(\n", - " boundary[:, 0],\n", - " boundary[:, 1],\n", - " color=\"lime\",\n", - " linestyle=\"--\",\n", - " label=\"Left Neighbor\",\n", - " )\n", - " ax.plot(\n", - " lanes[lane_id][left_boundary.lane_start_index : left_boundary.lane_end_index, 0],\n", - " lanes[lane_id][left_boundary.lane_start_index : left_boundary.lane_end_index, 1],\n", - " color=\"lime\",\n", - " linestyle=\"-\",\n", - " label=\"Left Neighbor\",\n", - " )\n", - "\n", - "\n", - "for right_boundary in lane.right_boundaries:\n", - " print(\"right\", right_boundary)\n", - " try:\n", - " boundary = road_lines[right_boundary.boundary_feature_id]\n", - "\n", - " except KeyError:\n", - " print(f\"Boundary feature ID {right_boundary.boundary_feature_id} not found.\")\n", - " boundary = road_edges[right_boundary.boundary_feature_id]\n", - "\n", - " ax.plot(\n", - " boundary[:, 0],\n", - " boundary[:, 1],\n", - " color=\"red\",\n", - " linestyle=\"--\",\n", - " label=\"Right Neighbor\",\n", - " )\n", - " ax.plot(\n", - " lanes[lane_id][right_boundary.lane_start_index : right_boundary.lane_end_index, 0],\n", - " lanes[lane_id][right_boundary.lane_start_index : right_boundary.lane_end_index, 1],\n", - " color=\"red\",\n", - " linestyle=\"-\",\n", - " label=\"Right Neighbor\",\n", - " )\n", - "\n", - "\n", - "ax.set_aspect(\"equal\")\n", - "# lanes\n", - "\n", - "zoom_out = 10\n", - "lane = lanes[lane_id]\n", - "ax.set_xlim(lane[:, 0].min() - zoom_out, lane[:, 0].max() + zoom_out)\n", - "ax.set_ylim(lane[:, 1].min() - zoom_out, lane[:, 1].max() + zoom_out)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "10", - "metadata": {}, - "outputs": [], - "source": [] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "11", - "metadata": {}, - "outputs": [], - "source": [ - "from py123d.geometry.polyline import Polyline3D\n", 
- "import numpy as np\n", - "\n", - "\n", - "polyline_3d = Polyline3D.from_array(lanes[0])\n", - "\n", - "\n", - "# Create left and right boundaries for the lane\n", - "\n", - "\n", - "def create_lane_boundaries(polyline_3d, width=2.0):\n", - " \"\"\"\n", - " Create left and right boundaries for a lane by offsetting perpendicular to the heading.\n", - "\n", - " Args:\n", - " polyline_3d: Polyline3D object representing the centerline\n", - " width: Total width of the lane in meters\n", - "\n", - " Returns:\n", - " left_boundary: numpy array representing the left boundary\n", - " right_boundary: numpy array representing the right boundary\n", - " \"\"\"\n", - " points = polyline_3d.array\n", - " half_width = width / 2.0\n", - "\n", - " # Calculate the direction vectors between consecutive points\n", - " directions = np.diff(points, axis=0)\n", - "\n", - " # Normalize the direction vectors\n", - " directions_norm = np.linalg.norm(directions, axis=1, keepdims=True)\n", - " directions_normalized = directions / directions_norm\n", - "\n", - " # Calculate perpendicular vectors in the xy plane (z remains 0)\n", - " perpendiculars = np.zeros_like(directions)\n", - " perpendiculars[:, 0] = -directions_normalized[:, 1] # -dy\n", - " perpendiculars[:, 1] = directions_normalized[:, 0] # dx\n", - "\n", - " # Create boundaries (need to handle the last point separately)\n", - " left_boundary = points[:-1] + perpendiculars * half_width\n", - " right_boundary = points[:-1] - perpendiculars * half_width\n", - "\n", - " # Handle the last point based on the last direction\n", - " last_perp = perpendiculars[-1]\n", - " left_boundary = np.vstack([left_boundary, points[-1] + last_perp * half_width])\n", - " right_boundary = np.vstack([right_boundary, points[-1] - last_perp * half_width])\n", - "\n", - " return left_boundary, right_boundary\n", - "\n", - "\n", - "# Create the boundaries with a 4m wide lane\n", - "left_boundary, right_boundary = create_lane_boundaries(polyline_3d, width=4.0)\n", - "\n", - "# Plot the centerline and the boundaries\n", - "plt.figure(figsize=(12, 8))\n", - "plt.plot(polyline_3d.array[:, 0], polyline_3d.array[:, 1], \"g-\", label=\"Centerline\")\n", - "plt.plot(left_boundary[:, 0], left_boundary[:, 1], \"b-\", label=\"Left Boundary\")\n", - "plt.plot(right_boundary[:, 0], right_boundary[:, 1], \"r-\", label=\"Right Boundary\")\n", - "plt.axis(\"equal\")\n", - "plt.grid(True)\n", - "plt.legend()\n", - "plt.title(\"Lane with Left and Right Boundaries\")" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "12", - "metadata": {}, - "outputs": [], - "source": [ - "lane_idx = 59\n", - "\n", - "lane = lanes_map_features[lane_idx].lane\n", - "lane_id = lanes_map_features[lane_idx].id\n", - "\n", - "\n", - "# for right_neighbor in lane.right_neighbors:\n", - "# print(right_neighbor)\n", - "\n", - "# lane\n", - "\n", - "import matplotlib.pyplot as plt\n", - "from torch import le\n", - "\n", - "fig, ax = plt.subplots(figsize=(10, 10))\n", - "\n", - "ax.plot(lanes[lane_id][:, 0], lanes[lane_id][:, 1], color=\"blue\", label=\"Lane Centerline\")\n", - "\n", - "for right_neighbor in lane.right_neighbors:\n", - " # print(\"right\", right_neighbor)\n", - " ax.plot(\n", - " lanes[right_neighbor.feature_id][:, 0],\n", - " lanes[right_neighbor.feature_id][:, 1],\n", - " color=\"red\",\n", - " linestyle=\"--\",\n", - " label=\"Right Neighbor\",\n", - " )\n", - " ax.plot(\n", - " lanes[right_neighbor.feature_id][right_neighbor.neighbor_start_index: right_neighbor.neighbor_end_index, 
0],\n", - " lanes[right_neighbor.feature_id][right_neighbor.neighbor_start_index: right_neighbor.neighbor_end_index, 1],\n", - " color=\"red\",\n", - " label=\"Right Neighbor\",\n", - " )\n", - "\n", - "\n", - "for left_neighbor in lane.left_neighbors:\n", - " # print(\"left\", left_neighbor)\n", - " ax.plot(\n", - " lanes[left_neighbor.feature_id][:, 0],\n", - " lanes[left_neighbor.feature_id][:, 1],\n", - " color=\"lime\",\n", - " linestyle=\"--\",\n", - " label=\"Left Neighbor\",\n", - " )\n", - " ax.plot(\n", - " lanes[left_neighbor.feature_id][left_neighbor.neighbor_start_index: left_neighbor.neighbor_end_index, 0],\n", - " lanes[left_neighbor.feature_id][left_neighbor.neighbor_start_index: left_neighbor.neighbor_end_index, 1],\n", - " color=\"lime\",\n", - " label=\"Left Neighbor\",\n", - " )\n", - "\n", - "\n", - "ax.set_aspect(\"equal\")\n", - "# lanes" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "13", - "metadata": {}, - "outputs": [], - "source": [ - "lane_idx = 48\n", - "\n", - "lane = lanes_map_features[lane_idx].lane\n", - "lane_id = lanes_map_features[lane_idx].id\n", - "\n", - "\n", - "\n", - "\n", - "import matplotlib.pyplot as plt\n", - "\n", - "fig, ax = plt.subplots(figsize=(10, 10))\n", - "\n", - "ax.scatter(lanes[lane_id][0, 0], lanes[lane_id][0, 1], color=\"black\", label=\"Lane Centerline\", alpha=0.5)\n", - "ax.scatter(lanes[lane_id][-1, 0], lanes[lane_id][-1, 1], color=\"blue\", label=\"Lane Centerline\", alpha=0.5)\n", - "\n", - "for entry_lane in lane.entry_lanes:\n", - " # print(\"right\", right_neighbor)\n", - " ax.plot(\n", - " lanes[entry_lane][:, 0],\n", - " lanes[entry_lane][:, 1],\n", - " color=\"red\",\n", - " linestyle=\"--\",\n", - " label=\"Right Neighbor\",\n", - " )\n", - "\n", - "\n", - "for exit_lane in lane.exit_lanes:\n", - " # print(\"left\", left_neighbor)\n", - " ax.plot(\n", - " lanes[exit_lane][:, 0],\n", - " lanes[exit_lane][:, 1],\n", - " color=\"lime\",\n", - " linestyle=\"--\",\n", - " label=\"Left Neighbor\",\n", - " )\n", - "\n", - "\n", - "ax.set_aspect(\"equal\")\n", - "# lanes" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "14", - "metadata": {}, - "outputs": [], - "source": [ - "lane_idx = 196\n", - "\n", - "lane = lanes_map_features[lane_idx].lane\n", - "lane_id = lanes_map_features[lane_idx].id\n", - "\n", - "\n", - "import matplotlib.pyplot as plt\n", - "\n", - "fig, ax = plt.subplots(figsize=(10, 10))\n", - "\n", - "ax.plot(lanes[lane_id][:, 0], lanes[lane_id][:, 1], color=\"black\", label=\"Lane Centerline\", alpha=0.5)\n", - "\n", - "for left_boundary in lane.left_boundaries:\n", - " print(\"left\", left_boundary)\n", - " try:\n", - " boundary = road_lines[left_boundary.boundary_feature_id]\n", - "\n", - " except KeyError:\n", - " print(f\"Boundary feature ID {left_boundary.boundary_feature_id} not found.\")\n", - " boundary = road_edges[left_boundary.boundary_feature_id]\n", - " ax.plot(\n", - " boundary[:, 0],\n", - " boundary[:, 1],\n", - " color=\"lime\",\n", - " linestyle=\"--\",\n", - " label=\"Left Neighbor\",\n", - " )\n", - " ax.plot(\n", - " lanes[lane_id][left_boundary.lane_start_index : left_boundary.lane_end_index, 0],\n", - " lanes[lane_id][left_boundary.lane_start_index : left_boundary.lane_end_index, 1],\n", - " color=\"lime\",\n", - " linestyle=\"-\",\n", - " label=\"Left Neighbor\",\n", - " )\n", - "\n", - "\n", - "for right_boundary in lane.right_boundaries:\n", - " print(\"right\", right_boundary)\n", - " try:\n", - " boundary = 
road_lines[right_boundary.boundary_feature_id]\n", - "\n", - "    except KeyError:\n", - "        print(f\"Boundary feature ID {right_boundary.boundary_feature_id} not found.\")\n", - "        boundary = road_edges[right_boundary.boundary_feature_id]\n", - "\n", - "    ax.plot(\n", - "        boundary[:, 0],\n", - "        boundary[:, 1],\n", - "        color=\"red\",\n", - "        linestyle=\"--\",\n", - "        label=\"Right Boundary\",\n", - "    )\n", - "    ax.plot(\n", - "        lanes[lane_id][right_boundary.lane_start_index : right_boundary.lane_end_index, 0],\n", - "        lanes[lane_id][right_boundary.lane_start_index : right_boundary.lane_end_index, 1],\n", - "        color=\"red\",\n", - "        linestyle=\"-\",\n", - "        label=\"Right Boundary\",\n", - "    )\n", - "\n", - "\n", - "ax.set_aspect(\"equal\")\n", - "# lanes" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "py123d", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.12.11" - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} diff --git a/notebooks/waymo_perception/testing.ipynb b/notebooks/waymo_perception/testing.ipynb deleted file mode 100644 index e83a9b86..00000000 --- a/notebooks/waymo_perception/testing.ipynb +++ /dev/null @@ -1,280 +0,0 @@ -{ - "cells": [ - { - "cell_type": "code", - "execution_count": null, - "id": "0", - "metadata": {}, - "outputs": [], - "source": [ - "from waymo_open_dataset import dataset_pb2\n", - "\n", - "import json\n", - "import os\n", - "\n", - "import numpy as np\n", - "import tensorflow as tf\n", - "from PIL import Image\n", - "from tqdm import tqdm\n", - "from waymo_open_dataset import label_pb2\n", - "from waymo_open_dataset.protos import camera_segmentation_pb2 as cs_pb2\n", - "from waymo_open_dataset.utils import box_utils\n", - "\n", - "\n", - "import matplotlib.pyplot as plt\n", - "os.environ[\"CUDA_VISIBLE_DEVICES\"] = \"-1\"" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "1", - "metadata": {}, - "outputs": [], - "source": [ - "from pathlib import Path\n", - "\n", - "\n", - "WOPD_DATA_ROOT = Path(\"/media/nvme1/waymo_perception/validation\")\n", - "\n", - "\n", - "tfrecords_file_list = list(WOPD_DATA_ROOT.glob(\"*.tfrecord\"))" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "2", - "metadata": {}, - "outputs": [], - "source": [ - "import io\n", - "from pyquaternion import Quaternion\n", - "\n", - "from py123d.geometry import StateSE3\n", - "from py123d.geometry.bounding_box import BoundingBoxSE3\n", - "\n", - "from waymo_open_dataset.utils import frame_utils\n", - "\n", - "\n", - "# Frame attributes:\n", - "# context: \n", - "# timestamp_micros: \n", - "# pose: \n", - "# images: List with 5 images\n", - "# lasers: \n", - "# Length: 5\n", - "# laser_labels: \n", - "# Length: 0\n", - "# projected_lidar_labels: \n", - "# Length: 0\n", - "# camera_labels: \n", - "# Length: 0\n", - "# no_label_zones: \n", - "# Length: 0\n", - "# map_features: \n", - "# Length: 0\n", - "# map_pose_offset: \n", - "\n", - "file_idx = 0\n", - "pathname = tfrecords_file_list[file_idx]\n", - "dataset = tf.data.TFRecordDataset(pathname, compression_type=\"\")\n", - "num_frames = sum(1 for _ in dataset)\n", - "\n", - "\n", - "def read_jpg_image(data: bytes) -> np.ndarray:\n", - "    \"\"\"Read a JPEG image from bytes and return it as a numpy array.\"\"\"\n", - "    image = 
Image.open(io.BytesIO(data))\n", - " return np.array(image)\n", - "\n", - "dataset = tf.data.TFRecordDataset(pathname, compression_type=\"\")\n", - "\n", - "\n", - "for frame_idx, data in enumerate(dataset):\n", - "\n", - " frame = dataset_pb2.Frame()\n", - " frame.ParseFromString(data.numpy())\n", - " print(frame.context)\n", - "\n", - " # # plt.show()\n", - " if frame_idx == 0:\n", - " break\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "3", - "metadata": {}, - "outputs": [], - "source": [ - "for laser_calibration in frame.context.laser_calibrations:\n", - " print(np.array(laser_calibration.extrinsic.transform).reshape(4, 4))" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "4", - "metadata": {}, - "outputs": [], - "source": [ - "for frame_idx, data in enumerate(dataset):\n", - " frame = dataset_pb2.Frame()\n", - " frame.ParseFromString(data.numpy())\n", - " if frame_idx == 2:\n", - " print(frame)\n", - " break\n", - "\n", - "print(\"Ego\")\n", - "ego_transform = np.array(frame.pose.transform).reshape(4, 4)\n", - "print(ego_transform[:3, 3])\n", - "\n", - "# 1 [ 1.5441613 -0.02302364 2.11557864]\n", - "# 2 [1.49672397 0.0954948 2.11616463]\n", - "# 3 [ 1.49442485 -0.09637497 2.11519385]\n", - "# 4 [1.43213651 0.11612398 2.11625087]\n", - "# 5 [ 1.42936162 -0.11545043 2.1150792 ]\n", - "\n", - "\n", - "# frame.map_pose_offset" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "5", - "metadata": {}, - "outputs": [], - "source": [ - "plt.figure(figsize=(64, 20))\n", - "\n", - "\n", - "def plot_range_image_helper(data, name, layout, vmin=0, vmax=1, cmap=\"gray\"):\n", - " \"\"\"Plots range image.\n", - "\n", - " Args:\n", - " data: range image data\n", - " name: the image title\n", - " layout: plt layout\n", - " vmin: minimum value of the passed data\n", - " vmax: maximum value of the passed data\n", - " cmap: color map\n", - " \"\"\"\n", - " plt.subplot(*layout)\n", - " plt.imshow(data, cmap=cmap, vmin=vmin, vmax=vmax)\n", - " plt.title(name)\n", - " plt.grid(False)\n", - " plt.axis(\"off\")\n", - "\n", - "\n", - "def get_range_image(laser_name, return_index):\n", - " \"\"\"Returns range image given a laser name and its return index.\"\"\"\n", - " return range_images[laser_name][return_index]\n", - "\n", - "\n", - "# def show_range_image(range_image, layout_index_start=1):\n", - "# \"\"\"Shows range image.\n", - "\n", - "# Args:\n", - "# range_image: the range image data from a given lidar of type MatrixFloat.\n", - "# layout_index_start: layout offset\n", - "# \"\"\"\n", - "# range_image_tensor = tf.convert_to_tensor(range_image.data)\n", - "# range_image_tensor = tf.reshape(range_image_tensor, range_image.shape.dims)\n", - "# lidar_image_mask = tf.greater_equal(range_image_tensor, 0)\n", - "# range_image_tensor = tf.where(lidar_image_mask, range_image_tensor, tf.ones_like(range_image_tensor) * 1e10)\n", - "# range_image_range = range_image_tensor[..., 0]\n", - "# range_image_intensity = range_image_tensor[..., 1]\n", - "# range_image_elongation = range_image_tensor[..., 2]\n", - "# plot_range_image_helper(range_image_range.numpy(), \"range\", [8, 1, layout_index_start], vmax=75, cmap=\"gray\")\n", - "# plot_range_image_helper(\n", - "# range_image_intensity.numpy(), \"intensity\", [8, 1, layout_index_start + 1], vmax=1.5, cmap=\"gray\"\n", - "# )\n", - "# plot_range_image_helper(\n", - "# range_image_elongation.numpy(), \"elongation\", [8, 1, layout_index_start + 2], vmax=1.5, cmap=\"gray\"\n", - "# )\n", - "\n", - 
"\n", - "def show_range_image(range_image, layout_index_start=1):\n", - " \"\"\"Shows range image.\n", - "\n", - " Args:\n", - " range_image: the range image data from a given lidar of type MatrixFloat.\n", - " layout_index_start: layout offset\n", - " \"\"\"\n", - " range_image_tensor = np.array([data for data in range_image.data]).reshape(range_image.shape.dims)\n", - " lidar_image_mask = np.greater_equal(range_image_tensor, 0)\n", - " range_image_tensor = np.where(lidar_image_mask, range_image_tensor, np.ones_like(range_image_tensor) * 1e10)\n", - " range_image_range = range_image_tensor[..., 0]\n", - " range_image_intensity = range_image_tensor[..., 1]\n", - " range_image_elongation = range_image_tensor[..., 2]\n", - " plot_range_image_helper(range_image_range, \"range\", [8, 1, layout_index_start], vmax=75, cmap=\"gray\")\n", - " plot_range_image_helper(range_image_intensity, \"intensity\", [8, 1, layout_index_start + 1], vmax=1.5, cmap=\"gray\")\n", - " plot_range_image_helper(range_image_elongation, \"elongation\", [8, 1, layout_index_start + 2], vmax=1.5, cmap=\"gray\")\n", - "\n", - "\n", - "frame.lasers.sort(key=lambda laser: laser.name)\n", - "show_range_image(get_range_image(open_dataset.LaserName.TOP, 0), 1)\n", - "# show_range_image(get_range_image(open_dataset.LaserName.TOP, 1), 4)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "6", - "metadata": {}, - "outputs": [], - "source": [ - "from py123d.common.datatypes.time.time_point import TimePoint\n", - "\n", - "\n", - "for frame_idx, data in enumerate(dataset):\n", - " frame = dataset_pb2.Frame()\n", - " frame.ParseFromString(data.numpy())\n", - " if frame_idx == 4:\n", - " break\n", - " break\n", - "\n", - "\n", - "# for calibration in frame.context.camera_calibrations:\n", - "\n", - "frame.timestamp_micros, frame.images[0].pose_timestamp\n", - "# frame.images[0]\n", - "\n", - "frame_timestamp = TimePoint.from_us(frame.timestamp_micros)\n", - "image_timestamp = TimePoint.from_s(frame.images[0].pose_timestamp)\n", - "frame_timestamp.time_s, frame_timestamp.time_s" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "7", - "metadata": {}, - "outputs": [], - "source": [] - } - ], - "metadata": { - "kernelspec": { - "display_name": "py123d", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.12.11" - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} From 4bd761ae7dfe60bc1962b0b2ad122f15467b3f7a Mon Sep 17 00:00:00 2001 From: Daniel Dauner Date: Sat, 18 Oct 2025 13:39:58 +0200 Subject: [PATCH 095/145] Use `laspy[lazrs]` (rust) instead for faster lidar (de)compression (#58) --- pyproject.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pyproject.toml b/pyproject.toml index a052cda8..09a4c7f9 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -47,7 +47,7 @@ dependencies = [ "lxml", "trimesh", "viser", - "laspy[laszip]", + "laspy[lazrs]", ] [project.scripts] From 1955ee319c3f306100710d37ae1bd5f37b5bc765 Mon Sep 17 00:00:00 2001 From: Daniel Dauner Date: Sat, 18 Oct 2025 14:29:09 +0200 Subject: [PATCH 096/145] Add lidar (+compression) to pandaset, fix coordinate issues, remove duplicate bounding boxes. Add coordinate axis to viser (lines). 
--- notebooks/bev_matplotlib.ipynb | 47 +++- .../datasets/pandaset/pandaset_constants.py | 98 ++++++- .../datasets/pandaset/pandaset_converter.py | 264 ++++++++---------- .../pandaset/pandaset_sensor_loading.py | 45 +++ .../datasets/pandaset/pandaset_utlis.py | 96 +++++++ .../log_writer/utils/lidar_compression.py | 6 +- .../sensor_utils/lidar_index_registry.py | 12 +- .../config/common/default_dataset_paths.yaml | 6 +- .../conversion/datasets/pandaset_dataset.yaml | 6 +- .../viser/elements/detection_elements.py | 45 ++- .../viser/elements/sensor_elements.py | 8 + .../visualization/viser/viser_config.py | 2 +- test_viser.py | 3 +- 13 files changed, 465 insertions(+), 173 deletions(-) create mode 100644 src/py123d/conversion/datasets/pandaset/pandaset_sensor_loading.py create mode 100644 src/py123d/conversion/datasets/pandaset/pandaset_utlis.py diff --git a/notebooks/bev_matplotlib.ipynb b/notebooks/bev_matplotlib.ipynb index 18c19e49..ae61f557 100644 --- a/notebooks/bev_matplotlib.ipynb +++ b/notebooks/bev_matplotlib.ipynb @@ -2,10 +2,18 @@ "cells": [ { "cell_type": "code", - "execution_count": null, + "execution_count": 1, "id": "0", "metadata": {}, - "outputs": [], + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Dataset paths not set. Using default config: /home/daniel/py123d_workspace/py123d/src/py123d/script/config/common/default_dataset_paths.yaml\n" + ] + } + ], "source": [ "from py123d.datatypes.scene.arrow.arrow_scene_builder import ArrowSceneBuilder\n", "from py123d.datatypes.scene.scene_filter import SceneFilter\n", @@ -16,7 +24,7 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 3, "id": "1", "metadata": {}, "outputs": [], @@ -31,15 +39,19 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 5, "id": "2", "metadata": {}, - "outputs": [], + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Found 82 scenes\n" + ] + } + ], "source": [ - "# split = \"nuplan_private_test\"\n", - "# log_names = [\"2021.09.29.17.35.58_veh-44_00066_00432\"]\n", - "\n", - "\n", "\n", "# splits = [\"wopd_val\"]\n", "# splits = [\"carla\"]\n", @@ -73,10 +85,21 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 6, "id": "3", "metadata": {}, - "outputs": [], + "outputs": [ + { + "data": { + "image/png": 
"iVBORw0KGgoAAAANSUhEUgAABNIAAAGBCAYAAACjLZwtAAAAOnRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjEwLjcsIGh0dHBzOi8vbWF0cGxvdGxpYi5vcmcvTLEjVAAAAAlwSFlzAAAPYQAAD2EBqD+naQABAABJREFUeJzs3Xd4W+X1wPGv5D01PGTLe2jYkmMnIZCQhBkIG0qgUEopm9JAf0AndDBLoKwuoKWE0bIKFMJeCWRBEiDDieWd2I634yFvy0P6/SGsxNjxILblJOfzPHmor17de5SCJZ173nMULpfLhRBCCCGEEEIIIYQQYlRKbwcghBBCCCGEEEIIIcThQBJpQgghhBBCCCGEEEKMgyTShBBCCCGEEEIIIYQYB0mkCSGEEEIIIYQQQggxDpJIE0IIIYQQQgghhBBiHCSRJoQQQgghhBBCCCHEOEgiTQghhBBCCCGEEEKIcfD1dgDe4HQ6qampISwsDIVC4e1whBDisOdyuWhvb0ev16NUyj0aeZ8RQojJJe8zw8l7jRBCTK7xvtcclYm0mpoaEhISvB0GZ9+ykkTrCdN6zYH+Pv710yxczoFpve7h5NJ7PkCjT/fa9T995tcUb35L/j8Sh6XKykri4+O9HYbXzZT3GTG6a/6+Hf/A0FHXuFwuVt40mz5H56jr4jOO59yfPz/qmv7eHv61fBa4XCM+Hmeez3m/+M/oQX+jYtc63v/LteNaK8SRRN5n9pP3GiGEmBpjvdcclYm0sLAwwP2XEx4ePu3XdzqdxOrjaK4umfZEmo+vHxH6VM5fegKPPvrotF57pqurq8NkMmGvL/NqIk0dk0JoSAiVlXvl7qI4bLS1tZGQkOD5/Xq08/b7jBjd9u3bOemkk2ipKUWXmjPqWoVCQVSimYU5qTz99NMjrrn77rt5+rmXxryur38gGl0iP7jwbFasWDHksbvuuouV/35l3K9BrUsG4I033uDUU08d9/OEOFzJ+8xw8l4jhBCTa7zvNUdlIm0wOREeHu61Nx2z2UxLTYlXrq2KMVBUXCJvuN8SFhZGSGgY9royr8ah1qXQ3t6Gw+EgOjraq7EIMVGS/HWbCe8z4uDmzZuHQqGguaZkzEQagDo2nYLCgoP+fzlnzhzaHn0UR1cbAcGj//+tjjFQVFw87FyzZs2irekx+hzd+AUEjRlTWGQ8vr7+VFdXy79j4qgi7zP7yXuNEEJMjbHea6TBgJfMyrJiry31yrW1cenYbDZcB9lacrRSKBQYDAbs9V5OpMWkAlBUVOTVOIQQ4kgVHBxMYlIyLdXjex/W6A0UFxcxMDDylnuLxQJAS83Y59PoDeTl5Q87bjQaAWhtKB9XTEqlD+qYJHmvEEIIIYSYZkdlRdpMYLVa+e9r/8Plck37nTWt3khLcxMNDQ3odLppvfZMl5lhZu3XxV6NITw6EYVCQVFREYsXL/ZqLEIIcaTKslrZWT6+ynBtnIFeh4Pdu3d7El4HMpvNKJVKmmtKiEmfM+I57PXlqKKT0MYZ2PZ+DXa7HbVa7XncZDK519WVEZmQMa64wqKSKSwsHNdaIYQQR76BgQE2bNjAvn372LdvH42NjZ5/Op0u/va3v054x4vdbic/Px+bzUZ+fj4FBQX89Kc/5bzzzpuiVyHEzCeJNC+xWCw4ujvpaK4hLCJuWq+tiTMAkJeXJ4m0bzGZTLz13kdejcHXLwBVVDzFxd5N6AkhxOGou7sbf39/fHx8Rl2XlWVl/abnxnVOrd79vmmz2UZMpAUGBpKSmkbzNy0baoq24OMXQHTyLFwuJ1+/+zjb3n2C2WfeQPq8szznWrhwoeccERERaLQRtI5SFe3oasdetwd7fRmt9WW01O6mpEWqy4UQQrg99thj/PKXvwRA6eNLSLiGwFAtAaEaqgo2c/rpp3HNNdeMeR6Hw8EPLruMzz/fREN9LQAKpRKtLpmeni4am+6TRJo4qkkizUsGt4E0VxdPeyJNFZWIj68/NptNGhR/i9FopLO1CUdnKwEhKq/FER6dQmGhbNcRQoiR9Pf3U15eTnFxMcXFxZSUlFBYVERRUTE11VWcc+55vP3WqlHPYbFYaGuqxdHVTkDw/oayLpeLjuYamqtLaK4poaWmBHutO0FWV1d30PNlWa1sLS6hv8/Bh48vx9HVSrAqiqCwCJqq3FVjquhE1DEpKJU+5OXlDUmkARiNBppqSmip24O9rgx73R5a68tp+yZx1tHa6Fmri4kl22zmmquvmuhfnxBCiCNUe3s7IapILrn3I/yDwobsfHr+1nns27dvXOcpKCjgzTfewLjge8w6bzFavQFVTAq+fgFs/+Apdn34pFd2VgkxU0gizUsSExMJDgmlubqEpFknT+u1lT6+aPWp2Gy2ab3u4cCztaa+bFwNqCebc6Cf9qZqfPwCKJDtOkII4fHZZ5/xyCOPUlhUREV5Gf39/QD4+Qeg1iUTFp1M9KyzCdJX8vFHH+F0OlEqD94KdvCGVuHnr4PLRXNNCfaaElpqS3F0dwIQHBJKRkYGx580D+tNV3HVVQdPWlmtFlav3YivXwDZp1/Fl6v+TFhEHAqFEvPCZRR+8QZJ2Sfj4xeAJiZpxPdgq8XCypUrKf3yPc/1DQYDJ8w1YzKdjdFoxGQyYTAYZHKhEEKIYaKioujpbBuWRAMICtXS2Nh4kGcOZTKZUCgU6E3zMBx37pDHtHEGOjs7qKysJDExcdJiF+JwIok0L1EoFGRkZNA8jsbEU0EVk86uXXleufZMZjC4t+/Y66YukeZyuejpaHZXG9SXYa8ro7W+nPaGMlrqKxjo7wPg9KVnTMn1hRDicLRixQo+37Kd9GPPYcFxP0Qdk4Jal0KIWofigIRZxc61lH75HlVVVaN+wDebzag1Wr747/0EBgZhMps5fVE2FstlWCwWLBYLiYmJoybjDmSxWOiw76Onw87ss35CVcEmWuvLufjOt1n7/G+JSZtDUFgEAOEx6ezKG55Ie/TRRzn++ONJSUnBZDIRGxs76Xf7XS4X1dXV7Ny5k/nz56PVaif1/EIIIbwnMjKSgf5e+no68A8aesMlIFQz7oq0oKAgkpJTRhzKo4lNByA/P18SaeKoJYk0L8qelcV7n33plWtr44zYPn1GSnK/JTQ0lJjYuEmZ3Nnf20NrQ8U3/WzKsdeV0d7gTp51d7QC7oRqfEIiZpMR86IzMZlMnoqD+Pj4Q45BCCGOFGlpaeQWVbLw0t+Ouk6tSwGguLh41A/4gYGBFBbk09HRQXJy8pg91cZitVoBaK4pQW+cx6nXPsxrd5/Hpyt/RU3xFo4592bPWm2ckbzNrw07R3h4OFdfffUhxXEgh8NBQUEBubm55Obmsn2H+58tzU0ALFy0mI0b1k/a9YQQQnhXVFQUAD0dLfj4+uPjF4Cjq92dWAvRsG/f+CrSwF0lvbN8aCKtp6OFEE0M/gFB5Ofnc8YZcuNfHJ0kkeZFFouF/7z4Ei6nc8jd9Omg1Rtoa7VTU1NDXNz09mib6cxmE3vrxpdIczmddLTUDqkua6svo62hnN
bGalwudxNolVqD0WhkwaJsjMaLMZlMmEwm0tLSCAoKmsqXI4QQRwSj0Yj92efHfM8Mi4xD6eNLSUkJS5YsGfWcOp1u0obuGI1GfH19afkmkRaqieHkqx7gg7/dAEByzv6epFq9ga376mlqaiIiImJSrj/o1Vdf5Z133mHb9h0UFxV6tsBqdElo4kykL/4hEfEZ1O/ezvb1L465BVYIIcThIzIyEoDqoq9Y+9xvSMw6kb271gFgXHAh9Q17x30uq9XChs3/obm6hA8fvxHLSZfxxasrCAqPRKtPkzZB4qgmiTQvslgs9Dl6aGusRBWdNK3XPnACmSTShjKbjOS/v3bIMUdXm7uy7ICEmbu6rJy+XgcAfv7+pKamMdtkwnzOwiHVZREREVL5J4QQh8BgMNDr6KbTXk+oNvag65Q+vmh0idM++djf35/UtPQhLRuSs0/huAt/Tk97C5rYNM/xA9+DTzjhhEmLoauri8suu4zwqERiTcexIOciIhMy0MYb8Q8MHbLW1z+Q7R8+xe7duz1tDYQQQhzeBivS+nu7ADxJNJUumRB1FI3528Z9rszMTFobq9n16b9pbajgi1dXABCdMouA4PARWxQIcbSQRJoXeSZ31pROeyItLCoeP/8AbDYbp59++rRee6YzmUw0r3yWz5673V1ZVl9GZ2uT5/GY2DjMZhPmeSd7KsuMRiNJSUn4+sp/UkIIMRWMRiMA9vryURNpAGFRSRQVTW8iDSAry8qWvKHXnXPWT4atU+mSUPr4kpeXN6mJtODgYCIio4mfcwbHXXjbqGsjEjIAyM3NlUSaEEIcIQarnP0CQph9xvXkrn4WZ38fx55/Cx0tdeMeNgD7v6sa559PY0U+DeU7Achc/H1aakvJ+/hTaRMkjlryrd+L4uLiCA0Lp6WmhJQDtnxMB6XSB60+XUpyR3DGGWfw+BNPEti5hznHmDGZzh0yKS00NHTskwghhJhUKSkp+Pj40FpfRnzGglHXqqJTKCxaN02R7We1WPjwk8/GXOfj6482JmVK3oNn52RTWFUw5rrg8AhC1VHk5uZy0UUXTXocQgghpl9AQAChYeH0dDQz99zllHz5Dv19DlLmnEbpl+/R1dlBT08PgYGBY57LbDajUChobahgyfWP8trd5+NyOYnPXAhAR3sb1dXV0tdZHJUkkeZFCoUCq8VCU3WJV66vijWwUyZ3DmM2mykpLvJ2GEIIIQ7g5+dHUnIK9vryMdeqYpLJ+/R5+vr68PPzm/rgvmG1WulsbaK7vckzofNgVLGGKdkWk5OTzaav/z2utdp4Mzt25E56DEIIIbwnIiKCnvYW/AKCuej3q1D6+OLj609gqAaAxsbGcSW/goODSUhMormmBPPCZZz3i3+jUPrg6x+IRr9/cqck0sTRSLrLellWlpXWuuFjhSdLQ/kuaku2jviYVp9Ofn6+pyG+EEIIMZOZTSZaR5mq7HI6AffkzoGBAcrKDn0C80R4WjZUj/2+ro2bmqrw7Oxs2ppq6emwjx1DvJntO3ZMegxCCCHGp729nW3btvHf//6XRx55hNbW1kM+Z1RUFN0dzQAEhWkJCA73/G9gQts7s6xW7N/0/oxOmUVUkvt9LiwyHr+AQNndJI5akkjzMovFQnPNbpzOgUk9r8vpZMdHK3lzxSW89+drGOjvG7ZGozfQ2dFOZWXlpF5bCCGEmApGo4GOfe6JY12tjXQ013oea6kp5X9/XMbn/70flS4ZYNoHDqSnp+Pr50dLzdiV5hq9geamRhoaGiY1huzsbACaqgrHXBuRYKa6qpKWlpZJjUEIIcTI3n//fa6//npOOPEkdDGxhIeHM3fuXC699FJ+8YtfcO+99x7yNaKjoujpGP57fbAibd++feM+l8WSSWv97mHHlUoftLFp5Ofnf/dAhTiMydZOL7NYLPT3OWhr2Is6JmVSztnV1sSnz/yKyrz1KJQ+6M3H4eM7fGvLgVPDEhMTJ+XaQgghxFQxGo20NOxloL+PNU//nKqCL9Cl5hCdmk3+ulcY6HOgik4iRK3DPyBo2hNpfn5+GAxGmseRSBt8D87Ly+OUU06ZtBiMRiP+AQE0VhYQZ54/7HGXy0XhxtdJsCwiMt49cGDnzp2ceOKJkxaDEEKI4VwuFz+47IfgG0R06hwS5i0jS5eCSpeMWpfM6qd/PimDcqKjo9hRPHzb/nepSLNYLNgbqujr6cQvMGTIY6qYdHblSZsgcXSSRJqX7Z/cWTJiIi3vsxdpqiokOftU4jIW4OsXMOr5qvI/Z83KX+JyOjn12of5dOWvSM4eeZBBWEQc/gFB2Gw2zjzzzEN/MUIIIcQUMhqNOAf6aW+qJnn2EqoKvqCjpZbGdQXEmeazN28dqXOXolAoUMckT3siDWBWlpWN20dPpA1WoSsUCmw226Qm0nx9fcnMtNBU6a5IK97yNl32BhKzTiQ4PIpPn/01FbmfknbMmSy57lF8/QLIzc2VRJoQQkwxhUJBbGwsfvpjWHzZH4Y9rtIlU1S8+ZCvExkZSUt9ORteuoee9mYcnS04Oprpbndv9+zt7R33uTIzMwEo3/kZCoWS5poSWmpKaavfTXNtGRkZGYccrxCHI0mkeVlMTAxqjda9DWTO6cMeL9n8NnW7t5G/7hV8A4JJsCwiOftUkmad5LmrADDQ38dXq/7M9o/+RXzG8Zx6zUNU2jbicjlJzj55xGsrlEq0cQbZ2y6EEOKwYDQaAWitL8N68uVU5H5GU1URP1zxKcWbVlFTvIXELHdCKCwqmaLi6R/mY7Vaeef9j9z9R10u2ptraK4upqWmhOaaUlprS2muLaXP0QNAbGzspMcwZ3YO7326BZfLxddv/43W+nI2vfYgygOq0+MzF6L08SUizkBurgwcEEKI6WA2mdheOnL/TnV0MpvWvUR/fz++vt/9a/r555/Pq6+9jrN+O/HR0USnxBMVNZvIyEj0ej2XX375+OM1mwkIDGT1U7cCEBmlw2q1YF2wlMzMTE4/ffj3VyGOBpJI8zKFQoHFkkn9QbaBnHL1n3jtnvPRxKWTknMa5blr+Oy536BAgS59DsnZp6BLzWbT63+iscLG/At/Qc7Sa1EolZTnriE6NZtgVdRBr6+KSSd3566penlCCCHEpImLiyMwMAh7XTlJsxScfNUK/nvnOWx8+R7am2pIzDoRv4Bg4Js7+9vfnvYYLRYLXe123rz/Qlpq99Db0wVASGgYmZmZLD71OCyWq7FYLFgsFuLi4iY9huzsbJ7/9ws4B/o5+coVvPXQ5ehSsokxzCUwRM3m/z3kSThq4kxs275j0mMQQggxnNFoYP2mkQfBqXTJ9Pf1UVFRQVpa2ne+xqJFi6gon5xhO6GhoeTu2MG+ffvIyMggImL0idRCHC0kkTYDZFmtFL/32YiPqXRJnHD53axZ+QusJ/2QC29/la7WRip2fkZ57hq+fvuv9Pf2EBYZzwW/fhldag4A/X0O9uZtYM5ZPxn12to4Azve/RCn04lSKbMnhBBCzFxKpZK09HRaG8oBCFHrOPGKe/n4yZsByD79as9at
S6FbTXVdHZ2EhISMtLppsQpp5zC5T+6Al8fJRbLjz0Js4SEBBQKxbTEkJ2dzUB/L/b6MmINx3DsBbey5Y2HmXvuTZRsfouIBDOhmhjAPXDgqzffPeQKCCGEEGMzGo3Y91Ux0OfA6RzA1z+I3q429mz7mLiMBYB7UM6hJNImm8lkwmQyeTsMIWYU+cQ0A1gsFv719EoG+vtGHApgXHA+lfmfs/7Fu9Cl5qCOSSFj8cVkLL6YPkc3NUVbiEmfS0BwmOc51YWb6Xd0kZKzZNRra/QGuru7qKioICVlcoYdCDHI5XKxb98+1Go1/v7+3g5HCHEEMJmMfFVQ7vk5be4ZZCz+PnW7t5E06yTP8cHJnaWlpZ5JltNBpVLxn38/P23XG8msWbMAaKosICLOyOwzrqO2+CvWPP1zXC4XmSde6lkbEZ9Br8NBcXGxpxeOEEKIqWE0GnG5XDRVF/O/+y4c8tiZN/0DX78AiouLpX+1EDOc10uQkpOTUSgUw/4sX758xPXPPffcsLWBgYHTHPXkslgsDPT30dZQcdA1J/zwTkLU0Xzy1C0M9Dk8x/0CgkiaddKQJBpA+Y41hEcloNGnj3rtiDh3vxnpkyYOhcvloq6ujk8//ZS//e1v3HjjjSxafAIRkVHodDrOOfc8b4cohDhCmIxG2r6pSBt00o//yKX3fIB/YKjnmPqbRFpJyfT3SfM2jUZDXHyCZ+CAQqnklGv+hI9fAI5OO0lZ+wcLRCaYAaRPmhBCTIPBXp/tTdXDHovLOB5NTPJR+b4lxOHG6xVpX331FQMDA56f8/LyOO2007j44osP+pzw8HCKioo8P0/XVompYrVaAffkzoMlvvwCQzjt+j/zxv0Xs/l/D7Pw0t8e9Hwul8szkWusv5sQTQyBwWHYbDbOOeec7/4ixFHB5XJRW1tLfn4++fn52Gw28mw2bLZ8Wu0tAPj6+qOJTUEVm0764stpb6xi3TrZNiSEmBxGo5HWxhr6HN34BQQddF1gqIbgMLVXJnfOBLNzcthZVuj5OShMy5k3/YPmmhJPGwiAgBAVqkg9ubm5/OAHP/BCpEIIcfTQ6XSEhIbR1rCXy/64mlfvOY9+Rxepc8/ALyCI0MhECg/4niuEmJm8/q02KmpoI/wHHniAtLS0UcewKxQKYmJipjq0aRMVFUVEZBTNNaWMths+KsnC/It+yRf/vZ/4zONJmjXyNM7GChud9nqSc04d89oKhQJNbDp5eXnfMXpxJHK5XNTU1GCz2TxJs1278sgvKKCt1Q6Ar18AWn0q4bo0TCddiUafjlZvIDwqAaXP/l8tVQVfUPj5/ygrK8NgMHjpFQkhjhSDd/PbGiqI+Kaa6mBUupSjNpGWk5PN2o3/HHIsKslCVJJl2FpNnIkdO3ZMU2RCCHH0UigUGAwGWhsqUOmSWHjJHWx86R4Mx7oLGtS6FIpsH3o5SiHEWLyeSDtQb28vL7zwArfddtuolVQdHR0kJSXhdDqZM2cO999/PxbL8A+GgxwOBw7H/u2QbW1tkxr3ZLBYLFRVj/1hf9aSK6ku+IJPn/k137/rHULUumFrynasJiBYRUz63HFdWxWbzs5dkkg7GrlcLqqqqjzVZfn5+eTZbOTnF9De1gqAn38AWn064bo0Mk49Hk1sOhp9ujthpvQZ8xpavTt5ZrPZJJEmhDhkg4k0e0P5mIm0sKhkCgqPzjv72dnZdNj30dXaSLAqctS12ngz279+c5oiE0KIo5vZZGTD9t0AZCz+PhmLLkbxzdA3VUwyOz6qpLu7m6Cgg1ddCyG8y+s90g60atUq7HY7V1555UHXmEwmnnnmGd566y1eeOEFnE4nxx9/PFVVVQd9zooVK1CpVJ4/CQkJUxD9ocmyWmirKx1znUKh4OSrHkDp48fqp3+B0zkwbE157hoSs04ccXDBSLR6A0WFhUO22Ioji8vlYu/evXzwwQc88sgjXH311cw79jjCwlUkJiZyxhlncPsdv+O9T7fQooghc8l1nHnTP7js/tVc/fdcLvzdKpZc9whzzvoJKbOXoNYljyuJBhAUHklQqIr8/PwpfpVCiKNBREQE4So1rfXlY65V65IoLRn7vfVINDhgoamqYMy1kQkZNNTXsW/fvqkOS4ij0gMPPIBCoeCWW27xHOvp6WH58uVEREQQGhrKsmXLqK+v916QYtoYD+j1qVAoPEk0cFekuVwudu/e7aXohBDjMaMq0lauXMmZZ56JXq8/6JoFCxawYMECz8/HH388GRkZ/POf/+Tee+8d8Tm33347t912m+fntra2GZdMs1gsNP/jnwz09+LjO/p0w6CwCJZc9whvP3IF29//J3PP+annsfamapoqC5lz1o3jvrY2zoDD0UNZWRnp6aMPJxCHF5fLxXXX38DLL79MV2cHAP6BwWhj01DFppO19ES0cQY0semERcQNeSOfLAqFAo3eIAMthBCTYnBbjL2ubMy1Kl0Kzc2NNDc3o9VqpyG6mSMtLY3g4BAaKwtJsCwedW1E/P6BA0uWjD7tWwgxMV999RX//Oc/PdN0B91666289957vPbaa6hUKm666SYuvPBCPv/8cy9FKqaL0WikvWUfvd3t+AcNHRin0qUA7kE5g320hRAzz4xJpFVUVLB69WreeOONCT3Pz8+P2bNnU1p68DvOAQEBBAQEHGqIU8pqteIc6MdeX+6ZpDmaOPN85pz1E756+6/EmecTkz4HgPLcT1H6+I35oflA2rj9W+8kkXbkefXVV4lMmYv1lMvRxhkI1cROScJsNOqYNHblSSJNCDE5MswmPt0ydpXrgZM7jzvuuCmOamZRKpVYs6yeyZ2jCY9OxD8gSBJpQkyyjo4OfvjDH/Kvf/2L++67z3O8tbWVlStX8tJLL3HKKacA8Oyzz5KRkcHmzZuZP3++t0IW02CwRUFrfQVRyUOTZUFhWgKDw47a/p5CHC5mzNbOZ599lujoaM4+++wJPW9gYIBdu3YRGxs7RZFNj8Eeby3V4x93PO+8n6FLyeaTp27F0enuZ1W+Yw1607EEBIeN8ez9glXRBIWoZODAEUihUJCZmUFgqJqkWSdNWdXZWDT6dIqLimT7sBBiUhiNxjG3djqdAyh93C0OjtYvJLNzcrDXjN4jzuVy0WVvIEQTQ25u7jRFJsTRYfny5Zx99tnDEtRbt26lr69vyHGz2UxiYiKbNm066PkcDgdtbW1D/ojDz2DP4H0VeTSU7aR4y9t89dZf+eSpW1l1/4X09nTR2Ng4qdd0uVzU19ezadMmXnzxRe69915uvPFGysrGru4WQgw3IyrSnE4nzz77LD/+8Y/x9R0a0hVXXEFcXBwrVqwA4J577mH+/Pmkp6djt9t56KGHqKio4Nprr/VG6JNGq9USFR1Dc00xML5kotLHlyXXP8qrd5/H2n//lpN+vIKaoi9ZeMkdE7q2e+tdumy9O0JZLRbeXbPFqzFo9bJ9WAgxeYxGI51tzTg6Wxno78NeX0Zrfbnnnx37Kmipr6C/zz1oKCIiwssRT42Ojg5KSkooKiqirKyMq666ashU8+zs
bP719EoG+hz4+AXQ3+egpaaUpsoCGqsKaakqpLm6iK52OwCx+u976ZUIceR55ZVX2LZtG1999dWwx+rq6vD390etVg85rtPpqKurO+g5V6xYwd133z3ZoYppplar0cXEsu4/v/cci4qOwWQyYj51PkbjFVxzzTWTdr3nn3+eG2/8Kd3dXZ5jIaoIers7USgUPPHEE5N2LSGOFjMikbZ69Wr27t3L1VdfPeyxvXv3ojyggqalpYXrrruOuro6NBoNc+fO5YsvviAzM3M6Q54SVquF8glUpAGERcRx0o//yMdP3kx/bw/OgT6Ssk+Z8LXVMrnziJWZmcm/X3gRl9PplWo0cFekAeTn50siTQhxyAa3xfznl4vo6+0B3DeF4hMSMZuMmBaejtFo9PxJSUnxZriHpL+/n4qKCoqKiiguLqaoqIjComIKC4uoq632rFMoFBQWFvL88897jmVnZ+Mc6OejJ2+is7mG5prdOJ0DKBQKklNSOTYnh5zLziY7O5vs7GwSExO98RKFOOJUVlbyf//3f3zyyScEBgZO2nkPh77PYnzWfvYpO3bswGg0YjAYCAsb/26iidqwYQM+geGccfXDhEcmEBYVj39gKB8+/lN2794zZdcV4kg2IxJpp59+Oi6Xa8TH1q5dO+Tnxx57jMcee2waopp+WVYLua+9M+Hnpc09g8wTLyV/3StEJmQQFnHwYQ0Ho40zsmXzm/T39w+rChSHN4vFQp+jh/amKsKjvPMlKVgVTWBIOPn5+Zx33nleiUEIceTIzs7miSeeoL293ZMsS01NndQvrDPFKUtOY8O6tQD4+QegiUkhNCoZ/ZzzyIxJRa1LRq1L4fP//pH8gqH90HJyclhy2lI62tuYvfBUsrNvIzs7G6vVSmhoqBdejRBHh61bt9LQ0MCcOXM8xwYGBli/fj1///vf+eijj+jt7cVutw+pSquvrx9SVfpth0PfZzE+ZrMZs9k8LddKTU3F2d9DyuzThhwPj0qkdPe6aYlBiCONZExmEKvVSsvf/05/nwNfv4m9SS685LfgcpGYddJ3urZGb6Cvt5fdu3djMpm+0znEzDRYrdlcU+q1RJpCoUAr24eFEJNEqVRy443jn059OCsvKyPtmLNYcPGvCdXEHLSyWKVLofDTz3C5XCgUCgCCg4P55OMPpzNcIQRw6qmnsmvXriHHrrrqKsxmM7/+9a9JSEjAz8+PNWvWsGzZMgCKiorYu3cvCxYs8EbI4giWmppKV7sdR1cbAcHhnuPhUQnYPqtgYGAAHx8fL0YoxOFHEmkziMViweV0Yq/bQ2RCxoSe6+sfyIlX3Df2woPQ6t1NL/Py8iSRdoSJj48nJDSMlpoSkr/Dtt/JoopJl8mdQojDnsPhoLCwEJvNhs1mIy/Phn9AAM8+s3JKqrwyzCZKGnrHrDZXx6TQ1mqnsbGRqKioSY9DCDF+YWFhWK1DpzGGhIQQERHhOX7NNddw2223odVqCQ8P5+abb2bBggUysVNMutTUVADaGiuJSrR4jodHJdDf10dVVRVJSUneCk+Iw5Ik0mYQT+VQdfGEE2mHKig8guAwDTabzXNnTBwZFAoFGRkZNNfs9mocmth0tn31Dk6nc0jfQyGEmGwOh4Pi4mJsNhv5+fnkfZPwOvPMM/nLn8fXHqK3t9dzjsGE2a68PMr27PZMIA7XxhAaGU9N8df89MafcPLJJ0/6azGbzWzNe2/MdeoY9xeloqIiSaQJcRh47LHHUCqVLFu2DIfDwdKlS6Xpu5gSnkTavm8l0iLd/fX27NkjiTQhJkgSaTOIWq0mJjaO5prSab/24OTOPNl6d0TKslr4YO3XXo1BG5dOT0835eXlnjd0IYQ4VO3t7bz77rvfJLzcSbMDk11hmijUsQb6XGH88x//4NFHHh51C8vAwADnnX8BH3/0If39/UPOoU6cz6L5P0KjN6CNMxAQHI5zoJ+nl2eTl5c3JYk0o9GIveFJnAP9KH0O/rFNFZWIQqGguLiYRYsWTXocQohD8+2+z4GBgTz++OM8/vjj3glIHDUiIiIICQ2jtaGCbe//k+ScU/jyzcfod3QD7kTaVLx/CXEkk0TaDJNlzaSkZmKTOyeLWm9k167tXrm2mFoWi4UXX3rFu5M7Y/dP7pREmhBistx999088sgjhGl1qGPTUSct8CS7NLFpBIaqAai0beDdx66mvLyctLS0g56vv7+fjz78gJS5Z2I56Qdo9QYCQzUHXa/08SVCnzplPSBNJhMD/X20N1aj0h28YsDXPxBVVBxFRUVTEocQQojDk0KhICUlhar8TVQXfM6WNx72PBYeEcuePTK5U4iJkkTaDJOVlcXXL/7PK9fW6g1s2vBfent78ff390oMYmpkZmbS6+imvbmG8Mh4r8QQookhMDgMm83GOeec45UYhBBHnqCgIMI00Vz+p42jrtN80wvUZrONmkgLCAggJTWNoFANeuOx44ohPCadXbvyxh/0BAz2LbXX7xk1kQYQHp1CYaEk0oQQQgxlMKTzdWEd6phU7HXuxFnWqVfQXF0oiTQhvgNpVDTDWCwW7A176fum1HY6afXp9Pf3U1LinYo4MXUG+++1eGHb8CCFQoEmNp38/HyvxSCEOPJkZmbS3tKAo6tt1HUhah2BwWHj+h2UlWWlpXb8vy+1egO2/HxcLte4nzNecXFxBAYGYa8rP+iawd/tquhkCqUiTQghxLekpabS0VTN6Tf+DaWvHwDpx55DWEQCJaXe7aMsxOFIEmkzjMViweVyYa+d/l9oB96tF0eWxMREgkNCafbStuFBqtg0mdwphJhU471RMJFkvtVioWUCvy+1cQZa7S3U1taO+znjpVQqSTcYsNe7KwYKP3+DqoIvcA7009vTwWfP3c4rfzgT29qXUMeksmfPbk9vNyGEEALcAwdaG6vRxKRy4o/uZfaZN6BLzSE8KoGysjJvhyfEYUe2ds4wnsmdNSVEJVvHWD25gsK0hKqjJJF2BFIoFJjNZq9WpIG7amP7O+/L5E4hxKQxmUwolUqaa0qJSZ8z6lpVbDo7x7EF02q10tnaSHd7M0Fh2jHXH3gjSq/Xjy/wCTCbTWzeVUZHSx2fPfcbcLkIDNXg6x9IR7M7eaeJTcM50E9/Xx/l5eWkp6dPehxCCCEOT6mpqTgH+uloqcO8cJnneFhUAs1NjbS1tREeHu7FCIU4vMg32RkmLCyM+IREr1UOqWNlcueRalaWldY67ybSNPp0uru72Lt3r1fjEEIcOQIDA0lOSR1XBZlWn05RYSFOp3PUdRaLBWDcVWnhUQn4+QdM2Y0os8lEa0M5oZoYZp36YwB0qTlEJmaSMud0gsIiiDEcg0qXAkBxcfGUxCGEEOLwNDjoq21f5ZDj4ZEJAFKVJsQESSJtBsqyWie0pWQyaWKnrmGy8K7MzEyaa0qnpIfPeA1O7pSqRyHEZLJaLNjH0dNMozfQ09NNRUXFqOsMBgO+vr40j7OKV6n0QatPn9LJne3N9fT2dHDcsl+gjTPS3lTDaTf8hca9+aTMXoJS6UOoJga/gECZ3Cm
EEGKIpKQkFArF8ERaVCKADBwQYoIkkTYDWa0Wr1UOaeIM7NldisPh8Mr1xdTJzMykt6fLsw3IG0K1sQQEhcjAASHEpLJaLdjr3L1F+/scDPTtfw/rat3Hu3++hk2v/wmtfnzJfH9/f9LSDROqDlfFpLNz167vEP3YjEYjAK31Ffj6BbDk2kew1+/ho8eX095YRercpQAolEo0umRJpAkhhBjC398ffVw8bY1DE2lBYVr8A4MlkSbEBEkibQZyT+6soq+nc9qvrdUbGBgYkA/hR6DBrUreHDigUCjQ6mVypxBicmVmZtLWVIujq52NL9/Ls7cex8f/+Bm5Hz/Dq3edS2Xeehr35hOiiSEgKHRcv4NmzcrCPsbvywMTdhq9gfz8gimp+h1MpNnr3F90IhLMzL/wF+zNW0dAsAq9ab5nbVh0CoVFsrVTCCHEUGlpabQfUJHW5+impbaUwFC1JNKEmCAZNjAD7U94lKJLzZ7Wa2sPaJg8a9asab22mFqJiYkEBQXTUlNKUtaJXosjXJc2rmbfQggxXp6eZrWlqKIS6OvpZG/eenZ//QG61By625tIyVniSeaPZwum1WLhvQ8+weVyoVAo6GptBIWC4PAIAOpKt7H6X7eRMvs0Fl76W7T6dDra26iqqiIhIWFSX59arSYiMhp73f4eNrOWXElHSx3aOAM+vn7718akUPj1qkm9vhBCiMNfeloqX736Jm89eCntjZW0tzR4HktMTPRiZEIcfiSRNgNlZGSgUChoqSmZ9kRaQIiKMK1OelgdgZRKJSazmZZx9BGaSto4A7nvfeT5cirEdFuxYgVvvPEGhYWFBAUFcfzxx/Pggw9iMpk8a3p6evj5z3/OK6+8gsPhYOnSpTzxxBPodDovRi4OxmQyud83a0vJWXodVflf0FxTyvduf5WmqkLq9+wgOedUAFQxaezKG/s9zmKx0NXeQndbE8GqSD76x83UlW4lJnU2mjgDhRtfx+UcYGCgD3D/bgP3jajJTqQBmM1G6uv3J9IUSiULL7lj2DqVLoWtdTW0t7cTFhY25nkbGxvJz88nPz8fm82GLb+AG66/jksuuWRS4xdCCOFdN9xwA3v2lJOQEEdq6jmkpqZ6/sTGxno7PCEOK5JIm4FCQkJITEoed5PjyaaOTSdvHF8yxOFnVpaVTz7f6dUYNLHpdHV1UllZKXe/hFesW7eO5cuXM2/ePPr7+7njjjs4/fTTyc/PJyQkBIBbb72V9957j9deew2VSsVNN93EhRdeyOeff+7l6MVIgoKCSEpOoaW6FIVSySnXPMSrd53Dlv89hK9/MJGJFkK17i8JGn06ue99iNPpRKk8eIeLA7fDB6siSco6kbqSr+loqaWpqojk7FMo2/4JydnuBF1YRDz+AUHYbDbOOOOMSX+NGWYzpas3j7lO/c3kzpKSEubMmeM53tnZyVdffeVJmOXZ8sm35dPY6K5IUPr4oo1JxuHoofH+FZJIE0KII8yxxx7LZ5+t8XYYQhwRJJE2Q2VZrews99bkTgM7d23wyrXF1MrMzOS/r73h1WqwA6s2JJEmvOHDDz8c8vNzzz1HdHQ0W7du5YQTTqC1tZWVK1fy0ksvccoppwDw7LPPkpGRwebNm5k/f/5IpxVelmW1klvmvgEVoo7mlKsf5P2/Xg/AvPN/5lmn0Rvo6upk7969JCcnH/R86enp+Pn701JTQnzGAmafcT1VBZtoqSnhsvs/oWDj61Tlf0Gc6VjAXSGm1aeTlzc1W9eNRiP2F18e8/e3OsadSCsqKhqSSLv++ht46aUX8fH1QxuTQnhMGskLLmFOnAFNbDoqXRI+vv7sXP08X7/5MAMDA/j4+EzJaxFCCCGEOJzJsIEZymq1YK/1UiJNb6C8bA/d3d1eub6YOhaLBUd3B50tdV6LIVQTi39gsAwcEDNGa2srAFqtFoCtW7fS19fHkiVLPGvMZjOJiYls2rRpxHM4HA7a2tqG/BHTy2LJpPWbyZ0ASbNOJmvJj/ELCCFl9ume44OTO8f6HeTr64vRaPIMaFEolZx6zUM4nQN89uztlG9fTYJ1MT5+AZ7nqGLTp6wHpMlkwtHdSVdrw6jrAoLDCVVFDhsa1N3dTUzabK75ey4X3fUep//kr8w7/2ekHXPmN33W/AH334/D0UNZWdlIpxdCCCEOWWFhIc888ww33XQTH330kbfDEWLCJJE2Q1ksFs8EsummjTPgcrkoLCyc9muLqZWZmQngtW3D8E3VRmya9OETM4LT6eSWW25h4cKFWK1WAOrq6vD390etVg9Zq9PpqKsbOQm9YsUKVCqV589U9MgSo8vMzKS1sZreng7PseO/fzvX/G0bEfH7+9+FavUEBIWMb3JnlnXI5E53pduf2Ju3jobynZ6+a4M0egOFBQU4nc5JeEVDDfbwO3DgwMGodCkUFw+d3Gm1WuhsrhoymGAkmgOGDgkhhBCTraqqipzsHK699lqeXfksP7n+Bm+HJMSESSJthhr8QueNxvCDd+vlQ/SRJzk5mcDAIFq8mEgDd9XGeJp9CzHVli9fTl5eHq+88sohnef222+ntbXV86eysnLsJ4lJ5ZncWbO/Kk2p9EHxrT5oCoUCzTiT+RaLhZaaElwul+dYUtaJzD7zBiITLcMmIGsP2DY62VJSUvDx8cFeP3YiLVyXQn7B0JthmZmZtLfso6fDPupzg1VRBIWopGpYCCHElAgKCsLR6+Bn51zL5SdeRPneClpaWrwdlhATIom0GcpsNqNUKmmunv7tnf5BYagi9ZJIOwINTu5srvHOtuFBmth0CgoKhnw5FWK63XTTTbz77rt89tlnxMfHe47HxMTQ29uL3W4fsr6+vp6YmJgRzxUQEEB4ePiQP2J6mc1mz+TOsUxkcmd3Z9uw7ZTzl/2Ci/+wisBQzZDj2jgjwJT0SfP39ycpOWXUirSBPgfNNSU4B/ooKRmaABysSG6p3X2wpwPfJBr1aZJIE0KIw1RfXx/5+fn09vZOybnLy8sn/Bne4XDw85//nPnHzScnOxtdtI6/v/cMT330b8LCwujr65v0WIWYSjJsYIYKDAwkOSWVFi8lPFQx6eyaoj4vwruyrBbWbJ7aJGnxprfo6bSTdeoVIzbF1ujT6exop6qqSrbAiWnncrm4+eabefPNN1m7di0pKSlDHp87dy5+fn6sWbOGZcuWAe7G7Xv37mXBggXeCFmMQ3Bw8DcTr8d+39ToDez84JMxG/cPVoc3V5cQotaNed5QbSwBQaHYbDbOOeec8Qc/TmaTibzKPXQ012KvL8NeV4a9vozWujLa95Vj31eF65ttpcfOXzDk9ZlMJpRKJS01JcQa5o56HVWsYcp6vQkhhJg8u3fvZseOHd9MY7axa1cepSXF9Pf3c975F/DWqje/03mbm5vZtWsXRUVFFBcXU1hYRGFRERXlZfT393Pffffx29/+dtzne/fdd3n00UeZb5xLm72N8753PmlpacyZM4d58+YRHR39neIUwlskkTaDZVmtbCvx3sCBXXkyHvlIlJmZyetvvDUlkzt7ezrY8O
LdFG9ahV9ACJYTLx3SiHuQ9psePPn5+ZJIE9Nu+fLlvPTSS7z11luEhYV5+p6pVCqCgoJQqVRcc8013HbbbWi1WsLDw7n55ptZsGCBTOyc4awWC3l7x65I0+rT6ersoLKyctTpwSkpKQQEBNJcU0KCZdGY51UoFGj16VNW0Z2ZmcH77z/Mf351AuAeiJCckkq22YzpjO9jMpkwGo2YTCaio6OH/I4PCAggJTWN5nFU7Gli09i65S2Z3CmEEDNYfn4+2dnZ9Pf3ExymQaM3oNbPZsGc79NQtpONG9d/p/P29vaSnTOHqsoKFEol6qgEwqOTUSUv5Pj5l1O8+W0+W7t2Qom0jIwMALr7eujv76erq4u77rrrO8UnxEwgibQZzGq18On6f3nl2to4A7kfr6Szs5OQkBCvxCCmhsVioaernU57PaGakbepuZzOYX2FxtJQtpNPnrqV7rYmfHz9Scw6ccQkGkBYRBx+AYHk5+ezdOnSCb8GIQ7Fk08+CcBJJ5005Pizzz7LlVdeCcBjjz2GUqlk2bJlOBwOli5dyhNPPDHNkYqJslgy+fyrl8Zcp4ndP7lztESaj4+Pezv8BNosTOXkzjvvvJO0tDTi4+MxmUykpKTg6zv+j3JZVitbS9yJtG/fTOnpsPP5f+8ndc7pnsmd5eXlpKWlTfrrEEIIcej6+vro7+/nzJv/SdKsk4f8Tt8dHkHRF2/Q0NAw4WovX19fGvc1MPusnzDvvJs9U50HtTdWU2T7cELnNJvNnHfuedTW1nDNOddy4403Tuj5Qsw0kkibwaxWK+0tDTg6WwkIUU3rtQcrhgoKCjjmmGOm9dpiann65NSUHjSRtvbfv6W2ZCspOaeSnLMEXVoOSuXIVQkup5MdH6/kyzcfJTIhk5N+fD9vP3w5ybNPHXE9DE7unLqqDSFGM56+HoGBgTz++OM8/vjj0xCRmCwWiwV7QxV9PZ34BY58E8jR2UpHSx0KpRKbzcYZZ5wx6jlnZVlZs2n8v6u0egPb3n5vSqq5QkND+clPfvKdn2+xZLJm/RcAfLryl9jrykjOORV1TApfvLqCjuZacDk5btkvAHeiURJpQggxMw1u2e+yNwzbZTL4XS4vL49TTjllQudVKpWkGww4Ou3DkmgA6pgUdnxUSXd3N0FBQeM+51tvvzWhOISYySSR9g2n00l9fT3l5eXEx8fPiO1mgxPImmtKiDVMbzJLE+v+4Gyz2SSRdoQZ11Yll4vW+jJyP3mWHR89TVBYBEnZJ5Ocs4SEzIX4+gcC0NW6jzUrf0lV/ufknHEdx15wCwXrX0Xp40tS1kmjxiGTO4UQk81zo6BuD+GRCTTXlNBSW0pLdQn2ut3Ya0tob9kHuD/UD241GY3FYpnQdnhtnIGenm7KyspIT08/tBc0yTIzM2lvrqensxVHVysN5TtpqNgFLhfhUe7PPck5pxKi1hEYHEZ+fj7nnnuul6MWQggxksDAQFLT0mmuLh72mCo6CR9ff2w224QTaQBms4nNu0YebqPSpeByuSgtLSUrK2vC5xbiSHBUJ9J+9rOfUVNdRVnZHvZWVtHb654WotWo2dfYhHKCW9smm9FoxMfHxyuJNL/AENTR8VMyeUx4l4+PD0aTadTJbYt+8HtqS7fh6x/Awkt+x968dZTvWEPhxtfx9Q8iwbIIXWoOOz5eiVLpwzm3PutJypXtWE2s8VgCgkefWqiJTcf2yadT0qtNCHF0MpvNALzz8OX09nQB7t95qWnpzLNasSw7lczMTCwWC0ajkYCAkbefH8hqtbq3w7fUEaqNHXO95psqAJvNNuMSaYM36Ox1ezj5qgd49a5zUUUncewFt9BQnseWNx8hwbrYPblTqoaFEGLGy7Ja+apweO9LpY8vEfrU7/x73Gwy8dGakXusqWPcQ5qKiookkSaOWkd1Iu2r1W+RpPXn5LggErLSiNMEUdXczZ1vFVJbW0tcXJxX4wsICPjmLoP3JnfmScXQESnLamHtV0UHfdwvMITTrn+MN+6/mIrcTzn+kttZcNGvaKndTfmO1ZTtWMPmNx4m0XICJ1/9IMHhEQA4utqpKfqS47//mzFj0OrT6Whvo6amxuv/rQkhjgyhoaG89NJLFBUVkZmZSXx8PP7+/lRXV1NeXk55eTmvvvpfuru6eOpfT4+r+vzA6vDxJNJC1DoCQ8Kx2Wycf/75h/yaJtPgNqDm6hJi0mZz6jUP8c5jV1G/J5e9eeuJNx+Pf2AoIFXDQghxOLBaLXzy2YYRHwuPSWfXd+zZaTKZaG+up7enw/O+ANDn6CIoTEtwmJqiooN/lxDiSHdUJ9Jev3EuYYFD/woKa9sBKC8vnxFf7rOyrHxpm95EWl9PJ801pbhcLnZJRdoRyWKx8OZb741aDRaVZGH+sl/yxav3E29ZSKL1BDSxaWhi05h95g10tzcTGKIeMpRgb946nAN9JOccvD/aII1+f7PvmfDfmhDi8FRTU8OWLVuoqKigvLyc4qIivvrqS7q7u+ns6vasC/DzIT4ilHi1PxuLG3n33XfH1ew4KSmJoKBgmquLSbSeMOb6mVzNFRgYSHJKKi3fTO6Mz1zI7KXX8eWqx3C5XCz+4Z2etVp9OtvfeQ+n0+n1Cn0hhBAjs1gsdNj30dPRwp5tn6BLy2HHh/+iKv8LMhZdRN7Gjd9p94fRaASgta6c2tKt1O/ZgVafzper/sxJV96PKjqF4uLhW0qFOFoc1Ym0kcRr3A0TKyoqWLhwoZejcZfrfrx63ZScu8/Rjb12N801Je4+MjUltNaWYN9XDbi/DFz8/Uun5NrCuzIzM+nubKWrdR8h6oNP8pm15MdU5X/Opyt/xffveodgVZTnsaAw7bD15TtWE5mQQVjE2ImxsMh4/PwDsNlsnHbaad/thQghjnpLT1tCXn4BQf6+xEeEgHOAxqYOALQhfpxoiuSSeXEsSNOiVLq/SCx88AvKy8vHdX6lUok5I2PE6vA+RxctNaWe91F7TSn22hJaG2vosiZN2mucTFaLhR179m8DmnfBLVQXbaahbCfJ2fv76Gj06XR3d1FRUUFKSoo3QhVCCDGGwarp3Vs/ZP1//jDkMW28kbZWO7W1tej1+gmd12QyAe6eo5+/ct+Qx/yDwgjXJZNfUHgIkQtxeJNE2reEBvqiDQ0c9wfsqWaxWOhobaS7vXnExMV49Pc5DkiYlX6TMCvF3rDXM70uPiGRLKsV69LLsFqtWCwWMjIyCA4OnsyXI2aI/ZM7S0ZNpCmUSk6++kFevescPn3mV5z9fyuHVKAdaKC/l7271jFryVXjikGp9EGrTyc/P3/iL0AIIb7R39fH5QsSuP/CDBQKBX0DTs79y2b27OviNEs0//2ymg93NVB0//5K2Xh1wITe57NnZfHu6s8p2rSKluoSWmrd76MtDZWeNYlJye730bN+hMViGXMaqLdYLJms/fw5z88+vn6c/X8rcbmcQz5nDE58y8/Pl0SaEELMUEajEV9fX5wDA8SZF1BduAmA5JwlRCa6k2w2m23CiTSVSkVklI7W+nKW/e4N/nffh
YC7/UvyrJNprS+nYM1n0utYHLUkkTaCOE3QjEqkgTvhEWQ6btS1A/292OvLaa4upqWmhObqEtrqdtNcX47L6QQgVh9HltWC9ZRlWCwWrFYrGRkZhIWFTflrETNHamoq/gEBNNeUEp85euVlcHgEp17zJ9597GpyP3mGnKXXjriupuhLers7SJm9ZNxxhOvSvnPvBiGEAEhOTWVf9Q7PB3k/HyXz07QU1nVQUNOOQgGXL4jH6XRh7+6jsrkbnE7Ky/aM+xqLFi3iueee49OVvyQuPsGdMDvtEiwWi+fGU2ho6NgnmgEsFgttTbX0drfjH+R+7w8MVQ9bF6KJISAoFJvNxtlnnz3NUQohhBgPf39/0tINtNSWctoNj/HqXefR1dpAcs4phEcljLn7o7+/n9LSUmw2GzabjWOPPdZzI8hkMlBfX8a85J9x7AW38uWqx0iadTI+fgGodMm0tdppbGwkKipqxHMLcSTzeiLtrrvu4u677x5yzGQyUVh48FLR1157jd///veUl5djMBh48MEHOeussyYtpni134Q+YE8lg8GAr58fzdUl6L9JpA3099HWUOGpMGuuLqatrpTmunKcA/0AROtisFotZJ1wrueDfmZmJmq12ouvRswUvr6+GAxGT5+csSRYFpOz9Fq2vPEIetNxRCcPn9BTvmMNYRFxRCRkjDsOjT6d/E/Xy92sGayvr4+ysjJKSkooLi6mpKSEk08+mYsvvtjboQkBQHJKKhvyv8LlctHY0UtFYxf/21rDgNPF3qZuZieoWVti58U/rKXL0ed53pWLxp8cuuaaa1i4cCF6vZ7w8NEnEs90gxXJzTWlxKTNPug6hUIhVcNCCHEYyMqysmlnCUFhEZx2w2OUb19DSs4Sz+6PvG/1vN6+fTsPPfQQuTt3UVJSTF9vLwA+vv4EBvjT3t6GQqEgw2ymdPVmAGafdQNhEXHEGOYCoI5JBdyTOyWRJo5GXk+kgfvu6OrVqz0/+/oePKwvvviCH/zgB6xYsYJzzjmHl156iQsuuIBt27ZhtVonJZ54TRCfzZBEmp+fH+npBoo2vUlt6de01pbSXLuHgX73l4GIyCgsFguzvneGJ2FmsVjQar/bNlBx9JiVZWXd1vEPsjj2e7dSXbSF1U/dykV/WDVkgo/L5aI81/2mPZGEmFZvoK3VTl1dHbGxY0/DE1PD5XJRUVHhSZSVlJRQVFREcVExFXsrGBgYAMDfz58g/wBeefkVSaSJGSM5OZmnqu0Y7/iUnj73zSR/HwX+fn7EpaQRn2YgOSWF5ORkkpOTSUpKIjk5GY1GM6HrmM3mqQh/2pnNZhQKBS21oyfSAFQxaeyUqmEhhJjRrBYLH3y0BgC98Vj0xmM9j6li0ocNj1vxwAO8/e6HpM07i+NmfQ+N3oBWb2BfRR7v//U69u7dS1JSEiaTif+8+DIulwul0gfjgv2TqFVRiSgUCoqKili0aNH0vFAhZpAZkUjz9fUlJiZmXGv/8pe/cMYZZ/DLX/4SgHvvvZdPPvmEv//97/zjH/+YlHgStEFUbt4zYyZVXX3VlTzwwJ/QxYWz5OyTsVpv9iTM5A6A+K4yMzNZ9c77464G8/H157TrHuO1e85nw4v3cOo1f/I81rg3n47mWpInsK0T9k/utNlskkjzoueff56rrnL3tvP18SVGG02sWoc1ysjppsXotTHEanVEhGnYVPg1D735OE1NTURERHg5ciHgxhtvpLu7G7Va7UmWJScnSwX2QQQFBZGUnEJLze4x12r06eS+9+GM+TwkhBBiOKvVSmdbM11tTQSHD/1sptEbsH0ytJdZeloaAYHBnPije4es1ca5e2Pm5eWRlJSE0WjE0d1JV2sDIWrdkLW+/oGoouJkcqc4as2IRFpJSQl6vZ7AwEAWLFjAihUrSExMHHHtpk2buO2224YcW7p0KatWrTro+R0OBw6Hw/NzW1vbqPHEa4Jw9PZSX18/I77c//KXv/QkDoWYLJmZmXR3tNLd1kSwKnJcz1Hpklh8+V18uvKXJGQu9NyZKtuxGv/gcGIN8yYUQ3hUAr5+AeTn57NkycSScGLy1NXV4e/nz1+uvY9oVSQ+Sp+Dro3Vuj9IlZSUSCJNzAjh4eHcdddd3g7jsGK1WNhZPnZFskZvoKurk8rKSpKSZuYUUiGEONod2FP724k0rT6djvY2qqqqSEhIANyJt7bmOhxdbQQE729XEKrVExAU4umNOTi5015XNiyRBhAenUJhYdFUvSwhZjSv31487rjjeO655/jwww958sknKSsrY/HixbS3t4+4vq6uDp1u6H/IOp2Ourq6g15jxYoVqFQqz5/BXyIHE68JApgxAweEmAqDb7rNNePf3glgWnABxvnns/6FO2ltqACgfMdqkrJOxMfXb0LncvduSJUePF5mMBjo7esl2D9o1CQaQKxmfyJNCHF4slottNaNXpHmcrk8UzxtNtt0hCWEEOI7SE9Px8/fn+bq4dVhg1VmB/4e3594G9or2d0b0+BZm5KSgo+PD/a6kVseqaKTKSySRJo4Onm9Iu3MM8/0/O9Zs2Zx3HHHkZSUxKuvvso111wzKde4/fbbh1SxtbW1jZpMi9MEAu5E2oIFCyYlBiFmmrS0NHz9/GipLSU+Y2L/ni/+4Z3U7d7O6qdu49TrHqGpspA5Z934neKQyZ3eZzC4P2TVttSjChm9kXqQfyCRKq2U8gtxGMvMzKS1sZreng78AkLoat1HS20pzdUltNSW0lq7m5baErra7QCHzURSIYQ4kjkcDioqKjAYDEPasgwOERvp5nhYRDz+AUHk5eUdMI3ThFKppLmmhP7eblS6ZCrzNrD9w6eITs3x9Mb09/cnKTkFe32553x9jm5aG8qx15XR1lhJ9Z7d9Pf3j9rjXIgj0Yz7N16tVmM0GiktHXmaYExMDPX19UOO1dfXj9pjLSAggICAgHHHEB7khzokQCrSxBHN86ZbPfHKIv+gME67/jHefOAS3v/rdSh9/Ei0Lv5OcWj1Bmxrn5PJnV6Unu7uVVfTXI853jDm+hi1TirSxGGpr6+PgoICcnNzyc3Nxc/Pj7vuumtCnxGOBIOTO9/+02V0NNXQ3dkKgJ+/P0ajicVzLFgs55GZmYnVasVoNHozXCGEOOqUlpbywQcfUFxcTHFxCYVFRVRV7sXpdPKrX/2KBx98cMj67FlZfLrZRnXhFlpqSmiuKcFeW0JLTSm9ju4hbY4CAwNJSU2jfs8O1v37d0POkz7vbPI/Xe3pjZlhNrN+88e0VBfT1lBGa2ONZ61GG8Hll/8IH5/RdzMIcSSacYm0jo4Odu/ezY9+9KMRH1+wYAFr1qzhlltu8Rz75JNPvnPlWP+Ak7pWB5Ut3VQ1d1Pd0k1lSw+9/QNUVVV9p3MKcbiYlWVl446Rk9ZjiU6ZxbEX3Mrm/z1EgvUE/IPCvtN5NPp0Wu0tYybExdQJDg4mLlZPTfPBt8gfKFYTTZH0xBAzXGNjoydhlpuby7btOygsLKC/zz31Okyjo72lnjPPPJMTTjjBy9FOr6ysLH5y4420
tbZisVxOZmYmmZmZpKamSlWBEELMAMsuuhibLR9NTBJhUcloM04j5cRkdn/1Hhs2bBy2fs6cObz88su8/fDl+Pr5kZ5uYGG2FesPz8VisXDWWWcNWZ9ltfJ1UTUJ1hOozFsPgDbOSKzxWLa9/w/KyspIS0vjhhuuZ2/l70lP12E+/wSMRiMmkwmj0Thqr9x9+/Z53n8NBgPnnXfe5P4FCeFlXv+09Itf/IJzzz2XpKQkampquPPOO/Hx8eEHP/gBAFdccQVxcXGsWLECgP/7v//jxBNP5JFHHuHss8/mlVde4euvv+app56a8LVPe3QLdfZOBpwuzzFdVCRJycmce8Eibr755sl5kULMUJmZmbz7wSff+fk5S68lIERNTPrs73yOwcmd+fn5kkjzIoPJSM0ovSYPpNfG8MWWr6WKUMxItbW1LDh+IRXlZQD4BQQSEWdEE2dm/sUXEBFvJiLejK9/IM/clENubu5Rl0jz9/fnySee8HYYQgghDiI0JITUuUtZct2jQ453tdZT/PnLw9bfdtttzJo1i/j4eAwGA35+o/ctzsqysmbdRi6++yNeu+c82hurSM45Fe03n8ttNhtpaWmce+65nHvuuWPGW1FRwZNPPsmOHbls35FLQ30tAAqlElwu2tvbCQkJGe/LF2LG83oiraqqih/84Ac0NTURFRXFokWL2Lx5M1FRUQDs3bt3yMj1448/npdeeonf/e533HHHHRgMBlatWoXVap3wtb//4+swmUwkJyeTlJREYmIiwcHBk/bahJjpLBbLQcdlj4dCqSTzhO8fUgyqqER8ff3Jz8/nlFNOOaRzie/OZDLxSdGH41obq9HR2dkpVYRiRioqKqKivIz5F/2K5JxTUUUnoTzIEA1tXDq5ubnTHKEQQggxOrPZxJ61Xw87rtKl0NS4D7vdjlqt9hxXKpWcfvrp4z6/xWKhvWUfLucAp13/GDtXP49x/vmEaGIIDAnHZrNNqIrs7rvv5j8vvkSceQHxx1xATkIGEfFmervb+d8fl7Fr1y7mz58/7vMJMdN5PZH2yiuvjPr42rVrhx27+OKLufjiiw/52nfddRfh4aM31hbiSDbYJ6elpvQ7JdImg9LHF01sikyF8zKDwcC/m58fV5WZXuue3FlcXCyJNDHjZGdnAxCi0aGJSR11rSbOxLbtO6YhKiGEEGL8DAYD9v++PuxzmTo6GXBPT583b953Pv/g5M7mmhL0xnmcdn2O5zGtPn3Cn8sTExMJDArjrJ/9a8jx/j4HSqUPubm5kkgTRxTl2EuEEEeq9PR0z+ROb1LFpJNny/dqDEc7o9FIt6OHlg77mGtjNNEoFAoZOCBmJI1Ggz4unqbKwjHXRsRnkG+zMTAwMA2RCSGEEONjNBrp6Wyju73Zc8w50E9YVDzAIU9PNxqN+Pr60jLCpE9VrMEzuXO8srOz6WhtpKt135Djvn4BaGNTpfpbHHG8XpEmhPAev2+akY40Lns6afTp5G980asxHO0MBve0zurmOrRhmmGPd/f2UNtcT01zHTXNdfj5+kkiTcxYs3Ny2FUxjkRaghmHo4eSkhLMZvM0RCaEEEKMbXBa8r7ynWz/8F9knngpa/71c3x8/QlVRx5yIs3f35/UtHSaq4d/ltPq0/lqyyoGBgbGPZFzsBq8sbKQRJW7RZPL5aK7rQl1nIntUv0tjjCSSBPiKGe1Wti8y7sVaRp9Os1NjTQ0NBAdHe3VWI5WqampKJVKdpUX0NnTSc03SbNaewO1LfU0t7V41mo1WmbPzuGSSy7xYsRCHFxOTjbrPh97CFFkgjt5lpubK4k0IYQQM0ZaWhoKhYLSL9+jtvgraou/AtzJKZUu5ZATaeAeOLAlb38izdHVRnN1CW2NVfQ6HOzevduT0BtLcnIyIaFhNFUWULxpFWERcdSVbqWm+EusJ/+QnV+uxel0Dul9LsThTBJpQhzlLJmZfPDxp16NQRu7f3KnJNK8w9/fnwxzBq9+/hYAoaGhGNMNZC3IYZnRiMFg8PzRarVejlaI0WVnZ9Nh33fQQSqOrjbsdXvQpeYQro0hNzdXEsNCCCFmjKCgIOLiEwhW69Cb51NTuBmABRf/iqaqYgqLJiGRZrXyzrsf8N5jV2GvLaWt2T293cfHh3nHHkdcXNy4z6VUKsmyWmko38merR8NeSzGcAx5n71IWVkZaWlphxy3EDOBJNKEOMpZLBY6W5vobm8iKMw7AwfCo5Pw8fXDZrNx0kkneSUGAZ+t/YyioiIMBgPR0dFjDh0A6OzspLCwkK1bt05DhEKMz+AWk6bKAgJMx1K48XXizAtQ6ZLZV5HHx//4Ge1N1fzg3o/QxJvYsUN6twghhJhZjEYDZQ3lnHr1n/jvnWfT291O6pylDPT1suujD8c1IGo03//+93n/gw+J0UVgPf8ELBYLVqsVk8lEQEDAhM83e3YOr729miXXP8bqp25F6eNLZEImeuOxgLv6WxJp4kghiTQhjnKeyZ21u72WSPPx9UMbk0J+vgwc8KaoqCiioqLGXOd0Orn+uuv55JNP2Fu5dxoiE2Ji0tLSCAoKpqmqkE57PetfuBOA8KgEOlrqcPb3ERiqITw6EW18Btu3v+PliIUQQoihzCYTu95ZQ6g2ljNv/ieOrjZCtbGoY1Lo7Ginvr7+kKanZ2RksGXzpkmLNzs7m3/+8ylSZp9G5ok/IH/dyyTOOolgVRQhqghyc3O58MILJ+16QniTJNKEOMoZDAZ8fX1pri7x3DHyhnCZ3HnYqKurY+UzKznOOIfzzzmdhEg9EaEarvn7rd4OTQjAvS3FYrXSVFnIiVfcx87Vz9FcXUJM2hyCwiLYm7eOqCQrSqUPEfEmtr//D5qamoiI8M7NhJG0t7dTXFxMcXExlZWVXHfddWg0wweBCCGEODIZjUbs9StxOZ3ojfM8x1W6ZMA9ufNQEmmTLTs7G6dzgJbqEhZe+lsSrYvRG49FoVCgjTNL9bc4okgiTYij3ODUnpba3V6NQ6NPx/bFy16NQYxPbGwsYWFhmOLSOXXWYgC6HN1ejkqIoebMzuHND9bh6x/I6Tf8ldfv/R4ul5NZp19F7ifPMPec5QBExu8fOHDKKadMa4wul4vS0lKKi4spKiqiuLiYgsIiCguLaKiv9axTKBTU1NTw5z//eVrjE0II4T1Go5G+XgcdLXWEReg9x8OjElEoFJSUlHDCCSd4McKhsrKyUCgUNFYVEJVsJWX2aZ7HtPFmtu9Y7cXohJhckkgTQmC1Wvgy37uTO7X6dL5u3Me+ffvGtb1QeI9CoSAzI4PKxmpvhyLEQWVnZ/P0ymcY6HOgjknhxB/fx+qnbqWzdR8KhZIEizsJrNIl4+cf4JVE2tNPP831118PgF9AIJqYFMKiUkiYdyFZuhTUMSmodMmsWfkLCguLpjW2jo4OCgoKyM/PJz8/H5stn77+fl584T9ERkZOayxCCHE0GpyY2VpfNiSR5usXgCoqblImd06mkJAQUlLTaKosHPZYRIKZ3I9X0traikql8kJ0QkwuSaQJIbBaLHy8Zr1XY9D
o3ZM7CwoKJJF2GLBYrWz4aK23wxDioLKzs3EO9NNSu5vIxEwMx55DTdGX5K97mZj0uQSGqgFQ+vgSEWckN3f6t5w0NDTgHxjMJfd8SIhah0KpHHGdWpdKYdHUTVfOzc1l27Zt5Ofnk5dnI89mo+qA/ofqqHjCopOptG1k7dq1XHTRRVMWixBCCLekpCR8/fyw15cTn7kQcFcyd9rr8Q/RUjQJkzsn2+zZOWzeNTSRNtDfh6+fe3jBzp07Wbx4sTdCE2JSSSJNCEFmZiYd9n30dLQQGOqdHjyq6CSUPr7YbLYZVaYuRpaZmcnLL76E0+VEqRj5y78Q3jRr1iwAGisLiUx0D1VZeOlv8fUPJGnWSUPWquPMbNu+Y5ojBJPJRG9PFz5+AQdNogGoY1LYtboCh8PxnSapjcZmszFnzhycTiea6ARUselEWpZiWJKONs6AJiYVv8AQAP5923EyFEYIIaaJr68vyckpFH3xBjVFX9K+rxx7fTm9PV0A6E5bMO0x2e32Ie0ICouKKC4u4fbf/JpLL72UnOxs3vvgY3I/fobGykLsNYU0VZcy0N+Hn7+/VDSLI4Yk0oQQnsmdzTWlQ5qZTicfX3+0McnyJe0wkZmZSbejh8a2ZqJV8qFIzDxhYWEkJafQVLX/zrivXwALL7lj2NrIBDNb/vcWfX19+Pn5TVuM+7ftlBMUpj3oOrUuGafTye7duz2/r7+L9vZ22traiIuL8xxTKpU4nU7OvuUZEq2jVwmoY9Pkd7QQQkyja665mr/85W/Eh0ZgnrsYg+FqjEYjRqORtLS0aY3l/265lb/+5c+en8O0OlS6FNqbWvjb3/7OpZdeyimnnMK9993Htrf/jMVq5cTTF5GdvZzs7GyysrJkW6c4YkgiTQiB0WjEx8eHllrvJdLAPblzV57Na9cX45eRkQFAZWO1JNLEjDU7J4evi4b3avm2iAQzfb29FBYWkpWVNQ2RuRkMBhQKBfa6PcSkzznoOnVMKgBFRUVjJtL6+/spKysbYYBBIQ31dfj6+vLpp596ttakp6fj6+dHW0MFMFYizcDOXTJ1TQghpstvfv1rfvPrX4/4mMvloru7m6CgoFHP4XK5aGhowGazYbPZKC4u5sYbb5zwjZm8vF1EJWdx4o/uRaVLwj8wFICv3v4bRZ+7B4Ydf/zx1NXWEh4ejo+Pz4TOL8ThRBJpQggCAgJISU2jpca7Awc0+nRsm1/zagxidE6nk8rKSoqLi/H396eysYa5adneDkuIEeXkZPPxmrW4XC4UCsVB10UcMLlzOhNpQUFBxMUnYK8rG31deCSBwWEUFY08cODZZ5/lzTdXUVhURFnZHvr7+oBvDzBYRpYuhY0v3cnmzZs9iTQ/Pz/S0w001479+1+jT2fz56/R39+Pr698hBRCiOlSXV1Nbm4uNpuN/Px8du7Ko7CwED8/P77cstlT4Txo06ZNvPDCC+zKcyfPmpsaAfcOEKWPD5WVVaxa9eaEYjCbTOwqqiQqyTLkuDomhaamfbS0tKDRaNBovNMmRojpJJ+ChBAAZFmtfF3k5cmdsels3VdPU1MTERERXo3laOZyuaitraWkpITi4mJKSkooKS6msLCIsrIyHL0OAHyUPlQ21ng5WiEOLjs7m672Fjrt9YRqYg66LiA4HHVUHLm5uVx++eXTGCGYTEb21I+eSFMoFKh1ySNOaHM6ndzwk58QFpVEnHkBC+b9AHVMCmpdyogDDPLWPDfsPFarhc27xv79r9Ub6O/ro7S0FLPZPI5XJ4QQ4lBVVVVhNJro7u7CPzAYrT4dVUw6s844iS1vPMIXX3wxLJF27XXXU15ZS6zxONIWXcYxegNavQFVdBJfvPYAu/I+n3AcJpOJlvqVOJ0DKJX7q83UMSkAFBcXc9xxxx3aixXiMHFUJ9KeeOIJfvWrX6EcpcGvEEcLiyWT1Ws3ejUGTZwBgPz8fJno4yUul4uTTjyJ9RvcU1yVCiXRmihiNdGkquNZdOJc9NoYYrU6Xln/JhUNlQB0Obq9GbYQI8rOdldLNlUWjppIA/fAge07dkxDVENlmM3krvpozHVh0SkUFAzfpqpUKomPTyA8bTHHX3L7OM6TTEHh0PNYLRY++HjsqaCaWPd05fz8fEmkCSHENHFv4ezi5KsewLTge0NukBRt+C822/C2KIkJCXT5RnH6T/4y7DGt3oDt0//Q09NDYGDguOMwmUz09znoaKohPCoBgJ4OO0Gh7h6fRUVFkkgTR42jOpF2++238/677/Ds8/+e9maNQsw0mZmZtLc04OhsJSDEO41A1dFJKJU+kkjzIoVCQUFBAYszj+OSRRegU0fh5zty8/XEqHg2FnzJVX/9GS0drdMcqRBjS05OJjQsnKaqwmGTOgEG+nux1+6hsaqQ7rYmcqtrpz1Go9FIS91Tw+7wD8a3/YN/kmBZjDommaINI1cQmE0mCmpGr2obpI5JpfCLV4Ycy8zMpLO1ie72JoLCDl4NHBQeQXCYmvz8fC688MJxXU8IIcShiY+PJyQ0jO62pmFVxuExadhsw4fAWK0WNm97ZdhxAG2cEafTSWFhITk5OeOOY7Dqrab4S168/RTmnH0j2957EoAwjW7EqmkhjlRHdSLtuWvm8Ie3djIry8qfHnqYG2+8UarTxFHLYnH3O2iuKSXWMNcrMfj4BaCRyZ1eZzQaUbQ6iY/Uj7ru5FmL+KLwK6pb6njqqae4/vrrpylCIcZHoVAwa1YWDZWFdLc301RVSFNlIY2VhdhrCmmqLmWg391PLDEpmWt/dtO0x2gymRjo76WjqZqdq5+npaaU1LlL0caZ2PjyPTTuzaehbBfG+efT0tw04tZ3s9nElh2rxnU9tS6ZpsYG7HY7arUa2D+5uaV296iJNIVCgSbWMGL1gxBCiKmhUCjIyMigeYRexprYdPJsnww7brVasTdU0dfTiV9gyLDnANhstgkl0hITE/EPCKC6cDOAJ4mm0RsIUUVSeJA+nkIciY7qrNGxKRo+vvU4LpodxU033cSSU0+hvLzc22EJ4RVGoxGlUknLOBpOTyVVTJpM7vQyo8lIrb1+zHXaUDUXLjibHkcPp5122jREJsTEzc7JYffX7/PcrcfxziM/Ztvbfyasr4rzT1/EX/78GBs2bMBut1NRXsbvf/e7aY/PZDIB0Fy7h5aaUqoKvmD9C3ey6sFL6Wh2V8ilzl06pAfNSOew1+9loL93zOsNTgA98DwGgwFfX1+aq0tGfE5LTSmfPXcHHS11qGLld7QQQky3LKuFtrpServbcXS109vTwdrnf0tQeCSVeyvo6OgYsn7wBnlL7e5h5woIDiM8InbCN0V8fHxIS0vHLzAE44LvoVD6gELBrCU/RqUbuf2AEEeqozqRBhAS4MsfL8zk5RuOoXjn12RZLfzzn//E5XJ5OzQhplVgYCDJKak014z8RWq6aPTpUu3gZUajkdrm+nH9HozV6gDYvXv4BzUhZoI777yT++67j1deeYWCggI6Otr5+qsveeqpp1i+fDmLFi1CpfLOdnaAhIQEAgICaWso5+
SrHyQgWIXedByn3fAXZp95PUofX5KzT0WlSwZGTqQZjUaczgHaGqvGvJ5KlwQwZAKov78/qWnptNTupreng//9cRlrVv6S3Vs/xLbuZV6/73sUbnyN2pKv0eoNFBcX0d/fPzl/AUIcgVasWMG8efMICwsjOjqaCy64YNjU3Z6eHpYvX05ERAShoaEsW7aM+vqxb2KJo5PFYqG5djev33shz/xsDi/+5hQKNrxKpW0DAIXf6n2ZkZEBQHNNCS6nk4H+PlwuF/nrXqGjuRZ1bDp53+GmiNlsoq2+jMWX/Z5QbSwKFKTMXoJKl8zu0lKcTuehv1ghDgNHfSJt0CJDBJ/cNp/zs7T85Cc/4fTTT6OystLbYQkxrbKsVuxerkjT6A001NfR0tLi1TiOZgaDgY7uTtq62sdcG6uRRJqY2aKiorjjjju45JJLMJvN+Pj4jP2kaaRUKklLT8deV0aoJoYTr7iXmqIt9Pf2sHfXevSm4wgMVeMXEEx4ROywL+Owv6rNXjd2nzT/wFDCtMN72VitFuy1pfR2tWOvK6N40yo+fvJm1v/nD/gHhaP08SPReiIafTp9vb3s2bNncv4ChDgCrVu3juXLl7N582Y++eQT+vr6OP300+ns7PSsufXWW3nnnXd47bXXWLduHTU1NdJ7UBxUZmYmvT1d6E3HAtDT4f6cPPuM6wCG3YQOCQkhMSmZ5uoSvnzrzzz1k0z+98dlrPvP73n9vgvRxBrYuWvXhOMwm0y0NZTjHxTGeT9/nu/f/S5BYRGoY1JxOHqGfH92Op2UlZXx3nvv8ac//Ykrr7ySRYsWs2HDhu/61yDEjCGJtAOEBfrywEUW/nPdXNZ99hl33nmnt0MSYlpZLJm0jlACPp20B0yFE97haSbbXDfm2iD/QCJVWkmkCXEIzGYTrfXuJFjaMWdiOv57bHzpHmqKvyR17lLPOpUuZcQeNLGxsQSHhHrOMZaRzmO1uBNpodpYTrziPgDmnH0j5/3iBbT6dPSmYwkIDhsyuVMIMbIPP/yQK6+8EovFQnZ2Ns899xx79+5l69atALS2trJy5UoeffRRTjnlFObOncuzzz7LF198webNm70cvZiJBntZJuecSoLFPZArMEyL3jwfdVT8iL+TZ2Vl0VJTglLpbou+r9ydOJt33s1o4tLZW1FOV1fXhOIwmUy0NtbQ5+giPCoRrd4AgOab9gODN3tu+/kvCAkJJTU1lXPOOYc/3HUPH23Ywfa8Qp588snv8DcgxMwiibQRNLQ56Btw8r3vfc/boQgxrSwWC23NdTi62rwWgyomBYVSKV/SvGhwinFNy/i2mMSodZSWereSUYjD2eAd/kGLfvB7AsM0uFxOUnL29x8Mj06msHB4Ik2hUGAwGMZVkXaw82RmZtJh30dPRwvp887CvOhidn7yHL7+AdQUf0lKzhIAglVRBIWqZAu+EBPQ2uqebK3VagHYunUrfX19LFmyxLPGbDaTmJjIpk2bDnoeh8NBW1vbkD/i6JCYmEhwSCj2uj2ccvWf0MYZsZ70Q5RKH1SjTO5srSvlmPNuJu2YMz3Hk2adjFZvxOVyUVBQMKE4Bm+2ttaXDzkeGhGHj6+/p9r5nXfeQZ1g4exbVnL5g+u46q/b+d4dr5Ocfar02RRHBEmkfUutvYe73i7mx1dcwbnnnuvtcISYVgdObvMWX78AtLpk+ZLmRcHBwcTF6qltHjuR1tvfiyo4jNJi7/bWE+JwZjKZaGuqpc/hrgwY3DJzyV3vEayK9KxTx6Swe3cpAwMDw86RmWGmbZSKtErbBr5+5284B/pR61IpLSkZ0svmwMnNAIt+8DtCtbG88+iVOAf6Sc45FRic3JkuNzuEGCen08ktt9zCwoULsVqtANTV1eHv7++ZnDtIp9NRV3fwavAVK1agUqk8fxISEqYydDGDKBQKzGYzLTWlBKsiueTu95h3/s8AUOvT2ZWXN+w5FovFXT3W08GJV9xHWGQ8cRnHE6qNRavfP7lzIjytBL6VSFMqfdDEJHkq0rKsVvz8/Em0nkBYhB6FQgG4eyEXFxeN+D4mxOHE19sBzCQul4tfvp5PmErDn//yF2+HI8S0M5lM7smdNaXEpM32Sgy9PR0EhEeRJ4k0rzKaTNR882G+f6Cfensje/dVUlBVSnn9XursDbR2tdM/0M+Ac4AF8xd4OWIhJq6rq4vS0lKKi4spLi6mpKSE0NBQHn74YQICAqYtjgPv8Ecmum9ohEclDlun0qXQ63BQWVlJcnLysHO888FqAOp2b2fN0z8nKfsUUmefxp7tn7Br9fMAmBddjDomhZ6ebqqqqkhMdF/HYDDg4+NDS20peuM8/AKCOe36x/jf/RcRlWQlVBvruZY6Np2du4Z/aRNCDLd8+XLy8vLYuHHjIZ/r9ttv57bbbvP83NbWJsm0o8isLCsfrt8+7Lg2Np2dHz9DZ2cnISEhnuMH3iCJSZvND+9fg0LprqPxCwxBHRU/4USaVqtFGxFJdaF7C3JrfRn2ujLa95XTUldBY2PTN9fOZPXa4f/Oa/UGeh0O9uzZg8FgmNC1hZhJJJF2gFe+rGZd4T7ef//9YXeIhDgaBAUFkZScMi2TOx1d7bTUlrr/VLv/2Vq3m9bGagAWZF085TGIkfX29qLWqHlv40Z++NhP6e7pQqFQMuDcf/dQE6omIz6d4po9XLDsezz44IPyYV7MaKtXr2bXrl0UFxdTWFRMUVERtTXVnseDQlSEaHQ0VhVzxRVXMG/evGmL7cBhAYOJtJEc2IPm24k0k8lEh30fjq52utuaaNtXya7Vz7Nr9fP4+PrjHxSKRm8gVBPDQK8DcE8AHUykBQQEkJKaRkvN/m3akYmZfP/OdwgK0w6NQ5/OV1tWMTAwMOOGNwgxk9x00028++67rF+/nvj4eM/xmJgYent7sdvtQ75z1NfXExMTc9DzBQQETGuSX8wsmZmZvPzf13C5XJ4KL3AP6nK5XBQWFjJ37lzPcbPZjFKppLm6hJi02Z4k2iBVTNp3mtw5Z84cVn/yMvnrXkYbEYnRYGDhCXMwGi/l8ssvB9xJvPaWBhydrQSEuCdj2+vKCAyLANx9NiWRJg5nkkj7RlVzN/e8W8LVV1/NmWeeOfYThDhCWS0Wcssmr9+Vo6uNlppSmmvcyTJ7TQn2ut20NdUC7lL1pOQUZlksWM6+HIvFQmZmpucumph+v/nNb3jzzTcBUIWoMMelkxAZR0KknoRIPfEReoIDggC457+P0NbWRnh4uDdDFmJUGzdu5LTTTsMvIBCNLpnQqGRic87DvDQZtS4ZlS6ZwFANfY5OVt40m+Li4mlNpGk0GiIio7DXjz4Jc7AHTVFREUuXLh3y2P5k3B5SZi/BvOhiSr98hxMuvwd1TApv/emHpM5xPycsMg4fXz+KioqG9GjKslr5umjo739NbNrweGPT6XU4KCsrIz09/Tu9ZiGOZC6Xi5tvvpk333yTtWvXkpKSMuTxuXPn4ufnx5o1a1i2bBngTpDv3buXBQukwluMLDMzE0d3J50tdYRoYuhua6KltpSGs
lzAvU3zwERaUFAQySmptBzkBrlGb2Dnro8nHMeqN99g586dmEwmT9+/kWIF2LPtE9Y+fzsZJ1xCwfr/uuMKcffZPP/88yd8bSFmCkmkAU6ne0unRhvJo48+Sn9/Pw6HY0hprBBHC4slk3VfPD/h5zk6W93JspoSmmtLsdeUYq8rpf2bPltKpZKk5BRyrFYs5y32JMtMJhPBwcGT/TLEIejo6CAuMpZHrrybQP/R73zHR+ilaayY8fz9/QE4/5cvE5VsPfi6wFDCNNGeZsnTyWgw0FhXPuqawR40I8U3eGe/tb4cXWo2iy79LTVFW8j77AWyTr2Cgf5eUuec7j6Pjy8a3f5eNoMOthXn27Rx7mvZbDZJpAkxguXLl/PSSy/x1ltvERYW5ul7plKpCAoKQqVScc0113Dbbbeh1WoJDw/n5ptvZsGCBcyfP9/L0YuZavAm8/t/uYbutn10tdsB8PP3x5o1ixNPPHHYc7KsVraVHjyRtuOjp+no6CA0NHTccYSEhIyZ8DUajSiVSmqKvwTwJNFSZp+Go6NJ+myKw54k0oBnNlawsbiRK664gguXLWPzps2EhIaSt2sn0dHR3g5PiGllsVhoa6qlt7sd/6CwYY/3dLR4EmYtNaXY63bTUlNCh30f4E6YpaSmMddqxfK9kzwJM6PRSFBQ0DS/GvFdZGZm8vxzz+Pn6zfm2oRIPe989RHd3d3TENnkWb9+PQ899BBbt26ltraWN998kwsuuMDzuMvl4s477+Rf//oXdrudhQsX8uSTT8o2hMPU4P9v9obyURNp4J5o6Y1EWkaGmfc/+2rMdWFRyRQUFg4/HhaGLibWU9XmFxjCqdc+zKoHf8CGF+8iKslKeFTCkPMUFQ19nZmZmcO24owkWBVNYEg4+fn5UlEgxAiefPJJAE466aQhx5999lmuvPJKAB577DGUSiXLli3D4XCwdOlSnnjiiWmOVBwuampq2Lt3L6cvPYM9u0tJ0qXS19dLa3MLtQ31XHD+uSQlJQ17ntVq4dMNT494zog4d3/O/Px8jj322EmNNzAwkJTUNAJDVJgXXUTJ5rcZ6O/FcNy5VNo2yk1Ycdg7qhNpD39YwqayVgpq3KOjX/vfKnTpc8k49Sq2vvN3NmzY4Cm3FuJoMViKXVuyFb+AoG+SZqXYa91Js8GEmY+PD6lp6RxjsWBddiqZmZlkZmZiMpmkf8dhLjMzk96+Xhpa9xGr0Y26NiEyDpfLRWnp5G0Hng6dnZ1kZ2dz9dVXc+GFFw57/E9/+hN//etfef7550lJSeH3v/89S5cuJT8/n8DAQC9ELA6FRqNBGxFJ67emjI0kXJdCQWHRmOsmm9Fo5MWXXx3W++bb1LoUCne9P+JjJpOJ6rr9kztj0mYz95zlfP32X0mdO3QrqComhYL8oVt6DmxMHWuYy8EoFAq0epncKcTBuFyuMdcEBgby+OOP8/jjj09DROJw9vnnn7N48eIh/14F+fuQHBHEGcZItvuH8taqN7n33vuGPddisdDeXE9XayPdHc20VJfQXFNCS00J9hr3zZTGxsYpidtqsbCttISlP32CutJttDdWkWBdTEdLHVu/fFv6bIrD2lGdSHstrwudcRGLT5pHrOEYtHqDpwnj7i1vSSJNHJXMZjO+vr68/9frAPD19SUt3cBxVguW75/u6WFmMBgkYXaEGkymVjXWjplIi490T/IrHKFCZiY788wzD9oP0+Vy8ec//5nf/e53nmqbf//73+h0OlatWsWll146naGKA6xcuZIHH3iQ235+G9ddd92EPoAbDQaax5FIU+uS2bnt/TETWpPNZDLh6O6gu62RYFXUQdepYpLZ/mEVXV1dw7bFZ5hNFH64fsixuWffSKL1BCLiTUOOq3Up5H60l+7ubk+18OBWnJba0RNp7jhkcqcQQkyHhoYGXC4Xr904j8Z2Bze+sJPu3gEqmrr589xYWjp7+aho74jvW1aruwr737843pOIi4rSYc2ycs6i88jJyZmy/uAWSyafbXgGv4Bgzv/VSyiVSvwDQ9Hq03E4eigvLyctbXgfTiEOB15PpK1YsYI33niDwsJCgoKCOP7443nwwQc9TXNH8txzz3HVVVcNORYQEEBPT8+4r/vDFZ8SFhl/0A/J0WnHsHbd+hEfE+JIFhwczOeff05FRQUWi4X09HRPfyFxdIiLiyM0NJTKxmrmGXJGXRsaGEKkSjus19LhrKysjLq6uiFN2FUqFccddxybNm0aMZHmcDhwOByen9va2qYl1qNJdXU1//d//4cmMJwbb7yRp//1NM8+9yxZWVnjer7ZbOKjDdvHXKeKTqazo33M6Xlj6evro7S0lLy8PGw2G3k2G7t25dHW1s4nH3/o+XIz6MBhAUHhkXS1NmCvK8devwd7XRmt9WW0N5Rjb6gkICCQjo6OYYk0k8lEy3PP43I6PTcGlT6+6FKzh8WnjknB5XKxe/duTyyBgYHfNKYeu8JUE5vO1q+kokAIIaba4JbNl7ZUUVzXAYCPUkFX7wBLH90EwLy5cw6aSPv3v/9NR0cHFosFi8VCRETEtMRtsVhoa67D0dVOcPj+a2r0+/tsSiJNHK68nkhbt24dy5cvZ968efT393PHHXdw+umnk5+fP2qz//Dw8CFf3CZ61/jAPiEjiTUcw4YX3pJpdOKodOyxx056rwRx+FAoFGRmZFDZWDOu9XFaPYUFBVMc1fQZbAqt0w2txtPpdJ7Hvm3FihXcfffdUx7b0ey2227DT+HLAz/+PZX7qvn9Sw9y//338/LLL4/r+Uajkf++9uY4tk4mA1BcXDyhRNq6detYv349NpuNnbvyKCkppr+vD4BQVSRqfTrquHk0lb/Gxx9/PCyRlpqailKp5NNnfklvVzuO7m++LPn4kJiUjMVsxrzkQoxGI8cdd9yIPVyNRiN9jh467fWEamNHjVcd454iWFRUNCSW0RpTH0gbl46jRyoKhBBiqplMJhYumM+uhnqSMzKINAxw8sknYzAYSE5OJjk5mZiYGJTf3EA5kEKh4Ec/+pEXot6/w6GltpSYtNme4yFqHYHBYeTn53Peeed5JTYhDpXXE2kffvjhkJ+fe+45oqOj2bp1KyeccMJBn6dQKA7pTvFYYo3H4HQ62bRp07AR80IIcaTLtFj44pMN41qbEBFLfv6Rk0j7Lm6//XZuu+02z89tbW0kJIx+w0aM34YNG3j11Ve56exrCA0Mobalgf6Bft584w2uuvIqrrn2GhYtWjTqOYxGI92drfR0NBMUdvC78eFRCSiUSoqLi0f9HHKg+vp6lpx2Gn7+QWjjTKj12SzIuQiN3oA2Ln3I9faVbcdmG95kOSAggL/+9a9s374dk8mEyWTCaDSSmpo67qrgA6vaxkqkBYZqCQpRDRusMLgVZ5DL5aKjuZaWWne/zOaaElprd9NS665a6+joGFdsQgghvpuQkBA2frHJ22FMmMlkcrcLqBmaSHN0thKiiZE+m+Kw5vVE2re1trYCoNVqR13X0dFBUlISTqeTOXPmcP/993ua5H7bd9lyo9alEKKK
ZMOGDZJIE0IcdTIzM/nvy6+Mq09UQmQc729bM02RTb3BmzT19fXExu5PRtTX15OTkzPicwICAqRn4BQKDAzEz9eP/21+l0C/AJ5d8zJBAUF877gz+fCd93nu+ecoLi4edaqq0eieTtZaXzFiIq1u93Zqir4k54xrUUfFT2hyp1qtxuV0Mu/CX2A58QejrlXFGA7aW2z58uXjvuZIkpOT8fXzw15XRnzmwlHXKhQK1DEpw7ZlZ2Zm0tZcx5qVv6Ktfg8ttaU4ujsBCAoKxmQ2M+94KxbLJRxzzDFkZw/fNiqEEGL61NXVkZuby+zZs0esVvaWoKAgkpJTKNnyDo2VBe7BZbWlnsFlGs3U9GYTYjrMqESa0+nklltuYeHChcO2PBzIZDLxzDPPMGvWLFpbW3n44Yc5/vjjsdlsxMfHD1s/0pabG58efduCQqFAlzaHdevHV5EhhBBHkszMTLodPTS2NROlGr2XRnykHqfTOU2RTb2UlBRiYmJYs2aNJ3HW1tbGli1buPHGG70b3FFq3rx57Ny1k5/e+FMeXvUESoWSCxeczfcXnY+vjy+vfvEWUVEHb9APeLYf2uvL8PUP5JOnbiUp+2RS5yxl7651bHvvSVwuJ2nHnEFYVDJFReNPpAUEBJCalk5z9Ti2ROrT2fnhJ1MyzMDX15eUlFTs9WVjLwbColPILxg6KOSkk04iO2c2Pr2VLFicQ2bmZZ4hM0lJSSNuHRJCCDE9Ojo6WLVqFbm5uWzfsYPcHTtpbGwAwJo1i107c70c4VDLll3Inx97jBBXC8dlWbFcfBqZmZlkZmaSkZHh7fCE+M5mVCJt+fLl5OXlsXHjxlHXLViwgAULFnh+Pv7448nIyOCf//wn995777D1I225uf+jsQcTxBiO4ctVj+BwOKTSQAhxVPFM7myq8STSXC4XrV3tVDZWU9lYQ1VjNVVNtVQ2ja+X2kzS0dFBaen+huplZWXs2LEDrVZLYmIit9xyC/fddx8Gg4GUlBR+//vfo9frueCCC7wX9FHObDaz5tM13HTTTTzxxBNsyN9MXUs9+VXFnH322ajV6lGfHxwcjD4uHnt9OYEhaux1e7DX7SH3o5UolD74BQYTHpmAKjoJVXQSRcVfTyi+WbOy2JI3nt5iRro6O9i7d6+ngfRkyjCb2bFnfIk0tS6Z4nXrhhyLi4tjx/Ztkx6XEEKIQ3f//fezYsUK1NHxaPRmkhZ8nznxJlpqStj6zt+HTGKeCR7605+4/49/xM/Pz9uhCDGpZkwi7aabbuLdd99l/fr1I1aVjcbPz4/Zs2cP+VJ0oJG33IydSNMb5tHrcPD111+zcOHoWySEEDOT0+nkpzf+hJ4eB3fedRcpKSneDumwkJSURFBgIB9u+5TNRVupaq6lqrGG1k731nhfX18MaelY5lq5wHIRZrOZyy67zMtRj9/XX3/NySef7Pl58GbLj3/8Y5577jl+9atf0dnZyfXXX4/dbmfRokV8+OGHBAYGeitkgbta/LzzzuMfT/4DR5+D6uY6mtvtvLlqFbfeciuP/fmxUZ9vMpmoqC9j/oU/x7zoYkq2vM3iy+5EHZPKe3++htS5ZwCgiklhy8b/TmgipdVi4aNP1o65Thu3f1rZVCTSTCYj6zePbwCDOiYFe0szjY2NREZGTnosQgghJldoaCiBIeFc+sdPh1Q112ti+Oqtv2Kz2TjmmGO8GOFwkkQTRyKv1+e7XC5uuukm3nzzTT799NPv9CV3YGCAXbt2DellMxkiEswEBIWwYYNs7xTicPXqq6/yz6f+xbtvvorJZOSmm26ivr7e22HNeEqlkgsu+B47ym3U9DWStSCHn//6F7z++uvk5+fT1dVFfmEBr73+GnfffTdnn322t0OekJNOOgmXyzXsz3PPPQe4Ezb33HMPdXV19PT0sHr1ak+PLeFdS5YsQaVSsST7RB675l5W3vwYhtgUVq/+ZMznmk1G2hvKAVh46R2EqKMp2PAqXW376HN0knqMO5Gm1iXT19vL3r17xx2XxWKho7WR7vbmUdeFavUEBIWMOHBgMphMJlr3VdPf5xhzrVq3f3KnEEKImS87O5uezjY6mofuBtDGGVEoFOTmzqytnUIcqbyeSFu+fDkvvPACL730EmFhYdTV1VFXV0d3d7dnzRVXXMHtt9/u+fmee+7h448/Zs+ePWzbto3LL7+ciooKrr322kmNTenjS3RqDuulT5oQh6W+vj5+/9s7WJIZzabbF/GL01J44dmnSUtN4Xe/+51nuIkY2YsvvUhnVyc7d+3k5Zdf5ve//z3Lli0jIyND7i4Kr9m0aRMt9hbmGXIAUIeoqbXv47zzzx/zuUajEXtDBS6nE//AUE699hEaynay7vnfoY0zoolJBUD1TYJpIgMHBgcetdSM3YNVE5tOXt7IAwcOlclkcm/DbqgY9pjL5aK7vYnakq8p2PgaBZ+/DkBJydhbUoUQQnjf4ICXxsr9/S37e3vobKlHE5MsiTQhponXt3Y++eSTgLs64EDPPvssV155JQB79+4d0ty2paWF6667jrq6OjQaDXPnzuWLL77w9PSZTDHpx7Dxs+cmtL1DCDEzPPPMM+wuK+eJWxcQ5O/DT09J5bL5CfxjbRmPPvQgTz7xOL+5/Q5uuummGdVPYqZQKBTye0/MOO+88w4Bfv7oVO7hAkXVpbR1tnHuueeO+VyDwUCfo4dOez2h2lhi0mYz5+wb2fru48w67ceedaHaWHz9AiguLh735G6DwYCvnx/N1SXoTceNulYdm37QyZ2HarBysjJvA/ba3djryrDXl9PeUIa9rozuTvcNBIVCQXxCImeeeeZhV1EqhBBHq7i4ONQaLfsq8lj/nz+QMnsJtrUvAZCYdSLbd0giTYjp4PVEmsvlGnPN2rVrh/z82GOP8dhjo/dBmSyxhnl89dZfyMvLkxHvQhxGurq6uPvOP3DB7Fgy9GGe4+pgP35zlpGrFiXxl092c8ftv+Evjz3KnXffw1VXXYWvr9d/LQohRpGamkpvfx9X/e3/MMcbUKIgKjKKY489dsznDiaZ7PVlhGrd7SDmnrOc+IwFRCXtnxauVPqg1iVOqFLLz88Pg8FI8ygVaYOTOrVxRra/8wFOp3PSp2BGRUURGxvHptceAECl1mA0GlmwKBuT6fsYjUZMJhNpaWlyA0EIIQ4zCoWC7Oxs9pTtpKu1wZNEA9Cl5JC79rlJnQrd2NhIcXExRUVFFBUVoVAo+MMf/iDvH+KoJ98Yx6BLzcbH148NGzZIIk2Iw8jf//539jU28vOrRx4UogsP4P5lmVx3YhKPfLSb66+/nocefID77l/BRRddNOlfboUQk+OGG27gggsu4L333uPtt99mzZo1/OIXvxjXf7PJycn4+vpirysjPuN4AHx8/UasIAuLSp5w77BZWVY27nAn0vp6OvHxD0SpdFd1ttSUsuaZX2E6/nto9Aa6u7soLy8nNTV1QtcYi0Kh4KuvtlBRUYHRaJQhAkIIcYSZnZPNrldWccrVf+LTZ36Fj18AKbOXEJVs4au21kmZCn3
TzT/jxRdfxN7i7vupUCgI08bQ1lTL4sWLOeuss77TeQcGBigvL8dms5Gfn091dTW33347er3+kOIVYrpJIm0Mvv6BRCdbWb9+AzfddJO3wxFCjENLSwsr7v8jlx0XR1JE8KhrUyJD+PsPZ3HjScn86cPdXHLJJVx4wfn8781V0xOsEGLCdDodV199NVdfffWEqrr8/PxISk6htb58zLUqXTKFBWMPMDiQxWLhnfc/xul08v7fbqC5upikWSehik5i2/v/pL+3G11qDik5SwD35M7JTqSBe+tPXFzcpJ9XCCGE92VnZ9Pyl7+QOud0aoq2UPj5/0jKOpmIeDMAubm5h5xIe/HFFwmNMXPsJZei1qWg0iXj4+vPypuzKSoqmnAi7cEHH+TFl16muKgIh6MHgMCQcPocPYSHh/PHP/7xkOIVYrpJIm0cdOnHsG79u5NaJiuEmDoPPfQQvT3d/GzJnHE/xxIXznNX53DWX7ZMaFKfEMK7Jlo9ajaZyKssH3OdWpdC7kcVOBwOAgICxnVui8VCV3sLPe3NhEclUFO0hbIda+jtaiMmbQ51u7eRnH0KIZoYAoPDyMvLG1dvt5nG6XRSWVlJfn4+NpuN+vp6fv3rX0v1mxBCTIPs7GxcLhdN1cUsuuxOsk69Ao3egNLHl+AwNTt37uS88847pGsYjUbsymjSjjlzyHGNLnnUQTz9/f1UVFRQVFREVVUVl19+OUFBQdx99z2ExRqYe8FtaPUGNPp0QtQ63v/LNeTlTc0UayGmkiTSxiHWcAw7PvwXe/bsIS0tzdvhCCFGUVtby1/+/BhXL0pAFz6+L7+DPi1oJK+qlQ/+df8URSeE8Daj0cCmbavGXKfSJeNyudi9e/e4hxkNTu5srilm0Q9+T13pNnz8/Dnzznco+uINmmuK0ZuOdU/u1Kdjs83sLw9Op5OKigpPwiw/P59deTYKCwro6uoEwD8wmP6+XjQaDXfccYeXIxZCiCNfZmYmPj4+NFUWEJM2m8jE/e9R2ngzOyZh4ECG2cRHG3YMOx4WnUJB4f62B11dXdx7770UFBRSUFhIWdke+np7PY/v2bOHBx54AIPRSF+4kezTrhpyPnVsOnm2DYccrxDTTRJp4xCbPheFQsGGDRskkSbEDHfvvffir3Txk5OSJ/Q8p9PFgx/t5sQTFo97Sp8Q4vBjNBqx1+9loL8PH1+/g65T61IAKCkpGXciLS0tDf+AAJqrS4jPOJ7Tbvgzb/zxInI/epq63dtJtJ6Ij6+/+/x645RN7pwMDz38MHf+4U66u7sACAgKQRObjjo2neyzT0WjN6DVpxOqieXthy6b8UlBIYQ4UgQEBGAwmmisLBz2mDbOzPYdGw/5GiaTiVdfXzVsR5Y6JoXCr1d5fn7hhRd44IEHSLAsRB03j+Pmfh+1LgV1TCrr/v1bCgrcMc7KsrJm8/D3Ca3eQN6a5+np6SEwMPCQ4xZiukgibRwCQlRExhtZv349V155pbfDEQJwT39rampi9+7dlJaWev5ZWlJMeFgYr/3vDcLCwsY+0RFk9+7d/OtfT/GrpWmogg7+BXkkb+2opaC6lZWvPShbuIU4ghmNRpzOAdobq1DHpBx0XVB4BAFBoaNuYfk2X19fjEYTLd9M7oxMyGDB93/DxpfuAWDW6fvvxGv16Xy95S0GBgbw8fH5jq9m6qxZvYZAdSyn3HAHmth0QrWxB/3dqIpNZ5dszRFCiGkzZ3YOn31ZAICjq52mqkKaqgrZt9dG3Z7dh5yYMplMdHe20tZYhXOgjzCtnnX//h3h0UnU19XQ0dFBaGiopx/nST9e4ZmGPUgdk0pB4RbAXUX3+ptvD0vMafTpOJ1OioqKZLCfOKxIIm2cotOPYd16KTsV08vpdFJTUzM8WVZczO7du2nr6PCsjQwNIUkdhtbfn482byE3N5dFixZ5Mfrpd+cf/kBkaABXLkwc9tj7O+sIDfTlBOPwHj69/U4e+biM8849hwULFkxHqEIILzEajQDY68tGTaQpFAo0MaP3ghnJrCwra7/av+3FevLlVOV/QV3pVpKsJ3qOa/UGHI4e9uzZg8FgmOCrmHpWq4Uvd+STaD1hzLUzPSkohBBHmuzsbP776mu8fPvJ2PdVAeDr54fZnMH1f/jDIVd3Db5Xrn7qVhrKcvEPCqW3u4NgdTQAxcXFzJkzB5PJBIC9bs/wRJou5f/Zu++4Ju/tgeOfJISdEPbeCSCgqIAbtVWrrR12WLvnr+vWLtvb27137d5719raaodWq9aBoyq42IS9N2HPJL8/omkpqKhAQL/v18vXvSTPOKGa5DnP+Z7Dzq3L6O7uJioqivaWRlp0ldg5OiOT25C/fyM2dqab/unp6SKRJowoIpHWT96aODZs+oaKigq8vLwsHY5wCunq6qKwsLBnskyrJUerJb+ggPaODsB0UefjpCBQ6Ui40pE5MeEEqpQEOikIUClxtDZVYLV1dRP1zlfk5uaeVom0AwcO8O2yZTx30SjsrP++kGtu7+axVRn8kFSGnVxG9vOze+373e4Simpb+O2554cyZEEQLMDHxwc7O3saKgvNjxkMeprrymmozKehshBdZT4NlQXUVRRQVtY7MX80UVFRrPx5tfmuu0QiYe5/3kYq7ZlgcvYxJc9SU1OHZSItMjISXVUxXR1tyG3sjrqts7eajo528vPzUavVQxShIAjC6euqq64iJTUVTw8PYmJiiImJISIiAmtr6wE5vlqtRiKR4B4YTVX+ATrbTDfvJ164hE2fPWBOpAUFBWEll6OrLMAvcmqPY6i8gunu6qKgoMDcIiFrx0p2r3wVj5AYqvJMvdwUzp6iPYAw4ohEWj95a+IA2LZtG5dccomFoxFGmtbWVvLy8syJstzcXLRaLblaLUUlJej1egDkMhn+zk74O9ozQaXgkkkx5mSZn1KBjdWx7/Tbya3wVCrIyckZ7Jc1rDz80IMEuTmyaIKv+bF9RTru+OYg1U2dTApxJqO8qdd+rR3dvLGxgKuuvJLo6OihDFkQBAuQSqWEhIaSvXMl5do9NFcXUl9ZSHeX6aaFlVxOSEgoMWFhXDrvVm644YbjOn5UVBRtLQ206CpxdPY6dM7e7932Tu7YK1SkpaVx4YUXnvwLG2CRkZEYjUZ0lXm4B0QddVtnH1PyLD09XSTSBEEQhoCPjw9fffnloB3f1tYWP/8A5DZ2RJ95Nal/foVMbkNo7Dz2/LSUrCxT5bWVlRXBwSHoKvJ6HUPlFQJAVlYW8+bNw8bGlpb6cgBzEm1UwqU015WSlpY+aK9FEAaDSKT1k6OzFyoPfxITE0UiTehTfX19j6qy3Nxc0xLMnBzKKivN29lZywlydsJf4cAcNwUBoRPMyTJvhQMyqfSkYwlQOpKbm3vSxxkptm3bxuo1v/P2lWOQy6ToDUbe+TOPV//IZbSvkq9uiuXX/RXkVLX02vfTbUXUt3bx5FNPWSByQRAs4eab/o+lr7xKiKuRiClzCAsLM/8JCAjAyurEvx4dTsjXl2nNibS+SCQSnL01w/Yu/OHqgfrSnGMm0h
xUntg6KElLS+P8888fivAEQRCEQTYqIgJtZQGzb36N+vJc3AOjkNs64OQZbE6kAUSEh3MgvwCjwYBe34WV3Abtrl/xUscht7ElOzub+fPnExYeTrfBwIQF97B71WsABI+bTXH6dlLTdlrqZQrCCRGJtOPgERrH5i1bLR2GMEx8+OGHbN68GW1WFnl5edTpdObnVPZ2BKqUBCgcuDjIi4AYjTlZ5mZvN+jN7AOVjmizs4694SnAaDTywP33E+Wn4rwYL0rr27hrWQq78+tZfGYI95wVilwmpUtvRC7rmaTUtXbx/pYibr75FoKDj9wrSRCEU8udd97JnXfeOSjHDg4OxtbWjrrSHPyjEo66rZO3moMpKYMSx8lSKpV4+/hSV37s6mZTUjCU9HRRUSAIgjBU9Ho9qampZGdnk52djVarJTMri+xsLVdffTVvvP7aSR0/IiKc5NTVWFnbcv69X5gfV3gEkZGZ1WO7bbu+I3n1O+z5+U08Q8ZSmbcfmdwGV58Qc9JtdHQUm/dkMf2qJynJ2EFDVSG+o6bQoqti26av6ejowMbG5qRiFoShIhJpx8FbE0fi1z/T2NiIUqm0dDiCBVVWVnLLLbcQ7ubCaA9XZkSGmHuVBTopcLK17IdAoErBn+m9S6xPRWvWrGH7zp18+X/jWX2wkgd/TMPRxorvb41nUqiLebsuvQG5Vc8E5nub8ulGyqOPPjrUYQuCcIqSSqWER0RQd2hy59G4+GrYtfNHurq6kMuPb9LwUBgdHYW2rH9tApy8xOROQRCEofTSSy/x0EMPAWDn6ITKKwSFeyB2HvDNN9+edCItLCwMXdV7GPTdSGV/pw1UXsEc/P13cy/Q8PBwdNUlSKSmbSrz9gMw8aJ7qczdZ066RUZGsvKX1UgkUs679wtz2wNnbzV6vZ7s7GxGjx59UjELwlARibQ+NNWWYmOvxPrQFJHDvMPiMBgM7Nixg3nz5lkoOmE4cHNzQ25lxeWjw7gmZpSlw+kl0ElBnU5HfX09zs7Olg5n0BgMBh564H/EBbvwy74KViSXcd5YL56/OBInu54XpZ3dBqz/UZFW0dDOp9uKue9/D+Dp6TnUoQuCcAobMzqaDTuOXWnm4qOhq7OTnJwcRo0afp8lUVFRJB1c2a9tXXw07P3lNwwGA9IBaFEgCIIgHJ1Op8NR5c7CJ1Zj6/j3933trl/Z8NESdDodKpXqhI8fHh6OvruLpppSnDwDzY+rPINpbWmmoqICb29vwsLCMBqNBMXMorY4k9ykNQCEjJ9LR4uOrKRVwKEeos0NtDZU43Bo+ieYJj+Dqc+mSKQJI4X4pvMvpZm7+OGpBax84fJez6k8g3FwciMxMdECkQnDiUwmIzgoiEJdo6VD6VOAylQxear3Sfvuu+84mJpGaV0Lv6dW8uqiaN65ckyvJBpwaGnn3xVpb2zIw97Rkfvuu28oQxYE4TQQFRVFXVkORqPxqNu5+JqmdQ7nPmn1VUV0d7b3+byusoBt3z1De3M9zj5q2tvbKCgoGNogBUEQTkFdXV188803ZGRkHHGbsLAwWhpqsLKx7/G4ytPUrkSrPXZl9NGEh4cDoKvsucpF5WU6/uElm4e3a6jMZ/pVT+Lg7IVvxGQUrj6oPEOorCijqanp796b/2oZYOPghMLZY9h+FgpCX0RF2j9kJP7A1q8fw6DvxlsT2+t5iUSCZ+h4tmwViTQB1BoNRVnD8w0/0MlUTZmbm0tcXJyFoxkcnZ2dLL79diQSU5LstctGo/ZwoLC2DWsrCXKZFFcHa6RSU/KsS28w90jLr25h2a5SXnjxRZycnCz5MgRBGMaMRiM1NTUUFhZSU1PDWWed1a9qq+joaDrammmuK0fh6nPE7ewUrjg4uZKWljYsBxlFRUVhNBjQVeSh9Ajg9zdvwcU3jKCxs2jRVZH47ZN0d7TirY7FM2QsYKooCAkJsWzggiAII9xbb73Fvffei5WVFS+88AL33ntvr20OV4I1Vhfh4qMxP364ekyr1RIfH9/n8Y1GI2VlZaSmppKWlkZaWhoHU1Jpamril59XERYWhq+vL7a2dhQc2ERzXTm6inwaKvNprMoHTIPWANzd3VEondBV5hMSO5erXtxsXrbp5BlkjmXMmDFY29hQX5aD36gpPeJReavF5E5hRBGJNMBg0PPXipc58McnRExbiPavn/EKHd/ntl6aOPasekU0QxRQazSsSdpt6TD65GRrg8rejpyc/vW2GYk++eQTmpsaMBqhprmTm7/Y32ub/52tYfEs0wVdZ7cBayvTBfDSdbl4eXpw++23D2XIgiAMM0ajkerqagoKCtBqtezdu5f29nYK8vMpyM+loLCI1ra/q7Fef/117rrrrmMeNyrKNOWyrkx7xESavruLxqpCbB1dSB2md+EPLzetK8/BysaeqoKDlGXvJnXT1wDYOjpj0HfhH52A3MYBGztH0tLSOPfccy0Z9nFpaGhg5cqVzJ8/H3d3d0uHIwiCQEVFBY899hhzxs7gYEE6Tz7xJI6Ojlx//fVYW1ubt9NoTMmzhsqCHok0azsFjip3srOz+zx+W1sbsXHxZKSbPnvkNra4+Khx8tKQm7OatWvXEhYWhlQqZdLkyWzetAwrKysCg4KJDg8n4qyFREVFccEFFwCmYpOwsDB0FaYE2+EkGoDKy/Q9PCsri/Hjx6PRhFFXpqWzrYm6shzqy3OoL8tFV1lIlrRpAH+LgjC4TvtEWmd7Mxs+XEJRyhamXvYIXurxZG77AW9N31U8Ppp4Ojs62LNnD9OmTRviaIXhJDQ0lOL6BgxGI9JBnsJ5IgJVylN2aWdLSwtPPfE4Z0V5cN88NV3dRjr1Brr0BvP/X5taxSvrcpimcWVsgJN5aWdaaSO/7C/nww8/xM7OztIvRRAECyktLSV23Fgqq2sAkABGwN5aho/KlnEBTlw8OxB/Fzv8nO1Y8n36UZfY/FNAQAD29g7Ul2nxj5pGY3UxdaXZ1JdpqSvT0lCeQ115HvruLgBCQy4epFd5clQqFZ5ePtSX5RA28XymLnqYLV89Suy5t+MZOo79az/CSm6Lta0jYOpzM5wmd7a3t9Pd3Y2jo2Ofz1dUVDD3rLkcTDlI7PhYNm3ehEKh6HNbQRCEoWI0Gunq6mJPzn50zQ24KJy59dZbUalULFq0yLydp6cnDo4KdBX5ZCT+gEdIDNpdv5K7ZzUKV78jJtL0ej0Z6WmMmX0d0WdehdLNH8mhamtdWWaPJZZrVv9GUVERISEhRx2KEzkqgg07U3s9bmNvSur9c3Ln99//QPqW7wBTEi4wKJgZk8dzyy03H/8vSxAs5LROpDXVlrLmzVtorivlnLs+IiB6Ogc3fI7Myhr3wKg+93H1j8DGzoHExESRSDvNhYaG0tHdTWVzK94KB0uH00uAwoGcI3yAjnRvvvkmtXW1PHTjVAJc7fvcZorahZSSBu749iBr75lsXtr54tocwtShXH/99UMctSAIw0lxcTGV1TU8cm44UzUulOvaueGzfVhbSSmuayOnqoV50R7cPCMIgEAXGwry+zcNW
SqVEjFqFHtXv0vSz6/T1dkBgJPKmejoKGbPP4OoqMVERUURFRWFh4fHMY5oOdHRkeQdmtw5avoiitO3kfLnV4SMP4tybRIJVz5u3tbJW83BlN4XUkPpk08+YdmyZWizsikuLcHL04v0jPReDbdzc3OZM3sOjXUN3HPBrXyw7gsuXHAha35f06PiQxAEYah5e3uTmprKxRdfTFNaGpJDN+xdXV17bCeRSNBoNFTmHSB/3x89nvMMGW+elvlvjo6O+PkHILWS4+QR2OM5Jy81Kf94H7ezszP3QDuasLAwflz1W5/POXkEmZN6L730EuHh4Wg0GiIjI4mIiBA3toUR6bQdNvDXT6/w4zMX093ZyoUPfk9A9HQAyrXJeASPQSbve9mmVGaFZ8g4too+aac9tdo0YWa4DhwIdFKQewou7ayvr+elF1/gyol+R0yiAchlUt66cgxVjR08/nMmnXoDLR3dbMqo5pnnnsfK6rS+jyAIp72goCAAgt3tifZVMifKg7nRHtjJZdxzVigA2ZXN5u39nO0ozM/v9/HfeP01rr3qcl5+6UXWr19PWVkZ9XW1bEtM5L333mPx4sWcccYZwzqJBhAdFUVjham6WSKRMPOaZ7G2deCXpddgNOgJijnTvK2zt5qszEwMBoOlwuW+e+9DezCLOP8xLJp2AeUV5b0qCTdu3Eh8XDxtDS08f/XDzIiazAMX3cnGPzfywgsvWChyQRCEv2k0Gp566in0BgMqeyVymRUvPN/7/WlURDgdzXUEjJ5hfsw/ahqufmFotdojDr0ZM3o09aW9b7g7+2hIS08/5rCcfwsPD6e1qZ725vpezyk9g0nPyDTF5u/PE088wZVXXsm4ceNEEk0YsU7bRFr6lu9QeQVz0UMrzGvKjUYjFTnJeB1hWedhXpo4tm3fjl6vH4pQhWEqKCgIiURCYcPwXM8fqFJQVllJa2urpUMZUC+88AKd7W3cOfvYzayD3Rx4asEolu8uZXdePbnVrYwfG8PFFw/PZVSCIAwdT09PbG1sKKlrA8BgMJJS0kh5QzsvrNEyJ9KdG6YF8sq6HO5ZlsLGjBrKKir6ffxp06bxwQcfcNdddzF79my8vb3NVQUjSVRUFHWVBei7TFV1Ng5OzL7pVTrbmvAIHoODytO8rbOPmra2VoqKiiwVLsFBQYT7hnLNGZdywcSzgZ6T67755hvmzZtHva6emsY6mtta6NJ3s/7AFiQSibkvnCAIgqWVlJQgk8l46or/4eXigX+Af69twsLCaKwu5MwbXsRO6QZASOw8VF7BtDQ3UVVV1eexo6OjaKjo3QLGxUdDY4OO8vLy44r1cNVaadYuitO2kfrn12z79ilWv34DhQc2HDEOQRipTtuSjOtf24XRaEAq+/tX0FhdRGtDNd7q3hM7/8lbE8fuVa+TkpLC2LFjBzlSYbiysbEhwNd32FakBTgpAcjLyyM6OtrC0QyMsrIy3nzjDW5O8Mdd0b9hH5fG+7Aps5rVBysBeP7Fl/o1dU8QhFObRCLB38+XVfvKOVjSSE51K+W6dmRSCXqDkfXp1axPr8bb04PAoCAmz5lhbqx8OomMjDw0uTMfV/8IwPQ96KKHV/RIogHmG5NpaWnmir+hFhYRTupfBwCwldvgqnQxJ9Jee+01lixZwqwxCcRrxvLST2/z4FfPEu4bSmZpDt9///2wnJ4qCMLpSafTodfr+d9XT1NSXdbnZ1BYWBjNumpkVtbM/r+l5O/bQPC4ObQ11gKQnZ2Np6dnr/2ioqLQVZfQ1d6C3NbUoqa1oQalmylZl5qaio/PkadO/5tarUZubc0f790BgJVcTnBwCGMjIgg/+8bT8vNTOLWdtok0iVSK5F8FeeXaZJBI8Aodd9R9PYJjkFnJSUxMFIm001yoWk1RaYGlw+hTkMrUMDk3N/eUSaQ99dRT2FnBLYd6FvWHRCLhuYsi2ZJVi7d/EHPmzBm8AAVBGFGuufY63nj9NQ6WlOPm5s4lCxeSkJBAREQEQUFB+Pv7Y2tra+kwLSoyMhIwTe48nEgD8Aga3WtbRxdvbOwcSE9PZ/78+UMW4z9pNBp+XrmK+794irK6ClPFWVcXGzZsYMmSJZw1dia3nX0dEomEy6dfzDdbVqCtzOf3tb9z5plnHvsEgiAIQ+S+++4jKiqKX375hfbWtj6/w/49ubMQv8ip+EVOBUBu64BEIiE7O5uEhIRe+x2eLl2Umsifn97PuLNvYs/Pb5r2tbYhLS2Ns846q9+x2tvbk7RnDyUlJYSFhREUFCTaqAinNPG3+x8qcpJx8dFg4+B01O2srG3xCBrN1q2J3HHHHUMUnTAcqcPC2J6ZduwNLcDN3g57a2tyTpE+aVqtlo8//pgHzg5FaXfkqUF92ZxVQ3NHN19+9dWIXFo13BgMBsrKytBqteh0OnGXURixbl+8mHffeZuxASoqGlv4fc1qZs2axezZs8V7xSHOzs54eHpRX6Y95rYSiQRnb8tO7rzmmmvYv28/Hp4eaDQaUlNSef+993jxxReRSqTsy0+hoaWR2uZ6VievR6NWs3rNGvPFqCAIwnBha2vLhRdeyIUXXnjEbQ6/d+kq83EP+vvGuZXcBid33yNO7hw1ahQSiYTc5N/p7mwzJ9F8wiei72jpMbmzv8aMGcOYMWP6ta3BYKCoqIj09HTS09NJS0sjJTUNa2trVv/2K87Ozsd9fkEYSiKR9g/l2iR8wif2a1svdRxbtv6C0WgUX7ZPY6GhoSzTNQ7LvwcSiYRAZydyc3v3PxiJNm7ciF6vR9faRXN7N462/Xv76uw28Mr6fBZccD6TJk0a5ChPHUajkaqqKrRaLVqtluzsbLTZWrKzssjJzaGtvd287SeffCKWQwkj0l133klLk473bp6Mo40VT/6SyR2338Ybb7zB2rVrCQgIsHSIw0JUVBSFZf27KePkFWrRyZ0ajYZff/vV/POUyZNxtlXxn7OuJ6M4m192r+PRZS9S01hH9Oho1vy+Bjc3N4vFKwiCcDJUKhWubh7oKgt6Pad0DyIrq+9Emr29PYFBwTg4eRIYcybFqVsx6LsZlbCQ4rRtg/o+/tHHH3P3XXfT2toCgI2dAy4+auydfcndsYZdu3Yxb968QTu/IAwE0SjokLamWnQVeXhrjt4f7TDvsDiqqypOmSSFcGLUajVN7R3Ut3dYOpQ++SvsezRZHsmuuOIK7rnnHj7eVsq0F7fzaWIhHd3Hngz37a4SimtbeObZ54YgypHPaDRy1ZVX4aR0wsvLi4SEBG644QY+ff9jcvZl4mvtzuXTLuThhffwzi0v4uXiadHqE0E4UevXr+err7/msXPD8HayRWFrxc3Tg+jSG9FmZRIdFcmnn3563JPLTkXRUZF9NqXui7OPmsyMjGHze4uMikImkzIxbDzTIidiMBpoN3Yy44wZ/LnpT5FEEwRhxAsP09BQ2XuqtNIzmMysrCPuNzo6Gl1FDmdc9zx2ClckUhkB0TNw8VGTMYjv44lbtyKzVTL/7k+46sUtXP/mPhY8uII5N7+GtY2d+F4pjAiiIu2Q
sz6WpvRuIkP+M4bFx8yS06v+7yCQyPjz76iFdfexV9PX2+/eZbPv3sU2JjY0c7rBElJiaG1WtWk1J46pIOPbEYU2NTDA20J3CCR5qAwF+czCwRfKcuTAIDlSR8+/Owx1vZuxMy+zZCZt9GR0s9Ram76Wpv0olovd2dNFQW0FCeT315Po3l+TRVFdBQVYRGrcbSyrpfZkFMTAxqtYqdqx+jra6U6uIsXWOAmMmTmXLnDcTExBASEnJeez4JjD4BAf5s3KIV5Xo626gvz9d20izLo6Eij8aKfJrrKsg79BtisRgPTy/Cg4IIvOo+3TXV+cLUKbG8/d5H/c7jg2cuIffABgDcg6cSOvevLqjtTTVU5CXS3dnGvv37B10/NTWVKVOm9PkM1zcwRM/ACD19A+1DzwA9A0PEegaI9Q1O/DSkrbGGjRs3cu+992JlZaWdL9bTiWgAPV3ajqVJm1Zx9I9PiLz8fsZdcqfudSNT7blUY2PjoHH6+PigVqtori1F4tg3CcVRPk73b5FYD8WEywZcw8rBA41GQ0FBga7TqsDg/NNj7+WXXyY7O5vi4mKSkpJoa21FdSJL2dfXl9bWFu5bcDur/ljNmjVr+nQD3bJlC4sXLybaL5Kc8nymxMaSm6etmmppaaH1RNdojUaDrYkEI40BdS313DTlGl5d/z47duwQhLTzmFH7Vj5w4ACHDh1i2rRpWFhYcODAAR588EFuuukmXXp6WVkZM2bM4KuvvmL8+PFYWVmxdOlSHnroIaRSKZaWltx7771ERUWdstGAwMXHxIkTUfV00lpfPiwj4IGIWvjYv45DKlPQ091FRUXFoKnbAucXpaWlPP7448wJm8b8iJl8+OeXTJkyhWXLlvHqq6+OmfKZz7/4nM8+/4y2tjZaWlpoaWmhubl5wH+3tLRgbW3NlClTRjtsAYFRobq6moyMDN0jLT2d9PQMZs+eww/ffzfa4QmcAQEBATRWv0Vvdyf6p9HBuKerg9a6ckRiMR3Ndfzx3h00VRbQWF2suzB3kjmjVAYQOPVylEolAQEBBAQE9PP49fPzI3bqNAqP55xoDPAAkydPxtfXVxBoBU4Lf39/vli9mu+emEZjTSmgFYjc3D0ICQoi8NIluk6afn5+53V2UmxsLCtXrqSx8hgSJ2/d9rZGbVdT5dQbCJmzlONHt1ORm0BVfoKuyYaHpxcrHnt0wHVP0t7ejkaj4eqn12PrFoD4hIdcT1c7G1+9kbB5y/EcN1u3/e9kxf/I7i+fRK1W6/4/d3e09BnjN3khSZtWAaBW9dLW0Lc760nBbjgZaQBNlcf7CWkn3wuAKx77DkfvMF0sTVVFNFYdp6m6kNpibYXO8ePHBSFtELq6upg6dSrpaWl0dHTy8Scfs3TpUnJzc3niiSfQE4tRqdVIzK1RqdU8ufAB3tr4EWq1Gg3w0/7f6FX1kpKS0mfdoqIi1Go1HvZuFNaU0N3do3vN3NwctUYryInFYtBoMDQwpEfVy8s/v4uri0ufBosC5x+jJqQZGRmxbt06nn32Wbq6uvD09OTBBx/s45vW09NDTk4O7e3tum1vvfUWYrGYq6++mq6uLubMmcOqVatG4xAEzlMCAwMBbZv6MxXSRgKpTA5oO3cuXrx41OIQGD4ajYa7774bI7EBN09biLmxGStveoItR3bx9Tff8uvGX3nv/fdYuHDhmLjIEYlEmJubY25uLoi9AqNKbW0te/fuZd++fVx99dWjdnOsrq7uH4KZ9mddrdYsXF/fEImTJ1ZOcqzdw/hl/U90d3+JoaHhECsLnG8EBASg0WhorDyGrVtAv9d7OttoqDx2IqMnn8YTj6aaUp1gJnN2ISgwEOWMqwkICECpVOLv76/LVBkKkUjE7p07xsT3icDoctVVV5GdnY1UKtUJZv7+/piamo52aIDWDwoYVmfPqKgo9PT0qMhNQOLkrft/2t5Yjee4OZSm7yZj91oAXZON2NhYYmJikMlkQ6yOzjdbJBL3Ectqi7OoKUpn60f3YeXgybh5y/t0OQV0JZY9PT26/+dd7X2FtOy9P2r3Y2bNlJufxyt8bp/XRSIRxqYWQwppTk5OmJqa0VhV2Gd7b08X3e0tTL7hv3S1NZIV/yOHf36N5ppCWhr+amxhY2uPr4+CuXfdxaxZs4Z6Wy5q3n//fQ4ePMhk/wmkFGbw4YcfsnTpUl555RXEIjFv3PYcD3z2f5gamdDQ2shHf6yho7uTyspK9PX0Ka/XNj2sq6nts+6yZctITU3lgw8+QOYkY+eunbrXxGIxURMmsO/AQYwMDSlpqGDihIncErOEm2++GYVCIXw3nOeMmpA2btw4Dh48OOgYDw8P3cnKSYyNjfnggw/44IMPzmZ4Ahcwbm5uGBgYUl+eh3vw1FGLQ3Kic2d8fLwgpF0g/PLLL/z22288etU9mBubASAWiZkXPoPxPmF8uvVbrr32Wr768itWfbgKN7fzy1dEQOBCobi4mPj4eOLj49m1ew+5OdmA9iLljz+3kJ6Welb339jY2E8wS0/PoKZaezIs1tNH6uSJlaMcz+jrCZfJkTr7YGXvjlhPe+pUnptAUcpOcnNzdTdwBC4cTnburDqegqq3h4aKfJ1vVFNlgS6rB8DF1Y1ApZLAWdfqssv8/f1HpNxduFASGAlsbGz49NNPRzsMNBoNJSUlus/W9PR0UtPSycnOZnJMDFv+/GPINSwsLAgOCSMz/gdKMvdSlZ9IW1Mdenp6hISGcdXi64mJiWHy5MnY2Nicdownb3yoerv7bHdShGPj4kddaTZNVceJX/tcPyFNfKIzZnd3NxYWFhgZGdPWUElTVRHdHS10dbQgdfbBZ+LlSJx9qC/Pp7LgJbo7WujuaKGno5XezhY62pr7JIoMhEgkQq5QcPzIVlrqymiuOk5zTSFNNWW662MzcwsUCgVh4b74+FyCj48PPj4+KBSKYQv6ArBgwQIeeeQRulU9iEQi0k90WW5sbEStUfPwF88iAsrqKgBo7WxHam5NR08XIpGIKYHRWBib8XvidlpbWzE3Nwe0Ytl7773HpZdeip+fH+7u7n32G79vPyqVCn19fVQqFXp6/bMgBc5fBMMFgTGHSCRCJpNRX3b6nTtHEhMLKUZm1v3SfAXOTxobG1lx9wrG+4wjyjei3+s2FlIev/peDuYk8um2b/H39+fFF1/knnvuEb74BAQGQaPRkJOTQ3x8PHFxcezeE09pSREAts5y7OURzIhZhpMikrKs/ez+8sl+TYXOlObmZjIzM/tc1KWlZ1JZUQaASCxG6uiBlaMct/FXEypTIHFWYO3g0c/c+Z9IT9wsycjIEIS0CxArKytkzq7Eff2MbpubuweBSiXKudfrSjL9/f11F0UCAgJaNBoN5eXlfW5IpKSmkZWVRdsJ3ydDIxOkMjlWTgpkgY5s3fI7LS0tw2pctGzprfznP4/i6yrhhnvvIiYmhqioqBFpenRSSFOrevpsb6g8Rl2p9qaOtaMXE658qN9cvRNCWldXFxYWFjjJZBze8BaHN7zVb6yxsQmWVlZYnXhIpNZIJ
M5YWVlhbT2HBx98cMhYF15zNS/8739YG3YyztcXn/nROrHMx8cHe3t7QYwfAXx9fTExMiYxP1mXPZmYmMiXX36Jt7c377zzDupuFa/f+iyPf/U/HKxtsbW04eixNBwcHNiTrvXlMzM17ff7EIlEzJkzp98+T7520o9NuJa48BCENIExSUCAP4fTzqxz50hi4+xDYeHx0Q5DYBg8/vjjNDU2cfvCRwc9KZnoG0GQewDf7P6JBx98kM2/b2bL1i3nMFIBgfOb3l6tT4hWOIsnLj6eutoaxGI97N0DsPebTuBlkTjKw/t1O3TyiUSj0bBv3z7mz59/RvtXqVTctnQZO3bsoKy0BNCerEoc3LFylCMLuwzlAjlSmQJrR68+JtGgvZgqTN6Bd8S8QfdjbG6NhcSOjIxTN+wQOL/ZsX0rhw8f1vlGmZmZjXZIAgLnDb29vVRUVNDb24unpycAO3fu5Omn/4+MzEyamxoBMDA0QuIkx1qmIGhOLBKZAqlMgYWNM6ITpZw1henkH/6dzMxMJkyYMOS+7777bu68885hlYKeLrqMtJ6+GWnm1g5YO3oRPHMx/jGLdNnHABq1GrWqB80JkaW7Wzt3547tJCcnnxDHrHWimZWVVb/mImfC008/zZNPPnlW3geBvoSEhXIkMYn/3fI0j655ntWrV/PBBx/w6quv4uvry7Jly3jy6xfpVfVSUltOdXMd06ZO45tvv2HVqlXMmzePCRMmCI1hLiKE37TAmCQqKoqt23agUat1X+KjgY2LL1nHhYy08529e/fy8ccfc/vsm7CzHLpMwMzYlOVzbuZYVSEFBfln3CFWQGAs0NnZSUJCAnFxceyJi2P//gO0tbagb2CEg1cwHhMXEqWIwME7DEPjwTN7LO3csJDYExcXd8ZCWmlpKV99uQav8LlMn3OfTjAzMDIZdJ5GoyFn/3ri1z5Pb1c7N72yBwubwf12rJ0UpKcLQtqFip+fH35+fqMdhoDAOUej0VBfX09JSQnFxcW6n8XFxRQVFVNUXExVZYWu7Gz//v1ERkby0ccfk5yeTdDM25CeFMzsXIj/9jky93xH+IIVmFrZ9RHRAKydvBGJRGRkZAxLSIPh+amdCSeFtIM/vcrRzatQ9/ag7u1GpepBjJrUPz/hyKb3UfV2o+rt0T10842MdCKZp6enTmQ8Wwgi2tmlq6sLIyMjrrjiCg4ePMija54H6PN7vfnmm9m0aRMqlYpFixYRFhZGQECA7tz/f//736jELjC6CEKawJhk/PjxqHq7aakrxdJu9HysJM4Kenu6KSkpwdXVddTiEDg1XV1d3L7sdnxd5MwdN2PY83amxpNTVsCWLVsEEU3goqK5uZn9+/cTHx/Pnrg4Dh8+TE93N8amFjh4jyNw9nKcFJHYewSidyLbS61WUVechZ3H4CWQIpEIB3kke+Lizjg+V1dXTE3NcPAKwTfqimHN6e5oIe6b/5J36DfE+gZInLyHFNEArJ3kpKYdOONYBQQEBM4FW7du5fvvv6fohFBWVlpKR8dfHl16+gZYSh0xlThhJnHCMSQE+TRHzKwd+PODu0hNTSUyMhJ/Pz/+2LKT8Pl39Vm/rkRbBZK06QOSNn1A8MwlTLruKd3rBkYmWNu7nRcZvI6Ojjz33HNkZmZiaGiIoaEhRkZGun//8/k/X1MoFNjZ2Y32YQj8C44dO8aKFSs4fOgwjY0NTJ02jS1btlBSUoKVlRVLlixBoVDoxhsaGvLLL7+MYsQC5yOCkCYwJjnZ4rm+LG9UhTSth46GLVu2sGzZslGLQ+DUvPLKK+Tl5/HGrc+iN8y7fo1tzazZ9T033ngjs2fPPssRCgicH3zyySes+vAj0lJTUKvVmFnZ4iiPYPxVj+LkE4GNi1+fDmgn6elsY/tnj1CYvJ25Kz7EM2zwdu5OiggO/LCS9vb2M+o4JxaL8fP3H7ZPZvXxVLZ98gAdLfXMWPo6+354EffgacOaK5UpyNj1DZ2dnRgbG592rAICAgLnguV33EldUzv2XqGYu0cTFibDXOpIXWkO5bkJRC98HDuPwAFvDJpb21JSoi2TVyqVtDbV0tFS36c0f87dH/DVI5MAbTdMKwePfutYOclJT08/Owd4GohEIp555pmhBwqcd3R2dpKamkpoaOi/6pa9ePFi9u3dR5hXIB1mNuyNj0dPT4/3339/BKMVGOsIQprAmMTZ2RlDIyPqy/PwCB1+ltFIc9KMeu/evYKQdh6SlZXFyv+t5IoJ8/CwH77g+sX2tRgYGfLWW/3NZQUExipPPfV/YGJDzE0vIPOJxMrBY8hszNb6Cja/t5y6Eq2Bs5WD+6DjQSuk9fb2cujQIaZNG56g9U+CgwL5c0/SsMb+8f6dtDfVEH7pPTj7T8TEwoaitD1EXn4/+oaDi2NSZx/UajU5OTmEhIScUawCAgKnRq1Wk5+fT0pKCgkJCaMdzgWLvb09xjIZ0297tc/2rR/dB8DPK69CIpMz6doncVXG9BljLpVRXFwMoGus0lCeh4nvXyWa5TmHAK2Idvmj3+Kk0DZtUvX20FRdSH1ZHp1tTaSlV5ydAxQYc3z33Xfs3r2b9PR0CgsLqatvpKurAzQQOX48hw8dPOO1fX192bd3H/p6+hyvKkalUtPU1DQiTY4ELh4EIU1gTCISiXB2dhn1zp1GZlaYWEhJTU0d1TgEBmbVqlX09vYyyX/8sOccKUglLuMAX375pZDaL3BRMX58JGmFjQTELhrW+KpjKfz5wV3o6RviFT6HsuxDSBy9h5wndfbBxMyS+Pj4MxbSlEola7/7flg+meELVpBz4BeSfnufI5tWYWptR1tDFfu+f5EpNz8/6FyJTA5oO3cKQpqAwMiQlpbGBx98wJGjyaSnpelKEM2the/cM8XTw539aSX9tl/+6Fo2vnoDAA3l+ZTlHO4npJlYO1J0QkhTKBToGxhQX56P7G9CmmfYLMLm3YG1oxdl2QdI2/E1zZX51Fce1/mL2djas2zZ0rN1iABUVVWRlJREUlISCQmJJKek8MLzz7F48eKzul+BkeXjjz/mzrvuAo0GQxMLpM4+eCumIHVWkPjr+6Sm/jv/6QceeIAf1n1PbvUxomMmcffddwsimsBpIwhpAmOWQGUAe5NGv3On1MWPouKc0Q5DYACWLFnCH5v/4LEvX+CaqAVcFb0AA71Tfyx2dnfx8davmT5tOjfffPM5jFRAYPSZMiWW7TufQ63q7dPNbCCK0+P484O7kTh6Mf/B1Rzd/BG93R3UFmcO7ZMmFuMgD2f3njjOtPgmMDCQ7q4OWurKsLQb3J8ycNqNBE67kbbGaopSd1OYsoPO1kYq8hJRq1UDlquexNDEAguJ/Xnh+yMgMFZ46qmn2bpjF+7B0wldMBUbFz9sXP0xtbThw2WKoRcQ6IerqyvtcYf7bS9K2an794SrHiZkdn+hy8zagaIirRekgYEBcrmCwpSddLU301CeR2NFHvXlx1D1ajtZSqQ2BAYGMueymSiVSt3D1tZ2RI+ppqamj2h2OCGBivIyAEwtrLF1C6S115CPPvr4
ghLSWlpa+PHHH9m2dSvp6Rl89vlnw27QMFZQqVSg0TDvno/6VRYVJu+gNHMfdXV12NgM3SBsIAIDA2lubRmJUAUuYgQhTWDMEhUVxe+b/xzyQuhsY+PsQ2Z+otDZ8TwkPDyctPQ0XnjhBV577TX25SRw15zF+Lv6DDj+u/j1NLU38fEnHwu/S4GLjpiYGLo726ktzsTeM3jQsR0t9ah6uqgtyeKXlxZh565Eo1bxx/t3ctVTP2EucRx0vqMikoO/v09PT4+uO9rpoPPJLM8bUkg7iZm1PQGxiwiIXURPVwc9nW267w6NRkN7Uw31ZbnUl+fRUJ5PY4X2Z2d7i/akX0BAoB+VlZVkZWUxadKkYXsaeXl5YpaQyoxlr5/l6C4e3NzcaK6r6HcuKh8/n/zDvzP+qocQISJl6+e01JXT1lBBe0MFrfWVdLQ1IVf8dV40d85s3n77bZpKUlEGBDB93hSUyrt1gpm9vf2wz5E0Gg1lZWVkZmaSmZlJRkYGGRmZ3H33Xdx00026cS0tLRw4cKCPaFZWqs2wMzGzwtY9EMfg+QRdFoideyAWti6IRCJStn7BkV/fpre3F339C+Oy9+WXX+bFF1/Ex9mbmqZaPv3002EJaSUlJZiamp6xuDQa1NTU9Pndm5ub88ILLxAbGwuIaG2o7DdHKlNQnnOI3NxcoqKizn3QAgInuDA+UQQEzoDIyEjUqh6aa0qwHsD09FwhkWk7dx4/fhwvL69Ri0NgYExMTHjxxRe57rrruH3Z7Tzx9Urmhk3j5mmLMDP+y+i8oLKQ3w5v5cWXXkQul49ixAICo0N4eDjGxiaU5yYMKaT5Rl2Bs99EKvOPUJmXSEV+Ehq1mrbGKo4f2UrQjFsGnKfRaGiqLqKrrYmOjnaOHj3K+PHDL70+iYuLC+YWljSU5+ERMv205rY311FflktDeR715Xk0VuTTUJ5HR2sTAEZGxvj4+jJjYhBK5SKUSiUzZoyeF6eAwPlAT08P2dnZpKSkkJKSwtHkZJKTU6mrrQZg+fLlfPzxx8NaKyQkhHfffZeezjYMjM3OZtgXDa6urvT2dBG/9jk6muvoaKyktb6c1sYaNBoNOz//DwBW1hJcXV0JcnfHbXIAbm5uuLq6Eh0drVvrzTff5LHHHsPBweG0BLPS0lIyMjJ0wklaejqZmVm0tjQDYGBohFQmp7O9lVdefa2PkLbw2uvY8sdmjM0ssXULwE45B/9LAtnz1VP09HThqAjHJWAy9h5BfTKm7TwC6ezsICsri6CgoJF4K4dFd3c3f/zxB9HR0adtA+Lg4ICBvgEv3PAYa3Z8z5Y/twx6M16j0fDxxx/zwP0PYGlpSUpqCk5OTiNxGGeFd999l59+Xk9mZiZ1tTUAiPX0sZQ60VhTwtVXX01oaChiPX0aKgr6zZfIFKhVvaSnpwtCmsCoIghpAmOWvzp35o6qkCZ11pYhbN26lTvvvHPU4hAYnODgYPYf2M+HH37I448/TkJBMstm3sRE33DUGjWr/liNUqnkoYceGu1QBQRGBUNDQyZMnEhxXiLMGdrnxlziiDzyEuSRlwDQ3dlK9bEUnHwidWPUahX1pTlU5CVSkZtAVUESrY01iEQiIiLH4+/vf0axikQiAgL8qS/PP+WYztYG6svzqC87IZidEM3amusBMDA0RKHwYUp4EMpbrtBlW3h5eaGnN3pZzhczJxs77N27lwkTJhAcPLigK3B2qKur0wlmKSkpHDmaTHZ2Fj3d2tI+azsXJM5+eEQtIsLFj/yE39l/YPjG4CEhIWg0GurKcnH0Djtbh3FRMWHCBPz8A+goPoS7uzvuQaG4uV2Gq6urTixzdXXF3Nx8yLVEIhGOjoNnFZeVlfHdd9+dEMwyyMrKou1EKZ2+oRE2MgVWTnKUs2KQyuRIZHIsbF0Qi/VI3f4lCb+8hkql0n3Wqnp7cAmIZsEDq3W+l6rebrZ/8gAACRvfJWHju0y5ZWUfH09btwBEIhFJSUnnTEjbvn07K+6+k9y8AqbExrBr957TqmKYOXMmPb093PfpU1Q11mCgr09FRQUymWzA8U89+RQvvfwSs0KncORYKjfecCPbtm87L7+nent7efjhh5G4+OMZfT3hJ373VvbuqHt7+OyeUN58802+++47jI0NqS/L7TO/o6UOiZPWa/Xw4cPcfvvto3EYAgKAIKQJjGEcHR0xMjahoTwPxs0etTgkTtrspb179wpC2nmOnp4e99xzD5dffjkrVqzglfXvMcFnHM42ThyrLOLghoNnVGYmIDBWmBIbw+tvvTcsE/9/YmhsjpMigurjaVTkJVKZn0BVwVE625oxMDQkIjyChXcuJTY2lujoaKysrP5VrEGBgWzacYiu9mbqy/K0GWZluTRU5NNYkU9ro/ZOuL6+Pt5yBdEhQQTeMF8nmMnl8gumFGisU1xczMwZMzleeByNWoNKrcLY0Ii2jnbEp/l3KDB8VCoVeXl5OsEsOTmFo8kpVFZofagMjIyxcfZB4uzHhGsu13qZufhhZGrRZ532phoO/Pgi3d3dwyrvDAgIQE9Pj7qSLEFIGyFkMhlZmefOy/Ghhx7m5/XrsXP1w9JRTtCcKdrmLBoNf7x/JxJnXzzDZuESMKlfd2Sps4Luri6OHTuGQqG9GR0ZEcGhxM/7fO/o6Ruy4MEv2PTWbQAYW0h1N69Pom9ogrW9G4mJiSxZsuTsHvQJ7r5rOb2d1bz0sD9PvBHPTz/9xMKFC4c939/fHwtzc5rbW5noE87B3CT27NnD9ddfP+D4yspKTIxMmBM6lZTjGezavYu8vDz8/PxG6pBGDH19fSwsrXCShxN52b19XtPTN8TM2oGkpCMAyJycKC3LpTTrAPvWrUQx4VIOrX8dZ7+JAKSmpf3reNra2jhy5AgxMTFDDxYQ+AfCGaLAmEUkEuHq6kp9+Sh37jS1wNTKjvT09FGNQ2D4uLq6snHjRtavX889K+7hUO4R7r333jMqMRMQ+Dd0dHSQnZ1NeXk58+bNG3XRIDY2lueff56GioJ+FywD0dPZRmXBUa1wlpdA9fEUerq7MDU1Iyo6imUL/0NMTAzjx4/HxMRkRGMNDAzk888/54v7wgGtUO7p5c34oCACF87SCWYKhWLY3k0Co8PLL79MXn4e3o7uKGTe6Iv12JS4jYSEhIvOhPts0dTURGpqap8ss8yMDDo7OwCwlDoicfbFKexSgi71w8bVDysHj2F50Nq4+tF7ovRzOFmExsbGKHx8qSsVGjWdT7S1tZGXl0dubq7uZ2VVFe+8/XY/0cbGRorUyYsrn1rfZ3vhieYG2ft+Jnvfz5hZO3DL63v7jDl5AzozM1MnpEVERNDS8BJtjVWYWTvoxpZk7APAziOI2Xe9h6qrg5wDG6gpSqeuKJ3a4ky6uzrO6XfnVVcv4p2332BWtB1fbyzlv888dVpCmkgkYvqMGeQfzWbx9GvJKc9n27ZtpxTS3njzDf7c8iePffUCKrUasVhMWlraeSmkAViYm/fLNDuJjYsfZccSAfDx8SE/fzOH1r9BfVkOh9ZrPw9s3ZX
Ul+dz/NjxfxWHWq1GoVBQUVGBXC4nOTkZMzOhlFxg+AhCmsCYJihQya4D/65F8khg4+JHUbEgpF1IiEQirr76ambOnMmePXuYOXPmaIckMIbp6uoiNzeX9PR0MjIySE/PIC09ncLjx1Cr1QC88cYbo15aPHHiRPT19anISxhQSOtoqacyP4mK3ESqChKpLsxArVYhkdoQGxND7PJriY2NJTQ09Kxne91xxx2IRCLs7e1RKpX4+vpiZGR0VvcpMHIkJiby888/c/z4cY4ePQrAbTNvQOnmR0NrI5sSt7FhwwZBSPsX/PTTT3z99TccTU6mpLgIAH19Q6TOciTOvoy7bDY2rn7YuvphYGyGnv6ZCc42Lr4ApKSkDLscd1xYKLsTss9ofwJnTnd3N8eOHdMJZbm5uWTn5JKbm6fLRARtV0wrB0/qSnP45ptv+N///tdnHaVSySeffoaqtwc9/b8y+T1CpmPvFUL1Me25uZWjZ7+mYKZWdpiYWZGZmcnll18OaIU0gJrCdMxCHdCo1TRWFyJxliPznQDqXn56dj5dHW0AeHp5Mzkygsjl1xEeHn5OvbSefvppvv5qDTE37qOjU8Vjj9112mvMmjWLjRs3ctdHj6Knp0dY2KkzMyUSCSJEmBmb8X+LHuKDzV9w/XXXEx4efl56M/v6+rD34JEBX5M4yynN2kdraythYWFs3ryZiEvvIX7tc7TUlgLgM/Fyaoszqcw9jPqEcHgmPP3001RUVODl6E5+fj5btmzhqquuOuPjErj4EIQ0gTFNdHQ0Gzb+2u+L/FwjdfahPOeQ0LnzAsTKyorLLrtstMMQGCP09PSQl5d3ojOZVixLS0vnWEG+rvOjhdQBayc5Es/JxEy6Famzgv3rXtCJCaOJmZkZoaFhVOQlopx6A631FZTnJVCRm0B1fhK1ZdoMYJmzC9OnTiH20buJiYnBz8/vnGfTmZiYcP/995/TfQqMHLExsXR1dWFpao61mRUiRByvKkHp5ofE3BpzYzP27t079EICp+Tuu++hR2yCe+hMfGb5Y+Pqh7WjV7/zJVVvD7+8fC09nW24BU3BPWgKTooI9AyGJ0wbmlhgbe9KSkoKN99887DmhISE8PMvG8+ojFzg9GloaGDqtOmkp6Xqbt4YGplgYeeKha0bzuGXE2DvgZWDB9YOHhibSwD47Y2bycntn10UEBCAqreH5ppinacVgKqnSyeiTbjqYULn3o5a1Ut9aQ71Ffk0lGsfPT2dFBcX6+ZZW1tjaWXNkc0fkbL1c+pKMnWimbuHJ5GREUQuW0R4eDjjxo1DIpGctfdqKMzNzfnyq29Y/cXn3P/Ag2dUzXDbbbehVqvx9PQkNjYWS0vLQccbGRtRVVVFYXUJLSfeF41Gc0bxn23Gjx/Ptm3b6GpvoaOlDgupE2U5hziWtAV7jyDUql4SExNPNLgQ0dpQwczb32TDK9djLnXCxsUPqUxBRW4C5eXluLi4nFEcJSUliEQijlUWYaBvIJzrC5w2gpAmMKYJDw9Ho1bRXF2k9WYYJaQyOarebvLy8vDx8Rl6goCAwJjg119/5fjx42RkZJCalk5eXi69PT0AmFvZYi1TIHEZz6QJNyKVKZDKFBiZ9fcGk7r4k5p2fmS1Tp06hXff/5C1T0ylqUaboaDw8eWKeVOIjf0/YmNjcXd3H+UoBS501Go1l0+Yy+Lp1wJw7WvLKaz+68Lay9Gd7GwhY+nfoPCRU91tTfTCxwcdJxKLMTQ2p6YwjdaGSlK3rUbfyBQX/2h8Jl6Gd8S8IfclcfbjaHLysGMLCQmhq6ON5toSrOyFz5MzQaVS6b5/MjMzT9y8ySAiPILPP/+0z9iCggJSU5IJm3cHrsoYrB09MLG04+PlvtSV5tLb3YGZxBFrB0+Mza118yztPcjOzuy374CAAAAayvN1QlpPVweNlccInXM7rQ0VVB1L5sdn5tJQXYzmhHjn4CgjMDCAy+66kwceeADQCkLRUdG0NDfT2pqORq3CxdmZz7/4hfDwcKRS6YDH396u9VA0NjYe8PXBKC4uZt++feTn52Nubs599913Wub9M2fO/FeVDCYmJtx7771DDzzB/v37CfAP4IPNXyAWifn4k4/x9vYeeuIoMHPmTFauXElZ9gG2rFrR5zVXpdarbPv27Sxfvhyxnh4N5fkop1zPZY98hYm5FJFIhFSmQK1SkZ6efsZCWk9PDxqNBqm5hG5Nt+CJKnDaCH8xAmOawMBAAOrL80ZVSJPItCVQf/75pyCkCQhcRNx8882YWkiQyORYy0KJClt4okOZAhML7cm/qqeL3u7OAQW0k0hkcpIO/9qni9losWzZMjIyM/H18SEmJobJkydjb28/qjEJjD2MjY1paG3UPTc1NCa/QuuJ09XTjY2FhKzS0fVAvdAJCw3l+w1bhhwnFusx8/Y3+OG5y5DKFEQteoycfetJ2/EVdSVZwxLSpC6+pBz4ftiZ+SEhIQDUlWSjp29IXWm24Jk2CPn5+RQVFZGZmUlmZiapaenk5ubQ3dUFgLGZJVKZHESGfPHFZ7z99ptYWPzVFOKkF5mNiy/Ofn+VSwdMuZ7MPd9RmrmP0sx9dDTXEnHpXwKPtYMnRw//2u/3am9vj0RqQ8r2NeTs/5mmygIaq0t0WVJOMmeCApUop11JQEAASqUSf39/rK2t+x2bSCSirq6WWSGx3DrjOn5N2MqWtN3MmjULgM7OTlJTUwkP1/ph3rpkCbt27qK0vAxlgJK09LTTrgaZPm06BccKMDMxo62jjZiYGF156fmIo6MjuXm53HHHHVxyySUsXTp0Z+3RQluOL6KjpR4DYzN6OrUZdNaOXrgGTgbg0KFDPP/886DR6PzUZD5/ZfZpr+k07N+/n7lz5w64n97eXjIzMyktLWXu3Ln9suKVSiUA9a0NWFoMnvEnIDAQgpAmMKaxs7PDxNSM+vJ8RvO+jPSEiLd//37uu+++UYxEQEDgXHHpw18hlSkwsbQ55Ul8Y+Vxtn3yAB0tdVz3wp8YGpsPOE4qU9DV1dmni9lo4evry+bffx/VGM4nSktLaWhowNbWFhsbG6FxwQhhaWVJXUuj7rnUQsLxqmLu+vA/VDbWoNFo8PLwFCwT/gUhISGs+vBDers7+3VO/CemVnb4Rl1BVvyPdDTXk3/4d0yt7Jh26yvD2petix9JtTVUVlbi5OQ05HgHBwdsbO3Y9skDqFW9AFgIF7sDIxLpRCRjU0skMjkSmT+RV16KxEmO1FmBqZU9IpGImsJ0fvrflWRlZfUpObSyssLWzoHGqsI+S3uNm03mnu+0Yxw8cQ+e3ud1KwcPOjraKS8vx9nZ+W8hiVi2bBlr167Fz9sO5cyFfQSzoUoV/4lC4UNBfiHf7vmZjJIc6hvqycvLY8WKFeyNj6ejs5O7776bJ598kq+/+YYo3whCIwLYlLht2H9z/2R+xCyuj72Sm968m/z8/PNaSAOwtbXl559/Hu0wBqS3t5eWlhYkEgkmJiaYmZvTVHmcRc9u4sfnLqW7oxXPcbMxNDbH1Mqe7OwcxGIxUqmU+r
L+N0xONqQ4ckTrtaZSqcjKyiIxMZHExEQOJySSmpJCV1cnAB9++CF33nlnnzU2bNiAWKQV16Q2Un7++Wf27dvHjTfeqPv/JCAwGIKQJjDmcXNzpWGUO3caGJthJnEUjK4FBC4iXPwHNzfO2f8Lcd8+S29XO1b27hgYnbpb1Elj/4yMjFEX0gT+oqGhAaVSSXNzs26bs0zGgYMHcXV1HcXILnxsbGyoKqrQPfeReVFYXcL0S2bpGlYEBQUNKqK1t7djbGw86t1uz1dCQkLQqNXUl+dh7xE05Piy7IOo1b38/s5SXPyimLHsDUytbIe1LxtXbQfBlJSUYYkaIpGIn378gfj4eEJCQggJCcHa2nrAjKWLnUnXPoXUWYGlnRvfPj6Nivwj2LgGIJUpcPIZ38fzztrJG5FIRGZmZj/vLh+FnJqqvp0Qq45pvTmVU28gfMEK2huryTv0G03VhTRWFtJQrs0SrKiowNnZmebmZo4fP45SqeTVV17m1VdeHpFjvGbhNTz6n0fRszBEGRnMI/MeY9euXdpulrFXUVxTyg/f/8CCBQswNTXFxcYJJ6kjADk5OactpPn4+lCdX4kIEabGpuTn54/IcVwMVFdXk5qaSmpqKmlpaRxNTiE7KwtEsGf3biZMmICTowN1ZTlY2row5ZaVpGz7AkXkfECbFVmRexCNRoOnpwcJCQl0tTX1ydo3NrfG2EJKTo42W+1///sfzz77rLbs08kLqauS8Csews49kP3rnufQoUP9hDRfX1/SU9MxMzKhuKiYhdcsRF9fn4MHDrD/wIFz94YJXLAIQprAmCc4KIgtew6PdhjYuvpRXV0z2mEICAiMMt2drcR/8yy5BzfiE3Ulhcnb8QidOaggYGplj4m5FRkZGVxxxRXnLliBQamoqKC5uZm75i3BxkJCUXUpX+/+kYKCAkFI+5dER0fzcerH3P/Z0zS2NdHc3oKjgwPffPPNKeeUlJTw6aefsn37drIzM2hubsHJyYGSsopTzrmYCQwMRCwWU1eSPaSQ1t5UQ01ROiKRmMjL7mPc/Lv6dFr8O73dndSX5VJbkk1daTYNpdnUlWkFl46OjmHHN3XqVKZOnap7/nfBWuAvgmcuBkCjVmNoYk53Ryvpu78lfdc3zFj6Oj5Rl+vGGhiZYG3nQmZmf18zPz9f8nccor48j6aqQhorj9NaX4mDZxDFKdvI2L1WN9bG1h5fHwWTZ0YTEXEfBgYGODo4UlVdBcDy5cv5+OOPKS0tpbGxkba2NkJDQ8/4hvJ9993HPffc00cUz8rKAkBqbkVDayO1dbVccskleLh78OP+3wAwNTHtkyk3XMzMzNhakMotb69ApVaTlyeUkZ+K3t5ennnmGQ4dPkxqShq1tdUAGBgZI5UpkDj7En7FPBI2vMWPP/7IhAkT8Pf3Z8cerVglj7wEeeQluvWkMgVl2QcoLS0lKCiIhIQE6svzcZSPo62hkvryfBrK8xCJxFRUaj/bm5ubMbOy5bqV2/pl9tt5hHDocEK/uB9++GF++P4HOnu6sLWUUttcj8TcmszMrH/VDVTg4kEQ0gTGPNHR0fz083pUvd1n3Lp9JJDIFKSl/zlq+xcQEBh91GoVPz1/BU3VRUy5+QU8QmeQf/g3NGrVoPNEIhESmYKMjIxzFKnAcKitrQUg0M0PZxsnLEy0J/A2NjajGdaY4Mknn6SpqQkzMzNkMhlOTk7Exsb2G9fe3k5iYiLh4eFEjgujqrYOKxN9wj2sOW5gQllVFWVlZWd0MT3WMTU1xctbTl1J1pBjjc0lTLz6P9h7BuPsNxHQmsC3NVZRd0IwqyvJpuH/27vv8Ciq9YHj391NdtN7770XpBpQivSiYuFaUES4WC42bMC14v3ZsWLBQhHFhg1UlF5EWgiE9EpCeiOVtE2y8/sjZiWmh/Scz/PkkZ05M+edMZmZfeeU7ASK89IaZ9uUyfD08mb8FSMYcdd8RowYwezZHY+nJnSPTC5n4p3/Y+/HK0CSMLF20bYEvJSpvRcxMS3vJYGBgWzcuJFvnm1MahgaGePt7c01Vwbh7X0DPj4++Pj44O3t3aJl4Keffkp+QT4Pzl3KiaTTnDh+gt9//73Z/+8lS5awYcOGbh/fPxMbfn5+ODo48v7OTciQEeIWQFJOKnctvovQ0FB8fX3x9PTsVvLO0tISPaWK+ePmsDfqkBiIvh379+/n5ZdfxilgAq5h/2Kkky+Wjr6Y2Lg0S7annPiF77//nrVr1xIWFsbPP/+MuroCpb5xs/2ZO3ihaagnPDyccePGsXHjRvZ+soKai6XUqxsT8TKZHGQyrr66cUy1UaNGUfnmm2jq61rEZ+0WzB9HtlFVVYWBgYF2+ahRo1AqlVTXVFOtrkGpq2TKjGtYfPdikUQTOkVcFYQhr2nmztL8dCwd+36g/+qKYkpykqkqKyI7K5Py8vIujw0hCMLQIJcrUBmZQcF5jnz1P9LP7sPC0ZeovZtxCZ6Ec+BVbW5rZu81YGbuFBpduHABQJtAK6+6CDSOVSNcHkdHxzZbnx08eJDPPvuM3bt2k1+QT0NDAzNnzKSqthZdhYwgRxMqaupJK6pCRy7jyy+/5IknnujjIxgcrrhiBEfPdjz7qaRpwDnwKooyE/jzm5cozkqgOCuBqooSAIyMTQgJCWbmDbO0XTGDgoKafXEVel9q+E4AfMLmc9Vtz1BbWUp65D5KclMozkmlPC+ZoqxkbPVHttj2oYcewtnZGVtbW3x8fLC1te30+INeXn9N6CWTIZfJST2XzLlz55DJZLyy6Gm+ObKdlOSe7R4pk8k48ucRbrzhRs5EniE+K5m6+jqysrJYtWpVt2brbOLq6opcLme832giUs9SkJ/fg5EPLS4uLgCEzliKS9DVbZazcg3gXHhjS8GpU6cCUJKbiq3HiGblmiaHO3DgAI8//jgOjk7U1tTgE+RPQEAAY8eOZcKECfj4+GBk1HjvbRq/rvB8bIvnKGvXIDQaDZGRkYwfP77Zuh0/72Djxo0sXLiQGTNmoKuriyB0lkikCUNe06wsJdnJvZpIq60sozgnufEnO5nSnGRKc1O4WNbYYkFHV5cZM2eJh0pBGOZu+u93lOanc/7sftIj92lbgxz99mX+9fwvbX5xsXDw5sSxH6ivrxdvxweIphZpUelxmBqYkJZ/HhAt0npLXl4eL730EuvWrcNIz5BgN3/mBl9DbkkB+w4fws3dncSEeP5MKcbFQh8DpQJ1vYbff/9dJNLaMCI0lJ9//b3ZpA1V5Re4kBnfrJXZhZxU7aD/rm7ujB4xghG3zNQmzdzc3MSkD/1Eo2mgojCL4pxkrFwDqauppCw3mc+fmEDdX4OtGxoZExAQwFVTxhIQsJjrr7++xX50dHRYsGBBt2Lw9/fH0MCAdb98ikKh4Lprr8PAwABJkkjNTaei+iK1F9pved0dbm5u/L7rd5YtW8aOHTsA2LBhA+7u7jz11FPd3q+npycVVRdZ/tEqACbNuaaDLYYvLy8vZHI5F7IS2k2kWTr6Enf4GyorKwkKCgKgJCelRSLN2LKx9
XD4qVO4urqSnZXZqRiMjE0oPB/TIpFm4eiNjo6SiIiIFom0adOmMW3atM4cpiC0IJ7EhSHPwsICI2MTintowgF1dQXFOSkUZydTkpNMSW5j0qyipHFMAIVCgYenF2ODggi+ZSaBgYEEBgbi7e0t3nQIggCAma0bZjOWEDpjCTUXSzgffQhDs5Zv/+tqqynNO0dxdhJ5KaepU6s5d+4cPj5937pWaCkkJAQ9lR5rf/pAu8zF2VnM3NlLnnjiCb7+6msAlk5fyJTgCQAk55zjl/DdTJ48WTtu0rxQOz48mIavrRHRUWf7LebekJSUxNatW0lIiOeRR1YQFtb+xCbtCQ0NpaaynCNfvkB54XlKshOoKGkcz1Vf34Cg4GAmz55EaOhDhIaGEhwcLFrVDzCf3B+MpqGxS5uxiSmBAQEETQgjIGApgYGBBAQE4Ojo2KuJTltbW+ITEqitrcXV1RWAe++9F7lMxse7P0cuk6Nfotcrs+za2Njg5OSEi40jry16jqe+fIns7OzL2ueCBQuwtLREV1cXW1tbPDw8eijaoUdHRwdDQyOKs5PaLWfh5AuSxK5du7jxxhsxMjElM+5PkMkpyW0c96w4J4WKC43/7zIyMjodg1wuZ+TIkWSdj0GSpMbJCQxMOPL1/+ESdDVWLn6cOnXqso5TEP5JJNKEYWHMmNGkd3CB/6e6mkrtgJZN/y3LS6GsKAdovGi7uXswMiiIoPmTCQoKIjAwEB8fHzE7pyAInaZnZI7n6NmU5qaSdHz739eb3BRKCzKQJAkARydnbl94h3igH0DGjRtHZVUlZWVlXLhwgaKiIu2XSKHn+fv7o6NQYGdmQ2xGgjaR5mHnioGePiYmJsiAKf5WfHCgcfbBib5WbPjj/JBqyfnQg8s58schlEoZFeXl7Pyt++Ovjhs3Dk8vb0qSD3HFiBGMmH+vtpWZp6enGCtoENA01PHTTz8xZswY7O3t+61l4KUTrGzZsoVNmzbhaGFHtboGHYUOGh16LTYbGxuKyov5bP83FJUVU1BQcFn7k8lk2u6HQsecHB3IP9/+GK5NvYL27dvHjTfeiLenJ2dO/kLKyV8wtnLC3N4T54Cr0FHqUZQRR25yODU1NZ3uojt2zGiiPvuK+D+2cWjLUxiY2lBVVkDM/s8JmHQbJ8NPkZaWxo4dOygoKODJJ5/E1NS04x0LQhuGxhOFMOjU1NSQmJhIXFwccXFxxMTEcsXIK3j2mWd6pb7goCDObvu51XWXtvgozknWJsxKC7K0ZVzd3AkJCiJo3iJtCzM/Pz/09fV7JV5BEIamhno1pXlp2mtNcXYy5XmpFOenI2k0ANjZOxIcFEDwNTdprzcBAQEYGxt3sHehP8jlcszNzTE3N/97nCChV9x+++089dRTWBibEZuRqF2ukCvwc/Tm9OnTGBroYW6gy3Uj7CisqKWsSk2DRmL37t3Mnj2bc+fOsWPHDg4cOEB0VDRTpkxm46ZN/XZM3WFn74iVhRIzIwUxMVHN1iUlJWFgYICTk1On9mVlZUVyUqLoljnITZkyZUC1FDQ3NwfgfwtX8VvEPraf/B0jY2NiY2O1Q670pLlz5/L5ls/JUhcwJmwsDz74YI/XIbQtKCiIpB9/oqG+DoVO671vVIamGJhac/r0aQD+978XmDdvHj5XXk9VaR5F6WfJiD4ENI65ePvCO7rUMGHUqFGsXbsWlUHjs1JVWWMydeTc+zGxcubwH9/wr5tvIjLyLPUaDZaWljz66KOXc9jCMCcSaUKvqqqq0ibMYmNjiY2NIyY2lvS0c2j++tJobGGLjp4R27f/xOOPPdYrY4gFBQVR8t57FKRHU5qfRkl2CsU5Sa22+AgJDiZo5m3aL7D+/v4YGhr2eEyCIAxtJTkp2jETi3OSKc9LoTgvXTvOkLWNHUFBgYRMvLZZwuyfs6EJgtDIzc2NqyZcRVbyeXJL8imuKEEukxOTkUBlTSXJx4/j6u7J9jNxSJJEgwQn00pxdXbCxsYGC3MLysrKkJBQ6uhiZmjKZ1u28PEnnwyq1mo33HAD3333LYYmjqx49D7UajXbt29n/QcfsP/gQfRUKrJzcrCwsOjU/kQSTegp5eXlZGVlaQegX/vTh+SVFlDXUE9FeQVvvfUWn376aY/XO3r0aFJSe3YyA6HzrrrqKr777jvK8tOxcPRus5yVsz+pqY2Tm4wfP57gkFCqL8Rz5YhQQhfOIzQ0lJCQEFxcXLp8XWqacEBXz4irb3+OP75cA4Bz4FUo9Y3RaDQUFl2g/q/vn99//71IpAmXZfA8NQgDWmVlJQkJCcTGxv6VNGtMmJ1PT9MmqUws7TGz88TM/SqunnA3Fg5emNt7ojI0Jf9cJD+8tICEhARGjmw5k9DlGjFiBJJGw/f/dyPQ+DY3JDiQINHiQxCEXvL1s7MBsLSyJjAwkJAbZmmvN4GBgZ3+kisIwt8W3bWIe++5F4AVG56lrKocAEsLS55++mkmTpzI8uXLCQoKYsGCBcyaNQsdHR2ee+45ysrLWDr9dvycvHG3dSEuM4lntr7C/v37mTFjRn8eVpdcf/31lJdfJCMjg08++QQXJ0fyC4sY62HJyzcF8Oz2BLZs2cIjjzzS36EKw8SxY8eYN3cexSXFADz22GPccccdhJ88SVl2OStvfJBDMUdJT0/v30CFXjFv3jweeeQRLmQntptIs3T2JzrxBNDYajHqbGSPxeDp6YmxiSmF6dGMnHs/hRlxlOan/TWZgQwdXRUurq6c/2vstYqykh6rWxieRCJN6JKKigri4+ObdcmMiY0lM+O8toyplSNm9l6YeU9h0qR/Y27vhbm9l7apbWvM7T0BiIuL65VE2pgxYzhy5AgymUy0+BCEYSw9PZ3//e9/7N+/n7y8PBwcHLjjjjt46qmnmg0QHxUVxfLlywkPD8fa2poHH3yQJ598ssv1paamijHNBKEH3XzzzTyw/AHUdWou1lxk3Lhx3H///dx+++3aCX0iIiJabDdu3DgkSWKUVyj25rYA+Dh4IJfJ+fnnnwdNIq2+vp6dO3fy+OOPkZycglJHwWhXUz66ZSyj3Bq70/2ZUsz6D97n4YcfFq3NhD5x/PhxSkpLWHH9feyNPMyp8FMcPHSQbdu28a9//YvDscdIyknFRmbX36EKvcDT0xMdXSUXshLxHjuvzXIWTj7U19WSlpaGu7t7j8Ygk8kYNWoU5zNikMlkTFn8UrP11i7+/NW2AzNDJXl5+b0y+YUwfIhEmtCqsrIybcLs0i6Zl05BbG7jjImdJ5b+0/Ga6oWFgzdm9h4o9Yy6XJ9S3xgTS3vi4uJ68jCamTBhQq/tWxCEwSEhIQGNRsNHH32El5cXMTExLFu2jMrKStauXQs0dk2ZMWMG06ZNY/369URHR7NkyRLMzMy45557ulSflZVVbxyGIAxb5ubm/L7rd9LT07nxxhs7PVh0WFgYMiA+M1mbSFPpqnCzcebon0d7MeKekZOTw6effsonH60nKydXu1xd38DR1GJisytQ10t8fjyLXbEFODrootFoUCgUl133H3/8wdGj
R8nKyiI7K5uHHn6IyZMnX/Z+haHDzc0NSZKwM7PG1MBY2/Js4sSJTJo4CUmSmDfuOu64447+DVToNWZmplzISmh1nUbTgFyuwNLJD4BffvmlV8axGztmNGc2ftHqOkuXIPKyTrJs2TL09fW58cYbRRJNuCwikSY08/HHn/D8mjXk5jROPSyTyTCzccHUzhOboNn4zvTGwt4LM3sPdFXdG8usoV5NRVE2ZYUZlBdmUF6YSXlhBjVVF/l223e89NJLHe9EEAShG2bNmsWsWbO0nz08PEhMTOTDDz/UJtK2bt2KWq1m48aNKJVKAgMDiYyM5M033+xyIk0QhJ43ZcqUZp/VajVPPPEE5WVlbNi4sdWZJs3NzbG3dyA+K4lrQq7SLg908eVQ/LFej/lyzZh6DWlp55g/wpaF/7oSEz1drn7lDwAsDHX5+Egm5wsv4uvtxWuvv8GiRYt6JIlWUVHB9GnTUcjl2JpZU1xRSsXFCpFIE5rx8fFBJpOx8rP/ATBp4iQAbG1tOXjoYD9GJvQVTw8PohP+bhBx5vdPMLN1IyPmMHGHvub2l/ZiZuuOTK7gjz/+6JVE2ujRo3nttdeoKr+AgYlls3XWroEcPLiV0xGnxFA+Qo/ot0TawYMHWzwINTl58iRjxoxpdd3kyZM5dOhQs2X33nsv69ev7/EYh6Nvt31LVYMuU/+9FnMHL8ztPNFRdm7a4UupqysaE2UFjcmyssJM7b8vFuciSY0DPcoVuhhbOWFq7YKFow9paZGima0gCH2qrKys2Xhlx44dY+LEic26es6cOZNXX32VkpIS7Wxkl6qtraW2tlb7uby8vHeDFgQBjUbD2rVreWHNC1RWVQLg6ubG888/32r50WNGc/zQMf6MP0l8VjJxGYmkFWSgp9Kjvr6+Xycc+Pzzz9m4YSPzrp3HTTfdhJubW7P1dg4OGKgLeXVBIJIkEZ5Wql1XXqth+tw5bL7/fiZNmtSjz1CGhoY0aBpYPOUWZo+ayqd7tpKSmdFj+xeGhsDAQI4ePUp1dTWurq44Ozv3d0hCHxs5ciQnTpygtqqChrpajn/3WrP1Cl0VCh1dzGzdiY6O7pUYRo0aBUDh+Rhcgyc1W2ftFowkSURGRnL11Vf3Sv3C8NJvTwzjx48nNze32bJnnnmGffv2aWfdaMuyZct44YUXtJ97Y5bH4SooMJDI2HP4XHl9u+UkSaKqrLAxSVbQ1LIsQ5s8q7n49wCOSn0jTKxdMLF2wcttLiY2jf82tXHB0NwOubzxjWnamT38/v5/OH/+fIsHSEEQhN6QkpLCunXrtK3RAPLy8lqM3WFra6td11oi7eWXX2bNmjW9G6wwIEmSRHFxMWZmZj3SAkjonIqKCvz9/MnOyWa01wgWTVnAR79v4bVXX+O///1vs0R4k/nz57Njxw5e//F99FR62DvYc+211/Loo4/2axJNo9Gw8smVNFTXcfTPP3n88ceZN3ceO37eoU2K3Xf/f1iwYAFv7EphV1wR8dll2NvaMP/Gm3j++eexsbFptk9JkoiLiyMjI6PZj0wm4913321zrFiNRkNERAQxMTFER0cTHR2NibEJFyoan+ssjc0HRQs+oe9deeWV/R2C0I+mTp3Khx9+SHF2Ivbeoxl97YOc+nkdANauQRiZ2/3170CyYg/0Sgzu7u6YmplTmB6tTaTVq2soyoynIO0s0DiGpkikCT2h354alEoldnZ/DzhZV1fH9u3befDBBzt8k2ZgYNBsW6HnBAYGUpy/jvq6WmQyORUXshuTZAUZ2q6YZQUZVBRlUq+u0W5naGaLibUz5naeuIVM0SbOTKyd0TMy79TbURPrxqmyDx06JBJpgiB0yapVq3j11VfbLRMfH4+fn5/2c3Z2NrNmzWLBggUsW7bssupfvXp1s2nUy8vLxRv5Ieree+9l967d1NXXUVdXR1lZGbW1tY1DIZiaYWVlxQv/e4Fbb721v0Md0tLS0sjOyea2q2/glqvnA7B46q08sXkN//nPf/j0009bbHP33Xfj4+ODm5sbjo6OfRxx206cOEFuXi4v3rEaD1tX1v26gUOHD1FfX6+dQOH666/H2dGBd/elcd2183hrw3+YPn16q91YAT777DPuvvtuABRyOZamlpgbmpKYlcL8+fO58cYbW93uk08+4b777kMmk2FvYUtDg4aa2mqOJ0aQW5JPekEmFysvUl1djb6+fu+cEEEQBp2ZM2cCMi5kNSbSRl27nJzkcHISjuMSMllbzsLRh+STv/RKK+DGCQdGcjZiFxUXsrmQEcOFrGQ0mgZ0dHUZccVI5syZ06N1CsPXgBkjbceOHVy4cEF702/P1q1b+eKLL7Czs+Paa6/lmWeeabdVmuhy03kBAQFIEny5eipVZYX/6ILpiIm1C46+4zC5agEmNi6YWrtgbOWEruryH6ZMrBu/dB4/fpy77rrrsvcnCMLw8dhjj7F48eJ2y1w6e2ZOTg5Tpkxh/PjxfPzxx83K2dnZkZ+f32xZ0+e2XuKoVCpUKlU3IhcGk4qKCj7++GMsjM0xNzTFTM8YnXo5hQ0XWDb9DsqqKth95gDr168XibReFhISwrixY9kfdYSbxs9DR6GDt4MH4/3GsOWzLbz++uutth4diBMPff/995gbm+Ln5INCLienNJ9r583TJtEAdHV1iU9MoqysDAcHhw73WVRUhFJXyfv3voKFkRkKuQJJkrj1jXvJyGi7a2Z1dTUKhYLPH3kfA5U++84eZt2vG7Czs0dpa8jUMdOZOnWqSKIJgtCMkZERevoGXMhKBEAuVzDt32+QePSHZj2dLJ39kDQN/Pnnn0yaNKmt3XXbwttv5+jyB3Cy0mf+zKsZPXoFo0ePJigoqNWWyoLQXQMmkbZhwwZmzpyJk5NTu+Vuv/12XF1dcXBwICoqipUrV5KYmMgPP/zQ5jaiy03n+fn5IZPJMLVxZfS1D2i7YRpZ2Gu7YPYWXZUBekbmxMbG9mo9giAMPdbW1lhbW3eqbHZ2NlOmTGHUqFFs2rSpRYuOsLAwnnrqKerq6rRfZPfs2YOvr2+rX8yF4aMpWXrn5JuZEtw4YP3PJ3fz2f6vmXHFZAD+jD9BQ0NDP0U4vHz8ySeEhoayO/IQc0ZNBeCm8fM4mhDO0qVL2302HEhOhYfT0KDh68M/4GnvTnpeBm/c/FaLcoaGhhgaGnZqny4uLqjr1Ogr9VD89fwmk8mwNrUkMzOzze2Cg4NpaGig5GIpBip9XG0aX3J+9fVXjB07thtHJwjCcGFna82FzHjtZ0MzG0bOua9ZGQtHHwB+//33XkmkLVmyhMWLF7fZWlcQekqPJ9K6070mKyuLXbt28e2333a4/0tnTAsODsbe3p6pU6eSmpqKp6dnq9uILjedZ2lpiY6OLmZ27gRM6vu36aY2rqSlpfd5vYIgDA/Z2dlMnjwZV1dX1q5dS2FhoXZdU2uz22+/nTVr1rB06VJWrlxJTEwM77zzDm+91fKLrTD4aTQasrKySE5OJikpiaSkJBITk0hITGT+/Pm8+cbf4+fp6uoiQ0ZtnVq7TKW
rpF7ToJ0op7yqQgw/0UdCQkK47trr+Grv91zlP45DMUf5+siPyGSyDl/MDiRvv/MO77z9Dj/++CNlR39GV1eX9PT0y+r61PScW1R+ASO9v5NvlkbmJCQktDlxSnBwMAA/HN+JXCYjvaAx6Zadnd2tOARBGD78/PzYe+BQuxPHGZrZotQ34uTJk70Wh0iiCX2hxxNpXe1eA7Bp0yYsLS257rrrulzfuHHjgMYBo9tKpIkuN11jZmpCWcH5fqnb1NaNzMjkfqlbEIShb8+ePaSkpJCSktLii7YkSQCYmpqye/duli9fzqhRo7CysuLZZ59t9iJHGFwkSaKoqEibKEtOTiYxKYmEhERSU1KorW0c81Ou0MHcxgVjG1dqFOZ8/PHHvLH2de0XAplMhlwhb5FIA1DXq1HpqrhYUzmgxt8a6ta9tw4PDw+WvbeC2no1VlZWbNqymQULFvR3aJ2yf/9+3nrrLaLORlFWXgZAQ30Djz32GN7e3lx77bXd2q+LS+O4s+cLs6muraX4YgllVRWUXiwj5vff8ffzJys7q0WizsbGhnFjx/HHmeP4+fkx7prx3HfF8m49owuCMLyEhYXx+++/c7E4B2PL1u+DMpkMSyc/EpOSei0OSZK4cOFCq/f87OxsNm3cwPz58wGoqalBV1dXTBYkdFmPJ9K60r0GGn/RN23axKJFi5qNBdFZkZGRANjb23d5W6F1zs5OJPZTqzATaxdq1Y1j2onkpyAIPW3x4sUdvuyBxpYuf/zxR+8HJPSoioqKZi3LkpKSSEhsfIguLyvVljO1csTExhUTmxBGBV2Hma07prZuGFs6otBpfBY5d3o3uz5YTkFBgXbWVgCFXEFidgp7Ig9SW6cmOTcN4K/kmgx1fR2urq59edjDmouLC2vXruXDDz/kvvvu45FHHunUBEcDxQtrXiD69FkmBoZxw4iZuNo442Rpz5J1jxAdHd3tRFpGRga6Orq8s+NjJEmDTCZHI2kwVBkw0iOE8JRITp48yfjx41tse+TPIwD9OpOpIAiDz+zZs3nuuee4kJnQZiINwMrZn6SjvTeUz3+WP8D6Dz/QfjaxtMfExg0TmyA0eSX88MMP1NbW8s7bbxMRcZqrr76avfv29lo8wtDU73fI/fv3k5aWxr///e8W67Kzs5k6dSpbtmxh7NixpKam8uWXXzJnzhwsLS2JiopixYoVTJw4kZCQkH6Ifmjy8/Pj9JkzNNSrUej07aCMptYuaBrqOXPmjJhGWxAEYZCTJInq6mrKysooKyvDxMSkUwOlt6e2tpbU1NRmCbOExMbumIUFedpyhqaWmNq4YWLjhv/UiZjaumFm646JtUunJsgxs3UHICkpqVkizcTYmKMJ4RxNCG9soSaToSNXcN+HT2i70P2z5b3Qux555BEeeeSR/g6jW3z9fMlMTmfx1ObDabhYOxEdHd2tfcbHxzNhwgQMVPoEeYTgZuPMN0d+4tar5nPrxBto0Gi4650H2LNnT6uJNJFAEwShO0aNGoVcocOF7CTcRkxts5yFkx+1NdUUFxdjYWHR43EkJCRg6zGCSXf+DxMb12b3/P0bnyQ+IZHwk+GUF5Yy3nc0+/bvo6ioCCsrqx6PRRi6+v1OuWHDBsaPH99szLQmdXV1JCYmUlVVBYBSqWTv3r28/fbbVFZW4uzszE033cTTTz/d12EPaWPGjOHLL7+k4kIOZrZufVq3iU1jV4TDhw+LRJogCEI/q6mp0SbBmn5KS0vbXFZS2nx9RXkZ9fX12v0Zm5iSkpyEjY1Nl2MpKipi0uQpJMTHodE0ziit0jfEzNYdYxtXXMbdTIitG6Y2bpjZuqEyNL2sYzexcUEmk5GUlMTVV1+tXf7Hn0f4+eef0dfXp6amhvDwcORyOYWFhRQVFTHSfjSzZs26rLqF4SM4OJiNGzZS31CPjuLvx3InC3vORp7t1j5NTU2RJIl7Zy5iYmAYkiTx2+l9nEw+w4Krric1Nw0zQ1P27N7Dc88911OHIgjCMCeXyzE2Nqb4r5k722Lp1DjhwM6dO7njjjt6PA4/Xx/iUg9i6dwyv2Bq607igYNcd+08ju//k9sm3sjBmKMcPXpUdGEXuqTfE2lffvllm+vc3Ny0Y9ZA48Cphw4d6ouwhrWmGVTKCzP6PpFm3ZhICw8P79N6BUEQBPj+++95cuUqysrKqagoQ11b22ZZlb4hKn1jlAbGKPWM0NEzRqlvjNLUFjM7Y2z0jRvX/1WmqrSAQ58/Q05OTrcSaYmJicTFxjBq3nKcAsZjZuuOvolVm934cpMjiNz1KZ6jZuEaeg0qA+Mu1aejq8LU2pGkf4zj4ufn1+rLP0HojpCQEOob6tlxchflVRWcL8oioyibC2XF+Pr4dmlfkiTxxRdfsH//fvT19DibHsfEwDDySwuxM7PhXP557nrnAS5WV2JoaMjyefN66agEQRiuXJydyMhov9umhYM3IOPAgQO9kkjz8fGhdNNnSBoNsn9MPGBm60ZZaQmhoaFs/WIrqz7/P6BxaAhB6Ip+T6QJA09ISEjjzGOFbU+P3lv0jS3QUeqTkJDQ53ULgiD0rMEzTlOTbdu2UVBSScDE2xsTYJf8qAwu/WyEXNG1R4iSvHMAlJWVdSs2H5/GN9iWzn44+IztsHxG9CHSI/eSHrkXuY4uzgFX4TFqFu4jpna6tZqJtRuJib03ILIwPOXm5qJWq3F1dSUkJAQrSyu2HPgWFydnQkaEMjvkWoKDg7UtISVJIi0tDWdn53bHEz5y5AiLFi3C1cYZXZkuR+NPEpeVSO6FfBRyBa6uroy4YgTV1dVEnonkpZdfYs7cOWJ4FEEQuqy+vp7c3FwyMzPx8/PTdtEMDQ0lZuuXNNTVotBtfbxrXT1DjCzstWOd9zRvb2/UtdVUluZjaG6HTCajoV5NbnIEpn81EgkMDGTJ0iVYW1szYcIEZs+e3SuxCEOXSKQJLSgUCvQNDCkvyOjzumUyGSbWzmRmZvV53YIgCD1JJpc3a1XdlzQaDXV1ddTW1qJWq7U/l35WKBSMGDGi2XampqYYmVoyat7yNvddlBnPgTdW4hN2A35X3YTKwKRTMan0G8t1N5FmZWWFiakZZfmdm1V61LXLSYvcS83FEkKn30161AEObF7FIbmC2/5vl7YFdHtMbNxITDrdrXgF4VJff/01GzZs4FT4KUrLSpHL5cTHx+Pj40NSchJyuRxT05YJXkmSePzxx3nzzTe5/vrr+eabb9qcjMnZ2RmA0Z4hJGanEpuRyOjxYzE3N+f8+fOcPHGCc2nncLdzZaxrKPui/mD79u0ikSYIQoeysrJ49NFHST+fQWZmFgX5udphFkaOGk3EqcbeRJMmTeKLL76gJDcVK5eANvdn7RpIWnrv3F+bXrwV56byxcrJ6OoboasyoLIkj6nL3gAgPz+fTz75pFfqF4YHkUgTWmVlaUlZYee+rPQ0U1s30nNTUKvVKJV9O9mBIAhCT5E0DSQmJjJ2bMetp/4pKiqK11577ZIEmJqa2lrUtbXUtpIUU6vrqKtTo66tpa5O3Wxcsv
Zs27aNm2++WfvZ1NQUdfXFdrcpL8ykKDOeosx4Tm5/G9+w+QRdc8dfXTXapvyra2V3E2kymQwvLy/K8tM6VV5HV8VVtz3Nz2/chZm9J/7GFhRlxKFvbIFM3rlp7k1t3Qg/uo2GhgYUis5tIwxvdXV1HD16lC1btnDmzBneffdd9PT0uO222zDSM2SUZwheYzzYsHcrH3zwAW+//Tbm5uZt7u+ZZ57hzTffZMaIyfz2607mz5/PDz/8gL5+ywkzTE1NsTC34Ptjv6KQy9HR0eHnn39GT6nC38kHtbqOWVdM4b7ZiwHIKy1k967dPPPMM711OgRBGCI2b97M99//gPeV1+M4ahS+5vYYWtiTnXCM2ENbkSQJmUzG3LlzAbiQlahNpEkaDRUXsriQlaj9yU0+RX1tVa/E6u7ujkKhoCzvHJKkQV1VjrqqHADngKswsbRvMWyDIHSVSKQJrXJzcyUq6Vy/1G1q7YJMJic5OZnAwMB+iUEQBKEnHD16tFuJtJdffpkfd/yKjXsIcoUuch0lCh195ApTFPq6KIyV6OnoYqjQRa6ji0JH+dd/L/m3QtlinVzR+Fmho8uO1xeSnp7erF5TU1Nqq9sfJ8T9iuk4+F3JhcwEAifdRvyRbcQe/BIn//FMv/dt9IxaTwro6KpQ6CgpLS3t8vlo4u/ny4GTne/6X5zd+KAcvW8L2fFH8ZtwExNufQqlfufGSzOzc0ddW0tmZiZubm7dCVkYgvLy8vjiiy+YMmUKTk5OfPXVV/z444+cOH6Curo6NJIGhVyBXCZn2bJlHDhwAIA7Jt/MrJHXAPBrxB5+++033n777TbreeWVV3jxxRe565pbuOHKOUzwH8tL37/DDfNv4Pddv7co/9VXX1FcUgyADA0NDRKzrpjC0ukL0dXRZdHbDxCdEQ+Aul6NtYklv5/Yz8WLFzEyMurhsyQIwlDi4OCAJGmYtOh/KHT+buhQX1vF2V0bKC4uxtLSEnt7e3RVeiQe/ZHclAiKsxK5kJ2kTZop9Y0xMLNBV8+QmoslFBQUdGvc1Pbo6uri6uZOeVEWt6zZyXf/m48kSdj7jEHf2OKv1uYikSZcHpFIE1oVFBTEn0ePa98u9CUTGxc0DQ1ERUWJRJogCIOWTK6jnbylq3R1dbFy9mPeis3tlmuor0MuV7QYTLdTdSj1UKvVzZaZmZlRU9l+Ik0mkxE6bTG/vXcfjv5h2PuMYfeHD5IVf5Sq8qI2E2kAeobG3W6RBo3dNX76pWUCoS2pEbsAuJAZz8z738Nj1Mwu1dc04U5SUpJIpA0TkiQRGxvLli1bsLa25oknnmhRZunSpezcuRMAHYWC+oYGbEytMFQaoG+sx/2zFuPj6MGGPV9yKO4odnZ2mJmaEZkWw8wrppBZlI25oRmpaentPmdt+WwLNmbW2uSbh50rNqZWJCW1PiOet/ffrULrNSCXQU5JAbo6jeOq+Th4cCrlLM999RrxWcmo69T4+/q1O+6aIAgCNHYdlySJypJ8TKydtcuNLOwByMzMxNLSEgA3FxdSEk9QcSELXT0jTK1dqKsuo6I4H3V1BerqClzd3Fl4831YW1v3Srx+vr7EZqVh4ejNuJse5+g3L+Excgbw17ANie1PiCAIHRGJNKFV48aN44MPPqCqrBBDs559S9ARU2sXQOLYsWPcdtttfVq3IAhCT5E09Xh6enZrW6VSiaZe3W4ZSZLY89HDlOSmcsXse/Aed22zt8QdUejoUvuPWTlNTU1pqFdTX1eLThuDBAOkRe5DoaMkau9mzp/dj53nSCbc9nSH3TtV+pefSKssu0BtVXmHY7NJkoSZrRu6KgOmLH653XtZXW0VZQXnKctPpzQ/nbL8NMoL0inLTwegqqp3up8IA4ckSUyaNInwk+HU1NZol0+ePJkxY8ZoP9fW1rJ3z14WjL+WQFc/DkT/yaGYo9xy9XzySgr4LWIfgS6+yGQyglz82HXmAGfOnGHkqJEcPniIRW8tp6KmEgADfX00Gk2b3YY/+fQTZs2axZpv1vLYdffz2o/vUdVQw6FfdrVaPjg4uNlnjSQRl5nIhj1bicqI53x+Jvp6+th4OXDn8ruZPn06wcHBff7CVBCEwcfJyQmAiyW52kSaJEkYmtsBjYm0pnFXlyy5m9WrV1NemImJqRkhISGEhkwlJCSEkJAQAgMDMTbu2kzaXeXr68OJM9sBCJl6F95jr0XfpDHRZ2brxpmT29FoNMi78SJSEEAk0oQ2jBo1CoDywow+T6Q1DQB9ppdmchEEQRjoVCoVmvq6dsvIZDLkCl1K885xYNMqwre/Q+iMJfhf/S90VQYd1iHX0W3RIq1psHN1VTk6pq2/Ja65WELyiR001Kspyohj2rI38Ro7r1NfxpUGl59IAyjLT8fGvf0B0mUyGVMWv6xt8dNQX0dFURal+Wl/JczSKC84T0VBOmUXcrXbmVtY4uXlxbjxwfj43IS/vz/XX399t2MWBofIyEj++OMPrvQdzYwRk3CwsOO+D59g06ZNzRJpO3bsQF2nZmLQeJytHKitU3Mo5ig+9h5YGJmx7c8d/Baxj7zSAk6nRgHw22+/cf3113Po4EFtEk2lq6Squprjx48zYcKEVmOaMGEC+/btY8aMGdy//kkMDQ05eOhgm631bW1tcXa0IzM7D09PdwwMjMjKzOJMTizT58xgxowZTJs2DSsrqx4+e4IgDHVNk5mU5KYS/tM7BE9dxP5NKzEyt0cmV5CV9fdEcStXrmTChAm4u7vj6OjYL8l6b29vSgoyaKivQ6Gji4Hp39c9U1s3qquryMnJ0SYIBaGrRCJNaJWHhwcAZQUZ2HuP7tO6jSwaL8jJScl9Wq8gCMJAoVQqaWhoP5EGMHb+I5w7vQvf8TegaWjg6LevcOrn9wlb8CT+Vy1od1uFou1EWm11BQZtJNJkcgX23qOx8xzJiFn/7lTSromu3uUl0pq6rpXmp7WaSJM0Gi6W5FFWkE5pXmPCrCw/jYrC85QWZKLRNACgr2+Al7c344N98fa+Bh8fH3x8fPD29tZ2TRGGl9DQUHR1dLEzs2akZ+PvlpOlPfv27WtW7uOPP0alqyS3OA87M2v+iD2OXC7nle/fJbekAIVcwce7P0chl6P5a9ZetVqNgYEBDRoNi6feytUB45DL5Nz97sPs2LGjzUQawNixYzl48CBrX1/LQw8/1GKm3UvJZDJS0zKQJEk7WVNtbS1KpVK0OhME4bIYGRlhYmpGRtRBcpJOkpN0EoCS3BRMrRzJzMzUlpXJZFx99dX9FSrQ+OJN01BPxYVs7TANTcxs3YHGYRtEIk3oLpFIE1qlr6+PgaEx5YUZfV63XKGDsaUDBYXZ1NXVibE7BEEYdhq7dnacSDOzc8fc3ouKCzlMXfoa6upy0iP3kRX7Z4eJNLmustWunQDqqrbHSVMZmDD3kQ3UVV+kuqKYsvx01NUV1FZf1I59oq4qp7a6AnX1RdRVFdTVVFBXXcGFnBQuurU9hlpHjI2NsbaxozA9BmNLJ8ry0ygtOE9ZfhoXC9IpyU+nTt14TDo6O
ri5exDs64vvjJu0iTIfHx8cHBxEYkFoRi6X4+joyJlz0VzhEUzM+Xgqa6soTC9uNo7Z6NGjObB/Py999w76Sj3U9XVoNBoKyopwt3UhNS8dXwcvFk+9BS97d5ase5jffvuNX375hWXLlmGg1MfS2AIAEwNjjh492mFsI0aM4IutX3TqOP75zKRStd1FWxAEoSscHR2Rm9th4x5CQVpji9urbn+Wc+G/NkukDQR/t2BPa5ZIkyQJHaUeMpmMpKQkrrnmmn6KUBjsRCJNaJOdnW2/JNIATG1cKS/MJCUlBX9//36JQRAEob+oVCoaOpFIy00+RXF2IjbKEL787zQUOirCFqwkaModHW7bWos0MzMzAKL2bEJpYPxXYuwi9TUV1FVfpPavBFltdWWb+9VVKjExNsXE1ARTU1PszM0xd7HG1NQLU9PJ/Pvf/+4wtvYEBwWyf+9movZuRiaT4ejkjI+PN37jpmpblvn4+ODq6oqOjnjMETpv4qSJbNmyhee+eg2FXE6DRgPA119/rR2z9f/+7/+YMmUKH374Ib/88gsaScO/JlzHggnXIZPJuG3tfWjQ4O/c+CUu2DWA6Ng4rK2t0dfTZ1/UH8RmJJCUc47yqooWyWxBEISBytXFhcS8AiYvfpnvXpgPSHiNmUtecgTnMwZWIs3R0RE9PX2ST/xCQVo0pflpXCw8T2l+OjV/vSxsenkoCN0hnjCFNnl7eRKRcK5f6jaxcUWecJy4uDiRSBMEYdhRKpU0dDDZAMCZ3z4GoDg7mZDpdzNi5r87HIS/iayVRJqdnR1Tp80gJSUOA1NTrMzMMLc3w8zMDVNT02Y/ZmZmrS7T09Pr+gF3wbZt33Lw4EF8fHzw9PREX1+/V+sTho9XX321cbZOU0umBE0g0MWP//v2zWaJNIVCwYwZjeON1dTUYGJsgrqhXjszpr+zN4lZKRxNCCfmfALxWclU19Sg0WiYNXsWO7bvoERdjk+gD3dddTcrVqzoz0MWBEHoNBcXZ84kHMXS0Yfp976NJGnQN7bA0MKOzKQD/R1eM3K5nGumTmXnrzuwsrbF19ebiVPG4OOzUPvCLSAgoL/DFAYxkUgT2hQaGsqBw8f6pW5TaxckSUNsbCw33XRTv8QgCILQXzqbSAuYdCvGlo6MmvefNsc0a0trkw2oVCr27ml9RsCBwsLCghtvvLG/wxCGIDs7OwwNDAl09uX2SY3PHn5O3pw4caLV8np6eji7OHM88RT19XWcTY8lsygHhVzBaz+8h5GhEQEBAfxn+X+Qy+X88MMPYpa4Yaa2tpZx48Zx9uxZzpw502yMu6ioKJYvX054eDjW1tY8+OCDPPnkk/0XrCB0wNnZmYsljZPzeIycoV1uZG5PfE52s27wA8H2n36ksrJStDwTeoVIpAltCg0NRV1djrq6AqV+705R/E8m1i5IGg2nT5/u03oFQRAGgsZEWsddO91Cr8EttPXxPSRJQtNQh6a+joZ6NQ31dWga6mj463O9ulZ0KxOEf/D18+VsUiyVNVXEZyVR31DPhaKiNr8g3n333Tz77LMciDuKv78/S5Yvw8fHh7CwMNzd3VuUF0m04eXJJ5/EwcGBs2fPNlteXl6unUV1/fr1REdHs2TJEszMzLjnnnv6KVpBaJ+TkxOVZReor6tFR/fv8ReNLOxR19ZSWFiIjY1NP0bYnI6OjkiiCb1GJNKENnl5eQFQVpiBtUvrU633FhMbFwDOno3q03oFQRAGApVKRV1tNfs2PHFJIkyN1FDfmBxrqENTr25c11BHQ50aTYP6ryRZHfV16k4l4hznXNUHRyMIg8fs2bN58fSL3PHmf5CQ0NfT56abb26zlcXTTz/NI488gpGRUR9HKgx0v/32G7t37+b777/nt99+a7Zu69atqNVqNm7ciFKpJDAwkMjISN58802RSBMGLGdnZwAqS/IwtXHVLjcytwMgMzNzQCXSekpxcTHm5uYDqrWd0P9EIk1ok6enJwDlBf2QSLNqvFBnZmZQX18vBowWBGFYmTNnDpMmT6FOXYTSWIVKqURPzxClUqn9UalUrf67o8+X/jsoKKi/D1UQ+oVGo2H37t1s3LiRyZMn85///AeAZ555hry8PKytrVm0aBF+fn4dfnkSSTThn/Lz81m2bBk//fQTBgYGLdYfO3aMiRMnolQqtctmzpzJq6++SklJCebmrc9uXFvbvCVxeXl5zwcvCG1oSqQlHv0BXZUBF4vzuFiSy8WixokGiouL+zO8HlVXV8e4seOIj4ujRl3LtfOuZcfPO/o7LGEAEdkJoU0WFhbo6Rv06sydbXWV0FXpY2BiRVV5EefOndNOYSwIgjAcuLq6cmD/vv4OQxCGnCeeeILt27dzPi0d9V+tNn/4/gcWL16MgYEBKpWKTz/9tJ+jFAYzSZJYvHgx9913H6NHjyY9Pb1Fmby8vBZdf21tbbXr2kqkvfzyy6xZs6bHYxaEznB2dsbJ2ZWIXz7A2MQUJycnfJ2dcRkzCT+/e5k6dWp/h9hjDh48SGRkJKHugejq6HDo4MH+DkkYYEQiTWiTTCbDwcGBssLemc44K/4Yh794ltHzHsAn7PoW601sXKkqLyI2NlYk0gRBEARBuCxbt25l7dq12JvbMvOKKYS6N7bI/L9v3+Szzz7j/vvv7+cIhYFs1apVvPrqq+2WiY+PZ/fu3VRUVLB69eoej2H16tU8+uij2s/l5eXaVkKC0Nv09fWJj4tBkiSMjft2/Oy+smfPHvbt20daWhoymQyZTIZSR0n5xQpKS0sxMzPr7xCFAUIk0oR2+fv5ciY5rUf3WVdbzYnvXyd6/+cAVJYVtFrO1MaFwvPRxMXFccMNN/RoDIIgCIIg9I9z586xb98+CgsLufPOO/ssEdA0Y+Idk29mgv9YACprqlDI5XzzzTcikSa067HHHmPx4sXtlvHw8GD//v0cO3YMlUrVbN3o0aNZuHAhn332GXZ2duTn5zdb3/TZzs6uzf2rVKoW+xWEvjSUu7K/8cYbPP744yh1dKlrqEehUHDmXDRymRxvTy9MTEz6O0RhABGJNKFdgYGBHPxzc6vr1NUVHPzsKex9xuA5ahYGptYd7i8v9Qz7N67kYnEODr7jyEk8gdeYOa2WNbF2AWTExcVdxhEIgiAIgtDXKisriYiI4ODBg5w+fZqkpCSyc3KprKy8ZCIMGV99/Q3RUWfb3VdPCQwMxEDfgEOxx8gvLeTMuWjiMpNo0GiG9JdDoWdYW1tjbd3xs+67777L//3f/2k/5+TkMHPmTL755hvGjRsHQFhYGE899RR1dXXo6uoCjS1hfH192+zWKQhC72rqhv3a4mdZ98sGDG1MeP7555k8ebL4uxRaEIk0oV1eXl5UlRXSUK9GoaNstq4oM57UU7+Reuo3/vzq/7D3HYvXmDl4jJyJvrFFs7IN9WpO7XiPM799hLVbELMf3MHx717D1mMExpaOrdZtYuNCQ10tkWLmTkEQhgBJksjOziYxMZHJkyejUCj6OyRB6HG33Hob23/6CXVdPZKmHgCZTI6JtTOWnmPx
dvDG3MELpZ4hO9+9h+SkpDbHS+0No8eM5vDhw5xKjsTSypJrr7uWxYsXc+211/ZJ/cLQ5+Li0uxzU5LW09MTJycnAG6//XbWrFnD0qVLWblyJTExMbzzzju89dZbfR6vIAiNpk6dynvvvcezW1+jvLoCD30P0StKaJNIpAnt8vT0RJIkKoqyMbNrPiiqg89YAibeQtwf3zJi9j0UpkXxxxfP88fWNTj5h+E5Zg4eV8zgYnEu+zY+QUlOCmOuf5grZt9DXW0VGdGHufLmJ9qs29S68UEkOSmJhoYG8aVTEIRB57fffiMuLo7w8FOcOhVBYWFj151nnnmGF154oZ+jE4Se9+OPP6KjZ8zIGbdi8VfSzMzWrcXLOI2mAYWuitraGnJzc3FwcOiT+Pbv388PP/zA7NmzRSs0od+Ympqye/duli9fzqhRo7CysuLZZ5/lnnvu6e/QBKHLml4UxsbGkpuby+23395sRtrBYvLkydja2lJVVcWIESN48803+zskYQATiTShXZ6engCUFWa0SKQBhP1rFZlxf5KbfIrrn/iCmoulnIv4ndTwnRz87CkOf/4cAGZ27tz01PdYuQQAkHZmDxpNPZ6jZ7dZt8lfibS6OjVpaWl4eXn19OEJgiD0qltvvRVDEwssXYJwGXcTo1yDiTnwBUf+PNrfoQ0qkiQRFxfHnj17UKvVrFixQtsdShhYrCwtqVDD2OsfbrecXK7AzM6DC5nxJCUl9VkiTaFQsGDBgj6pSxAA3NzckCSpxfKQkBD++OOPfohIEC7fV199xd69e4mKjiE+Pp7KixXadfn5+axcubIfo+seMzMzcnNz+6yFtDC4iUSa0C4nJycUOjqUF2a0ul6pZ8Q1S15j++sLidqziREz/03QlIUETVlIZWk+qad+p7aqnJGz70Gh+/fgqCnhO7H3Ho2RedsDquoZmaOrZ0RdzUXi4uJEIk0QhEFl5v3vYe0WjJGFfbOHsuLsRE7t39Sn3dkGq8TERF588UX27N5DXn4eOgod6hvqGTVqFFOnTu3v8IRWeHp6cOTIn9TVVqOr0m+3rIWDF8XZSSQlJTF58uS+CVAQBEG4LEVFRSxcuBAzW1es3a8geOZEzO29sHD0Zv+njxMTE9PfIXabeC4TOksk0oR2KRQKnJxcKC9oPZEG4OAzhtDpSzjx45u4BE3CwtEbAEMzW0Km3dWifM3FErLjjzLh1qfbrVsmk2Fq40JJTjKxsbFcd911l3cwgiAIfchj1MxWl1u7BXOyvIzU1FTxgqADr7/+Ot9+/Q2zR00jdEoAAc4+3Lf+Sfbs2SMSaf2koqKCuLg44uLiSEhI4I477iA4OFi7Pjg4mCNHjlCadw5r18B292Xu4I1MJiMxMbG3wxYEQRB6iIWFBXp6+vhPvI3QGUuarTNz8CY6JrafIuucuro6UlJSiI2N1TbWuP322/s7LGGQkfd3AMLA5+/nQ1nB+XbLjL1hBaY2ruzb8MQls3G17tzp3UgaDR4jW/+SeSkTa2d0lPpi5k5BEIaMpuRCREREP0cy8I0aNYr6hnr+NeE6rvAIRqWrIsTFn927dl3Wfqurq7nvvvsYM3oMGRltvygazsrKyjh+/DgbNmzgscceY+bMWTg6OWNiYsKVV17J0qVLefPtd3jwoeZdOMPCwgAoyUnpsA4LBy80DfVERw/e1guCIAjDjVwux9fPj+Kc5BbrzO29SExIQKPR9ENkbTt48CAL/vUv/PwDMDAwICAggAULFvB/L77MwoULKSsr6+8QhUFGJNKEDnl5eVGWn95uGR1dFVOXvE5xdiIRv37QbtmU8J04+F2JgalVh3WbWLsgSdKAf7MhCILQWfrGlphaOXLq1Kn+DuWy7du3j59//rnX9j99+nQaNBpiMhK0y0LcA4k8e5aioqJu7TMhIYGxY8ayaeMmUpNSuPWWW6mra/8F0HDy6KOPYe/ghJmZGWFhYSxbtozNX/5AUkE9tiFzmbp0LTc/8xNL3ztL6MxlREdHN9t+xIgRyBU6rX7B+idz+8YWmXHx4mWZIAjCYBIcFEhZbmqzZZIkYe7gRU1NNenp6f0TWBv++9+n2Ln7IDr2Iwm75Wmue/wLFr91nOtXfQ1AfHx8P0coDDaia6fQIU9PTyouZCNpNMjkbederd2CGDn3P0T88j5uIVOwcQ9pUaaqrIichONMvLNzs9WZWrugrrlIQnw8Go0GeTv1C4IgDBaWLoGEnxq8LdLOnTvHoyseYfuOxiTavn37uOaaa3q8Hk9PT1xdXPgj9jhFZReITIsh6nwckiRx/Phx5s2b16X9bd26lXuWLcPSyILX7nqWGnUtT33xIs888wyvvPJKj8c/2EiSxPr167FwG8HUeY9iYe+FmZ0HOkq9VstbOHgTcaGInTt3MmfOHKDx5ZtG09CpFmkm1s7IdZTk5uRQX1+Pjo54LBUEQRgMAgMD2fb9T5QVZpJ8fDsOPmPY/vodBE9tHNYnLi4ODw+Pfo7yb97eXqQXVDFx4fPNluuqDJHJZMTGxnLllVf2T3DCoCSyEkKHPD09aahXU1lW0GHZkXPuw8rZn30bnqBeXdNi/bnTu5DJ5XiMnNGpuk1sXECSqK2tGXBvNgRBELrL2i2YiIiIAdf1oTPeffddAgL8CT+xn0/+F8rYEAsefugB6uvre7wumUxGUHAwf8Qd55PdX3A2PRZ7c1v0VXqEh4d3eX8rn3wSKyNLXr/rOdxsnPFz8mLhpJt59dVX+f3333s8/sFGJpPh6+eHsaUDPuOuw8oloM0kGoC5Q2OLsktbJerr62NkaMiF7LbHPSvKiCMj5jByhQ5mtm5oNBrOn29/CAlBEARh4AgICKCmqoK9H68gfPs7bH/9DgCKMuPRMzAmNnZg9SYKCAigOCelxQy6Oko9zGxcxDBCQpeJRJrQIU9PT4A2Z+68lEJHl2uWvkZFURYnfnyzxfqU8F9x9B+PnpF5p+o2sXbR/ltc4ARBGCqsXQO5WFFOSkrHrXYGms+3bMLOUofj34Rx4wx7Xn7Ul9i4BNavX98r9TW1dHvpzv/y9eMf89bS/3GFezB7du/u8r4eWbGCjMIsYjMbu4rmFueTnHMOgD/++KPngh7EDA0MuJDVucH/zWzdkMnknD59utlyRydnKoqyqVfXUJJ3jgObV5N2Zi91tVVE7d3M9y/dzG/r7kVdcxELRx9kcjlJSUm9cTiCIAhCLwgICAAg6JqFzZaPnHMf5g5eA+57W0BAALXVF6ksydMu0zTUU11RjKmdJ7GxAyteYeATiTShQ+7u7gDtztx5KQsHb8be8ChRezeTk3hCu/xiSR65yafwGjOn03UbWdgjV+iio6sccBdkQRCErqirrSYv5TTR+7YQd/hbYHC+IFj932c4n1PFnxHFAIzwN+WO6xx55pmnuj1uWXtmzmycmKamrlY7LX2oWyAnw8MpLy/v0r4effRRZs+ezbu/fMrxxFM8+Ml/OZZ4CmNjY1asWNHjsQ9Gzs5OlLTy1r41Ch0lJtYuLRLCAf5+gERp3jlSw38l4ch3/P7+/Wx4cCR
/fv0iSGDnORKlnhEWDl5A38/cKUkSY8eMwcbahl2XOXmFIAjCcOPm5oaenj41FaXMuO9dAHT1DHH0HYupnSdRA2wSmcDAxomeijIT+Oa5eUT88gEbHhzJ5hXjMLJwIDpmYMUrDHwikSZ0SF9fHzt7B8o60SKtScj0xdh7jWL/plWoay4CkHrqN+QKHdyvmN7p/cjlCoytHFEamAy4JsKCIAjtyT8XScz+L9i/cSXfrZnHhgdH8OMrt3Di+1cxo4gHHniAWbNm9XeYXXbDDTdwzZTJPP1OMuo6DRqNhIezAaWl5fz66689Xl9AQAC2NracTW9MOpZXVVBbX0tDQwOHDx/u0r7kcjlbtmxBrlTw2g/v42Rpz6qbHqK6qpoxY8ZQU9NySILhZsKECdSrq5u9tW+PhZMPpaVlqNVq7bIxY8YAUJyTzIhZ92Dp7IdS35hR85Yz/d53kMlkuI2YBjROOCBpGoiKiur5g2nHAw88QPipU2hq65kzezaLFy/u0/oFQRAGM4VCgY+vLyW5KXiOns1Vtz3D5LteQqGrGpAzd7q6uqKnp09uUjjF2Ymc/Okt6tXVAFi5BZGTndXuy7mqqqq+ClUYJEQiTegULy+vTnXtbCKXK7hmyatUVxRz9JuXAUgN34lz4NWoDEy6VLeJtQtyhVLM3CkIwqDyw0sLOLbtJfSr07lh9iQ+Wr+eiIgILlZUEHEqnHXr1qGn1/b4UwOVTCbjnXfXkZ5dyaq1ccxYepI17yVx++23ce211/ZKfdOnT+dw7DEe2/Qci95+gI17v8Lfz49Ro0Z1eX+xsbGUl5cT6OLLy3c+xZW+o3jyxgc4n34eb2/vLrdyG2qmTWtMcJXkdq7bsYWDN5JMRkLC3zOrhoaGIlfoUJKTio6uiunL3kLTUEdlaT4yuZyGejXuV0wFwNzRG4DIs2d7+EjaduzYMdZ/uJ45o6bx/r2vYmduy9YvtnaqFZ4gCILQKDgokLK8xntF8NRF2l5HFo7eVFdXDaixL5sSf7VVZYy78bHGZTpKfMJuwMrRF6DZfQwgPT0dB3t7lLpKjIyMWLduXZ/HLQxcvZZIe/HFFxk/fjwGBgaYmZm1WiYjI4O5c+diYGCAjY0NTzzxRIeDFRcXF7Nw4UJMTEwwMzNj6dKlXLx4sReOQLiUt5cnZfnpXdrGxNqFCf9aTfwf3xJzYCv55yLxGju3y3WbWrug0dQPuDcbgiAIHcnJzubM6Qg++ugjli1bxsiRI1Eqlf0d1mULCgri/vv/w2c/ZSFTuXDkyBG2bv0SCwuLXqnv3vvuxdHVifFTr+azzz4jKyuLuPh47O3tu7yvw4cP09DQwLQRk9BX6QMwxmsEV3gGk5WVxZEjR3o6/EHF29sbuVxBcXbnEmnmDl5IDfUcPXpUu8zHxweNpoHinGRtmQm3PkP84W848cMbWDj6aMdANbV2Qa7QJe1cWs8fTBu+/fZbNJIGQz0DTp+LIqc4jynXTNF2HRYEQRA6FhgY2OoA/ub2jRPRDLThK0KCgyjNTWbErGU4+oU1vtQZMRUze0/tzJ2Xeuedd8jNy2P2yGsw0jPgq6++6qfIhYGo1xJparWaBQsWcP/997e6vqGhgblz56JWqzl69CifffYZmzdv5tlnn213vwsXLiQ2NpY9e/bwyy+/cPjwYe65557eOAThEp6enlQUZXZ5O/+Jt+ASNIk/tj6PQleFW+g1Xd6HibULtRdLqaqqJDOz6zEIgiD0F5VK1d8h9Jo333yT06dPczI8ggkTJvRqXVdddRVR0VFs+XwLixYtwtHRsdv7WrZsGQb6Brz78ydEn4+nvqGedb98yunUKLw8vZg+vfPDDwxFCoUCM3Pzdluk5SZHcPy716lX12Dh0Nii7NJutm5ubsiA4uy/JxDwv3oBHqNmUZafjvsV07TL5QodTG3dKC0tobq6uucPqBU+Pj4AbPtzB6/98B4ymYxt27b1Sd2CIAhDRUBAADWV5VSVFTRbbmRhj0rfcMAl0gICAijJTUUmkzNt2RvMW7EJ15DJ6Kr0MbV2ahHv5MmTATDWN8JQz5DUlNR+iFoYqHR6a8dr1qwBYPPmza2u3717N3FxcezduxdbW1tGjBjB//73P1auXMnzzz/f6hv7+Ph4fv/9d8LDwxk9ejQA69atY86cOaxduxYHB4feOpxhz9PTk5rKcmqryrvUNVMmkzF58Ytse2E+riGTUeoZdbluExsXGuobx16JjY3F1dW1y/sQBEEQepauri5XXHFFf4fRZXZ2dsTFxxESHMKL376Ft4MHMRkJONg7EH4qHF1d3f4Osd+5urqQ/dfMneVFWez+8EEc/cNwHzGNrPijnNqxDknS4B12PWa2biCTc/aSrpk6OjpYWVlTVJRFfV0tOrqqxueBRf+Hk39Yi7FSLR19KM1NJTU1laCgoF4/vqZWB3NHT6ew/AInk06TnZ2Nqalpr9ctCIIwVDQN4J+fGomhuS3FOcmU5KRSkpNMfV0dWVlZ/Rxhc5cm/gzNbDEwtdaua23mzunTp6Oj0GHroe9RyOVcO++6vg5ZGMB6LZHWkWPHjhEcHIytra122cyZM7n//vuJjY1t9eH82LFjmJmZaZNo0DiWh1wu58SJE9xwww2t1lVbW0ttba3283Af/6Q7PD09gcaZO63duvaQa2hmy11vHO12lwnTv7p/6ChVxMXFMWdO52f9FARBGE4qKiqIjIwkJSVF++Ng78DaN9aiUCj6O7wBw9XVlfiEeAIDAojNSMDUxJSjx462ORTFcDMiNJToL79BkiTKC85TeD6GwvMxRP7+CTKZHKWBCSoDEywcvJHJZJhYOZGWnt5sH15eXhQWFlCWl4alsx8AKkNTAiff3qI+cwdPkMlITk7uk0RaWFgYH7zfONnEzePncTLpNDExMQQEBPR63YIgCEOFu7s7+voG7PrwAaCxAYWLqxshQUHcNncFDz30UD9H2FxT4q84OxlDM9tm68wdvImOaT6Ds4GBAdEx0RQUFBAWFiZetAnN9FsiLS8vr1kSDdB+zstrfaaovLw8bGxsmi3T0dHBwsKizW0AXn75ZW0LOaF7mhJpZYVdT6QBlzXuiIm1MwAGJtYDromwIAhCe7755hsyMzNJTEwkPi6e4uILfP/DD4wbN65X6rvxhhvZu28vADZmVhiqDEjLz+DBhx7Ew8OjV+ocrBwcHMgvKODLL7/kyiuvFK2dLzFp0iQ2bdpEVVkBTgETCJ2+hKh9nzHh1qewdglk14cP4D5imvbebunkS1pkJoWFhVhbN77hv+KKERw7dpTinGRtIq0t5g7eSJoGTp8+3eZL0Z4UHByMXK5gy4FtfLr7C0DMyCYIgtBVCoWCP/44THx8PIGBgfj6+mJgYNDfYbXJ3d0dlUqPktwUnAOv0i7XaBrQN7YkKzODixcvYmT0dw8qPz8//Pzav4cJw1OXEmmrVq3i1VdfbbdMfHz8gPtlW716NY8++qj2c3l5Oc7Ozv0Y0eBjYWGBialZl2bu7Ck6Sj0MzWzRURkSHR3T5/ULgiB01z333IOliQUOFr
bYm9mSXprGzz//3GuJtIqKcsb5jOTR6+9Hpaskv7SQez94nJSUFJFIa4VSqWTx4sX9HcaAM378eABKclIwNLNl3I2Pkp14guh9nzPxjuepKivE7a9ZN6FxMoHzUQeIiopi6tTG5SEhIY0zd3Zi9k+LvwamjoiI6FKcdXV1xMXFcerUKU6dOoW+vj6vvPJKhxN6JCUloVQpqamuQUIiNDSUu+66q0t1C4IgCDBq1KhuzaDdH5pm7jwfdZC62mpKclIoz0+lODeVutoazC0sqaur6+8whUGiS4m0xx57rMMHzs4+qNvZ2XHy5Mlmy/Lz87Xr2tqmoKD5YIb19fUUFxe3uQ00DvY8lAd87iseHh6UFfR9Ig0aW6WpayqJj49HkiQxs5YgCIOCDBnr73sNXZ3G7gB5ZQUkJiT2Wn3ePj6cPnwSlW5jIsHKxAIdhQ4pKSnMmDGj1+oVhhYPD4/GmTtzknEKmIBCV8X0e95i2//ms+vDB9EzMsfOc6S2vIWDN5qGek6cOKFNpHl7eyNpNBTndJxIM7FxRSbXIaGdv436+nri4+O1SbOT4aeIijqLurYWmUyGqbUzpQUZ3HLLLR0mqh968CGUMl0WTr+Jn078RlxsHFVVVRgaGpKRkcHevXsJCgpi7NixnTxjgiAIwmAwc8Z01q5dS1lWDIEBAUyePp6AgH8TGBhISEgI5ubm/R2iMEh0KZFmbW2tbbJ/ucLCwnjxxRcpKCjQdtfcs2cPJiYmbY5RERYWRmlpKREREdrM9/79+9FoNL32dl/4m4+3F0fOpvdL3SbWLuSfO0Nl5UWysrJEi0JBEAYFCYm8kgKcrRtnmXQwtyMhIaHX6vPy8uLX7b9oPyvkCuwsbEhJ6TiZIQwfVVVVxMXFERUVRXR0NP7+/s1mQFcoFFhaWVFySRLMzM6dq29/lgObVuE7/gbkir8fIc0dGluU/fnnn9plPj4+SJKm2cydbVHo6GJq60pObk6z5dXV1axevZrjJ05wNvIsNTXVyGQyLB08sXAOZMwN07F2DcLK2R+5QocND15BREREh8+EE66awK5ff2fmyCnU1tXyxaHvcHJ0QldXl8KiQgCMjIwoLi4WY+IIgiAMIa+99hqPP/44NjY2omGGcFl6bYy0jIwMiouLycjIoKGhgcjISKDxId/IyIgZM2YQEBDAnXfeyWuvvUZeXh5PP/00y5cv17YeO3nyJIsWLWLfvn04Ojri7+/PrFmzWLZsGevXr6euro4HHniAW2+9VczY2Qc8PT35be/hjgv2AlMbF9Ij9wEQFxcnEmmCIAwa2cV52FnYkldSQH1DPampKWg0GuRyeY/X5eXlRUlFKdW11eir9JEkCStjC5KTk3u8LmHg02g0pKWlER0dTVRUFFHR0URGniXtXCoajQaZTIa+kTlVFcXcdtttGBsba7f19HAnLat5CzHf8TdiZueBub1ns+Vmdh4gkxMVHa1d5uDggK6uLuWFGTTU1aLQbb9ngKWjD+fy0igrK9POnvnTTz/xzjvv4DFqJiOvW4G1WyBWLgFtzgBu5eRLeHh4h+fl2Wef5bvvvuPNnz4kNiMBSZJoqK3j4sWLrLzxQfSUKtZ8vZbw8HBtN1dBEARh8JPJZC3GaReE7ui1RNqzzz7LZ599pv3cNAvngQMHmDx5MgqFgl9++YX777+fsLAwDA0Nueuuu3jhhRe021RVVZGYmNisr/LWrVt54IEHmDp1KnK5nJtuuol33323tw5DuISnpycXS/I79UDc00ysXaitKkNHqSI2NpaZM2f2af2CIAjdYWRkxHs7N1BdW02DRgPA6FGjezWRBrBiw7NUqau5WFMJEsjN2h8zShj8iouLtQmz6OhozkSeJS42lqqqSgAMjM2xcPLF3DWMiRPuxtLJF3MHL0rz0/juhfnExMQQFham3d/IkSOJ2PhZs+EUZDIZdp4tZ1XX0VVhYuVETk429fX16OjoIJfLcXJ2Ie1cKqUF57F09GkzdkmSMLZy0s7c2TQ7e9OYu6HTl2DnNbLN7ZtYugQSfqrjcdaCg4NRKVWcTD7DaK8RLLjqOpKyU/lk1xYM9QzIKmpsGXf48GGRSBMEQRAEoYVeS6Rt3ryZzZs3t1vG1dWVnTt3trl+8uTJSJLUbJmFhQVffvllT4QodJGnpyeSJFFelNXibXRvM7F2afyvpZOYuVMQhEFj/fr1nD59Gl9fX+2Pra1tr3UnaHrxVFlbhYWxGR62ruSW5KMU3dOGtAcfepj31jW+VNTRUWLh6ImZgy+hc6dg4eiLpZMvBqbWrf7emdt7IZPJ2bx5c7NE2sSJE/nggw+oLi/CwLTjYT0sHL0pL8okOTkZf39/AIKCAkk7l0pJdjKWjj5IkkRlSR4luSkU56RQkpNCcXYSJTkpqGsuIpPrkJaWpk2kBQYGolSpKEiP7lQizdo1iCN/fkdVVVWHM8dNnjKZA/v2M3/cLCJSznIiKQIJePbLV1EoFIRdGcb8+fM7rFMQBEEQhOGn1xJpwtDj6dmYPCsvzOjzRJqpTWMiTWVkTkxsbJ/WLQjC0HLdddcRGRlJQUEB5ubmTJs2jVdffbXZEAFRUVEsX76c8PBwrK2tefDBB3nyySe7XNe1117LwoULezL8djV1iXtqwQr8nBpbp23c+xVxRWKMtKEsNjYWG7cQptz9Cqa2bih0Op841dFVYWLtzNGjR5stb+pJUJyT0rlEmoMPGdGHiIqK0ibSgoOC+HXnb5z57SPO7tlISU4KdbVVjRvI5MhkcpAaMDMzJyhgFOPGjeX666/X7lOpVBIUFEzh+c7N2G3tFkxDQwNnz55tlhRsza233squXbtY/fmLyGVydHV18ff358UXX2Tq1KmYmJh0qk5BEARBEIYfkUgTOs3R0RGlStUvM3eqDM1Q6hujo9QjNjZKzNwpCEK3TZkyhf/+97/Y29uTnZ3N448/zs0336xNJJSXlzNjxgymTZvG+vXriY6OZsmSJZiZmTUbkH0gahr3o7SyTLvMzNCEgviCtjYRhoDgoEBOR/+IhaN3t7a3dg0iI+nPZsuaZu4syUnGyb/9pBSAuYMnmoZ6wsPDueWWWwCYMGECkuYVLmQnoadS4uRoj4+3NyNHjmTcuHH4+/vj7u7e7oD+48aOYduOvZ06DgtHb3R0lURERHSYSLvzzjuJiopCR0eHRYsWERgYKJ4rBEEQhrmm3nDifiB0RCTShE6Ty+W4urpRXtj3iTSZTIaJtQuSJHGxopycnBwcHR37PA5BEAa/FStWaP/t6urKqlWrmD9/PnV1dejq6rJ161bUajUbN25EqVQSGBhIZGQkb775ZpcTaRs3buShhx7qlfHQWmNpaYlcLqe0sqzxelnTOD5WeUU5NTU16Onp9UkcQt9qaGigrDCLutoqdFXtd2lsjYWjD+dO72o2dp+Ojg7WNjYU53SuNaOFQ2MS79ix49plc+bM4Xx6OhYWFhgaGnY5LoDRo0c3TjBVU4muXvv7UOgosXLy5dSpUx3uV6FQ8Oabb3YrJkEQBGFwqqqqIiYmhuzsbLKzszl//
jwHDhzAQF+PvNxcsnNyWLLkbta990F/hyoMcCKRJnSJt5cXsVl9n0iDxu6dF4tzgcaZO0UiTRCEy1VcXMzWrVsZP368tlXMsWPHmDhxIkrl3wP0z5w5k1dffZWSkhLMzc1b7Ke2tpba2lrt5/LycqAxaeft7c3cuXN7+UgaKRQKrK2s2bz/Gz7ds5X6hnoAbKxtaGho6JMYhL535ZVX8v7771OcnYytR2iXt7d08kXTUM+pU6cYO3asdrmvjzeJWQmd2kfjzJ0y4uKbj2N6ubNsjxo1CkmSKMqMx957dIflLVyCOHGy45k7BUEQhOFnyd2L+ebbbQDoKuTI5VBbp8HBTI+5IbYkyA356ccfRSJN6FDfvCIXhgwvL08qivonkWZi7UJlaQE6uiox4YAgCJdl5cqVGBoaYmlpSUZGBtu3b9euy8vLazE1etPnvLy8Vvf38ssvY2pqqv25NHnQ1ja95auvv+KJlU/w9jtvs23bNg4fPszZqLPdbhEkDHyzZ88GoDg7qVvbWzj5ArSYAGrMmDGUZCe3mPjpUpIkUVV+gfxzZ1Hpm1BaUkJpaWm34mhNQEAAKpUeBenRnSpv7RpEUmIClZWVPRaDIAiCMDQUFxcz2s2Mz5aO5NEZntTWNc6oXlFTT2xuJYkFVZSUlrV73xMEEC3ShC7y9PSkvDAbSaNB1kddlZqYWLtwsSQXa2cfYsWEA4IgXGLVqlW8+uqr7ZaJj4/Hz88PgCeeeIKlS5dy/vx51qxZw6JFi/jll1+6PSbG6tWrefTRR7Wfy8vLcXZ2xsTQmPz8/G7ts7umTJnClClT+rROoX9ZWlqi0tPnQidbj/2TsaUjOkp9Tpw40Wz5lVdeyRtvvEF1RTH6xhZUlxc1zraZm0JxdjIluY0zb9ZcLAFArmh8rIyOjubqq6++vIP6i66uLiGhoRSdb/u+fyEzgeQTPzNq3n+wcQtCo9EQGRnJhAkTeiQGQRAEYWjw9fPnvb37uGvDaaBxGANdHQXunl44BQZypZMTM2bMEGOkCR0SiTShSzw9Pamvq6WyNB8jC/s+rdvUxgUkCQMzB2JiRCJNEIS/PfbYYyxevLjdMh4eHtp/W1lZYWVlhY+PD/7+/jg7O3P8+HHCwsKws7Nrkfxq+mxnZ9fqvlUqFSqVqsVyU4O+T6QJw5OVlSVFGfHd2lYmk2Hh5EN8fPPtg4KCAPh57Z1UlhVSW1kKgFxHFzNbDywcvHDyH4+5gxcW9l4YWdqz6eExREVF9VgiDWDsmNF8/ePvANTX1XLkq/9h7RKAc+BV5KWc4dDnT1OvrsE19Bps3EPQ0VVx6tQpkUgTBEEQmlm7di1z5szBzs6OyspK9u3bR25uLrNmzWL+/Pn9HZ4wiIhEmtAlnp6eAJQVZPR5Is3E2gUAlYEpcfGHxMydgiBoWVtbY21t3a1tNZrGZv1NY5yFhYXx1FNPaScfANizZw++vr6tjo/WntzifC5evNituAShK3x9fDhyLLzb90Yr5wDOndzRbJmXlxchoSNITEohdOa/Mbf3wtzRG1NrF23rs4b6Okrz0yjMjCPx6I/IFTo9PvzC6NGj+eCDD1DXXKQoI474w99wacpPqW+MUl8HG/cQFDq6WDn7ERERATS2RD148CCRkZHkZGXx9rvvap9lBEEQhOFFpVJph0O4ctwYos5GYqjU4ZcdP4lEmtAlwzKR1tTnuWkwaKHzLC0tkclklBdm4Og3rk/rNjS3Ra6ji1xHl7LSEpKTk9tsHSIIQt9qup4O9DElTpw4QXh4OFdddRXm5uakpqbyzDPP4OnpSVhYGAC33347a9asYenSpaxcuZKYmBjeeecd3nrrrU7X03QeHnzoIe655x5xvxF63RVXXMH+/fupKivA0My24w3+wdLJl7hDVWRnZ2NsbKxdvnTJ3TyyYgWhM5airq7gQlYC58/u50JWIqXZiVzISaGhvg4AB0cnrpk8kYULF/bo77yfn1/jhAMZcTj4jCVk+t1E7dnEiJn/xtLFn7hDX6MyMEWh05j4tnQN4tjxExQWFjJh/JWUl1fgZWtCemEFn376KatXr+6x2IS+M1juM31JfKcRhO4zMDDkKk9zJnhbsGZHEsXFxejoDMv0iHCJzt5rZNIwvBtlZWVd9ixSgiAIQkuZmZk4OTn1dxhtio6O5uGHH+bs2bNUVlZib2/PrFmzePrpp5vNBBwVFcXy5csJDw/HysqKBx98kJUrV3a6HnGfEQRB6B0D/T7Tl8S9RhAEoXd0dK8Zlok0jUZDTk4OxsbGA7JrYNMg1ZmZmZiYmPR3OO0Ssfa8wRIniFh7y2CJ9dI4jY2NqaiowMHBAXkfT0QyEA30+8zlGiy/o5dDHOPQII5xaGg6xri4OHx9fcV95i8D4V4zHH7/ukqck+bE+WhOnI+WBtI5kSSpU99phmXbRblcPijeZJmYmPT7L1JniVh73mCJE0SsvWWwxNoUp6mpaX+HMmAMlvvM5Rosv6OXQxzj0CCOcWhwdHQUSbRLDKR7zXD4/esqcU6aE+ejOXE+Whoo56Qz32nEnUgQBEEQBEEQBEEQBEEQOkEk0gRBEARBEARBEARBEAShE0QibQBSqVQ899xzqFSq/g6lQyLWnjdY4gQRa28ZLLEOljiFnjcc/t+LYxwaxDEODcPhGAcr8f+mJXFOmhPnozlxPloajOdkWE42IAiCIAiCIAiCIAiCIAhdJVqkCYIgCIIgCIIgCIIgCEIniESaIAiCIAiCIAiCIAiCIHSCSKQJgiAIgiAIgiAIgiAIQieIRJogCIIgCIIgCIIgCIIgdIJIpAmCIAiCIAiCIAiCIAhCJ4hE2gBw8OBBZDJZqz/h4eFtbjd58uQW5e+7775ej9fNza1Fva+88kq729TU1LB8+XIsLS0xMjLipptuIj8/v9diTE9PZ+nSpbi7u6Ovr4+npyfPPfccarW63e366py+//77uLm5oaenx7hx4zh58mS75bdt24afnx96enoEBwezc+fOHo/pn15++WXGjBmDsbExNjY2zJ8/n8TExHa32bx5c4vzp6en1+uxPv/88y3q9fPza3eb/jin0Prfj0wmY/ny5a2W78tzevjwYa699locHByQyWT89NNPzdZLksSzzz6Lvb09+vr6TJs2jeTk5A7329Xfd2Hg6uy1NSoqiquvvho9PT2cnZ157bXX+ini7nnxxRcZP348BgYGmJmZtVomIyODuXPnYmBggI2NDU888QT19fV9G+hlGkp/m711/RpIOnNf7uvnrZ724YcfEhISgomJCSYmJoSFhfHbb79p1w/24xuqamtrGTFiBDKZjMjIyGbrBvv9oCuGyz2yq4bSvaYrhsM1+3K88soryGQyHnnkEe2ywXQ+RCJtABg/fjy5ubnNfv7973/j7u7O6NGj29122bJlzbbrqwvxCy+80KzeBx98sN3yK1as4Oeff2bbtm0cOnSInJwcbrzxxl6LLyEhAY1Gw0cffURsbCxvvfUW69ev57///W+H2/b2Of3mm2949NFHee655zh9+jShoaHMnDmTgoKCVssf
[... remainder of base64-encoded PNG image data elided ...]",
       "text/plain": [
      " + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], "source": [ "from typing import List, Optional, Tuple\n", "\n", @@ -368,7 +391,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.12.11" + "version": "3.12.12" } }, "nbformat": 4, diff --git a/src/py123d/conversion/datasets/pandaset/pandaset_constants.py b/src/py123d/conversion/datasets/pandaset/pandaset_constants.py index 51ef348d..bae1c61c 100644 --- a/src/py123d/conversion/datasets/pandaset/pandaset_constants.py +++ b/src/py123d/conversion/datasets/pandaset/pandaset_constants.py @@ -2,7 +2,9 @@ from py123d.common.utils.enums import SerialIntEnum from py123d.datatypes.detections.detection_types import DetectionType -from py123d.datatypes.sensors.camera.pinhole_camera import PinholeCameraType +from py123d.datatypes.sensors.camera.pinhole_camera import PinholeCameraType, PinholeDistortion, PinholeIntrinsics +from py123d.datatypes.sensors.lidar.lidar import LiDARType +from py123d.geometry import StateSE3 PANDASET_SPLITS: List[str] = ["pandaset_train", "pandaset_val", "pandaset_test"] @@ -15,6 +17,8 @@ "right_camera": PinholeCameraType.CAM_R1, } +PANDASET_LIDAR_MAPPING: Dict[str, LiDARType] = {"main_pandar64": LiDARType.LIDAR_TOP, "front_gt": LiDARType.LIDAR_FRONT} + class PandasetBoxDetectionType(SerialIntEnum): @@ -108,6 +112,98 @@ class PandasetBoxDetectionType(SerialIntEnum): PandasetBoxDetectionType.TRAM_SUBWAY: DetectionType.GENERIC_OBJECT, # TODO: Adjust default types } +# https://github.com/scaleapi/pandaset-devkit/blob/master/docs/static_extrinsic_calibration.yaml +PANDASET_LIDAR_EXTRINSICS: Dict[str, StateSE3] = { + "front_gt": StateSE3( + x=-0.000451117754, + y=-0.605646431446, + z=-0.301525235176, + qw=0.021475754959146356, + qx=-0.002060907279494794, + qy=0.01134678181520767, + qz=0.9997028534282365, + ), + "main_pandar64": StateSE3(x=0.0, y=0.0, z=0.0, qw=1.0, qx=0.0, qy=0.0, qz=0.0), +} + +# https://github.com/scaleapi/pandaset-devkit/blob/master/docs/static_extrinsic_calibration.yaml +PANDASET_CAMERA_EXTRINSICS: Dict[str, StateSE3] = { + "back_camera": StateSE3( + x=-0.0004217634029916384, + y=-0.21683144949675118, + z=-1.0553445472201475, + qw=0.713789231075861, + qx=0.7003585531940812, + qy=-0.001595758695393934, + qz=-0.0005330311533742299, + ), + "front_camera": StateSE3( + x=0.0002585796504896516, + y=-0.03907777167811011, + z=-0.0440125762408362, + qw=0.016213200031258722, + qx=0.0030578899383849464, + qy=0.7114721800418571, + qz=-0.7025205466606356, + ), + "front_left_camera": StateSE3( + x=-0.25842240863267835, + y=-0.3070654284505582, + z=-0.9244245686318884, + qw=0.33540022607039827, + qx=0.3277491469609924, + qy=-0.6283486651480494, + qz=0.6206973014480826, + ), + "front_right_camera": StateSE3( + x=0.2546935700219631, + y=-0.24929449717803095, + z=-0.8686597280810242, + qw=0.3537633879725252, + qx=0.34931795852655334, + qy=0.6120314641083645, + qz=-0.6150170047424814, + ), + "left_camera": StateSE3( + x=0.23864835336611942, + y=-0.2801448284013492, + z=-0.5376795959387791, + qw=0.5050391917998245, + qx=0.49253073152800625, + qy=-0.4989265501075421, + qz=0.503409565706149, + ), + "right_camera": StateSE3( + x=-0.23097163411257893, + y=-0.30843497058841024, + z=-0.6850441215571058, + qw=0.5087448402081216, + qx=0.4947520981649951, + qy=0.4977829953071897, + qz=-0.49860920419297333, + ), +} + +# https://github.com/scaleapi/pandaset-devkit/blob/master/docs/static_extrinsic_calibration.yaml +PANDASET_CAMERA_INTRINSICS: Dict[str, 
PinholeIntrinsics] = { + "back_camera": PinholeIntrinsics(fx=933.4667, fy=934.6754, cx=896.4692, cy=507.3557), + "front_camera": PinholeIntrinsics(fx=1970.0131, fy=1970.0091, cx=970.0002, cy=483.2988), + "front_left_camera": PinholeIntrinsics(fx=929.8429, fy=930.0592, cx=972.1794, cy=508.0057), + "front_right_camera": PinholeIntrinsics(fx=930.0407, fy=930.0324, cx=965.0525, cy=463.4161), + "left_camera": PinholeIntrinsics(fx=930.4514, fy=930.0891, cx=991.6883, cy=541.6057), + "right_camera": PinholeIntrinsics(fx=922.5465, fy=922.4229, cx=945.057, cy=517.575), +} + +# https://github.com/scaleapi/pandaset-devkit/blob/master/docs/static_extrinsic_calibration.yaml +PANDASET_CAMERA_DISTORTIONS: Dict[str, PinholeDistortion] = { + "back_camera": PinholeDistortion.from_list([-0.1619, 0.0113, -0.00028815, -7.9827e-05, 0.0067]), + "front_camera": PinholeDistortion.from_list([-0.5894, 0.66, 0.0011, -0.001, -1.0088]), + "front_left_camera": PinholeDistortion.from_list([-0.165, 0.0099, -0.00075376, 5.3699e-05, 0.01]), + "front_right_camera": PinholeDistortion.from_list([-0.1614, -0.0027, -0.00029662, -0.00028927, 0.0181]), + "left_camera": PinholeDistortion.from_list([-0.1582, -0.0266, -0.00015221, 0.00059011, 0.0449]), + "right_camera": PinholeDistortion.from_list([-0.1648, 0.0191, 0.0027, -8.5282e-07, -9.6983e-05]), +} + PANDASET_LOG_NAMES: List[str] = [ "001", diff --git a/src/py123d/conversion/datasets/pandaset/pandaset_converter.py b/src/py123d/conversion/datasets/pandaset/pandaset_converter.py index 04f6149b..ce9bc588 100644 --- a/src/py123d/conversion/datasets/pandaset/pandaset_converter.py +++ b/src/py123d/conversion/datasets/pandaset/pandaset_converter.py @@ -1,28 +1,37 @@ -import gzip -import json -import pickle from pathlib import Path from typing import Dict, List, Optional, Tuple, Union import numpy as np +import pandas as pd from py123d.conversion.abstract_dataset_converter import AbstractDatasetConverter from py123d.conversion.dataset_converter_config import DatasetConverterConfig from py123d.conversion.datasets.pandaset.pandaset_constants import ( PANDASET_BOX_DETECTION_FROM_STR, PANDASET_BOX_DETECTION_TO_DEFAULT, + PANDASET_CAMERA_DISTORTIONS, PANDASET_CAMERA_MAPPING, + PANDASET_LIDAR_EXTRINSICS, + PANDASET_LIDAR_MAPPING, PANDASET_LOG_NAMES, PANDASET_SPLITS, ) +from py123d.conversion.datasets.pandaset.pandaset_sensor_loading import load_pandaset_lidars_pc_from_path +from py123d.conversion.datasets.pandaset.pandaset_utlis import ( + main_lidar_to_rear_axle, + pandaset_pose_dict_to_state_se3, + read_json, + read_pkl_gz, + rotate_pandaset_pose_to_iso_coordinates, +) from py123d.conversion.log_writer.abstract_log_writer import AbstractLogWriter from py123d.conversion.map_writer.abstract_map_writer import AbstractMapWriter +from py123d.conversion.utils.sensor_utils.lidar_index_registry import PandasetLidarIndex from py123d.datatypes.detections.detection import BoxDetectionMetadata, BoxDetectionSE3, BoxDetectionWrapper from py123d.datatypes.scene.scene_metadata import LogMetadata from py123d.datatypes.sensors.camera.pinhole_camera import ( PinholeCameraMetadata, PinholeCameraType, - PinholeDistortion, PinholeIntrinsics, ) from py123d.datatypes.sensors.lidar.lidar import LiDARMetadata, LiDARType @@ -36,7 +45,6 @@ from py123d.geometry.geometry_index import BoundingBoxSE3Index, EulerAnglesIndex from py123d.geometry.transform.transform_se3 import ( convert_absolute_to_relative_se3_array, - translate_se3_along_body_frame, ) from py123d.geometry.utils.constants import DEFAULT_PITCH, DEFAULT_ROLL 
from py123d.geometry.utils.rotation_utils import get_quaternion_array_from_euler_array @@ -107,8 +115,8 @@ def convert_log(self, log_index: int, log_writer: AbstractLogWriter) -> None: location=None, # TODO: Add location information. timestep_seconds=0.1, vehicle_parameters=get_pandaset_chrysler_pacifica_parameters(), - camera_metadata=_get_pandaset_camera_metadata(source_log_path), - lidar_metadata=_get_pandaset_lidar_metadata(source_log_path), + camera_metadata=_get_pandaset_camera_metadata(source_log_path, self.dataset_converter_config), + lidar_metadata=_get_pandaset_lidar_metadata(source_log_path, self.dataset_converter_config), map_metadata=None, # NOTE: Pandaset does not have maps. ) @@ -119,11 +127,11 @@ def convert_log(self, log_index: int, log_writer: AbstractLogWriter) -> None: if log_needs_writing: # Read files from pandaset - timesteps = _read_json(source_log_path / "meta" / "timestamps.json") - gps: List[Dict[str, float]] = _read_json(source_log_path / "meta" / "gps.json") - lidar_poses: List[Dict[str, Dict[str, float]]] = _read_json(source_log_path / "lidar" / "poses.json") + timesteps = read_json(source_log_path / "meta" / "timestamps.json") + gps: List[Dict[str, float]] = read_json(source_log_path / "meta" / "gps.json") + lidar_poses: List[Dict[str, Dict[str, float]]] = read_json(source_log_path / "lidar" / "poses.json") camera_poses: Dict[str, List[Dict[str, Dict[str, float]]]] = { - camera_name: _read_json(source_log_path / "camera" / camera_name / "poses.json") + camera_name: read_json(source_log_path / "camera" / camera_name / "poses.json") for camera_name in PANDASET_CAMERA_MAPPING.keys() } @@ -142,65 +150,73 @@ def convert_log(self, log_index: int, log_writer: AbstractLogWriter) -> None: camera_poses, self.dataset_converter_config, ), + lidars=_extract_pandaset_lidar( + source_log_path, + iteration, + ego_state, + self.dataset_converter_config, + ), ) # 4. Finalize log writing log_writer.close() -def _get_pandaset_camera_metadata(source_log_path: Path) -> Dict[PinholeCameraType, PinholeCameraMetadata]: +def _get_pandaset_camera_metadata( + source_log_path: Path, dataset_config: DatasetConverterConfig +) -> Dict[PinholeCameraType, PinholeCameraMetadata]: - all_cameras_folder = source_log_path / "camera" camera_metadata: Dict[PinholeCameraType, PinholeCameraMetadata] = {} - for camera_folder in all_cameras_folder.iterdir(): - camera_name = camera_folder.name - - assert camera_name in PANDASET_CAMERA_MAPPING.keys(), f"Camera name {camera_name} is not recognized." - camera_type = PANDASET_CAMERA_MAPPING[camera_name] - - intrinsics_file = camera_folder / "intrinsics.json" - assert intrinsics_file.exists(), f"Camera intrinsics file {intrinsics_file} does not exist." - intrinsics_data = _read_json(intrinsics_file) - - camera_metadata[camera_type] = PinholeCameraMetadata( - camera_type=camera_type, - width=1920, - height=1080, - intrinsics=PinholeIntrinsics( - fx=intrinsics_data["fx"], - fy=intrinsics_data["fy"], - cx=intrinsics_data["cx"], - cy=intrinsics_data["cy"], - ), - distortion=PinholeDistortion(k1=0.0, k2=0.0, p1=0.0, p2=0.0, k3=0.0), - ) + if dataset_config.include_cameras: + all_cameras_folder = source_log_path / "camera" + for camera_folder in all_cameras_folder.iterdir(): + camera_name = camera_folder.name + + assert camera_name in PANDASET_CAMERA_MAPPING.keys(), f"Camera name {camera_name} is not recognized." 
+ camera_type = PANDASET_CAMERA_MAPPING[camera_name] + + intrinsics_file = camera_folder / "intrinsics.json" + assert intrinsics_file.exists(), f"Camera intrinsics file {intrinsics_file} does not exist." + intrinsics_data = read_json(intrinsics_file) + + camera_metadata[camera_type] = PinholeCameraMetadata( + camera_type=camera_type, + width=1920, + height=1080, + intrinsics=PinholeIntrinsics( + fx=intrinsics_data["fx"], + fy=intrinsics_data["fy"], + cx=intrinsics_data["cx"], + cy=intrinsics_data["cy"], + ), + distortion=PANDASET_CAMERA_DISTORTIONS[camera_name], + ) return camera_metadata -def _get_pandaset_lidar_metadata(log_path: Path) -> Dict[LiDARType, LiDARMetadata]: - # TODO: Implement - return {} +def _get_pandaset_lidar_metadata( + log_path: Path, dataset_config: DatasetConverterConfig +) -> Dict[LiDARType, LiDARMetadata]: + lidar_metadata: Dict[LiDARType, LiDARMetadata] = {} + + if dataset_config.include_lidars: + for lidar_name, lidar_type in PANDASET_LIDAR_MAPPING.items(): + lidar_metadata[lidar_type] = LiDARMetadata( + lidar_type=lidar_type, + lidar_index=PandasetLidarIndex, + extrinsic=PANDASET_LIDAR_EXTRINSICS[ + lidar_name + ], # TODO: These extrinsics are incorrect, and need to be transformed correctly. + ) + + return lidar_metadata def _extract_pandaset_sensor_ego_state(gps: Dict[str, float], lidar_pose: Dict[str, Dict[str, float]]) -> EgoStateSE3: - rear_axle_pose = _main_lidar_to_rear_axle( - StateSE3( - x=lidar_pose["position"]["x"], - y=lidar_pose["position"]["y"], - z=lidar_pose["position"]["z"], - qw=lidar_pose["heading"]["w"], - qx=lidar_pose["heading"]["x"], - qy=lidar_pose["heading"]["y"], - qz=lidar_pose["heading"]["z"], - ) - ) - # rear_axle_pose = translate_se3_along_body_frame( - # main_lidar_pose, - # vector_3d=Vector3D(x=-0.83, y=0.0, z=0.0), - # ) + rear_axle_pose = main_lidar_to_rear_axle(pandaset_pose_dict_to_state_se3(lidar_pose)) vehicle_parameters = get_pandaset_chrysler_pacifica_parameters() center = rear_axle_se3_to_center_se3(rear_axle_se3=rear_axle_pose, vehicle_parameters=vehicle_parameters) @@ -234,7 +250,7 @@ def _extract_pandaset_box_detections( # - attributes.pedestrian_behavior # - attributes.pedestrian_age # - attributes.rider_status - # https://github.com/scaleapi/pandaset-devkit/blob/59be180e2a3f3e37f6d66af9e67bf944ccbf6ec0/README.md?plain=1#L288 + # https://github.com/scaleapi/pandaset-devkit/blob/master/README.md?plain=1#L288 iteration_str = f"{iteration:02d}" cuboids_file = source_log_path / "annotations" / "cuboids" / f"{iteration_str}.pkl.gz" @@ -242,7 +258,7 @@ def _extract_pandaset_box_detections( if not cuboids_file.exists(): return BoxDetectionWrapper(box_detections=[]) - cuboid_df = _read_pkl_gz(cuboids_file) + cuboid_df: pd.DataFrame = read_pkl_gz(cuboids_file) # Read cuboid data box_label_names = list(cuboid_df["label"]) @@ -271,15 +287,36 @@ def _extract_pandaset_box_detections( box_se3_array[:, BoundingBoxSE3Index.QUATERNION] = get_quaternion_array_from_euler_array(box_euler_angles_array) box_se3_array[:, BoundingBoxSE3Index.EXTENT] = np.stack([box_lengths, box_widths, box_heights], axis=-1) + # NOTE: Pandaset annotates moving bounding boxes twice (for synchronization reasons), + # if they are in the overlap area between the top 360° lidar and the front-facing lidar (and moving). + # The value in `cuboids.sensor_id` is either + # - `0` (mechanical 360° LiDAR) + # - `1` (front-facing LiDAR). + # - All other cuboids have value `-1`. 
+ + # To avoid duplicate bounding boxes, we only keep boxes from the front lidar (sensor_id == 1), if they do not + # have a sibling box in the top lidar (sensor_id == 0). Otherwise, all boxes with sensor_id == {-1, 0} are kept. + # https://github.com/scaleapi/pandaset-devkit/blob/master/python/pandaset/annotations.py#L166 + # https://github.com/scaleapi/pandaset-devkit/issues/26 + + top_lidar_uuids = set(cuboid_df[cuboid_df["cuboids.sensor_id"] == 0]["uuid"]) + sensor_ids = cuboid_df["cuboids.sensor_id"].to_list() + sibling_ids = cuboid_df["cuboids.sibling_id"].to_list() + # Fill bounding box detections and return box_detections: List[BoxDetectionSE3] = [] for box_idx in range(num_boxes): + + # Skip duplicate box detections from front lidar if sibling exists in top lidar + if sensor_ids[box_idx] == 1 and sibling_ids[box_idx] in top_lidar_uuids: + continue + pandaset_box_detection_type = PANDASET_BOX_DETECTION_FROM_STR[box_label_names[box_idx]] box_detection_type = PANDASET_BOX_DETECTION_TO_DEFAULT[pandaset_box_detection_type] # Convert coordinates to ISO 8855 # NOTE: This would be faster over a batch operation. - box_se3_array[box_idx, BoundingBoxSE3Index.STATE_SE3] = _rotate_pose_to_iso_coordinates( + box_se3_array[box_idx, BoundingBoxSE3Index.STATE_SE3] = rotate_pandaset_pose_to_iso_coordinates( StateSE3.from_array(box_se3_array[box_idx, BoundingBoxSE3Index.STATE_SE3], copy=False) ).array @@ -310,39 +347,22 @@ def _extract_pandaset_sensor_camera( if dataset_converter_config.include_cameras: for camera_name, camera_type in PANDASET_CAMERA_MAPPING.items(): - image_rel_path = f"camera/{camera_name}/{iteration_str}.jpg" - image_abs_path = source_log_path / image_rel_path + image_abs_path = source_log_path / f"camera/{camera_name}/{iteration_str}.jpg" assert image_abs_path.exists(), f"Camera image file {str(image_abs_path)} does not exist." camera_pose_dict = camera_poses[camera_name][iteration] - camera_extrinsic = _rotate_pose_to_iso_coordinates( - StateSE3( - x=camera_pose_dict["position"]["x"], - y=camera_pose_dict["position"]["y"], - z=camera_pose_dict["position"]["z"], - qw=camera_pose_dict["heading"]["w"], - qx=camera_pose_dict["heading"]["x"], - qy=camera_pose_dict["heading"]["y"], - qz=camera_pose_dict["heading"]["z"], - ) - ) - # camera_extrinsic = StateSE3( - # x=camera_pose_dict["position"]["x"], - # y=camera_pose_dict["position"]["y"], - # z=camera_pose_dict["position"]["z"], - # qw=camera_pose_dict["heading"]["w"], - # qx=camera_pose_dict["heading"]["x"], - # qy=camera_pose_dict["heading"]["y"], - # qz=camera_pose_dict["heading"]["z"], - # ) + camera_extrinsic = pandaset_pose_dict_to_state_se3(camera_pose_dict) + # camera_extrinsic = rotate_pandaset_pose_to_iso_coordinates(camera_extrinsic) + camera_extrinsic = StateSE3.from_array( convert_absolute_to_relative_se3_array(ego_state_se3.rear_axle_se3, camera_extrinsic.array), copy=True ) camera_data = None if dataset_converter_config.camera_store_option == "path": - camera_data = str(image_rel_path) + pandaset_data_root = source_log_path.parent + camera_data = str(image_abs_path.relative_to(pandaset_data_root)) elif dataset_converter_config.camera_store_option == "binary": with open(image_abs_path, "rb") as f: camera_data = f.read() @@ -351,76 +371,22 @@ def _extract_pandaset_sensor_camera( return camera_dict -def _extract_lidar(lidar_pc, dataset_converter_config: DatasetConverterConfig) -> Dict[LiDARType, Optional[str]]: - # TODO: Implement this function to extract lidar data. 
- return {} - - -def _read_json(json_file: Path): - """Helper function to read a json file as dict.""" - with open(json_file, "r") as f: - json_data = json.load(f) - return json_data - - -def _read_pkl_gz(pkl_gz_file: Path): - """Helper function to read a pkl.gz file as dict.""" - with gzip.open(pkl_gz_file, "rb") as f: - pkl_data = pickle.load(f) - return pkl_data - - -def _rotate_pose_to_iso_coordinates(pose: StateSE3) -> StateSE3: - """Helper function for pandaset to rotate a pose to ISO coordinate system (x: forward, y: left, z: up). +def _extract_pandaset_lidar( + source_log_path: Path, iteration: int, ego_state_se3: EgoStateSE3, dataset_converter_config: DatasetConverterConfig +) -> Dict[LiDARType, Optional[Union[str, np.ndarray]]]: - NOTE: Pandaset uses a different coordinate system (x: right, y: forward, z: up). - [1] https://arxiv.org/pdf/2112.12610.pdf - - :param pose: The input pose. - :return: The rotated pose. - """ - F = np.array( - [ - [0.0, 1.0, 0.0], # new X = old Y (forward) - [-1.0, 0.0, 0.0], # new Y = old -X (left) - [0.0, 0.0, 1.0], # new Z = old Z (up) - ], - dtype=np.float64, - ).T - # F = np.eye(3, dtype=np.float64) - transformation_matrix = pose.transformation_matrix.copy() - transformation_matrix[0:3, 0:3] = transformation_matrix[0:3, 0:3] @ F - - # transformation_matrix[0, 3] = pose.y - # transformation_matrix[1, 3] = -pose.x - # transformation_matrix[2, 3] = pose.z - - return StateSE3.from_transformation_matrix(transformation_matrix) - - -def _main_lidar_to_rear_axle(pose: StateSE3) -> StateSE3: - - F = np.array( - [ - [0.0, 1.0, 0.0], # new X = old Y (forward) - [-1.0, 0.0, 0.0], # new Y = old X (left) - [0.0, 0.0, 1.0], # new Z = old Z (up) - ], - dtype=np.float64, - ).T - # F = np.eye(3, dtype=np.float64) - transformation_matrix = pose.transformation_matrix.copy() - transformation_matrix[0:3, 0:3] = transformation_matrix[0:3, 0:3] @ F - - rotated_pose = StateSE3.from_transformation_matrix(transformation_matrix) - - imu_pose = translate_se3_along_body_frame( - rotated_pose, - vector_3d=Vector3D(x=-0.840, y=0.0, z=0.0), - ) + lidar_dict: Dict[LiDARType, Optional[Union[str, np.ndarray]]] = {} + if dataset_converter_config.include_lidars: + iteration_str = f"{iteration:02d}" + lidar_absolute_path = source_log_path / "lidar" / f"{iteration_str}.pkl.gz" + assert lidar_absolute_path.exists(), f"LiDAR file {str(lidar_absolute_path)} does not exist." 
- # transformation_matrix[0, 3] = pose.y - # transformation_matrix[1, 3] = -pose.x - # transformation_matrix[2, 3] = pose.z + if dataset_converter_config.lidar_store_option == "path": + pandaset_data_root = source_log_path.parent + lidar_relative_path = str(lidar_absolute_path.relative_to(pandaset_data_root)) + lidar_dict[LiDARType.LIDAR_FRONT] = lidar_relative_path + lidar_dict[LiDARType.LIDAR_TOP] = lidar_relative_path + elif dataset_converter_config.lidar_store_option == "binary": + lidar_dict = load_pandaset_lidars_pc_from_path(lidar_absolute_path, ego_state_se3) - return imu_pose + return lidar_dict diff --git a/src/py123d/conversion/datasets/pandaset/pandaset_sensor_loading.py b/src/py123d/conversion/datasets/pandaset/pandaset_sensor_loading.py new file mode 100644 index 00000000..503b3dff --- /dev/null +++ b/src/py123d/conversion/datasets/pandaset/pandaset_sensor_loading.py @@ -0,0 +1,45 @@ +from pathlib import Path +from typing import Dict, Union + +import numpy as np +import pandas as pd + +from py123d.conversion.datasets.pandaset.pandaset_utlis import read_pkl_gz +from py123d.conversion.utils.sensor_utils.lidar_index_registry import PandasetLidarIndex +from py123d.datatypes.sensors.lidar.lidar import LiDARType +from py123d.datatypes.vehicle_state.ego_state import EgoStateSE3 +from py123d.geometry.transform.transform_se3 import convert_absolute_to_relative_points_3d_array + + +def load_pandaset_global_lidar_pc_from_path(pkl_gz_path: Union[Path, str]) -> Dict[LiDARType, np.ndarray]: + # NOTE: The Pandaset dataset stores both front and top LiDAR data in the same gzip-pickle file. + # We need to separate them based on the laser_number field. + # See here: https://github.com/scaleapi/pandaset-devkit/blob/master/python/pandaset/sensors.py#L160 + + all_lidar_df = read_pkl_gz(pkl_gz_path) + top_lidar_df: pd.DataFrame = all_lidar_df[all_lidar_df["d"] == 0] + front_lidar_df: pd.DataFrame = all_lidar_df[all_lidar_df["d"] == 1] + + # Remove the 't' (timestamp) and 'd' (laser id) columns + top_lidar_df = top_lidar_df.drop(columns=["t", "d"]) + front_lidar_df = front_lidar_df.drop(columns=["t", "d"]) + + return {LiDARType.LIDAR_TOP: top_lidar_df.to_numpy(), LiDARType.LIDAR_FRONT: front_lidar_df.to_numpy()} + + +def load_pandaset_lidars_pc_from_path( + pkl_gz_path: Union[Path, str], ego_state_se3: EgoStateSE3 +) -> Dict[LiDARType, np.ndarray]: + + lidar_pc_dict = load_pandaset_global_lidar_pc_from_path(pkl_gz_path) + + for lidar_type in lidar_pc_dict.keys(): + lidar_pc_dict[lidar_type][..., PandasetLidarIndex.XYZ] = convert_absolute_to_relative_points_3d_array( + ego_state_se3.rear_axle_se3, + lidar_pc_dict[lidar_type][..., PandasetLidarIndex.XYZ], + ) + + # relative_points = (points_3d_array - t_origin) @ R_origin + + # Pass the loaded point clouds to the appropriate LiDAR types + return lidar_pc_dict diff --git a/src/py123d/conversion/datasets/pandaset/pandaset_utlis.py b/src/py123d/conversion/datasets/pandaset/pandaset_utlis.py new file mode 100644 index 00000000..caab4da1 --- /dev/null +++ b/src/py123d/conversion/datasets/pandaset/pandaset_utlis.py @@ -0,0 +1,96 @@ +import gzip +import json +import pickle +from pathlib import Path +from typing import Dict + +import numpy as np + +from py123d.geometry import StateSE3, Vector3D +from py123d.geometry.transform.transform_se3 import translate_se3_along_body_frame + + +def read_json(json_file: Path): + """Helper function to read a json file as dict.""" + with open(json_file, "r") as f: + json_data = json.load(f) + return json_data + + +def 
read_pkl_gz(pkl_gz_file: Path): + """Helper function to read a pkl.gz file as dict.""" + with gzip.open(pkl_gz_file, "rb") as f: + pkl_data = pickle.load(f) + return pkl_data + + +def pandaset_pose_dict_to_state_se3(pose_dict: Dict[str, Dict[str, float]]) -> StateSE3: + """Helper function for pandaset to convert a pose dict to StateSE3. + + :param pose_dict: The input pose dict. + :return: The converted StateSE3. + """ + return StateSE3( + x=pose_dict["position"]["x"], + y=pose_dict["position"]["y"], + z=pose_dict["position"]["z"], + qw=pose_dict["heading"]["w"], + qx=pose_dict["heading"]["x"], + qy=pose_dict["heading"]["y"], + qz=pose_dict["heading"]["z"], + ) + + +def rotate_pandaset_pose_to_iso_coordinates(pose: StateSE3) -> StateSE3: + """Helper function for pandaset to rotate a pose to ISO coordinate system (x: forward, y: left, z: up). + + NOTE: Pandaset uses a different coordinate system (x: right, y: forward, z: up). + [1] https://arxiv.org/pdf/2112.12610.pdf + + :param pose: The input pose. + :return: The rotated pose. + """ + F = np.array( + [ + [0.0, 1.0, 0.0], # new X = old Y (forward) + [-1.0, 0.0, 0.0], # new Y = old -X (left) + [0.0, 0.0, 1.0], # new Z = old Z (up) + ], + dtype=np.float64, + ).T + transformation_matrix = pose.transformation_matrix.copy() + transformation_matrix[0:3, 0:3] = transformation_matrix[0:3, 0:3] @ F + + # transformation_matrix[0, 3] = pose.y + # transformation_matrix[1, 3] = -pose.x + # transformation_matrix[2, 3] = pose.z + + return StateSE3.from_transformation_matrix(transformation_matrix) + + +def main_lidar_to_rear_axle(pose: StateSE3) -> StateSE3: + + F = np.array( + [ + [0.0, 1.0, 0.0], # new X = old Y (forward) + [-1.0, 0.0, 0.0], # new Y = old X (left) + [0.0, 0.0, 1.0], # new Z = old Z (up) + ], + dtype=np.float64, + ).T + # F = np.eye(3, dtype=np.float64) + transformation_matrix = pose.transformation_matrix.copy() + transformation_matrix[0:3, 0:3] = transformation_matrix[0:3, 0:3] @ F + + rotated_pose = StateSE3.from_transformation_matrix(transformation_matrix) + + imu_pose = translate_se3_along_body_frame( + rotated_pose, + vector_3d=Vector3D(x=-0.840, y=0.0, z=0.0), + ) + + # transformation_matrix[0, 3] = pose.y + # transformation_matrix[1, 3] = -pose.x + # transformation_matrix[2, 3] = pose.z + + return imu_pose diff --git a/src/py123d/conversion/log_writer/utils/lidar_compression.py b/src/py123d/conversion/log_writer/utils/lidar_compression.py index db7749c2..fffc0e76 100644 --- a/src/py123d/conversion/log_writer/utils/lidar_compression.py +++ b/src/py123d/conversion/log_writer/utils/lidar_compression.py @@ -54,9 +54,11 @@ def decompress_lidar_from_laz(laz_binary: bytes, lidar_metadata: LiDARMetadata) las = laspy.read(buffer) # Extract the point cloud data - num_points = len(las.x) + xyz = las.xyz + + num_points = len(xyz) point_cloud = np.zeros((num_points, len(lidar_metadata.lidar_index)), dtype=np.float32) - point_cloud[:, lidar_index.XYZ] = np.vstack((las.x, las.y, las.z)).T + point_cloud[:, lidar_index.XYZ] = xyz for feature in lidar_index: if feature.name in ["X", "Y", "Z"]: diff --git a/src/py123d/conversion/utils/sensor_utils/lidar_index_registry.py b/src/py123d/conversion/utils/sensor_utils/lidar_index_registry.py index 35abe287..7684b685 100644 --- a/src/py123d/conversion/utils/sensor_utils/lidar_index_registry.py +++ b/src/py123d/conversion/utils/sensor_utils/lidar_index_registry.py @@ -66,10 +66,20 @@ class WOPDLidarIndex(LiDARIndex): class AVSensorLidarIndex(LiDARIndex): """Argoverse Sensor LiDAR Indexing Scheme. 
- NOTE: The LiDAR files also include 'laser_number', 'offset_ns', which we do not currently include. + NOTE: The LiDAR files also include, 'offset_ns', which we do not currently include. """ X = 0 Y = 1 Z = 2 INTENSITY = 3 + + +@register_lidar_index +class PandasetLidarIndex(LiDARIndex): + """Pandaset LiDAR Indexing Scheme.""" + + X = 0 + Y = 1 + Z = 2 + INTENSITY = 3 diff --git a/src/py123d/script/config/common/default_dataset_paths.yaml b/src/py123d/script/config/common/default_dataset_paths.yaml index acd2e59b..b2dc2027 100644 --- a/src/py123d/script/config/common/default_dataset_paths.yaml +++ b/src/py123d/script/config/common/default_dataset_paths.yaml @@ -16,4 +16,8 @@ dataset_paths: av2_sensor_data_root: ${dataset_paths.av2_data_root}/sensor # WOPD defaults - wopd_data_root: ${oc.env:WOPD_DATA_ROOT,null} \ No newline at end of file + wopd_data_root: ${oc.env:WOPD_DATA_ROOT,null} + + + # Pandaset defaults + pandaset_data_root: ${oc.env:PANDASET_DATA_ROOT,null} \ No newline at end of file diff --git a/src/py123d/script/config/conversion/datasets/pandaset_dataset.yaml b/src/py123d/script/config/conversion/datasets/pandaset_dataset.yaml index 683b1899..6f340e02 100644 --- a/src/py123d/script/config/conversion/datasets/pandaset_dataset.yaml +++ b/src/py123d/script/config/conversion/datasets/pandaset_dataset.yaml @@ -3,7 +3,7 @@ pandaset_dataset: _convert_: 'all' splits: ["pandaset_train", "pandaset_val", "pandaset_test"] - pandaset_data_root: "/media/nvme1/pandaset" + pandaset_data_root: ${dataset_paths.pandaset_data_root} dataset_converter_config: _target_: py123d.conversion.dataset_converter_config.DatasetConverterConfig @@ -30,8 +30,8 @@ pandaset_dataset: camera_store_option: "binary" # "path", "binary", "mp4" # LiDARs - include_lidars: false - lidar_store_option: "path" # "path", "binary" + include_lidars: true + lidar_store_option: "binary" # "path", "binary" # Scenario tag / Route # NOTE: These are only supported for nuPlan. Consider removing or expanding support. 
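
NOTE (illustrative): the frame change applied by `rotate_pandaset_pose_to_iso_coordinates` in `pandaset_utlis.py` above can be sanity-checked with plain numpy, without any py123d imports. The sketch below re-creates the change-of-basis matrix `F` from the diff and assumes the same row-major 4x4 transformation-matrix convention; it confirms that post-multiplying by `F` re-labels the body axes from the Pandaset convention (x: right, y: forward, z: up) to ISO 8855 (x: forward, y: left, z: up) while leaving the sensor's pose in the world untouched.

    import numpy as np

    # Change-of-basis from Pandaset body axes to ISO 8855 body axes,
    # copied from rotate_pandaset_pose_to_iso_coordinates above.
    F = np.array(
        [
            [0.0, 1.0, 0.0],   # new X = old Y (forward)
            [-1.0, 0.0, 0.0],  # new Y = old -X (left)
            [0.0, 0.0, 1.0],   # new Z = old Z (up)
        ],
        dtype=np.float64,
    ).T

    # Identity pose expressed in the Pandaset convention.
    transformation_matrix = np.eye(4, dtype=np.float64)
    transformation_matrix[0:3, 0:3] = transformation_matrix[0:3, 0:3] @ F

    # Column j of the rotation block is the new body j-axis in world
    # coordinates: the ISO x-axis now points along the old y (forward),
    # the ISO y-axis along the old -x (left), and z is unchanged.
    assert np.allclose(transformation_matrix[0:3, 0], [0.0, 1.0, 0.0])
    assert np.allclose(transformation_matrix[0:3, 1], [-1.0, 0.0, 0.0])
    assert np.allclose(transformation_matrix[0:3, 2], [0.0, 0.0, 1.0])

Likewise, the sibling-cuboid filter added to `_extract_pandaset_box_detections` can be illustrated on a toy table (the uuids here are made up and only the relevant columns are shown): a moving box annotated by both lidars keeps its top-lidar copy only, while a box seen exclusively by the front lidar survives.

    import pandas as pd

    # Toy cuboid table: "b" was annotated twice (once per lidar, linked via
    # sibling_id), "c" exists only in the front lidar, "a" is an ordinary box.
    cuboid_df = pd.DataFrame(
        {
            "uuid": ["a", "b-top", "b-front", "c"],
            "cuboids.sensor_id": [-1, 0, 1, 1],
            "cuboids.sibling_id": [None, "b-front", "b-top", None],
        }
    )

    top_lidar_uuids = set(cuboid_df[cuboid_df["cuboids.sensor_id"] == 0]["uuid"])
    keep = [
        not (sensor_id == 1 and sibling_id in top_lidar_uuids)
        for sensor_id, sibling_id in zip(
            cuboid_df["cuboids.sensor_id"], cuboid_df["cuboids.sibling_id"]
        )
    ]

    # "b-front" is dropped because its sibling "b-top" is already kept.
    assert cuboid_df[keep]["uuid"].tolist() == ["a", "b-top", "c"]
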
diff --git a/src/py123d/visualization/viser/elements/detection_elements.py b/src/py123d/visualization/viser/elements/detection_elements.py index 639c1632..91e38764 100644 --- a/src/py123d/visualization/viser/elements/detection_elements.py +++ b/src/py123d/visualization/viser/elements/detection_elements.py @@ -32,13 +32,26 @@ def add_box_detections_to_viser_server( visible=True, ) elif viser_config.bounding_box_type == "lines": - lines, colors = _get_bounding_box_outlines(scene, scene_interation, initial_ego_state) + lines, colors, se3_array = _get_bounding_box_outlines(scene, scene_interation, initial_ego_state) viser_server.scene.add_line_segments( "box_detections", points=lines, colors=colors, line_width=viser_config.bounding_box_line_width, ) + viser_server.scene.add_batched_axes( + "frames", + batched_wxyzs=se3_array[:-1, StateSE3Index.QUATERNION], + batched_positions=se3_array[:-1, StateSE3Index.XYZ], + ) + ego_rear_axle_se3 = scene.get_ego_state_at_iteration(scene_interation).rear_axle_se3.array + ego_rear_axle_se3[StateSE3Index.XYZ] -= initial_ego_state.center_se3.array[StateSE3Index.XYZ] + viser_server.scene.add_frame( + "ego_rear_axle", + position=ego_rear_axle_se3[StateSE3Index.XYZ], + wxyz=ego_rear_axle_se3[StateSE3Index.QUATERNION], + ) + else: raise ValueError(f"Unknown bounding box type: {viser_config.bounding_box_type}") @@ -74,6 +87,34 @@ def _get_bounding_box_meshes(scene: AbstractScene, iteration: int, initial_ego_s return mesh +# def _get_bounding_box_outlines( +# scene: AbstractScene, iteration: int, initial_ego_state: EgoStateSE3 +# ) -> npt.NDArray[np.float64]: + +# ego_vehicle_state = scene.get_ego_state_at_iteration(iteration) +# box_detections = scene.get_box_detections_at_iteration(iteration) + +# # Load boxes to visualize, including ego vehicle at the last position +# boxes = [bd.bounding_box_se3 for bd in box_detections.box_detections] + [ego_vehicle_state.bounding_box_se3] +# boxes_type = [bd.metadata.detection_type for bd in box_detections.box_detections] + [DetectionType.EGO] + +# # Create lines for all boxes +# box_se3_array = np.array([box.array for box in boxes]) +# box_se3_array[..., BoundingBoxSE3Index.XYZ] -= initial_ego_state.center_se3.array[StateSE3Index.XYZ] +# box_corners_array = bbse3_array_to_corners_array(box_se3_array) +# box_outlines = corners_array_to_edge_lines(box_corners_array) + +# # Create colors for all boxes +# box_colors = np.zeros(box_outlines.shape, dtype=np.float32) +# for i, box_type in enumerate(boxes_type): +# box_colors[i, ...] 
= BOX_DETECTION_CONFIG[box_type].fill_color.rgb_norm + +# box_outlines = box_outlines.reshape(-1, *box_outlines.shape[2:]) +# box_colors = box_colors.reshape(-1, *box_colors.shape[2:]) + +# return box_outlines, box_colors + + def _get_bounding_box_outlines( scene: AbstractScene, iteration: int, initial_ego_state: EgoStateSE3 ) -> npt.NDArray[np.float64]: @@ -99,4 +140,4 @@ def _get_bounding_box_outlines( box_outlines = box_outlines.reshape(-1, *box_outlines.shape[2:]) box_colors = box_colors.reshape(-1, *box_colors.shape[2:]) - return box_outlines, box_colors + return box_outlines, box_colors, box_se3_array diff --git a/src/py123d/visualization/viser/elements/sensor_elements.py b/src/py123d/visualization/viser/elements/sensor_elements.py index 758140bd..d6901077 100644 --- a/src/py123d/visualization/viser/elements/sensor_elements.py +++ b/src/py123d/visualization/viser/elements/sensor_elements.py @@ -6,6 +6,7 @@ import numpy.typing as npt import viser +from py123d.common.utils.timer import Timer from py123d.visualization.viser.viser_config import ViserConfig from py123d.datatypes.scene.abstract_scene import AbstractScene from py123d.datatypes.sensors.camera.pinhole_camera import PinholeCamera, PinholeCameraType @@ -106,9 +107,12 @@ def add_lidar_pc_to_viser_server( ) -> None: if viser_config.lidar_visible: + timer = Timer() + timer.start() scene_center_array = initial_ego_state.center.point_3d.array ego_pose = scene.get_ego_state_at_iteration(scene_interation).rear_axle_se3.array ego_pose[StateSE3Index.XYZ] -= scene_center_array + timer.log("1. prepare ego pose") def _load_lidar_points(lidar_type: LiDARType) -> npt.NDArray[np.float32]: lidar = scene.get_lidar_at_iteration(scene_interation, lidar_type) @@ -125,11 +129,13 @@ def _load_lidar_points(lidar_type: LiDARType) -> npt.NDArray[np.float32]: for future in concurrent.futures.as_completed(future_to_lidar): lidar_points_3d_list.append(future.result()) + timer.log("2. load lidar points") points_3d_local = ( np.concatenate(lidar_points_3d_list, axis=0) if lidar_points_3d_list else np.zeros((0, 3), dtype=np.float32) ) points = convert_relative_to_absolute_points_3d_array(ego_pose, points_3d_local) colors = np.zeros_like(points) + timer.log("3. convert lidar points") if lidar_pc_handle is not None: lidar_pc_handle.points = points @@ -142,6 +148,8 @@ def _load_lidar_points(lidar_type: LiDARType) -> npt.NDArray[np.float32]: point_size=viser_config.lidar_point_size, point_shape=viser_config.lidar_point_shape, ) + timer.log("4. 
add lidar to viser server") + timer.end() def _get_camera_values( diff --git a/src/py123d/visualization/viser/viser_config.py b/src/py123d/visualization/viser/viser_config.py index 843a008d..b5923db5 100644 --- a/src/py123d/visualization/viser/viser_config.py +++ b/src/py123d/visualization/viser/viser_config.py @@ -51,7 +51,7 @@ class ViserConfig: # Bounding boxes bounding_box_visible: bool = True - bounding_box_type: Literal["mesh", "lines"] = "mesh" + bounding_box_type: Literal["mesh", "lines"] = "lines" bounding_box_line_width: float = 4.0 # Cameras diff --git a/test_viser.py b/test_viser.py index d395dfd9..6a1b2cf0 100644 --- a/test_viser.py +++ b/test_viser.py @@ -11,8 +11,9 @@ # splits = ["nuplan_private_test"] # splits = ["carla"] # splits = ["wopd_val"] - splits = ["av2-sensor_train"] + # splits = ["av2-sensor_train"] # splits = ["pandaset_test", "pandaset_val", "pandaset_train"] + splits = ["pandaset_test"] log_names = None scene_uuids = None From 54100692d1a164c3b23a7b4be47fbe9469660ea1 Mon Sep 17 00:00:00 2001 From: Daniel Dauner Date: Sat, 18 Oct 2025 14:37:35 +0200 Subject: [PATCH 097/145] Add support for Pandaset loading from sensor paths. --- .../scene/arrow/utils/arrow_getters.py | 23 +++++++++++++++---- .../conversion/datasets/pandaset_dataset.yaml | 4 ++-- 2 files changed, 20 insertions(+), 7 deletions(-) diff --git a/src/py123d/datatypes/scene/arrow/utils/arrow_getters.py b/src/py123d/datatypes/scene/arrow/utils/arrow_getters.py index 8996b71c..e3bf5106 100644 --- a/src/py123d/datatypes/scene/arrow/utils/arrow_getters.py +++ b/src/py123d/datatypes/scene/arrow/utils/arrow_getters.py @@ -7,6 +7,7 @@ from omegaconf import DictConfig import pyarrow as pa +from py123d.conversion.datasets.pandaset.pandaset_sensor_loading import load_pandaset_lidars_pc_from_path from py123d.datatypes.detections.detection import ( BoxDetection, BoxDetectionMetadata, @@ -32,6 +33,7 @@ "nuplan": DATASET_PATHS.nuplan_sensor_root, "av2-sensor": DATASET_PATHS.av2_sensor_data_root, "wopd": DATASET_PATHS.wopd_data_root, + "pandaset": DATASET_PATHS.pandaset_data_root, } @@ -137,12 +139,8 @@ def get_lidar_from_arrow_table( lidar_type: LiDARType, log_metadata: LogMetadata, ) -> LiDAR: - lidar: Optional[LiDAR] = None - - # assert ( - # f"{lidar_type.serialize()}_data" in arrow_table.schema.names - # ), f'"{lidar_type.serialize()}" field not found in Arrow table schema.' + lidar: Optional[LiDAR] = None lidar_column_name = f"{lidar_type.serialize()}_data" if lidar_column_name in arrow_table.schema.names: @@ -162,21 +160,36 @@ def get_lidar_from_arrow_table( from py123d.conversion.datasets.nuplan.nuplan_load_sensor import load_nuplan_lidar_from_path lidar = load_nuplan_lidar_from_path(full_lidar_path, lidar_metadata) + elif log_metadata.dataset == "carla": from py123d.conversion.datasets.carla.carla_load_sensor import load_carla_lidar_from_path lidar = load_carla_lidar_from_path(full_lidar_path, lidar_metadata) + elif log_metadata.dataset == "av2-sensor": from py123d.conversion.datasets.av2.utils.av2_sensor_loading import load_av2_sensor_lidar_pc_from_path lidar_pc_dict = load_av2_sensor_lidar_pc_from_path(full_lidar_path) + assert ( lidar_type in lidar_pc_dict ), f"LiDAR type {lidar_type} not found in AV2 sensor data at {full_lidar_path}." 
lidar = LiDAR(metadata=lidar_metadata, point_cloud=lidar_pc_dict[lidar_type]) + elif log_metadata.dataset == "wopd": raise NotImplementedError + elif log_metadata.dataset == "pandaset": + + ego_state_se3 = get_ego_vehicle_state_from_arrow_table( + arrow_table, index, log_metadata.vehicle_parameters + ) + + lidar_pc_dict = load_pandaset_lidars_pc_from_path(full_lidar_path, ego_state_se3) + assert ( + lidar_type in lidar_pc_dict + ), f"LiDAR type {lidar_type} not found in Pandaset data at {full_lidar_path}." + lidar = LiDAR(metadata=lidar_metadata, point_cloud=lidar_pc_dict[lidar_type]) else: raise NotImplementedError(f"Loading LiDAR data for dataset {log_metadata.dataset} is not implemented.") diff --git a/src/py123d/script/config/conversion/datasets/pandaset_dataset.yaml b/src/py123d/script/config/conversion/datasets/pandaset_dataset.yaml index 6f340e02..02df5671 100644 --- a/src/py123d/script/config/conversion/datasets/pandaset_dataset.yaml +++ b/src/py123d/script/config/conversion/datasets/pandaset_dataset.yaml @@ -27,11 +27,11 @@ pandaset_dataset: # Cameras include_cameras: true - camera_store_option: "binary" # "path", "binary", "mp4" + camera_store_option: "path" # "path", "binary", "mp4" # LiDARs include_lidars: true - lidar_store_option: "binary" # "path", "binary" + lidar_store_option: "path" # "path", "binary" # Scenario tag / Route # NOTE: These are only supported for nuPlan. Consider removing or expanding support. From 241e1e53cb502987232f20fb1f337b5b2f506e86 Mon Sep 17 00:00:00 2001 From: Daniel Dauner Date: Sun, 19 Oct 2025 12:35:53 +0200 Subject: [PATCH 098/145] Remove output path from dataset converter config (now handed to Map/LogWriter). --- src/py123d/conversion/dataset_converter_config.py | 7 +------ .../config/conversion/datasets/av2_sensor_dataset.yaml | 1 - .../script/config/conversion/datasets/carla_dataset.yaml | 1 - .../script/config/conversion/datasets/nuplan_dataset.yaml | 1 - .../config/conversion/datasets/nuplan_mini_dataset.yaml | 1 - .../config/conversion/datasets/pandaset_dataset.yaml | 1 - .../script/config/conversion/datasets/wopd_dataset.yaml | 1 - 7 files changed, 1 insertion(+), 12 deletions(-) diff --git a/src/py123d/conversion/dataset_converter_config.py b/src/py123d/conversion/dataset_converter_config.py index 43c06199..8cd9396a 100644 --- a/src/py123d/conversion/dataset_converter_config.py +++ b/src/py123d/conversion/dataset_converter_config.py @@ -1,14 +1,12 @@ from __future__ import annotations +from typing import Literal from dataclasses import dataclass -from pathlib import Path -from typing import Literal, Union @dataclass class DatasetConverterConfig: - output_path: Union[str, Path] # TODO: Remove. The directory of writing should be handled by the log/map writer. force_log_conversion: bool = False force_map_conversion: bool = False @@ -38,9 +36,6 @@ class DatasetConverterConfig: include_route: bool = False def __post_init__(self): - if isinstance(self.output_path, str): - self.output_path = Path(self.output_path) - assert self.camera_store_option != "mp4", "MP4 format is not yet supported, but planned for future releases." 
assert self.camera_store_option in [ "path", diff --git a/src/py123d/script/config/conversion/datasets/av2_sensor_dataset.yaml b/src/py123d/script/config/conversion/datasets/av2_sensor_dataset.yaml index 933862fe..261dc386 100644 --- a/src/py123d/script/config/conversion/datasets/av2_sensor_dataset.yaml +++ b/src/py123d/script/config/conversion/datasets/av2_sensor_dataset.yaml @@ -9,7 +9,6 @@ av2_sensor_dataset: _target_: py123d.conversion.dataset_converter_config.DatasetConverterConfig _convert_: 'all' - output_path: ${dataset_paths.py123d_data_root} force_log_conversion: ${force_log_conversion} force_map_conversion: ${force_map_conversion} diff --git a/src/py123d/script/config/conversion/datasets/carla_dataset.yaml b/src/py123d/script/config/conversion/datasets/carla_dataset.yaml index 0a2b5220..c28ccb76 100644 --- a/src/py123d/script/config/conversion/datasets/carla_dataset.yaml +++ b/src/py123d/script/config/conversion/datasets/carla_dataset.yaml @@ -9,7 +9,6 @@ carla_dataset: _target_: py123d.conversion.dataset_converter_config.DatasetConverterConfig _convert_: 'all' - output_path: ${dataset_paths.py123d_data_root} force_log_conversion: ${force_log_conversion} force_map_conversion: ${force_map_conversion} diff --git a/src/py123d/script/config/conversion/datasets/nuplan_dataset.yaml b/src/py123d/script/config/conversion/datasets/nuplan_dataset.yaml index 26e01ff4..22daf2c0 100644 --- a/src/py123d/script/config/conversion/datasets/nuplan_dataset.yaml +++ b/src/py123d/script/config/conversion/datasets/nuplan_dataset.yaml @@ -11,7 +11,6 @@ nuplan_dataset: _target_: py123d.conversion.dataset_converter_config.DatasetConverterConfig _convert_: 'all' - output_path: ${dataset_paths.py123d_data_root} force_log_conversion: ${force_log_conversion} force_map_conversion: ${force_map_conversion} diff --git a/src/py123d/script/config/conversion/datasets/nuplan_mini_dataset.yaml b/src/py123d/script/config/conversion/datasets/nuplan_mini_dataset.yaml index 8c03e368..265bcefb 100644 --- a/src/py123d/script/config/conversion/datasets/nuplan_mini_dataset.yaml +++ b/src/py123d/script/config/conversion/datasets/nuplan_mini_dataset.yaml @@ -11,7 +11,6 @@ nuplan_mini_dataset: _target_: py123d.conversion.dataset_converter_config.DatasetConverterConfig _convert_: 'all' - output_path: ${dataset_paths.py123d_data_root} force_log_conversion: ${force_log_conversion} force_map_conversion: ${force_map_conversion} diff --git a/src/py123d/script/config/conversion/datasets/pandaset_dataset.yaml b/src/py123d/script/config/conversion/datasets/pandaset_dataset.yaml index 02df5671..6acdd2df 100644 --- a/src/py123d/script/config/conversion/datasets/pandaset_dataset.yaml +++ b/src/py123d/script/config/conversion/datasets/pandaset_dataset.yaml @@ -9,7 +9,6 @@ pandaset_dataset: _target_: py123d.conversion.dataset_converter_config.DatasetConverterConfig _convert_: 'all' - output_path: ${dataset_paths.py123d_data_root} force_log_conversion: ${force_log_conversion} force_map_conversion: ${force_map_conversion} diff --git a/src/py123d/script/config/conversion/datasets/wopd_dataset.yaml b/src/py123d/script/config/conversion/datasets/wopd_dataset.yaml index 1e03f557..0a04e96f 100644 --- a/src/py123d/script/config/conversion/datasets/wopd_dataset.yaml +++ b/src/py123d/script/config/conversion/datasets/wopd_dataset.yaml @@ -13,7 +13,6 @@ wopd_dataset: _target_: py123d.conversion.dataset_converter_config.DatasetConverterConfig _convert_: 'all' - output_path: ${dataset_paths.py123d_data_root} force_log_conversion: ${force_log_conversion} 
force_map_conversion: ${force_map_conversion} From c65900f253f2d798feef38d76e50bb6766eac9dd Mon Sep 17 00:00:00 2001 From: Daniel Dauner Date: Sun, 19 Oct 2025 12:36:48 +0200 Subject: [PATCH 099/145] Remove CARLA dataset conversion (files directly writter in 123D format in carla garage). --- notebooks/bev_matplotlib.ipynb | 8 +- .../datasets/av2/av2_sensor_converter.py | 15 +- .../datasets/av2/utils/av2_map_conversion.py | 3 +- .../conversion/datasets/carla/__init__.py | 0 .../datasets/carla/carla_data_converter.py | 460 ---------------- .../datasets/carla/carla_load_sensor.py | 10 - .../conversion/map_writer/gpkg_map_writer.py | 66 ++- .../utils/gpkg_utils.py} | 15 + .../map_utils/opendrive/__init__ copy.py | 0 .../opendrive/opendrive_map_conversion.py | 494 +++++++----------- .../map_utils/opendrive/utils/lane_helper.py | 88 +++- .../opendrive/utils/objects_helper.py | 35 +- .../map_utils/road_edge/road_edge_2d_utils.py | 3 +- .../map_utils/road_edge/road_edge_3d_utils.py | 285 +++++----- src/py123d/geometry/polyline.py | 22 +- src/py123d/geometry/test/test_polyline.py | 8 +- .../visualization/viser/viser_config.py | 4 +- test_viser.py | 3 +- 18 files changed, 558 insertions(+), 961 deletions(-) delete mode 100644 src/py123d/conversion/datasets/carla/__init__.py delete mode 100644 src/py123d/conversion/datasets/carla/carla_data_converter.py delete mode 100644 src/py123d/conversion/datasets/carla/carla_load_sensor.py rename src/py123d/conversion/{utils/map_utils/opendrive/utils/id_mapping.py => map_writer/utils/gpkg_utils.py} (62%) delete mode 100644 src/py123d/conversion/utils/map_utils/opendrive/__init__ copy.py diff --git a/notebooks/bev_matplotlib.ipynb b/notebooks/bev_matplotlib.ipynb index ae61f557..3b13b455 100644 --- a/notebooks/bev_matplotlib.ipynb +++ b/notebooks/bev_matplotlib.ipynb @@ -24,7 +24,7 @@ }, { "cell_type": "code", - "execution_count": 3, + "execution_count": 2, "id": "1", "metadata": {}, "outputs": [], @@ -39,7 +39,7 @@ }, { "cell_type": "code", - "execution_count": 5, + "execution_count": 3, "id": "2", "metadata": {}, "outputs": [ @@ -85,13 +85,13 @@ }, { "cell_type": "code", - "execution_count": 6, + "execution_count": 4, "id": "3", "metadata": {}, "outputs": [ { "data": { - "image/png": 
"iVBORw0KGgoAAAANSUhEUgAABNIAAAGBCAYAAACjLZwtAAAAOnRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjEwLjcsIGh0dHBzOi8vbWF0cGxvdGxpYi5vcmcvTLEjVAAAAAlwSFlzAAAPYQAAD2EBqD+naQABAABJREFUeJzs3Xd4W+X1wPGv5D01PGTLe2jYkmMnIZCQhBkIG0qgUEopm9JAf0AndDBLoKwuoKWE0bIKFMJeCWRBEiDDieWd2I634yFvy0P6/SGsxNjxILblJOfzPHmor17de5SCJZ173nMULpfLhRBCCCGEEEIIIYQQYlRKbwcghBBCCCGEEEIIIcThQBJpQgghhBBCCCGEEEKMgyTShBBCCCGEEEIIIYQYB0mkCSGEEEIIIYQQQggxDpJIE0IIIYQQQgghhBBiHCSRJoQQQgghhBBCCCHEOEgiTQghhBBCCCGEEEKIcfD1dgDe4HQ6qampISwsDIVC4e1whBDisOdyuWhvb0ev16NUyj0aeZ8RQojJJe8zw8l7jRBCTK7xvtcclYm0mpoaEhISvB0GZ9+ykkTrCdN6zYH+Pv710yxczoFpve7h5NJ7PkCjT/fa9T995tcUb35L/j8Sh6XKykri4+O9HYbXzZT3GTG6a/6+Hf/A0FHXuFwuVt40mz5H56jr4jOO59yfPz/qmv7eHv61fBa4XCM+Hmeez3m/+M/oQX+jYtc63v/LteNaK8SRRN5n9pP3GiGEmBpjvdcclYm0sLAwwP2XEx4ePu3XdzqdxOrjaK4umfZEmo+vHxH6VM5fegKPPvrotF57pqurq8NkMmGvL/NqIk0dk0JoSAiVlXvl7qI4bLS1tZGQkOD5/Xq08/b7jBjd9u3bOemkk2ipKUWXmjPqWoVCQVSimYU5qTz99NMjrrn77rt5+rmXxryur38gGl0iP7jwbFasWDHksbvuuouV/35l3K9BrUsG4I033uDUU08d9/OEOFzJ+8xw8l4jhBCTa7zvNUdlIm0wOREeHu61Nx2z2UxLTYlXrq2KMVBUXCJvuN8SFhZGSGgY9royr8ah1qXQ3t6Gw+EgOjraq7EIMVGS/HWbCe8z4uDmzZuHQqGguaZkzEQagDo2nYLCgoP+fzlnzhzaHn0UR1cbAcGj//+tjjFQVFw87FyzZs2irekx+hzd+AUEjRlTWGQ8vr7+VFdXy79j4qgi7zP7yXuNEEJMjbHea6TBgJfMyrJiry31yrW1cenYbDZcB9lacrRSKBQYDAbs9V5OpMWkAlBUVOTVOIQQ4kgVHBxMYlIyLdXjex/W6A0UFxcxMDDylnuLxQJAS83Y59PoDeTl5Q87bjQaAWhtKB9XTEqlD+qYJHmvEEIIIYSYZkdlRdpMYLVa+e9r/8Plck37nTWt3khLcxMNDQ3odLppvfZMl5lhZu3XxV6NITw6EYVCQVFREYsXL/ZqLEIIcaTKslrZWT6+ynBtnIFeh4Pdu3d7El4HMpvNKJVKmmtKiEmfM+I57PXlqKKT0MYZ2PZ+DXa7HbVa7XncZDK519WVEZmQMa64wqKSKSwsHNdaIYQQR76BgQE2bNjAvn372LdvH42NjZ5/Op0u/va3v054x4vdbic/Px+bzUZ+fj4FBQX89Kc/5bzzzpuiVyHEzCeJNC+xWCw4ujvpaK4hLCJuWq+tiTMAkJeXJ4m0bzGZTLz13kdejcHXLwBVVDzFxd5N6AkhxOGou7sbf39/fHx8Rl2XlWVl/abnxnVOrd79vmmz2UZMpAUGBpKSmkbzNy0baoq24OMXQHTyLFwuJ1+/+zjb3n2C2WfeQPq8szznWrhwoeccERERaLQRtI5SFe3oasdetwd7fRmt9WW01O6mpEWqy4UQQrg99thj/PKXvwRA6eNLSLiGwFAtAaEaqgo2c/rpp3HNNdeMeR6Hw8EPLruMzz/fREN9LQAKpRKtLpmeni4am+6TRJo4qkkizUsGt4E0VxdPeyJNFZWIj68/NptNGhR/i9FopLO1CUdnKwEhKq/FER6dQmGhbNcRQoiR9Pf3U15eTnFxMcXFxZSUlFBYVERRUTE11VWcc+55vP3WqlHPYbFYaGuqxdHVTkDw/oayLpeLjuYamqtLaK4poaWmBHutO0FWV1d30PNlWa1sLS6hv8/Bh48vx9HVSrAqiqCwCJqq3FVjquhE1DEpKJU+5OXlDUmkARiNBppqSmip24O9rgx73R5a68tp+yZx1tHa6Fmri4kl22zmmquvmuhfnxBCiCNUe3s7IapILrn3I/yDwobsfHr+1nns27dvXOcpKCjgzTfewLjge8w6bzFavQFVTAq+fgFs/+Apdn34pFd2VgkxU0gizUsSExMJDgmlubqEpFknT+u1lT6+aPWp2Gy2ab3u4cCztaa+bFwNqCebc6Cf9qZqfPwCKJDtOkII4fHZZ5/xyCOPUlhUREV5Gf39/QD4+Qeg1iUTFp1M9KyzCdJX8vFHH+F0OlEqD94KdvCGVuHnr4PLRXNNCfaaElpqS3F0dwIQHBJKRkYGx580D+tNV3HVVQdPWlmtFlav3YivXwDZp1/Fl6v+TFhEHAqFEvPCZRR+8QZJ2Sfj4xeAJiZpxPdgq8XCypUrKf3yPc/1DQYDJ8w1YzKdjdFoxGQyYTAYZHKhEEKIYaKioujpbBuWRAMICtXS2Nh4kGcOZTKZUCgU6E3zMBx37pDHtHEGOjs7qKysJDExcdJiF+JwIok0L1EoFGRkZNA8jsbEU0EVk86uXXleufZMZjC4t+/Y66YukeZyuejpaHZXG9SXYa8ro7W+nPaGMlrqKxjo7wPg9KVnTMn1hRDicLRixQo+37Kd9GPPYcFxP0Qdk4Jal0KIWofigIRZxc61lH75HlVVVaN+wDebzag1Wr747/0EBgZhMps5fVE2FstlWCwWLBYLiYmJoybjDmSxWOiw76Onw87ss35CVcEmWuvLufjOt1n7/G+JSZtDUFgEAOEx6ezKG55Ie/TRRzn++ONJSUnBZDIRGxs76Xf7XS4X1dXV7Ny5k/nz56PVaif1/EIIIbwnMjKSgf5e+no68A8aesMlIFQz7oq0oKAgkpJTRhzKo4lNByA/P18SaeKoJYk0L8qelcV7n33plWtr44zYPn1GSnK/JTQ0lJjYuEmZ3Nnf20NrQ8U3/WzKsdeV0d7gTp51d7QC7oRqfEIiZpMR86IzMZlMnoqD+Pj4Q45BCCGOFGlpaeQWVbLw0t+Ouk6tSwGguLh41A/4gYGBFBbk09HRQXJy8pg91cZitVoBaK4pQW+cx6nXPsxrd5/Hpyt/RU3xFo4592bPWm2ckbzNrw07R3h4OFdfffUhxXEgh8NBQUEBubm55Obmsn2H+58tzU0ALFy0mI0b1k/a9YQQQnhXVFQUAD0dLfj4+uPjF4Cjq92dWAvRsG/f+CrSwF0lvbN8aCKtp6OFEE0M/gFB5Ofnc8YZcuNfHJ0kkeZFFouF/7z4Ei6nc8jd9Omg1Rtoa7VTU1NDXNz09mib6cxmE3vrxpdIczmddLTUDqkua6svo62hnN
bGalwudxNolVqD0WhkwaJsjMaLMZlMmEwm0tLSCAoKmsqXI4QQRwSj0Yj92efHfM8Mi4xD6eNLSUkJS5YsGfWcOp1u0obuGI1GfH19afkmkRaqieHkqx7gg7/dAEByzv6epFq9ga376mlqaiIiImJSrj/o1Vdf5Z133mHb9h0UFxV6tsBqdElo4kykL/4hEfEZ1O/ezvb1L465BVYIIcThIzIyEoDqoq9Y+9xvSMw6kb271gFgXHAh9Q17x30uq9XChs3/obm6hA8fvxHLSZfxxasrCAqPRKtPkzZB4qgmiTQvslgs9Dl6aGusRBWdNK3XPnACmSTShjKbjOS/v3bIMUdXm7uy7ICEmbu6rJy+XgcAfv7+pKamMdtkwnzOwiHVZREREVL5J4QQh8BgMNDr6KbTXk+oNvag65Q+vmh0idM++djf35/UtPQhLRuSs0/huAt/Tk97C5rYNM/xA9+DTzjhhEmLoauri8suu4zwqERiTcexIOciIhMy0MYb8Q8MHbLW1z+Q7R8+xe7duz1tDYQQQhzeBivS+nu7ADxJNJUumRB1FI3528Z9rszMTFobq9n16b9pbajgi1dXABCdMouA4PARWxQIcbSQRJoXeSZ31pROeyItLCoeP/8AbDYbp59++rRee6YzmUw0r3yWz5673V1ZVl9GZ2uT5/GY2DjMZhPmeSd7KsuMRiNJSUn4+sp/UkIIMRWMRiMA9vryURNpAGFRSRQVTW8iDSAry8qWvKHXnXPWT4atU+mSUPr4kpeXN6mJtODgYCIio4mfcwbHXXjbqGsjEjIAyM3NlUSaEEIcIQarnP0CQph9xvXkrn4WZ38fx55/Cx0tdeMeNgD7v6sa559PY0U+DeU7Achc/H1aakvJ+/hTaRMkjlryrd+L4uLiCA0Lp6WmhJQDtnxMB6XSB60+XUpyR3DGGWfw+BNPEti5hznHmDGZzh0yKS00NHTskwghhJhUKSkp+Pj40FpfRnzGglHXqqJTKCxaN02R7We1WPjwk8/GXOfj6482JmVK3oNn52RTWFUw5rrg8AhC1VHk5uZy0UUXTXocQgghpl9AQAChYeH0dDQz99zllHz5Dv19DlLmnEbpl+/R1dlBT08PgYGBY57LbDajUChobahgyfWP8trd5+NyOYnPXAhAR3sb1dXV0tdZHJUkkeZFCoUCq8VCU3WJV66vijWwUyZ3DmM2mykpLvJ2GEIIIQ7g5+dHUnIK9vryMdeqYpLJ+/R5+vr68PPzm/rgvmG1WulsbaK7vckzofNgVLGGKdkWk5OTzaav/z2utdp4Mzt25E56DEIIIbwnIiKCnvYW/AKCuej3q1D6+OLj609gqAaAxsbGcSW/goODSUhMormmBPPCZZz3i3+jUPrg6x+IRr9/cqck0sTRSLrLellWlpXWuuFjhSdLQ/kuaku2jviYVp9Ofn6+pyG+EEIIMZOZTSZaR5mq7HI6AffkzoGBAcrKDn0C80R4WjZUj/2+ro2bmqrw7Oxs2ppq6emwjx1DvJntO3ZMegxCCCHGp729nW3btvHf//6XRx55hNbW1kM+Z1RUFN0dzQAEhWkJCA73/G9gQts7s6xW7N/0/oxOmUVUkvt9LiwyHr+AQNndJI5akkjzMovFQnPNbpzOgUk9r8vpZMdHK3lzxSW89+drGOjvG7ZGozfQ2dFOZWXlpF5bCCGEmApGo4GOfe6JY12tjXQ013oea6kp5X9/XMbn/70flS4ZYNoHDqSnp+Pr50dLzdiV5hq9geamRhoaGiY1huzsbACaqgrHXBuRYKa6qpKWlpZJjUEIIcTI3n//fa6//npOOPEkdDGxhIeHM3fuXC699FJ+8YtfcO+99x7yNaKjoujpGP57fbAibd++feM+l8WSSWv97mHHlUoftLFp5Ofnf/dAhTiMydZOL7NYLPT3OWhr2Is6JmVSztnV1sSnz/yKyrz1KJQ+6M3H4eM7fGvLgVPDEhMTJ+XaQgghxFQxGo20NOxloL+PNU//nKqCL9Cl5hCdmk3+ulcY6HOgik4iRK3DPyBo2hNpfn5+GAxGmseRSBt8D87Ly+OUU06ZtBiMRiP+AQE0VhYQZ54/7HGXy0XhxtdJsCwiMt49cGDnzp2ceOKJkxaDEEKI4VwuFz+47IfgG0R06hwS5i0jS5eCSpeMWpfM6qd/PimDcqKjo9hRPHzb/nepSLNYLNgbqujr6cQvMGTIY6qYdHblSZsgcXSSRJqX7Z/cWTJiIi3vsxdpqiokOftU4jIW4OsXMOr5qvI/Z83KX+JyOjn12of5dOWvSM4eeZBBWEQc/gFB2Gw2zjzzzEN/MUIIIcQUMhqNOAf6aW+qJnn2EqoKvqCjpZbGdQXEmeazN28dqXOXolAoUMckT3siDWBWlpWN20dPpA1WoSsUCmw226Qm0nx9fcnMtNBU6a5IK97yNl32BhKzTiQ4PIpPn/01FbmfknbMmSy57lF8/QLIzc2VRJoQQkwxhUJBbGwsfvpjWHzZH4Y9rtIlU1S8+ZCvExkZSUt9ORteuoee9mYcnS04Oprpbndv9+zt7R33uTIzMwEo3/kZCoWS5poSWmpKaavfTXNtGRkZGYccrxCHI0mkeVlMTAxqjda9DWTO6cMeL9n8NnW7t5G/7hV8A4JJsCwiOftUkmad5LmrADDQ38dXq/7M9o/+RXzG8Zx6zUNU2jbicjlJzj55xGsrlEq0cQbZ2y6EEOKwYDQaAWitL8N68uVU5H5GU1URP1zxKcWbVlFTvIXELHdCKCwqmaLi6R/mY7Vaeef9j9z9R10u2ptraK4upqWmhOaaUlprS2muLaXP0QNAbGzspMcwZ3YO7326BZfLxddv/43W+nI2vfYgygOq0+MzF6L08SUizkBurgwcEEKI6WA2mdheOnL/TnV0MpvWvUR/fz++vt/9a/r555/Pq6+9jrN+O/HR0USnxBMVNZvIyEj0ej2XX375+OM1mwkIDGT1U7cCEBmlw2q1YF2wlMzMTE4/ffj3VyGOBpJI8zKFQoHFkkn9QbaBnHL1n3jtnvPRxKWTknMa5blr+Oy536BAgS59DsnZp6BLzWbT63+iscLG/At/Qc7Sa1EolZTnriE6NZtgVdRBr6+KSSd3566penlCCCHEpImLiyMwMAh7XTlJsxScfNUK/nvnOWx8+R7am2pIzDoRv4Bg4Js7+9vfnvYYLRYLXe123rz/Qlpq99Db0wVASGgYmZmZLD71OCyWq7FYLFgsFuLi4iY9huzsbJ7/9ws4B/o5+coVvPXQ5ehSsokxzCUwRM3m/z3kSThq4kxs275j0mMQQggxnNFoYP2mkQfBqXTJ9Pf1UVFRQVpa2ne+xqJFi6gon5xhO6GhoeTu2MG+ffvIyMggImL0idRCHC0kkTYDZFmtFL/32YiPqXRJnHD53axZ+QusJ/2QC29/la7WRip2fkZ57hq+fvuv9Pf2EBYZzwW/fhldag4A/X0O9uZtYM5ZPxn12to4Azve/RCn04lSKbMnhBBCzFxKpZK09HRaG8oBCFHrOPGKe/n4yZsByD79as9at
S6FbTXVdHZ2EhISMtLppsQpp5zC5T+6Al8fJRbLjz0Js4SEBBQKxbTEkJ2dzUB/L/b6MmINx3DsBbey5Y2HmXvuTZRsfouIBDOhmhjAPXDgqzffPeQKCCGEEGMzGo3Y91Ux0OfA6RzA1z+I3q429mz7mLiMBYB7UM6hJNImm8lkwmQyeTsMIWYU+cQ0A1gsFv719EoG+vtGHApgXHA+lfmfs/7Fu9Cl5qCOSSFj8cVkLL6YPkc3NUVbiEmfS0BwmOc51YWb6Xd0kZKzZNRra/QGuru7qKioICVlcoYdCDHI5XKxb98+1Go1/v7+3g5HCHEEMJmMfFVQ7vk5be4ZZCz+PnW7t5E06yTP8cHJnaWlpZ5JltNBpVLxn38/P23XG8msWbMAaKosICLOyOwzrqO2+CvWPP1zXC4XmSde6lkbEZ9Br8NBcXGxpxeOEEKIqWE0GnG5XDRVF/O/+y4c8tiZN/0DX78AiouLpX+1EDOc10uQkpOTUSgUw/4sX758xPXPPffcsLWBgYHTHPXkslgsDPT30dZQcdA1J/zwTkLU0Xzy1C0M9Dk8x/0CgkiaddKQJBpA+Y41hEcloNGnj3rtiDh3vxnpkyYOhcvloq6ujk8//ZS//e1v3HjjjSxafAIRkVHodDrOOfc8b4cohDhCmIxG2r6pSBt00o//yKX3fIB/YKjnmPqbRFpJyfT3SfM2jUZDXHyCZ+CAQqnklGv+hI9fAI5OO0lZ+wcLRCaYAaRPmhBCTIPBXp/tTdXDHovLOB5NTPJR+b4lxOHG6xVpX331FQMDA56f8/LyOO2007j44osP+pzw8HCKioo8P0/XVompYrVaAffkzoMlvvwCQzjt+j/zxv0Xs/l/D7Pw0t8e9Hwul8szkWusv5sQTQyBwWHYbDbOOeec7/4ixFHB5XJRW1tLfn4++fn52Gw28mw2bLZ8Wu0tAPj6+qOJTUEVm0764stpb6xi3TrZNiSEmBxGo5HWxhr6HN34BQQddF1gqIbgMLVXJnfOBLNzcthZVuj5OShMy5k3/YPmmhJPGwiAgBAVqkg9ubm5/OAHP/BCpEIIcfTQ6XSEhIbR1rCXy/64mlfvOY9+Rxepc8/ALyCI0MhECg/4niuEmJm8/q02KmpoI/wHHniAtLS0UcewKxQKYmJipjq0aRMVFUVEZBTNNaWMths+KsnC/It+yRf/vZ/4zONJmjXyNM7GChud9nqSc04d89oKhQJNbDp5eXnfMXpxJHK5XNTU1GCz2TxJs1278sgvKKCt1Q6Ar18AWn0q4bo0TCddiUafjlZvIDwqAaXP/l8tVQVfUPj5/ygrK8NgMHjpFQkhjhSDd/PbGiqI+Kaa6mBUupSjNpGWk5PN2o3/HHIsKslCVJJl2FpNnIkdO3ZMU2RCCHH0UigUGAwGWhsqUOmSWHjJHWx86R4Mx7oLGtS6FIpsH3o5SiHEWLyeSDtQb28vL7zwArfddtuolVQdHR0kJSXhdDqZM2cO999/PxbL8A+GgxwOBw7H/u2QbW1tkxr3ZLBYLFRVj/1hf9aSK6ku+IJPn/k137/rHULUumFrynasJiBYRUz63HFdWxWbzs5dkkg7GrlcLqqqqjzVZfn5+eTZbOTnF9De1gqAn38AWn064bo0Mk49Hk1sOhp9ujthpvQZ8xpavTt5ZrPZJJEmhDhkg4k0e0P5mIm0sKhkCgqPzjv72dnZdNj30dXaSLAqctS12ngz279+c5oiE0KIo5vZZGTD9t0AZCz+PhmLLkbxzdA3VUwyOz6qpLu7m6Cgg1ddCyG8y+s90g60atUq7HY7V1555UHXmEwmnnnmGd566y1eeOEFnE4nxx9/PFVVVQd9zooVK1CpVJ4/CQkJUxD9ocmyWmirKx1znUKh4OSrHkDp48fqp3+B0zkwbE157hoSs04ccXDBSLR6A0WFhUO22Ioji8vlYu/evXzwwQc88sgjXH311cw79jjCwlUkJiZyxhlncPsdv+O9T7fQooghc8l1nHnTP7js/tVc/fdcLvzdKpZc9whzzvoJKbOXoNYljyuJBhAUHklQqIr8/PwpfpVCiKNBREQE4So1rfXlY65V65IoLRn7vfVINDhgoamqYMy1kQkZNNTXsW/fvqkOS4ij0gMPPIBCoeCWW27xHOvp6WH58uVEREQQGhrKsmXLqK+v916QYtoYD+j1qVAoPEk0cFekuVwudu/e7aXohBDjMaMq0lauXMmZZ56JXq8/6JoFCxawYMECz8/HH388GRkZ/POf/+Tee+8d8Tm33347t912m+fntra2GZdMs1gsNP/jnwz09+LjO/p0w6CwCJZc9whvP3IF29//J3PP+annsfamapoqC5lz1o3jvrY2zoDD0UNZWRnp6aMPJxCHF5fLxXXX38DLL79MV2cHAP6BwWhj01DFppO19ES0cQY0semERcQNeSOfLAqFAo3eIAMthBCTYnBbjL2ubMy1Kl0Kzc2NNDc3o9VqpyG6mSMtLY3g4BAaKwtJsCwedW1E/P6BA0uWjD7tWwgxMV999RX//Oc/PdN0B91666289957vPbaa6hUKm666SYuvPBCPv/8cy9FKqaL0WikvWUfvd3t+AcNHRin0qUA7kE5g320hRAzz4xJpFVUVLB69WreeOONCT3Pz8+P2bNnU1p68DvOAQEBBAQEHGqIU8pqteIc6MdeX+6ZpDmaOPN85pz1E756+6/EmecTkz4HgPLcT1H6+I35oflA2rj9W+8kkXbkefXVV4lMmYv1lMvRxhkI1cROScJsNOqYNHblSSJNCDE5MswmPt0ydpXrgZM7jzvuuCmOamZRKpVYs6yeyZ2jCY9OxD8gSBJpQkyyjo4OfvjDH/Kvf/2L++67z3O8tbWVlStX8tJLL3HKKacA8Oyzz5KRkcHmzZuZP3++t0IW02CwRUFrfQVRyUOTZUFhWgKDw47a/p5CHC5mzNbOZ599lujoaM4+++wJPW9gYIBdu3YRGxs7RZFNj8Eeby3V4x93PO+8n6FLyeaTp27F0enuZ1W+Yw1607EEBIeN8ez9glXRBIWoZODAEUihUJCZmUFgqJqkWSdNWdXZWDT6dIqLimT7sBBiUhiNxjG3djqdAyh93C0OjtYvJLNzcrDXjN4jzuVy0WVvIEQTQ25u7jRFJsTRYfny5Zx99tnDEtRbt26lr69vyHGz2UxiYiKbNm066PkcDgdtbW1D/ojDz2DP4H0VeTSU7aR4y9t89dZf+eSpW1l1/4X09nTR2Ng4qdd0uVzU19ezadMmXnzxRe69915uvPFGysrGru4WQgw3IyrSnE4nzz77LD/+8Y/x9R0a0hVXXEFcXBwrVqwA4J577mH+/Pmkp6djt9t56KGHqKio4Nprr/VG6JNGq9USFR1Dc00xML5kotLHlyXXP8qrd5/H2n//lpN+vIKaoi9ZeMkdE7q2e+tdumy9O0JZLRbeXbPFqzFo9bJ9WAgxeYxGI51tzTg6Wxno78NeX0Zrfbnnnx37Kmipr6C/zz1oKCIiwssRT42Ojg5KSkooKiqirKyMq666ashU8+zs
bP719EoG+hz4+AXQ3+egpaaUpsoCGqsKaakqpLm6iK52OwCx+u976ZUIceR55ZVX2LZtG1999dWwx+rq6vD390etVg85rtPpqKurO+g5V6xYwd133z3ZoYppplar0cXEsu4/v/cci4qOwWQyYj51PkbjFVxzzTWTdr3nn3+eG2/8Kd3dXZ5jIaoIers7USgUPPHEE5N2LSGOFjMikbZ69Wr27t3L1VdfPeyxvXv3ojyggqalpYXrrruOuro6NBoNc+fO5YsvviAzM3M6Q54SVquF8glUpAGERcRx0o//yMdP3kx/bw/OgT6Ssk+Z8LXVMrnziJWZmcm/X3gRl9PplWo0cFekAeTn50siTQhxyAa3xfznl4vo6+0B3DeF4hMSMZuMmBaejtFo9PxJSUnxZriHpL+/n4qKCoqKiiguLqaoqIjComIKC4uoq632rFMoFBQWFvL88897jmVnZ+Mc6OejJ2+is7mG5prdOJ0DKBQKklNSOTYnh5zLziY7O5vs7GwSExO98RKFOOJUVlbyf//3f3zyyScEBgZO2nkPh77PYnzWfvYpO3bswGg0YjAYCAsb/26iidqwYQM+geGccfXDhEcmEBYVj39gKB8+/lN2794zZdcV4kg2IxJpp59+Oi6Xa8TH1q5dO+Tnxx57jMcee2waopp+WVYLua+9M+Hnpc09g8wTLyV/3StEJmQQFnHwYQ0Ho40zsmXzm/T39w+rChSHN4vFQp+jh/amKsKjvPMlKVgVTWBIOPn5+Zx33nleiUEIceTIzs7miSeeoL293ZMsS01NndQvrDPFKUtOY8O6tQD4+QegiUkhNCoZ/ZzzyIxJRa1LRq1L4fP//pH8gqH90HJyclhy2lI62tuYvfBUsrNvIzs7G6vVSmhoqBdejRBHh61bt9LQ0MCcOXM8xwYGBli/fj1///vf+eijj+jt7cVutw+pSquvrx9SVfpth0PfZzE+ZrMZs9k8LddKTU3F2d9DyuzThhwPj0qkdPe6aYlBiCONZExmEKvVSsvf/05/nwNfv4m9SS685LfgcpGYddJ3urZGb6Cvt5fdu3djMpm+0znEzDRYrdlcU+q1RJpCoUAr24eFEJNEqVRy443jn059OCsvKyPtmLNYcPGvCdXEHLSyWKVLofDTz3C5XCgUCgCCg4P55OMPpzNcIQRw6qmnsmvXriHHrrrqKsxmM7/+9a9JSEjAz8+PNWvWsGzZMgCKiorYu3cvCxYs8EbI4giWmppKV7sdR1cbAcHhnuPhUQnYPqtgYGAAHx8fL0YoxOFHEmkziMViweV0Yq/bQ2RCxoSe6+sfyIlX3Df2woPQ6t1NL/Py8iSRdoSJj48nJDSMlpoSkr/Dtt/JoopJl8mdQojDnsPhoLCwEJvNhs1mIy/Phn9AAM8+s3JKqrwyzCZKGnrHrDZXx6TQ1mqnsbGRqKioSY9DCDF+YWFhWK1DpzGGhIQQERHhOX7NNddw2223odVqCQ8P5+abb2bBggUysVNMutTUVADaGiuJSrR4jodHJdDf10dVVRVJSUneCk+Iw5Ik0mYQT+VQdfGEE2mHKig8guAwDTabzXNnTBwZFAoFGRkZNNfs9mocmth0tn31Dk6nc0jfQyGEmGwOh4Pi4mJsNhv5+fnkfZPwOvPMM/nLn8fXHqK3t9dzjsGE2a68PMr27PZMIA7XxhAaGU9N8df89MafcPLJJ0/6azGbzWzNe2/MdeoY9xeloqIiSaQJcRh47LHHUCqVLFu2DIfDwdKlS6Xpu5gSnkTavm8l0iLd/fX27NkjiTQhJkgSaTOIWq0mJjaO5prSab/24OTOPNl6d0TKslr4YO3XXo1BG5dOT0835eXlnjd0IYQ4VO3t7bz77rvfJLzcSbMDk11hmijUsQb6XGH88x//4NFHHh51C8vAwADnnX8BH3/0If39/UPOoU6cz6L5P0KjN6CNMxAQHI5zoJ+nl2eTl5c3JYk0o9GIveFJnAP9KH0O/rFNFZWIQqGguLiYRYsWTXocQohD8+2+z4GBgTz++OM8/vjj3glIHDUiIiIICQ2jtaGCbe//k+ScU/jyzcfod3QD7kTaVLx/CXEkk0TaDJNlzaSkZmKTOyeLWm9k167tXrm2mFoWi4UXX3rFu5M7Y/dP7pREmhBistx999088sgjhGl1qGPTUSct8CS7NLFpBIaqAai0beDdx66mvLyctLS0g56vv7+fjz78gJS5Z2I56Qdo9QYCQzUHXa/08SVCnzplPSBNJhMD/X20N1aj0h28YsDXPxBVVBxFRUVTEocQQojDk0KhICUlhar8TVQXfM6WNx72PBYeEcuePTK5U4iJkkTaDJOVlcXXL/7PK9fW6g1s2vBfent78ff390oMYmpkZmbS6+imvbmG8Mh4r8QQookhMDgMm83GOeec45UYhBBHnqCgIMI00Vz+p42jrtN80wvUZrONmkgLCAggJTWNoFANeuOx44ohPCadXbvyxh/0BAz2LbXX7xk1kQYQHp1CYaEk0oQQQgxlMKTzdWEd6phU7HXuxFnWqVfQXF0oiTQhvgNpVDTDWCwW7A176fum1HY6afXp9Pf3U1LinYo4MXUG+++1eGHb8CCFQoEmNp38/HyvxSCEOPJkZmbS3tKAo6tt1HUhah2BwWHj+h2UlWWlpXb8vy+1egO2/HxcLte4nzNecXFxBAYGYa8rP+iawd/tquhkCqUiTQghxLekpabS0VTN6Tf+DaWvHwDpx55DWEQCJaXe7aMsxOFIEmkzjMViweVyYa+d/l9oB96tF0eWxMREgkNCafbStuFBqtg0mdwphJhU471RMJFkvtVioWUCvy+1cQZa7S3U1taO+znjpVQqSTcYsNe7KwYKP3+DqoIvcA7009vTwWfP3c4rfzgT29qXUMeksmfPbk9vNyGEEALcAwdaG6vRxKRy4o/uZfaZN6BLzSE8KoGysjJvhyfEYUe2ds4wnsmdNSVEJVvHWD25gsK0hKqjJJF2BFIoFJjNZq9WpIG7amP7O+/L5E4hxKQxmUwolUqaa0qJSZ8z6lpVbDo7x7EF02q10tnaSHd7M0Fh2jHXH3gjSq/Xjy/wCTCbTWzeVUZHSx2fPfcbcLkIDNXg6x9IR7M7eaeJTcM50E9/Xx/l5eWkp6dPehxCCCEOT6mpqTgH+uloqcO8cJnneFhUAs1NjbS1tREeHu7FCIU4vMg32RkmLCyM+IREr1UOqWNlcueRalaWldY67ybSNPp0uru72Lt3r1fjEEIcOQIDA0lOSR1XBZlWn05RYSFOp3PUdRaLBWDcVWnhUQn4+QdM2Y0os8lEa0M5oZoYZp36YwB0qTlEJmaSMud0gsIiiDEcg0qXAkBxcfGUxCGEEOLwNDjoq21f5ZDj4ZEJAFKVJsQESSJtBsqyWie0pWQyaWKnrmGy8K7MzEyaa0qnpIfPeA1O7pSqRyHEZLJaLNjH0dNMozfQ09NNRUXFqOsMBgO+vr40j7OKV6n0QatPn9LJne3N9fT2dHDcsl+gjTPS3lTDaTf8hca9+aTMXoJS6UOoJga/gECZ3Cm
EEGKIpKQkFArF8ERaVCKADBwQYoIkkTYDWa0Wr1UOaeIM7NldisPh8Mr1xdTJzMykt6fLsw3IG0K1sQQEhcjAASHEpLJaLdjr3L1F+/scDPTtfw/rat3Hu3++hk2v/wmtfnzJfH9/f9LSDROqDlfFpLNz167vEP3YjEYjAK31Ffj6BbDk2kew1+/ho8eX095YRercpQAolEo0umRJpAkhhBjC398ffVw8bY1DE2lBYVr8A4MlkSbEBEkibQZyT+6soq+nc9qvrdUbGBgYkA/hR6DBrUreHDigUCjQ6mVypxBicmVmZtLWVIujq52NL9/Ls7cex8f/+Bm5Hz/Dq3edS2Xeehr35hOiiSEgKHRcv4NmzcrCPsbvywMTdhq9gfz8gimp+h1MpNnr3F90IhLMzL/wF+zNW0dAsAq9ab5nbVh0CoVFsrVTCCHEUGlpabQfUJHW5+impbaUwFC1JNKEmCAZNjAD7U94lKJLzZ7Wa2sPaJg8a9asab22mFqJiYkEBQXTUlNKUtaJXosjXJc2rmbfQggxXp6eZrWlqKIS6OvpZG/eenZ//QG61By625tIyVniSeaPZwum1WLhvQ8+weVyoVAo6GptBIWC4PAIAOpKt7H6X7eRMvs0Fl76W7T6dDra26iqqiIhIWFSX59arSYiMhp73f4eNrOWXElHSx3aOAM+vn7718akUPj1qkm9vhBCiMNfeloqX736Jm89eCntjZW0tzR4HktMTPRiZEIcfiSRNgNlZGSgUChoqSmZ9kRaQIiKMK1OelgdgZRKJSazmZZx9BGaSto4A7nvfeT5cirEdFuxYgVvvPEGhYWFBAUFcfzxx/Pggw9iMpk8a3p6evj5z3/OK6+8gsPhYOnSpTzxxBPodDovRi4OxmQyud83a0vJWXodVflf0FxTyvduf5WmqkLq9+wgOedUAFQxaezKG/s9zmKx0NXeQndbE8GqSD76x83UlW4lJnU2mjgDhRtfx+UcYGCgD3D/bgP3jajJTqQBmM1G6uv3J9IUSiULL7lj2DqVLoWtdTW0t7cTFhY25nkbGxvJz88nPz8fm82GLb+AG66/jksuuWRS4xdCCOFdN9xwA3v2lJOQEEdq6jmkpqZ6/sTGxno7PCEOK5JIm4FCQkJITEoed5PjyaaOTSdvHF8yxOFnVpaVTz7f6dUYNLHpdHV1UllZKXe/hFesW7eO5cuXM2/ePPr7+7njjjs4/fTTyc/PJyQkBIBbb72V9957j9deew2VSsVNN93EhRdeyOeff+7l6MVIgoKCSEpOoaW6FIVSySnXPMSrd53Dlv89hK9/MJGJFkK17i8JGn06ue99iNPpRKk8eIeLA7fDB6siSco6kbqSr+loqaWpqojk7FMo2/4JydnuBF1YRDz+AUHYbDbOOOOMSX+NGWYzpas3j7lO/c3kzpKSEubMmeM53tnZyVdffeVJmOXZ8sm35dPY6K5IUPr4oo1JxuHoofH+FZJIE0KII8yxxx7LZ5+t8XYYQhwRJJE2Q2VZrews99bkTgM7d23wyrXF1MrMzOS/r73h1WqwA6s2JJEmvOHDDz8c8vNzzz1HdHQ0W7du5YQTTqC1tZWVK1fy0ksvccoppwDw7LPPkpGRwebNm5k/f/5IpxVelmW1klvmvgEVoo7mlKsf5P2/Xg/AvPN/5lmn0Rvo6upk7969JCcnH/R86enp+Pn701JTQnzGAmafcT1VBZtoqSnhsvs/oWDj61Tlf0Gc6VjAXSGm1aeTlzc1W9eNRiP2F18e8/e3OsadSCsqKhqSSLv++ht46aUX8fH1QxuTQnhMGskLLmFOnAFNbDoqXRI+vv7sXP08X7/5MAMDA/j4+EzJaxFCCCGEOJzJsIEZymq1YK/1UiJNb6C8bA/d3d1eub6YOhaLBUd3B50tdV6LIVQTi39gsAwcEDNGa2srAFqtFoCtW7fS19fHkiVLPGvMZjOJiYls2rRpxHM4HA7a2tqG/BHTy2LJpPWbyZ0ASbNOJmvJj/ELCCFl9ume44OTO8f6HeTr64vRaPIMaFEolZx6zUM4nQN89uztlG9fTYJ1MT5+AZ7nqGLTp6wHpMlkwtHdSVdrw6jrAoLDCVVFDhsa1N3dTUzabK75ey4X3fUep//kr8w7/2ekHXPmN33W/AH334/D0UNZWdlIpxdCCCEOWWFhIc888ww33XQTH330kbfDEWLCJJE2Q1ksFs8EsummjTPgcrkoLCyc9muLqZWZmQngtW3D8E3VRmya9OETM4LT6eSWW25h4cKFWK1WAOrq6vD390etVg9Zq9PpqKsbOQm9YsUKVCqV589U9MgSo8vMzKS1sZreng7PseO/fzvX/G0bEfH7+9+FavUEBIWMb3JnlnXI5E53pduf2Ju3jobynZ6+a4M0egOFBQU4nc5JeEVDDfbwO3DgwMGodCkUFw+d3Gm1WuhsrhoymGAkmgOGDgkhhBCTraqqipzsHK699lqeXfksP7n+Bm+HJMSESSJthhr8QueNxvCDd+vlQ/SRJzk5mcDAIFq8mEgDd9XGeJp9CzHVli9fTl5eHq+88sohnef222+ntbXV86eysnLsJ4lJ5ZncWbO/Kk2p9EHxrT5oCoUCzTiT+RaLhZaaElwul+dYUtaJzD7zBiITLcMmIGsP2DY62VJSUvDx8cFeP3YiLVyXQn7B0JthmZmZtLfso6fDPupzg1VRBIWopGpYCCHElAgKCsLR6+Bn51zL5SdeRPneClpaWrwdlhATIom0GcpsNqNUKmmunv7tnf5BYagi9ZJIOwINTu5srvHOtuFBmth0CgoKhnw5FWK63XTTTbz77rt89tlnxMfHe47HxMTQ29uL3W4fsr6+vp6YmJgRzxUQEEB4ePiQP2J6mc1mz+TOsUxkcmd3Z9uw7ZTzl/2Ci/+wisBQzZDj2jgjwJT0SfP39ycpOWXUirSBPgfNNSU4B/ooKRmaABysSG6p3X2wpwPfJBr1aZJIE0KIw1RfXx/5+fn09vZOybnLy8sn/Bne4XDw85//nPnHzScnOxtdtI6/v/cMT330b8LCwujr65v0WIWYSjJsYIYKDAwkOSWVFi8lPFQx6eyaoj4vwruyrBbWbJ7aJGnxprfo6bSTdeoVIzbF1ujT6exop6qqSrbAiWnncrm4+eabefPNN1m7di0pKSlDHp87dy5+fn6sWbOGZcuWAe7G7Xv37mXBggXeCFmMQ3Bw8DcTr8d+39ToDez84JMxG/cPVoc3V5cQotaNed5QbSwBQaHYbDbOOeec8Qc/TmaTibzKPXQ012KvL8NeV4a9vozWujLa95Vj31eF65ttpcfOXzDk9ZlMJpRKJS01JcQa5o56HVWsYcp6vQkhhJg8u3fvZseOHd9MY7axa1cepSXF9Pf3c975F/DWqje/03mbm5vZtWsXRUVFFBcXU1hYRGFRERXlZfT393Pffffx29/+dtzne/fdd3n00UeZb5xLm72N8753PmlpacyZM4d58+YRHR39neIUwlskkTaDZVmtbCvx3sCBXXkyHvlIlJmZyetvvDUlkzt7ezrY8O
LdFG9ahV9ACJYTLx3SiHuQ9psePPn5+ZJIE9Nu+fLlvPTSS7z11luEhYV5+p6pVCqCgoJQqVRcc8013HbbbWi1WsLDw7n55ptZsGCBTOyc4awWC3l7x65I0+rT6ersoLKyctTpwSkpKQQEBNJcU0KCZdGY51UoFGj16VNW0Z2ZmcH77z/Mf351AuAeiJCckkq22YzpjO9jMpkwGo2YTCaio6OH/I4PCAggJTWN5nFU7Gli09i65S2Z3CmEEDNYfn4+2dnZ9Pf3ExymQaM3oNbPZsGc79NQtpONG9d/p/P29vaSnTOHqsoKFEol6qgEwqOTUSUv5Pj5l1O8+W0+W7t2Qom0jIwMALr7eujv76erq4u77rrrO8UnxEwgibQZzGq18On6f3nl2to4A7kfr6Szs5OQkBCvxCCmhsVioaernU57PaGakbepuZzOYX2FxtJQtpNPnrqV7rYmfHz9Scw6ccQkGkBYRBx+AYHk5+ezdOnSCb8GIQ7Fk08+CcBJJ5005Pizzz7LlVdeCcBjjz2GUqlk2bJlOBwOli5dyhNPPDHNkYqJslgy+fyrl8Zcp4ndP7lztESaj4+Pezv8BNosTOXkzjvvvJO0tDTi4+MxmUykpKTg6zv+j3JZVitbS9yJtG/fTOnpsPP5f+8ndc7pnsmd5eXlpKWlTfrrEEIIcej6+vro7+/nzJv/SdKsk4f8Tt8dHkHRF2/Q0NAw4WovX19fGvc1MPusnzDvvJs9U50HtTdWU2T7cELnNJvNnHfuedTW1nDNOddy4403Tuj5Qsw0kkibwaxWK+0tDTg6WwkIUU3rtQcrhgoKCjjmmGOm9dpiann65NSUHjSRtvbfv6W2ZCspOaeSnLMEXVoOSuXIVQkup5MdH6/kyzcfJTIhk5N+fD9vP3w5ybNPHXE9DE7unLqqDSFGM56+HoGBgTz++OM8/vjj0xCRmCwWiwV7QxV9PZ34BY58E8jR2UpHSx0KpRKbzcYZZ5wx6jlnZVlZs2n8v6u0egPb3n5vSqq5QkND+clPfvKdn2+xZLJm/RcAfLryl9jrykjOORV1TApfvLqCjuZacDk5btkvAHeiURJpQggxMw1u2e+yNwzbZTL4XS4vL49TTjllQudVKpWkGww4Ou3DkmgA6pgUdnxUSXd3N0FBQeM+51tvvzWhOISYySSR9g2n00l9fT3l5eXEx8fPiO1mgxPImmtKiDVMbzJLE+v+4Gyz2SSRdoQZ11Yll4vW+jJyP3mWHR89TVBYBEnZJ5Ocs4SEzIX4+gcC0NW6jzUrf0lV/ufknHEdx15wCwXrX0Xp40tS1kmjxiGTO4UQk81zo6BuD+GRCTTXlNBSW0pLdQn2ut3Ya0tob9kHuD/UD241GY3FYpnQdnhtnIGenm7KyspIT08/tBc0yTIzM2lvrqensxVHVysN5TtpqNgFLhfhUe7PPck5pxKi1hEYHEZ+fj7nnnuul6MWQggxksDAQFLT0mmuLh72mCo6CR9ff2w224QTaQBms4nNu0YebqPSpeByuSgtLSUrK2vC5xbiSHBUJ9J+9rOfUVNdRVnZHvZWVtHb654WotWo2dfYhHKCW9smm9FoxMfHxyuJNL/AENTR8VMyeUx4l4+PD0aTadTJbYt+8HtqS7fh6x/Awkt+x968dZTvWEPhxtfx9Q8iwbIIXWoOOz5eiVLpwzm3PutJypXtWE2s8VgCgkefWqiJTcf2yadT0qtNCHF0MpvNALzz8OX09nQB7t95qWnpzLNasSw7lczMTCwWC0ajkYCAkbefH8hqtbq3w7fUEaqNHXO95psqAJvNNuMSaYM36Ox1ezj5qgd49a5zUUUncewFt9BQnseWNx8hwbrYPblTqoaFEGLGy7Ja+apweO9LpY8vEfrU7/x73Gwy8dGakXusqWPcQ5qKiookkSaOWkd1Iu2r1W+RpPXn5LggErLSiNMEUdXczZ1vFVJbW0tcXJxX4wsICPjmLoP3JnfmScXQESnLamHtV0UHfdwvMITTrn+MN+6/mIrcTzn+kttZcNGvaKndTfmO1ZTtWMPmNx4m0XICJ1/9IMHhEQA4utqpKfqS47//mzFj0OrT6Whvo6amxuv/rQkhjgyhoaG89NJLFBUVkZmZSXx8PP7+/lRXV1NeXk55eTmvvvpfuru6eOpfT4+r+vzA6vDxJNJC1DoCQ8Kx2Wycf/75h/yaJtPgNqDm6hJi0mZz6jUP8c5jV1G/J5e9eeuJNx+Pf2AoIFXDQghxOLBaLXzy2YYRHwuPSWfXd+zZaTKZaG+up7enw/O+ANDn6CIoTEtwmJqiooN/lxDiSHdUJ9Jev3EuYYFD/woKa9sBKC8vnxFf7rOyrHxpm95EWl9PJ801pbhcLnZJRdoRyWKx8OZb741aDRaVZGH+sl/yxav3E29ZSKL1BDSxaWhi05h95g10tzcTGKIeMpRgb946nAN9JOccvD/aII1+f7PvmfDfmhDi8FRTU8OWLVuoqKigvLyc4qIivvrqS7q7u+ns6vasC/DzIT4ilHi1PxuLG3n33XfH1ew4KSmJoKBgmquLSbSeMOb6mVzNFRgYSHJKKi3fTO6Mz1zI7KXX8eWqx3C5XCz+4Z2etVp9OtvfeQ+n0+n1Cn0hhBAjs1gsdNj30dPRwp5tn6BLy2HHh/+iKv8LMhZdRN7Gjd9p94fRaASgta6c2tKt1O/ZgVafzper/sxJV96PKjqF4uLhW0qFOFoc1Ym0kcRr3A0TKyoqWLhwoZejcZfrfrx63ZScu8/Rjb12N801Je4+MjUltNaWYN9XDbi/DFz8/Uun5NrCuzIzM+nubKWrdR8h6oNP8pm15MdU5X/Opyt/xffveodgVZTnsaAw7bD15TtWE5mQQVjE2ImxsMh4/PwDsNlsnHbaad/thQghjnpLT1tCXn4BQf6+xEeEgHOAxqYOALQhfpxoiuSSeXEsSNOiVLq/SCx88AvKy8vHdX6lUok5I2PE6vA+RxctNaWe91F7TSn22hJaG2vosiZN2mucTFaLhR179m8DmnfBLVQXbaahbCfJ2fv76Gj06XR3d1FRUUFKSoo3QhVCCDGGwarp3Vs/ZP1//jDkMW28kbZWO7W1tej1+gmd12QyAe6eo5+/ct+Qx/yDwgjXJZNfUHgIkQtxeJNE2reEBvqiDQ0c9wfsqWaxWOhobaS7vXnExMV49Pc5DkiYlX6TMCvF3rDXM70uPiGRLKsV69LLsFqtWCwWMjIyCA4OnsyXI2aI/ZM7S0ZNpCmUSk6++kFevescPn3mV5z9fyuHVKAdaKC/l7271jFryVXjikGp9EGrTyc/P3/iL0AIIb7R39fH5QsSuP/CDBQKBX0DTs79y2b27OviNEs0//2ymg93NVB0//5K2Xh1wITe57NnZfHu6s8p2rSKluoSWmrd76MtDZWeNYlJye730bN+hMViGXMaqLdYLJms/fw5z88+vn6c/X8rcbmcQz5nDE58y8/Pl0SaEELMUEajEV9fX5wDA8SZF1BduAmA5JwlRCa6k2w2m23CiTSVSkVklI7W+nKW/e4N/nffh
YC7/UvyrJNprS+nYM1n0utYHLUkkTaCOE3QjEqkgTvhEWQ6btS1A/292OvLaa4upqWmhObqEtrqdtNcX47L6QQgVh9HltWC9ZRlWCwWrFYrGRkZhIWFTflrETNHamoq/gEBNNeUEp85euVlcHgEp17zJ9597GpyP3mGnKXXjriupuhLers7SJm9ZNxxhOvSvnPvBiGEAEhOTWVf9Q7PB3k/HyXz07QU1nVQUNOOQgGXL4jH6XRh7+6jsrkbnE7Ky/aM+xqLFi3iueee49OVvyQuPsGdMDvtEiwWi+fGU2ho6NgnmgEsFgttTbX0drfjH+R+7w8MVQ9bF6KJISAoFJvNxtlnnz3NUQohhBgPf39/0tINtNSWctoNj/HqXefR1dpAcs4phEcljLn7o7+/n9LSUmw2GzabjWOPPdZzI8hkMlBfX8a85J9x7AW38uWqx0iadTI+fgGodMm0tdppbGwkKipqxHMLcSTzeiLtrrvu4u677x5yzGQyUVh48FLR1157jd///veUl5djMBh48MEHOeussyYtpni134Q+YE8lg8GAr58fzdUl6L9JpA3099HWUOGpMGuuLqatrpTmunKcA/0AROtisFotZJ1wrueDfmZmJmq12ouvRswUvr6+GAxGT5+csSRYFpOz9Fq2vPEIetNxRCcPn9BTvmMNYRFxRCRkjDsOjT6d/E/Xy92sGayvr4+ysjJKSkooLi6mpKSEk08+mYsvvtjboQkBQHJKKhvyv8LlctHY0UtFYxf/21rDgNPF3qZuZieoWVti58U/rKXL0ed53pWLxp8cuuaaa1i4cCF6vZ7w8NEnEs90gxXJzTWlxKTNPug6hUIhVcNCCHEYyMqysmlnCUFhEZx2w2OUb19DSs4Sz+6PvG/1vN6+fTsPPfQQuTt3UVJSTF9vLwA+vv4EBvjT3t6GQqEgw2ymdPVmAGafdQNhEXHEGOYCoI5JBdyTOyWRJo5GXk+kgfvu6OrVqz0/+/oePKwvvviCH/zgB6xYsYJzzjmHl156iQsuuIBt27ZhtVonJZ54TRCfzZBEmp+fH+npBoo2vUlt6de01pbSXLuHgX73l4GIyCgsFguzvneGJ2FmsVjQar/bNlBx9JiVZWXd1vEPsjj2e7dSXbSF1U/dykV/WDVkgo/L5aI81/2mPZGEmFZvoK3VTl1dHbGxY0/DE1PD5XJRUVHhSZSVlJRQVFREcVExFXsrGBgYAMDfz58g/wBeefkVSaSJGSM5OZmnqu0Y7/iUnj73zSR/HwX+fn7EpaQRn2YgOSWF5ORkkpOTSUpKIjk5GY1GM6HrmM3mqQh/2pnNZhQKBS21oyfSAFQxaeyUqmEhhJjRrBYLH3y0BgC98Vj0xmM9j6li0ocNj1vxwAO8/e6HpM07i+NmfQ+N3oBWb2BfRR7v//U69u7dS1JSEiaTif+8+DIulwul0gfjgv2TqFVRiSgUCoqKili0aNH0vFAhZpAZkUjz9fUlJiZmXGv/8pe/cMYZZ/DLX/4SgHvvvZdPPvmEv//97/zjH/+YlHgStEFUbt4zYyZVXX3VlTzwwJ/QxYWz5OyTsVpv9iTM5A6A+K4yMzNZ9c77464G8/H157TrHuO1e85nw4v3cOo1f/I81rg3n47mWpInsK0T9k/utNlskkjzoueff56rrnL3tvP18SVGG02sWoc1ysjppsXotTHEanVEhGnYVPg1D735OE1NTURERHg5ciHgxhtvpLu7G7Va7UmWJScnSwX2QQQFBZGUnEJLze4x12r06eS+9+GM+TwkhBBiOKvVSmdbM11tTQSHD/1sptEbsH0ytJdZeloaAYHBnPije4es1ca5e2Pm5eWRlJSE0WjE0d1JV2sDIWrdkLW+/oGoouJkcqc4as2IRFpJSQl6vZ7AwEAWLFjAihUrSExMHHHtpk2buO2224YcW7p0KatWrTro+R0OBw6Hw/NzW1vbqPHEa4Jw9PZSX18/I77c//KXv/QkDoWYLJmZmXR3tNLd1kSwKnJcz1Hpklh8+V18uvKXJGQu9NyZKtuxGv/gcGIN8yYUQ3hUAr5+AeTn57NkycSScGLy1NXV4e/nz1+uvY9oVSQ+Sp+Dro3Vuj9IlZSUSCJNzAjh4eHcdddd3g7jsGK1WNhZPnZFskZvoKurk8rKSpKSZuYUUiGEONod2FP724k0rT6djvY2qqqqSEhIANyJt7bmOhxdbQQE729XEKrVExAU4umNOTi5015XNiyRBhAenUJhYdFUvSwhZjSv31487rjjeO655/jwww958sknKSsrY/HixbS3t4+4vq6uDp1u6H/IOp2Ourq6g15jxYoVqFQqz5/BXyIHE68JApgxAweEmAqDb7rNNePf3glgWnABxvnns/6FO2ltqACgfMdqkrJOxMfXb0LncvduSJUePF5mMBjo7esl2D9o1CQaQKxmfyJNCHF4slottNaNXpHmcrk8UzxtNtt0hCWEEOI7SE9Px8/fn+bq4dVhg1VmB/4e3594G9or2d0b0+BZm5KSgo+PD/a6kVseqaKTKSySRJo4Onm9Iu3MM8/0/O9Zs2Zx3HHHkZSUxKuvvso111wzKde4/fbbh1SxtbW1jZpMi9MEAu5E2oIFCyYlBiFmmrS0NHz9/GipLSU+Y2L/ni/+4Z3U7d7O6qdu49TrHqGpspA5Z934neKQyZ3eZzC4P2TVttSjChm9kXqQfyCRKq2U8gtxGMvMzKS1sZreng78AkLoat1HS20pzdUltNSW0lq7m5baErra7QCHzURSIYQ4kjkcDioqKjAYDEPasgwOERvp5nhYRDz+AUHk5eUdMI3ThFKppLmmhP7eblS6ZCrzNrD9w6eITs3x9Mb09/cnKTkFe32553x9jm5aG8qx15XR1lhJ9Z7d9Pf3j9rjXIgj0Yz7N16tVmM0GiktHXmaYExMDPX19UOO1dfXj9pjLSAggICAgHHHEB7khzokQCrSxBHN86ZbPfHKIv+gME67/jHefOAS3v/rdSh9/Ei0Lv5OcWj1Bmxrn5PJnV6Unu7uVVfTXI853jDm+hi1TirSxGGpr6+PgoICcnNzyc3Nxc/Pj7vuumtCnxGOBIOTO9/+02V0NNXQ3dkKgJ+/P0ajicVzLFgs55GZmYnVasVoNHozXCGEOOqUlpbywQcfUFxcTHFxCYVFRVRV7sXpdPKrX/2KBx98cMj67FlZfLrZRnXhFlpqSmiuKcFeW0JLTSm9ju4hbY4CAwNJSU2jfs8O1v37d0POkz7vbPI/Xe3pjZlhNrN+88e0VBfT1lBGa2ONZ61GG8Hll/8IH5/RdzMIcSSacYm0jo4Odu/ezY9+9KMRH1+wYAFr1qzhlltu8Rz75JNPvnPlWP+Ak7pWB5Ut3VQ1d1Pd0k1lSw+9/QNUVVV9p3MKcbiYlWVl446Rk9ZjiU6ZxbEX3Mrm/z1EgvUE/IPCvtN5NPp0Wu0tYybExdQJDg4mLlZPTfPBt8gfKFYTTZH0xBAzXGNjoydhlpuby7btOygsLKC/zz31Okyjo72lnjPPPJMTTjjBy9FOr6ysLH5y4420
tbZisVxOZmYmmZmZpKamSlWBEELMAMsuuhibLR9NTBJhUcloM04j5cRkdn/1Hhs2bBy2fs6cObz88su8/fDl+Pr5kZ5uYGG2FesPz8VisXDWWWcNWZ9ltfJ1UTUJ1hOozFsPgDbOSKzxWLa9/w/KyspIS0vjhhuuZ2/l70lP12E+/wSMRiMmkwmj0Thqr9x9+/Z53n8NBgPnnXfe5P4FCeFlXv+09Itf/IJzzz2XpKQkampquPPOO/Hx8eEHP/gBAFdccQVxcXGsWLECgP/7v//jxBNP5JFHHuHss8/mlVde4euvv+app56a8LVPe3QLdfZOBpwuzzFdVCRJycmce8Eibr755sl5kULMUJmZmbz7wSff+fk5S68lIERNTPrs73yOwcmd+fn5kkjzIoPJSM0ovSYPpNfG8MWWr6WKUMxItbW1LDh+IRXlZQD4BQQSEWdEE2dm/sUXEBFvJiLejK9/IM/clENubu5Rl0jz9/fnySee8HYYQgghDiI0JITUuUtZct2jQ453tdZT/PnLw9bfdtttzJo1i/j4eAwGA35+o/ctzsqysmbdRi6++yNeu+c82hurSM45Fe03n8ttNhtpaWmce+65nHvuuWPGW1FRwZNPPsmOHbls35FLQ30tAAqlElwu2tvbCQkJGe/LF2LG83oiraqqih/84Ac0NTURFRXFokWL2Lx5M1FRUQDs3bt3yMj1448/npdeeonf/e533HHHHRgMBlatWoXVap3wtb//4+swmUwkJyeTlJREYmIiwcHBk/bahJjpLBbLQcdlj4dCqSTzhO8fUgyqqER8ff3Jz8/nlFNOOaRzie/OZDLxSdGH41obq9HR2dkpVYRiRioqKqKivIz5F/2K5JxTUUUnoTzIEA1tXDq5ubnTHKEQQggxOrPZxJ61Xw87rtKl0NS4D7vdjlqt9hxXKpWcfvrp4z6/xWKhvWUfLucAp13/GDtXP49x/vmEaGIIDAnHZrNNqIrs7rvv5j8vvkSceQHxx1xATkIGEfFmervb+d8fl7Fr1y7mz58/7vMJMdN5PZH2yiuvjPr42rVrhx27+OKLufjiiw/52nfddRfh4aM31hbiSDbYJ6elpvQ7JdImg9LHF01sikyF8zKDwcC/m58fV5WZXuue3FlcXCyJNDHjZGdnAxCi0aGJSR11rSbOxLbtO6YhKiGEEGL8DAYD9v++PuxzmTo6GXBPT583b953Pv/g5M7mmhL0xnmcdn2O5zGtPn3Cn8sTExMJDArjrJ/9a8jx/j4HSqUPubm5kkgTRxTl2EuEEEeq9PR0z+ROb1LFpJNny/dqDEc7o9FIt6OHlg77mGtjNNEoFAoZOCBmJI1Ggz4unqbKwjHXRsRnkG+zMTAwMA2RCSGEEONjNBrp6Wyju73Zc8w50E9YVDzAIU9PNxqN+Pr60jLCpE9VrMEzuXO8srOz6WhtpKt135Djvn4BaGNTpfpbHHG8XpEmhPAev2+akY40Lns6afTp5G980asxHO0MBve0zurmOrRhmmGPd/f2UNtcT01zHTXNdfj5+kkiTcxYs3Ny2FUxjkRaghmHo4eSkhLMZvM0RCaEEEKMbXBa8r7ynWz/8F9knngpa/71c3x8/QlVRx5yIs3f35/UtHSaq4d/ltPq0/lqyyoGBgbGPZFzsBq8sbKQRJW7RZPL5aK7rQl1nIntUv0tjjCSSBPiKGe1Wti8y7sVaRp9Os1NjTQ0NBAdHe3VWI5WqampKJVKdpUX0NnTSc03SbNaewO1LfU0t7V41mo1WmbPzuGSSy7xYsRCHFxOTjbrPh97CFFkgjt5lpubK4k0IYQQM0ZaWhoKhYLSL9+jtvgraou/AtzJKZUu5ZATaeAeOLAlb38izdHVRnN1CW2NVfQ6HOzevduT0BtLcnIyIaFhNFUWULxpFWERcdSVbqWm+EusJ/+QnV+uxel0Dul9LsThTBJpQhzlLJmZfPDxp16NQRu7f3KnJNK8w9/fnwxzBq9+/hYAoaGhGNMNZC3IYZnRiMFg8PzRarVejlaI0WVnZ9Nh33fQQSqOrjbsdXvQpeYQro0hNzdXEsNCCCFmjKCgIOLiEwhW69Cb51NTuBmABRf/iqaqYgqLJiGRZrXyzrsf8N5jV2GvLaWt2T293cfHh3nHHkdcXNy4z6VUKsmyWmko38merR8NeSzGcAx5n71IWVkZaWlphxy3EDOBJNKEOMpZLBY6W5vobm8iKMw7AwfCo5Pw8fXDZrNx0kkneSUGAZ+t/YyioiIMBgPR0dFjDh0A6OzspLCwkK1bt05DhEKMz+AWk6bKAgJMx1K48XXizAtQ6ZLZV5HHx//4Ge1N1fzg3o/QxJvYsUN6twghhJhZjEYDZQ3lnHr1n/jvnWfT291O6pylDPT1suujD8c1IGo03//+93n/gw+J0UVgPf8ELBYLVqsVk8lEQEDAhM83e3YOr729miXXP8bqp25F6eNLZEImeuOxgLv6WxJp4kghiTQhjnKeyZ21u72WSPPx9UMbk0J+vgwc8KaoqCiioqLGXOd0Orn+uuv55JNP2Fu5dxoiE2Ji0tLSCAoKpqmqkE57PetfuBOA8KgEOlrqcPb3ERiqITw6EW18Btu3v+PliIUQQoihzCYTu95ZQ6g2ljNv/ieOrjZCtbGoY1Lo7Ginvr7+kKanZ2RksGXzpkmLNzs7m3/+8ylSZp9G5ok/IH/dyyTOOolgVRQhqghyc3O58MILJ+16QniTJNKEOMoZDAZ8fX1pri7x3DHyhnCZ3HnYqKurY+UzKznOOIfzzzmdhEg9EaEarvn7rd4OTQjAvS3FYrXSVFnIiVfcx87Vz9FcXUJM2hyCwiLYm7eOqCQrSqUPEfEmtr//D5qamoiI8M7NhJG0t7dTXFxMcXExlZWVXHfddWg0wweBCCGEODIZjUbs9StxOZ3ojfM8x1W6ZMA9ufNQEmmTLTs7G6dzgJbqEhZe+lsSrYvRG49FoVCgjTNL9bc4okgiTYij3ODUnpba3V6NQ6NPx/bFy16NQYxPbGwsYWFhmOLSOXXWYgC6HN1ejkqIoebMzuHND9bh6x/I6Tf8ldfv/R4ul5NZp19F7ifPMPec5QBExu8fOHDKKadMa4wul4vS0lKKi4spKiqiuLiYgsIiCguLaKiv9axTKBTU1NTw5z//eVrjE0II4T1Go5G+XgcdLXWEReg9x8OjElEoFJSUlHDCCSd4McKhsrKyUCgUNFYVEJVsJWX2aZ7HtPFmtu9Y7cXohJhckkgTQmC1Wvgy37uTO7X6dL5u3Me+ffvGtb1QeI9CoSAzI4PKxmpvhyLEQWVnZ/P0ymcY6HOgjknhxB/fx+qnbqWzdR8KhZIEizsJrNIl4+cf4JVE2tNPP831118PgF9AIJqYFMKiUkiYdyFZuhTUMSmodMmsWfkLCguLpjW2jo4OCgoKyM/PJz8/H5stn77+fl584T9ERkZOayxCCHE0GpyY2VpfNiSR5usXgCoqblImd06mkJAQUlLTaKosHPZYRIKZ3I9X0traikql8kJ0QkwuSaQJIbBaLHy8Zr1XY9D
o3ZM7CwoKJJF2GLBYrWz4aK23wxDioLKzs3EO9NNSu5vIxEwMx55DTdGX5K97mZj0uQSGqgFQ+vgSEWckN3f6t5w0NDTgHxjMJfd8SIhah0KpHHGdWpdKYdHUTVfOzc1l27Zt5Ofnk5dnI89mo+qA/ofqqHjCopOptG1k7dq1XHTRRVMWixBCCLekpCR8/fyw15cTn7kQcFcyd9rr8Q/RUjQJkzsn2+zZOWzeNTSRNtDfh6+fe3jBzp07Wbx4sTdCE2JSSSJNCEFmZiYd9n30dLQQGOqdHjyq6CSUPr7YbLYZVaYuRpaZmcnLL76E0+VEqRj5y78Q3jRr1iwAGisLiUx0D1VZeOlv8fUPJGnWSUPWquPMbNu+Y5ojBJPJRG9PFz5+AQdNogGoY1LYtboCh8PxnSapjcZmszFnzhycTiea6ARUselEWpZiWJKONs6AJiYVv8AQAP5923EyFEYIIaaJr68vyckpFH3xBjVFX9K+rxx7fTm9PV0A6E5bMO0x2e32Ie0ICouKKC4u4fbf/JpLL72UnOxs3vvgY3I/fobGykLsNYU0VZcy0N+Hn7+/VDSLI4Yk0oQQnsmdzTWlQ5qZTicfX3+0McnyJe0wkZmZSbejh8a2ZqJV8qFIzDxhYWEkJafQVLX/zrivXwALL7lj2NrIBDNb/vcWfX19+Pn5TVuM+7ftlBMUpj3oOrUuGafTye7duz2/r7+L9vZ22traiIuL8xxTKpU4nU7OvuUZEq2jVwmoY9Pkd7QQQkyja665mr/85W/Eh0ZgnrsYg+FqjEYjRqORtLS0aY3l/265lb/+5c+en8O0OlS6FNqbWvjb3/7OpZdeyimnnMK9993Htrf/jMVq5cTTF5GdvZzs7GyysrJkW6c4YkgiTQiB0WjEx8eHllrvJdLAPblzV57Na9cX45eRkQFAZWO1JNLEjDU7J4evi4b3avm2iAQzfb29FBYWkpWVNQ2RuRkMBhQKBfa6PcSkzznoOnVMKgBFRUVjJtL6+/spKysbYYBBIQ31dfj6+vLpp596ttakp6fj6+dHW0MFMFYizcDOXTJ1TQghpstvfv1rfvPrX4/4mMvloru7m6CgoFHP4XK5aGhowGazYbPZKC4u5sYbb5zwjZm8vF1EJWdx4o/uRaVLwj8wFICv3v4bRZ+7B4Ydf/zx1NXWEh4ejo+Pz4TOL8ThRBJpQggCAgJISU2jpca7Awc0+nRsm1/zagxidE6nk8rKSoqLi/H396eysYa5adneDkuIEeXkZPPxmrW4XC4UCsVB10UcMLlzOhNpQUFBxMUnYK8rG31deCSBwWEUFY08cODZZ5/lzTdXUVhURFnZHvr7+oBvDzBYRpYuhY0v3cnmzZs9iTQ/Pz/S0w001479+1+jT2fz56/R39+Pr698hBRCiOlSXV1Nbm4uNpuN/Px8du7Ko7CwED8/P77cstlT4Txo06ZNvPDCC+zKcyfPmpsaAfcOEKWPD5WVVaxa9eaEYjCbTOwqqiQqyTLkuDomhaamfbS0tKDRaNBovNMmRojpJJ+ChBAAZFmtfF3k5cmdsels3VdPU1MTERERXo3laOZyuaitraWkpITi4mJKSkooKS6msLCIsrIyHL0OAHyUPlQ21ng5WiEOLjs7m672Fjrt9YRqYg66LiA4HHVUHLm5uVx++eXTGCGYTEb21I+eSFMoFKh1ySNOaHM6ndzwk58QFpVEnHkBC+b9AHVMCmpdyogDDPLWPDfsPFarhc27xv79r9Ub6O/ro7S0FLPZPI5XJ4QQ4lBVVVVhNJro7u7CPzAYrT4dVUw6s844iS1vPMIXX3wxLJF27XXXU15ZS6zxONIWXcYxegNavQFVdBJfvPYAu/I+n3AcJpOJlvqVOJ0DKJX7q83UMSkAFBcXc9xxxx3aixXiMHFUJ9KeeOIJfvWrX6EcpcGvEEcLiyWT1Ws3ejUGTZwBgPz8fJno4yUul4uTTjyJ9RvcU1yVCiXRmihiNdGkquNZdOJc9NoYYrU6Xln/JhUNlQB0Obq9GbYQI8rOdldLNlUWjppIA/fAge07dkxDVENlmM3krvpozHVh0SkUFAzfpqpUKomPTyA8bTHHX3L7OM6TTEHh0PNYLRY++HjsqaCaWPd05fz8fEmkCSHENHFv4ezi5KsewLTge0NukBRt+C822/C2KIkJCXT5RnH6T/4y7DGt3oDt0//Q09NDYGDguOMwmUz09znoaKohPCoBgJ4OO0Gh7h6fRUVFkkgTR42jOpF2++238/677/Ds8/+e9maNQsw0mZmZtLc04OhsJSDEO41A1dFJKJU+kkjzIoVCQUFBAYszj+OSRRegU0fh5zty8/XEqHg2FnzJVX/9GS0drdMcqRBjS05OJjQsnKaqwmGTOgEG+nux1+6hsaqQ7rYmcqtrpz1Go9FIS91Tw+7wD8a3/YN/kmBZjDommaINI1cQmE0mCmpGr2obpI5JpfCLV4Ycy8zMpLO1ie72JoLCDl4NHBQeQXCYmvz8fC688MJxXU8IIcShiY+PJyQ0jO62pmFVxuExadhsw4fAWK0WNm97ZdhxAG2cEafTSWFhITk5OeOOY7Dqrab4S168/RTmnH0j2957EoAwjW7EqmkhjlRHdSLtuWvm8Ie3djIry8qfHnqYG2+8UarTxFHLYnH3O2iuKSXWMNcrMfj4BaCRyZ1eZzQaUbQ6iY/Uj7ru5FmL+KLwK6pb6njqqae4/vrrpylCIcZHoVAwa1YWDZWFdLc301RVSFNlIY2VhdhrCmmqLmWg391PLDEpmWt/dtO0x2gymRjo76WjqZqdq5+npaaU1LlL0caZ2PjyPTTuzaehbBfG+efT0tw04tZ3s9nElh2rxnU9tS6ZpsYG7HY7arUa2D+5uaV296iJNIVCgSbWMGL1gxBCiKmhUCjIyMigeYRexprYdPJsnww7brVasTdU0dfTiV9gyLDnANhstgkl0hITE/EPCKC6cDOAJ4mm0RsIUUVSeJA+nkIciY7qrNGxKRo+vvU4LpodxU033cSSU0+hvLzc22EJ4RVGoxGlUknLOBpOTyVVTJpM7vQyo8lIrb1+zHXaUDUXLjibHkcPp5122jREJsTEzc7JYffX7/PcrcfxziM/Ztvbfyasr4rzT1/EX/78GBs2bMBut1NRXsbvf/e7aY/PZDIB0Fy7h5aaUqoKvmD9C3ey6sFL6Wh2V8ilzl06pAfNSOew1+9loL93zOsNTgA98DwGgwFfX1+aq0tGfE5LTSmfPXcHHS11qGLld7QQQky3LKuFtrpServbcXS109vTwdrnf0tQeCSVeyvo6OgYsn7wBnlL7e5h5woIDiM8InbCN0V8fHxIS0vHLzAE44LvoVD6gELBrCU/RqUbuf2AEEeqozqRBhAS4MsfL8zk5RuOoXjn12RZLfzzn//E5XJ5OzQhplVgYCDJKak014z8RWq6aPTpUu3gZUajkdrm+nH9HozV6gDYvXv4BzUhZoI777yT++67j1deeYWCggI6Otr5+qsveeqpp1i+fDmLFi1CpfLOdnaAhIQEAgICaWso5+
SrHyQgWIXedByn3fAXZp95PUofX5KzT0WlSwZGTqQZjUaczgHaGqvGvJ5KlwQwZAKov78/qWnptNTupreng//9cRlrVv6S3Vs/xLbuZV6/73sUbnyN2pKv0eoNFBcX0d/fPzl/AUIcgVasWMG8efMICwsjOjqaCy64YNjU3Z6eHpYvX05ERAShoaEsW7aM+vqxb2KJo5PFYqG5djev33shz/xsDi/+5hQKNrxKpW0DAIXf6n2ZkZEBQHNNCS6nk4H+PlwuF/nrXqGjuRZ1bDp53+GmiNlsoq2+jMWX/Z5QbSwKFKTMXoJKl8zu0lKcTuehv1ghDgNHfSJt0CJDBJ/cNp/zs7T85Cc/4fTTT6OystLbYQkxrbKsVuxerkjT6A001NfR0tLi1TiOZgaDgY7uTtq62sdcG6uRRJqY2aKiorjjjju45JJLMJvN+Pj4jP2kaaRUKklLT8deV0aoJoYTr7iXmqIt9Pf2sHfXevSm4wgMVeMXEEx4ROywL+Owv6rNXjd2nzT/wFDCtMN72VitFuy1pfR2tWOvK6N40yo+fvJm1v/nD/gHhaP08SPReiIafTp9vb3s2bNncv4ChDgCrVu3juXLl7N582Y++eQT+vr6OP300+ns7PSsufXWW3nnnXd47bXXWLduHTU1NdJ7UBxUZmYmvT1d6E3HAtDT4f6cPPuM6wCG3YQOCQkhMSmZ5uoSvnzrzzz1k0z+98dlrPvP73n9vgvRxBrYuWvXhOMwm0y0NZTjHxTGeT9/nu/f/S5BYRGoY1JxOHqGfH92Op2UlZXx3nvv8ac//Ykrr7ySRYsWs2HDhu/61yDEjCGJtAOEBfrywEUW/nPdXNZ99hl33nmnt0MSYlpZLJm0jlACPp20B0yFE97haSbbXDfm2iD/QCJVWkmkCXEIzGYTrfXuJFjaMWdiOv57bHzpHmqKvyR17lLPOpUuZcQeNLGxsQSHhHrOMZaRzmO1uBNpodpYTrziPgDmnH0j5/3iBbT6dPSmYwkIDhsyuVMIMbIPP/yQK6+8EovFQnZ2Ns899xx79+5l69atALS2trJy5UoeffRRTjnlFObOncuzzz7LF198webNm70cvZiJBntZJuecSoLFPZArMEyL3jwfdVT8iL+TZ2Vl0VJTglLpbou+r9ydOJt33s1o4tLZW1FOV1fXhOIwmUy0NtbQ5+giPCoRrd4AgOab9gODN3tu+/kvCAkJJTU1lXPOOYc/3HUPH23Ywfa8Qp588snv8DcgxMwiibQRNLQ56Btw8r3vfc/boQgxrSwWC23NdTi62rwWgyomBYVSKV/SvGhwinFNy/i2mMSodZSWereSUYjD2eAd/kGLfvB7AsM0uFxOUnL29x8Mj06msHB4Ik2hUGAwGMZVkXaw82RmZtJh30dPRwvp887CvOhidn7yHL7+AdQUf0lKzhIAglVRBIWqZAu+EBPQ2uqebK3VagHYunUrfX19LFmyxLPGbDaTmJjIpk2bDnoeh8NBW1vbkD/i6JCYmEhwSCj2uj2ccvWf0MYZsZ70Q5RKH1SjTO5srSvlmPNuJu2YMz3Hk2adjFZvxOVyUVBQMKE4Bm+2ttaXDzkeGhGHj6+/p9r5nXfeQZ1g4exbVnL5g+u46q/b+d4dr5Ocfar02RRHBEmkfUutvYe73i7mx1dcwbnnnuvtcISYVgdObvMWX78AtLpk+ZLmRcHBwcTF6qltHjuR1tvfiyo4jNJi7/bWE+JwZjKZaGuqpc/hrgwY3DJzyV3vEayK9KxTx6Swe3cpAwMDw86RmWGmbZSKtErbBr5+5284B/pR61IpLSkZ0svmwMnNAIt+8DtCtbG88+iVOAf6Sc45FRic3JkuNzuEGCen08ktt9zCwoULsVqtANTV1eHv7++ZnDtIp9NRV3fwavAVK1agUqk8fxISEqYydDGDKBQKzGYzLTWlBKsiueTu95h3/s8AUOvT2ZWXN+w5FovFXT3W08GJV9xHWGQ8cRnHE6qNRavfP7lzIjytBL6VSFMqfdDEJHkq0rKsVvz8/Em0nkBYhB6FQgG4eyEXFxeN+D4mxOHE19sBzCQul4tfvp5PmErDn//yF2+HI8S0M5lM7smdNaXEpM32Sgy9PR0EhEeRJ4k0rzKaTNR882G+f6Cfensje/dVUlBVSnn9XursDbR2tdM/0M+Ac4AF8xd4OWIhJq6rq4vS0lKKi4spLi6mpKSE0NBQHn74YQICAqYtjgPv8Ecmum9ohEclDlun0qXQ63BQWVlJcnLysHO888FqAOp2b2fN0z8nKfsUUmefxp7tn7Br9fMAmBddjDomhZ6ebqqqqkhMdF/HYDDg4+NDS20peuM8/AKCOe36x/jf/RcRlWQlVBvruZY6Np2du4Z/aRNCDLd8+XLy8vLYuHHjIZ/r9ttv57bbbvP83NbWJsm0o8isLCsfrt8+7Lg2Np2dHz9DZ2cnISEhnuMH3iCJSZvND+9fg0LprqPxCwxBHRU/4USaVqtFGxFJdaF7C3JrfRn2ujLa95XTUldBY2PTN9fOZPXa4f/Oa/UGeh0O9uzZg8FgmNC1hZhJJJF2gFe+rGZd4T7ef//9YXeIhDgaBAUFkZScMi2TOx1d7bTUlrr/VLv/2Vq3m9bGagAWZF085TGIkfX29qLWqHlv40Z++NhP6e7pQqFQMuDcf/dQE6omIz6d4po9XLDsezz44IPyYV7MaKtXr2bXrl0UFxdTWFRMUVERtTXVnseDQlSEaHQ0VhVzxRVXMG/evGmL7cBhAYOJtJEc2IPm24k0k8lEh30fjq52utuaaNtXya7Vz7Nr9fP4+PrjHxSKRm8gVBPDQK8DcE8AHUykBQQEkJKaRkvN/m3akYmZfP/OdwgK0w6NQ5/OV1tWMTAwMOOGNwgxk9x00028++67rF+/nvj4eM/xmJgYent7sdvtQ75z1NfXExMTc9DzBQQETGuSX8wsmZmZvPzf13C5XJ4KL3AP6nK5XBQWFjJ37lzPcbPZjFKppLm6hJi02Z4k2iBVTNp3mtw5Z84cVn/yMvnrXkYbEYnRYGDhCXMwGi/l8ssvB9xJvPaWBhydrQSEuCdj2+vKCAyLANx9NiWRJg5nkkj7RlVzN/e8W8LVV1/NmWeeOfYThDhCWS0Wcssmr9+Vo6uNlppSmmvcyTJ7TQn2ut20NdUC7lL1pOQUZlksWM6+HIvFQmZmpucumph+v/nNb3jzzTcBUIWoMMelkxAZR0KknoRIPfEReoIDggC457+P0NbWRnh4uDdDFmJUGzdu5LTTTsMvIBCNLpnQqGRic87DvDQZtS4ZlS6ZwFANfY5OVt40m+Li4mlNpGk0GiIio7DXjz4Jc7AHTVFREUuXLh3y2P5k3B5SZi/BvOhiSr98hxMuvwd1TApv/emHpM5xPycsMg4fXz+KioqG9GjKslr5umjo739NbNrweGPT6XU4KCsrIz09/Tu9ZiGOZC6Xi5tvvpk333yTtWvXkpKSMuTxuXPn4ufnx5o1a1i2bBngTpDv3buXBQukwluMLDMzE0d3J50tdYRoYuhua6KltpSGs
lzAvU3zwERaUFAQySmptBzkBrlGb2Dnro8nHMeqN99g586dmEwmT9+/kWIF2LPtE9Y+fzsZJ1xCwfr/uuMKcffZPP/88yd8bSFmCkmkAU6ne0unRhvJo48+Sn9/Pw6HY0hprBBHC4slk3VfPD/h5zk6W93JspoSmmtLsdeUYq8rpf2bPltKpZKk5BRyrFYs5y32JMtMJhPBwcGT/TLEIejo6CAuMpZHrrybQP/R73zHR+ilaayY8fz9/QE4/5cvE5VsPfi6wFDCNNGeZsnTyWgw0FhXPuqawR40I8U3eGe/tb4cXWo2iy79LTVFW8j77AWyTr2Cgf5eUuec7j6Pjy8a3f5eNoMOthXn27Rx7mvZbDZJpAkxguXLl/PSSy/x1ltvERYW5ul7plKpCAoKQqVScc0113Dbbbeh1WoJDw/n5ptvZsGCBcyfP9/L0YuZavAm8/t/uYbutn10tdsB8PP3x5o1ixNPPHHYc7KsVraVHjyRtuOjp+no6CA0NHTccYSEhIyZ8DUajSiVSmqKvwTwJNFSZp+Go6NJ+myKw54k0oBnNlawsbiRK664gguXLWPzps2EhIaSt2sn0dHR3g5PiGllsVhoa6qlt7sd/6CwYY/3dLR4EmYtNaXY63bTUlNCh30f4E6YpaSmMddqxfK9kzwJM6PRSFBQ0DS/GvFdZGZm8vxzz+Pn6zfm2oRIPe989RHd3d3TENnkWb9+PQ899BBbt26ltraWN998kwsuuMDzuMvl4s477+Rf//oXdrudhQsX8uSTT8o2hMPU4P9v9obyURNp4J5o6Y1EWkaGmfc/+2rMdWFRyRQUFg4/HhaGLibWU9XmFxjCqdc+zKoHf8CGF+8iKslKeFTCkPMUFQ19nZmZmcO24owkWBVNYEg4+fn5UlEgxAiefPJJAE466aQhx5999lmuvPJKAB577DGUSiXLli3D4XCwdOlSnnjiiWmOVBwuampq2Lt3L6cvPYM9u0tJ0qXS19dLa3MLtQ31XHD+uSQlJQ17ntVq4dMNT494zog4d3/O/Px8jj322EmNNzAwkJTUNAJDVJgXXUTJ5rcZ6O/FcNy5VNo2yk1Ycdg7qhNpD39YwqayVgpq3KOjX/vfKnTpc8k49Sq2vvN3NmzY4Cm3FuJoMViKXVuyFb+AoG+SZqXYa91Js8GEmY+PD6lp6RxjsWBddiqZmZlkZmZiMpmkf8dhLjMzk96+Xhpa9xGr0Y26NiEyDpfLRWnp5G0Hng6dnZ1kZ2dz9dVXc+GFFw57/E9/+hN//etfef7550lJSeH3v/89S5cuJT8/n8DAQC9ELA6FRqNBGxFJ67emjI0kXJdCQWHRmOsmm9Fo5MWXXx3W++bb1LoUCne9P+JjJpOJ6rr9kztj0mYz95zlfP32X0mdO3QrqComhYL8oVt6DmxMHWuYy8EoFAq0epncKcTBuFyuMdcEBgby+OOP8/jjj09DROJw9vnnn7N48eIh/14F+fuQHBHEGcZItvuH8taqN7n33vuGPddisdDeXE9XayPdHc20VJfQXFNCS00J9hr3zZTGxsYpidtqsbCttISlP32CutJttDdWkWBdTEdLHVu/fFv6bIrD2lGdSHstrwudcRGLT5pHrOEYtHqDpwnj7i1vSSJNHJXMZjO+vr68/9frAPD19SUt3cBxVguW75/u6WFmMBgkYXaEGkymVjXWjplIi490T/IrHKFCZiY788wzD9oP0+Vy8ec//5nf/e53nmqbf//73+h0OlatWsWll146naGKA6xcuZIHH3iQ235+G9ddd92EPoAbDQaax5FIU+uS2bnt/TETWpPNZDLh6O6gu62RYFXUQdepYpLZ/mEVXV1dw7bFZ5hNFH64fsixuWffSKL1BCLiTUOOq3Up5H60l+7ubk+18OBWnJba0RNp7jhkcqcQQkyHhoYGXC4Xr904j8Z2Bze+sJPu3gEqmrr589xYWjp7+aho74jvW1aruwr737843pOIi4rSYc2ycs6i88jJyZmy/uAWSyafbXgGv4Bgzv/VSyiVSvwDQ9Hq03E4eigvLyctbXgfTiEOB15PpK1YsYI33niDwsJCgoKCOP7443nwwQc9TXNH8txzz3HVVVcNORYQEEBPT8+4r/vDFZ8SFhl/0A/J0WnHsHbd+hEfE+JIFhwczOeff05FRQUWi4X09HRPfyFxdIiLiyM0NJTKxmrmGXJGXRsaGEKkSjus19LhrKysjLq6uiFN2FUqFccddxybNm0aMZHmcDhwOByen9va2qYl1qNJdXU1//d//4cmMJwbb7yRp//1NM8+9yxZWVnjer7ZbOKjDdvHXKeKTqazo33M6Xlj6evro7S0lLy8PGw2G3k2G7t25dHW1s4nH3/o+XIz6MBhAUHhkXS1NmCvK8devwd7XRmt9WW0N5Rjb6gkICCQjo6OYYk0k8lEy3PP43I6PTcGlT6+6FKzh8WnjknB5XKxe/duTyyBgYHfNKYeu8JUE5vO1q+kokAIIaba4JbNl7ZUUVzXAYCPUkFX7wBLH90EwLy5cw6aSPv3v/9NR0cHFosFi8VCRETEtMRtsVhoa67D0dVOcPj+a2r0+/tsSiJNHK68nkhbt24dy5cvZ968efT393PHHXdw+umnk5+fP2qz//Dw8CFf3CZ61/jAPiEjiTUcw4YX3pJpdOKodOyxx056rwRx+FAoFGRmZFDZWDOu9XFaPYUFBVMc1fQZbAqt0w2txtPpdJ7Hvm3FihXcfffdUx7b0ey2227DT+HLAz/+PZX7qvn9Sw9y//338/LLL4/r+Uajkf++9uY4tk4mA1BcXDyhRNq6detYv349NpuNnbvyKCkppr+vD4BQVSRqfTrquHk0lb/Gxx9/PCyRlpqailKp5NNnfklvVzuO7m++LPn4kJiUjMVsxrzkQoxGI8cdd9yIPVyNRiN9jh467fWEamNHjVcd454iWFRUNCSW0RpTH0gbl46jRyoKhBBiqplMJhYumM+uhnqSMzKINAxw8sknYzAYSE5OJjk5mZiYGJTf3EA5kEKh4Ec/+pEXot6/w6GltpSYtNme4yFqHYHBYeTn53Peeed5JTYhDpXXE2kffvjhkJ+fe+45oqOj2bp1KyeccMJBn6dQKA7pTvFYYo3H4HQ62bRp07AR80IIcaTLtFj44pMN41qbEBFLfv6Rk0j7Lm6//XZuu+02z89tbW0kJIx+w0aM34YNG3j11Ve56exrCA0Mobalgf6Bft584w2uuvIqrrn2GhYtWjTqOYxGI92drfR0NBMUdvC78eFRCSiUSoqLi0f9HHKg+vp6lpx2Gn7+QWjjTKj12SzIuQiN3oA2Ln3I9faVbcdmG95kOSAggL/+9a9s374dk8mEyWTCaDSSmpo67qrgA6vaxkqkBYZqCQpRDRusMLgVZ5DL5aKjuZaWWne/zOaaElprd9NS665a6+joGFdsQgghvpuQkBA2frHJ22FMmMlkcrcLqBmaSHN0thKiiZE+m+Kw5vVE2re1trYCoNVqR13X0dFBUlISTqeTOXPmcP/993ua5H7bd9lyo9alEKKK
ZMOGDZJIE0IcdTIzM/nvy6+Mq09UQmQc729bM02RTb3BmzT19fXExu5PRtTX15OTkzPicwICAqRn4BQKDAzEz9eP/21+l0C/AJ5d8zJBAUF877gz+fCd93nu+ecoLi4edaqq0eieTtZaXzFiIq1u93Zqir4k54xrUUfFT2hyp1qtxuV0Mu/CX2A58QejrlXFGA7aW2z58uXjvuZIkpOT8fXzw15XRnzmwlHXKhQK1DEpw7ZlZ2Zm0tZcx5qVv6Ktfg8ttaU4ujsBCAoKxmQ2M+94KxbLJRxzzDFkZw/fNiqEEGL61NXVkZuby+zZs0esVvaWoKAgkpJTKNnyDo2VBe7BZbWlnsFlGs3U9GYTYjrMqESa0+nklltuYeHChcO2PBzIZDLxzDPPMGvWLFpbW3n44Yc5/vjjsdlsxMfHD1s/0pabG58efduCQqFAlzaHdevHV5EhhBBHkszMTLodPTS2NROlGr2XRnykHqfTOU2RTb2UlBRiYmJYs2aNJ3HW1tbGli1buPHGG70b3FFq3rx57Ny1k5/e+FMeXvUESoWSCxeczfcXnY+vjy+vfvEWUVEHb9APeLYf2uvL8PUP5JOnbiUp+2RS5yxl7651bHvvSVwuJ2nHnEFYVDJFReNPpAUEBJCalk5z9Ti2ROrT2fnhJ1MyzMDX15eUlFTs9WVjLwbColPILxg6KOSkk04iO2c2Pr2VLFicQ2bmZZ4hM0lJSSNuHRJCCDE9Ojo6WLVqFbm5uWzfsYPcHTtpbGwAwJo1i107c70c4VDLll3Inx97jBBXC8dlWbFcfBqZmZlkZmaSkZHh7fCE+M5mVCJt+fLl5OXlsXHjxlHXLViwgAULFnh+Pv7448nIyOCf//wn995777D1I225uf+jsQcTxBiO4ctVj+BwOKTSQAhxVPFM7myq8STSXC4XrV3tVDZWU9lYQ1VjNVVNtVQ2ja+X2kzS0dFBaen+huplZWXs2LEDrVZLYmIit9xyC/fddx8Gg4GUlBR+//vfo9frueCCC7wX9FHObDaz5tM13HTTTTzxxBNsyN9MXUs9+VXFnH322ajV6lGfHxwcjD4uHnt9OYEhaux1e7DX7SH3o5UolD74BQYTHpmAKjoJVXQSRcVfTyi+WbOy2JI3nt5iRro6O9i7d6+ngfRkyjCb2bFnfIk0tS6Z4nXrhhyLi4tjx/Ztkx6XEEKIQ3f//fezYsUK1NHxaPRmkhZ8nznxJlpqStj6zt+HTGKeCR7605+4/49/xM/Pz9uhCDGpZkwi7aabbuLdd99l/fr1I1aVjcbPz4/Zs2cP+VJ0oJG33IydSNMb5tHrcPD111+zcOHoWySEEDOT0+nkpzf+hJ4eB3fedRcpKSneDumwkJSURFBgIB9u+5TNRVupaq6lqrGG1k731nhfX18MaelY5lq5wHIRZrOZyy67zMtRj9/XX3/NySef7Pl58GbLj3/8Y5577jl+9atf0dnZyfXXX4/dbmfRokV8+OGHBAYGeitkgbta/LzzzuMfT/4DR5+D6uY6mtvtvLlqFbfeciuP/fmxUZ9vMpmoqC9j/oU/x7zoYkq2vM3iy+5EHZPKe3++htS5ZwCgiklhy8b/TmgipdVi4aNP1o65Thu3f1rZVCTSTCYj6zePbwCDOiYFe0szjY2NREZGTnosQgghJldoaCiBIeFc+sdPh1Q112ti+Oqtv2Kz2TjmmGO8GOFwkkQTRyKv1+e7XC5uuukm3nzzTT799NPv9CV3YGCAXbt2DellMxkiEswEBIWwYYNs7xTicPXqq6/yz6f+xbtvvorJZOSmm26ivr7e22HNeEqlkgsu+B47ym3U9DWStSCHn//6F7z++uvk5+fT1dVFfmEBr73+GnfffTdnn322t0OekJNOOgmXyzXsz3PPPQe4Ezb33HMPdXV19PT0sHr1ak+PLeFdS5YsQaVSsST7RB675l5W3vwYhtgUVq/+ZMznmk1G2hvKAVh46R2EqKMp2PAqXW376HN0knqMO5Gm1iXT19vL3r17xx2XxWKho7WR7vbmUdeFavUEBIWMOHBgMphMJlr3VdPf5xhzrVq3f3KnEEKImS87O5uezjY6mofuBtDGGVEoFOTmzqytnUIcqbyeSFu+fDkvvPACL730EmFhYdTV1VFXV0d3d7dnzRVXXMHtt9/u+fmee+7h448/Zs+ePWzbto3LL7+ciooKrr322kmNTenjS3RqDuulT5oQh6W+vj5+/9s7WJIZzabbF/GL01J44dmnSUtN4Xe/+51nuIkY2YsvvUhnVyc7d+3k5Zdf5ve//z3Lli0jIyND7i4Kr9m0aRMt9hbmGXIAUIeoqbXv47zzzx/zuUajEXtDBS6nE//AUE699hEaynay7vnfoY0zoolJBUD1TYJpIgMHBgcetdSM3YNVE5tOXt7IAwcOlclkcm/DbqgY9pjL5aK7vYnakq8p2PgaBZ+/DkBJydhbUoUQQnjf4ICXxsr9/S37e3vobKlHE5MsiTQhponXt3Y++eSTgLs64EDPPvssV155JQB79+4d0ty2paWF6667jrq6OjQaDXPnzuWLL77w9PSZTDHpx7Dxs+cmtL1DCDEzPPPMM+wuK+eJWxcQ5O/DT09J5bL5CfxjbRmPPvQgTz7xOL+5/Q5uuummGdVPYqZQKBTye0/MOO+88w4Bfv7oVO7hAkXVpbR1tnHuueeO+VyDwUCfo4dOez2h2lhi0mYz5+wb2fru48w67ceedaHaWHz9AiguLh735G6DwYCvnx/N1SXoTceNulYdm37QyZ2HarBysjJvA/ba3djryrDXl9PeUIa9rozuTvcNBIVCQXxCImeeeeZhV1EqhBBHq7i4ONQaLfsq8lj/nz+QMnsJtrUvAZCYdSLbd0giTYjp4PVEmsvlGnPN2rVrh/z82GOP8dhjo/dBmSyxhnl89dZfyMvLkxHvQhxGurq6uPvOP3DB7Fgy9GGe4+pgP35zlpGrFiXxl092c8ftv+Evjz3KnXffw1VXXYWvr9d/LQohRpGamkpvfx9X/e3/MMcbUKIgKjKKY489dsznDiaZ7PVlhGrd7SDmnrOc+IwFRCXtnxauVPqg1iVOqFLLz88Pg8FI8ygVaYOTOrVxRra/8wFOp3PSp2BGRUURGxvHptceAECl1mA0GlmwKBuT6fsYjUZMJhNpaWlyA0EIIQ4zCoWC7Oxs9pTtpKu1wZNEA9Cl5JC79rlJnQrd2NhIcXExRUVFFBUVoVAo+MMf/iDvH+KoJ98Yx6BLzcbH148NGzZIIk2Iw8jf//539jU28vOrRx4UogsP4P5lmVx3YhKPfLSb66+/nocefID77l/BRRddNOlfboUQk+OGG27gggsu4L333uPtt99mzZo1/OIXvxjXf7PJycn4+vpirysjPuN4AHx8/UasIAuLSp5w77BZWVY27nAn0vp6OvHxD0SpdFd1ttSUsuaZX2E6/nto9Aa6u7soLy8nNTV1QtcYi0Kh4KuvtlBRUYHRaJQhAkIIcYSZnZPNrldWccrVf+LTZ36Fj18AKbOXEJVs4au21kmZCn3
TzT/jxRdfxN7i7vupUCgI08bQ1lTL4sWLOeuss77TeQcGBigvL8dms5Gfn091dTW33347er3+kOIVYrpJIm0Mvv6BRCdbWb9+AzfddJO3wxFCjENLSwsr7v8jlx0XR1JE8KhrUyJD+PsPZ3HjScn86cPdXHLJJVx4wfn8781V0xOsEGLCdDodV199NVdfffWEqrr8/PxISk6htb58zLUqXTKFBWMPMDiQxWLhnfc/xul08v7fbqC5upikWSehik5i2/v/pL+3G11qDik5SwD35M7JTqSBe+tPXFzcpJ9XCCGE92VnZ9Pyl7+QOud0aoq2UPj5/0jKOpmIeDMAubm5h5xIe/HFFwmNMXPsJZei1qWg0iXj4+vPypuzKSoqmnAi7cEHH+TFl16muKgIh6MHgMCQcPocPYSHh/PHP/7xkOIVYrpJIm0cdOnHsG79u5NaJiuEmDoPPfQQvT3d/GzJnHE/xxIXznNX53DWX7ZMaFKfEMK7Jlo9ajaZyKssH3OdWpdC7kcVOBwOAgICxnVui8VCV3sLPe3NhEclUFO0hbIda+jtaiMmbQ51u7eRnH0KIZoYAoPDyMvLG1dvt5nG6XRSWVlJfn4+NpuN+vp6fv3rX0v1mxBCTIPs7GxcLhdN1cUsuuxOsk69Ao3egNLHl+AwNTt37uS88847pGsYjUbsymjSjjlzyHGNLnnUQTz9/f1UVFRQVFREVVUVl19+OUFBQdx99z2ExRqYe8FtaPUGNPp0QtQ63v/LNeTlTc0UayGmkiTSxiHWcAw7PvwXe/bsIS0tzdvhCCFGUVtby1/+/BhXL0pAFz6+L7+DPi1oJK+qlQ/+df8URSeE8Daj0cCmbavGXKfSJeNyudi9e/e4hxkNTu5srilm0Q9+T13pNnz8/Dnzznco+uINmmuK0ZuOdU/u1Kdjs83sLw9Op5OKigpPwiw/P59deTYKCwro6uoEwD8wmP6+XjQaDXfccYeXIxZCiCNfZmYmPj4+NFUWEJM2m8jE/e9R2ngzOyZh4ECG2cRHG3YMOx4WnUJB4f62B11dXdx7770UFBRSUFhIWdke+np7PY/v2bOHBx54AIPRSF+4kezTrhpyPnVsOnm2DYccrxDTTRJp4xCbPheFQsGGDRskkSbEDHfvvffir3Txk5OSJ/Q8p9PFgx/t5sQTFo97Sp8Q4vBjNBqx1+9loL8PH1+/g65T61IAKCkpGXciLS0tDf+AAJqrS4jPOJ7Tbvgzb/zxInI/epq63dtJtJ6Ij6+/+/x645RN7pwMDz38MHf+4U66u7sACAgKQRObjjo2neyzT0WjN6DVpxOqieXthy6b8UlBIYQ4UgQEBGAwmmisLBz2mDbOzPYdGw/5GiaTiVdfXzVsR5Y6JoXCr1d5fn7hhRd44IEHSLAsRB03j+Pmfh+1LgV1TCrr/v1bCgrcMc7KsrJm8/D3Ca3eQN6a5+np6SEwMPCQ4xZiukgibRwCQlRExhtZv349V155pbfDEQJwT39rampi9+7dlJaWev5ZWlJMeFgYr/3vDcLCwsY+0RFk9+7d/OtfT/GrpWmogg7+BXkkb+2opaC6lZWvPShbuIU4ghmNRpzOAdobq1DHpBx0XVB4BAFBoaNuYfk2X19fjEYTLd9M7oxMyGDB93/DxpfuAWDW6fvvxGv16Xy95S0GBgbw8fH5jq9m6qxZvYZAdSyn3HAHmth0QrWxB/3dqIpNZ5dszRFCiGkzZ3YOn31ZAICjq52mqkKaqgrZt9dG3Z7dh5yYMplMdHe20tZYhXOgjzCtnnX//h3h0UnU19XQ0dFBaGiopx/nST9e4ZmGPUgdk0pB4RbAXUX3+ptvD0vMafTpOJ1OioqKZLCfOKxIIm2cotOPYd16KTsV08vpdFJTUzM8WVZczO7du2nr6PCsjQwNIUkdhtbfn482byE3N5dFixZ5Mfrpd+cf/kBkaABXLkwc9tj7O+sIDfTlBOPwHj69/U4e+biM8849hwULFkxHqEIILzEajQDY68tGTaQpFAo0MaP3ghnJrCwra7/av+3FevLlVOV/QV3pVpKsJ3qOa/UGHI4e9uzZg8FgmOCrmHpWq4Uvd+STaD1hzLUzPSkohBBHmuzsbP776mu8fPvJ2PdVAeDr54fZnMH1f/jDIVd3Db5Xrn7qVhrKcvEPCqW3u4NgdTQAxcXFzJkzB5PJBIC9bs/wRJou5f/Zu++4Ju/tgeOfJISdEPbeCSCgqIAbtVWrrR12WLvnr+vWLtvb27137d5719raaodWq9aBoyq42IS9N2HPJL8/omkpqKhAQL/v18vXvSTPOKGa5DnP+Z7Dzq3L6O7uJioqivaWRlp0ldg5OiOT25C/fyM2dqab/unp6SKRJowoIpHWT96aODZs+oaKigq8vLwsHY5wCunq6qKwsLBnskyrJUerJb+ggPaODsB0UefjpCBQ6Ui40pE5MeEEqpQEOikIUClxtDZVYLV1dRP1zlfk5uaeVom0AwcO8O2yZTx30SjsrP++kGtu7+axVRn8kFSGnVxG9vOze+373e4Simpb+O2554cyZEEQLMDHxwc7O3saKgvNjxkMeprrymmozKehshBdZT4NlQXUVRRQVtY7MX80UVFRrPx5tfmuu0QiYe5/3kYq7ZlgcvYxJc9SU1OHZSItMjISXVUxXR1tyG3sjrqts7eajo528vPzUavVQxShIAjC6euqq64iJTUVTw8PYmJiiImJISIiAmtr6wE5vlqtRiKR4B4YTVX+ATrbTDfvJ164hE2fPWBOpAUFBWEll6OrLMAvcmqPY6i8gunu6qKgoMDcIiFrx0p2r3wVj5AYqvJMvdwUzp6iPYAw4ohEWj95a+IA2LZtG5dccomFoxFGmtbWVvLy8syJstzcXLRaLblaLUUlJej1egDkMhn+zk74O9ozQaXgkkkx5mSZn1KBjdWx7/Tbya3wVCrIyckZ7Jc1rDz80IMEuTmyaIKv+bF9RTru+OYg1U2dTApxJqO8qdd+rR3dvLGxgKuuvJLo6OihDFkQBAuQSqWEhIaSvXMl5do9NFcXUl9ZSHeX6aaFlVxOSEgoMWFhXDrvVm644YbjOn5UVBRtLQ206CpxdPY6dM7e7932Tu7YK1SkpaVx4YUXnvwLG2CRkZEYjUZ0lXm4B0QddVtnH1PyLD09XSTSBEEQhoCPjw9fffnloB3f1tYWP/8A5DZ2RJ95Nal/foVMbkNo7Dz2/LSUrCxT5bWVlRXBwSHoKvJ6HUPlFQJAVlYW8+bNw8bGlpb6cgBzEm1UwqU015WSlpY+aK9FEAaDSKT1k6OzFyoPfxITE0UiTehTfX19j6qy3Nxc0xLMnBzKKivN29lZywlydsJf4cAcNwUBoRPMyTJvhQMyqfSkYwlQOpKbm3vSxxkptm3bxuo1v/P2lWOQy6ToDUbe+TOPV//IZbSvkq9uiuXX/RXkVLX02vfTbUXUt3bx5FNPWSByQRAs4eab/o+lr7xKiKuRiClzCAsLM/8JCAjAyurEvx4dTsjXl2nNibS+SCQSnL01w/Yu/OHqgfrSnGMm0h
xUntg6KElLS+P8888fivAEQRCEQTYqIgJtZQGzb36N+vJc3AOjkNs64OQZbE6kAUSEh3MgvwCjwYBe34WV3Abtrl/xUscht7ElOzub+fPnExYeTrfBwIQF97B71WsABI+bTXH6dlLTdlrqZQrCCRGJtOPgERrH5i1bLR2GMEx8+OGHbN68GW1WFnl5edTpdObnVPZ2BKqUBCgcuDjIi4AYjTlZ5mZvN+jN7AOVjmizs4694SnAaDTywP33E+Wn4rwYL0rr27hrWQq78+tZfGYI95wVilwmpUtvRC7rmaTUtXbx/pYibr75FoKDj9wrSRCEU8udd97JnXfeOSjHDg4OxtbWjrrSHPyjEo66rZO3moMpKYMSx8lSKpV4+/hSV37s6mZTUjCU9HRRUSAIgjBU9Ho9qampZGdnk52djVarJTMri+xsLVdffTVvvP7aSR0/IiKc5NTVWFnbcv69X5gfV3gEkZGZ1WO7bbu+I3n1O+z5+U08Q8ZSmbcfmdwGV58Qc9JtdHQUm/dkMf2qJynJ2EFDVSG+o6bQoqti26av6ejowMbG5qRiFoShIhJpx8FbE0fi1z/T2NiIUqm0dDiCBVVWVnLLLbcQ7ubCaA9XZkSGmHuVBTopcLK17IdAoErBn+m9S6xPRWvWrGH7zp18+X/jWX2wkgd/TMPRxorvb41nUqiLebsuvQG5Vc8E5nub8ulGyqOPPjrUYQuCcIqSSqWER0RQd2hy59G4+GrYtfNHurq6kMuPb9LwUBgdHYW2rH9tApy8xOROQRCEofTSSy/x0EMPAWDn6ITKKwSFeyB2HvDNN9+edCItLCwMXdV7GPTdSGV/pw1UXsEc/P13cy/Q8PBwdNUlSKSmbSrz9gMw8aJ7qczdZ066RUZGsvKX1UgkUs679wtz2wNnbzV6vZ7s7GxGjx59UjELwlARibQ+NNWWYmOvxPrQFJHDvMPiMBgM7Nixg3nz5lkoOmE4cHNzQ25lxeWjw7gmZpSlw+kl0ElBnU5HfX09zs7Olg5n0BgMBh564H/EBbvwy74KViSXcd5YL56/OBInu54XpZ3dBqz/UZFW0dDOp9uKue9/D+Dp6TnUoQuCcAobMzqaDTuOXWnm4qOhq7OTnJwcRo0afp8lUVFRJB1c2a9tXXw07P3lNwwGA9IBaFEgCIIgHJ1Op8NR5c7CJ1Zj6/j3933trl/Z8NESdDodKpXqhI8fHh6OvruLpppSnDwDzY+rPINpbWmmoqICb29vwsLCMBqNBMXMorY4k9ykNQCEjJ9LR4uOrKRVwKEeos0NtDZU43Bo+ieYJj+Dqc+mSKQJI4X4pvMvpZm7+OGpBax84fJez6k8g3FwciMxMdECkQnDiUwmIzgoiEJdo6VD6VOAylQxear3Sfvuu+84mJpGaV0Lv6dW8uqiaN65ckyvJBpwaGnn3xVpb2zIw97Rkfvuu28oQxYE4TQQFRVFXVkORqPxqNu5+JqmdQ7nPmn1VUV0d7b3+byusoBt3z1De3M9zj5q2tvbKCgoGNogBUEQTkFdXV188803ZGRkHHGbsLAwWhpqsLKx7/G4ytPUrkSrPXZl9NGEh4cDoKvsucpF5WU6/uElm4e3a6jMZ/pVT+Lg7IVvxGQUrj6oPEOorCijqanp796b/2oZYOPghMLZY9h+FgpCX0RF2j9kJP7A1q8fw6DvxlsT2+t5iUSCZ+h4tmwViTQB1BoNRVnD8w0/0MlUTZmbm0tcXJyFoxkcnZ2dLL79diQSU5LstctGo/ZwoLC2DWsrCXKZFFcHa6RSU/KsS28w90jLr25h2a5SXnjxRZycnCz5MgRBGMaMRiM1NTUUFhZSU1PDWWed1a9qq+joaDrammmuK0fh6nPE7ewUrjg4uZKWljYsBxlFRUVhNBjQVeSh9Ajg9zdvwcU3jKCxs2jRVZH47ZN0d7TirY7FM2QsYKooCAkJsWzggiAII9xbb73Fvffei5WVFS+88AL33ntvr20OV4I1Vhfh4qMxP364ekyr1RIfH9/n8Y1GI2VlZaSmppKWlkZaWhoHU1Jpamril59XERYWhq+vL7a2dhQc2ERzXTm6inwaKvNprMoHTIPWANzd3VEondBV5hMSO5erXtxsXrbp5BlkjmXMmDFY29hQX5aD36gpPeJReavF5E5hRBGJNMBg0PPXipc58McnRExbiPavn/EKHd/ntl6aOPasekU0QxRQazSsSdpt6TD65GRrg8rejpyc/vW2GYk++eQTmpsaMBqhprmTm7/Y32ub/52tYfEs0wVdZ7cBayvTBfDSdbl4eXpw++23D2XIgiAMM0ajkerqagoKCtBqtezdu5f29nYK8vMpyM+loLCI1ra/q7Fef/117rrrrmMeNyrKNOWyrkx7xESavruLxqpCbB1dSB2md+EPLzetK8/BysaeqoKDlGXvJnXT1wDYOjpj0HfhH52A3MYBGztH0tLSOPfccy0Z9nFpaGhg5cqVzJ8/H3d3d0uHIwiCQEVFBY899hhzxs7gYEE6Tz7xJI6Ojlx//fVYW1ubt9NoTMmzhsqCHok0azsFjip3srOz+zx+W1sbsXHxZKSbPnvkNra4+Khx8tKQm7OatWvXEhYWhlQqZdLkyWzetAwrKysCg4KJDg8n4qyFREVFccEFFwCmYpOwsDB0FaYE2+EkGoDKy/Q9PCsri/Hjx6PRhFFXpqWzrYm6shzqy3OoL8tFV1lIlrRpAH+LgjC4TvtEWmd7Mxs+XEJRyhamXvYIXurxZG77AW9N31U8Ppp4Ojs62LNnD9OmTRviaIXhJDQ0lOL6BgxGI9JBnsJ5IgJVylN2aWdLSwtPPfE4Z0V5cN88NV3dRjr1Brr0BvP/X5taxSvrcpimcWVsgJN5aWdaaSO/7C/nww8/xM7OztIvRRAECyktLSV23Fgqq2sAkABGwN5aho/KlnEBTlw8OxB/Fzv8nO1Y8n36UZfY/FNAQAD29g7Ul2nxj5pGY3UxdaXZ1JdpqSvT0lCeQ115HvruLgBCQy4epFd5clQqFZ5ePtSX5RA28XymLnqYLV89Suy5t+MZOo79az/CSm6Lta0jYOpzM5wmd7a3t9Pd3Y2jo2Ofz1dUVDD3rLkcTDlI7PhYNm3ehEKh6HNbQRCEoWI0Gunq6mJPzn50zQ24KJy59dZbUalULFq0yLydp6cnDo4KdBX5ZCT+gEdIDNpdv5K7ZzUKV78jJtL0ej0Z6WmMmX0d0WdehdLNH8mhamtdWWaPJZZrVv9GUVERISEhRx2KEzkqgg07U3s9bmNvSur9c3Ln99//QPqW7wBTEi4wKJgZk8dzyy03H/8vSxAs5LROpDXVlrLmzVtorivlnLs+IiB6Ogc3fI7Myhr3wKg+93H1j8DGzoHExESRSDvNhYaG0tHdTWVzK94KB0uH00uAwoGcI3yAjnRvvvkmtXW1PHTjVAJc7fvcZorahZSSBu749iBr75lsXtr54tocwtShXH/99UMctSAIw0lxcTGV1TU8cm44UzUulOvaueGzfVhbSSmuayOnqoV50R7cPCMIgEAXGwry+zcNW
SqVEjFqFHtXv0vSz6/T1dkBgJPKmejoKGbPP4OoqMVERUURFRWFh4fHMY5oOdHRkeQdmtw5avoiitO3kfLnV4SMP4tybRIJVz5u3tbJW83BlN4XUkPpk08+YdmyZWizsikuLcHL04v0jPReDbdzc3OZM3sOjXUN3HPBrXyw7gsuXHAha35f06PiQxAEYah5e3uTmprKxRdfTFNaGpJDN+xdXV17bCeRSNBoNFTmHSB/3x89nvMMGW+elvlvjo6O+PkHILWS4+QR2OM5Jy81Kf94H7ezszP3QDuasLAwflz1W5/POXkEmZN6L730EuHh4Wg0GiIjI4mIiBA3toUR6bQdNvDXT6/w4zMX093ZyoUPfk9A9HQAyrXJeASPQSbve9mmVGaFZ8g4too+aac9tdo0YWa4DhwIdFKQewou7ayvr+elF1/gyol+R0yiAchlUt66cgxVjR08/nMmnXoDLR3dbMqo5pnnnsfK6rS+jyAIp72goCAAgt3tifZVMifKg7nRHtjJZdxzVigA2ZXN5u39nO0ozM/v9/HfeP01rr3qcl5+6UXWr19PWVkZ9XW1bEtM5L333mPx4sWcccYZwzqJBhAdFUVjham6WSKRMPOaZ7G2deCXpddgNOgJijnTvK2zt5qszEwMBoOlwuW+e+9DezCLOP8xLJp2AeUV5b0qCTdu3Eh8XDxtDS08f/XDzIiazAMX3cnGPzfywgsvWChyQRCEv2k0Gp566in0BgMqeyVymRUvPN/7/WlURDgdzXUEjJ5hfsw/ahqufmFotdojDr0ZM3o09aW9b7g7+2hIS08/5rCcfwsPD6e1qZ725vpezyk9g0nPyDTF5u/PE088wZVXXsm4ceNEEk0YsU7bRFr6lu9QeQVz0UMrzGvKjUYjFTnJeB1hWedhXpo4tm3fjl6vH4pQhWEqKCgIiURCYcPwXM8fqFJQVllJa2urpUMZUC+88AKd7W3cOfvYzayD3Rx4asEolu8uZXdePbnVrYwfG8PFFw/PZVSCIAwdT09PbG1sKKlrA8BgMJJS0kh5QzsvrNEyJ9KdG6YF8sq6HO5ZlsLGjBrKKir6ffxp06bxwQcfcNdddzF79my8vb3NVQUjSVRUFHWVBei7TFV1Ng5OzL7pVTrbmvAIHoODytO8rbOPmra2VoqKiiwVLsFBQYT7hnLNGZdywcSzgZ6T67755hvmzZtHva6emsY6mtta6NJ3s/7AFiQSibkvnCAIgqWVlJQgk8l46or/4eXigX+Af69twsLCaKwu5MwbXsRO6QZASOw8VF7BtDQ3UVVV1eexo6OjaKjo3QLGxUdDY4OO8vLy44r1cNVaadYuitO2kfrn12z79ilWv34DhQc2HDEOQRipTtuSjOtf24XRaEAq+/tX0FhdRGtDNd7q3hM7/8lbE8fuVa+TkpLC2LFjBzlSYbiysbEhwNd32FakBTgpAcjLyyM6OtrC0QyMsrIy3nzjDW5O8Mdd0b9hH5fG+7Aps5rVBysBeP7Fl/o1dU8QhFObRCLB38+XVfvKOVjSSE51K+W6dmRSCXqDkfXp1axPr8bb04PAoCAmz5lhbqx8OomMjDw0uTMfV/8IwPQ96KKHV/RIogHmG5NpaWnmir+hFhYRTupfBwCwldvgqnQxJ9Jee+01lixZwqwxCcRrxvLST2/z4FfPEu4bSmZpDt9///2wnJ4qCMLpSafTodfr+d9XT1NSXdbnZ1BYWBjNumpkVtbM/r+l5O/bQPC4ObQ11gKQnZ2Np6dnr/2ioqLQVZfQ1d6C3NbUoqa1oQalmylZl5qaio/PkadO/5tarUZubc0f790BgJVcTnBwCGMjIgg/+8bT8vNTOLWdtok0iVSK5F8FeeXaZJBI8Aodd9R9PYJjkFnJSUxMFIm001yoWk1RaYGlw+hTkMrUMDk3N/eUSaQ99dRT2FnBLYd6FvWHRCLhuYsi2ZJVi7d/EHPmzBm8AAVBGFGuufY63nj9NQ6WlOPm5s4lCxeSkJBAREQEQUFB+Pv7Y2tra+kwLSoyMhIwTe48nEgD8Aga3WtbRxdvbOwcSE9PZ/78+UMW4z9pNBp+XrmK+794irK6ClPFWVcXGzZsYMmSJZw1dia3nX0dEomEy6dfzDdbVqCtzOf3tb9z5plnHvsEgiAIQ+S+++4jKiqKX375hfbWtj6/w/49ubMQv8ip+EVOBUBu64BEIiE7O5uEhIRe+x2eLl2Umsifn97PuLNvYs/Pb5r2tbYhLS2Ns846q9+x2tvbk7RnDyUlJYSFhREUFCTaqAinNPG3+x8qcpJx8dFg4+B01O2srG3xCBrN1q2J3HHHHUMUnTAcqcPC2J6ZduwNLcDN3g57a2tyTpE+aVqtlo8//pgHzg5FaXfkqUF92ZxVQ3NHN19+9dWIXFo13BgMBsrKytBqteh0OnGXURixbl+8mHffeZuxASoqGlv4fc1qZs2axezZs8V7xSHOzs54eHpRX6Y95rYSiQRnb8tO7rzmmmvYv28/Hp4eaDQaUlNSef+993jxxReRSqTsy0+hoaWR2uZ6VievR6NWs3rNGvPFqCAIwnBha2vLhRdeyIUXXnjEbQ6/d+kq83EP+vvGuZXcBid33yNO7hw1ahQSiYTc5N/p7mwzJ9F8wiei72jpMbmzv8aMGcOYMWP6ta3BYKCoqIj09HTS09NJS0sjJTUNa2trVv/2K87Ozsd9fkEYSiKR9g/l2iR8wif2a1svdRxbtv6C0WgUX7ZPY6GhoSzTNQ7LvwcSiYRAZydyc3v3PxiJNm7ciF6vR9faRXN7N462/Xv76uw28Mr6fBZccD6TJk0a5ChPHUajkaqqKrRaLVqtluzsbLTZWrKzssjJzaGtvd287SeffCKWQwkj0l133klLk473bp6Mo40VT/6SyR2338Ybb7zB2rVrCQgIsHSIw0JUVBSFZf27KePkFWrRyZ0ajYZff/vV/POUyZNxtlXxn7OuJ6M4m192r+PRZS9S01hH9Oho1vy+Bjc3N4vFKwiCcDJUKhWubh7oKgt6Pad0DyIrq+9Emr29PYFBwTg4eRIYcybFqVsx6LsZlbCQ4rRtg/o+/tHHH3P3XXfT2toCgI2dAy4+auydfcndsYZdu3Yxb968QTu/IAwE0SjokLamWnQVeXhrjt4f7TDvsDiqqypOmSSFcGLUajVN7R3Ut3dYOpQ++SvsezRZHsmuuOIK7rnnHj7eVsq0F7fzaWIhHd3Hngz37a4SimtbeObZ54YgypHPaDRy1ZVX4aR0wsvLi4SEBG644QY+ff9jcvZl4mvtzuXTLuThhffwzi0v4uXiadHqE0E4UevXr+err7/msXPD8HayRWFrxc3Tg+jSG9FmZRIdFcmnn3563JPLTkXRUZF9NqXui7OPmsyMjGHze4uMikImkzIxbDzTIidiMBpoN3Yy44wZ/LnpT5FEEwRhxAsP09BQ2XuqtNIzmMysrCPuNzo6Gl1FDmdc9zx2ClckUhkB0TNw8VGTMYjv44lbtyKzVTL/7k+46sUtXP/mPhY8uII5N7+GtY2d+F4pjAiiIu2Q
ipx9gKmBbn94qWORSCQkJiaiVqsHMzRhGAsNDQWgUNeIi93w66MT6KRgwymSSFMqlbz66qvcfffdPPHEEzz5xRd8tK2YJbODuSjWB5m0d0VgS0c3b24s4Jqrrzb3ghCOTq/Xs/z75YwPGcMZ0VPxcfHCy9kDG7l1n9v7OHsecdmAIAxn9vb2WFnJ+GR7MdG+CkZ5K/gjzTRVTCoBL3u48cYbWfHD93z08Sf4+vpaOGLLiYqKov6dd9F3dSCTH33Qi4uPhtbWFoqKiggMDByiCHvasmULb735FpmZmWRnZ2E0wo6MPeRWmC40X3n1Fa6++mqLxCYIgjDQIiLCyfx9M6mbvkFXmU9DZQFNVQU0VJcQcJT34ejoKLbs+Bw7hQvn3/cVBn03to4qnH00NDc1UlJSgr9/70mhJysyMpIfflyFf1RCjxU9EqkUF59QkUgTRoTTtiItbcuyHj+X5yTh4OyFo0v/ppPY2Ctx8w9n69atgxGeMEKEhIQAUNjQZOFI+hbopKCopISuri5LhzJgAgIC+PTTT0lNTWXijLNYsjyVs177i7Wplb3unH2SWEhDWzdPPPmkhaIdeaysrAgKDMJL5c7kiDgCPfyOmEQD8Hb2JCvzyHc7BWG4mjp1Knv2JCF39mP+G38x6dktvPi7Fn9nOy6f6Ed+TSvPXTyKvTsTiYocxZdffjlsqqyGWmRkJAaDHl1V4TG3dfYx3Vy05IXQk088ycZ1G/C38WBOzEy69d28tPJtNmf9xcwZM4+rgbYgCMJwN3PmTOrK89i5/BnaCncyJlDJDVddwnvvvcsf69Yecb/o6Ggaa8vpaG1E5RWMi6+p39rhCcypqYOzvDMyMpL21iZa6it6Paf0Ulu0PYAg9Ndpm0hL/OZJSjJ2mn+u0CbjrYk9rj5XnqFxbNmaOBjhCSOEQqHA092NQl2jpUPpU4BKiV6vp7Dw2Bc/I82oUaNY8eNP7N69G//IOG76fD8XvLOHHTmmcd/1LZ28v6WIW269laCgIMsGO8KEhYdRXl/Zr219XDwpKChAr9cPclSCMPDGjh3LnuS9PPDgQ1Q3dwJQXN/GFzuK6TYYqWzoYP29E5mlUXDttddy4w03WDhiyzg8ubO+9NgVzgoXH6xt7S2aSAsOCcZd5cqtZ1/HzXOvxtPZnVtuuYWa2ho2bd6Ep6enxWITBEEYaFdffTXl5eW0tbWhzc7i119/YenSpdx8881HHaRyeLVG/b96YCpcfbG2tT+hgQP9YT5vee/em87eg7usVBAGymmbSPMbNZl17y1GV5FPd2c71YVpeKv7t6zzMG9NHPl5uVRU9M6mC6ePkFA1RcO0Ii3ISQFwSvfyi4+PZ8OGjWzYsAEr12AWvZ/ElR/t5bFVmRgkMh555BFLhzjihIWFUa6r6te2Pi5edHZ1UlxcPMhRCcLgsLa25plnnmHnX7uJCNNgJZNwSZwPTy2I4P+mB+Fsb81ri6IJ91aye/dflg7XIlxdXXF396Suj4uefzIajbToKnFQeQzaBVh/hIWFUVpTzqq/1vDyyndobjMtNT2spaWF1tZWi8UnCIIw0Ly8vLCyOr6uTeHh4UilUur+NZXZtMRSPWjv40FBQdjY2FL3rwReXZkWFx81zU2NlJWVDcq5BWGgnLY90ubc8gY/PbeQNW/dzORL/odB34WXevxxHcM7zJR4S0xMZOHChYMRpjACaMLCSN1YYOkw+uStcEAuk5GTk8PcuXMtHc6gmjVrFrv2JLFy5UoefvABtu7T8uijj+Lh4WHp0EacsLAwyusq0Rv0yKSyo27r7Wyq7MjJ6d9EP0EYruLi4th/MIXHH3+cl19+GW1VG1PVrqjs5XySWEBWeSNrPvnO0mFaTGRUJMWHLnqMRiMt9RXUlWmpL8uhriyHhvIc6stzaG813ViyZNVXYWEhrR1tfP7ncvNj7e3tnDHzDLKzsiirKCc4KIis7GzkcrnF4hQEQRgM7e3tFBUVUVhYSEFBAaWlpdx00029en3a2toSHBJKXWnvXrdOg7jEUiaTER4RQX1ZDonfPkVRymY8Q8ai3fUro2dfB0BaWtpp3ZtUGP5O20Sajb2Sc+74kB+fu4QNHy1BbuuAi1/4cR3DQeWJs2eASKSd5kJDQ1nz0/CsSJNJpfg7O53SFWn/JJFIuOiiizj//PNJT08nOjra0iGNSBqNBr1eT5WuBm+Xo18Muzu5YSWzOm3+jgmnNhsbG1544QXOPfdcrrrics56dQd+LvYU17UDsOTe+zj77LMtHKVljI6OYtdnX7Hq+YXUleXQ0dYMgK2tHeEREcyeEk1k5KVERUURGRlJcHCwxWLt7u7G3cmVBy6+E18Xb255/z42b95MqFcQU0Pi0Acb+GnnagoKCo667EkQBGGk2L17N3fdeQd5uXlU1dSYH5dKJUglEvLz8/niiy967Tdm9Gh2pWdQVZBCXamW+jLTn+qCg6iUDoMW7+joKP7clU75oYF/jdWmlQ2BY2aSuXUZ6enpA9rPsra2lvr6+h5DAltaWrj8ssvJzc1lze9rLDYgRxiZTttEGoCTZyDz/vM2v756Pb6aSUiPUXnRF4/QWDZvEQMHTmdqtZqa5haaO7twtB5+d7b9He3JOUUmd/aXlZUVY8aMsXQYI9bhC8uy+opjJtJkUinerp4ikSaMaM3NzezcuZPExES2bE1k965dtLe3YWVtS5drNOOnTMRo0JP82zuUlZXh49O/wUSnkhtuuIGU1DQCA/yJjLzMnDALCgpCKh1enUIiIyP5ovULgjwCkEml+Ln6kKcvZExQJFfOvITqhlp+2rkarVYrEmmCIJwSvv76a1IP7OPcMR5893cejc+uH8dPe8soyOv7e9q4cWNZufInfnzmIgD8AwIZHR1N9Nk3sWDBgkGLNyoqih9X/coVz//JiqcuQN/VgZW1Hb7hEwd0WWl9fT3xcfHkHnr9v/76K+eeey4lJSXMP2c+2mwtCjsHzpozh9179uDk5DQg5xVOfcMikfbOO+/w8ssvU1FRQUxMDG+99RYTJkw44vY//PADjz76qPlO4osvvsg555xzQuf2CZ/I5c+sw8H5xJYgeGvi2fLlKhoaGsQ/vNNUaGgoAEW6RiI9XC0cTW+BTgr2nGaJNOHk+Pv7Y2NtQ3ldJYQee3tvJw+xtFMYsZqbm1Grw6isLMde4YxnaCzjzrsLb00cbgGRyKxMN0haG6pJ/u0dEhMTWbRokYWjHnrjxo1j86Y/T/o4ra2t2NjYIJMd/83L/oqMjKSzq5N9eQdp7+ygvaMduZUVZXWmISquSmes5dZoxWejIAiniKCgIAxGuDEhkO92lwKgspfz+fYiMipacHTtu+fYAw88QGRkJAEBAYwaNQpHR8chiTcyMpL2lkas5NbMuPpp1n94N6FxZyOVWaH0DCU1dWASadXV1eTm5XLp1AtYf3ALu3btIjQ0lDPPPJOKigqi/MNwcnRiR8Ye6uvrxfW80G8Wv4W4fPlylixZwuOPP87evXuJiYlh7ty5VFX13eh6x44dXH755dx4443s27ePBQsWsGD
BgpMaz6t090dmZX1C+3pr4jAajezYseOEzy+MbIcTaYXDdOBAoEpBXn4+BoPB0qEII4RUKiUkJISyuv4NUvF28SRHKxJpwsgkk8morash9tz/cM0rfzFv8XuMnXsjniEx5iQagL2TOy5eQSQmimndJyopKQlXFxfGjhnNL7/8MmhT2aKjo7GysuKZ719j6ap3aTa0MioykqKaEjalbGfZ1pVYSWUikSYIwikjIiKC1o4u5ryyA8mhxxTObhjcI5h7waW8/c57fe4nl8u5+OKLiY+PH7IkGvw9DbquVIt6wnwueWQlCVc+DoCzj5r0AZrcGRwcjEwm40BBKu0d7ezdu5eJEyZiY5QzIWwc6SVadmXv5ZtvviEoKOikzyecPiyeSHv11Ve56aabuP7664mMjOT999/H3t6eTz/9tM/t33jjDebNm8d///tfRo0axdNPP8348eN5++23hzhyEyfPIByc3MQX69OYq6srTgoFhbpGS4fSpwAnJR2dnWL6zSnAaDRSVlbGli1b+Oijj7j//vu5cMECXnv1tQE/V3h4GOX1/Zzc6exFcYmY2imMPDqdDoPBQFxsHLqKfCTHWKIo2jmcnAcffABvJ2uUXdVccMEFTJ0ymW3btg34eXx8fEhOTmbbtm3U1tZSUVnJHXfcQVltBW/8+iGJObuImxDPddddN+DnFgRBsIRp06bh4eaKnbUMI3DBBReQlpHF5q2JfP7558Nu6FhISAjWNjbUH5oG7R4UjbWdAgAXHw2NDTrKy8tP+jxyuZw777gTJx8XgtUhrFmzhpaWFmQSKVUNtTg6OLJy5UquuOKKkz6XcHqx6NLOzs5OkpOTefDBB82PSaVSZs+ezc6dO/vcZ+fOnSxZsqTHY3PnzmXVqlVHPE9HRwcdHR3mnxsbBy7hIZFI8FTHsmWr+GJ9upJIJISEhFA0TCvSglSmD6WcnBz8/PwsHI3QHwaDgb/++gutVkt2djZarZaszCxycnNobW0FQCqR4uHsjgwJa9eu46677xrQPkVh4eH8ldjzfbits53y+krKaitM/1tXSbmuktLaciQSyRGOJAjDU2JiImfPm4e7uwfzzp7H19/9iNFoPOrfZW9NHJu/+AmdTodKpRq6YE8BGzduZMOGjXx47VjmRXuwNbuWx35O47xz51NTWzfgSz3/3SfzyiuvZPLkyXh4eAxp1YUgCMJQeO6556itq2PT/VO5/asD/Pzzz6hDQli2fDlnnnmmpcPrRSaTERYWTn1Z7xUNzj6mgQDp6ekD0pP01ddeJTs7m1tvvZXGKh3uShdSCjPx8vJi1+5djBo16qTPIZx+LJpIq6mpQa/X9xqR7unpSWZmZp/7VFRU9Ll9RcWRlyA9//zzPPnkkz0eu+3jgSvn99bEsfunl2lvb8fW1nbAjiuMHJrwcAr/Gvi76gPBT6lAIpGQm5vLzJkzLR2O0A+ffPIJN998MwBuTq74OHvipfJg/OTz8XH2xNvFCy+VO3IrOUk5+3nm+9coKSkhICBgwGLQaDRU1lfzzppPKa+vory+ktrGOvPzzipnNBo18TMncYVGw7hx4zjvvPMG7PyCMJgSExOZN28ewe4B1DbV8dOPP9Gsq6axqggnzyNP7fLWxGM0Gtm+fTvz588fwogHR0dHBwUFBeTk5JCbm2v635wcSktKeOudd0hISBiQ8xiNRh564H+MDXQmwMWO9i4DU9QugIQpU6YMar+0fwoJCRmS8wiCIAy1iIgIjEi45pN9FFS3ANDapGPWrFn85z//4cUXXxx2NxHGjI5m856sXo8r3f2xktuQnp7O7NmzT/o8P/zwA5deeqn555KaMtShavbt3zfsfifCyDEshg0MtgcffLBHFVtjYyPPrWsfsON7a+Lp6uwkKSmJadOmDdhxhZEjNDSU7X+ss3QYfbKxkuHjpBRTFUeQ9vZ2ZDIZX9/9DnY2dkfd1tvZCwCtVjugibQzzzyTiPAIqo0NRE0awwKNhrCwMDQaDRqNBlfXnoM1BrLSVxAG0+7du5k3bx5qjyAeuuRumtqauevjhwEo1+45aiJN6RGAo8qdxMTEEZNI6+joIDMzs2eyTKslR6uluKzM3IPGxsoKf2cnAhUOlFXW8NVXXw1YIm3VqlXsTkpmwTgvzn59J1PVLpwb40VeVRM/Pf/CgJxDEAThdHbdddcRHh7ORQsuAEyJtC69gf/OU/POJx9RVlLMyp9/sWyQ/xIZGcnKX1b3qgaXSmU4ewcP2OTO2tpaAC6eNJ8f/1rNAw88wK233oqDg8OAHF84PVk0kebm5oZMJqOysrLH45WVlXh5efW5j5eX13FtD2BjY4ONjc2/Hh24RJqrfwQ2do4kJiaKRNppSq1WU9bQSEe3HhurobmzfjwClA5iquIIotFo0Ov1NLW3HDOR5qlyQyaVotVqmTVr1oDFEBISQnpG+oAdTxCGi127dtHa2sq8cWdiI7dmxY5NtHa04e3jR7k2iYhpl/S5X2tDNXZKNzzVcWwZQX3SrrnqKr5fsQIAha0NASolgQoHzvF2IXBUEIFOCgJVSjwd7ZEeupBZvHoTOdnZA3J+vV7Pf+9dgpOdnN8OVOKusKGj28DrGwq4/LLLiImJGZDzCIIgnO4mT55MXkEh999/P2+//TYhHg64OFjTpddjZf3va2HLi4yMpK25gfy9f9DaUE1deQ66Mi26ilxaGmrp7Jw0IOeRy+VIJBJ+/Gs1UomEH1f8yDvvvMPUKVP5fe3vA3IO4fRj0USatbU1sbGxbNy4kQULFgCm3kAbN25k8eLFfe4zefJkNm7cyN13321+bP369UyePHkIIu6bVCrDM3QcW7du7dHvTTh9hIaGYjQaKWlsItRFZelweglQOpI9QBdFwuDTaDQAlNVV4OHkdtRtrWRWeLp4iv++gtBPt9xyCxs2bOC1Xz9ge+ZutmfsZunSpeTn57Psx9UAGPTdSKQy8x3y/P0b2fDREiKmXIS3Jo6/VrxAW1sbdnZHT3QPB9XV1Uz29+bNs2fiYmfTr36GgSolqwfo5surr75KfkEBBiOM9lXiYCOjsrGDmuYOnnr66QE5hyAIgmCSnp5OYGAgU6dO5a9du3nwR9NN0a1btx2zD+hQGzt2LHJra9a9txgruRy1WsOUmGiiLj+HqKioAVnWCaZEmtFo5P/mXMXH67+mtLiEcSGjWbtuLS0tLaIyTTghFl/auWTJEq699lri4uKYMGECr7/+Oi0tLVx//fUAXHPNNfj6+vL8888DcNdddzFjxgxeeeUV5s+fz3fffUdSUhIffvihJV8GXuo4tm38BL1eP2S9PoThQ602NcUsbBieibRAJwW/H8gadh+gQt8CAwORy+WU1VUyNjj6mNt7O7mLRJog9JO1tTU//PADCxcu5JdffmHp0qXce++9LF++nHfeeYfWhmq2L3+W8uwkvNTjsVO4krr5GzAasbZT4K2Jo7uri927dzNjxgxLv5xjUoeFsSMrHVf7/vdwDXBSUJyUQkdHRx8V/f3X0dHBW2+8jpvChmA3ezLKmmhs70Yuk/J/N91s/u
wUBEEQTt7BgweJi4tDbmOLZ8g4xp19K95hcXS0NPDH+3eSl5dHaGiopcM0Cw4OJi01Fb1eT2hoKHK5/Ijbrlu3jgMHDnDrrbeiVCqP6zyRkZEAbE3bAYDeaGBMUCTbM3aTk5MjKqOFE2LxRNqiRYuorq7mscceo6KigrFjx7J27VrzQIGioqIek+imTJnCt99+yyOPPMJDDz2ERqNh1apVREcf+2JzMHmHxbF71WukpKQwduxYi8YiDD1vb29sbWwo1A3PPlEBKiWNzc3U1tbi5nb0CifB8qysrAgKDKK87shDVP7J28WLrCyRSBOE/rK2tubHH3+koaHB3O/vcD+wcm0ScltHWnSVlGb+RVd7C+q4c8jZsxovTSwufuHY2itITEwcEYm00NBQvtM1HteNlCCVEqPRSH5+PhERESd87vfff5+y8go23jeFUA8HNmVUc80ne9Eb4bHHHjvh4x6vE7nJ2dXVRWZmJmVlZcyZM2dApyILgiAMtIqKChITEwGYc8ubBI45w/xcR2sjEomExMTEYZVIg79XYRxNRUUFF124gNa2dl568Xlef+Mtrrrqqn6fY/To0Vx99dU0NDQwzi6e5cuX897vn6NQKHB2dj6Z8IXT2LD4VrB48WIKCwvp6Ohg165dTJw40fzc5s2b+fzzz3tsv3DhQrKysujo6CA1NZVzzjlniCPuzSM4Biu5tfkNTDi9SKVSgoOCKGpoGpLztXZ18fimnbyz+4C5UfTRBDkpAESftBEkLDyMsvrKY28I+Dh7UlBYQHd39yBHJQinDisrqx5DM3x8fAgMCqY8ew8JVzyGT7jpu8ilT/yGf9Q0kEjwCh13qJ3DeLZsHRl90tRqNU3tHdS1dfR7n4BDnxknO6Tm448+wM/ZFmsr09fNaD8l1lYybr75Zry9vU/q2Efz119/ccMNNzB1ylQ83D3w8vTqd9VuQUEB42LG4uDgwJgxY5g3bx7vvffeoMUqCIJwssrKypieMJ3Fixdja2tPec7eHs/b2Ctx8w9nqwU+tw4PvPntt9944403uPPOO3n99df7df1y2NNPP40VBlYtnoijpJOXXnz+uGKwsbHhyy+/5Oeff+a7775jx44dHDhwgIqKigEd1CWcXoZFIu1UYCW3wSNoNFu3ikTa6UoTFkaBbvATaXvLqpj/zc98dSCTtdqCflUYBKhMJdBicufIERYWRoWuql/bert40tXVRWFh4SBHdfp55513CAoKwtbWlokTJ7J7925LhyQMopkzplOZm4zMypq5t72Fjb2Ste/+h8KULbj6hmNjb3ov9dLEsWPHzhGRvD5cfVDU0P+KaU9He2ysrE765ssjjz5Oh8yBGS9t47FVGTy3Oht7Bweee+65kzrusdz/3/v5cfkK5I1GzoyYSk1tDdu3b+/Xvn/++Sf7Dx7g8mkX8uxVDxHiFUhSUtKgxisIgnAyFl6ykNrKas4eP4v29lYqtHt6PG80GvEMjWPLEFynGo1Gnn76ac6cOZMAX1/s7OwYNWoU5513Hg/89z5++OIz7rnnHmpqavp1vLy8PN5//z26uru5/tO9FNW1ccut/zmpGCdPnsyYMWOwt7c/qeMIpzeRSBtAnmrTG9TxZNiFU0eoWk1xc2u/t1+ZkcOvWXm0dHb1a/tOvZ6Xtydz6Q9rsDm0ROWcsOB+7etoLcfN0UEk0kaQsLAwKuoq6dYf/UK9rbMdvUEPgFarHYrQThvLly9nyZIlPP744+zdu5eYmBjmzp1LVVX/EpzCyNHZ2UlXVxcJCQlUF2XQ0dqEraMzZ9/xAS31leQlr8VLE2ve3lsTR2tLMwcOHLBg1P1zOJFWeBw3eqQSCQHOTif9mbFo0SJy8vJ58qln+OlgPSuSynjgwYcGfSlNQGAAfm4+3HPBrVw+/UI8VG79fn8cM2YMANGBEUQFhBPsGciB/fsHMVpBEISTExQURFNbC1vTdxIYGEhVfgrN9RUUpSZSU5TO+zeF0dJQRV5uDhUV/WsbcqIqKyt57LHHaNJmcq6vG8/NmsKyS85m+42Xkvqfq/jkPNOE+f7eqLnm6qsxGIy0dRmob+0iKMCfm266aTBfgiD0i8V7pJ1KvDVx7Pv9A3Jzc0UD3dOQWq2muL4BvcGA7Bi9VNq6unl80180d3ZhayXjzGB/zgkL5owgP+zkvf9ZZlTXcd+6RHLqdNwzeRy+SkeWrN3K/H4m0sC0VEcs7Rw5NBoNeoOBqoYa3JQuVNRXUVZXSVldBeX1lZTVV1JRX0VtYx1gWqZ2uLekMDBeffVVbrrpJvPwm/fff5/Vq1fz6aef8sADD1g4OuFkpKam8uKLL5KXl0taSgoNTc1MmzKJTz77AqPRSEXuXgJHz8DZO5S5t73F9uXPETL+LPP+HkGjsZLbkJiYSGxs7FHOZHkKhQIPN1cKj6MiDSBAYU/OACTnHRwcePDBB7nlllvYs2cPZ5xxxrF3Ogaj0Uh5eTmenp599j7TaDSs+WW1+WdvZ0+02f17LVFRUUilUtYmb2RN0kYOFKTR2NbEZ599Zn4vEISB9M477/Dyyy9TUVFBTEwMb731FhMmTLB0WMII8tnnn2FtbY1Op2PJvUuYPn066969nar8g+ZtOtuaAdi2bRuXXHLJoMXi4eGBrY0NZ2sCuX5cVK/n/9k6YPLkyUc91oEDB9i+Ywcqezm6VlPhwVPPPIu1tfUJxVZfX49EIkGlUvX5fEtLC6mpqUilUuLj40/oHMLpQyTSBpCXery5kaNIpJ1+QkND6dLrKW9qwe/Qh8SR2MmtuG/KeJ7YvIsJvl4U6hpZvHoT9nIrZoUEMF8TxIwgX6ykUj5KTuW1nfsIdlay8rJzifRw5dZfNzLa0838YdQfAQoHcrSiIf1Icbj56oNfPUtja5O50tXR0RFNqJroSTFcFBaGRqMh7ND/uri4WDLkU0pnZyfJyck8+OCD5sekUimzZ89m586dvbbv6Oigo+PvHlSNjcNz8Ihg8sILL/DLj9/j7WRDc0sL58Z48duOvwgKCsLN3YNybRKBo02DBPwip7LoydU99pfJbfAMHsPWrYncfffdFngFxydUraGoofq49gl0UpI4gFWuLi4uzJ0794T37+jo4NZbbiU5OZncvFxaW1u57NJFLFv+Xa9tNRoN9U06Pv7jayp0VeRVFGJf1r8pb7a2trionPkzxbQUVG4lR2mn4M477+S6664Tk6+FAXW48vn9999n4sSJvP7668ydO5esrCw8PDwsHZ4wQlhbW/PZ558BpuEqjgol7kFjeiTSomZcRmtdCYmJiYOaSDtW32gHazkeCsd+3dxft24dALfMCOLHveV02zhz+eWXH3dMO3bs4Pzzzqe2rhYPdw+ytdk4OTn12CY/P5/oqGha20yriw4ePMjo0aOP+1zC6UMk0gaQjb0Sd/8Itm7dKu5anoYOJ08LG5qOmUgDuCpmFOvzismuref3qxZQ19bOGm0Bv2Xn82tWHo7Wcrwc7cmta+Cm2GjumTweGysZzZ1dbC4o5d4p444rvkCVgh2iIm3E8Pf3580336S0tNScK
NNoNHh6eooLuSFQU1ODXq/vVeXn6elJZmZmr+2ff/55nnzyyaEKTzhJrq6ueDvbc3OCP/d9n0qMv5LfDlRQU1PD9IQEdqUmH/MYnuo4tiauOK5pmJai1mhI+7PguPYJUCkoSMk6oYmXg2HHjh18/sXnTB01gUsnn4+2LI/NWzb3ue20adMIDQ4hoy6P8Ihwpp17BldffXW/ziORSJDKZEwOj+OqmZfg5ezJvrwUnvn+VQoKCggO7n8luCAci6h8FgZKUVERGRkZ6HQ6Jk+eRFZZEWcvfp/f37kNjEa81OPxCI1l85bBHzig1mgoSN1/xOcDnBz71TrghhtuIDc3l6WffIJer+fXX5ed0OfRjh07qK+v5+qZC/lq8w9kZ2f3qjirr6+nta2VG2dfyScbvmH79u0ikSYcleiRNsA81EPTyFEYfgICApDJZBTq+leJIpVIeHHOVFo6u3ly8y6CnZ24fUIMv1+1gD+uuZAbx0fhbm/HdwvP4YGEeGysTB8cG/KK6NTrOUdzfF/mA52UVNXU0tQ0NJNFhZMjkUi44447eOGFF7jhhhtISEjAy8tr2F+wn64efPBBGhoazH+Ki4stHZJwFJ6entQ0deCuMC0PaW43LRmprKxk+vQEKvMPou86+pRL77A4amuq+z0N0pLUavUxp0p3dHeTWFhq/jnISUlXd/ew+bscEhICwJljprFg4tnEa8ZSUVnZ52daQEAAOXm55OTmsHr1al577TXGjx/f73PFjI1Bb9Tj6+qNTCol2NMfMFUoCMJAOVz5PHv2bPNjR6t8BlNlZmNjY48/grBu3ToCAwM55+x5XHbZZUglEqry9hEwZiYzr32WyQsfwEHlibcmntSUg4P+90at0Ry1b3SgwhFtdtYxj+Pm5sYHH3xAZmYmGzZsYP78+Ufdftu2bVxzzTVMmjiRd95+x/y4RqPBYDRQXl8J9N1TODIyEqlEyud/mqqcH3rwIVpb+9/7Whi5mpub2bFjB++99x633nors/7xnnw0IpE2wLw1ceTn5Q56I0dh+JHL5QT4+R3zYuWffBSOPHHGRFZl5vK7tsD8uNpFxV2TxvHNJWcT79uzImZ1dj7jvNzxVToeV3yBqr97EgiCcHRubm7IZDIqKyt7PF5ZWYmXl1ev7W1sbFAqlT3+CMOXp6cndc3t2FiZvga9sSEfgOzsbBISEuju6qCqIOWox/AKHYdEKiUxcfjfPAsNDaWmuYXmQ8Nt2ru7uf+PbSxPzUbX3kFqVQ3nf/sr1678gz/zTYmzgGH2meHv74+NtQ17tPv5PXkjSVrToIfBiG/s2LEUVpeYf3ZxdEZh7zgihksII8fRKp+PdB3x/PPP4+TkZP7j7+8/FKEKw1xpqekmiOHQvDt7e3vaW5uoLclk1LSFjJ17I2C6TjUYDOzYsWNQ4wkNDTX3jf6nwwPWAlQK8o7jvVutVjNr1qxj3kz+3/338+vKXyjLK2Hp0qXmxydMmIBGrUFbX8jZZ5/dZ5sBa2trrKysGOWn4bKEBdTr6klPT+93jMLwZzQaKSgo4Oeff+app57ioosuJjgkFIVCwdSpU1m8+A5+WrOZBql3v44nEmkDzFsTBzAivlgLA08dFkbBcSTSABZEhDJXHcgjG3dQ3XL0Ox+N7R0kFpYe15CBwwKcTBf2w+WiSBCGM2tra2JjY9m4caP5MYPBwMaNG4/ZHFcY/hQKU5Losg+Sejze1tZGTEwMDo4KyrVJfe1qZm2nwN1/1Ij4vD88ubPoUMV0SmUtK9K1PLhhOxM+XMZF3/1GTp0OuVRKrLepL5OvwhGZVDpshtRIpVImTpzI2r1/8snGbynvqOGKy68gKqp3M+uTNWbMGCrrq6lqqCGtKIs1yRuQSaRs3rx5wM8lCMdDVD8Lffl3MrayshK5tTXl2T0/x5w8g3Bwchv0zy21Wm3uG70xr4jzvvmZT/amMfrdr3l44w4CnBRU19YNeGVcQGAg1jI5tnIbikuK6e7uBsDb25tsbTYFhQWsWbMGV1fXXvtKpVKio6PwVLmzYOI5SCQS9u3bN6DxCUOvo6ODJUuWMC1hOk4qZ4KDg1mwYAEvLn2NPZllKEOnc8b1L3DRwz/gFzmVUTOuYPZNr/Tr2KJH2gBzUHng7BlIYmIiCxcutHQ4whBTq9VsPrj/uPaRSCQ8c+YU5n29koc27uDD8458x2V9XhGdegNna4KOOzYXOxsUtjYikSYI/bRkyRKuvfZa4uLimDBhAq+//jotLS2iB+YpIDAwsMfPrg5yWrqMVFdXI5PJmDplMlnHSKQBeKpjR0Q7h8M9PAsamoj0cCXe15NF0WEsT83mmphInO1sWJ9bhKu9LU62NgDIZVJ8Vcph9Znx+9rfKS0tJSgoCLlcPmjnGTt2LAA3v3MvADKpFIlEak5ICsJAON7KZzBVP9vY2AxFeMII8u9hUxVlZcTFxlGhTWLM7GvNj0skkkOfW4PbJ+3we2VhQxPPbt1Dga6RtOrdAKhsbQhS/X1zf9y44+v5fDRXXnklO7bvwCvQj1vvW4yV1fGlOgICA/l99e/szErCaDSyZ88ebrrppgGLTxh6a9as4bXXXiMw5kxGzboRN/8IXP0icHD+u11OY00J6z+4i6r8gxSlbiVq5hX9OraoSBsEQ9XIURh+1Go1hfUN5gmL/eVqb8tzs6awMa+YFelHvvu/OruAOB9PvBUOxx2bRCIhQKUcNtUFgjDcLVq0iKVLl/LYY48xduxY9u/fz9q1a3vd+RVGnsMXqb4qW2yspPi72uPjbG++oJ0+fTqVuXsxGPS99u1sa6IodSu7fnqV0owdlBQX0dLSMqTxHy9XV1eUjo7mijSAh6dPwE/pSEpVDQsiQjlYWcO5/6p2Nk17HrjJnSfL3t4ejUYzqEk0gOjoaM4++2xclM68/n/PsPy/HxHsFWCubhCEgSAqn4WB4ufnB4D00H14mUzKjBnTqcxNpruznbLs3ST/9i6rX7+BkrREKioqj3K0kxcYGGjuG7184Tk42/6d/L08OoyAQ0PZBvqa5Nxzz6WwqJCtiVtZsmTJce9/7bXX0tHVwQTNeBxs7Y+Y0BYsq66ujr/++ouvvvqKRx99lEWLFhEbF8/atWt7bXv4RuLYuTcSO/82AsecgaOLtzmJ1tpQzYqnFlBbbBokFhp3Tr/jEBVpA6C7q4Oq/IOUa5Oo0CZRmbsXZ5XTiJjkJQys0NBQWjs7qWltw93B/rj2nRMayMWRap7esovJfl69Jn/q2jvYVlTKI9MnnHB8AQoHcofRRZEgDHeLFy9m8eLFlg5DGGCHk6H3zVPz2vo86ls68XT6O5GWkJBAe+sj1JVkYe/kQbl2D+XaJCpzkqkuzsBoMODm7sHMhAQWLnwaB4fjv7kxlCQSCaGhoRT+o/WAo7WcpWclcPmK37n5l41Yy6TMCgnosV+gk4KDp+lnxoUXXsi6tevwdvbESmZFoJsf+/eKZT7CwBKVz8JA8PAwLck/3COtuqaahIQEXnjhBT69czz67i4cFUqmTZ3CrVc8woIFCwY1nn/2jXZ3sOOZ
WVO4ffUmYjzd8HNSYDQacbKzHVYVzwDnnHMOVlZWKO0VqBycxIAZC2ppaSEnJ4fs7Gyys7PRarVkZmWRna2lvq7WvJ3CxRMnjyB0lSW8//4HzJs3r8dxIiIikFtbU1uciU9Y72toG3sl9ip36stMSd3cpDWUZvavIk0k0k5AR2sTFTnJh75UJ1FVkEJ3V+c/3qAe4vzzzxdJtNPQ4ax3YUPTcSfSAB6dMZGdxeX8949tfHPJPKT/+Dv0R24heoOReSewrPOwQCcFa07TiyJBEITDbG1tUSocqWnuxENpQ3ppI1KJBFWlqcH3hAkTkFtb89ur19LWrAMgMCiYs2dOJyHhHhISEtBoNCPqc14dFkbRru09Hpvg58WN46P4eG8ac0ICUNpY93g+0EnBquTU0/LGYExMDAajgeKaUtTewfi4eLF12190d3cf93IhQTiSRYsWUV1dzWOPPUZFRQVjx44Vlc/CEWVlZbHo0kVUVVYikUpZv2E9kZGR2NjYoHJSoGtowtlJwaOPP8ns2bN56aWXsLW1JSEhgdGjRyOTyYYs1lCNhoIC0zXH2ZogPjjvTEKcnYDhu0rG2tqauNg4ftm1FplMxoT4Ey9eEI6P0Wjk0UcfZdv2HWRnZ1Ne9vcUcTtHJ1SewSg8ggiZEouLbxgqzyCcPAKR25puZG779ikys3q35JDL5UREjKLmUMXZv8nkNlz21O801pRQlLKF9C3fYW2v6HPbfxPfBPqhRVdFuTaJcm0SVTlJVBdnYjQacffwYsb0BKbfca1F3qCE4SckJASAQl0jcT7H/yVIaWPNy2clcOWPa/l8Xzo3jP+7ifLqrHwm+nnhcQIJusMCVUqKk1Pp6OgQPTYEQTjl6PV6iouLqaurM/+ZPXt2r/4xAB7u7tQ0deKpsOagUYJM4cHChZcCpkTbB++/T3JyMgkJCUybNg1fX9+hfjkDSq1Ws/2Pdb0ev3fKeJztbJkZ5NfruUCVgpbWtqP2bDpVRUdHI5FIeOPXD2nuaKW+SYeLswttbW3mYRWCMBBE5bPQX+vWrePAwQMsmraA77f/zLZt24iMjATgoYceYd26tdy++A4uvPBCAP773/8OaXwtLS2kp6cjkUjQaDRsTtlvfm5OaM/epAGO9sNylcwf6/9Aq9USGRmJra2tpcM5bWRkZPDss8/iEz4R73HnEzE3CJVXMCrPIGwdnQHITfqdP96/k4ipF6OO77kE08krmF3blqPX63vlY8aPG8u6rXt7nbOzrYnakixqijOoLc6kvjSTxuoCVjx1Qb9iFom0fzEajTRUFZqWcWQnUZWbTH1lIQDBIaGcc2YC06f/l4SEBEJDQ0+7O7TC0dnZ2eHj6UnRcU7u/KfJ/t5cNzaSl7YnMz3IF7WLitrWdnYUl/P4zEknFV/goXLq/Px8IiIiTupYgiAIw83jjz/Os88+2+Oxc+efy6+//dprW09PT5bv2Uu3wci4ceP4a9fuHs9ff/31p9TyqtDQUMobm+jo1mNj9feXTBsrK26LH9PnPv+c9ny6JdLs7e15/LHH2LtvHzExMcTExDBhwgSRRBMEwWIO3xTam29actjc3Gx+7r/3389/779/yGKpr6/nt99+Iy0tjdTUNFJSUykqLDA/f88991BY33jEiuZAlZKfh2EiTaFQMH78eEuHcdo5PAQqfMpFREy9qMdzHa1NbFv2FNk7VwFgba/stb+TRxBdnZ0UFRURHNyz32tMTAzfLvuOvL1/UFucSW2JKWmmqzJNPbY6VLV21rSxxMRci1qt5oILjp1MO+0TaQaDntqSTMqzD/c3S6ZZV41EIiF69BiuuvR8891ob29vS4crjABqjYaC6rKTOsb902JJLCzl3nWJrLh0Pn/kFmIE5mkCj7nv0QSoTBcAubm5IpEmCMIpJz8/nyDPABafcwMKO0eWb1tFcXFxn9u+/MqrfPbZZ3h6ep4WU7bVajVGo5GSxiZCXVT92uefDaGnTp06iNENT48/8YSlQxAEQTA7//zzmTBhAgX5+ahUKurq6iwWy5133snXX3+NtZ0jMisb9PquHs+np6cftW90gEpBWVIK7e3tovJLwMHBAW8fXxoqC3o9V5Sy2ZREk0jAaKSzrbnXNirPIACys7N7JdKmTJmCvruLde/ejqubOzExY7jozEvNN8kiIiKwtv67tUVjYyP9cdom0pJXv0eFNomqvH20tzYht7YmPj6eS2/7PxISEpgyZQpOTk6WDlMYgUI1GvbmndwdFlsrK5bOTeCS5at5d88BdpdWMtnfGzd7u5M6rpejAzZWVsOuJ4EgCMJAcHFxQSKVoPY2fYlycXQmp6iwz20nT558Sk7G0+v15ObmkpaWRlpaGiUlJTz88MOEhoYCUKDrfyLNTm6Fp1Ix7BpCC4IgnI6USiUpBw/i5+IDVka++fobnnnmGYvEkpaWBoCNgwoXHzXOPhpcDv1Z89bNlJSUAKbPnL4SaUFOSvMqmVGjRg1p7MLwFB4eTnFVQa/H/aMSmLDgbnKT11JbnEnmth+YdtnD5v5oAI6uPsisrMnOzmbu3Lk99p84cSKZmZk4Ojri5eU1YCsKT9tE2p6fX+fMM87klssfICEhgfj4eJENFwaEWq1m5fL+ZbKPJsbLnf9MiOHtXQcwAs+cefIXfFKJBH9nJ3FRJAjCKcnFxYXy2gqe+f41WjpbKa+rRCo/NXuXGgwG8vPzzQmztLQ0DhxMITs7i86ODgDsFSo62lpxdnbm2WefxdbGhqKG4/t8CnRyFDdfBEEQhgmFQklbRxvdej2NumaLDYPx9fUlLVPLlc//2ev8rv4RVFZkAFDU0Ei8b+++0f7/qHgeykTar7/+yiMPP0JhYSF33HEHdvZ2/LHuD9ra2vj8i89FUs+CwsM0pK/d2utxW0cVsefeTuy5t1NVkEJNYRr5+zegqyygoTKfpqpCdJUF6Ls76e7u7vPYGo1mwOM9bRNpRoOBn376EaWy9xpbQTgZoaGh6FrbaGjvwMn25Br6L54Qw6b8YjKq65irPrllnYcFKuzJFRdFgiCcgi6//HK2b9uOnZ0tjgoFk+zsiI+PP+WmTj777HM888wztLe3AWDroMTZOxQ7lS/BcRowGuhoaaShqpC25jxWrPiR559/nuCgIAp1x9fDM0DhSE529mC8DEEQBOE4+fn7sX/ffqzlcto7O9ixYwfe3t7mgWdDZeLEifz222+0NdZg7+Te4zkX3zDSsnfj4+nJ3vJqXOxsKdQ1UdTQSEFDM8VNLRTrGgDTTaGB1N7ejlarpa6ujoSEBKRSaY/nv/rqK0ryi/BRebBs2TJy83IJ8wmlsLqEVatWiUSaBYWFhaH7/Eu6OtporC5CV5lPQ2UhDZX5NFYV0FBVQEtDrXl7D08vwsPDOWPOZDSaa9BoNMyfP3/I4j1tE2mCMFjUajUAhQ1NjDnJRJpcJuXLi+YigZNOyh0W4KRgm7goEgThFBQREYG3tzfffPsNRqMRgM8++wyj0ch//vMfC0c3cNauW4eVvTNh4+fR2dpEY3UxlQWpGLr3AWDr6IyLjwa
fiInYKV0pKjA1ptaEhVGYsu+4zhWoUvBnuqhiFgRBGA6io6Mpyy8h0k/DlrSdTJs2Dblczv79+80TPIfCnDlzePTRR6krze6dSPMJo7uzg/CICJZt2cKylCxsrK0JDgpCHTmGc9Vq1Go14eHhzJ49e0DjuvKKK/lp5U8AvPTSS70mlwYGBmKUgEwmo6gwHyuZDF9Xb6oaa6iqqhrQWITjEx4eTmdHGx/f/vcAJKWTCo1GQ9zEUYSFXUBYWBgajQaNRmPx4T8ikSYIA+xwH5oiXSNjPN1O+niqAUqgHRbopODrlOw+xwMLgiCMdFu3bmVccDSzx85AYevIB398QXp6uqXDGlAtzU0015VRuP9PnH01eKnHETn9Ulx8w3D20WCvdDVvm77lO8qydlNXV0eoWs2qHduO61yBTgrqdDp0Oh0qlWqAX4kgCIJwPObOncuyZctIKkhBbzCwaNoClm9bxZ49e4Y0kRYXF4dEIqWuTItf5N/DaJrrynHxNS2ju3jhQh574glCQ0Px9fXtVR02GMrLy4gNjaGmqY4vvviCCy+80FzkADB79mzeevMtCmtL6dJ346lyZ1PKNiRIiI2NHfT4hCM7++yz+fjjj5HJZISFhREWFoarq+uwXVEgEmmCMMBUKhWuzs4UNhzf8pmhEqhS0tXdTXFxMUFBQZYORxAEYUC5uLjgLnVmSkS86WdHZ4tONhsMHh4e2CldufaVncf8gunsowGMrFu3jtDQUEp0jXQbDFj184ImQGVqgZGbm3vKXGR0dnYil8uH7ZdzQRCEI5kzZw4uzi5UVlUiQUJSzn5kUumQ9z+WyWQ4KBTUleWQsvFLMrf/iG/4JA6s/5TRs64FICkpidtvv31I4/L18+PX5F/R67sxVJei0Wh44IEHuOiii1Cr1cydO5fmlma++OIL/u///g+VoxPVDTW8+tprXHXVVUMa64kyGo0UFBSQlJREcnIyEomEJ554AhubgS2+GGpSqZQbb7zR0mH0m0ikCcIgCAkJoVB38gMHBkPAoeaeubm5IpEmCMIpx9XNlfQDGXy47iua21soqi7BJsfh2DuOIPHx8axbt47OtmZs7I++tOFwZcDWrVtZsGABXXo95U0t5kbPxxL4j4bQIzWRZjQaeeSRR9j0558U5BdQUVXJPXffwyuvvsKKFSv4448/qKqsoqKinIsvvoT/3v/fYx9UEATBAurq6qisquTy6Rfxx/7N6DobufmWW7jtttuGPBYfby9qizPJ2LocgJoiU/W3Z+hY8vf+wcGDB4c8pmeeeYbS0lJ27tzJefFnsSZ5Ay+88AIvvPACo6NHczDlIFZWVlxxxRXk5uZSUlLC2I7xXHrppUMea38YjUaKiorMSbPde/aQnLwXXb3pBqGjyp1mXTVz585l5syZlg32NDP49ZWCcBpSh4VR1NRs6TD65Kt0RCaViilsgiCcksLDwymrq2Tdvk1sS/8LXUsje/fuHfCGxpY0Z84cAOrLj/0+bmOvxE7hyv79+82tB45VMa1r7+BARTW/ZuXx1YEMi1Q7DKS6ujqee+45Gst0JKgnMjpwFGvWrMFoNHLllVfy8w+rKEkvoLKggtdfe83S4QqCIBxRQEAAEomEZVt/or5Jh52tHS+//DLe3t5DHkvkqFHUl2m58oXN2Ng7IZFIkcrkBI6egYt/OIWFRUMeU3h4OMuXL+fiiy9mzd6NSCVSzhg9jXNiZ1NeXm7ezs7Ojueee44vv/yS75Yvt8jv79+MRiPFxcWsXLmSRx55hLlz5+Lm7kFQUBCXXHIJb3/wKbk1RjTTr+KcOz/i2ld2cuWLW7G2sSMpKcnS4Z92REWaIAwCtVrNn6t/s3QYfbKWyfB1Uo7oiyJBEIQjiY2NRW/Q89Htr6K0V5Ccc4CXVr5NQ0MDDg4OVFRUEBAQYOkwT8qECRMACXWlWrxCxx1zexe/cPLycgkMDEQmk1FQ34jGRUVhQ6N5klqhromiphaKGprQtbaZ93V1dmb8+HFceOGFg/iKjo/BYKC2tpaysjK8vLzw9PQ86vYuLi7Y29kxMWwc50+Yx8+71rJ8xyoAlAolc6Kns3Dq+axJ3shnfy475aa8CoJw6jicAHrwwQeJCggnpTCDjIwM4uLizNvs3buXP//8k4aGBu6++25cXV2PcsQTN2nSJFatWoVUJmPmdc+x7t3b8YuYirWdAlffcMoydgz4OSsqKrj7rruZPmM6t956a5991/z9/VmxYgUlJSVMmjiJ1KJMpBIJCpVlm9MDdHV1IZfLMRqNlJWVmSvN9uxJIikpmZoa08ADR5U7boHRhEy5nNHOXgSMnoGjc9+fdW4BkSQlJw/lyxAQiTRBGBShoaFUNjbR1tWNnXz4/TMLUDqQKyrSBEE4Bbm4uABgI7c+9MfUM2TihInkF+TT3d3NF198wTXXXGPJME+Kra0t9g6O1Jdp+7W9q28YGbl7kcvlBPn789imnTy2aaf5eV8vL9QaDXHTNCwKDUWtVhMaGkpoaOiwGzDwySefcOutt9Ld3Q2Ah7sH5RXlR21iLZFI8PcPoKqhFgB3pSutbW1cc801SCUSGlpMrRhU9kq6urqor683/z0SBEEYbmbNmgWAxieElMKMXtMm558zn/q6eroN3XR1dfHCCy8MShxnnXUWDzzwAHWlWkLGn8UF93+Dk0cQAC4+Grq7OiksLCQwMHDAznnXXXfx88pVLP9+Ocu/W87mLZuPeOPDz8+Pt95+ixUrVtDU2MQlCy8ZsDj6Q6fTsXfvXlOiLCmJ3bv3UFlRwYoVP/DwI49yYL9piraDkxtugVEETlpIXNBo3AOjcFCZkmb7137Ili8fJmLaJZxx3fN9nsc1IIrdu7cP2esSTIbfFb4gnAIOT4cpamgi3M3ZwtH0FqB0JCU729JhCIIgDLjDCZDHlr1IR3cntY31APjaunPmrMms2buRTZs2jehEGoC3lwe1pf17H3fxDaOrs52qqipWrFzJpk2bzImykJAQ7OzsBjnagbNz506cHVXcOOtyyuoq+XLT99TU1ODh4XHU/YJDgtmWuJO0kiwq6ioBWL96HQ3NjezPT+WNXz+ipLYMgJqaGpFIEwRh2AoODsbdzZ2fdq5GqVSi0Wh6PG8tl3P2+DNJK8misrJy0OKIiYlBIpVSX6YlIDoBn7AJ5uecfU3XQmvXruWWW24ZkPNt376d77//nlvmXsOBgnT27ttLR0cHtra2R9znwgsvHJKK6sbGxl5Js/w80+ofa1t73AIicVPPoK5xLb/88gsHD+wn+syrGTfvJhycvXolAztam9j02f/I37ceAM+QsUc8t3vQaFI2fimmaw8xkUgThEFwuA9NUUPjsEykBaoU/JycJpavCIJwypk0aRL3338/Op0OFxcXuru7Wbp0KWeOSWBMUCQ5FQUcPHDA0mGetPDwcP7cuvPYGwLOPn9f0FxzzTWMHTt2ECMbXIf72EwKjyO71HSR8sMPP3DbbbcdtSrtf//7H0tlSwkICMDR0ZGXX36ZxefcSE1jLd8lrmJL2g7i4+N56tbrel2UCoIgDC
dubm5ka7MxGo2oVKpe3+X9/P1YtfN3AM60mTNocUilUhQKJXV9VEc7e4WCRML27dsHLJHm5eWFi7ML32z9kea2Ft5+++2jJtEGS1NTE/v27SM5OZmkpCR27d5Dbo7pdyC3scU9IBLX4KmcecatuAdGofIKQSqVAdDWVMf+AwfQhIVjNBpwdOm7N1tRymby961HKrPCoO9m/9qPGDVtIZI+PufcA6MA05LeM888c1Bec21tLQcOHMDBwYGJEycOyjlGGpFIE4RB4OnpiYO93TEbOltKoJOSltY2Kisr8fLysnQ4giAIA8ba2pr777+fL7/8ksLCQvJyc5FKpRRWlTAmKBJfFy+279iNXq9HJpNZOtwTNnHiRNasWUNHSwM2Dk50d7ZjZf33BUVJxk7+WvESUxY9hJt/BACJiYkjvxLP25u6pnpKasqo1FUDsHjxYl579TUW37GYm266CQeH3lNaZ86caZ5otn37dl5++WUAEiInEeoVxJ0fPcyrr77KlClThuy1CIIgnKijVR59t3w5GzZswMPDgxkzZgxqHL4+3tQUZ/Z63MraFoWLDykpKQN2rtDQUDKzMvnf//6H3EpukUmlpaWlRESMorm5Cbm1DW7+kbgGTuKMhP/DPSgaZ68QGqoK+e7ReSRc+QQuPj1vzLgHRpP88x9cfPFF/PlX6hHPExA9nUmX3E9J+nZK0rfTUFVIZ1sTNg5OGI1GmmpLqS5IobowjeoC03TUgoKCk359er0erVbLgQMHOHDgAPv3H2Df/gNUlJcCplYJubm5BAcHn/S5RjqRSBOE49Te3k5mZiZeXl5HTEJJJBJCgkMo1A3PRFrAoWabubm5IpEmCMIp54knnuC9d9/Fx80bd4Ur1jJrVv61mlV7fqe2oQ4XZxfa2tpwdHS0dKgnbPbs2Tz++OPUlWlx8dGw/In5WNspCR47G4O+i/1/fAJGI821ZfiExWPv5M6BU6AST61Wo9frWfzhgwBYSWUsnn8jybkHueeeeygqKuI///kPCoUCpVLZ57JVBwcHZDIZj35j6htkZ2NKQB5raIFg0tzc3KMaIyl5L7fecjN33323pUMTBAFTs/3rr79+SM4VGRnJz7+uMa9y0Xd1UF+RR12ZFqlMTmtb+4Cez93dnU8//XRAj3k8WlpaaG5uYsbVTxMx7RKksp7plNyk3/nzswcAKMnYQfQZV/Z43j0omo6Odvz8/Kgp/hF9dxcyK3mv89g4ODFu3k2Mnft/6CoLKMvYyf51n1BTmEJNURqtTaa2Fd4+vsTHxXHLFU9zxRVXHNdr0el0HDx40Jw027tvP+lpaXR0mP6bKV28cPYNx3vceYw+LwIHZ09WvXg5+/btG1GJtLa2NnJycsjOziY7OxutVktmZha6hgZW/PA9kZGRJ3RckUgThCPo7OwkKyuLtLQ00tLSSE1N42BKCgX5eRgMBtzcPag8SoNjtUZD0f49Qxx1/wQ4mRJpOTk5TJ061cLRCIIgDCyFQoGrkytv/d9zALy9+hM2p27nv/ffz5gxY5g0adKITqKBaTopEgl1ZVrcA6OxtlNSX6alqbYUfVcHbn4R1JZmEzDaVI3g6hdBfn7vqoGR5qyzzmLz5s0sW7aMDz74gMcv/y+jA0fhoXJnR+YeXnvtNV577TXAVJ24ZcsWJk2a1OMYY8eO5cCBAxQUFFBVVUVVVRW+vr7mtgzC31pbW9m/f78pYZaUxO49SWRnZWI0Gg9VY4yis9PIa6+/IRJpgnCK6ejoICsri/T0dNLT00lNS6OysopPPv6IiAhTpfNZZ53Fjz/+yJo3b6ahMo/GmhKMBgMAHp5e3HP3A5Z8CQNOrVbjqFDS3tLQK4lWmvkXf7x/p/nnvpZtugdEIpFIkEqldHd1UF+mxS3AlMgxGo201FeYqswKU6guSKW2OI2WQ4NyPL18iI+LJf7Su4mLiyM2NtZ8A8hoNFJVVcXOnTtJS0s79N8rnZiYGN56840eMfz444/cfc8SSoqLALCyssbFV42zbzixF8zF0dWH7cueZtKihwmNnddjXwcnNw4cOMBFF110kr/JgdXV1UVBQQFardacMMvMMv1vaUmxeTs7BydUXkEo3IMoyN3H6tWrRSJNEE5UV1cXOTk5pKammhJmaWmkpKSSl5tjngqmcPZA5a1BFTyVhKnX0dXewo7vn6egoICQkJA+j6vWaEjeunkIX0n/2cvleCodyc3NtXQogiAIAy4oKIgaXS3NbS3UNtXj5KCkW69nyZIluLm5WTq8AWFjY4ODo4L6Ui1W1racdcsbrHjmQjQTzyPu3NvZ8tWjWNsrsXVUAeDiF0aadrdlgx4gM2bMQCaTseKHFTz6zQuMCx1NR2cnEomE+XFzmBQWS3N7Cy/+9BaZmZm9EmkAUVFRREVFWSD64autrY0DBw6QlJREcnIyu3bvISszA4PBgJWVNW4BEbgGjGPGpKtxD4zG2VuNzEpO9q5f2PjRvdTV1YkhDYIwROrr68nJySEyMrLP5ewnQ6/XM/+88/lj7e8YjUYA5DZ2yOS2dLW38Nxzz/Hll18CsHDhQr7/YQUGg4HRZywwv7dGRUWdko3vpVIp48ePp6Sw95JVJ49AfMInUpGzF4O+i5QNXxAz53oUrr7mbays7XD2DKS2thapVEpG4g/YOKqoKUyltiiN5kMtC9zdPYmPjyP+4sXExsYSGxuLj48PRqORyspK0tLS+P77783XrunpGdTXmRJuMis5Lt4hSKxsSNz6Js89+wwKhcIcw6pVq6hrbGXWjUtx9Y9A5RViroory97Nhg+X0KKrZP/vH/VKpLn4hbN///Cqbv/P4jv46IP3zdftchtbVB6BKDyC8Bgzn5AZXkhlcoLHzcbW0dncU3DlsxeSfRLD90QiTTht6PV6cnNzD1WXmZJmB1NS0Wqz6e7qAsDRyQ2VjxqVbzxT4i/H2UeDi4/GfCFyWHNdOTu+f560tLQjJtJCQ0Mp1TXSpTcglx25AbKlBCgVIpEmCMIpKSQkBIPRwFWv/cf8mJ2tLatWreL//u//LBjZwPLx8qSmxFRl5uKrYeplD7P1q8fwVsdSmrGTiRfdZ97WxUdDd2cHpaWl+Pr6HumQI8a0adMoKS3h+++/58033iB9bwoSiQR/Nx9GB42iua0FoMfFg/C39vZ2Dh482KPSLDMj3dQ70EqOm384rgGjSYi7AvegKFx8NMisrPs8lkfgaACSk5OZM2fwGpsLgvC3M2aewYGDB5BIJFx91dV88eUXA3Zsg8HAhvUbzEk0mZU1Ds7eOHkEUFOUwebNm83bOjs7s2H9HwN27pFgQnwcB7/8rtfjji7eXPDfr+lqb6Esezc1xZnou7vQ7v7NVF1WlEp1YRodbc3I5XImTZ7Cjk1f4+rmbqo0u+BWYmNjiYuLw8fHh8rKSvM162+//UZKSirpGRk06EzLOmVW1rj6hKDwDEEz/WqcvdU4+6hx8ghEKrOiujCNFU8vICMjgwkT/p6oOmbMGL5f8ROaief1GF5QkbOXX16+GiOm/+7+0dN6vUYXvwj27d8w0L/Sk
7Lpzz/xCBlP7Hm3o/IMxkHlaX5d5dpkVr14GQDe6vHYKf6+2ePoHkRm1ghMpBUUFPD000/z559/UlFRgY+PD1dddRUPP/ww1tZ9f1CDqVnsli1bejx2yy238P777w92yMIIYTAYyM/P/8eSzFQOpqSSnZ1FZ0cHAPYKFc4+GlTeMUwee4kpYearxk7h2q9zODh7YWuvIC0tjfPOO6/PbdRqNXqDgdKmZoJUygF7fQMlQOmANjvL0mEIgiAMuFmzZvHee+9x22234evqjbezJ2V15Xzy8cenVCJt1KhRrNv493eiyOmXUZK+nT8/+x8YjQSN/Xt61+GGx2vXruXGG28c8liPxGg00t3djVzeu0fMsdja2nLNNddwzTXXUFJSgr+/P7bWNgC0drYBIpEGpuVZKSkpPSrNMtLT6O7uRiqzwt0/HJeAaKZesRCPwNG4+GqQyW36fXwnj0Bs7BxFIk0QhsiWLVs4cPAAs8YkUKmrZseOHQN6fLlcjlwuxz96OglXPNYjMbHp84co3Pv7gJ5vpImLi2Pp0qW0NdWZEzNGo5HG6qJDzf9NfcxqitLYvfJVAPwDApkYH0/ctQuIi4tj6tSpSCQSqqqq8Pf37zV5NSUlhfHjx5s+H61tcPYOxckrlPAzrsfFR42ztxqFqy/71n7Inp/fIP6Cu3D26lncofIKQSKRkJaW1iORFhMTQ2d7K43VxTh5Bpoft3dyR+UdSv2hKazJv71L5PTLzEtUjUYjCldfDhQV0tDQgJOT04D+XnU6HRkZGaSnpyORSLjuuuuOOo37sIjwcPZqq/Eb1XNYUNKvb5P0y1vmn528evZ1c/IMImvXihOO12KJtMzMTAwGAx988AFqtZrU1FRuuukmWlpaWLp06VH3vemmm3jqqafMP9vb2w92uMIwZDAYKCoqMifMDleYZWZk0N5u+gJt66DExUeNk/co4i88HxdfU4WZndKt1xtWf3W0NlFfpsXG0ZmkpKQjbne410qRrmlYJtICnRRszsyzdBiCIAgDTiKRcO2113LbbbdxyZTzOGP0VL7c9D17y9KOul9GRgZr1qyhsrKSqqoqxo0bx1133TVEUR+/SZMm8csvv5i/zEskEmZe8yxV+SnYK91w8vj7C7Kzt+kzKTExcdgk0tra2pgQF09NbS3vvPvOSfVcUSqV2Nna8vovH/Lu759jIzfdlB3oL/rDXWdnJ6mpqeZBALt27yEtLZXuri6kUhlu/mG4+EczedGFeASNxsUvHKt/JM26uzpoqi1D5dX/RtISqRT3wCj27DnydyJBEAbG/v37zROI/zy4DSNGLh6EflWuLiq6O9t69flSuvvT3t5Gd3c3Vlan5+K22NhYAA5u+ByjXn8oaZZKW3MDAL5+/sTHxRF/1bnExcUxfvz4I7aVCAgI6PPxhoYGuru7OefOD/GPno5U2nPKeGtDNb+/fQvFadsAMBr0vY4ht7FD5eFPenp6j8djYmIAqCnJ6JFIU7r7c9lTa2iqLSU3aS0l6dspSk2kriSTutIs6kqyaGtpQKF0ouvQaq4TUV9fT3p6+j96uaWRmppOZUUZYPoOZzQaCQ4O5owzzjjm8cLDw9i8o3df8sKDmzAaDeaf85LXoY4/x/yzyjOI5KoKmpqaTuimm8X+9s+bN4958/5ecxsSEkJWVhbvvffeMRNp9vb2YtLgacxgMHD5FVfy22+/0drSDICNnQMuPhqcvNWMP38uLr4anH00pjsoJ5gw62xvpr4sh7oyLfWlOdSVZVNXpqWlvtK8zR/rdUfc39/fH7mVFYUNjcDwW0YTqFJSW1+PTqc7JXsYCIJwerOzs8PR0ZGG1kYAnOyVVFVXHXWfyy+7nIz0dNxUrhgNRr744gtuueUWbG1thyLk4zZnzhweeugh6su02IVPBEyTvq549o9eFUVyWwccnL1ISend18VSnnrqKbKysxkdOIqLL76Y888/n3feeQc/P7/jPpZSqSRx2zZSUlJobGykqakJNze3HnfhRzKj0UhJSYm5qmzPnj1IZVYs/24ZCoWCJ554gt9WryEl5SBdnZ1IpTJcfdW4BkQzeeH5uAdF4+oXgZX1kf8uN9WWsu69O6gtzuTSJ3/rVd1wNK6B0ezes34gXqogCEfx+uuvY2dty8VTzuXrzStYuHAh77777oCfJyAggJSs/F6PO7kHYDTo2bdvH/Hx8QN+3pEgNDSUwKBg9q5+Dy9vXybExxG7aAlRUVEkJCTg4eFx0uc43AC/q721VxINYPOXj5iTaIB5Ge6/OXmFkpbWM5Hm6emJu7sntcWZhIyfS2tDFTXFmdQWZ1JbkoGuNIva8jyMBgMl6dsICVUzedxYxl59PjExMcTGxh5Xv9nOzk4eeugh9u7bT2pqGtVVFYDpJoyLZxAOrn54jz2H0f6RpqWp7gF8dnc8aWlp/UqkaTQaGqpL6e7q6HFj6Jw7P6Jg/0Zyk9ZQnLaNppoS83Nd7S0cWsGKVqtl/Pjx/X49hw2rNHJDQ0O/mpR+8803fP3113h5eXHeeefx6KOPHrUqraOjg45DS/oAGhsbByRewTI6OztZ8cP3BI2bQ8TUS3DxDcPRxfuEE2ZdHa3Ul+dSV6qlvkxrSpyV5dBUW2raQCJB6eaPi6+GsMkXmqvatH/9wsENn5v6ich6v8HJZDKCAgMp1DWdzMsdNIcnd+bm5prvrAiCIJxK3N3c2Za+i4LKIvKrimltbaW9vf2IiTEba2sSoiZzx/wb2Zt7kKeWv0J1dTX+/v5DHHn/xMTEIJFIqCvLwedQIg044rI8V78ICgpThyq8ozp48CBLly5l4ZTzuXTq+ezMSuK1Xz7goQcf4suvvjyhYx5uyDzSGY1GysrKzP3LkpKS2ZOURG2NqQm1wtkDJ281Jek72LlzJ9HR0Tz11FP4jprCxIv/h3vQaFz9IpDb2PX7nMVp29jw0RLam+uxsrZD6Xp8NwDdA6M5sO4TampqTpmBHoIw2IqKiti/f7+5J1Z/TJ8+nWXfLmPZ1p+wsrJi6dKlg/JvLioqir927cag7zZPp+xobULhbrrRsXnz5tM2kQawauVP7Nq1i5ycHHbvSeLlpa+g7+5m69YtvRJpRqOR4uJiXF1d+z0YwsXFBXcPL+rKc/p8ftS0S+hobaRCa6oE/v3Nm7nqxc09tuloacDaTklK6r5e+48dO4bEzd+SuXUZLY11ADg4KhgzejRnXXAWMTExxMTEEB0d3SvmwwMPDk9zNQ08SKewsJBPPv6I2bNn99h+zZo1vPLKKwSMnkHAhIuJ8VHj4qNB5RlEScZO1rx5E0WpW7ntY+3fr987pFcl3ZGEhYWZltZWFeHiqzE/bm3rgFfoOGwdnfFSx6KrKuSXl6+ksaqApnrTjVU7O/sTrlwfNom0nJwc3nrrrWNWo11xxRUEBgbi4+PDwYMH+d///kdWVhY//fTTEfd5/vnnefLJJwc6ZMFCbG1tCQoOwdHZm8AxM/u9X3dXB7pDCbM6c8JMS2NNCRzK4ivc/HDxURMafw4uPhpcfMNQeYX0+WW0vjwXg76bffv2
ERcX1+c5NWFhFGUdfSmRpRxebpqTk3NKXHgIgiD824033sBnn31Ol1JCXPgE/nvG/45aXebp5Ul5pumOpcrB9MWqsrJy2CbS5HI5jgoldaX9a5br6htGWebA9tI5UbfdeiteKg8umjwfiUSClcyKru4uzj7nbEuHNuTKy8v/UWmWxJ49SVRXm6rfHZzccAuMJmjyIiYERuMeFI2DyhOjwcDn98STnJzMWWedhZPKGd/wCYyedc1xndtoMLB3zfvs/vl1/COn0tZUh73S/bh6pEHPgQNz5849rn0F4XR08OBBxo8fj16vZ1R4BOmZGf3a74YbbuDss8/mrbfeIioq6ohLA0/WhAkT+Pjjjyk8uInNXzzMuLNvYecPL6A6VKmanJw8KOcdboxGI/n5+ebl8nuSkklOTqaxQQeAyt0Pl4AooubcxP7fP2DVqlXU1taa2w6t/+MPqqoq6OjsJnbcWJL29k5qHUlUVCRFZdo+nwseN4fgcXNoa6olbfMyFG5+pG1ZRn2pFl1FLrpyLU31ppsv8RMm9tr/8ccf5/U33iAqMtKcNAsKCurRk+xwwuyvv/76R8Ks7wmhSs9Q6hrbWbZsWa9EWnCwqVVA7Pz/4KU2VX7puzr468elHNzwOQC+EZN77KP0DCUltX/X0GFhYYBpqa2V3IaGqgKaqgvRVZdgNJiWdtrbO6DWaJg2NpywsLloNBrCwsIICwvD2dkZgH379rF79+5+9xwc8ETaAw88wIsvvnjUbTIyMoiIiDD/XFpayrx581i4cCE33XTTUfe9+eabzf9/9OjReHt7M2vWLHJzc809qf7twQcfZMmSJeafGxsbh+2XYqF/RkdHszen7zcWfVcHusoCU7KsNNu8PLOxqsi8TtrRxRtnHw3B484yNWz00eDio0Zu2//x0YcbN//xxx9HTKSFqtWsTdp9nK9uaDjZ2qCytxOTOwVBOGU9/MgjPPzII/3e3tPLiz3bd/PD9l+p0JnuVlZWVh5jL8vy9fGmtqR/g2OcfTV0d3VSUFBAUFDQ4AZ2DA4OjpTXVfDd1p84b8I8Plr/NXPnzuWyyy6zaFyDrbKyskfSbPeeJKoqywFwcHLF1T+KgIkXExs4GvfAKBycvfqsuJdIpbgFRLJnTxISiYTY2PHkFx7/jbvNXz5M5rYVRM64jIQrn+D7J86jRVdJd2f7UZeB/pvSIwBbByVJSUkikSYI/dDc3Ixer2dm9BS2pO2kubmZ4uJidu/ejZ+fH7NmzTrivt7e3jz33HODGt+0aaaJjWlbltHeXM/OH14ATP02WxuqyMzMHNTzW4LRaKSoqMhcDbznUEXw4SmZTm4+uPhHEXHG9bgHmd6j/zkFsjR9Gy+/9BLPPvsstnIZdtYy6ls6Abhmij9f7thPc3Mzjo6O/YonOiqS1JVrzT+3NdUeuq7Nob4sB11FDrryXJp1poSZTCYjJFRNXFQUURedSVRUFJGRkYSHh5uP0dnZSV5eHp6envzw/fe9zvnDDz+wYcOGI04IVXqFEjjhIuKDxuDiq0HpHoDMyjQsaOMn/+0z+RUeHo5UKqWuLMecSGtvaeDghs+RWVmj7+6kNHMnnW1NWNuZVky5+KpJ3/Ztv35PHh4ejI+N4+BfKwkJCWVseDhh8yaaE2VhYWF4efX9WXrYpk2bOPPMM01tEfw0R9zunwY8kXbvvfdy3XXXHXWbkJC/ey6UlZVxxhlnMGXKFD788MPjPt/EiaYMa05OzhETaTY2NtjYHN+dNWF4i46OYuOWD83LME1VZtnUlWppqCo0N1y0d/LAxVdD4OiZhyZzanD2VmNjf/JTvFSeQUgkUnbt2nXEbUJDQynWNWAwGpGe4NLTwRSoUpKT03fJsCAIwulmzpw5fP755/x+8E88PDw4d/5888XEcBUZGclvv6/HaDQes8XB4RtAv//+O7fddttQhHdEv63+jZdffpmnn3qa35LWI7OS8f77759wm4bhqKqqiuTk5B5Js4pyU9sIe4UzbgFR+MUuYFxQNO6B0cfdpsItIJo9SesAmBAfT9JHXxx3jFbWpor79C3fUZGzD0dnT4rTtrH168c44/oX+x2PRCLBLSCKpKTTo0pFEE5WdHQ0AInpf2FlZcWbb77Jww8/bH6+vr7eoj2M1Wo1SCQEjp5JS30VdaWmGzZjZl9LU20pRUXFFottMDz73HO8+upr1NXWAKB08cIlIIqwGdccSppFYzTo+fK+qbj6RRAQndDrGK4B0ZRpk3ns/HBW7CkjvdzU3ifQ1Y6LY334ckcx6enp/e7dGRUVRd277/HLy1eiq8ilpcFUBWZlZUWoWsPE6CiiFp5lTphpNBpsbGwwGAwUFxeTnZ1NYmIin376KVlZ2WRmZVFUWIDBYEBubU1GenqP/EltbS2LFi3Cyd0ft6AxhM+8DudDyzCV7v5IpDKydvzEps8eIHDMGZxzZ8/cjbO3mrT1f/b6PnJ4NVn9P6rr7J3cmfufdyjYv4GsHSsB02ovj+AY2hprMRqhrraG6upq3N3dj/p7kkgk7Pprp/l3cyIO95i76JEfcQ+I4r3/O3YybcATae7u7sd8sYeVlpZyxhlnEBsby2effdav8ab/tn//fsCUmRdOHzqdjuaGGpY/Zpq8YadwxdlXg3/kVMbMuc60LNNHg43D4E3rksltULr7k5Fx5FJstVpNe1c3lc2teCv6X+02VPwd7cnV9l3ZJwiCcLq59NJLueSSS07o+4ilTJ48mZ9++om2xlrsnY7eJ8e0JEfC9u3bLZ5Is7a25uGHH2bhwoU8/dTTnH/B+Ravkhso33//PfcsuZeyUtMyYTtHJ9wCovEeey5jFpiSZgpX3z6TVG1NdexY/hzhUy/Gb9TkXs//k3vQaPav+5jq6mpiY2NpfOEFWhtqjvn34J8SrniM2Pn/oSRzByVp2yhJNy1pyd75M/EX3I3C9eh9mwwGPbryPKoKUmhvaSQpubTf5xaEU0FTUxNvvvkmLi4ujBs3jkmTJvVrP6VSyYoVK/jtt99QKBTs3r0ba7k1z1zxAPd/8RRJSUm9lsgNJblcjoODgqa6Mmbd+BI/PnsJto4qvDRxqDyDySvNPmKf6JHom2++ReboxdlXPIt7YDQOqp59zsq1Sfzx/p0AZP/1M/EX3NnrGO5B0Rj03ZTUtaH2dCCzogmDEQpr27h3uWnQT25ubr8Taeeddx4rV/2Mk5OSqMvPMSfM1Go1crmc6upqsrOzyc7O5uuvvyZbqyUzM4u83Fw6OtoB09JLlUcACo8gnDRnkDAtCFtHZ9a9t5jU1NQeiTRnZ2dsbe0YNeMKxs7tOd27s62JLV89Rs7u3wBw8Qvn31x81DQ3NVJaWtpraFB0VBT7c3sWb3iGjMXa1gFXv1FU5u3jrxUvoivPobVJB4C3jx/W1tb9+l2d7ATZw8MG6kq1uAdE9e+cJ3XGk1BaWsrMmTMJDAxk6dKlVFdXm587PJGztLSUWbNm8eWXXzJhwgRyc3P59ttvOeecc3B1deXgwYPcc889TJ8+nTFjxljqpQgW0NTUhLWtA2cvfh9
nH02Pstqh5OofQVnWkddRH35zKmpoGpaJtECVglWiIk0QBMFsJCXRAPOFVl2Z9pgJFLmNHY6u3v1u4DsUwsLC+OrrrywdxoD6aeVKGlo6mXPLG3gERaNw8+9XZVdVQQrr3l1Mc10ZNvbKYyfSAk1f9pOTk80tJqoLU4+rfyyAvZMbYRPPJ2zi+RiNRurLc2iuK++VRDMY9DRU5FNVmEp1YSq1hanUFKXT2dEGQKhaw+3/ufu4zi0II92bb77Jo48+ilQqRSKRkJOTQ2BgYL/21el0fP7551hbyens7gLgtV8/AEwVrZbm6eFOY3UxbgGRnHfvF9jYK5FKZSjdTX3Z8vPzTZVrp4AJ8XGs27qXoJgzez2nq8jnl6VXI5GYvh+4BUT2WQXuHmiqMvxmVwkgwXBoKqRUKkXlF87iS6eyYMGCfsfk6+vLurW/93p83759nHHmLPOyS4lEgpObLwqPQJw8xxE35kKcPINorC7CL2Iyzj49/xsZjUZs7BzQ/quYQiqVEh4RQX0fAw5KM/8iZ/dvSGVWGPTdlGuTev0ODp8nPT29dyItOor1f77L5i8foaFcS31ZLm0tDQBY29gQFhbOxLhoIiMXEBkZSVRUFCEhISedIOsvlUpFcEgo1YWphE9e0K99LJZIW79+PTk5OeTk5PT6RR8urevq6iIrK4vW1lbAdPdyw4YNvP7667S0tODv78/FF1/MI8fR/0Q4NUydOpUvv/wK96AxxzWVaqC5+IRRsH8j3d3dff5DDw4ORiKRUNjQyEQ/LwtEeHSBTkpKKw7S1taGnZ3lfo+CIAjCiYmOjkYikVJXmn3UxIu+u5OGykLsHF2wFe/3gyouNpaVK38mJHYuUmn/qjXSt35P4rdPYOdoujHoF3XsJcVK9wDsHJzMfclUzi5UF6YcdyLtnyQSCS4+Gpy9QqmvyKO6MJXqgkNJs+J0OtpaAAgOCSVhQjzxt1xObGws48aNO+HJZ4IwkhUUFKD2CebiSfN54ce3jjrQ5t8OXzu8edNz3Pref5k3dy6hajUTJkxg0aJFgxVyv4WGhpCUmgeAT9jfEzqVHv4Y9N3s37//lEmkxcXF8c23y9B3dyKz6lkF5aDywDtsAuXaPQDkJa+lMnefud8XgL67i+6OVmQyOTHjxnHllVcSFRVFVFQUnp6eA9q2YN++fTTo6pl10yu4+Y1C6RGA1T8GxHS1t7D9++fJ2LocoMc0TDC9z6s8g8jO7j2oaMzoaNZvP9jrcd+ISUxZ9BAF+zZQlr2bCm0S7c315mIWfVcHXR2tSKRS0tPTOeuss3rsf/755/PpZ18gb8jijAlRREVdQmRkJJGRkYSEhAyLysYJE+JJTO7/ZHOLJdKuu+66Y/ZSCwoKMifVAPz9/dmyZcsgRyaMBGPHjgWM6MpzcQ+KtlgcLr4aDPpu9uzZw+TJvS9gbGxs8PfxoVDXZIHoji1AZeoVl5eXR1RU/8pYBUEQhKFzeGqWm5tbnzdsrKysUDo5mXuPGPTdNFQVUleafaghsRZdmZb6ygIM+m4AFp3fe0mKMHBiY2Pp7GhDV56Hi+/R+6x0d3WQ+M2TZG77gcgZl6PyCmbH8ufM1WZHI5FIcAuMJikpGYlEQlxcLDkF/b8IOMxoMNBQXWROmtUUplBTlE5HWzMAgUHBTJkQT9yNlxAXF8f48eNPuHeTTqczVU8cI+lmMBjIycnhwIEDHDhwgP37D7DvUDsXQRhO6urqKKwq4ZMN32Jna4uHx99LAgsKCnjllVfYt3cfdnZ2rFy1skejeY3G9P5w2/v3A/B/N93ExRdfPLQv4CjGjBnDn5sTe1UeOR2qSNuxYweXXHKJpcIbULGxsXR3dZqW9v3r/Vdu68D5935BV0cb5dokilO3YmVjT+b2H003GorSqClOp6uzA4lEwn333cfChQv7fe6ioiIcHR1xcenfCqvDUypd/SJ6fcY015Xzy9KraaotA2DM7Ov6PIbCPYjMrN6JtMjISJb/8FOv/+bWdgpi5lxP1MwrqMzbT2XuPlI2fkFdqZbGylzqKwoxHOpR3ldF5sSJEykvK+nX67OUwzfBDr+OY7FYIk0QTkZkZCQAtaXZFk2kHS5hXb9+fZ+JNAC1RkNRacEQRtV/QSolYBrWIRJpgiAIlmM0GikvLyctLY309HTS09N7TM1acOFFrPzpxz73nTN7Fr+t/YOqvH3UVxSg7zZNCXNxdSM6Opp5F8413xmPiorC1TCP4jcAAPb4SURBVNV1KF/aaedwr5XqwtRjJtJK0raRue0H7J3ccfFR4+jig9zGgW3LnuGsW988ZhWDW0AUu/esBkwDB3a9/8lRtzcajTRWF1FdmEZ1QQo1Rablme0tjQD4BwQyKT6euOsuNCfN+ntx90/19fWkp6eb/z6npqWRmppOZUUZHp5e5OXm4OBw5JYXd9xxJ++++w4Ajip3XPwi8Bh9NmWlHx13LIIwmJYuXUpMTAwFBQVMmTKlx7/Zl19+mY8/+pgxgZFsz9lOcnIyM2bMMD8/efJkPvvsMzo6OggLC2PmzJkWeAVHNmHCBPRdr9DWVIe98u/PjcNLO1NSUiwV2oCLiYlBJpNRXZDSI5FmMOjRVeRRXZBKdUEKtcVpVBelc3DD50gkEkLVGs6cFE/c4mvM1blHm8ppMBjIyMggMTGRrYmJbNmylbLSEjy9vCkpLurXUsbDCdiGygJcfcN6Hl/fTWNNCUajAYCDGz5n/PxbsVP0/Nx38gwia0/v7xSRkZF0tDXTUFVIV0cL9WU51JflUl+mpaEyl/rKQowG07E9vXyIjo4kevp55uWYo0aNOqHPjOEgLi6Ozo42Nnx4T7+2F4k0YURydHTESeXSY/qHJTh5BCGRyo4+uVOjYWfm8OlH80/u9nbYWcvJzc21dCiCIAinnbS0NF5//XVSUlNJT8+gqdHUL8RKboOzVxB2Km88whOQl+ewdu3vGAyGPnu43XvvvdTrdGjU6h4Js39WRghDx8nJiVC1huqCFMKnXHjUbX3CJzDxonspTtvGju9fwKDvAomEvOS1HNzwOTFzrj/q/u5B0exb+yGVlZXExsbSVPccLbpKHFSeGI1GmmpK/q40K0qlpiiNtmbT3zNfP3/i4+KIv+o8c9LMza3/gwrANOXtcOI3LS2N1LR0UlPTqK6qAEAileLiGYTSKxTf2AsIlNuye9VrZGRkmPu69aWiogI3/1HMv+ezHhfw+9eKRJowvAQHB/PYY4/1+VxoaChSiYQb51zBvryD7Nu3r0ciTSKRHHOFliVFREQAUFucTqvClYbqIhqrimioLgI46r/hkcbe3p6IUZEUpSViZW1HdWGq+UZDZ7upzVRIqJoZE+KJveUK4uLiGDduHEql8qjH7erqYu/evabE2dZEtm3fTn1dLVKpDI+gKDwjz8J/ojO7fnqFgwcPmm/EHI2HhweOCiUNlfm9nlO6+3PtKzso2L+R1E1fU1OUDpiSu91dHab/flUFNFQWUFVZQVNTEwqFwrz/4cKKZQ/PMT/m7eNLVFQk0TMvMA88iIyMtOhU2cEQFx
fHxMlTaG7qX+WcSKQJI1ZISBA1pb1LUoeSzEqOk0cgWVlZR9wmNDSU5Q2NfTaltDSJREKgyokcMXBAEIRTUFtbG2+88QZKpZLRo0czevToYfXF76mnn+anVb/g5h+Jk180SoOBzvZmWnSVVBdnQbHps0UilWE06Nm1a1ef1c+TJk1iw/r1Qx2+cBQT4uPYkpR2zO2s7RSMP+dWxp9zK13tLZRl76EkfTvF6dtQuBx9YmZ7s46uQ43+/zlwYPt3z9LV1kRNUeo/pp/5mpJml99HbGwssbGxx5Voramp6VktmZpGWlo6NdWVAEj/n73zDmvqbP/4J2FvEnbYkLDCFFBBwb2q3dXuaqu1w+727f717Xjt3sPuape1y2prbd0KbkDZG2XvvVeS3x/RtBQEtCiK53NduTAnz/Oc+wRMzvme+/7eYj0kjh5YOXrjNv4qQmQKpE5yrBw9+3n3HN7wFpmZmYNehIeGhvDnth2j1kxKQGAkCAsLo7O7i7s+1JZuXmg3N7y8vBCLxWx66zbdNjNzC7y9vVm4aBEPPfTQKEY38kyJjWHVqlUcP7IVD08vJo+PJGLZIt2NhuH4QLa1tXHw4EHi4+PZExfPoYMH6ehox8DIGHvPELwmXY9MEYGDVygGxtqsXFVPF0m/vU98fPywhDSRSIRCoaCxqrDfa2q1ip7OVswkDvhGX4WDVxg7P3uY5ppCmmrKdLZZZuYWXHnV1ZiamvaZ7+XlxS+//EJdXZ0uw+xi8b80Nzdnx7at7Nu3jzlz5gw5XhDSBC5YwkJDWbd+02iHgY2LHxVZcad8XS6X09zRSUNnF1KT4RuQnivcLEwFIU1AQGBM8sMPP/DEE0+gp6eHSqX1vJg2dRrbtm87L4xte3t66O3qoDI/CWNzCZZ2rljZu+MWGIOlnRuWdm5Y2bvR29XJ2qdmsmfPnlPaCAicX0RERPDT+l9Qq3oR6w3vdNvA2Az34KkDNgvoamuipjhDW15UlE5dcTqN1SUAWFlLUCgUuLq6MmXqNLKzU4iMCCdy0YM60czRceiGRxqNhpqamj4lmWnp2p91tTUAiPX0kTp5YungjfvEhYTJFEhkcqzt3dH7m2A22DFa27kM2Tk2JCSE9pZG2hoqMZc6DbmugMD5yNSpU1m/fj36+vqEhYXh7Ow82iGdFubm5sTFxVFYWIi3tzfe3t7Y2tqed4kBI8U777zD9ddfj1KpRCKRDDimu7ubvLw8MjMzCQwMxN/fH5VKxdNPP8327TtITj5Kb28vJuZWOHiHEzL/HpwUEVg5ePDdU7NR9fQw/vL7+6ypZ2CEvWcQcXHx3H///QPu95/4+/myNS6JzLjvaaw6TlNVIS3VhTRVl9B7wt7B0MgILy9vwnx88J0fjY+Pj+5hb29/yt/j6XQWvZBpb28nJSWFxMREkpKSOHQ4gdycbNQnSleHQhDSBC5YJk2axBdffEF3ZyuGxqeuRT/bSJ0VHD+yha6uLoyM+p9Eent7A1Dc2HJ+CmlWFuzMG90SWQEBAYGzQVpaGk42Dry77EXK6ir45eBmDh48MNph6TiZnXDTK3uwsDl19pGqtweRSMyRI0fOVWgC/5Lw8HB6ujppqDzWz8NmKLraW6gtzvirY2ZxOg1VRQCYmpkzbtw4Lr9pIREREYSHhyOXy3Ulv7t27jiti9y6ujqefvpp0jO0GWYN9XWANuNe6uiJpaMcz+jrCZfJkcjkWNm79+toV1uSRVnOYdwCY4a1T0tHb9LTB8/WCwkJ0a0tCGkCFyoikYgrrxy8vPt8Z9KkSUyaNGm0wzgn6OvrM3mytmNyV1cXubm5uhsLGZnasvVjBfn09mob90ikNtTV1hAXF8fLL7+Me/BUoq59GiefSKROckQnPpdb6sr47fVb6GprpLJg4O9xB3kEcfH9Tf5PRWxsLGvXriX+m2dwdXPHz9cX39j5+Pj4oFAo8PHxwdXV9by4aXi+kJOTw/bt20lMTORwQiI52VmoVCr0DQyxdfFF6hZMTOSN2Hko+en5K4ZcTxDSBC5YTp5kNZTn4+AVOmpxSGUK1GoVBw8e7ON7cJKTQlphUzOhTnbnOrwhcbe2pCglm56eHgwMDEY7HAEBAYF/TWdnJ7/88gsHDhzA1lyKRqPGw94VfbEe/v7+582J5cSJE/noo4/obG0YVEjT0zfATOIwqI2AwPlFWFgYIpGImsK0QYW07o4WaoszqSnKoLowjfqSDOortL43pqZmhIaFsuD6K3WimY+Pz4A+eSc53UyRL774go8++gh55HzkMTchkcmRyhRY2rmhpz/4OYFGoyFtx1cc+OkV0Gi47Z1EXanSYEhkctIzdg46xs3NDUsra+pKsvEImX5axyQgICAwXDo7O8nJyflLMMvQNkc5fqxAl8luIbHDylGOtXMk0ZHXI5HJaa4uZteaJ6isrCQoKAgA+fgF+Ey8vM/6GrWaX166lrZGbRm8qZU9Xe0tGJla9BnnpIjg6OaPyMvL03XlHIw77riDefPm4eDgMGAix8VAT08P2dnZuu7OFZWVvPzSS7i4uPQbq1KpiImdQn19/QnRTMnkG65FIlNw8OfX8I9ZiEvAJMwlQ2dvn0QQ0gQuWPz9/QER9eV5oyqkSU505Nq+ffuAQpqFhQX2tjYUN7ac69CGRKPRYG1shEqlori4WCf6CQgICFzIvPfeezz66KOIRCI0Gg3XvrocZzsnGlubuOba4bekP9tMmzYNgOaa4j5dwgbCysGT0lJBSLtQsLS0RK7woaYoA79JVwNaf7Ca4kxdpll9STr1FcfRaDQYG5sQGhrKJQsv1Ylmvr6+Z1309fT0BGDyDf/Xr6vbYHS01LFr9eMUpe5GrGeAo/e4YYloAFInOalbv6C9vb2fP89JRCIRwcHBVJRmDzsmAQEBgeGSnp7O5VdcSeHxY7pSPgupA9aOcqzdJjJ54s2I9PTZveYJwhbci3LK9X3mm1pqkyMyMzOZMWMGPr5+VOQl9hPSRGIxrsrJHDuyle6OFtqbqjny+yqiFj4GaP3RqgvTqcxPAuDgwYPDEtJAe8PhYqGurk4nmKWkpHDkaDLZ2Vn0dGvLWK3tnGlpqMHfz4+nnnqq33w9PT2MjIwInHEL0Qsf120/fnQblflJuvdf6uzLwv9uHFZMgpAmcMFiamqKRGpDQ9no+ntZ2bkh1tMnISHhlGO85XKKm2rPYVR/0atWU97SRlFjM0WNLRQ3NVPc1EJRSzvFjU10dPdgZGg46B1uAQEBgQuFzs5O3nj9DaYERuMr8+aXQ39Q01SLh48nFpaW3HPPPaMdog5XV1fEevo0VRcPOdbK3p2q/KTzsnGNwMCMj4xg09bd7Pi8hfqSDOrK8tFoNBgZGRMcEsKcqy7RiWZ+fn7o65/70/KAgABAm91v4js8Ia00az87PvsPalUPs5a/zc4vHsUjdMaw9ymRydFoNGRnZw9qrB0WGkLmT6PvhSsgIDD22LlzJ8cK8om96TlsXPyQOHljZKY11ddm237J/m9eAqA853A/Ic3SzhV9AyMyMjKYMWMGU2Jj+GXz7gH3N
e3Wl5m6+EXqy/M4lvQnUmc/Dv3yJlX5iVQfT6WnuwtTM3NmzJzJ3Llzz+pxn++o1Wpyc3N1gllycgpHk1OorCgDwMDQCBtnH6yd/Zhw9WXYuPph4+KHkaklv752Aympqadce3xkJIez+toKeITOxD14GkWpuxDrGdDeVE13e/OwYhWENIELGm8vTyrLRvcOvVhPHysHj0FLbuQKHzJ3Fp21GLp6eyluaqW4qZnCE2JZUVMrxS1tlDY00XviTouenh7urq7IffyYIZcjl8vx9vZGqVTq7koLCAgIXMh88803VFVX8diCFXg5ujMteDK3vXc//gEBfPLJJ6MdXh9EIhEWFhY01wxHSHND1dtDZWUlTk6CZ9SFwHXXXcfvm//AsrecmZfNIiLiCcLDwwkICBgV0WwgFAoF+vr61JfnI/OdMOR4VW8Pv7+9FLVaxaRrn8JRPg5jcwmFqbsImrkYsXjoDDqJTA5oMzkGE9JCQkJ4//336elqx8Bo4Mw1AQEBgTPh5E0EZ/9orB08+rxWkLCZfetW6p5LnLz6zT/ZeOVk45SYmBg+/fRTOlsbMDb/q1FBe1MtFfmJVOQlUp2fRHVxJhq1GhtbO2ImTyb2juuJiYkhNDT0vPleGE2ee+45nn/+eQAspY5IXHxxCrsU5XwF3R0tKCZcipGp5YBzpc5+HD166JRrR0SEs/nPrWjUap1/nUgkoih1FwC2bv7Muev9Pr+/wRB+WwIXNOPGhfH1uvWjHQa2rv6Upp3a70Mul/Pnhl/+1T6au7q12WSNLX+JZc2tFDe3UdncomtnbGJsjKeHB4rgcUT8TSzz9vbGzc1N8EETEBAY07i4uKCvr8+z615j3rgZGBkY0tndxSOPPDLaoQ2Ig4P9gC3s/4mlnRsajZp9+/ZxzTXXnP3ABP41CxYsoL6utk8GoUajoaioSNclLCAggJtvvnnUYjQwMMBbrqChfHjZ/Xr6BkRefj/5h39n37r/ceCnVzAxl1KefZDkPz9l3CV3DrmGobE5VrYyMjKGbjig0WioK8nGyNSSOqG0WUBAYIT4ezbuP4U0e88gPEJnUJq5j97uThI2vkvwzCUYmligUatpqSujoSIflQbS0tIBrZAGkJ+wGX1DE61wVpCo87x0c/dgzpRYYmLuZ/Lkyfj6+l5U2eW1tbXs3buXvXv3cjQ5mReef57o6Oh+4+rq6rC0kXHV079gYiHVbT/6x8cc/Pl14r99FldlDN6R8/Cf3Neqw8bVn7jd357SNiAiIoLO9haaqouwdvwrgSR60RMcT95B1MLHKMncS01h+rCOSRDSBC5oJk2axCeffEJXe/Mp1elzgUSmID9hM52dnRgb9+/M6e3tTU1LK63dPZgbDixkaTQaats7tQJZYwtFTSfFsjaKm1qob2vXjbW2tMTLywtF9DhmeHvrxDK5XI6jo6NQpnkRI5R9CVxI1NbWkpaWhp6eHrGxsSOy5ty5cykoKOD111/ns08/paOzk2uuuWbYniPnGrm3N7v3ndoaoLe7U5v5bKf1QhGEtAsHjUZDSUmJTjQ7nJBAUtIRXWdMIxNzerraueqqqzAzG56/2GC+YmdKYKCSQxnDt8kYd8mdjLvkTpprSihM2cHx5B20N9dSkrlvWEJaR0sdxha2ZGRkDjpOqVQiFovZ8Mp1upuFAgICw0ej0VBZWXnCRF/76O7u4e2338La2nq0wxtVnJycsLSypqEiH8+wmX1es7RzY+7dq2ioyCfnwAYaK48Rv/YFmirzaCgvoLurA9B2UZ63/CYA3N3dcXZxI/7bZxGJRPgHKLnuynnExMQwefLkAQ3wxzJFRUXEx8cTHx/P7j1x5OZo/S6tbGV0d3Xy5ptvDSikhYSEsOrDD/tlIftGX8XBn18HoCQjnor8JPwmXdPnmsfG1Q+1Wk16ejrjx4/vt/bJDOiq4ymoVD3UFKad6IydQW1JJutXXo1IJMJbrhjWMQpCmsAFzd87dzrKT10ecLaRyhRo1Cri4+OZNWtWv9d1nTsbm5AYG1N0Uixr1PqVFbe0U9TYRFtXt26Oo50dXnI5IRN8uOofYplUKu23D4GxjUajoaqqioqKCioqKigvL//r32XllJeXUVFeQV19PZ9/8TnXX3/90IsKCIwSbW1tTJwwkfSMv+76JSUlDVrmdTq4ubnx7rvv8n//939s2rSJBQsWjMi6Z4OQkBA2b/4DVU8XbY1V1JXl4aacjJ6BEdXHU9nx+X8wlzgy5+4PAEgdxP9DYPTQaDSUlZXpRLOEhAQSk45QV1sDaE2sbdwCkcfciJ17EHbugbQ3VfPj85eTkpLS74KiublZ10Xut99+o6mxgbzcHMoqqnjjjTd46KGHRiz2QKWSLdv3nPY8SztXgmcuIXjmEjpbG9FoVLrXNBoNHc111Jfn0VCRT0N5Po0VeTRWFNDWXA+A3bTIQdc3MTHht99+Iycnh5CQEDw9PfHy6l9iJSBwIdLT08PRo0eJj49nz544CouKWPvtNwQGBp7RetXV1X0Es9S0NDIyMmlqbABA38AIqZMX1cVZzJ9/CQsXnj+Nd0YDkUhEgL8/taU5NFUVUV+RT0N5Hg3l+TRVFlBfkU9PVycAZuYWBAQEMHnaeAIClqBUKgkICMDV1VUn5IhEIvbviyc1NZVJkyYhkQyvPHAsoFarycrKIj4+nrgTf8/lZaUA2DjLcfAOZ8bkZTj5RGBh48zBn18nLm7DgDf/Q0JC0KjV1JflYu8ZrNtuamWn+7eRmTXTb3ul31ypTIFILCYlJWVAIc3GxgY3dw92fv4fQPs7kyt8mD4xgoh7bsHDw4Pq6mrMzc258cYbhzxuQUgTuKA5mRZbX5436kIaaI0rBxLS5HKtH8jl323S3VUVi8W4Ojsjl8uJ8fFhyd/EMi8vL8zNzc/dAQic93z44YesWLFC91wkEmFtboXU3BprU0usza3x8BjH3s7D/P7774KQJnBe09zcTHpGOpdPmMuUwGge/uK/JCcnj5iQdhI7OztuvfXWEV1zpNEKKBqa68o4+NOrFCbvwNDEAidFBMXpcWjUKqzs3TA0McfYXEJe/ug22BHQUl5eTlJSEomJiSQkJJKYmERNTRUA5tZ22Lgp8Yy6jvEegdi5B2Jmbd9vDSMzK/QNjNi+fTs5OTnaC+D0NDLS0ygpq+gz1sHSiIWRzuzVt+K3XzeOqJAWEBBAa2NNP2+f4aDRaGhvqqGhIp/6Mq1o1lRRQENFHu0tjQDoGxigUPgwKTQQ5Q0LCAgIQKlUolAMfdf/kksu4ZJLLgG0nxsCAhcq7e3tHDp0SJehc/DAATo62jEwMsbBK4zaouN89913rFy5ctB11Go1e/fu1QlmaekZpKenU1+nbWqmr2+IxMkTS0dvfKcuRursg0SmwNLOFbFYj68enkhGRsZFL6QBBAcH8cknn5B/WNvUxMLSCmVAALEzJxIQcJtOMHN2dh5WtYebm9tF1UkT4MuvvuKBBx6ksaEesVgPew8lDgGz8Zrigte4OQN+9zkpIjj6x8fk5+f3+x4IDAxELBZTW5LdR0hT
9f6VbLLwmY1Y2Mhob66jriSLutJs6kqyaSjLRqNWU1t76gZ/Gzf8ws6dOwkODiYzM5PCwkLa2tpYvHgx1127kK3bdgz72AUhTeCCxtjYGKmNHfXleaMah4WdC2J9AxITEwd83dbWlo0bN1JUVKQTyzw8PDA0NDzHkQpcqOTn5yO1lPDEVfchtbDGytQSfb3+H+HlDVWUl5WPQoQCAsPH0dERqUSKqaEJXg7uyGwcB8y0Ki8vp7e3d0yfmAYFBQHQXF1MyOylFKbsxNhcQnNtCX6TriYr/ge8I+cDJzp3lmYKJdznmMrKyj6iWUJiEtVVWqHLzMoGW7dA3CZcQ7h7IHYegZhZOwzr96Onb4Ctqx/vvfcutbV1uNla4GNvzKU+5pgFyVl3qIyyxg7UGlgx3ZNbJ7tjIBbxzZHh+bcMF6VSCaBtOOAzcJaYRqOhrbGKhvL8E1lmBTRV5NNQkU9HaxMABoaG+Pj4EhseSEDAZTrBzNvbWzDRFtBRVVWlK/kKCQnhtttuG+2QzgoNDQ3s27dPm3EWF0dSUhK9PT2YmFniIA8n5JIVOPlEYueuRE/fkC0f3sPuPUNnhn722WfccccdOrN7K0c5XpNuIMJZgVSmwNLOjYzd37Jv3UrC5i3Hzk3ZZ77ESU76EP6EFwuvvvoq48aNw9vbm4CAAJycnITv1n9QVVVFSkoKaWlpXHrppf1sMjb/vpkejT6XPrQGB69QDIzNqCw4yi8vLWLv2udx9ovCURFO5KX36gz+HeXjEIlExMfH9xPSTE1N8fKWU1eardumVvXSVFXE1FtfpqEsj7ivn6ahLJuWhpoTc8xQBgYy7ZKpBAffx+LFi095PKGhoYSGhvLWW2/x0EMP4e1mSUFxM9HR0VRVVbL4Cheq6rr4M75myPdG+FYTuOCRe3tRUpI99MCziFish8TRm9zc3FOOueyyy85hRAJjDScnJzq7O1HIBi9rkZhbk1tWeG6CukBZuXIlv//+O8nJyRgaGtLY2NhvTHFxMXfddRe7du3C3NycxYsX89JLLwkXgyOESCQiKCiIjOIc9qTvR6NWk5qiFdI2btzI6tWrOXzoEBWVlRgZGpGVnTVmOwu7uLggEotpri0haPrNhMy6jbSdX3HN/22gNGMvevqGeIbOAMDKwYPqwjRqamqwt+9/l1fg31NdXd1PNKusKAPA1EKCrXsgLhFXEOYeiL1HEGYSxz4XXh0t9VTmH8FJET6s/dm4B1KYcIxLghz4eHEoGo2G1XuLWbkpBx9HcxaNl/PGlnxmBWh/3z6O5tTUFVBTU4Odnd0Qqw8PhUKBnp4eDRX5OCkiaGuopL48XyeaNVXm01BRQGebNiPMyMgYH19fpkYqUSqvIiAggICAALy8vM7oM1KlUlFUVERubq7ukZOTQ21dPV9/9eUZl7oJjD4ajYaCggL27t1LXFwce+LiOVagzao1NrOkt6uda6+9dtgegec7v//+O3/88Qe798SRmZGORqPBQmKPgzySqIVP4uAVSvrutSinXo+9R1CfuU6KCA6vf+2Ufssn6e3tRSQWc9u7Sf18pDpbG9n28f0cP7oNAFV3V7/51k4K0tIGvvF/sWFlZcUdd9wx2mGcF/T09JCTk0NKSgopKSkkJyeTnJyqy7QGWLfuexISDveZFxMzmZ/Xr8dRHo6+ofbv1trhr/O1yvwkyrIPoBi/AImT1urIyNQSO1c/4uPjBxTSw8JC2bYnjl2rH6ehLIe6sjx6e7R/y84uroSFhhJ6xR2EhIQQEhKCt7f3afuDW1tbIxKJWPtGCJcsTyA7O5vm5masLQ0oqegY1hrCFYHABU94+DhSvvputMPAxtWP4uStox2GwBjFycmJ9s4OOru7MDY0OuU4qbk1ldmV5zCyC4/u7m4WLlxIVFQUn3/+eb/XVSoV8+fPx9HRkf3791NRUcEtt9yCgYEBL7744ihEPDaZHDOZlStXklqYiYvMmeuuuxaAu++6C02nimi/SNzGu/DWrx9z4MCBMSuk6enpYW0tobm6GIDxVz5IcfqeEx4eItyCpmBoYgGApa0rICIzM1MQ0s4Ce/fu1XVeMzG3wtY9EKewSwm+Qom9RxDmUtmg2QpVx1LY+tG9tNZXcMOL27Gydx9yn3buSjJ2rSW7Srvun+nV/HdjNo5WRry6MJCSem2jof0F9SySOuPrqLV9yMjIYOrUqf/yiLUYGhri5S0nYcPbHPrpVbo6WgEwNjbB18+PGROVBAQs1JU5eXp6oqend1r70Gg0VFRU6ISyvLw8cnJyyc7J4fjxY/T29ABaHyeJgzvmdu6UZeWwfv16QUi7gFCpVKSlpWm9kuLiiYvfq8vetHXxwVExkVkz7sFREU5XWxM/PLuAQ4cOMX369FGO/N9TWFjIpZdeiqWtDCefiUxdciNOiggs7dwQiUR0tTWx5aP7KMvaT0XuYW54cXuf+U4+kfR0d5OQkKD7HBoIpVKp6xx50lrmJPvW/U8nogEwwOeVRCbnQPw6uru7hcqYi5T6+nqdYJaSksKRo8lkZWXS060tn7SydUbi7IvbhKsJc/HDxtWfwuTtHP3tXVQqVZ/P/5iYGFS9PVQdS8HZbwIAxubWBEy5jsw961Cpeohe9ATWjn0TAey8wti9J27A+K679lq2bNmKcftx5k8fT0jI7YSEhBAcHDxi3nNKpRKNRsOEhfEA2NvbIxKJWPVdESqVelhrCEKawAXP5MmTWbVqFZ2tjRibW49aHFKZnLxDv56VjloCAjKZDICG1kacpA6nHCcxt6a5pVn4OxyE5557DoA1a9YM+PrWrVvJzMxk+/btODg4EBoaygsvvMBjjz3Gs88+e1GdeJ7NEsLnn3+eK664Am9vb92JkVqtxt8/gNbSBm6etgiA7/dtJCEhgRtuuOGsxHE+4CyT0VRdBGiFhBm3vcb6lxaiVvUSOneZbpylvRsadS9HjhwZMRFF4C9qarSlHFc8tk5XejIcNBoNmXHr2PvdC6h7ezC2kJ4QPYfG3j0I0FBY00xXr5pgF0suCXJgd04tl7x9ADcbE0wN9Xh6fSbO1sa0dWsN/UdSSAN46803+OGHH3TZZUqlEnd399MWzOrr68nLy+uTXZadk0t+Xh7t7W0AiMRirO1csLDzwMp1AlER12Hl4IGVgwfmUifEYu0+N75yHXl5o2vdcTFQUFDA1q1b2bp1G/PmzWP58ttPe42srCweeuhh9u3fT0tzE3r6Bjh4BuEcdilhXiEk/b6K2qIM5t33GeZSJwDMrOwxMbciPj5+TAhpDg4O6OnrEzRrKUHTb+7zWltjNRtfu4mWOq0Bu0fozH7zbVz8MDa1IC4ubkghDaC+LK+fkKaYeDltjdWUZR8AYMPL13Hnpzl9xljYyOjt7SU3N1cQqS9Cjh8/TkCAks7ODgwMjbBx9sHa2Y9xC2ai0ajwm3TNgL5mtm4BdHZ2kJ+fj6+vr257YGAgllbWVOQl6IQ0gOpjKQDMu+cjPEKm01JXRkVuIhV5CVQVJFFXlo+buwdqtbpfNtlVV13FlVdeeVbLbMePH8+ff/6
JSqXCz88PDw8PZs2axe7du6mpqeH5558fcg1BSBO44DnpL1NfnndKb49zgUSmQKNWs3v3bp0xroDASOHkpD3xrB9CSJOeEJMrKip03WIFTo8DBw4QFBSEg8Nf7/OcOXO46667yMjIICwsrN+crq4uurr+KqEYC6bY77//Po89+iguLq54emm75a1cuXLE7gaKxWL09PR49NFHycvNIy83l+bWFq688kp+TzyiE/E87Fw5dPDgiOzzfMXPz5dtcQm653YegUy46hHKsg/gETxNt93KTusVd+jQoXMe48VAeLi2HLOzrXHYJ/C93Z3EffNfcvavRzntRipyE5E6K3ReMEMhkcnRNzCit6eLYzVt+Dma8+zlfqSVNbM9o5rDhY2oNRo6e9Rc97G2HMvczJTQ0NAzOsZTMX/+fObPnz+ssW1tbeTn5/cRzLJzcsjNzaOhvk43zkLqgJW9Bxb2voTMn4OVgyfWDh5Y2rqgZ3DqzOqTWNp7kJWdM+Q4gdOjubmZXbt2sWXLFv74cyuFxwsQ6+ljZmXH9h07uO22W0+7RHfNmjVs276d8AUrcFJEYO8Zgr6hMe3NdWz54G5qi7SeXEZmVro5IrEYB+9w9sTFj+jxjRYmJiZEhEdQkZvYT0jTaNQ0VR3XPU/Z+jnyyEv6mKmLECF18SMuLo6nnnrqlPuxtbXFzs5hQH9ot8AY3AJj6GprIiv+R4zMrcnYvZb68nway3NpqMinrakOfX390xbJBc5vKisrSU1N1WWZRUVHs+Luu/uNa21tpbOzgxlLX0c+fj7iE37Lm9+9naLU3Rz+5S3cg6fhN/kavMbN1s2zcdGKZykpKX2END09PSZPmkRa/l/lwhq1mum3vUZR2i7yD29i/3fP0VSr9W/28fXj6vnTiYn5LzNmzDhlSea58KqbM2dOn+eBgYEEBgbS3NwsCGkCFwc+Pj6IxGIaRllIO3lXaNeuXYKQJjDinBTSGlobBx0nMdeepJaXlwtC2hlSWVnZR0QDdM8rKwcum33ppZd0mW5jhZ9+/AlHS3t8rT2oPl7Dh1u24O3tzcMPPzxi+3jqqaeI3x1HqEcgk7wi+OPoTnp6eqhvbmDlj29RWFNKbVMdE8ZPGHqxC5hx48ax/peNaNRqnQATOmcpoXOW9hlneUJIS08XjKLPBq6urtjY2lFTlK7zpRuKLavuoTh9D8EzlxC96Al+fOFyaooy6O5o0ZXkDoZYTx87N38qCpK546tU6tp6aG7XivImxsYE+PsSPUdbzjJjxgxCQkJwdXU9bT+YkUCtVjN33ny2bf1Tt83E3AprB08s7D1QxE7Eyt4DK0cPrOzdMTTu3328qbqI9uZaLGych9yflYMHWTt2CM01/iUqlYojR46wdetW/vjzTw4dPEhvby8SBzdkATHMnf8fnH3H01BxjPUvXsOhQ4eYNGnSae0jMjISVW8PfpOvwcz6r+/PPV89RWXBEd3z0qwDff5vOSoiOPj7+/T09GBgYPDvD3aUmTIlllWfrO73N2sucWTJW4eoKjhKeW4CWfHfY2nnSmXBUSpyD1ORl0h1wRE62poZ5zu0p7IyUEnxCSGtq72FhvI86k88GsvzaSjPpbVRm2Grp6eHt1zBhKBAAq+di1KpJDQ0dFhdcy8k1Go1W7ZsYdOmTTQ3N/Ppp58O6jV3odLT00N2drZOMDuanExKciq1tdUAGBqbYmRmzY8//cQdy5f3E8X9/PwwMDSkq6NZJ6IBTLj6PxSl7gagKHUXbQ2VfYQ0EwsbLCT2pKSksGjRoj5rxsbGsP3Z50ne8tmJv+Uk2lsa0dPTIyxsHJctvp7JkyczefJkbG1tz9I7c+4RhDSBCx5DQ0Nsbe2pLxvlzp02zugZGJ2yc6eAwL/BysoKE2Nj6ocQ0qTm2myhG66/nqeefpqbbroJc/P+FzNjjccff5xXXnll0DFZWVn4+fmdlf0/8cQTPPTQQ7rnzc3NuLoOr7TrfOX48eNEegVx87RFtHd1cMMbd+pKjEcKV1dXHCT2PHKl9q5pemkOarWaKbFT0NPTY9k18xg/fjyxsbEjut/zjcDAQNSqHtqaqjGXOJ5ynImlDfqGxhQWFp674C4iRCIRkRERZBWlDXuOrXsA5XkJpG5fQ+7BjVjaulJXlc3OLx5jzt0fDEsAsnEPoqe5nOjZs1AqlSiVSgIDA/Hw8BgVwexUiEQi9sbHIY+cT9DMxVg7eGBsPrwMVY1GQ/ben9i3biWGJubc/GrckFl7Vg4eNDc1UltbO2KNFS4mfv/9d7766mu2bd9OQ30dRibmyPyiiL7u/+hoqSdh4zuEXx6hE7bsPAIxtbBm69atpy2knSxFrMhNRD7+r8xG36gr6elspzI/CVVvNzs//w+L3zxATWEaFXmJFKXspKOjXdfV/lzR0dFBVlYWNTU1zJ49e8SE2tjYWF555RWaq4uxcujrkahvaKx9GJlg7xHIN4/F0tPViampGROjJnLb1Q8TGxvLhAlD3zgKClSy/9PP+faxWJrrtB50YrEYTy9vwgMDCbxqmu5zxMfH56KwpHjmmWdYuXKl7rmLiwsvvfTSKEb076mrq+vnZZadnaXzMrO2c8ba2Q/3iQsZ5+qHrYs/lnaulOce5tfXbyY3N5eAgIA+axoYGODn50/dPxr1ncx4B+1n77Tb+p9TS5z9SE5O6bd9zpw5PPvscxz97V0mRkWx+KH7iYmJYeLEiWOmkchACEKawJhAIffmeOnZ7dyp6umisuAozn4TB3xdJBYjcZKTn59/VuMQuDgRiUQ4ODgOmZFmZmyKvp4+ej1iVty9gscefYxbb7uVFStWjLm7j3/n4YcfZsmSJYOO8fIavOPpSRwdHTl8uG9XoqqqKt1rA2FkZISR0dClShcKvb29lJWXMV+p9a2pbqoFwMPDY0T3I5fL+ab+a93de0crO4qLijlw8MCI7ud852T2aHNN8aBCmkgkwtLOjfqyXGpra8fUnd3zhYiIcOLf+2jYWVATrnyIiEvvoepYMiUZeynN3AciESWZ++horsPU6tS/I41GQ2u9ttylrraGVatWYWlpOWLHMtKIRCLkCgUqE3McvfuXuJ+KjpY69nz1fzoTdBflpGGVvp7s/JaXlycIaadJb28vCxcuwsjSDu/oa3FVxmDvGYJYT5+Eje+QtOkDAAwMTXRzxGI9nHyj2fzHn6edYe3g4ICXt5zyvIQ+QppX+Byc/aMpzz3MsaQ/aawsYPV94fT2dGFmbsHkSdE8ef+7Z01E6+zsJCcnh4yMDDIyMkhLSyctPZ2iwuNoNBoAPv/88wE7B54J0dHRiEQiKvISMDKzoiI/iYrcBKryE6kuykCt6kUitSFm8mRil/6P2NhYQkNDTzsb795776W+vh43NzedYObr6zsmM7CG4vvvv2fF3XfT2NQEwMqbnuDV9R8QH3/hlgy3tLQwMSqazIx0gD5eZmHzZ1BbnEHQ9Ftw8okc8HvKxkV70zg1NbWfkAYwLiyULfFH+2zrbGvUznX14/L/rKW3u4Pi9DjqSrKoLcmmsTyH+vJjuEn6i+
yhoaGUlpZgaWk5JjJLh4sgpAmMCcaPjyTpky/O2voNlcfY/vED1JZkMf+BL3ALHNgE1MbFl8Ijf5y1OAQubmQyJxpaGgcdIxKJkJpbE+4dzNMLH+TPIztY/dkXvPPOO8ydO5d7772XuXPnnldZDiOBnZ3diF1oRUVFsXLlSqqrq3WdEbdt24alpeWAJyRjkfLyclQqFYfzjtLW1U7LiS5+g3XO1Gg0lJeXn2iZnoy7uzs33XTToPuRy+W0d3bw8s/vUtvaQFltOR5jtDvnYJx8X5uri5H5jB90rJW9O/XleWRmZo75TL3RICIigramWtoaKnWm6EOhp2+IzGc8Mp/xTLjyITpbG2lrqOwjomk0GtoaKqkuTKOmKJ3aonTqijNoa64HICRs3AUhxvv7+bI/tXDY44tSd7NrzRNo1ComXfcU+9atxDt87rDmnux6mpubS3R09JmEe1EgEvX/PtfX18fH15d2I1fGX/GgbnvugY06EQ0gfde3uIdM112MuyonE/f109TX1yOVSk8rjimxMWzavp/2phoq8hK1AlJBEjUl2WjUauzsHIiNjSH2gWXExMQQHBw8Yj5dvb29ZGdn6wSz9PQMUtPSOH6sALVa24HPUuqItZMca/kUpsQuQyKTs2fNYxw9enSI1YePtbU1ysAg9n2/kl1rngDASebMtKlTiP3PncTExODv7/+vz8EUCgXffPPNSIR8wfPiiy/S0dbBJL9I4jIOolKr8XJ0JyfnwvVXrK2tJTMjndC5t+MXfRVWDh66Msz4b5/jWNIWjiVtwdTKHv+Ya/r8Hwdt10xLGydSUlK47rrr+q0fEhLC2nXfo1ar0KhVNFYco7Y0m6AZi2koz+P7/5tFW5PW79LM3ILgoCBmXzaLkJCQU9oX2djYjPC7cP4jCGkCY4KoqCjefvttOlrqMLEYuf/IGo2GnH0/E7/2eXq7OxCJ9bD3CDrleKmzgtwDG2hpacHCYmhvFAGB00Hm7ExeUtaAr6nUKpramqlvbcRQ34CG1kbsrGy4edoiro25gvjMQ/xxZAfz58/Hy9OLe+69h1tvvRVra+tzexDnAcXFxdTX11NcXIxKpSI5ORnQijrm5ubMnj2bgIAAbr75Zl599VUqKyt5+umnWbFixQVxoTsSuLi4cPfdd7M3fi8bk7bQ1NyEwluuExb/SXFxMRMnTKSiUltiYqhvQHdvD3Pnzh00a2ry5MlMm6o105/hE4FcLr8oPSZNTEywspbSVFM85FhLezfEYn1BSDtLnGw4UFOUPmwh7Z8YmVnR29PJ8aPbdaJZbXG67sLEzt6R8ZERRF5zH+Hh4YSHh+t8MEeKpqYm9u/fT2FhIUuWLMHExGToScPAx8eHP7bvGdbYhspjbH73dvQNTZi7YhXGFhJEIjHNNSXDmq9vaIyVrTO5ubn/JuSLgo6Ojn7ZjPPmzuG9Dz/r471o7xmMq3IyNUUZdLY2UJIRT1tjFY0VBZRk7KUkfQ9qtZri4uLTFtJiY2NZvXo1Xz6sFT09PL24ZFosMTEPExMTg1wuP2ted/fdfz8frloFgIXEDmsnBdbuUcRE3YJEpqCjuZZDv7zJtNte6yNwW8t8SRthz8kP3n+PtWvXMmnSJGJiYnB3dz/vPP6SkpJQqVTY29uPeKb5ucbBwYGCnHyWzbqJ+MxDrN7xHbXNdYj0Ltwbxh4eHphbWGJkaoVE1jdbc8JVD5GfuJnOlnram6rJO/RbPyENQOLsO2AZJmiFtJ6uTn56bgGNVUWoensAcHP3IDw0lNBFWtEsJCTkvLMYOJ8QhDSBMYGuc2dZPs5+AwtpLXVlHP3zU9yDp+HsNxH9ITpGdXe0sOfrZ8g/vAm/ydfQXF0MIhHGJ7oiDoREpkCjUbNz504uv/zyMz4eAYGBkMlk7Nq6k7V71tPQ1khDayMN7c3any2NujIFgJiAv0qQDfUNmREcw/SgyeSUFbA5aTuP/udRnn7qKeLi43UXjhcLzzzzDF9++aXu+ckunLt27WLq1Kno6emxadMm7rrrLqKiojAzM2Px4sXD6uAzVhCLxXzwwV9ZC42NjRgYGJzyZCo3N5eKygpun30TkYowenp7WPHx46SkpDBjxqlN2+3s7Ni5a+eIx38h4u7uRvMwhDQrOzfUql4yMoSGA2cDZ2dn7OwcqC5MwzNs1rDmtDVWU1OYRk1RBjVFadQVZ+iMvm1t7YmMjCDyyhWEh4cTEREx4l6DoG2EEh8fT3x8PLv3xJGRnqbLxGlvbx+xJiEKhYKW+ip6OtswMB7c+8Zc4oRP1BUUJu9g01u3Yu3ohYmlLYm/vYdbYCx2HoFD7s/S3p0cQUgbFI1Gzf79+7nyyiv7bJ8zZw4vv/wytaVZ2LkpAbB29GT+A19QX55HQcJmSrP2s+7pWfR0dWJv78j8ubNZsGDlGXWFve6662hubsbBwYGYmJiz8nd+Kmpra7F19efSh7/s49un6u0h4dd3Obr5I0Db7OLvQppUpiD9wPcjGktsbOx5fZNjz549TJ06Vfd8/fr1/f52LiTc3NzY1rWNrcm7ESGiprWeKdOncv/99wNac/5ff/2V9evX09LSwtq1a89772CRSERwcBDVpf1vnusZGNPZos1kdvAOY9byt3WvadRqmqqLqCvNpqu9haPJA392RkVFcdddd6FSqQgODiYkJITg4OARsxbIzs5GJpOd11YFI4EgpAmMCeRyOWKxHvXluTj7DWzSmXvwVzJ2fUvGrm8xMDLDNXAyHiEzcAuagolF37tuVcdS2P7pg3S01DNz+Vt4BE9j9QPjmXD1I4PGYePsA2gvyAUhTWCkmTNnDp9/9hl7Cw7j6OSEs58r42Qy7O3teevNtwiQKbg25gok5lZIBhB8RSIRfi5y/FzkTAuaxHPrXqe4uPiiE9LWrFnDmjVrBh3j7u7O5s2bz01AFwBDZS6GhIRox5lZYW9li0qtxsTImOTk5EGFNIG/CFQGsDU+echx2s6dGubOHV55nMDpIRKJiIyMIL1oYKGyvamWmiJteWZNYTp1xem0NGi7pUltbImMCCfysjt0opmzs/OIZ6NoNBoKCgqIj48nLi6OPXHxHD9WAIDEwQ1773Bib74WJ0UE8d8+Q1xc/IgJaT4+2vOcpuoibN0GL3U3MDJhxtLXUPV0UZp1gGNJf3L86HatEBy3jqke/xtyf5b2HuTkDL/5w8WIiaUNO3fu7CeGREdHY2pqRmnGXiykMkqzDlCSEU951j6a6yowNDJi8uTJzLvzBWbPnk1QUNC/+ls1Njbmvvvu+7eHc0YEBQby+x/bMDKz7rN9z1dPkbP/F91zSzuXPq9LnRUk1lZTU1Nz0fjwFRRoPyveuO05Hv3yeUpLS0c5on/Hfffdx7rv1rH+8GZcXV35ffPveHp6smPHDl5//XW+XLOa9IxMAEQieOSRR/joo49GOeqhCQsN5fsNW/ptV6t6MJfKkMjkjJt3B0Wpu6gryaKhLIe60hy6uzoAbebz8tuXDbi2iYkJq05kcI4kBQUFPProo6xfvx5vL2+ysrPGtGeaIKQJjAkMD
Aywd3AYtHNnyOzbKEjcTHN1CUEzb6E0az87v3gUkUiMo3wcHqEzcA+ZTmHyDg7/8ia2bgFc+tAaLO3cOHZkK6rebjxCpg8ah5nEEX0jU44cOTLoOAGBM+GSSy6hpbW134nu119/TVt7G0tmXI+zzamNyv/OvqzDOMucufTSS89GqAIXGXZ2dsgcnTheVcwk//G0d7VjZ2mjK5sVGJqAgAB++uU33fOutiaaaoppri7W/qwppqWmmObqQoARLwUU+IuIiHD27H2f9ua6E4JZGrVFGdQVp9NcXwmAtURKREQ411+ylIiICCIiInB1dT0rJVwqlYq0tLQTwlk8cfF7qa6qQCQSYevqi4N3FLNm3ouTTwRm1g595jrII4jf+x1qtXpEynNOCmmNVYVDCmkn0TMwwj14Ku7BU4m9uYfynIPYnsiQ+ie93Z00VRfRWHWcpqpCaorSaaks6JNxLdAX96CpbNu+o992Q0NDpk6bxrZNqzi0/g00Gg2+fv4sveU6Zs+eTWxsLKampuckxvr6ep1/WXV1NQ888MCIZqsolUraWxr6NfiwcfXH0GQ73R0tAGx45XpufOmvLGipTNuEKSMjo0+W1limrq4OQ30DqhpqUKlUF7y3VXBwMM0tzYjFYjo7O7nrrrv48ssvdZ8ZfxdyNBrYufPCyIIPCQlh1Ycf0tvdSXtzLXUl2dSVZlFXko2BgQEl6XGUpMed8EP0Y0ZUKCEhN+pKMk9lxXE2eeaZZ/j9t03cOOVq1satZ/Xq1Sxfvvycx3GuEIQ0gTGDr4+C7EE6d+obGDFr+dv89MKVtDfVcPWTP9HeVENhyi4KU3ZweMPbHPhR2+o3dO7tjL/iQfT0tR++hck7kMgUOuPbUyESiZA4yXV3ewQERpqBLtLefecdwryDhi2iNbe3sifjALa2tnh5emFhYYGlpQUWlpZYWVlhYWFxYpul7t8WFhY6rw8BgYEIHRfGtt17iMs6SHWDtqzN5jQ9di5mvL296e5oYf3/rqS5tpSOv3XotbKW4OXlRXCIN97eMwgLC2PcuHGjF+wYJyIigrbmer58SFsib2llTUREOIvmLNaJZmfT96irq4uEhASdcLZv/35ampvQ0zfAwTMI57BLifCJwFEejpHp4GKETBFB0m/vk52dPSLNUqRSKRKpDU1Vx89ovp6+ATLfibTUlVF9PIXGykKaqgtpPvFoqi3XjbW0skahUHDtXc+edx5T5xPOflHs+PxnysrKcHZ27vPayv+9gJOjA9HR0cyePRsXF5dTrDIyNDU1kZmZecLwX9shMz09k+oqrX+mWKyHRqPG1NSURx4ZvMrjdFAqtcJsfXleHyEtZNatBE67kYq8RI78/iH+MYsozdpPfVkeDeV5NJRrS9/Ky8sHXHcsEhISQq9axau/vI+RodGYaKJ08ibBDz/8wJo1axCLxLyx9Dl2psSzO+sAvT09aAAjfTHVJ7xcR4P//vf/+OjDD3n0sSeGzBIOCQlBo1az5sHx9JzIMpNIbQgJCeHyKVfpBDN/f//zxr933rx5rF27ltK6CsQi0Zi3oBCENIExQ2RkJAcODd6yXuLkzeQb/o/da57EJWASivELCIhdREDsInq62inN3I+BsSku/n91h1KrVRSl7sI/9tphxWHj4suxhFNnxgkIjCSHDx8mMSmJpxb2Nxo9FTtS49BoNFRUVjA/YhYioKOjk9bGBmp7Kuns6aSjp4v2rg46ujpo7+pApVIRGhLK0eSR624lMLZ4/PHHeVX8KkqlktDQUEJDQ1EoFKMd1gXDnDlzuOWWxejr6+HtfT3e3t66h0QiGXoBgRHjkksuYfXq1ZiamhIREYGnp+dZFXKam5vZv38/8fHx7ImLJyHhMN1dXRiZmOHgFYb/9FtxUkRg7xmCvqExAJ2tjUOKaAAOXqGIxXrEx8eP2AWzj4+ChqrCQcdo1GqtiX1VIU1Vx3U/W2uKaaguRq3qBcDY2ARvuZxIfx98L5+Cj4+P7mFjYyMIaMNg5+rH0NPXp6urq99roaGhfPbZZyO+z87OTlJTU/t1yawoLwNAJBZjbe+GtZMCl4grCJYpkDr7YO3gwa+v3TDiF9je3t4YGBrSUJ6Hi38UHS111Jfl01CeR315Ho3leTRU5LH904cAMDQywsfHl2njAwlauoiFCxeOaDznM7Nnz6azs5P6+no0Gg2OjsO7CXshcNLbT61Ro1arCXT3Y1PiNgDMjfQwNdSjtrWV1atX89NPP5Geno6t1IbDiQkj1kF2MNZ9t5b6+nq++XoNd999Nz///DMymYzp0/tXPIWHh/Pmm2/S2dmpE81kMtl5/Zl4/fXX8/Zbb5OUm8p999/PHXfcMdohnVUEIU1gzBAVFcXrr79OR3Mtplan9jnwm3QNpZn72PPV0zh4Bp/wmwEDI1M8w2b2G19VcJTO1oYhyzpPIpUpyO7uorGx8aLsiChwbqmu1nrzfLPnJ+pa6pkSGI3JiQutgVCp1fx5dBeTJkWzJy6O62OuxNxkcMNojUbDp1u/obD94rljK3D6xMTEEBMTM9phXLBIJBK+/HLNaIchAOjp6bFkyZIRWauhoaGfENrb28uGDRt0jQHS01JRq9WYWdrgIA8n4oqHkSkisXH1Q6zX91Rdo9GQtOl9kn7/kNl3vodn6OAehAbGZth7KImLjx+xixo/X1+27ktFo9HQ2dpAU1XhiVJMbUlma00hDVWF9HR1Atr3093DE6WvL74zLkehUOjEMmdn5zHXEa6wsJAXXniBnTt3UllZiUwm46abbuKpp57C0NBQNy41NZUVK1aQkJCAnZ0d9957L48++uhp70+jVpGQeAQvL6+RPIxBue76G9i4Qes9JnFww8pRjmPIAgLmKZA4y+lqa+a3N27B1j2ICVc+1GeulaOc1LT0EY1HX18fHx9fjmz+iKObV+k65OobGKBQ+DA5LBDlTZcSGBiIUqnEy8sLff2L9zLYwMAABweHoQdeYAQHB6MMCCArO5u3fv2Y2matKb+hvj6tXb1oxPqMnzCR2267DX09fWQSB44UH2X37t3nxM/1iSefZtUH72Pv4ISzzImGxiYMDPQpKSnt9/vQ09PjwQeHf5P8fEBPT4+HH3mYFXev4K233uKTTz6hrKwMKyur0Q7trHDxfoIIjDkCA7Xdn+rLcgcV0kQiEbE3vcCPz1/Gtk8e5IrH1ulKOAeiMHkHJpa2OHiGDCsOqbMCNGq2b9/ONddcc3oHISBwmsyfP5+dO3fy7rvv8smvX/PNnp+YFjiJS8Jn4iTtf5KUlJ9MVUM1y6fcwZ64OEyMTi26nUQkEtHV04WlpcXZOAQBAQGBCxqNRkN1dTUZGRlkZmaSmZlJWrr23/V1tTz99NO88MILuvHffvstS5YswdreFXvvCGJuWoiTTwTWDoNnvvV2d7JrzePkH/4dAEPj4XWec/COYM+e7f/uIP+GQqHg62++5csHI+lobdJtd5I54+vri1/kVJ1QplAo8PT0HNOG0/8kOzsbtVrNxx9/jFwuJz09ndtvv522tjZef/11QJuFOHv2bGbO
nMlHH31EWloat912G9bW1mfkKeTt7T3ShzEojY2NuPhHM/eeDzEw+stnTa1WcXTzRyRsfBfQ/s3+E6mzgpTft4yYb99JXn7pRb786iuUAQE6wUwul19Uf3sCcNvSpTzyyCNUN9UQFBzMiy++yLhx42hra8PDw4PDhw8zYcIEnr/hMdztXLjxzbv47bffzqqQ1t3dzYYNG/j8s89ISDyCvlhEjI+UiIlyXvsznx9//JF77rlnWGvl5uaSl5eHjY0NEydOPGsxnylbt25F3aPirnlL+PCPNSQmJo7ZplOCkCYwZvD29kZPT5/68jxcAiYNOtbI1IJZy99mwyvXcXjDW0Rdc+o7gIUpO/EImY5omF/2UmdtKdPu3bsFIU3grCMSiZg2bRrTpk2juLiYDz/8kE8+/oTfErYSLg/mkvCZhHkFIRZp/343H9nB+Mjx2NraYmhgiJ54eKns7V0dWDgI5WUCAgIXLxqNhsrKyn8IZulkZmbR2KDNfNDXN0Ti5ImVkxzvyTdinH2QzX/82UdIO5mhNv+hr7C0HZ5nVXtTDX98cBf1pTm4KidTln0Qe8/gYc11VISTsu0LiouLcXNzO82j7s/y5cupra3Fzs5OJ5jJ5fJzZlx/vjN37tw+XXW9vLzIycnhww8/1Alp3377Ld3d3XzxxRcYGhqiVCpJTk7mzTffHFRI6+rq6lPC2dzcfPYOZBCCApVk5P3ZR0QDKM85zOENbyPW00ejUg84VyJT0N7eRlFREZ6eniMW04IFC1iwYMGIrSdwYfLggw8yZcoU/P39+eijj3j88cdZtWoVUVFRAISFhaGnp0duWT5Sc2skZlbs379/RGPIz8/H09NTVy56+7KlfPX1NwS7WgPQq9aQVNTMruw6AgP8mTmzf0XUQKSnpxMWFkZvby9isZhjx46dd97FEyZM4OuvvsLGQopYJObw4cOCkCYgcL6jp6eHg6Mj9eX5wxrv4BXC+Cse5ODPr+HiH4Wrsn9JUkPlMRorjw0qtP0TUyt7DIzNOHpU8JISOLe4ubnx0ksv8d///pd169bx7jvv8sL3byKzcWRu2HR8nb1JPpbON89/Q2FhIWbGw7/o6ejpQjaCHbYEBAQEzlc0Gg3l5eV9BbO0dDKzsmhuagS0DYykMi8sHbzxmbIYiUyOVKbA0s61TzmmnoERqZs/6OPfOmmS9mZfRV7isIS0utIcNr97Oxq1mssfXUvewV+pyEuirbEKawePIec7KcIBiI+P58YbbzzNd6M/dnZ2vPXWW/96nYuJpqYmpH9rvnLgwAFiY2P7lHrOmTOHV155ZcBy4JO89NJLPPfcc/8qlo6ODrKyssjIyCAyMhI/P7/TXkOpVFL/4UeoerrQM/jL6NzOPZCQ2UspTN5OU3URx49uo6WuDAsbZzRqNc21JbQ1aDvfZmRkjKiQJjB8KisraWxsxM7ODqlUel77bp0uIpEId3d3vDy9qKquQk+sx8KFCyktLQW0Za0ymYwvd/3Amp3fAyOb0fnaa6/x6KOPYm9nz1NPP8V9992HkbEJdpYmeNqakFnWRK9aw/zLr+aee+8lKiqKgwcP8tVXX1FcXIxYLOaRRx7p89lQWFjIrl272L59O729vSyffTOfbP2aoqKi805ImzRpEhrgfz+8iYWFBZGRkaMd0lljVIU0Dw8PioqK+mx76aWXePzxx085p7Ozk4cffph169bR1dXFnDlzWLVq1Zis8xY4ffx8fUg/furOnf8kdM4yyrIPsOPzR1n039/6dPoBKErZib6hMc5/az4wFCKRCKnMh2PHjg17joDASGJsbMySJUtYvHgxBw8e5L333uPrH3+kp7cHO1s7rrnmGv773/8Oq6zzJJ09nSPaql5AQEBgtNFoNJSWlg6YYdbaos30MTA0QiqTY+ngjf+MaCROciQyOSYW0mGZ/Utlctrb2ygpKdFlg9nY2ODnH0BFXiK+UVcMuUZtcSat9RUYmpiTsPEdpDIFGrWKP967g6ue+AEjs8H9Z0wsbLCReY+YkCZweuTn5/Pee+/pstFAK2T8U0Q6eS1TWVl5SiHtiSee4KGH/vIca25uxtXVdcCxnZ2d5OTk6BoCpKVpu2gWFR5Ho9EAIHN2pay0+LSPSalUolb10lhViI2Lr267kakFUQsfI3D6TZSkx1GZf4TDG96hqTKP+vJ8nW+eRGqDr6/vqZYXOAvcf//9rPpgFWqNGjRaQ36A++69l3fefXeUoxtZ4uLiqKqu4raZ12NubM67mz7l888/Z+nSpQBs2LCB//73v0yYMIErrrhC1/V1JPj6q68I8VTS1NbCe+++x3333ceKFStY991aslpMeeW117nllluwtdVec+7Zs4epU6cCYGlmQXNbC/7+/lx55ZW6NWfOmEnBsQKMDY3QE+uxZuc6QFsyer4RFBREcnIy+vr6+Pr6jjkPzL8z6hlpzz//PLfffrvuuYXF4B48Dz74IL///js//vgjVlZW3HPPPVx11VXs27fvbIcqcAEwfvx44va9O2jnzr8jEouZftur/PDspez84j/Mv//zPiWcx5N34BIwCQMjk9OKw8bFh/xDWacdv4DASCISiYiKiiIqKoo333yTr776ismTJ2NkZERzczMmBqcjpHUN+fksICAgcCFQWlrK1dcsJCMjg7bWFgAMjIyROsmxcpKjnBWDVKYVzCxsXRD/rQS+t7uT/T++TN7BX7ns4a+w8wgcdF8SmdbuISMjo09Z5dQpsfz467ZhxSuPvARzqYzK/EQq8pLI2PMdqt5uGiuPkbrjKyIvu/eUc7s7W6nMP4pIz5C4+L3D2p/AwDz++OO88sorg47Jysrqk+FVVlbG3LlzWbhwYZ/rnTPFyMgIIyOjfttzc3MpKCjoI5gdP1aAWq0VSyyljlg7yZHIpzJlyjKkMgW1xZnEffPfM2qOdVJ4KM85RFtj9T+6Y+bT1dEGgKmZOQEBAUyeNh6l8ladd9lIdR/s7u4mKSmJuLg4Dh9O4MEHH2Dy5Mn/et0Lnd7eXgoLC6mpqWHixImIRCI2b96M1NyaueOm8/3eDQS4+tLY1sQff/7JO6Md8Ahz2WWXIZVISMxL4YmF97N6x3c89cSTOiFt3Lhx/PbbbyO+3+bmZtIzMpA7edLQ1sgt1y4GICQkhMqqakxMTPr93ff09ADw7u0rcbGVcd1ryykpKekzxtDQkJkhsdhaSvl+70ZeeP4FAgIChl0Seq4JCgoa7RDOCaMupFlYWAy77W5TUxOff/45a9eu1bWJXb16Nf7+/hw8ePCUhnvni5+AwNln4sSJ9Ha9TFtDJeZSp2HNMbWyY8ay19j01m0kb/2csLnaE52Oljqq8o8w5Zb/nXYcUmcfuru7qK2t1d1xEBAYTRwdHft0A2tpacF4kO6e/6Sju1MQ0gQEBMYE27dv5/Chg0Rcei92HoFIZQosbJyH9EKtL8tj2ycPUF+WC4CxxdC+kRZSGYZGJmRmZjJv3jzd9piYGD766CM6WuowsbAZdA09AyOc/Sbg7DcB0Bq615fmUJGfhDxyfp+x7U21VOQnUpG
XSHV+EtXFmWjUamxs7Vhy73+GjFfg1Dz88MNDdnP9e+fM8vJypk2bRnR0NJ988kmfcY6OjlRVVfXZdvL5cK+L/s7J8ikLiR3WTgqs3aOIiV6MVKbA2FzChleuxztyPn6T//Lu1dPXlo5lZmYSHT38ygsAqVSKs4sbe7/Tev+ZmJji6+fHnJhQlMobUSqVKJVK3NzcRrRssL29nYMHDxIXF8fuPXEcOniQzs4ODI1NEesb0dzSzLatW0dsf+czGo2GsrIynfl8bm4uOTm5ZOfkUFR4nN7eXgA+++wzli5diqGhIdZmVlwVNZ9NCVuxMZfQ2Np02iLqhYC+vj5vvPkmt956K7e//xBtne2Mj55w1vdbWlrK5MmTSUlOprWjjQ2/bOCll17CzMzslB6SJ2+w7M9KQGIpwUDfgJdffpkFCxboPk9Cw0L57rvvAHBydOLee+/F3Hx4zWYEzh6jLqS9/PLLvPDCC7i5uXHDDTfw4IMPnrIdcVJSEj09PX3UVz8/P9zc3Dhw4MAphbSR8BMQuDA4qYDXl+cPW0gDcFXGEDpnGYd/eROZz3gcvEIoSt2DBg3uwdNOOw6JTAEaDVu3buWGG2447fkCAmeblpYWTAz639U+Fe2d7UJpp4CAwJjA398fAI/QGdi5D13So9FoyIxbx751K7G0c8PWTYlGo8LCxnnIuSKxGKlMTmZmZp/tMTFaX9aKvCS8xs0+rfjFYj1s3QKwcfWnpbaEotTdWuGsIJH6iuMAuLl7MGdKLDEx9xMTE4OPj8+Y8kEaDezs7LCzO3VX+L9TVlbGtGnTCA8PZ/Xq1f3Km6Kionjqqafo6enRdZXctm0bvr6+pyzrHIqrnvwJB6++HeZLMvby56oVdLTUcfTPT/sIadZO3ojEYjIyMk5bSAOIj9tNeno6SqUSDw+Ps1LC1djYyL59+4iLi2NPXBxJiYn09vZiamGNg3cE4y57ACefSGxd/UnZ+jn7//iQ3t7eU15Lng90d3ejVqsxNh7ezcy6ujpyc3N1gllObi7Z2TkU5OfT0dEOaD8TrO1dsbBzx8pjElETbsTa0ZN9a5/VeTYbGhqSX53PA589TVN7M8eqiqhuqiXAfmxmDy1evJjv1q4lMzOLz974nGuvvfas7/P5554n6XAikYowLEzM2ZSwlfT0dCZMOLWIl5CQgIG+Aev2bkBPLAZEdNfUsG7dOp588kkAPvjgA2bMmIGfnx+BgYGCiHaeMKqfMvfddx/jxo1DKpWyf/9+nnjiCSoqKnjzzTcHHF9ZWalV0/+hnDs4OFBZWXnK/ZyOn4DAhY2Hhwf6+gY0lOfhFti/ecBgjL/yQcpzDrP90wdZ+MxGClN24OAV2s83bThIT5RyxMXFCUKawHlJU2MTJsPMSNNoNNqunUJGmoCAwBggICAAgPryvGEJaXHfPEPmnnX4RF1J7E3P8ePzl2NqZTtsGwlLRzmpael9trm6uuLi6k5FXuKwhTSNWk19WS4VedqMs6qCRFrqqxCJRPgHKLnuynnExMQQExODs/PQIp/A2aGsrIypU6fi7u7O66+/Tk1Nje61k9lmN9xwA8899xxLly7lscceIz09nXfeeedfNXHQmvj/JaRV5h9h01u3IjpRmuwRMr3PeH0DIyQO7mRkZJzR/jw9Pc9Ks4ADBw6wbt06du3eQ3paKhqNBguJPVI3JeGXP4B78DSkTvJ+GaROPuM5+PPrHD169JwZnKekpFBYWEhTU5Pu0djY+Ld/N1Hf0HDieTMtzU10dnZgZ+9ASvJRnJxOfdN/06ZN3HzLYl1HYABLGycs7d2xsPMnYEYs9p5BWDt4YmHrgp6+Qb81rBzl5OTkANrr4WVLl1HVUotGA8W1ZWg0GhYuXDjyb8x5gEgkYss5zk709PLEzMSUpTNvJK+igE0JW0lLSzulkKZWq1n1wQdYm1oyO2wq3+75mbnjppNUkMKunbt0QppEItGVpQqcP4y4kHY6/gF/F7eCg4MxNDTkjjvu4KWXXhqw/v9MOZWfgMDYQywW4+jkRH153mnP1dM3ZNYdb/HDc5exa82TlGTsJXz+3WcUh4mlDYYmFiQnJ5/RfAGBs01zczM2hsMTxnpUPahUKkFIExAQGBNYWFggc3ahYZhdvjUnfKYKEn6nq60BGxcfjiVtIW3HlwTPXDLkfKnMm7Qt2/sJb1OnxLB9X+Ip56l6uqguTKciL5HK/ASqCo7S2daMvoEB4eHhXLVsMTExMUyaNOmMs5gERp5t27aRn59Pfn4+Li59u7KeNPm3srJi69atrFixgvDwcGxtbXnmmWdYvnz5Ge3TWiKlvjwPr/A5um3mNjIcvcdReUybkZS85TMCp9+MhY1MN8bKUU5aenq/9UaTq69ZSHNbFy7KWKYuvg4nn0jMbZz55A5/ilJ2kb33Jxy9xxEyZyk2zj66eXbuSgyMjImLizsnQlpmZibh4eGoVCpAex1hYmaJoakFBsZmGJpYYWBijqGJO6YSC6xNLDAytaC3u4tD618nPz9/UCFt9+7ddHT1MPvOd7Fy8MDK3h0DI215YPrOb4hf+xw2rn4oxl+Kd8RcLO3c+q1h7eBJTuYWABYtWsSiRYvOwjshcOzYMaqrqwkODqamsY5b3l4BgJGhEf7+/hQXF2Ntbd2nskOtVhM1MYrDCYcx0DcgIT8ZgD+P7MTC3By1WtUnY1Xg/GPEhbTT9Q/4OxMmTNCZIw7UycXR0ZHu7u5+pphVVVVn5CcgMDbx9/PlaM6ZGf1b2rkx5eYX2P6pVuT1CJ0+xIyBEYlESF18OX688IzmC4wuKpWKJ594EjNzMx599NFhp99fSLS0tKDq7SIu4wAmhiaYGhljYmiCiaExJkban4b6BohEIjpOdNkShDQBAYGxQqBSSW7F8IS0qYtXEjZvOYXJOzievJ3KtDgADvz0KooJl2FiIT3l3N6eLvSNTGlrbaGsrKyPsBITE8Patd/R09mGgbGZrjFARV4CVfmJVB9Ppae7C1Mzc6Kjo1h6zSPExMQwYcIETExOrwmSwLljyZIlQ14LgTaJID4+fkT26e/nR80/hGFziSNXPvE97c11FB7dRnnuYVobKinJiD/RGCCfmsI0TDz7CzCjiZeXFxUdpky/rW9ihk/UFeQe2EBTVSFNVYWYSx37CGl6+oY4eIURFxfPww8/POg+4uPjKSkpoauri+7ubt3j789P9dqUKVO49957aW9vR6VScelDa3BURKB/wi7j6B+fcPDn1whfcDcBU67HXNL3GrWtsZpD61+nqalp0Bh9fHzo6WzHI3SGzs/uJDau2vL0upJs6kqyydr7Izes7N+8xMrBg+QtxXR2do7Jc9nRQKPREB8fT2VlJUZGRtx80820tLYgAn786Sc++eQTLC0tCQoKQqFQ8P333xMTE4Ovjy87d+3UiacikYjcXK3fZq+ql5r2vzIPrYwt2blrF4cOHRKaZ5zHjLiQdjr+Af8kOTkZsViMvb39gK+Hh4djYGDAjh07uPrqqwHIycmhuLiYqKioM45ZYGwxYcIEdu5+fdglF/9EMeFSao
sz6WpvRuIkP+M4bFx8yS06v+7yCQyPjz76iFdfexV9PX2+/eZbPv3sU2JjY0c7rBElJiaG1WtWk1J46pIOPbEYU2NTDA20J3CCR5qAwF+czCwRfKcuTAIDlSR8+/Owx1vZuxMy+zZCZt9GR0s9Ram76Wpv0olovd2dNFQW0FCeT315Po3l+TRVFdBQVYRGrcbSyrpfZkFMTAxqtYqdqx+jra6U6uIsXWOAmMmTmXLnDcTExBASEnJeez4JjD4BAf5s3KIV5Xo626gvz9d20izLo6Eij8aKfJrrKsg79BtisRgPTy/Cg4IIvOo+3TXV+cLUKbG8/d5H/c7jg2cuIffABgDcg6cSOvevLqjtTTVU5CXS3dnGvv37B10/NTWVKVOm9PkM1zcwRM/ACD19A+1DzwA9A0PEegaI9Q1O/DSkrbGGjRs3cu+992JlZaWdL9bTiWgAPV3ajqVJm1Zx9I9PiLz8fsZdcqfudSNT7blUY2PjoHH6+PigVqtori1F4tg3CcVRPk73b5FYD8WEywZcw8rBA41GQ0FBga7TqsDg/NNj7+WXXyY7O5vi4mKSkpJoa21FdSJL2dfXl9bWFu5bcDur/ljNmjVr+nQD3bJlC4sXLybaL5Kc8nymxMaSm6etmmppaaH1RNdojUaDrYkEI40BdS313DTlGl5d/z47duwQhLTzmFH7Vj5w4ACHDh1i2rRpWFhYcODAAR588EFuuukmXXp6WVkZM2bM4KuvvmL8+PFYWVmxdOlSHnroIaRSKZaWltx7771ERUWdstGAwMXHxIkTUfV00lpfPiwj4IGIWvjYv45DKlPQ091FRUXFoKnbAucXpaWlPP7448wJm8b8iJl8+OeXTJkyhWXLlvHqq6+OmfKZz7/4nM8+/4y2tjZaWlpoaWmhubl5wH+3tLRgbW3NlClTRjtsAYFRobq6moyMDN0jLT2d9PQMZs+eww/ffzfa4QmcAQEBATRWv0Vvdyf6p9HBuKerg9a6ckRiMR3Ndfzx3h00VRbQWF2suzB3kjmjVAYQOPVylEolAQEBBAQE9PP49fPzI3bqNAqP55xoDPAAkydPxtfXVxBoBU4Lf39/vli9mu+emEZjTSmgFYjc3D0ICQoi8NIluk6afn5+53V2UmxsLCtXrqSx8hgSJ2/d9rZGbVdT5dQbCJmzlONHt1ORm0BVfoKuyYaHpxcrHnt0wHVP0t7ejkaj4eqn12PrFoD4hIdcT1c7G1+9kbB5y/EcN1u3/e9kxf/I7i+fRK1W6/4/d3e09BnjN3khSZtWAaBW9dLW0Lc760nBbjgZaQBNlcf7CWkn3wuAKx77DkfvMF0sTVVFNFYdp6m6kNpibYXO8ePHBSFtELq6upg6dSrpaWl0dHTy8Scfs3TpUnJzc3niiSfQE4tRqdVIzK1RqdU8ufAB3tr4EWq1Gg3w0/7f6FX1kpKS0mfdoqIi1Go1HvZuFNaU0N3do3vN3NwctUYryInFYtBoMDQwpEfVy8s/v4uri0ufBosC5x+jJqQZGRmxbt06nn32Wbq6uvD09OTBBx/s45vW09NDTk4O7e3tum1vvfUWYrGYq6++mq6uLubMmcOqVatG4xAEzlMCAwMBbZv6MxXSRgKpTA5oO3cuXrx41OIQGD4ajYa7774bI7EBN09biLmxGStveoItR3bx9Tff8uvGX3nv/fdYuHDhmLjIEYlEmJubY25uLoi9AqNKbW0te/fuZd++fVx99dWjdnOsrq7uH4KZ9mddrdYsXF/fEImTJ1ZOcqzdw/hl/U90d3+JoaHhECsLnG8EBASg0WhorDyGrVtAv9d7OttoqDx2IqMnn8YTj6aaUp1gJnN2ISgwEOWMqwkICECpVOLv76/LVBkKkUjE7p07xsT3icDoctVVV5GdnY1UKtUJZv7+/piamo52aIDWDwoYVmfPqKgo9PT0qMhNQOLkrft/2t5Yjee4OZSm7yZj91oAXZON2NhYYmJikMlkQ6yOzjdbJBL3Ectqi7OoKUpn60f3YeXgybh5y/t0OQV0JZY9PT26/+dd7X2FtOy9P2r3Y2bNlJufxyt8bp/XRSIRxqYWQwppTk5OmJqa0VhV2Gd7b08X3e0tTL7hv3S1NZIV/yOHf36N5ppCWhr+amxhY2uPr4+CuXfdxaxZs4Z6Wy5q3n//fQ4ePMhk/wmkFGbw4YcfsnTpUl555RXEIjFv3PYcD3z2f5gamdDQ2shHf6yho7uTyspK9PX0Ka/XNj2sq6nts+6yZctITU3lgw8+QOYkY+eunbrXxGIxURMmsO/AQYwMDSlpqGDihIncErOEm2++GYVCIXw3nOeMmpA2btw4Dh48OOgYDw8P3cnKSYyNjfnggw/44IMPzmZ4Ahcwbm5uGBgYUl+eh3vw1FGLQ3Kic2d8fLwgpF0g/PLLL/z22288etU9mBubASAWiZkXPoPxPmF8uvVbrr32Wr768itWfbgKN7fzy1dEQOBCobi4mPj4eOLj49m1ew+5OdmA9iLljz+3kJ6Welb339jY2E8wS0/PoKZaezIs1tNH6uSJlaMcz+jrCZfJkTr7YGXvjlhPe+pUnptAUcpOcnNzdTdwBC4cTnburDqegqq3h4aKfJ1vVFNlgS6rB8DF1Y1ApZLAWdfqssv8/f1HpNxduFASGAlsbGz49NNPRzsMNBoNJSUlus/W9PR0UtPSycnOZnJMDFv+/GPINSwsLAgOCSMz/gdKMvdSlZ9IW1Mdenp6hISGcdXi64mJiWHy5MnY2Nicdownb3yoerv7bHdShGPj4kddaTZNVceJX/tcPyFNfKIzZnd3NxYWFhgZGdPWUElTVRHdHS10dbQgdfbBZ+LlSJx9qC/Pp7LgJbo7WujuaKGno5XezhY62pr7JIoMhEgkQq5QcPzIVlrqymiuOk5zTSFNNWW662MzcwsUCgVh4b74+FyCj48PPj4+KBSKYQv6ArBgwQIeeeQRulU9iEQi0k90WW5sbEStUfPwF88iAsrqKgBo7WxHam5NR08XIpGIKYHRWBib8XvidlpbWzE3Nwe0Ytl7773HpZdeip+fH+7u7n32G79vPyqVCn19fVQqFXp6/bMgBc5fBMMFgTGHSCRCJpNRX3b6nTtHEhMLKUZm1v3SfAXOTxobG1lx9wrG+4wjyjei3+s2FlIev/peDuYk8um2b/H39+fFF1/knnvuEb74BAQGQaPRkJOTQ3x8PHFxcezeE09pSREAts5y7OURzIhZhpMikrKs/ez+8sl+TYXOlObmZjIzM/tc1KWlZ1JZUQaASCxG6uiBlaMct/FXEypTIHFWYO3g0c/c+Z9IT9wsycjIEIS0CxArKytkzq7Eff2MbpubuweBSiXKudfrSjL9/f11F0UCAgJaNBoN5eXlfW5IpKSmkZWVRdsJ3ydDIxOkMjlWTgpkgY5s3fI7LS0tw2pctGzprfznP4/i6yrhhnvvIiYmhqioqBFpenRSSFOrevpsb6g8Rl2p9qaOtaMXE658qN9cvRNCWldXFxYWFjjJZBze8BaHN7zVb6yxsQmWVlZYnXhIpNZIJ
[... base64-encoded "image/png" cell outputs (matplotlib figure data) omitted from the notebooks/nuplan/nuplan_sensor_loading.ipynb diff ...]
OTmZDh07EhAYjI/fCUzNzPj48SN//vknixcv5rLXes4cXEh6ajInNo7h/dNr+Pp407JlS1lHzxHbt2+nV+8+xMXFsXbtWpydnXOsUGZvb09ISDCJH55xfEkv3j67ic+qQZTTLYW3lxfq6uo58ji/SxTJBEEQBEEQiojU1FS6duuOv78/VqPWoV/PCABVjeLYT9iBslZZ2pmacfXqVRknFWTh7du3mJlbEPUhFseJu9AsWf677fWqNcHBZQfXb97BzNyCjx8/5lFSQRCEnPfv7ouhoeFYO2/EcdJu7t5/QlsjY169esXkyZNZt24dNwJc2f+HDa/vneH4cQ+MjIxkHT1HuLq6MnTYcIrp6FKuVkvkFZXZsGEDo0aNQiLJOtX0V7Ru3ZqzZ06jrpDCkXmdUJFP5cQJP0qVKpUj588JokgmCIIgCIJMxcXF0bRZc6pVr8GtW7dkHafQSk1NpUePnnh7e2M5Yg2VGphkOq6qoYO9yw5Ui+vTztSMS5cuySipIAsfPnzAzNycl2/e4eCyE63SFbPVr0zlBliNXMeVy5dYv359LqcUBEHIHVKplJEjR3L06FEshq2kfO1WlK5UD6cp+3j9LhqDNoY8ePCAUaNGsX37dsqV1uLwoUNYWlr++OQFgJubG0OGDqOYji4dpu7DbuwWytdujbyCEhs3bmTkyJE5ViirVasW586eYfr06QSc8Kdy5co5ct6cIopkgiAIgiDIjEQioU/ffty994DYZHlatWotFrjOBWlpafTp0xd3Dw8sR6yhciOzr7ZTUdfCbrwrxUpVwczcggsXLuRxUkEWPn36hLmFJc8i3mDvsgMdvSrZ7pv8OY7zRxZTvERJOnTokHshBUEQctGMGTPYsmULJv0XUKWJRcb9OnpVaD9lPwlpirQxbMubN28YOHAgd+/cxsnJSYaJc86uXbsYPGQo6tplaD9lLxolyqKopIKN83oq1GmDvIISmzdvZsSIETlWKCtXrhwLFiygQYMGOXK+nCSKZIIgCIIgyMzcuXPxPO6B6eCldJh+iLJ1jencuTN//PFHjn0RK+rS09Pp168/hw8fxnL4Kqo0Nv9uexV1TezGb0OjTDXMLSw5e/ZsHiUVZCE2NhYra2sePHqK/QQ3SpSrke2+KUnx+K4eTOLHFwQFBlC/fv1cTCoIgpA7li1bxqJFi2jTbTq1DTtlOV6suB6lKjUg+tNH4uPjZZAw9+zevZsBAwehrl2aDlP3olmyXMaxL4WydVSo2wZ5BUW2bNnC/fv3ZZg2b4gimSAIgiAIMnH8+HH+/PNPWrQfR+VGZiipqGM5fBWtO09i/vz5ODo5icXAf1N6ejoDBgzkwIEDWAxbQdWmVtnqp6z2pVCmXkIfewfHXE4pyEp8fDzWNjbcunMf+wlulNSvne2+qcmf8Vs9jLioxwQGnKBJkya5mFQQBCF3uLq6MmnSJOqa9OLj64dERz3LdFwqlXL20D88OOvOjh07qFEj+xcS8rs9e/bQf8BA1LRK0X7K3q+uQ6mgpEITm6FI0tOoWasW1atXl0HSvCWKZIIgCIIg5Lm7d+/Sq3cfqjWzpqn9qIz75eTkaGI7HNuxWwgOPUmLFq24d++eDJMWXBKJhMGDh7Bn7x7MhiylWnPbn+r/7tktPkU+plWrwrFjl5DZ58+fsbN34Oq1m9iN207pSvUyjiXGfeRO+EES476+EH9aShJ+a0fw6dVd/P18adGiRV7FFgRByDHHjh1jyJAh1DXpgZpmce6dOsytkN2Z2lz13cz1E9tYtWoVvXv3llHSnLdv3z769R+AmlZJOkzdh6qGDg/OH+ft0xuZ2kU+vor3qqGULFmac2fPoqSkJKPEeUcUyQRBEARByFPR0dE4OjqhplMW04H/ICcnl6VNpQYmdJxxhI8J6TRv0RJPT08ZJC24JBIJw4YNY+eunZgNWkyNlg7cCtnDq3vns9X/9YML+K0ZhrFRW46JNeIKnaSkJBydnDh/4SJ247ahW7URKUnx3D/rjtfKweyY2IawnTM5d3hxlr5pqcn4rx/Fh2fX8fXxpk2bNjJ4BoIgCL8nODiY7j16ULWZDUa9/+TV3S9LC9wM3MGGITWIfRfBnbD9nD+6lD/++IOxY8fKOHHO2b9/P3369kNVsyQGXabiu2YEbuNbEbRlIoFbJ2a0i3pyDc/l/dHSLMaDB/coXry4DFPnHVEkEwRBEAQhz6Snp9OzVy9eRb7FatR6lFSLfbOtjm5lOsw4hG6N1jg5OTF37lyxTlk2/LtD1/bt2zEdsIiardvz6fUjTu75k7OH//lh/zcPL+O7eiht2hjgefw4qqqqeZBayCvJycl06NiRk6dOYzt2C9q6lfHfMAa3Ca0J3jaZ1KQEmtoNB6BszcyjCNPTUgjYOIaohxfx9DyOsbGxLJ6CIAjCb7l06RJOTu0pW6MlZoOXkPI5lqgn15CTV8ho8/RaEOG7Z+Ps7Mxff/0lw7Q568CBA/Tu0xdVzZJ0nLqXl3fP8vHVfdLTUgAoX9sAgKgn1zm+rB9aGuo8fHCfEiVKyDJ2nlKUdQBBEARBEIqO2bNn4+/vj93YrWiXqfTD9sqqGliNXMtl73XMnj2bq1evsWOHG5qamnmQtuCRSqWMHj2azZs3YzpgIbXadATgyRV/AN49u0lM1HO0db/+2kc+vorv6sG0btUSby9P1NTU8iy7kPtSUlLo0rUbQUEh2I7ZRLmaLYmJes6Ty34AFC9Xg4YW/UmK/4ScvEKmXVDT01IJ3DyBV3dP43n8OObm398AQhAEIT+6d+8e1ja2aOnVwGrUOhQUlUnlM6Uq1UNduzS123RCQUkF//Wj6NGjJ6tXr/7qiPeC6NChQ/Tq3QdVjRJ0mLIHrdIVadtzFhXrG3Fi45eRctWa2/L26Q08l/VDs5g69+/dK1IFMhAjyQRBEARByCOHDx9mwYIFtOo0iYr1jbLdT05enuaOY7AZvREfvxO0amXAo0ePcjFpwSSVShk3bhzr16/HpN88arftknHsyWV/Kjc2R1FFnYAtE7gVsofPMe8y9Y96cg2flYNo3qwpPt5eqKur5/VTEHJRWloaPXv2ws/PD2vn9VSoawiAtm4l+i09jXGfv1BQVCZ89xweXfKlXK1WqGroACBJTyNk+2QibgRz5PBhrK2tZfhMBEEQfk1ERATmFpbIq5XAZsxmlFS+fM6paujQeeYR7MZsolhxXQI3j8PSwpwdO9yQly8cJZO3b9/Sf8BAVIrp0GHK3owLlUoq6lRrbouaZkkAFFXUOL60Lxrqqjy4f49SpUrJMrZMFI7/4oIgCIIg5Gs3b96kf/8B1GhpT2PrIb90jiqNzek44zBR0Z9p1rwF4eHhOZyy4JJKpbi4uLBmzRqM+/xFXePuGcdi3j7n/Yu71GzdHtMBC0lJjOfknj/ZMcmQxLgPALx9dhOfVYNp2qQhvj7eFCv27WmwQsGTnp5Onz59cffwwHL4airWzzxNsphOGeq164XpwIUkxX/i9b1zGTuhSiTphLhN58llPw4cOICjo9jtVBCEgic5ORkLCysSUsB
u/PaMiwD/kpOT4+Orh/iuHkKzpk04cuRIoVqk/uLFiyR+TqCJzbCvjiYfsOIcXWYdw2v5AIqpq3Lv3t0iWSADUSQTBEEQBCGXffz4ESenDmiUroRJ/4W/NW2heNlqWI/eTFxcLIcOHcrBlAWXVCplypQprFy5EqNec6jXrlem408uf5lq+T7iLpe91hMT9RRlNQ1qtu6AorIa757fxnvFABrWr4Ofr6+YylrISCQSBg0azKFDh7AYtoLKjb89TbJkhdoUL1sNgMqNzZFKJITv/INH54+zZ88eOnXqlFexBUEQcpREIuHNm9eoahRHQVE5y/HY9y/xWTmQalUqFsrR1NbW1vTs2Ytzh//h8SXfLMffPb+Nx9K+qKsqc//eXcqUKSODlPlDrhbJKleujJycXJabs7PzV9u7ubllaSsWixUEQRCEgistLY1u3bvz9sMnLEeuQ0nl99a4Sk9LJXzHDEqVKs20adNyKGXBJZVKmTFjBkuXLsWwx0zqm/XJ0iby8VUAbgS6UaJ8dWycN9B/+TnMBy8m5u1zvFcMoF6dmpzw90dLSyuvn4KQi/7d5XTX7l2YDVlKtWY2320vJyeHUe+/MBu8hGI6upzc+xf3Th/Gzc2N7t27f7evIAhCfqampkZoaAipsW84vqQ38Z8iM459jnmPz8qBlNLRIODECXR0dGQXNJcoKiqya9dOevbsSeDmCTy64J1x7F3EbTyW9EFdVSlLgSw5OZkRI0Yyffp00tPTZRE9z+Xqwv0XL17M9ELeunULS0tLunbt+s0+Wlpa3L9/P+PvwrJIniAIgpD7Fi5cyNGjR7l37x5qamq0adOGf/75h1q1amW0SUpKYuLEiezfv5/k5GSsra1Zv349urq6MkxeeE2fPp2Q4BDsJ7iiVarCb5/v9IH5RD29RmhICOXLl8+BhAXbnDlzWLRoEW26TaehxYCvtjHpO5carRyp1LBdxvorAB9e3sd7eX9q16xGYEAA2traeZRayAv/buKwfft2TAf+Q42WDtnqV752K6RSKacPzOd26F62bdtG3759czmtIAhC7mvWrBlnTp/C3MKS4//0wG6CG2qaJfFdPRhlkgkKOo2enp6sY+YaBQUFduz4MjBpzxYXpEgprleV40v6oqaiyP17dzN9H5ZIJPTr15+jx44hSU/nwYOH7Nmzu9APZMrVkWSlS5dGT08v4+bl5UW1atUwMTH5Zh85OblMfcSPFkEQBCG7wsLCcHZ25ty5cwQEBJCamoqVlRUJCQkZbSZMmICnpyeHDh0iLCyM169fiylEuWTv3r0sXboUg27TqFDH4LfPdyf8ILdD9rBu7VoMDQ1zIGHB9tdffzF37lxad55MI6tB32ynrl2K6i3sMhXIPr56iPfy/lSvUonAgMJ51byoO3HiBBs2bKBVp4kZu5xmh1Qq5dyRJdwM3MH69esZNOjb/28J+dPevXtp0rQZ/v7+so4iCDKRnp7OzFmz+Pvvv5FIJJmO1apVi7NnTqNbQpPji3vis2ogSdGvCTjhT5UqVWSUOO8oKCjg5uZK3759CdoyEfd/eqGqrMC9u3cy1V6kUiljx47l0OFDmA9ZjtWodXh6eWFtY0tMTIwMn0Huy9WRZP8rJSWF3bt34+Li8t3RYfHx8VSqVAmJRELTpk1ZsGAB9erV+2b75ORkkpOTM/6OjY3N0dyCIAhCweHn55fpbzc3N8qUKcPly5cxNjYmJiaGbdu2sXfvXszMzABwdXWlTp06nDt3jtatW8sidqF05coVBg0aTK02HWlg3v+3zxf5+Cqn9/3FsGHDGDZsWA4kLNjmz5/Pn3/+ScuOLjSx/bnX49PrR3gt70eVSuUIDg4sclu7FxV169alfAV97p8+TI1WTmiUKJutfhc9VnHNbwsrV65k5MiRuZxSyGl79+6lb9++qGmVwql9ezzc3bGx+f40W0EoTP6dZu7q5gZSKU+ePGXbtq0oKChktNHX1+f06ZNY29hy984dgoICadCggexC5zEFBQW2b99G8eI6PHn6HDfXbVm+CyxYsIB169Zh0ncuVZt92dHYfoIbfmuHY2zcDn9/30I76i7PFu53d3cnOjqaAQMGfLNNrVq12L59Ox4eHuzevRuJREKbNm14+fLlN/ssXLgQbW3tjJu+vn4upBcEQRAKon+vdP37wX/58mVSU1OxsLDIaFO7dm0qVqzI2bNnv3me5ORkYmNjM92Eb3v37h3tO3REp1wNjPv8/dtLJyRERxGwcTQtWjRnzZo1OZSy4Dp58iSzZs2iic0wmtn/XBHjU+QTvJb3o1IFXYKDgihZsmQupRRkTV9fn1Mnw9FUAc+lvYl9/+3v0/+67LWOy17rWLx4MePGjcuDlEJO2r9/P3379qVG6/b0WhBE+TqGOLVvj69v1kW6BaEwkkqljBs3DldXV0wH/oPZoCXs2rWLXr16k5qamqltqVKlOH/uLDEx0RgY/P5o94JGQUGBlStXctzjWJYC2datW5k1axYt2o+jrkmPjPvL1miO0+S9PH3xBgMDQ6Kjo/M4dd7IsyLZtm3bsLW1pVy5ct9sY2BgQL9+/WjcuDEmJiYcPXqU0qVLs2nTpm/2mT59OjExMRm3Fy9e5Eb8IiktLY1Zs2axaNEipFKprOMIgpCLUlJSGDN2LLNmzcoyLL2gkkgkjB8/HkNDQ+rXrw9AZGQkysrKWaaW6erqEhkZ+ZWzfCEuyGRfamoqXbp241PsZ6xGrkNR+b/rVryPuMPN4F0/9ZmSnppMwIYxaKgqcvTIEZSVs+5IVdRUqFCBkqVK8/JOOIlxH7LdLybqOd7L+lGhbGlCgoMpXbp0LqYU8oPKlStz6mQ4xTWU8VzSm5io599se9VvCxfcVzJv3jwmT56chymFnHDgwAF69+5N9VZOtBuwEEVlVSxHrKFCXSPad+iAj4+PrCMKQq6SSqVMnz6dtWvXYtTnL2oZdKCmQXsshq/kyNGjdO7SNdMMNPiymL2SkpKMEudP7u7uDB8+nPqmfWjmkHXDxRLla6JXsxUvX0YQFxcng4S5L0+KZM+fPycwMJAhQ4b8VD8lJSWaNGnCo0ePvtlGRUUFLS2tTDfh9yUnJ9OlazcWLlzE9OnTmTVrlqwjCYKQS1JTU+nevQcbNmxkwYIFDBo0uFDsXuPs7MytW7fYv3//b59LXJDJvokTJ3Lq1Ckshq/OMr3rqt9mTu39m4ibYdk6l1Qq5eSev3j/4jYe7scK7bD+n1WlShXCQkOQJn7Ea1k/Pse8/2GfmLfP8VreF71SOoQEBxXprd2Lmn9HlOmW0MRzaW8+vXmcpc2NQDfOHV7MrFmzmDlzpgxSCr/j0KFDXwpkLR0xHbgIefkv08oUFJWxHLGa8nUM6dCxI69evZJxUkHIPfPnz+eff/6hTbcZ1DPpmXF/tWY2WI9aj5+fPw6OjiQmJsowZf4WHh5O9x49qNLUGsOes746E+DCsRU8PH8cV1fXQnvROE+KZK6urpQpUwZ7e/uf6peens7NmzcpWzZ7aygIOSMhIQF7Bwe8vX2wdl5P6y5TWLBgAU
uWLJF1NEEQclhaWhp9+vTluKcnViPWYDZ4CTt37aRv336kpaXJOt4vGz16NF5eXoSEhFChwn93VNTT0yMlJSXL8PCoqKjvFmDEBZnscXNzY82aNRj2mEW5mi2yHI96ch2Aa35bsnW+26F7uXvqEJs2bqRly5Y5mrWgq1evHuFhocinxuK1rC+fY959s23suxd4LetHaZ1ihIYGi2JjEVSuXDlOngyjgl4pvJb15cOrBxnHboXs4fT++UyePJm///5bhimFX3H48GF69uxJ+bpteXDOgxsBbpmOJ8V/IvbtM8qU0UVdXf3rJxGEAm758uX88ccftOwwnkZWA7Mcr9SwHfXN+hEYEMDdu3dlkDD/u3HjBg6OTpSp2hTTAQt4di2IMwcXkZr836LijcAdXPHZwLJly+jTp48M0+auXC+SSSQSXF1d6d+/P4qKmfcJ6NevH9OnT8/4+++//+bEiRM8efKEK1eu0KdPH54/f/7TI9CEXxcdHY25hQWnTp/Fbtw2Kjcyo4nNUJraj2TKlCls3rxZ1hEFQcgh6enpDBw4iMOHD2M5bCWVG5tTs3V7LIet5MCBA/Ts2SvL+g35nVQqZfTo0Rw7dozg4OAsuxQ1a9YMJSUlgoKCMu67f/8+ERERRXI9ipx04cIFho8YQR2jbtRr1yvL8c8x74h7/5LqLex5/eACh/7uQPjuOTy5cuKr53v94CKn989j9OjRDByY9QuvAHXq1CE8LBQl6Wc8l/YhIToqS5u4D6/wWt6PktpqhIWFfnfZC6Fw09XVJSwshCr65fBa1pf3L+5y9+QhTu75k3HjxvHPP//89vqBQt46cuQIPXr0oGpzO0pVrAvAw3MefHr9ZRZOQvRbvJb1Q1U+lbDQEIoXLw7AnTt3+Pjxo8xyC0JO2rhxIxMnTqS+aR90qzb56pIOTy77c/3ENgYMGEDjxo3zPmQ+9+zZM6ysbVArXh55BSXcXAzwX+/M9RPbeHErHICHF7w4c2A+kyZNwsXFRcaJc1euF8kCAwOJiIj46vbRERERvHnzJuPvT58+MXToUOrUqYOdnR2xsbGcOXOGunXr5nZMgS8jKYyN23Hj1j3sJ+ygfO1WGcdadphAfbO+jBgxIkemLgmCIFv/7vyzZ+8ezIYszdi1BqBac1usRq7hmLs7Xbp2IyUlRYZJf46zszO7d+9m7969aGpqEhkZSWRkZMbQem1tbQYPHoyLiwshISFcvnyZgQMHYmBgIHa2/A2RkZF06NiJkhXqYtRr9ld/aP87iqxV50m06T6DuPcvuR26l9P752VpG//xDYGbxmJoaMjy5ctzPX9BVqtWLU6Gh6Eqn4Ln0j7Ef/zv96r4j2/wWtYPnWJKhIWGUL58eRkmFfKD0qVLExoaTK3qVTi+pDdhO2cycuRIVqxYIQpkBczRo0fp3qMHVZvZYDrwH55c9gfg/Yu77J9ty9OrgXgv74eKXDJhoSFUq1YNgNWrV1OvXj1atmwlpl8KBd7OnTsZOXIkDcz78f7FXTyX9yfy0ZVMbZ7fCCVwywS6du3K1q1bkZfPs2XZC4R3795hYWlFslQZa+dNRNwKJy0lCQA5OXnK127Ni9unCNk+hb59+7F48WIZJ859ctJCtiJ7bGws2traxMTEiOkwPyEiIgJzC0si30djN96VkuVrZmkjlUgIdp3K44teeLi7//T0WUEQ8gepVMqoUaPYtGkTpgP/oVabjl9t9/xGCCc2jMHUtB0BJ/wLxPvqt37kubq6ZuyunJSUxMSJE9m3bx/JyclYW1uzfv36n5qCJj5r/islJYV2pqbcvPuITjOPUkxH96vtzh1Zyu3QPTSyGszN4F0kx0dTpakVja2HoFu1UUa7tJQkji/uiVJ6LFcuXxKLy2fTkydPMGlnSkKKHA4Td4GcHJ5L+6CpAifDw6hUqZKsIwr5SHR0NEOHDadypYr8888/Mv/RKN5Ts/rea+Lu7k6Xrl2p0sQa8yFLif/4mj3TzQGQk1dAXbs0isoqqMmncjI8jOrVqwOwdu1axowZQ12THry6HU5JbXXCQjMvSyAIBcWhQ4fo0aMHtQy7YNznb3ZObkti7Hs0S5anQl1D2vWfz6t75/BdPQQbGxuOHD4kFun/f+Lj4zFpZ8qDx89pP3U/WqUrEvsugqu+m7kTfgAdvaqYD16C57K+WJi1w93dvUC/htn9rBFFMoEHDx5gZm5BfLIU+wluaJf59hdpSXoaARvH8urOSfz8fGnXrl3eBRUE4bdJpVLGjx/P6tWradd/AXWMun63/YvbJ/FbN5K0lGTxvvo/xGfNf40YMYJt211xnLQbvWpNvtnu+LL+vLp7BgUlFWobdqaR5SC0dTN/3kilUkJcp/L8ih+nT5+iadOmuR2/UHn27BkmJqbEJqV/+aGsmM7J8LAs044FIb8R76lZfes18fDwoEuXLlRqbInF0OXIK3xZzuZ26F5UNYpTulID/NYOQ0mSQHhYKDVq1ABg3bp1jB49mkbWgzHoMpW49y/xXNaHklpqhIWGFNoFuIXCycvLiw4dO1K1mS1mg5fw7tlNji7I/J22w9T9+KwahLGRIV6enqioqMgobf6UkpKCnb09p06fw2nynowp2wCfYz+ww6U1tdt2JeJGEPXr1CAkOLjAr2uY3c8aMdawiLt+/TqGbY1IkqjgNGXfdwtkAPIKilgMW0GZas1wcHDk4sWLeZRUEITfJZVKmTJlCqtXr8ao958/LJAB6Nczwm7s1jxIJxREmzdvZtOmTRj2nPPdAhlAm65TadF+HH0Xh2Hc568sBTKAm0E7uH/mGNu2bRUFsl9QuXJlwsND0VFXQk0+jbDQEFEgE4RCZtDgIRQvXwvzIcsyCmQA9dr1omzNlvitHYZiejxhoSEZBbL169d/KZBZfSmQycnJoVVaH8eJu/kYl4SxcTsiIiJk9ZQE4acEBgbSqXNnKjU0xWzQYuTlFUiK/wSAZsnyNLUbidWINfiuGUrLFs3xcHcXBbKvGDRoMGFh4Vg7b8hUIANQ1ypJv6WneHPvDPpldfHx9i7wBbKfIYpkRdjZs2cxNmmHQrEyOE7eg0bx7E01UlBSwdp5PVpla2BlbcPt27dzOakgCDnhjz/+YOnSpRj2mEl9097Z7le+tlirS8jqzJkzOI8eTT3T3tQ17vbD9qUq1qW542jUNEt+9fjLu2c5e3AREydOpFevrAv/C9lTqVIlHj9+yIsXzzPWIBIEofAYOKA/757f5nbo3kz3J8Z9wHt5PxTS4ggLDaFmzS9Lp2zcuBFnZ2eqtbCjpkH7TMsSaJXWx6jPPJ49eyLWHBYKhFOnTuHk1J6ytVpjMXRFRqG4UkNTei0IpPfCYKq3cuDUntk0qFcbH2+vIlXc+RlBwSFol6lEyQq1sxxL/hyLz6rBFFORIyDAn5Ilv/7drbASRbIiKjAwEHNzCzR1a2DvshM1zRI/1V9JRR3bMVtQ0tTF3MKSJ0+e5FJSQRBywty5c5k/fz6tO0+mocUAWccRCrhXr17RsVNndKs0xrD7jN8+X+z7lwRtHoepmSmLFi3Kg
YRFm7y8vMzXmBIEIXcsWbKEiRMncnr/PG4EugFfCmRey/ohnxpLWGgItWrVAmDTpk2MHDmSeu168/iiD4f+csq081901DPC3KZRs2Zt+vXrJ4unIwjZduXKFWxs7ShZqQHWI9ehoJR5dJh2mUrEvH2Oz4r+VK9SiRP+/mhqasoobf7n4X4UadInji/plWnTn7SUJPzXjiAt/h0BJ/yL5FRs8Q2qCHJ3d8fO3p4y1VtgO24bKuq/9uahUkwbu3HbSUEFM3MLsUOOIORTixcvZvbs2bTsMJ4mtsNkHUco4JKSkujQsSNJaXJYDF+NgqLyb50vNTmRE+tHUaZkcQ4eOICiouKPOwmCIBRRcnJyLFmyhEmTJnF6/3wuea7Fa3l/5FJiCA0JpnbtL6NCNm/ezIgRI2hg0Z+yNVtk9D+6sCsv754hJuo5Xsv6UrZ0ccLCQtDT0+PevXt06NiRY8eOyerpCcI3+fn5kRAfRyOb4Sgqq2Y5Hvv+Jd4r+lNetzSBgScoXry4DFIWHC1btuTM6VOoK6Tg8U8PPr1+hCQ9jaAtLnyIuIWPtxd169b98YkKIVEkK2J27dpFly5dqNjQHGvn9SipqP3W+dS1S2E/YQef4pIxt7Dk/fv3OZRUEIScsHLlSqZOnUozh1E0c3CWdRyhgPt3Z9Rr125gOXId6tqlfvt8YTumE//uOcePu1OixM+NahYEIW+dP38eHR0dzM3MiY+Pl3WcIktOTo7FixczZcoULnqsgqRPhIYEU6dOHQC2bt3K8OHDaWDWF8PuM3l6xT9jWtrbJ9e55rcVz2V90CupTWhoMHp6ety/fx+Tdqb4+PrTtWtXDh8+LMunKAjAl+8JZ86c4f3794wZM4a2RsYEbHDm5d0zmdrFf4rEe3l/SmqpExwcmO92xk5NTeX8+fOkpKTIOkomtWrV4tzZM1TQK47H4p6c2DiW5zeCOXz4EAYGBrKOJzOiSFaErFu3jn79+lHToBMWw1b89tX/f2mWLIf9BDdevn6LlbUNMTExOXJeQRB+z/r165kwYQKNbYbSov14WccRCoH169fj6uqKcd+5lKnc4LfPd81/Kw8veLNz5w4aNPj98wmCkHtu3bqFjbUNpdSLc+7sWczNzPn48aOsYxV6ju07cO/evSz3y8nJsWjRIvz9/Tl/7mzGiI9t27YxdOhQ6pv2wbDnH8jJyZGSlADIUamRGQZdpxH9+gG6JbUICwuhbNmyPHjwAJN2pqQratJzfgBVm9vSo0cPDh06lMfPVhAymzlzJoaGhhi0MSQ6Ohp/P1+Mjdviu3oYz2+EAF92YvReMYBiylJCQoIoV66cjFNnlp6eTr9+/WndujUOjo4kJCTIOlIm5cqV49TJkzRpVJ+nVwPYvn079vb2so4lU3LS/52YXgiILaSzkkqlLFy4kJkzZ9LIchAG3aZlWrQzp7yPuIPn0r40a9qIE/5+YpFEQZChrVu3MnToUBpY9Mew+8zf/je/YUgN8b76P4riZ01YWBjmFhbUa9cHwx4zf/t8EbdO4rN6CNOmTmXBggU5kFAQhNzy6NEj2hq2pZi8Kn/3mkrkp7f8fXAZ+pUqEhgUiJ5e9jZ/+pai+J76I/++JqoaOhRTVSLghD9Nmnx/F+F9+/bRu3dv6rbrhVGvORmf/anJn5Gkp5GcEI3n0r6U1lEnPDyMcuXK8fDhQ4xN2pEqp47DxF2oa5dCkp5G8PYpPLnkw969e+nW7cebswhCTps/fz6zZs2iic0wnlz2RktNkdCQYMqXL0/Xbt3x8fHBqPdf3A7ZhVzyJ06dDM/Y0TW/kEgkODk54e3jQ1O7EdwK2kGzpo3x8fZGR0dH1vEySU5OJjY2Nt+NwstJ2f2sESPJCjmpVMqUKVOYOXMmLdqPy7UCGXzZucx27BYuXrxEp86d891wUkEoKnbu3MmwYcOo165XjhTIBOHFixd07tKVsjVaYNB16m+fLybqOUFbxmNtbc3cuXNzIKEgCLklNTUVC3MLFCXyzO4+CQ3VYlQvW4WR1gO4fec27u7uso5YaNU16UmPuf4oapXF2KQdp06d+m77U6dOISevQO02nTJ99iupqJOcEIPnsr6U0lEnLCw0U4EsRU4Nh4k7M6bQyysoYjZ4CdVaONCrVy/Onz+fq89TEP6/lStXMmvWLFq0H0frLpNxnLSX+BQ5DNsa8fTpU44cPkSXzp0J3TGDtPi3BAcF5rsCGYCpqSne3t4oqRSjahMrHFx2cu3GHYyN2xEVFSXreJmoqKgU6gLZzxBFskIsPT2d4cOHs3TpUgx7zKS54+hc/7GsV70pVqPWExQUTJ8+fUlPT8/VxxMEIbP9+/czcOBAarftmukqsiD8jrt37/Lh/TtKlK+FnNzvfXVITUrgxIZRlNcrw769e1FQUMihlIIg5AY5OTmSkpIASE1PBSAq+h1bA/dQv159unfvLst4hZpR7zmoaZbAwWUHOuXrYGlphZ+f3zfbL1iwgGbNmuK9ciBvn93MuD/2/Uu8lvelpJYaYaEhlC9fnkePHmHSzpRkqTIfXz/mis/GzCeTSpFK00FOTuyWK+SpzZs3M2HCBJrYDMtYT1ezZDmcJu8lTUGDtkbG3L17lz17drN7925OnQynXr16Mk6dVceOHQkPD6eJ7XCKl6uGx9I+pKcm4Th5D09fvqGNYVueP38u65jCV4h3vEIqNTWV3r37sHXbNkwHLqKhxYA8e2z9em0xH7qCw0cOM3z4cArZjF5ByLeOHDlCnz59qN7KCZO+c5ETX2qFHGJpacmiRYu4GbQDv3UjSP4c90vnkUqlBG+fQmL0Gzw9j+e7qQaCIGSlqKjIqdOnUFRXZubuBdx6fo8/9y9Bp1RxAgIDxA5yuUhe/stFBGVVDWzHbkWvZiscnZx49OjRV9tra2sTcOIEDevXwXvFAN4+vUHch1d4Le1DCQ1VwsNCqVChAo8fP84okDW2HQnAzaCdxH98A4BEkv6f6Za+HDxwgBYtWnz18QQhp+3evfvLrqxmfWnVeVKmi73q2qVxmLgLhWJlvoyATEn5Mr04H+7AOGTIENzd3WloOZBWnSbi5LKDMlUa4bVyMHHvX+I0eS8vXr5iwgQXWUcVvkL8giqEEhMT6dChA4ePHMFy+CpqG3bO8wxVm1rRrv9Ctm3bxsSJE0WhTBBy2fHjx+nRowdVm9lgOnCRKJAJOUpOTo6pU6fi4+PDx2dXcV/YhU9vHv/0ea54b+DJlRPs2b0rYxc2QRDyv+rVq3P6zGlK6JZi1p6FyKsqEhQc9NtrkQnZJ5VKSUmKp1gxDVRVVb/Z7t9CWaMGdfFaMQDPJb0prqFCWFgIFSpU4MmTJ5i0MyVJooTDxF28unc2o++uKcbcO30kYz2y/fv306lTp7x4eoLA4cOH6d+/PzVat6eeaR+kkqwzkiTpaaQmJ6Cjo5NvZ0tMnTqVbdu2U8uwM226TUdOTg4l1WLYj91MxfrG+K0bxel980hNTqRjxw6yjit8hfgVVcjExsZibWNLQFAwNqM3Ua2Zjcyy1DbsRNtes1mxYoVYc0YQ
cpGfnx9dunalYkMzTActybjyLAg5zcbGhksXL1BGR5VjC7rw7FpQtvs+ux7MRY+VzJkzh/bt2+diSkEQckP58uU5eeok8+fPJyg4iEqVKsk6UqH34vZJ0lOTSU9L4cQGZ6Jf3sXfz5cKFSp8t5+WlhYn/P1p06oF5XWLExYWgr6+Pk+ePMHYpB2f0xRwmLgLNc2SWd7HH5735MlFb/bt20eXLl1y8+kJQgZvb2969uxJtRZ2qGmVYv8f1jy/GZapTWLcl10sVeVTCQkO+m6xWFYWL17MkiVLqdLEgnb95mUq5CkoqWAxdDmqGsWJuH2KVatW0bdvXxmmFb5F7G5ZiHz48AFrGxtu3bmP7ZgtlK3RTNaRALjstZ4L7itYtWoVY8eOlXUcQShUAgMDsXdwoHwdQyxHrEFBUTlXHkfsbplZUf6sAYiLi6NP334c93CnRftxNLMf9d3Ri5/ePMZ9YVcsLcxwP3ZUrG8jCEImRf099Wv+fU0aWPTn86coXtwKwdfHBzMzs18639OnTzE2aUdCqjwOE3ehUfzLKMBbIXtIjP1AlWZW3DixnUfnj7Nnzx6x1pyQZ4KCgrCzt6dCXWMshq/i+LJ+RD68RInyNanZ2omGlgNJS0nCa1k/JJ/fcepkOLVq1ZJ17Cy2bt3KsOHDKVerNSZ9/+bxJV8iH12hXf8FX3aNlaQTuNmFx5d96dunDzt37pR15CInu581inmYSchFr1+/xsLCiohXkThO2kXpivln8cKm9iNJToxl3LhxaGlpMWDAAFlHEoRCISwsDEdHJ/RqtsJy+OpcK5AJwv+nqanJsaNHmDdvHnPmzOHDi7uYDvoHZVWNLG2TP8dxYv0oKulXYPeunaJAJgiCkE3FdHSJjnzK67unOXLkyC8XyACGDhvO+0+xdJ19PKNABlDftDcSSTqhbtN5eM5DFMiEPHX69OmM77IWw1aQkhhL1KMrAHx89YBzR5ZSuYklYa7TSIl9Q3hYaL4skB05coThI0ZSqmJdXt09w94ZFhnH3kXcpmJ9Y07unsPjSz7Y2dmxY8cOGaYVfkR8Uy0Enj59iqGhEa/efsRx8p58VSCDL2vZGHSZSh3j7gwePJijR4/KOpIgFHinT5/Gzs6e0lWbYD1yHQpKKrKOJBQx8vLyzJ49Gw8PD6IenMVjUXdiojLv0iSVSAjeNpHUhA8cP+4uRogIgiD8hGot7HhxKxw3N7ffnqZuYW5GUnw0989k/h4ulUgI2zGTh+c82LVrFz169PitxxGE7Lp06RI2tnaUqFgfqxFrUVBSITryGVKpBHlFJao0tcJs8BJO7vqDuLePCTjhT4MGDWQdO4tnz57Rs1dvtMtUxm7MlkzHFJXVKF+7NeePLOVO+AHatDHE09Mz366nJnwhimQF3J07d2hj2JaYJAlOU/ZRvGw1WUf6Kjk5OYz7/EXVZrZ079GDEydOyDqSIBRYFy5cwNrGluL6dbF23oCicv5bk0EoOpycnLh08QLaqlKOLuhExK2TGccuHl/N8xuh7N+3l5o1a8owpSAIQsFzI8CVpUuX0qdPn98+17Rp0/j777+54L6SS55rgS8FstCdM3lw9hg7d+6kV69ev/04gpAdN2/exNLKGs0y1bAZswklFTUAytZoRsdpBxiw7CyWQ5fz6IInHyNu4efrQ/PmzWWcOqvIyEjMzC1Q0y6N06SdqGuXYvimu7RoPw6AkhVqcTPQjat+m6lXvz5hYaFiRH0BIKZbFmCXL1/G0soahWKlcBrvirp2aVlH+i55eQXMBi/hxPoE2rfvQGBgAIaGhrKOJQgFypUrV7C0skZLrzo2YzajpKIu60iCQO3atbl86SI9e/XCZ/UQWnWciFaZilz2WseCBQuws7OTdURBEIQCaejQoTl2rj/++AM5OTn++OMPpBIJCdFR3D99hJ07d9K7d+8cexxB+J6PHz9iZm6BspYetuO2ZlmqQa96U9LTUgncPI7IB+fx8fbOt78Z582bx7NnT+n+p3fGb3F5BUXqm/bmZtBOdPSqcu7IUipVrsylixdRVBTll4JAlDELqPDwcEzamaJavCKOE3fn+wLZvxQUlbAcsYYSFetha2fP58+fZR1JEAqMGzduYG5hiVrJitiO3fbV9Z8EQVa0tbXx8vRk+rRpnDuyhICNY+ncuQvTpk2TdTRBEAThP2bNmsX8+fO55LmGe6cO4ebmliMj1QQhu9LS0vickICSSrGvbvojkaQT4jqFiJuhHD1yBHNz87wPmU2dOnVCVUWVk3vmkJIYl3G/qkZx2vaazf0zxyhTpgw3rl/Pl7txCl8nimQFkI+PD1ZW1hTXr4/9BFdUNXRkHemnxLyLIPZdBGXKlBHVdEHIpjt37vznqltZ7MdtR0VdU9aRBCELeXl55s+fz5EjR3BxccHNzVWsuyEI/xEfH0/3Hj0ZM2YMaWlpso4jFGEzZszgyJEj+Pr60q9fP1nHEYqYMmXKEBBwgpg39/FePoDkhJiMY1KJhPCdf/D4og/79u7F3t5ehkl/zMzMjMDAAOIiH+C1rB+JcR8AeH4jlKCtE9HW1ub27dtiTdYCRk4qlUplHSInFfYtpA8ePEjv3r3Rr2+CxfBVKBawxbrfPr2Bz6rBVK9SkYCAE+jq6so6kiDkew8ePMDI2ASJsjYOLjtR0yyR5xk2DKlRaN9Xf0Vh/6wRBCFnJSUlYWfvwOkzZ0lLTcHBwYGDB/ajolKwvsflFvGempV4TYTC7vLly1hYWqGkqYvdeFfUNEtwet9cboXsZseOHfTt21fWEbPtxo0bWFhaIVHUoKnDGELcpqGnW4ZTJ8OpVKmSrOMJ/5Hd91UxkqwA2bp1Kz169KBKczssR6wpcAWyV/fO47W8Hw3r1yYsLFQUyAQhGx4/fkw7UzPSFTSwn7BDJgUyQRAE4delpaXRvUdPTp06he2Yzdg4r8fX1xc7e3sSEhJkHU8QBEEmmjVrxsnwMEj6iOfS3pzaN5ebwbvYuHFjgSqQATRs2JDTp06irphGwObxGLYx4OGD+6JAVkCJIlkBsXz5coYOHUpdk56YD1qCgqKSrCP9lGfXg/FZNZi2hgYEBQZSvHhxWUcShHzv+fPntDM1I0mihL3LDtS1Sso6kiAIgvATJBIJAwcOwsvLC8sRaylXqxWVGppiO3Ybp8+cw9zCgujoaFnHFAqJS5cuYWlljYeHh6yjCEK21K9fn1Mnw1GVS+ZW8C5WrFjBsGHDZB3rl9SoUYMzp0+xadMmPI975NgaZDExMRw/fpzExMQcOZ/wY6JIls9JpVL++OMPJk6cSBO7ERj1/vOrCxz+r8te67nsvSGPEv7Yw/Oe+K93xt7eDm8vL4oVKybrSIKQ7718+ZJ27cyITwYHl50U0ykj60iCIAjCT5BKpYwePZo9e3ZjPngplRq2yzhWvnYr7Cfs4MatexgbtyMqKkp2QYVC4c6dO1hZ2xB+6gydO3dm165dso4kCNlSs2ZNLl28wMWLFxk/frys4/yWChUqMGzYMDQ1c2bt4Li4OKxtbGjfvj129g5i9HEeEUWyfEw
ikTBu3DjmzZtH686TaN1p4g8XQE6M+8AlzzVc8lxDapLs/xHdCdtP0NaJ9O7Vi8OHDoq1NwQhG968eUM7UzM+xSfhMHEnGiXKyjqSIAiC8JNmzJjBhg0bMO47j+otsy4+rVu1EY6T9/D0xWvaGhkTEREhg5RCYfD06VPMLSxRKFaKXguCqGnQiX79+rFu3TpZRxOKqLCwMP76669sF3XKli1L8+bNczlVwZKQkICdvQPXb9zGqNcczp47j42tHXFxcT/uLPwWUSTLp9LS0hg0aDBr167FuM9fNLEdnq1+jy54I5VIkKSl8vLumVxO+X1X/bYQtusPnJ2dcXNzFTtZCkI2vH37FlMzc95+jMPeZSeaJcvLOpIgCF8RHR2No1N7nJ2dxU6FQhb//PMPixYtok236dQ17vbNdiXL18Rpyj7eRX+mjWFbHjx4kIcphcLg9evXmJlZkCxVxv4/i5+b9J9PI8tBjB49mgULFvDvPm0fP35k+/btfPz4MddzRUZG8vbt21x/HCH/CQ4Oxtrahj///BMbWzvi4+NlHanASUxMxNGpPRcuXsJ27Fbqm/XBbrwrly5fxdLKipiYmB+fRPhlokiWDyUnJ9O9ew927dqF2ZCl1GvXK9t97591p1IjM3T0qvLsenAupvw2qVTK+WPLOXd4MTNnzmT16tXI/2CKqCAI8OHDB8zMzXkV+R57lx1olxGLfQpCfvTv9IfAoFA2btpMjx49SUlJkXUsIZ/YsGED06ZNo5njaBpZDfphe+0ylXCavJdEiTKGbY3ypIAhFA4fPnzAwsKKj3GJ2E9wQ127NABycnIYdJtGi/ZjmTlzJjt37sx43xo8eDDGJiZERkbmWq7Tp09TvXoNataqzZUrV3LtcYT8JywsDHt7B3RrtsRp0i4uXrqClbWNGP30E5KTk+nQsSOnTp/GduwW9Ko3BUCvWhPsJ7hx/eYdzC0s+PTpk4yTFl6icpHPJCQk4ODoyHFPL6xHraNmK6ds9/34+iHvnt2klkEHKjUy5fmNUKQSSS6mzUoqkXBq31yueG9gyZIlzJs374dTRAVBgE+fPmFuYcmziDfYu+yguF7V3z7n59gPpCSJq3eCkJM+f/6MvYMjN27ewd7FDauRa/E4fpyOHTuSlJQk63iCjO3evRtnZ2caWPSnhdPYbPdT0yxBMR09Pn/+LEYmCtnyb9Hr+as3yCmosHtqu4wRY/ClUCZJ//L/koqKCo5O7bl1+x4WQ5cT8fo9bdq05dmzZzme68yZM1hb26BToQ5qJSpiZm7B5cuXc/xxCrLjx49To2YtNm7cmOm/WUF38uRJ7OzsKV2tKdYj11G+dmvsxm/nyrUbYvRTNqWkpNC5SxdCQkKxGb2JcjVbZjpepkpD7Me7cuXyZebOnSujlIWfKJLlI9HR0VhaWXHy1Bnsxm2lcmPzn+r/4Iw7KsV0qNSwHZUbmZEY+563z27mUtqsJOlpBLtO5XbIbjZv3sykSZPy7LEFoSCLiYnB0sqKB4+eYj/BjRLlavzyuZI/x3Hv9BG8Vgxk5yRD/NaOysGkglC0/Xt19/z5C9iM2UKZyg2o0tgcm9EbCQgKFovqFnEeHh4MGDCAWm06YdhtRrYvEkrS0wjcMoGox5fw9vKkTBmxUYvwfYmJiTg4OnH7zn2sRq4n5u0zAO6E7ycl8cuInWv+W7nstZ6FCxeyZ+++L8Wr0Zuo0coRpyn7iP6cRhvDtty9ezfHcp07dw4raxt0ytfBdswW7Ma7ZhTKLl26lGOPU5DduXOHXr168zY6kZEjRzJs2DCSk5NlHeu3nT59GhtbO0pUaoj1qA0oKn/Z2VGvWhMc/jP6ycLSUuzm+x2pqan06NETf/8TWI1aT4U6bbK0kaSncc1vCwqKitjY2MggZdEgimT5xNu3bzFpZ8q1G7exn+BG+dqtf6q/RJLOg/MeVG9pj4KSCnrVmqKirs3zGyG5lDiz9NRkAjaO5fFFL/bu3cvQoUPz5HEFoaCLi4vDxtaOO3cfYD/BjZL6tX/6HGkpSTy+5IvfulHscGlNiNt00lKTUVRWRVVDJ+dDC0IRlJqaSrfuPQgJCcN69EbK1miWcUy/nhF247Zx5uw5cbW8iAoKCqJrt25UbmyBSb95P9yJ/F9SiYRQtxk8vxHCkcOHadeuXe4GFQq81NRUunbrxtlz57AZs5mkhOiMY+G7ZrNtTFPuhB/g7KF/mDZtGjdv3sLHxwfLkesoV7MFAFqlKuA4eS+p8sVoa2TM7du3fzvX+fPnsbSyRrtcLWzHbkFJtRgq6prYjXdFvVRlzC0si3yhLDo6Gqf2HVArXo6uc47TbsAC3HbsxKRdO968eSPreL/s3LlzWNvYUrxCXWxGb0RJRS3T8TJVGmI+dAWXLl5k69atMkqZv6Wnp9O3bz88jh/Hcvhq9OsZEfv+ZaY2Ekk6wdun8Ox6IIcPHcLKykpGaQs/USTLB168eEHbtsY8ef4Kx8l70a3a+KfP8freORI+RVHLoAMA8gqKVGxgzPM8WJcsNSkB3zXDeXXnJB7u7vTo0SPXH1MQCoN/d625eu0GtuO2U7pSvWz3TU9L5fmNUIK2TsJtQmtObBxL/Mc3tOzoQt9/wjAfvJTUpASqNP39D9B/p2sIQlGVnp5Ov3798fLywmrkmq9e3S1XsyX2E3Zw9fotTM3M+fDhgwySCrJw7tw5nJzaU7ZmK8yHLENe4ctGRcmf47gTfpBTe/8mLTXrSBGpVMqp/fN4cM6d3bt24eDgkNfRhQJGIpHQv/8A/Pz8sRq5jrI1mvPiVnjGcUVlVao0sSJ81x+MGjWK6Oho9u3bS7XmdvisGkLk46sZbYvplKGx7Qg+fnjP+fPnfyvXhQsXsLC0Qr1kRV4/uMSTKycyjqmoa2I1ch2fPyeycNGi33qcgiw9PZ2ePXvy6k0UViPXo6yqQZ22XXGavIc795/SpGkzzp07J+uYP+3ixYtYWlmjVbYmlRqZ8+Hl/SxtEuM+cv7wP5TR1cPJKftLCRUV6enpDBgwkEOHDtGm+0zev7jLgdm27Jlmyr3TR4AvF1TC3Gbw5JIP+/fto3379jJOXbiJ7QZl7OHDh5iZWxCXmI7T5H1o6/7aQt33z7qjo1eVMlUaZdxXqaEpD897Ev/xDRolyuZU5EySE2LwXTOU2DcP8ff3w8TEJFceRxAKm//dtcZ+vCu6VRv9sI9UIuHNo0s8PO/Fk8t+JMV/QkevKo1tBlO9hQM6elUy2t4I3IG8ohKVGvzev8kvV60m/9Y5BKEgk0gkDBs2jAMHD2A5bCWVGpp+s61u1UY4TtyF98qBGJuYEBwUhK6ubh6mFfLajRs3sLaxRadCHaxGrkNOXoHnN0J5cNadp9cCSf9Pcaxqc9uMUTz/uuC+glvBu9i0aZO4wCj8kFQqxdnZmf0H9mM5bCUV6xsD0Nh6MIpKqlRvac/n2A/4rx9Fr1690dLSYtGiRZj0m8/Tq1+KVpc819LccTS6VRrx5tFlwnbOwN7Bkb59+/5yrosXL2JhaYWmbnX0G5jw9tktboXuoUpTS5
RVNUhNTiR420SUlRSZMH58TrwUBdKsWbPwP3ECu7FbM/3e063amI4zjxKwcQzGJiZs3LCBQYN+vOFHfnD58uWM//YtO03E458vm82N3Powo01S/Ce8VwyA5GhCw0KpWbOmjNLmT/9+x9izdw/mQ5YRuHlCpuMq6lpIpVLCd8/m/tlj7N69my5dusgobdGRqyPJ/vzzT+Tk5DLdatf+/lSiQ4cOUbt2bVRVVWnQoAE+Pj65GVGmbty4gWFbIz6nK+E05dcLZClJ8Ty57E9Ngw6Z1r+oWN8YOXkFAjZP4MH546QmJ+ZUdAA+x7zHc2kfEj88JyQkWBTIBCGbMu1aM2Zzxq41XyOVSnn37BZnDi5i11QTPBb3JuJmGLXbdqHrbA96zPWjueOYTAUygKdXTlChThuU1TR/OeeXaUDTeXLJ95fPIQgFmVQqZdy4cbi6umI68B+qNbf9YZ9SFeviNGkPEa/f0dbIGEkeb6Aj5J2HDx9iYWmFavHy2IzeTMTNUHZONsJn9VA+vHpAi/bjqGPUDWV1LfSqNcnU96rfFq54b2Dx4sUMGzZMNk9AKFCuXr3Kxo0baWw9NNN7kVbpirTpPp201CQCN43B3t6eevXqsmjRIgy6TqNmayde3/8yUuzFrXCOLezGizun8V87nDYGBhw+dBAlJaVfynTp0iXMLSzRKFMVu3FbefafGSxvn1xn2+gmvLp3Hr+1w/n4/CZ+fr60bdv291+IAujgwYMsWrSIVh0nUrG+UZbjxXTK4DhpFzVad2Tw4ME4OzuTmpoqg6TZd/XqVcwtLFEvVRm7cVt5cfNkxrENQ2rw5uFlkuKj8V4xkPSEd4SGBFOnTh0ZJs5//i18u7q6YjpgETVaOtDEbkTG7vZy8gqUr2PIqX1zuXvyIK6urvTq1UvGqYuGXJ9uWa9ePd68eZNxO3Xq1Dfbnjlzhp49ezJ48GCuXr1Khw4d6NChA7du3crtmHnu3LlzGBmbIKdWCqfJe35rpNeTyydIS0mkZuvMwy5VimljOvDLsOagLRPZMdGAELfpvL5//rd3vYz78JrjS3oinxrDqZPhtGjR4sedBEH4z641XQkODsXGeSPlarX6artPbx5zwWMV+2ZZcXheR+6fPUaVxhZ0mLqfPotCMOgyhVIV6351YejEuA+8eXiJqk2tfzmnVCIhbNcfPDznwebNm3/5PIJQUEmlUqZNm8batWsx6vNXxnIG2aGuXRo1zZJER0eLIlkhFRERgZm5BemKGtiN24aKuiaJsR9IivsIgJKyGnrVmvDu+S0q1jfOmIIJcCdsP+cOL2bGjBlMnixG6hZECxcupEWLFmhqalKmTBk6dOjA/fuZp5klJSXh7OxMyZIl0dDQoHPnzkRFRf3yY9asWRODNobcDtnFy7tnMh179+wWfmuGY2hoiLWVJTNmzKCZwygaWw8m6sl10lK+7LyrUaIslRuZE7xtIvXr1cbzuAeqqqq/lOfy5ctfCmSlq2I3bhspifG8fXI9U5sLx5by8fkNfH19MDLKWhwqCm7cuMGAAQOp3sKexjbfXrNZQVEZk37zaGwzlPXr13Px4sU8TPlzrl+/jpm5Baol9LEbtx0lVQ0eX/ZDQUklo8275zfxWTmQ1LhIQkOCqVcv+0uKFAVSqZTx48ezceNGTPrNp1abjgC07jQR29EbAVDVKM6l46szRhz3799flpGLFDlpLu47++eff+Lu7s61a9ey1b579+4kJCTg5eWVcV/r1q1p3LgxGzdu/Gqf5OTkTDuCxMbGoq+vT0xMDFpaWr+VP7cEBQXh5NQenQp1sBm9GRX1Xx/pAXB8aT+kSGk/adc328REPef+OXcenHUn7v1LNEtVoGbr9tQ06ICObuWferzoyKd4rxhAcU0VgoMCqVq16m/lF4SiIi0tje7de3Dc0xOrUeu/OxVy49BaSKUSiuno0rbnH1RubJ7pR9b33D15iLCds+i//AxqmiV/OqdUKuXk3r+4E7oXNzc3OnTogLa2dr5+X81rsbGx4jUp5P7++2/mzJlDm24zqGPUhYRPUeiUrfbDHQtTkuLxXjGQpI8RhIeF0qBBgzxKLOSVqKgo2hoZ8y76M05T9qFRXC/jWGLcRyJuhnHJcw2qmiV4++Q6FkOXU6OVIwAPz3sStHUizs7OrF69Ots7YBZ2Be091cbGhh49etCiRQvS0tKYMWMGt27d4s6dOxQrVgyAkSNH4u3tjZubG9ra2owePRp5eXlOnz6drcf42mvy+fNnOnTsSEhIKBbDVlGliQWfXj/i+NLe1KtdnSmTJ9OtWzfqmfahbc8/kJOTQyqRcCt0DyUr1KKYjh6eS3ujr1eSkyfDKVGixC89/1u3btHWyBi1kpWwG7cdFXVNJJJ0zh1egoq6JpUamXHu0D+8f3oVX1+fIjvb5MOHDzRr3oLPElXaT92Pkor6d9t/jv3AsfmdqF65HKdOhqOmpvbd9rJw69YtTNqZoqipi8OEHagU00YiScdz+QASPkVRvaU9Feu34+yBuSR9ekFoSDCNGv14SZGiRCqVMnXqVJYsWYJR7z+pb9o7S5uXd8/y+LIfd0L3snbtWpydnWWQtPDJ7mdNro8ke/jwIeXKlaNq1ar07t2biIiIb7Y9e/YsFhYWme6ztrbm7Nmz3+yzcOFCtLW1M276+vo5lj03eHh4YGtnR6lqzTI+VH5H3IfXvLp/jloGHb/bTlu3Ei3bj6P3giDaT9lLhTptuBm0g30zLfFY0ifbj/c+4g7Hl/SiXBkdTp86KQpkgpBN6enp9OnTF3cPDyyGr/7hWmEt2o+jVMW6JERHEbjFBb91o7h36jCJ/xml8D1PrvhTtkbzXy6QnTm4kNshe9i8eTP9+vX76XMIQkG3fPly5syZQ8sOE2hkNZBLXuvYP9uWt09vfLdfanIifmuGEf/2CYEBJ0SBrBD69OkTlpbWRL77hP0Et0wFMgA1zRLUatORBub9efvkOnLyChlrRz27Hkzw9in07duPVatWiQJZAebn58eAAQOoV68ejRo1ws3NjYiICC5fvgxATEwM27ZtY/ny5ZiZmdGsWTNcXV05c+bMby3Orq6ujpenJ+2dnDixYTTX/LfivXIgVSqWw8/Xl+joaKRSKVql9DP+/5KTl6eBWV90dKvgu3owJbXUCQwM+OUCGXyZERMT/Yl67fpk/JaRl1egTbdpNLIazLnDXwpkPj7eRbZAlpaWRrfu3Xn3IRqrUet/WCBLT0slcNNYlOXTcD92NF8WyO7cuUM7UzMUNcpgP94NlWLawJf/9o4TXOk5z5/GVoM4d3A+nz88JygwQBTIviIyMpIlS5ZQ06DDVwtkAG8eXuJO6F6WL18uCmQykKtFslatWuHm5oafnx8bNmzg6dOnGBkZERcX99X2kZGRWRa41dXVJTIy8puPMX36dGJiYjJuL168yNHnkJN2795N586dqdjADBvnDVm2x/0VD855oKikStVm2dvBLiE6io+vHpAQHUV62pe57imJ8dnqG/noCp5L+1KzWiVOhodRvnz5X84tCEWJRCJh4MBBH
Dp0CIthK6jS2PyHfZo5jKLrbA96LwymVadJJCfGEbJjBjtcDPBY2pebwbuI/5h1u/Dkz3G8vHuGKk0tfzqnVCrl/JGl3AhwZd26dQwZMuSnzyEIBd3GjRuZOHEiTexG0MxhFFKplGdXAwF4eM7jm/3SU5M5sX4UHyNu4+frQ7NmzfIqspBH4uPjsbWz49HT59hNcMtYN+ZrarRyQF5BkbLVm6FSTJtX984RsHEsTk5ObNu2FXl5scF8YRITEwOQUXi6fPkyqampmS7+165dm4oVK37z4n9ycjKxsbGZbl+jrKzM/v376NevL2cP/UMp7WIEBnwpeg0ePJjJkydz5uACLh5fw78ThpITYvBeORAVuRSCgwMpW/b3NvTq378/nTp1JnTHdJ7+5/0RIC01Gf/1o3j3+Are3l60a9futx6nIJs6dSqhIaGYD1uJVqkKP2x/9uBCoh5f4eiRw/l20EfHTp1JV1DHfoIbqho6mY7JKyiSmpyA7+qhxL97QlBgAE2bfnvN3aKsTJky9OnTl4fnj/PgbNbvFVd8NnHp+GoWLlzIhAkTvnIGIbfl6u6Wtrb/XVSyYcOGtGrVikqVKnHw4EEGDx6cI4+hoqKCiorKjxvK2Pr163F2dqZ22y6Y9JuHvLzCb59TKpXy4Kw7VZtaoayq8fU2Eglvn93k+Y1gnl8P4f2Lu8grKKJXozmtOk6kUiPTbE23fHH7FCfWj6JlyxZ4e3kWiKHwgpAf/Ltrze49uzEfvJRqzWx+qr9WaX0aWQ2kkdVAPse859m1QJ5cOcGZAws5c2AB/ZedQVWjeEb7iJuhSNJSqdIke4Xz/3Xx+Gqu+m1mxYoVjBo16qf7C0JBt2PHDkaOHEkD83606ugCwKc3j4h5+5wS5Wvx8IIX5Wq1Qr9eW5RUi2X0S09LJWDzeCIfXcTH2xtDQ0NZPQUhlyQnJ9O+QweuXb+Jg8tOSpb//g5tapol6Tj9IKrFdIh6ch3/dSMwMTFi/769KCqKzeULE4lEwvjx4zE0NKR+/frAlwv/ysrK6OjoZGr7vYv/Cxcu5K+//srWYyoqKrJt2zZ69OhBgwYN0NP7MqJRTk6OxYsXU6JECaZPn05KYhwt2o/Fd/VQ0hPeE3YynCpVqvzg7D+mpKTE/v376NmzF8c2jsFy+CoqNjDBf/0o3j66hLe3F6am394JuLDbs2cPy5cvp033GVSo04anVwPwWzeKjtMOfHWzpnunDnMzeBfr16/P12u3NWvWjIMHD/L6wUWqNs38PTM1+TN+q4cR8+YBQYEBNG/eXEYp8z8FBQXc3FxRUlLEbftk0tNTqNO2KwDXT2zn/NGlzJkzh2nTpsk4adGVp5/SOjo61KxZk0ePHn31uJ6eXpYFLaOiojLe+AuqhQsXMmPGDBpaDKBNt+nI5dDVw7dPbxAd+QSjXrMz3Z+alMCLO6d5fiOE5zdCSYx9j4q6NhUbmNDEdjj69Y1QUc9+kevJlRMEbZmAhYU5R48cyZfDfwUhP5JKpYwePZrt27fTbsCijDVpfpW6dinqmvSgrkkP4j++YffUdjy57E9dkx4ZbZ5eDaB05QZoliz3U+e+7LWey55r+eeffxhfhLdoF4qugwcPMmjQIOoYdcOwx6yMqUrPrgWhqKKO2cBFBG2fgv+G0SgoKtPUfiTNHUcjkaQTvH0yL26F4X7sGObmPx4pKhQ8U6dNIywsHPvxrpSp0jBbfcpUbsCHVw/wXT2YJo0b4uHuXiAu7Ao/x9nZmVu3bn13c7LsmD59Oi4uLhl//7vO8rfIy8tjbf31DXqmTZuGlpYWzs7OPLrgiVx6MqEhwdStW/e3Mv4vJSUl9u3bS69evTm66csSETGvH+Dt7YWZmVmOPU5Bc+XKFQYPHkJNgw40tBgAwJsHlwA4d3QpTWyGUb52axSVv2yYEPXkGif3zGHw4MGMGDFCVrGzZYebK2mpqRzZNA6Locszdln9d6mB6Fd3OXHCn1atvr4plfBfCgoKbN26FRUVFTZunIEkLRWJJJ0zBxcyffp05syZI+uIRVqeFsni4+N5/Pgxffv2/epxAwMDgoKCMv1ACwgIwMDAII8S5qx/d8ZavHgxzZ3G0txxdI6uP3H/7DGKFdelXO3WxH14xfPrITy7Hsyr++eQpKVSvGw1arXpSKWGpuhVa5LtRb8zPcaZY4S6TadLly7s2rUTZWXlHMsvCIWZVCplwoQJbNiwAZN+86lt2ClHz69Roizl6xjw8IJXRpEsLSWJ5zfDaGr3c1+yrvlv5YL7Cv766y+mTJmSozkFoSDw9PSkV+/eVGvpgHHfvzN9Vj+9Fkj5Wi2J/fASHd3KREc+QZKeRnpqypddYHfO4sklXw4ePIi9vb0Mn4WQk2JiYnBq34GkpCQ8j3ugp6tLeloqUU+uUb529n4AxkQ9x2fFAGpUrYKvj0/Ggu5C4TF69Gi8vLwIDw+nQoX/TqnT09MjJSWF6OjoTKPJvnfxP6dnx4waNQptbW1WrlrN0iWLc2Vkz7+Fsr59++Hj64uXl2eRvlDw9u1bnNp3QKdsdUz6zs34LHn77Mt6lm8eXOTNg4vYjdtKpQYmfI55R8CG0TRt2oR169bl+3UKlZSU2Lt3D3379uPg5glIJRIqNzbHf90IPkbcwt/fjzZt2sg6ZoEhLy/P+vXrUVZWZvXqL0UxFxcX5s+fn+//XyjscrVINmnSJBwdHalUqRKvX79mzpw5KCgo0LNnTwD69etH+fLlWbhwIQDjxo3DxMSEZcuWYW9vz/79+7l06RKbN2/OzZi5QiKRMGrUKDZt2kSb7jNoZDkwR8+fnprMowveqKhrceiv9nx8df/L2hc1W2LQZQqVGpp+d62M7LgZvItTe/9m8ODBbNq0CQWF358iKghFwb+71qxatQqj3n9S17hbrjxOjZYOhOyYQfynSDSK6/HizmnSkj9nGQL/PTcCd3D20D/MmDGD2bNn/7iDIBQygYGBdO7ShUoNzTAb+E+m5RA+x7zj7ZPrADy/EUrpSvUx6DqVGi0dUNMqxal9c7l/+gg7d+6kc+fOsnoKQg6Li4vD2saG6zfvoKikgrFxO4KDA0lMTOTvv/8mJTGOVp0mfvdHTPzHN3iv6E/Z0iUICPDPMu1OKNikUiljxozh2LFjhIaGZpnC2KxZM5SUlAgKCsp4b7h//z4RERF5evG/d+/e9O799YXBc4qioiL79u3N1ccoCFJTU+nStRvRcYl0mrknY6RYeloq757fAkBBSYUKdQwopV+H9LQUTmwYjbqKPMeOHi0wo0wVFRXZvXsXioqK7NnqQskKtUh49xw/P1/atm0r63gFjpycHCtXrqRq1aqkp6czYcIEUSDLB3K1SPby5Ut69uzJhw8fKF26NG3btuXcuXOULl0agIiIiEwLl7Zp04a9e/cya9YsZsyYQY0aNXB3d8+Y319QpKam0r//APYf2E+7AQsy5hjnpOTEeOQVFElJjEOvejOaOzpToW7b394tE7588F/x3sAF9xW4uLiwdOlS8Y9VEH7C7NmzWbJkCW26z/jmrjU5oUpT
K8J2z+bxRV8aWQ3k6ZUT6OhVpXjZatnqfzt0L6f3z2PSpEnMmzcv13IKQn516tQpnJzaU66WARbDVmQZca2qUZz6pn1QKaZFjVZOGf+2/t3k4lbwLjZt2kSfPtnfJVrI3xISErCzd+Da9VvYT/iye5v38n4YG5sQGhqCjo4OLi4upCTGYdRrzleX0EiM+4D3yoFoqikQFBRAmTJlZPBMhNzk7OzM3r178fDwQFNTM2OdMW1tbdTU1NDW1mbw4MG4uLhQokQJtLS0GDNmDAYGBrRu3VrG6YXc4OLiwunTp3GcuBONEv/dGEFeQZHmjmNIS02mvmlv1DS/bO4QtusP3kfcIjwsjHLlfm6JDFn7d00tZWUljnt64eXlibGxsaxjFVhycnKMGzdO1jGE/yEn/Xfbk0IiNjYWbW1tYmJiZLK4fFJSEl26dsXPzx/zIcsy5mrnymPFf0JZXStHNgH4l1Qq5ezhf7juv425c+cyc+ZMUSAThJ8wf/58Zs2aRevOk2hiOzzXH89v3SgSPkXRYdp+dkxsQz2TnrTq5PLDfndPHSLUbQZjx45l5cqV3/13Luv31fxIvCYF38WLFzE1M6e4fj1sx2zJuOqfHZc813LRYxUrVqwQa/gVIomJidg7OHL6zFnsx7tmLLAd++4FXsv6oqOhTGhIMEFBQQwdOpTqrRwxHbAIBUWljHMkf47Da1lfpInvOX3qJDVq1JDV0ylQCtp76rc+M11dXRkwYADw5TfBxIkT2bdvH8nJyVhbW7N+/fpsr7Vc0F6ToszV1ZVBgwZh1PvPbF0cvR22j/Bds9myZYvYSVwQ8lB231fF/tM5KC4uDhtbO04EBGIzemOuFsjgyxXunCyQSSTphO/6g+v+21i1ahWzZs0SBTJB+AlLlixh1qxZtGg/Lk8KZADVWzrw9tkN7p8+QnJCdLamWj4460HYjpmMGDHihwUyQSiMbty4gaWVNVp61bFx3vhTBbJr/tu46LGKefPmiQJZIZKcnEyHjh05dfo0tmO3ZNqBTqu0Po6T9xKbKMHI2IR27dqxf/9+nl7yIWDTWNJSk4H/LF69dhjJMa8JCgwQBbJCTCqVfvX2b4EMQFVVlXXr1vHx40cSEhI4evRogd+MTMjq/PnzDB8xgjpG3ajXrtcP2795eJnT++YycuRIUSATss3Ly4uePXt+cwNEIWeJIlkO+fjxI2bm5py/cAn78a5UrF+whpymp6USvHUi904dws3NjbFjx8o6kiAUKKtWrWLKlCkZO97llUoNTVFUUefMoUVolChHqUr1vtv+0QVvgl2nMHDgwAKxSKwg5LQ3b95gZm6Bik45bMduQ0k1+4up3wrZw9lDi5g2bRozZ87MxZRCXkpJSaFzly6EhIRiM3oT5Wq2zNJGs2Q5HCbt5nOaAkbGJjRq1AgPDw9e3zmF7+qhJMVHc2KDM9Ev7+Ln60ODBg1k8EwEQchLkZGRdOjYiVL69TDqNfuH36niP0USuGk0rVu3ZuXKlXkTUijwgoOD6dypM0cOH6GtYVtu3rwp60iFniiS5YA3b95gZGTMnfuPcZi4i7I1cn73mNyUlpLEifWjeHr1BIcOHaJ///6yjiQIBcqGDRsYP348ja2H0LLDhDx9bCUVNao0Nic1KYEqTS2/+wXtyZUTBG2dSO9evdm8eXOmNSEFoaiIi4sj+tNHNEvq/9QIsnunj3Jyz5+MHTuWBQsW5GJCIS+lpqbSo0dP/P1PYDVqPRXqfHtnNo3iejhO2kOKnDpGxiZUrlyZEyf8+fTiFnummRL58AKensfFmlOCUER079GD+KQ0LEeuQUHp+wvvp6UmE7BhNFrFVDly+BDKysp5lFIoyN69e4ejoyP19GuxfsQ/xMXEMnnSJFnHKvTEL6Tf9OzZMwwNjXgR+QGnyXsp/YNRHPlNSmIcPqsGE/XwAt5eXnTq1EnWkQShQNm2bRujRo2igUV/WneZIpORWbXadKJE+ZpUb273zTbPrgcTuHk8Xbp0wdV1u9itViiyatasycGDB3l+I4iATWNJ/89Uue95dNGH0B3TGTJkiJiiXIikp6fTt28/PI4fx3L46mzNAlDXLo3jpF2gWhxjk3bo6OgQFhqCWTsjDh86hJmZWR4kFwQhP4iLi0eSlkp05NPvtpNKpZzcNZuPr+7h4X4MXV3dPEooFHRKSkpoFNPgXdwHDpx0JzEliQEDB8o6VqEnimS/4e7du7QxbEv05zScpuzL9o5y+UVS/Ce8lvcn9s19AgJOYGX147WMBEH4r127djF06FDqmvTEsLvsNrnQr9eW7n95Z1pD539F3DpJwMYxODo4ZGzbXZiFh4fj6OhIuXLlkJOTw93dPdNxqVTK7NmzKVu2LGpqalhYWPDw4UPZhBVkolOnTni4u/P6zin81o4gNTnxm22fXQsiaOtEevboycaNG0WBrJBIT09nwICBHDp0CMthK6nc2DzbfdU0S2I5Yi0fP35k9erVNGvWDF9fHxwdHXMxsSAI+U3ACX9atWiK1/IB3Ajcwbf2w7sVspt7Z46yZfNmmjcvWDOOBNnS0dHh9JnTyKsqEng9nA0bNtCjRw9Zxyr0RJHsF125coW2RsakyhfDcfJetEpVkHWkn5IQ/X/s3WVYVOn/x/H30AKC3d1dqFh0h73umih2B2vX6qprd8uq2F10p92dgGICCtIN83/gLvvjb6ECA3i/rosHO+c+53zOuAxnvueOCBxXDiAjPpzAAH86d+4s60iCUKQcOXKEIUOG0LBzb3QHLCi0X5xfPriA55YxmJqacOTIYRQVFb++UxGXmJhIy5Yt2bx58ye3r1ixgg0bNrBt2zYuXbqEmpoaZmZmpKSkFHBSQZYsLS1xdXXh3dMbuG0YRlpKwkdtXt4/h9f2iXTv1o09exxED8xiIisri5EjR3Lg4AEMh6+ijpbZN+2fmhSPz992lCpVikmTJuVTSkEQCruyZcvi6eHO5MmTOHd4MX67Z2Qv5PGvVw8vcf7wEiZNmoSNjY2MkgpFWb169bh67Sq3b99m1KiCWRjsZyeRfq7kXUQVxHLJQUFBWFpZo16+NhYT/0ZFvVS+nCe/xL19jsvaIagqSvH18aZhw4ayjiQIRcrJkyf59ddfqdu+Kwa2y/J0ldm89PrxFdzWD0NPTwfHM2dQUcn9/Ev/qygvQy+RSDh16hQ9evQAPvQiq1KlCr///jtT/5nTITY2looVK+Lg4JDrp3NF+T0Rcjp//jxm5haoV6iD5cS/UVbTBP77/TEw0MPxzBkxf0wxIZVKGTt2LNu3b8fAdjkNO/X8pv3TUhJwXTeUxLdP8ffzpXXr1vmU9OciPlM/Jt6TomX//v0MHz6CUlUbYDp6E+plKhMf9ZqTS3rSXqsVXp4exb4nvyAUdrn9XBU9yb6Ru7s7pqZmlKrWBCs7hyJXIIt+/QTHFf0pU1KZ8+fOigKZIHwjJycnfuvbl9ptzDEYsrTQFsjCQ27gvnEEnTp15Mzp099dICtunj59Snh4OMbGxtmvaWpqoq2tzYULFz67X2pqKnFxcTl+hOKhU6d
O+Pv5khL9HOc1NiTHRxERegv3jSPp2LEDp06eFAWyYkIqlTJ58mS2bduGns2Sby6Qpacm4b5xJPERIXh7eYoCmSB8RWZmJmlpabKOUSAGDhzIuXNnUUh7z8klvXhx7yyeW8ZStlRJjh09IgpkglCEiCLZNzh+/Dhdu3WjcqOOWEz8GyUVdVlH+iaRz+7gtHIANatV4NzZIGrWrCnrSIJQpLi7u9P7l1+o0Vwfw2ErkZMvnDc8kU9v47Z+GG212uDs5EiJEiVkHanQCA8PB/ho0tyKFStmb/uUpUuXoqmpmf1TvXr1fM0pFCwtLS2CAgOQJkfhuKI/bhuG0bpVc/H7U4xIpVJmzJjBhg0b0BmwgMY6fb5p/4y0FDw2j+H9i/t4uLvRrl27fEoqCMVDdHQ0LVq2pHyFipw/f17WcQqElpYW169dpXWLJjivtSUuIhTHM6cpV66crKMJeSg2Npbo6GhZxxDykSiSfYOVq1ajWEIDoxHrUPjKMr+FzevHl3FebUOzJg0IDAigUqVKso4kCEXKlStX6NGjJ1Ubd8Z45DrkFQrn3F5vn9/DZZ0tLVs0xc3VBTU1NVlHKhZmzZpFbGxs9s+LFy9kHUnIY82aNeNsUCAllaU0bdwQN1dX1NWL1sMw4fPmz5/PypUr6dx3Ds0MBmS/nhwfxV2/A5xZNYgX94I+uW9meioeW8fxNvQGbm6udOrUqaBiC0KRFB8fj5m5Oc9evEGtfG2MjU3w8vKSdawCUaFCBXx9vFm/fj1ubq60atVK1pGEPPTkyRPqN2hIrdp1fpri78+ocHaDKKSWLF6EpZUVfrtnYDxybaEdZvX/hd32x2vbeLp06YLjmdPipl8QvsPz589JTU2hYt02yCsUzqFXUS8f4brWliaNGuDh7k7JkiVlHanQ+fcBQUREBJUrV85+PSIi4os3ssrKyigrF62HI8K3a9CgAc+ehso6hpDHFi9ezOLFi+nwy3RaGA8hLTmepze8eXLZmZf3z4FUilSaRdkq9aneVCfHvpkZaXhtn0jE48u4uDijq6sro6sQhKIhJSWFrt26c/feQ6x/30epSnXw2jYBK2trjhw+TM+e3zbMuShSVFRk4sSJso4h5LHnz59jaGRMpoI6GpVrYGxswpkzpzExMZF1NCGPiZ5k38DY2Jgjhw/z7IYnAXvnfXaZ38Ik+LIL7pvHYG5ujquLsyiQCcJ36tWrF7Nnz+bSydVcc94i6zgfef8mBJe1Q6hXpyZenh5oamrKOlKhVLt2bSpVqoSPj0/2a3FxcVy6dImOHTvKMFnxFBcXh5eXF+np6bKOIvykVqxYwbx582jfYzKtzUcQGxmGg11HfHdNJ/F9OF36zcNiwnYAarfJucplZkY63jum8PL+WU6fPoWRkZEsLkEQioz09HT6/Pob5y9cwGz8dsrXbIqicgnMxm2hZksTfvnlF/bs2SPrmILwzSIjIzEyNiE+JQurKQ5YTtpFxfrtsbK25tSpU7KOJ+QxUST7Rj179mTXrl08PHuMC0eXFepC2f3Ao3jbT6Ff376cOH5MTNxdDF25coUNGzb8NJOiypJEImHJkiUsWLCAy6fXcvnM+kLz+x8T8QznNTbUrFYRH28vypQpI+tIMpWQkMDNmze5efMm8GGy/ps3b/L8+XMkEgmTJ09m8eLFODo6cufOHWxsbKhSpUr2CphC3khKSsLK0gJTU1N69uhOUlKSrCMJP5l169YxY8YMtKzHoWU9DgBFZVUq1mmFRCLH+9fBvHt+nzdPrqKiXprK9bWy983KysRv1zSe3/blxPHjmJuby+oyBKFIyMrKYsgQW9zc3DAds5kqDf6bt09eQQmjEaup0qgjQ4YMEQ9OCtCdO3eYNWsWb968kXWUIismJgYTEzPC377HarID6qUroaCkgunYzdRsaUKfPn3Yu3evrGMKeUgMt/wONjY2xMXFMWHCBJRUS9K263hZR/rITY+dXDi2jDFjxrBp0ybk5EQ9tLg5f/48ZqYmJCQm4enhwYmTJ8VwsALwxx9/oKyszKxZs8hKT0O791QkEonM8sS9fYHLGhuqlC+Dr4+PmBwWuHr1KgYGBtn/bWdnB8DgwYNxcHBg+vTpJCYmMnLkSGJiYujSpQvu7u7iQUIeSk9Pp03r1jx58oSZlvXZ4OONmYkxTi6ulCpVStbxhJ/A1q1bmTJlCq3MR9Cu+6Ts11U1y9N92n6S46O45rKVe34HUNWsQK1WRtmLsUizsvB3mEXoNXeOHj1K165dZXUZglAkSKVSxo8fz6FDBzEeuY5KdVsTdieAGs10s++RXt4/x5vHl+nXrz8KCgqEhISwYcMGhg8fTvPmzWV8BcXTw4cPMTA0IurdW44eO46frw81atSQdawiJTExEQtLS56EPqPrtANoVvxv4bt/i78+f3+4x9TR0aF27doyTCvkFVE5+U7jx49n8eLFXDmzntveDrKOk00qlXL59DouHFvGzJkz2bx5syiQFUMXL17E3NSUpmU02WptiI+XJz26dyclJUXW0X4KM2fOZM2aNdxw38H5o0tl1qMsPuo1zmsGUa6UGn5+Ph+t2Piz0tfXRyqVfvTj4OAAfOgV+OeffxIeHk5KSgre3t40aNBAtqGLkaysLGyHDOHR48dkSaWUU1dmU/9mnD1/gb///lvW8YSfwM6dOxk7diwtjIfQofe0Tz7IKFGyLG0sRiPNyiIh+jV12pgCHwpkAfvm8eTiGfbv30+vXr0KOr4gFDlz5sxh69atNDUYyNMbXjjYdcR1/XAenf8wDO3Nk6t4bh2Hubk5e/Y48PTpU/T0DdiwYQP6Bobcvn1bxldQ/ISGhmJgaIREpTS9554kOj6Vzl10CA4OlnW0IiM1NZXuPXpw4+ZtLCftpGzVj+8VE99H8Db0Bg0aNPrpR3IUJ6J68gNmz57N1KlTOXd4CQ/PnZB1HKRZWZw7tIhrzptZtmwZS5culWkPFyF/XLlyBTMTExqWLsnfXY0wq1eTHV2N8Pf1oXu3biQnJ8s64k9hypQpbNq0idteuzl78E+kWVkFev6E9+E4rx5EKTUl/P18qVKlSoGeXxA+RSqVMmnSJA4cPJj92tSjdxm6+wbly5WhW7duMkwn/Az27t3LiBEjaGowgE6/zf7ifZCqZjmqNu6IorIaVRt3QiqVEnRwIQ/PHsPBwYG+ffsWYHJBKJpWrFjB0qVL6dhnJnd99xF82ZnM9FQAylZryNuwe7hvHEmnjh05fuwoL1++RE/fgKQMeX5d4IyCekX0DQy5deuWjK+k+Hjx4gUGBkakSpWwmuJAhVrN6TrtIEkZCnTR0eX+/fuyjljoZWRk0LdvPwIDz2I2bjvqZSpz29uBhOj/hq0mxb7Ddd0QNNWU8PX1FvMBFyOiSPYDJBIJK1asYMSIEfjvmU3INXeZZcnKzMDPYSZ3/fazdetWZsyYIbMsQv65du0apsbG1NdUZ1c3Y9SUFAHoUqMKf3czItDfn67W1mLunwIybtw4duzYwT3/AwTsm1dghbKk2Le4rLFBTUmKv58v1atXL5DzCsLXLF
q0iE2bNtGrTeWPtrl7eIkee0K+Onz4MLa2tjTq0gedfvNz9aDQfNwWBq0MQl5BiXNHlnDP/yD29vYMGjSoABILQtG2Y8eOf+b9G0srs2Ho2SymSkNtAJRUNZBXVMZ1/VCaNmmIk+MZwsPD0dM3IDFdDuvf91G2WkOspjigqFEJA0Oj7LlEhe8XHh6OgaERsUnpWNvtRVWzPADqpSvRdeoBMhU10NHVE733viArK4uhQ4fh6OREE4MBXD61hr1Tu3Du8BIuHF8OQGpSHK7rh6KQlYyvjzdVq1aVcWohL4ki2Q+SSCRs3bqVX/v8io+9Hc/vBhV4hsz0VLy2TyL4kiP79+9n9OjRBZ5ByH83btzAxMiIWuol2NXNCPV/CmT/6lS9Cru7G3Ph7FmsLS1JTEyUUdKfy4gRI9i9ezcPzx7Dz2EmWVmZ+Xq+5PgonNcMRolUAvz9qFWrVr6eTxC+xV6H3dQur8bgztVpXUOTBhXVUVZSwt/fnzZt2sg6nlCMnThxgoEDB1K/Q3f0Bi1CksupJhSVVVEqoc7FEyu5472HLVu2MGzYsHxOKwhF39GjRxk9ejTNDAfRrvtkAJro/obBkL8AKFmmCq7rbKlZtTIe7u5ER0ejq6dPfCpY2+1FvXQlAFTUS2E9ZQ9KGpUxMDQiMjJSVpdU5EVFRWFkbExkVBxWdntQL5PzgZWqZjk6951HdNQ7nJycZJSycJNKpUycOJH9+/dhOGwld333Ex5yHan0w4PwKg21SU9Nwm3DCNLjI/Dx9qJu3boyTi3kNVEkywPy8vLs27cXM1MTPLeO5c2TawV27vTUJNw3jeblvQBOnTpF//79C+zcQsG5desWxoaG1FBVxqG7MSWVlT7ZTrtaJXZ1N+LyxQtYWViQkJBQwEkLl+joaPT0DTAwNCImJibfzjN48GD2799P8CVHfHdOJSszI1/Ok5LwHpe1Q5BLj8Pfz1f8US4mHj58yLlz52QdI0/s+Hsnb5OkLHR6Qt0KaoS8S+LosWPo6enJOppQjAUEBPBb377U0TJHf8jSXBfI/nXlzHpuutuzbt06xowZk08pBaF4OX36DHIKijQzHJij16ZG+Rr8MvcUmWlJlNVQxcfHi4SEBHR19YlPkdL1930fFW/kFJRQLKFOWmoqmZn5+7CxuIqNjcXE1IywF+FUbtSJoIN/ftTm/ZsQvHdMpmWr1owbN04GKQu/uXPnsnnzZnQH/kn99tb0X+JFh1+mZ2+v3qQzHlvGEvfmMZ4e7jRr1kyGaYX8IopkeURRUZHjx4/TQVsbt40jePv8Xr6fMzUpDpe1trx7dhN3Nzex+lIxdefOHYwMDKiqosie7sZofKZA9q/2VSuxu7sx1y5fxsLMjPj4+AJKWrhERUVhaGTM1eu3uHTlGiampsTGxubb+fr378/hw4cJveaO944pZGbk7fLmqUlxuKyzJTPxHX6+PjRs2DBPjy/IRkhICNra7ejSpQuTJ08mq4DntstrhoaG+Pr5ExYHx6++Zteu3WIeMiHfPX36lMyMDCrW00JOTv6b9r3mvJlrzptZsWIFkyZN+voOgiAAsGLFcurWroPzqoFEvXiY/XpqYiz+e2aiIp+Br6836enp6OkbEJucSYc+s7jltZsjf1hnf1dKT03GfdMoosPu4O7uRuXKHw/XF74sISEBcwsLHj4OxnyiPY8vnOLF3UDePrub3Sbu7Qtc1g6hepUKeHt5ipWmP2HlypX89ddfdOwzgyZ6H+akVC9TmVZmw7PbXDi2jMjgazg7O9GuXTtZRRXymUQqq2XZ8klcXByamprExsaioaEhk/MbGhnx8PFTuk47QOnK+dPTIykuCrf1Q0mNfYOnhzvt27fPl/MIsnXv3j0M9PQoryBhf09TSqko53rfG28iGXLGm2YtW+Hu6SmT3wdZeffuHYaGxoSEvcB6yh6ysjJxXmNDi2aN8crn9+LMmTP80qcP1ZvqYjJqPfKKuf83+5y05Hhc1tqSHP2cAH8/WrZsmQdJc0/Wn6uFUV68J1KpFK02rYiJCqVFA3XO+ITz6tWrYrEIw/Pnz4mKiqJ169ayjiL8BKRSKZMnT2bDhg107juXFsaDc7XfDXd7Lh5fwaJFi5g7d24+pxS+RPyd+VhReE/evn2LqZk5Dx8HYzHxb8pWbYDLWltS3j/nbFAgJUuWRFdXn5ikdGIiX+TY13DoCuq2tcB902jePb2Bu7sburq6JCUlMWSILZlZWezd44CampqMrq5oSE5OxtLKmgsXL2M1xYGMtGQcV/03p6JmxVp0+30vjiv7U1ZDhbNBgaIQ+T/S09O5ffs28fHxGBgY0Mp8BB3/p+fYv6RZWfg5zCT4shNnTp/GyspKBmmFH5Xbz1XRkyyPaWho4OHuTvWqFXFdZ0t81Ks8P0dC9BucVvaDlGjOBgWKAlkx9eDBAwz19SkrD/t6mHxTgQygdeUK7O1hwr1btzAxNs7XXlSFybt37zAwNCQ07OWHSWGrN6J8zaZYTXHg9p0HmOZz77ru3bvjeOYMrx6cxWPLODLSUn7oeOkpibhtGEHiu2f4eHsVeIFMyD8SiYSoqChSU7PwuRDFgAH9i82Na40aNUSBTCgwEomEdevWMW3aNM4dXswNd/uv7nPb24GLx1cwd+5cUSAThO9Uvnx5/P180WrdEpc1g3FabUNcRDCeHu5oaGigp2fA+8Q0rH/fl+OhoZy8ItWadMZ984cCmZuba3aBzLprN844OuHi6oaFpZWYY/cL0tLS6NW7N+fOn8d8wg4q1mnJ05veOdqUKFkWl7WDKakih5+vT7G5z8gLGRkZ9O8/gLZt27J4yV+oqZfkzeNLpCS8z9FOKpVy9vBiHl84zf59+0SB7CcgimT5oGzZsvh4e1GmZAlc1gwmKfZtnh07JuIZjiv6oqaYxbmzQWIcdDH16NEjDPX1KS2Rsq+HKaVLqHzXcVpWKs++niY8unsHY6P8nZerMHj79i36BgY8ff7mQ4Gs6n8r6VWo1RyrKbu5efseZubm+Voos7CwwMXZmYgnl3HfNJr01OTvOs6/QxBi3zzGy9MDLS2tPE4qyNq58xdp2KQtbbTasW3b9lytxicUTa6urvTt15/g4GBZRykW3Nzc+KVPHx48eAB8KJQtX76cOXPmcPH4Cq45b/7svnf9DnDu8BKmTZvGn39+PG+PIAi5p6mpiaeHOwYGeiREhuLi7ETlypXR1dMnOiEF69/3UbJsVUZuvYvp6A0AqJetgv/uGbwNuY6rqwt6enokJyfTtVs3zp07j8VEeywm7uTylauYW1j+9HPsfkpGRgb9+vXH29sHs7FbqdLgw9C/Wi0NqdqoI3qDFtF/iRcZKfEoSlPw8/WhRo0aMk5deGRmZmJjM5iTp07R2nI0AQGBtGrVkrSYVzitGkhizH8LSFw+vZa7vvvYtm0bffv2lWFqoaCI4Zb56OnTp3Tq3IUMhZJ0m3oAZTXNHzpe1IuHuK4fSpUKZfHx8aJatWp5lFQoTJ48eYKejg7qmekc6GVKOdUSP3zMe5FRDDrtRd2GjfD09qZMm
TJ5kLRwiYyMRF/fkBdvIrCy20uZKvU/2S4i9Cau64bSpnVLPNzdUFdXz7dM/v7+WFlZU7p6Uywm7EBRJfdDBjLSU3HfNIqopzfx8HCnS5cu+ZbzawrT52phId4T4VscOXKEAQMHgkSOKpUrc/7cWfE3/Ad4enrStVs3srJAU1MDP18fmjdvnr190aJFzJ8/Hy3rcbTrPilH8flB0DH898xm0qRJrF27VhSmCwnxmfqxf9+Tt2/fMnLUaC5evIjjmdO0bdtW1tG+ysjYhItXbtBz9nE0yv33WZeRloLXjikkvn9DfEQorq4uGBgYZBfIgoLOYT7BnqqNtAEID76O6/phtNVqjbuba77esxUlWVlZ2NgM5tDhw5iN2UStVkYftUlNisdljQ3p8eEEBQbQpEkTGSQtnLKyshg6dBh79+3FZOQ66ra14MW9IDw2j6Ft27Y8DQ0lRaqE1RQHQq66cfH4ClauXMnUqVNlHV34QWK4ZSFQu3ZtfLy9yEx892GZ2JTv7y4cHnIDx1UDqFe7OmfPBoqb62IqJCQEAz09VDPS2N8zbwpkAE0rlOVAT1NCHz3EyMCAqKioPDluYREREYGevgEv3kRi/fv+zxbIACrWaYXFpJ1cu34z37vx6+vr4+npQcyrB7iuH0Zacu56r2Wmp+K5dTxvQ67j4uIs0wKZIAg/xt7enn79+lG3rRV9/3QnNikdIyMT3r7Nu17mPxM/Pz+6detOlYYd6b/UB3m18ujpG3Djxo3sNvPmzWP58uVcc97MpROr+Pd58KMLpwnYO4cxY8aIAplQZAweMgQnJyfS5NQwMjbhypUrso70VVptWpMUF8XLe2dzbpBIyMpMIy48BBcXZwwMDEhJSaFb9+4EBp3FfMKO7AIZQKV6bbCcvIvLV64yZqxYjfFf58+f58CB/WhZj/1kgSw9NQn3jSNIfv8SH28vUSD7H1KplNGjR7N37x4Mh62kblsLAKo31cFi4t9cu3admrVqoakix8nFPbl4fAVz5swRBbKfjCiS5bMmTZrg5elBXEQwHlvGkpGe+s3HeHn/HC5rh9CmVQv8/XwpX758PiQVZO3p06cY6OmhnJbCgV6mlFfLmwLZvxqXL8OBXqa8CAnGUF+/2HxBCw8PR0/PgJfh77Ceuj9Xi2VUqtsay0k7uXL1GpZW1vlaKOvcuTM+3l7ERwTjstaW1MQvzw2XmZGO147JvHl0AUfHM+jr6+dbNkEQ8tfq1asZOXIkTfT7Yzh0BRrlq2M1ZQ+vI6MwNTP/aeaKzCtnz57FysqaivXaYjJmE2qlKmI1ZQ9KmlXQNzDMUTyYPn06a9eu5Yb7Ds4f+Ysnl53x2z2DoUOHsmnTJlEgE4oGiQQPDy9Mx2yix4zDqJWvjZGxCZcvX5Z1si9avnw548aNI2DfPO4FHAL+eQC4ZRwRjy/j4uKMoaEhKSkpdO/Rg4DAIDQr1sF/z+yPjvX+dTDpqcnUrFG9oC+j0GrZsiUdO3XitudOXj/O+f9CRnoqHpvHEPv6EZ4e7rRq1Uo2IQshqVTKhAkTsLe3R3/IUhpo51x9u2qjDlhO3sXNW7epW68u+rqdmTNnDosWLZJRYkFWRJGsAGhpaeHq4kxkyHW8d0wmMyM91/s+veGN28aR6Ovp4OXpgabmjw3ZFAqnsLAwDPT0kE9OZH9PEyqoqebLeRqV+1Aoe/3sKYb6+kRGRn59p0LszZs36OkZ8Doymq6/76d0pTq53rdSvTZYTNzJxUuXsbLuSlJSUr7l1NbWxt/Pl5T3L3BeM/ijCUH/lZWZgY+9HS/uBnDq5ElMTEzyLZMgCPlHKpUyd+5cpk6dShvLMej0/wOJ3IdbrlIVa2E5aRcPHwdjaWWdr589xcnFixcxt7CkbK0WmI7dgsI/k4CrqJfCasoe1MrXxtDImPPnz2fvM3nyZLZs2cJtbwe8d0xh4ICB7NixAzk5cfsrFA0SiRymYzZSs4UBSiVKYjlpJ+oV6mJkbMKlS5dkHS+br68v586dy/5viUTCxo0bmTBhAoH75nPbew+eW8cT/vgSzs5OGBkZkZKSQo+ePfHz88doxDrePb9P3NsXvHp4Mbv354OzxwjYO4fRo0eLQsX/KFmyJF6ennTsoI3r+mG8uBcEQGZGGl5bx/M29AYuLs5oa2t/5Ug/D6lUyu+//87mzZvRG7SIRp17f7Kdsqom8orKSKVSXF2cWbx4sXio8hMSc5IVIFdXV7p1707dtlYYDl2RfcP8OY8vnMFv9wx69erFgQP7UVJSKqCkQkF6/vw5+ro6ZMbFcqiXGZVL5v9S18HRMQw85Um5qtXx8fOjUqVK+X7OvPb69Wv09A2IiIrD+vd9lKpY6/uO8/gKbhuG06ljB1ycnVBVzZ8CJcDt27cxMDRCTrUsVlP2oKpRNntbVlYmvjunEnrNnRPHj9O9e/d8y/GtCvPnqqyI90T4nKysLCZNmsSmTZvo0HsarS1GfrJdeMgNXNYOQV9PBydHR/E3/guuXbuGgaERJSvVx3Li35+c3zEtJQH3DSN5//J+9kp5/zp+/DghISFMnToVeXn5gowu5JL4TP1YXFwcfecc+mg4XVpKAm7rhxMX/oRbN29Qp07uHxDmh127djFs2DDk5eXZu3cv/fv3z94mlUqZPHkyGzZsQElZGSdHR0xNTUlNTaVnz554+/hiPn47SfFR+Nj/nr1f6Sr1aWU2DH+HWYwcOZItW7aI4vYnJCcn07t3b7y8fTAasZbgS448v+OHk6MjZmZmso5XaEilUmbPns2yZcvo0n8+zQ0HfbLd+zchOK8eRO3qVfD39y2Wczj/7MScZIWQpaUlBw8c4MklR4IO/cmX6pN3/Q7gs3MqgwfbcPjwIXHzXEy9fPkSQ309MmJjONDTtEAKZAD1ypTiYE8zol69xEBPlzdv3hTIefPKq1ev0NXVIzI6ga5T9393gQygSoN2WEy05/z5C3Tt1o3k5O9biTI3WrRoQWCAP5LUGJxXD8peOUealYW/wyxCrrhy6ODBQlUgEwQh9zIyMrC1HcrmzZvRHfTnZwtk8GHYt9nYLfj6+jFgwEAyMzMLMGnRcevWLYyMTVArX/uLC6AoqahjMelvytRsgZmZOT4+PtnbfvnlF2bMmCEKZEKRE7BvPkmx73K8pqBUArUylUlOSszXe5bcePjwIcOHD8ewRRd0m3Zk4MCB7Nq1K3u7RCJh3bp1HDx4kAB//+wCWa9evfDy9sVs3DaqNenM0+ueOY6rqlkef4dZjBgxQhTIvqBEiRKcPn2abl274rFlHM9uenPs6FFRIPt/Fi5cyLJly+j06yyaGQzk3YsHvHlyLUeb2IgwnNfYUL1yBXx9i+ciZ0LuiZ5kMvD3338zYsQI2liOQbuX3Ufbr7tu49LJ1UyePJk1a9aILp7F1OvXr9HX1SHp3TsO9TKlmmbJAs/w9H0sA055olGhIr7+AVStWrXAM3yrly9foqdvQFRsEta/70OjfN4sZ/3q4SXcNg5HT/dDrw4VFZU8Oe6nPHr0CH0DQ9JQxmrKHq46b+Zh0FH279+f
4wlsYVEUPlcLmnhPhP8vNTWVvn374ejoiOGwldTX7pqr/Z7e8MJz6wQGD7Zh586d4m/+/7h37x66evoolqyEld0elFW//ruWkZaC24YRvH12k4jwcDFNRREhPlM/9u970qH3NFqZj0AikXzodb5rOqFXXDh06BB9+vSRacaIiAjatG6DJE3K/N9+Z+HhVSiVVObps2efbJ+amkqv3r3x9PTGfPxWqjfVAT70rH107gR121kSH/WagD2zGTZsGNu3bxcFslzIyMhgw4YNNG7cGAsLC1nHKVSWLl3K7NmzaWM1hoy0FF7cDeT9mxDk5BUZvPocKuqliXv7AqdVA6lYpiRBQQFUrFhR1rGFfCJ6khViw4cPZ/Xq1Vx33coNtx3Zr0ulUi4eX8mlk6v5448/RIGsGHvz5g0GerokvnvLARkVyABql9bkUC8zEt5Goq+rw8uXL2WSI7devHiBrq4+UXHJWP++P88KZABVG2ljPn4HAYFBdO/Rg5SUlDw79v/XsGFDzgYFoqqQyeF55jwIPMKuXbsKZYFMEISvS0xMxMraGmcXV8zGbs51gQygdmsTdActYvfu3ezbty8fUxYtjx49wsDQCHm18lhO3p2rAhlAYkwEsRFPqVWrtuiFLxQLF0+sZNuIBmRlZeK3eyahV1w4ePCgzAtkAJqamgwdNpRMBSkT7efw5n0Ei5cs+Wz7RYsW4ebqitm4LdkFMvjQs1bPZjFJMW8J2DOboUOHFvkCWWZmJsOHD6NTB22efaZomFcUFBSws7MTBbL/Z82aNcyePZu23SZSoXZLbnvt5v2bEAA0yldHRb008VGvcV5jQ7lSqvj7+4oCmQDkc5Fs6dKltGvXjpIlS1KhQgV69OjBo0ePvriPg4MDEokkx09+9uiQFTs7O+bOncvFEyu5F3CIrKxMAvfP54b7DtauXcuCBQtEgayYioiIwFBfj7jISA70NKWGjApk/6pZSoNDvcxIiY5CT0eH58+fyzTP54SFhaGjq0d0Quo/BbK8X+WoWuOOmI/fjp+fPz169iQ19dtXo82tunXrcjYoECsLM3bt2sWQIUPy7VyCIOSfmJgYjE1MOHvuApaT/v5o/qCvSU9NIuSqC8rKKtSt+/XVeX8GwcHB6BsYIlXSxGrKblTUS+Vqv/ioVzivsaFCWQ38fH0oUSJvV4kWBFny2z2T4EuOHDhwgF9//VXWcYiNjcXM1IzFixeTlJSInr4ee/bsYcCAAZ/dp379+kilUl4+OP/RtDOPL57Bd/d0bG1ti/wCG1KpFFNTU3bu3MWT+7fo3LED0dHRso71U9m0aRO///47rS1H07breKo20kZ/8F+oapYHoE4bMxJjInBeY4OmqgL+fr5UqVJFxqmFwiJfP30CAgIYN24cFy9exMvLi/T0dExNTUlMTPzifhoaGrx58yb7JywsLD9jysyff/7JhAkTCNr/B44rB/Iw6Cg7d+5k8uTJso4m5JPIyEgM9fWJfvOGAz1NqFmqcAwpqK5ZkoM9TUmPfY+ejk6+P/H6Vs+ePUNXT5/YpAy6Tt2PRrlq+Xauao07YT5+O76+/vTq1StfC2U1a9bk1KmT2Nra5ts5BEHIP5GRkejq6nPrzn2spuyhaqMOBOybh9um0bnaPzUpDpe1Q4h+dgt3dzc6d+6cz4kLv2fPnv0zHF0FqykOlChZ9us7AQnRb3BePYgy6ir4+/lSuXLlfE4qCPmvQ+9p9PvLm4ademYXyH777TdZxwJg9erVBAYFMrP3BMqrl+X27dtf/QwbPHgwGzZs4JbHTi4cW55dKHt8yRHfXdMZbGODvb19kS6QAcydOxdfX18A3sWn8i4qWubzx/1MduzYwYQJE2hpMhTtnnZIJBKUVNRprNOHGs31AKjcoB3Oq21QU8zC38+X6tXz/uG7UHTl6yeQu7s7Q4YMoWnTprRs2RIHBweeP3/OtWvXvrifRCKhUqVK2T/Ftdvjv5NZDhkyhJiX9zl8+DBDhw6VdSwhn7x79w5jQwPevXrBwZ6m1C5duOZJqfZPoUwaH4eejg6hoaGyjgTA06dP0dXTJy45C+vf91OybP7Pm1atSWfMxm3B08uH3r/8QlpaWr6fUxCEouX58+d07qLD0xev6TrtIBXrtCQ5PoqHZ4/z7KYPsZFffsCXFPsOp1UDSY4Kw8/PF319/YIJXoi9ePECPX0DEtPlsLLbk/3EPzUpnofnTvDg7LFP7pcYE4nzGhvUlSX4+/tSrVr+PUgRhILUymw4N1y28uTiGfbt20ffvn1lHSmbhYUFJVRU8Lt7njm/TEY+U4KJsQnv3r374n4TJkxg06ZN3PLcyfmjS3lyyQnfndOwGfRhXsaiXiC7evUqf/31F9XL/NeTdYe9fZGY97c42L17N6NGjaKZ4SA6/jrzo5FZugP/pP9f3lw8thRFaTL+fr7Url1bRmmFwqpAP4ViY2MBvrpaREJCAjVr1qR69ep0796de/fufbZtamoqcXFxOX6KEjk5OXbt2kVSUmKhmFtAyB9RUVEYGxryJiyM/T1NqVOmcBXI/lVVQ52DvUyRT0pAX1eX4OBgmeYJDQ1FV0+f+FToOnU/JcsWXDfo6k11MBu3BQ8PT37p86solAmCkO3x48d06tyFtzFJdJt+iLJVGwAQfNkFADkFRZ7d8v3s/vFRr3Fa1R+5tBiCAgNo165dgeQuzF6/fo2+gSGxSRlY2+1FRa0UIVfdcN88Fgc7bfx2z8R/zxxSk3Le5yXFReGyZjAqcukE+PtRs2ZNGV2BIOQ9/z2zeXzhFHv37i1085Z27NiR4ydOcC3kFgcCTtCyZhNCn4bmagTQuHHj2Lx5M7e9duNtb8eggYPYufPvPCuQZWVlMXPmdGbNmpWvIwI+pWbNmtSvW4fI+A/ndXBwYPDgwQWa4WcVEhLCsGHDaNCxB136zfvk1EXpKYl4bZuAXHoc/n6+1KtXTwZJhcKuwFa3zMrKolu3bsTExHD27NnPtrtw4QJPnjyhRYsWxMbGsmrVKgIDA7l3794nnwwuWLCAhQsXfvS6WB1HKCzev3+PkYEBz4OfcKCXKQ3KlpZ1pK+KSEhiwClPUpVU8PX3p0GDBgWeISQkBD19A5Iy5LH+fR/qpSsVeAaAsNv+eG4dh6WlJcePHUVRUVEmOWRJrDr2MfGe/Lxu3bqFsYkpWYolsZq8G/Uy/w3rO76oJ2qlK5GZnsrL++eo1qQTDTr2oI6WOQqKygDEhD/FZe0QSpdUxsfbS8xDxoe5OnV19QmPiqXr1AM8On+KW567SE9NpHyt5tRvb03wFRfkFJToOeNQ9n7J8dE4r7FBLi2Ws0GB1K9fX4ZXIfwI8Zn6sX/fkx07djBixAhZx/msAwcOMHDgQABWrlzJ1KlTc73vkSNHeP78OXZ2dsjLy+dZpu3btzN69OjscxT0HG6RkZH8PmUyuvoGhfrfrriJjo6mjVZbYhLSsP5970cLfKUmxeGyZjBpcW8IDPCnWbNmMkoqyEqhW91y3Lhx3L17l8OHD3+xXceOHbGxsaFVq1bo6elx8uRJypcvz/b
t2z/ZftasWcTGxmb/vHjxIj/iC8J3iYmJwcTIiGdPHrOvp0mRKJABVFRX5WAvU1TSUtDX1eXhw4cFev7g4GB09fRJzlSQaYEMoGYLfUzHbMLFxYXffutLenq6zLIIgiBbFy5cQFdPHzm18nSbdiBHgSz61RPeht2lYccedPxlOuVqNuXFvbP4/D2V6JcfFi169+IBjiv7U7mcJufOBokCGR+mIjA0NOL122is7fahUb46sZFhpKcmIqegiEa5atRuY0rUi4fUaW2SvV9qYiyu62yRJkfj7+crCmRCsVVY5iD7nAEDBuDm5oarq+s3Fcjgw7VNmzYtTwtkAKVLf7jfLl+uLF26dMnTY+dGhQoV2HfgYKEvkN24cYO1a9cW+fnSUlNTWbNmDU+ePOFsUCDlNEvgtGogMRHPstukJcfjtn44KTGv8PXxFgUy4YsUCuIk48ePx9nZmcDAwG+eJ0JRUZHWrVt/dtiXsrIyysrKeRFTEPJUbGwspiYmhDx6yIGepjQq9+VhxoVNBTVVDvYyY+ApT/R1dfH196dJkyb5ft4nT56gp29AqlQZ69/3olZK9nMS1mxhgOnojThum0C/fv05dOjgT9mjTBB+Zt7e3nTr1p0y1ZtiNn47yqo5VyZ+fOE0AJdOrSEmPBQ5eQWqNu5EzRb6lKpch/Dg67htHEHjhvXx9HCnXLlyMriKwiU6OhojIxPCXkXQdep+NCt+GCppPGI1WtZjCb3uyZUz60lOeE9mRhq1/ymSpSbF47J+KOnxEQT4+9G4cWNZXoYg/PTMzc1lHSGHX3/9lfr161O2bFmxYuFnBAQEYGXdlcSEeLx9fDh96lSRvLdNS0uj9y+/4OLsTIkSqri6uhAUFIi+viFOK/tjbbcX9TKVcds4kvjIEPx8fWjVqpWsYwuFXL72JJNKpYwfP55Tp07h6/t9k+JlZmZy584dsUqRUKTExcVhZmrK43t32dvDmMbli1aB7F/l1UpwoJcppSVZGOjpcffu3Xw936NHj9DR1SMVFax/31coCmT/qtXKCJNRGzh1+jQDBgwkIyND1pEEQSggp0+fxtLKivL12mIxaedHBTIA1VIVUFEvTYXaLTAdvYEhay/T7fc9tDSxJTL0Fi5rh9C2TSv8/XxFgYwPD5JMTM0IfhqGld0eSlfO2auudOW6aFmNoVqTzrx+eJFyNZqgUb46aSkJuG0YRnLUc3y8vWjevLmMrkAQhMKsdevW1KhR4+sNf0Kurq6YmZlTpkZzTMdsxMPDExubwWRmZso62jdJT0+nb99+eHh4Yj5uC+XqtMbCwpK7d+8SGOhPjaoVcFo1EJf1w4h99RAvTw/atm0r69hCEZCvRbJx48axf/9+Dh48SMmSJQkPDyc8PDxHl04bGxtmzZqV/d9//vknnp6ehIaGcv36dQYOHEhYWBjDhw/Pz6iCkGfi4+OxMDPjwe3b7OlhQrMKRfvLUDnVEuzvaUpZOTDQ0+P27dv5cp6HDx+ip29Ahrw6XX/fj1qpCvlynh9Ru7UxJqPWc+LkSQYOHCQKZYLwE9i3bx+//PILNVoYYT5uK4rKJT7ZroXxYIasuYjRsJXUbWuRXUh7esMLt40jMTDQw8PdTcy3xIe/k6ZmZjx49ASrKQ7ZCx98Sv321gDUaWNKemoS7htHEh8RgreXJ61bty6oyIIgCMXCkSNH6Na9O1WadMF8wg7qapljNGINR44eYcyYMRTQdOU/LCMjg0GDbHB0csJk9EZqtzbBfPx2KjZoj3XXrly7do0Af3/q1q5OzMsHuLm50qFDB1nHFoqIfC2Sbd26ldjYWPT19alcuXL2z5EjR7LbPH/+nDdv3mT/9/v37xkxYgSNGzfG0tKSuLg4zp8/XyDDvAThRyUkJGBlYcGdmzfY08OYFhWLdoHsX2VKqLC3hwkpCQnMnDE9z4//4MED9PQNyFQoibXdXlQ1C+/7VqeNKSYj13Hs2DEGDbIRhTJBKMY2b96MjY0NDTr2wnjkWuQVlL7YXvL/VmZ7dOE0nlsn0LNHDxzPnEFVVTU/4xYJiYmJWFhacfvOfSwn76ZcjS/f39XRMsNi/DYadOyJx+YxvH9xHw93N7EiqCAIwjeyt7enX79+1G1rhcmoDdkLytTVMkffZgn29vZMnz690BfKMjMzsbUdyrFjxzAeuY5aLQ0BUFBUxmzMZqo20aF7jx5cv36dq1cuEx0dhY6OjoxTC0VJvs5JlptfMH9//xz/vXbtWtauXZtPiQQh/yQmJmJtacmNq1fZ08OYlpXKyzpSnsnMymJp0BWSMzOxHTosT499//599A0MkSppYmW3F1WNsnl6/PxQR8sM45FrObpjCnJycuzduyfPJ50VBEG2Dhw4wPjx42lhYkunX2d9cin5L7nru5+ggwsZNmwY27dvF58RfPhiY921G1evXcdy8m4q1Pr6UElFZVWqN+2C+5axvA29gYeHO506dSqAtIIgCMXH6tWrmTp1Ks0MBtKl37yPHuo06vILSXFRrFq1CiMjo0I3z9y/srKyGDlyJAcOHsB4xFrqtDHNsV1eUZlmhjY8veHNhQsXMDU1LZJzrQmyVSAT9wtCcZeUlEQ3a2uuXLqEQw9jWlcufEMFv1eWVMosn/OcfhTKgQMH6NOnT54d++7du+gbGCKnWhbrKQ6UKFn4C2T/qtvWAqlUymF7OxQUFNizx0HWkQRByEPx8fEAlKlS/5sKZFKplOuu27h8ag1Tpkxh9erV31xgy29hYWGEh4ejra1doOdNS0vjbFAglRq0p3zN3K0slpmRhtf2iUQ8voyLizO6urr5nFIQBKH4kEqlzJs3jyVLltDc0Aa1MpVIjInIsTozQHJ8NM+uu6OhWarQzuUmlUoZN24cu3fvxnDoSuq1s/yoTXjIDTy3jMHA0Ijp0/N+9Ivwc8jX4ZaC8DNITk6mW9euXDh/jl3djWhbpfBMNv+jsqRS5vic58T9YPbu3Uvfvn3z7Nh37tz5p0BWDqspe4pUgexftVsZUa5mU06fOSPrKIKQQ2xsLLGxsWRlZck6SpE1atQoxowZQ8C+uYRcdcvVPlKplIvHV3D51BoWLlxYKAtkd+/epW2bNnTo0KHAe+6XKFGC48ePE/7kCt7bJ5GZnvrF9pkZ6XjvmMLL+2c5ffoURkZGBZRUEASh6MvKymLixIksWbKEDr2noaSmwcXjK7jrfzBHu4T34TitHEBmYiSBAQWzmv23kkqlTJ48mW3bttGl/x9I5OVx2zSaoIN/Zo9ei3x2B7f1w2ir1QYnxzOUKPHpOUQF4WtEkUwQfkBKSgo9unfnXFAQO7sa0b5qJVlHyjNSqZT5vhc4eu8JDg4ODBgwIM+OfevWLfQNDJFXK4+13R5KlCx6q39mpqfiuXU8Ma8fceTwIVnHEYRsoaGh1KhRg1KlSiEvL0/JkiXR6aKTY9Ec4eskEgmbNm2if7/++Pz9O2F3Ar7YPisrk8B987jp8Tfr1q1j/vz5ha5Adv/+fYwMDCivIIdt6ybY2dmxatWqPD/Pixcv2LFjBwkJCR9t6969O2dOn+
[... base64-encoded PNG image data (notebook cell output) omitted ...]
      " ] diff --git a/src/py123d/conversion/datasets/av2/av2_sensor_converter.py b/src/py123d/conversion/datasets/av2/av2_sensor_converter.py index a6495839..233cba1a 100644 --- a/src/py123d/conversion/datasets/av2/av2_sensor_converter.py +++ b/src/py123d/conversion/datasets/av2/av2_sensor_converter.py @@ -203,19 +203,8 @@ def _get_av2_camera_metadata( camera_type=camera_type, width=row["width_px"], height=row["height_px"], - intrinsics=PinholeIntrinsics( - fx=row["fx_px"], - fy=row["fy_px"], - cx=row["cx_px"], - cy=row["cy_px"], - ), - distortion=PinholeDistortion( - k1=row["k1"], - k2=row["k2"], - p1=0.0, - p2=0.0, - k3=row["k3"], - ), + intrinsics=PinholeIntrinsics(fx=row["fx_px"], fy=row["fy_px"], cx=row["cx_px"], cy=row["cy_px"]), + distortion=PinholeDistortion(k1=row["k1"], k2=row["k2"], p1=0.0, p2=0.0, k3=row["k3"]), ) return camera_metadata diff --git a/src/py123d/conversion/datasets/av2/utils/av2_map_conversion.py b/src/py123d/conversion/datasets/av2/utils/av2_map_conversion.py index 18766ba6..437dde09 100644 --- a/src/py123d/conversion/datasets/av2/utils/av2_map_conversion.py +++ b/src/py123d/conversion/datasets/av2/utils/av2_map_conversion.py @@ -188,8 +188,7 @@ def _write_av2_road_edge(drivable_areas: Dict[int, Polyline3D], map_writer: Abst drivable_polygons = [geom.Polygon(drivable_area.array[:, :2]) for drivable_area in drivable_areas.values()] road_edges_2d = get_road_edge_linear_rings(drivable_polygons) - outlines_linestrings = [drivable_area.linestring for drivable_area in drivable_areas.values()] - non_conflicting_road_edges = lift_road_edges_to_3d(road_edges_2d, outlines_linestrings) + non_conflicting_road_edges = lift_road_edges_to_3d(road_edges_2d, list(drivable_areas.values())) road_edges = split_line_geometry_by_max_length(non_conflicting_road_edges, MAX_ROAD_EDGE_LENGTH) for idx, road_edge in enumerate(road_edges): diff --git a/src/py123d/conversion/datasets/carla/__init__.py b/src/py123d/conversion/datasets/carla/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/src/py123d/conversion/datasets/carla/carla_data_converter.py b/src/py123d/conversion/datasets/carla/carla_data_converter.py deleted file mode 100644 index 548a99bc..00000000 --- a/src/py123d/conversion/datasets/carla/carla_data_converter.py +++ /dev/null @@ -1,460 +0,0 @@ -import gc -import gzip -import hashlib -import json -import os -from dataclasses import asdict -from functools import partial -from pathlib import Path -from typing import Any, Dict, Final, List, Optional, Tuple, Union - -import numpy as np -import pyarrow as pa - -from py123d.common.multithreading.worker_utils import WorkerPool, worker_map -from py123d.common.utils.arrow_helper import open_arrow_table, write_arrow_table -from py123d.conversion.abstract_dataset_converter import AbstractDataConverter, DatasetConverterConfig -from py123d.conversion.utils.map_utils.opendrive.opendrive_map_conversion import convert_from_xodr -from py123d.conversion.utils.sensor.lidar_index_registry import CARLALidarIndex -from py123d.datatypes.maps.abstract_map import AbstractMap, MapLayer -from py123d.datatypes.maps.abstract_map_objects import AbstractLane -from py123d.datatypes.maps.gpkg.gpkg_map import get_global_map_api -from py123d.datatypes.scene.scene_metadata import LogMetadata -from py123d.datatypes.sensors.camera.pinhole_camera import ( - PinholeCameraMetadata, - PinholeCameraType, - camera_metadata_dict_to_json, -) -from py123d.datatypes.sensors.lidar.lidar import LiDARMetadata, LiDARType, lidar_metadata_dict_to_json -from 
py123d.datatypes.vehicle_state.ego_state import EgoStateSE3Index -from py123d.datatypes.vehicle_state.vehicle_parameters import get_carla_lincoln_mkz_2020_parameters -from py123d.geometry import BoundingBoxSE3Index, Point2D, Point3D, Vector3DIndex - -AVAILABLE_CARLA_MAP_LOCATIONS: Final[List[str]] = [ - "Town01", # A small, simple town with a river and several bridges. - "Town02", # A small simple town with a mixture of residential and commercial buildings. - "Town03", # A larger, urban map with a roundabout and large junctions. - "Town04", # A small town embedded in the mountains with a special "figure of 8" infinite highway. - "Town05", # Squared-grid town with cross junctions and a bridge. It has multiple lanes per direction. Useful to perform lane changes. - "Town06", # Long many lane highways with many highway entrances and exits. It also has a Michigan left. - "Town07", # A rural environment with narrow roads, corn, barns and hardly any traffic lights. - "Town10HD", # A downtown urban environment with skyscrapers, residential buildings and an ocean promenade. - "Town11", # A Large Map that is undecorated. Serves as a proof of concept for the Large Maps feature. - "Town12", # A Large Map with numerous different regions, including high-rise, residential and rural environments. - "Town13", # ??? - "Town15", # ??? -] - -CARLA_DT: Final[float] = 0.1 # [s] -TRAFFIC_LIGHT_ASSIGNMENT_DISTANCE: Final[float] = 1.0 # [m] -SORT_BY_TIMESTAMP: Final[bool] = True - -CARLA_CAMERA_TYPES = {PinholeCameraType.CAM_F0} - -# TODO: remove envinronment variable dependency -CARLA_DATA_ROOT: Final[Path] = Path(os.environ["CARLA_DATA_ROOT"]) - - -# TODO: Refactor this files and convert coordinate systems more elegantly. -# NOTE: Currently some coordinate transforms from Unreal to ISO 8855 are done in the data agent of PDM-Lite. -# Ideally a general function to transform poses and points between coordinate systems would be nice - - -def _load_json_gz(path: Path) -> Dict: - """Helper function to load a gzipped JSON file.""" - with gzip.open(path, "rt") as f: - data = json.load(f) - return data - - -def create_token(input_data: str) -> str: - # TODO: Refactor this function. - # TODO: Add a general function to create tokens from arbitrary data. - if isinstance(input_data, str): - input_data = input_data.encode("utf-8") - - hash_obj = hashlib.sha256(input_data) - return hash_obj.hexdigest()[:16] - - -class CarlaDataConverter(AbstractDataConverter): - - def __init__( - self, - splits: List[str], - log_path: Union[Path, str], - dataset_converter_config: DatasetConverterConfig, - ) -> None: - super().__init__(dataset_converter_config) - for split in splits: - assert ( - split in self.get_available_splits() - ), f"Split {split} is not available. 
Available splits: {self.available_splits}" - - self._splits: str = splits - self._log_path: Path = Path(log_path) - self._log_paths_per_split: Dict[str, List[Path]] = self._collect_log_paths() - - def _collect_log_paths(self) -> Dict[str, List[Path]]: - # TODO: fix "carla" split placeholder and add support for other splits - log_paths_per_split: Dict[str, List[Path]] = {} - log_paths = [ - log_path for log_path in self._log_path.iterdir() if log_path.is_dir() and log_path.stem != "sensor_blobs" - ] - log_paths_per_split["carla"] = log_paths - return log_paths_per_split - - def get_available_splits(self) -> List[str]: - """Returns a list of available raw data types.""" - return ["carla"] # TODO: fix the placeholder - - def convert_maps(self, worker: WorkerPool) -> None: - worker_map( - worker, - partial( - convert_carla_map_to_gpkg, - dataset_converter_config=self.dataset_converter_config, - ), - list(AVAILABLE_CARLA_MAP_LOCATIONS), - ) - - def convert_logs(self, worker: WorkerPool) -> None: - - log_args = [ - {"log_path": log_path, "split": split} - for split, log_paths in self._log_paths_per_split.items() - for log_path in log_paths - ] - - worker_map( - worker, - partial(convert_carla_log_to_arrow, dataset_converter_config=self.dataset_converter_config), - log_args, - ) - - -def convert_carla_map_to_gpkg(locations: List[str], dataset_converter_config: DatasetConverterConfig) -> List[Any]: - - # TODO: add to config - _interpolation_step_size = 0.5 # [m] - _connection_distance_threshold = 0.1 # [m] - for location in locations: - map_path = dataset_converter_config.output_path / "maps" / f"carla_{location.lower()}.gpkg" - if dataset_converter_config.force_map_conversion or not map_path.exists(): - map_path.unlink(missing_ok=True) - assert os.environ["CARLA_ROOT"] is not None - CARLA_ROOT = Path(os.environ["CARLA_ROOT"]) - - if location not in ["Town11", "Town12", "Town13", "Town15"]: - carla_maps_root = CARLA_ROOT / "CarlaUE4" / "Content" / "Carla" / "Maps" / "OpenDrive" - carla_map_path = carla_maps_root / f"{location}.xodr" - else: - carla_map_path = ( - CARLA_ROOT / "CarlaUE4" / "Content" / "Carla" / "Maps" / location / "OpenDrive" / f"{location}.xodr" - ) - - convert_from_xodr( - carla_map_path, - f"carla_{location.lower()}", - _interpolation_step_size, - _connection_distance_threshold, - ) - return [] - - -def convert_carla_log_to_arrow( - args: List[Dict[str, Union[List[str], List[Path]]]], dataset_converter_config: DatasetConverterConfig -) -> List[Any]: - def convert_log_internal(args: List[Dict[str, Union[List[str], List[Path]]]]) -> None: - for log_info in args: - log_path: Path = log_info["log_path"] - split: str = log_info["split"] - output_path: Path = dataset_converter_config.output_path - - log_file_path = output_path / split / f"{log_path.stem}.arrow" - - if dataset_converter_config.force_log_conversion or not log_file_path.exists(): - log_file_path.unlink(missing_ok=True) - if not log_file_path.parent.exists(): - log_file_path.parent.mkdir(parents=True, exist_ok=True) - - bounding_box_paths = sorted([bb_path for bb_path in (log_path / "boxes").iterdir()]) - first_log_dict = _load_json_gz(bounding_box_paths[0]) - location = first_log_dict["location"] - map_api = get_global_map_api("carla", location) - - metadata = _get_metadata(location, str(log_path.stem)) - vehicle_parameters = get_carla_lincoln_mkz_2020_parameters() - camera_metadata = get_carla_camera_metadata(first_log_dict) - lidar_metadata = get_carla_lidar_metadata(first_log_dict) - - schema_column_list = [ - 
("token", pa.string()), - ("timestamp", pa.int64()), - ("detections_state", pa.list_(pa.list_(pa.float64(), len(BoundingBoxSE3Index)))), - ("detections_velocity", pa.list_(pa.list_(pa.float64(), len(Vector3DIndex)))), - ("detections_token", pa.list_(pa.string())), - ("detections_type", pa.list_(pa.int16())), - ("ego_states", pa.list_(pa.float64(), len(EgoStateSE3Index))), - ("traffic_light_ids", pa.list_(pa.int64())), - ("traffic_light_types", pa.list_(pa.int16())), - ("scenario_tag", pa.list_(pa.string())), - ("route_lane_group_ids", pa.list_(pa.int64())), - ] - if dataset_converter_config.lidar_store_option is not None: - for lidar_type in lidar_metadata.keys(): - if dataset_converter_config.lidar_store_option == "path": - schema_column_list.append((lidar_type.serialize(), pa.string())) - elif dataset_converter_config.lidar_store_option == "binary": - raise NotImplementedError("Binary lidar storage is not implemented.") - - # TODO: Adjust how cameras are added - if dataset_converter_config.camera_store_option is not None: - for camera_type in camera_metadata.keys(): - if dataset_converter_config.camera_store_option == "path": - schema_column_list.append((camera_type.serialize(), pa.string())) - schema_column_list.append( - (f"{camera_type.serialize()}_extrinsic", pa.list_(pa.float64(), 16)) - ) - elif dataset_converter_config.camera_store_option == "binary": - raise NotImplementedError("Binary camera storage is not implemented.") - - recording_schema = pa.schema(schema_column_list) - recording_schema = recording_schema.with_metadata( - { - "log_metadata": json.dumps(asdict(metadata)), - "vehicle_parameters": json.dumps(asdict(vehicle_parameters)), - "camera_metadata": camera_metadata_dict_to_json(camera_metadata), - "lidar_metadata": lidar_metadata_dict_to_json(lidar_metadata), - } - ) - - _write_recording_table( - bounding_box_paths, - map_api, - recording_schema, - log_file_path, - dataset_converter_config, - ) - - gc.collect() - - convert_log_internal(args) - gc.collect() - return [] - - -def _get_metadata(location: str, log_name: str) -> LogMetadata: - return LogMetadata( - dataset="carla", - log_name=log_name, - location=location, - timestep_seconds=CARLA_DT, - map_has_z=True, - ) - - -def get_carla_camera_metadata(first_log_dict: Dict[str, Any]) -> Dict[PinholeCameraType, PinholeCameraMetadata]: - - # FIXME: This is a placeholder function to return camera metadata. 
- - intrinsic = np.array( - first_log_dict[f"{PinholeCameraType.CAM_F0.serialize()}_intrinsics"], - dtype=np.float64, - ) - camera_metadata = { - PinholeCameraType.CAM_F0: PinholeCameraMetadata( - camera_type=PinholeCameraType.CAM_F0, - width=1024, - height=512, - intrinsics=intrinsic, - distortion=np.zeros((5,), dtype=np.float64), - ) - } - return camera_metadata - - -def get_carla_lidar_metadata(first_log_dict: Dict[str, Any]) -> Dict[LiDARType, LiDARMetadata]: - - # TODO: add lidar extrinsic - lidar_metadata = { - LiDARType.LIDAR_TOP: LiDARMetadata( - lidar_type=LiDARType.LIDAR_TOP, - lidar_index=CARLALidarIndex, - extrinsic=None, - ) - } - return lidar_metadata - - -def _write_recording_table( - bounding_box_paths: List[Path], - map_api: AbstractMap, - recording_schema: pa.Schema, - log_file_path: Path, - dataset_converter_config: DatasetConverterConfig, -) -> pa.Table: - # TODO: Refactor this function to be more readable - log_name = str(bounding_box_paths[0].parent.parent.stem) - - with pa.OSFile(str(log_file_path), "wb") as sink: - with pa.ipc.new_file(sink, recording_schema) as writer: - for box_path in bounding_box_paths: - sample_name = box_path.stem.split(".")[0] - - data = _load_json_gz(box_path) - traffic_light_ids, traffic_light_types = _extract_traffic_light_data( - data["traffic_light_states"], data["traffic_light_positions"], map_api - ) - route_lane_group_ids = _extract_route_lane_group_ids(data["route"], map_api) if "route" in data else [] - - row_data = { - "token": [create_token(f"{log_name}_{box_path.stem}")], - "timestamp": [data["timestamp"]], - "detections_state": [_extract_detection_states(data["detections_state"])], - "detections_velocity": [ - ( - data["detections_velocity"] - if "detections_velocity" in data - else np.zeros((len(data["detections_types"]), 3)).tolist() - ) - ], - "detections_token": [data["detections_token"]], - "detections_type": [data["detections_types"]], - "ego_states": [_extract_ego_vehicle_state(data["ego_state"])], - "traffic_light_ids": [traffic_light_ids], - "traffic_light_types": [traffic_light_types], - "scenario_tag": [data["scenario_tag"]], - "route_lane_group_ids": [route_lane_group_ids], - } - if dataset_converter_config.lidar_store_option is not None: - lidar_data_dict = _extract_lidar(log_name, sample_name, dataset_converter_config) - for lidar_type, lidar_data in lidar_data_dict.items(): - row_data[lidar_type.serialize()] = [lidar_data] - - if dataset_converter_config.camera_store_option is not None: - camera_data_dict = _extract_cameras(data, log_name, sample_name, dataset_converter_config) - for camera_type, camera_data in camera_data_dict.items(): - if camera_data is not None: - row_data[camera_type.serialize()] = [camera_data[0]] - row_data[f"{camera_type.serialize()}_extrinsic"] = [camera_data[1]] - else: - row_data[camera_type.serialize()] = [None] - - batch = pa.record_batch(row_data, schema=recording_schema) - writer.write_batch(batch) - del batch, row_data - - if SORT_BY_TIMESTAMP: - recording_table = open_arrow_table(log_file_path) - recording_table = recording_table.sort_by([("timestamp", "ascending")]) - write_arrow_table(recording_table, log_file_path) - - -def _extract_ego_vehicle_state(ego_state_list: List[float]) -> List[float]: - # NOTE: This function used to convert coordinate systems, but it is not needed anymore. - assert len(ego_state_list) == len(EgoStateSE3Index), "Ego state list has incorrect length." 
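`EgoStateSE3Index` and `BoundingBoxSE3Index` are not defined in this patch; the length asserts above and below rely on an IntEnum-style index convention that looks roughly like the sketch below (the field names are hypothetical, the real indices differ):

    from enum import IntEnum

    class ToyStateIndex(IntEnum):  # hypothetical fields
        X = 0
        Y = 1
        YAW = 2

    state = [1.0, 2.0, 0.5]
    assert len(state) == len(ToyStateIndex), "State list has incorrect length."
    yaw = state[ToyStateIndex.YAW]  # named, order-stable access into a flat vector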
- return ego_state_list - - -def _extract_detection_states(detection_states: List[List[float]]) -> List[List[float]]: - # NOTE: This function used to convert coordinate systems, but it is not needed anymore. - detection_state_converted = [] - for detection_state in detection_states: - assert len(detection_state) == len(BoundingBoxSE3Index), "Detection state has incorrect length." - detection_state_converted.append(detection_state) - return detection_state_converted - - -def _extract_traffic_light_data( - traffic_light_states: List[int], traffic_light_positions: List[List[float]], map_api: AbstractMap -) -> Tuple[List[int], List[int]]: - traffic_light_types: List[int] = [] - traffic_light_ids: List[int] = [] - for traffic_light_state, traffic_light_waypoints in zip(traffic_light_states, traffic_light_positions): - for traffic_light_waypoint in traffic_light_waypoints: - point_3d = Point3D(*traffic_light_waypoint) - nearby_lanes = map_api.get_proximal_map_objects( - point_3d, TRAFFIC_LIGHT_ASSIGNMENT_DISTANCE, [MapLayer.LANE] - )[MapLayer.LANE] - - for lane in nearby_lanes: - lane: AbstractLane - lane_start_point = lane.centerline.array[0] - distance_to_lane_start = np.linalg.norm(lane_start_point - point_3d.array) - if distance_to_lane_start < TRAFFIC_LIGHT_ASSIGNMENT_DISTANCE: - traffic_light_ids.append(int(lane.object_id)) - traffic_light_types.append(traffic_light_state) - return traffic_light_ids, traffic_light_types - - -def _extract_route_lane_group_ids(route: List[List[float]], map_api: AbstractMap) -> List[int]: - - # FIXME: Carla route is very buggy. No check if lanes are connected. - route = np.array(route, dtype=np.float64) - route[..., 1] = -route[..., 1] # Unreal coordinate system to ISO 8855 - route = route[::2] - - route_lane_group_ids: List[int] = [] - - for point in route[:200]: - point_2d = Point2D(point[0], point[1]) - nearby_lane_groups = map_api.query(point_2d.shapely_point, [MapLayer.LANE_GROUP], predicate="intersects")[ - MapLayer.LANE_GROUP - ] - if len(nearby_lane_groups) == 0: - continue - elif len(nearby_lane_groups) > 1: - possible_lane_group_ids = [lane_group.object_id for lane_group in nearby_lane_groups] - if len(route_lane_group_ids) > 0: - prev_lane_group_id = route_lane_group_ids[-1] - if prev_lane_group_id in possible_lane_group_ids: - continue - else: - # TODO: Choose with least heading difference? - route_lane_group_ids.append(int(nearby_lane_groups[0].object_id)) - else: - # TODO: Choose with least heading difference? 
- route_lane_group_ids.append(int(nearby_lane_groups[0].object_id)) - elif len(nearby_lane_groups) == 1: - route_lane_group_ids.append(int(nearby_lane_groups[0].object_id)) - - return list(dict.fromkeys(route_lane_group_ids)) # Remove duplicates while preserving order - - -def _extract_cameras( - data: Dict[str, Any], log_name: str, sample_name: str, dataset_converter_config: DatasetConverterConfig -) -> Dict[PinholeCameraType, Optional[str]]: - camera_dict: Dict[str, Union[str, bytes]] = {} - for camera_type in CARLA_CAMERA_TYPES: - camera_full_path = CARLA_DATA_ROOT / "sensor_blobs" / log_name / camera_type.name / f"{sample_name}.jpg" - if camera_full_path.exists(): - if dataset_converter_config.camera_store_option == "path": - path = f"{log_name}/{camera_type.name}/{sample_name}.jpg" - extrinsics = data.get(f"{camera_type.serialize()}_transform", None) - camera_dict[camera_type] = path, ( - np.array(extrinsics, dtype=np.float64).flatten() if extrinsics is not None else None - ) - - elif dataset_converter_config.camera_store_option == "binary": - raise NotImplementedError("Binary camera storage is not implemented.") - else: - camera_dict[camera_type] = None - return camera_dict - - -def _extract_lidar( - log_name: str, sample_name: str, dataset_converter_config: DatasetConverterConfig -) -> Dict[LiDARType, Optional[str]]: - - lidar: Optional[str] = None - lidar_full_path = CARLA_DATA_ROOT / "sensor_blobs" / log_name / "lidar" / f"{sample_name}.npy" - if lidar_full_path.exists(): - if dataset_converter_config.lidar_store_option == "path": - lidar = f"{log_name}/lidar/{sample_name}.npy" - elif dataset_converter_config.lidar_store_option == "binary": - raise NotImplementedError("Binary lidar storage is not implemented.") - else: - raise FileNotFoundError(f"LiDAR file not found: {lidar_full_path}") - - return {LiDARType.LIDAR_TOP: lidar} if lidar else None diff --git a/src/py123d/conversion/datasets/carla/carla_load_sensor.py b/src/py123d/conversion/datasets/carla/carla_load_sensor.py deleted file mode 100644 index f3d85d00..00000000 --- a/src/py123d/conversion/datasets/carla/carla_load_sensor.py +++ /dev/null @@ -1,10 +0,0 @@ -from pathlib import Path - -import numpy as np - -from py123d.datatypes.sensors.lidar.lidar import LiDAR, LiDARMetadata - - -def load_carla_lidar_from_path(filepath: Path, lidar_metadata: LiDARMetadata) -> LiDAR: - assert filepath.exists(), f"LiDAR file not found: {filepath}" - return LiDAR(metadata=lidar_metadata, point_cloud=np.load(filepath)) diff --git a/src/py123d/conversion/map_writer/gpkg_map_writer.py b/src/py123d/conversion/map_writer/gpkg_map_writer.py index 8190d61d..5e68a411 100644 --- a/src/py123d/conversion/map_writer/gpkg_map_writer.py +++ b/src/py123d/conversion/map_writer/gpkg_map_writer.py @@ -8,6 +8,7 @@ from py123d.conversion.dataset_converter_config import DatasetConverterConfig from py123d.conversion.map_writer.abstract_map_writer import AbstractMapWriter +from py123d.conversion.map_writer.utils.gpkg_utils import IntIDMapping from py123d.datatypes.maps.abstract_map_objects import ( AbstractCarpark, AbstractCrosswalk, @@ -32,9 +33,10 @@ class GPKGMapWriter(AbstractMapWriter): """Abstract base class for map writers.""" - def __init__(self, maps_root: Union[str, Path]) -> None: + def __init__(self, maps_root: Union[str, Path], remap_ids: bool = False) -> None: self._maps_root = Path(maps_root) self._crs: str = "EPSG:4326" # WGS84 + self._remap_ids = remap_ids # Data to be written to the map for each object type self._map_data: 
Optional[Dict[MapLayer, MAP_OBJECT_DATA]] = None
@@ -129,14 +131,26 @@ def close(self) -> None:
         if not self._map_file.parent.exists():
             self._map_file.parent.mkdir(parents=True, exist_ok=True)
+        # Accumulate GeoDataFrames for each map layer
+        map_gdf: Dict[MapLayer, gpd.GeoDataFrame] = {}
         for map_layer, layer_data in self._map_data.items():
             if len(layer_data["id"]) > 0:
                 df = pd.DataFrame(layer_data)
-                gdf = gpd.GeoDataFrame(df, geometry="geometry", crs=self._crs)
+                map_gdf[map_layer] = gpd.GeoDataFrame(df, geometry="geometry", crs=self._crs)
             else:
-                gdf = gpd.GeoDataFrame({"id": [], "geometry": []}, geometry="geometry", crs=self._crs)
+                map_gdf[map_layer] = gpd.GeoDataFrame(
+                    {"id": [], "geometry": []}, geometry="geometry", crs=self._crs
+                )
+
+        # Optionally remap string IDs to integers
+        if self._remap_ids:
+            _map_ids_to_integer(map_gdf)
+
+        # Write each map layer to the GPKG file
+        for map_layer, gdf in map_gdf.items():
             gdf.to_file(self._map_file, driver="GPKG", layer=map_layer.serialize())

+        # Write map metadata as a separate layer
         metadata_df = gpd.GeoDataFrame(pd.DataFrame([self._map_metadata.to_dict()]))
         metadata_df.to_file(self._map_file, driver="GPKG", layer="map_metadata")
@@ -158,7 +172,7 @@ def _write_surface_layer(self, layer: MapLayer, surface_object: AbstractSurfaceM
         """
         self._assert_initialized()
         self._map_data[layer]["id"].append(surface_object.object_id)
-        # NOTE: if outline outline has a z-coordinate, we store it, an otherwise infer from the geometry
+        # NOTE: if the outline has a z-coordinate, we store it; otherwise we infer it from the polygon geometry
         if isinstance(surface_object.outline, Polyline3D):
             self._map_data[layer]["outline"].append(surface_object.outline.linestring)
             self._map_data[layer]["geometry"].append(surface_object.shapely_polygon)
@@ -172,3 +186,47 @@ def _write_line_layer(self, layer: MapLayer, line_object: AbstractLineMapObject)
         self._assert_initialized()
         self._map_data[layer]["id"].append(line_object.object_id)
         self._map_data[layer]["geometry"].append(line_object.shapely_linestring)
+
+
+def _map_ids_to_integer(
+    map_dfs: Dict[MapLayer, gpd.GeoDataFrame],
+) -> None:
+
+    # initialize id mappings
+    lane_id_mapping = IntIDMapping.from_series(map_dfs[MapLayer.LANE]["id"])
+    walkway_id_mapping = IntIDMapping.from_series(map_dfs[MapLayer.WALKWAY]["id"])
+    carpark_id_mapping = IntIDMapping.from_series(map_dfs[MapLayer.CARPARK]["id"])
+    generic_drivable_id_mapping = IntIDMapping.from_series(map_dfs[MapLayer.GENERIC_DRIVABLE]["id"])
+    lane_group_id_mapping = IntIDMapping.from_series(map_dfs[MapLayer.LANE_GROUP]["id"])
+
+    # Adjust cross references in map_dfs[MapLayer.LANE] and map_dfs[MapLayer.LANE_GROUP]
+    map_dfs[MapLayer.LANE]["lane_group_id"] = map_dfs[MapLayer.LANE]["lane_group_id"].map(
+        lane_group_id_mapping.str_to_int
+    )
+    map_dfs[MapLayer.LANE_GROUP]["lane_ids"] = map_dfs[MapLayer.LANE_GROUP]["lane_ids"].apply(
+        lambda x: lane_id_mapping.map_list(x)
+    )
+
+    # Adjust predecessor/successor in map_dfs[MapLayer.LANE] and map_dfs[MapLayer.LANE_GROUP]
+    for column in ["predecessor_ids", "successor_ids"]:
+        map_dfs[MapLayer.LANE][column] = map_dfs[MapLayer.LANE][column].apply(lambda x: lane_id_mapping.map_list(x))
+        map_dfs[MapLayer.LANE_GROUP][column] = map_dfs[MapLayer.LANE_GROUP][column].apply(
+            lambda x: lane_group_id_mapping.map_list(x)
+        )
+
+    for column in ["left_lane_id", "right_lane_id"]:
+        map_dfs[MapLayer.LANE][column] = map_dfs[MapLayer.LANE][column].apply(
+            lambda x: str(lane_id_mapping.str_to_int[x]) if pd.notna(x) and x is not None
else x + ) + + map_dfs[MapLayer.LANE]["id"] = map_dfs[MapLayer.LANE]["id"].map(lane_id_mapping.str_to_int) + map_dfs[MapLayer.WALKWAY]["id"] = map_dfs[MapLayer.WALKWAY]["id"].map(walkway_id_mapping.str_to_int) + map_dfs[MapLayer.CARPARK]["id"] = map_dfs[MapLayer.CARPARK]["id"].map(carpark_id_mapping.str_to_int) + map_dfs[MapLayer.GENERIC_DRIVABLE]["id"] = map_dfs[MapLayer.GENERIC_DRIVABLE]["id"].map( + generic_drivable_id_mapping.str_to_int + ) + map_dfs[MapLayer.LANE_GROUP]["id"] = map_dfs[MapLayer.LANE_GROUP]["id"].map(lane_group_id_mapping.str_to_int) + + map_dfs[MapLayer.INTERSECTION]["lane_group_ids"] = map_dfs[MapLayer.INTERSECTION]["lane_group_ids"].apply( + lambda x: lane_group_id_mapping.map_list(x) + ) diff --git a/src/py123d/conversion/utils/map_utils/opendrive/utils/id_mapping.py b/src/py123d/conversion/map_writer/utils/gpkg_utils.py similarity index 62% rename from src/py123d/conversion/utils/map_utils/opendrive/utils/id_mapping.py rename to src/py123d/conversion/map_writer/utils/gpkg_utils.py index 78c0572f..6ad4e559 100644 --- a/src/py123d/conversion/utils/map_utils/opendrive/utils/id_mapping.py +++ b/src/py123d/conversion/map_writer/utils/gpkg_utils.py @@ -24,3 +24,18 @@ def map_list(self, id_list: Optional[List[str]]) -> pd.Series: if id_list is None: return [] return [self.str_to_int.get(id_str, -1) for id_str in id_list] + + +class IncrementalIntIDMapping: + + def __init__(self): + self.str_to_int: Dict[str, int] = {} + self.int_to_str: Dict[int, str] = {} + self.next_id: int = 0 + + def get_int_id(self, str_id: str) -> int: + if str_id not in self.str_to_int: + self.str_to_int[str_id] = self.next_id + self.int_to_str[self.next_id] = str_id + self.next_id += 1 + return self.str_to_int[str_id] diff --git a/src/py123d/conversion/utils/map_utils/opendrive/__init__ copy.py b/src/py123d/conversion/utils/map_utils/opendrive/__init__ copy.py deleted file mode 100644 index e69de29b..00000000 diff --git a/src/py123d/conversion/utils/map_utils/opendrive/opendrive_map_conversion.py b/src/py123d/conversion/utils/map_utils/opendrive/opendrive_map_conversion.py index c904efe2..eb90efc0 100644 --- a/src/py123d/conversion/utils/map_utils/opendrive/opendrive_map_conversion.py +++ b/src/py123d/conversion/utils/map_utils/opendrive/opendrive_map_conversion.py @@ -1,40 +1,45 @@ import logging -import os -import warnings from pathlib import Path from typing import Dict, Final, List -import geopandas as gpd -import numpy as np -import pandas as pd import shapely from shapely.ops import polygonize, unary_union +from py123d.conversion.map_writer.abstract_map_writer import AbstractMapWriter from py123d.conversion.utils.map_utils.opendrive.parser.opendrive import Junction, OpenDrive from py123d.conversion.utils.map_utils.opendrive.utils.collection import collect_element_helpers -from py123d.conversion.utils.map_utils.opendrive.utils.id_mapping import IntIDMapping from py123d.conversion.utils.map_utils.opendrive.utils.lane_helper import ( OpenDriveLaneGroupHelper, OpenDriveLaneHelper, ) -from py123d.conversion.utils.map_utils.opendrive.utils.objects_helper import ( - OpenDriveObjectHelper, -) +from py123d.conversion.utils.map_utils.opendrive.utils.objects_helper import OpenDriveObjectHelper from py123d.conversion.utils.map_utils.road_edge.road_edge_2d_utils import split_line_geometry_by_max_length -from py123d.conversion.utils.map_utils.road_edge.road_edge_3d_utils import get_road_edges_3d_from_gdf -from py123d.datatypes.maps.map_datatypes import MapLayer, RoadEdgeType, RoadLineType +from 
py123d.conversion.utils.map_utils.road_edge.road_edge_3d_utils import get_road_edges_3d_from_drivable_surfaces
+
+from py123d.datatypes.maps.cache.cache_map_objects import (
+    CacheCarpark,
+    CacheCrosswalk,
+    CacheGenericDrivable,
+    CacheIntersection,
+    CacheLane,
+    CacheLaneGroup,
+    CacheRoadEdge,
+    CacheRoadLine,
+    CacheWalkway,
+)
+from py123d.datatypes.maps.map_datatypes import RoadEdgeType, RoadLineType
+from py123d.geometry.polyline import Polyline3D

 logger = logging.getLogger(__name__)

-PY123D_MAPS_ROOT = Path(os.environ.get("PY123D_MAPS_ROOT"))
 MAX_ROAD_EDGE_LENGTH: Final[float] = 100.0  # [m]

-def convert_from_xodr(
+def convert_xodr_map(
     xordr_file: Path,
-    location: str,
-    interpolation_step_size: float,
-    connection_distance_threshold: float,
+    map_writer: AbstractMapWriter,
+    interpolation_step_size: float = 0.5,
+    connection_distance_threshold: float = 0.1,
 ) -> None:

     opendrive = OpenDrive.parse_from_file(xordr_file)
@@ -43,216 +48,139 @@ def convert_from_xodr(
         opendrive, interpolation_step_size, connection_distance_threshold
     )

-    # Collect data frames and store
-    dataframes: Dict[MapLayer, gpd.GeoDataFrame] = {}
-    dataframes[MapLayer.LANE] = _extract_lane_dataframe(lane_group_helper_dict)
-    dataframes[MapLayer.LANE_GROUP] = _extract_lane_group_dataframe(lane_group_helper_dict)
-    dataframes[MapLayer.WALKWAY] = _extract_walkways_dataframe(lane_helper_dict)
-    dataframes[MapLayer.CARPARK] = _extract_carpark_dataframe(lane_helper_dict)
-    dataframes[MapLayer.GENERIC_DRIVABLE] = _extract_generic_drivable_dataframe(lane_helper_dict)
-    dataframes[MapLayer.INTERSECTION] = _extract_intersections_dataframe(junction_dict, lane_group_helper_dict)
-    dataframes[MapLayer.CROSSWALK] = _extract_crosswalk_dataframe(object_helper_dict)
-
-    _convert_ids_to_int(
-        dataframes[MapLayer.LANE],
-        dataframes[MapLayer.WALKWAY],
-        dataframes[MapLayer.CARPARK],
-        dataframes[MapLayer.GENERIC_DRIVABLE],
-        dataframes[MapLayer.LANE_GROUP],
-        dataframes[MapLayer.INTERSECTION],
-        dataframes[MapLayer.CROSSWALK],
-    )
-    dataframes[MapLayer.ROAD_EDGE] = _extract_road_edge_df(
-        dataframes[MapLayer.LANE],
-        dataframes[MapLayer.CARPARK],
-        dataframes[MapLayer.GENERIC_DRIVABLE],
-        dataframes[MapLayer.LANE_GROUP],
+    # Extract and write surface elements (kept in memory for road edge/line extraction)
+    lanes = _extract_and_write_lanes(lane_group_helper_dict, map_writer)
+    lane_groups = _extract_and_write_lane_groups(lane_group_helper_dict, map_writer)
+    car_parks = _extract_and_write_carparks(lane_helper_dict, map_writer)
+    generic_drivables = _extract_and_write_generic_drivables(lane_helper_dict, map_writer)
+
+    # Write other map elements
+    _write_walkways(lane_helper_dict, map_writer)
+    _write_intersections(junction_dict, lane_group_helper_dict, map_writer)
+    _write_crosswalks(object_helper_dict, map_writer)
+
+    # Extract polyline elements that are inferred from other road surfaces.
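Every step of `convert_xodr_map` interacts with the writer only through `write_*` calls, so the conversion can be exercised without touching disk. A hypothetical in-memory stand-in, not part of the codebase, duck-typed to the methods used here:

    from collections import defaultdict

    class InMemoryMapWriter:
        # Hypothetical test double that buckets written map objects by kind.
        def __init__(self) -> None:
            self.written = defaultdict(list)

        def __getattr__(self, name: str):
            # Routes write_lane, write_road_edge, ... into the matching bucket.
            if name.startswith("write_"):
                return lambda obj: self.written[name.removeprefix("write_")].append(obj)
            raise AttributeError(name)

Passing an instance as `map_writer` and then inspecting e.g. `writer.written["lane"]` gives a quick smoke test; the `_write_road_lines` / `_write_road_edges` steps below complete the conversion.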
+ _write_road_lines(lanes, lane_groups, map_writer) + _write_road_edges( + lanes=lanes, + lane_groups=lane_groups, + car_parks=car_parks, + generic_drivables=generic_drivables, + map_writer=map_writer, ) - dataframes[MapLayer.ROAD_LINE] = _extract_road_line_df( - dataframes[MapLayer.LANE], - dataframes[MapLayer.LANE_GROUP], - ) - map_file_name = PY123D_MAPS_ROOT / f"{location}.gpkg" - with warnings.catch_warnings(): - warnings.filterwarnings("ignore", message="'crs' was not provided") - for layer, gdf in dataframes.items(): - gdf.to_file(map_file_name, layer=layer.serialize(), driver="GPKG", mode="a") - - -def _extract_lane_dataframe(lane_group_helper_dict: Dict[str, OpenDriveLaneGroupHelper]) -> gpd.GeoDataFrame: - - ids = [] - lane_group_ids = [] - speed_limits_mps = [] - predecessor_ids = [] - successor_ids = [] - left_boundaries = [] - right_boundaries = [] - left_lane_ids = [] - right_lane_ids = [] - baseline_paths = [] - geometries = [] + +def _extract_and_write_lanes( + lane_group_helper_dict: Dict[str, OpenDriveLaneGroupHelper], + map_writer: AbstractMapWriter, +) -> List[CacheLane]: + + lanes: List[CacheLane] = [] for lane_group_helper in lane_group_helper_dict.values(): lane_group_id = lane_group_helper.lane_group_id lane_helpers = lane_group_helper.lane_helpers num_lanes = len(lane_helpers) # NOTE: Lanes are going left to right, ie. inner to outer for lane_idx, lane_helper in enumerate(lane_helpers): - ids.append(lane_helper.lane_id) - lane_group_ids.append(lane_group_id) - speed_limits_mps.append(lane_helper.speed_limit_mps) - predecessor_ids.append(lane_helper.predecessor_lane_ids) - successor_ids.append(lane_helper.successor_lane_ids) - left_boundaries.append(shapely.LineString(lane_helper.inner_polyline_3d)) - right_boundaries.append(shapely.LineString(lane_helper.outer_polyline_3d)) - baseline_paths.append(shapely.LineString(lane_helper.center_polyline_3d)) - geometries.append(lane_helper.shapely_polygon) left_lane_id = lane_helpers[lane_idx - 1].lane_id if lane_idx > 0 else None right_lane_id = lane_helpers[lane_idx + 1].lane_id if lane_idx < num_lanes - 1 else None - left_lane_ids.append(left_lane_id) - right_lane_ids.append(right_lane_id) - - data = pd.DataFrame( - { - "id": ids, - "lane_group_id": lane_group_ids, - "speed_limit_mps": speed_limits_mps, - "predecessor_ids": predecessor_ids, - "successor_ids": successor_ids, - "left_boundary": left_boundaries, - "right_boundary": right_boundaries, - "left_lane_id": left_lane_ids, - "right_lane_id": right_lane_ids, - "baseline_path": baseline_paths, - } - ) - return gpd.GeoDataFrame(data, geometry=geometries) - - -def _extract_lane_group_dataframe(lane_group_helper_dict: Dict[str, OpenDriveLaneGroupHelper]) -> gpd.GeoDataFrame: - - ids = [] - lane_ids = [] - predecessor_lane_group_ids = [] - successor_lane_group_ids = [] - intersection_ids = [] - left_boundaries = [] - right_boundaries = [] - geometries = [] - + lane = CacheLane( + object_id=lane_helper.lane_id, + lane_group_id=lane_group_id, + left_boundary=lane_helper.inner_polyline_3d, + right_boundary=lane_helper.outer_polyline_3d, + centerline=lane_helper.center_polyline_3d, + left_lane_id=left_lane_id, + right_lane_id=right_lane_id, + predecessor_ids=lane_helper.predecessor_lane_ids, + successor_ids=lane_helper.successor_lane_ids, + speed_limit_mps=lane_helper.speed_limit_mps, + outline=lane_helper.outline_polyline_3d, + geometry=None, + ) + lanes.append(lane) + map_writer.write_lane(lane) + + return lanes + + +def _extract_and_write_lane_groups( + 
lane_group_helper_dict: Dict[str, OpenDriveLaneGroupHelper], map_writer: AbstractMapWriter +) -> List[CacheLaneGroup]: + + lane_groups: List[CacheLaneGroup] = [] for lane_group_helper in lane_group_helper_dict.values(): lane_group_helper: OpenDriveLaneGroupHelper - ids.append(lane_group_helper.lane_group_id) - lane_ids.append([lane_helper.lane_id for lane_helper in lane_group_helper.lane_helpers]) - predecessor_lane_group_ids.append(lane_group_helper.predecessor_lane_group_ids) - successor_lane_group_ids.append(lane_group_helper.successor_lane_group_ids) - intersection_ids.append(lane_group_helper.junction_id) - left_boundaries.append(shapely.LineString(lane_group_helper.inner_polyline_3d)) - right_boundaries.append(shapely.LineString(lane_group_helper.outer_polyline_3d)) - geometries.append(lane_group_helper.shapely_polygon) - - data = pd.DataFrame( - { - "id": ids, - "lane_ids": lane_ids, - "predecessor_ids": predecessor_lane_group_ids, - "successor_ids": successor_lane_group_ids, - "intersection_id": intersection_ids, - "left_boundary": left_boundaries, - "right_boundary": right_boundaries, - } - ) - gdf = gpd.GeoDataFrame(data, geometry=geometries) - - return gdf - + lane_group = CacheLaneGroup( + object_id=lane_group_helper.lane_group_id, + lane_ids=[lane_helper.lane_id for lane_helper in lane_group_helper.lane_helpers], + left_boundary=lane_group_helper.inner_polyline_3d, + right_boundary=lane_group_helper.outer_polyline_3d, + intersection_id=lane_group_helper.junction_id, + predecessor_ids=lane_group_helper.predecessor_lane_group_ids, + successor_ids=lane_group_helper.successor_lane_group_ids, + outline=lane_group_helper.outline_polyline_3d, + geometry=None, + ) + lane_groups.append(lane_group) + map_writer.write_lane_group(lane_group) -def _extract_walkways_dataframe(lane_helper_dict: Dict[str, OpenDriveLaneHelper]) -> gpd.GeoDataFrame: + return lane_groups - ids = [] - left_boundaries = [] - right_boundaries = [] - outlines = [] - geometries = [] +def _write_walkways(lane_helper_dict: Dict[str, OpenDriveLaneHelper], map_writer: AbstractMapWriter) -> None: for lane_helper in lane_helper_dict.values(): if lane_helper.type == "sidewalk": - ids.append(lane_helper.lane_id) - left_boundaries.append(shapely.LineString(lane_helper.inner_polyline_3d)) - right_boundaries.append(shapely.LineString(lane_helper.outer_polyline_3d)) - outlines.append(shapely.LineString(lane_helper.outline_polyline_3d)) - geometries.append(lane_helper.shapely_polygon) - - data = pd.DataFrame( - { - "id": ids, - "left_boundary": left_boundaries, - "right_boundary": right_boundaries, - "outline": outlines, - } - ) - return gpd.GeoDataFrame(data, geometry=geometries) + map_writer.write_walkway( + CacheWalkway( + object_id=lane_helper.lane_id, + outline=lane_helper.outline_polyline_3d, + geometry=None, + ) + ) -def _extract_carpark_dataframe(lane_helper_dict: Dict[str, OpenDriveLaneHelper]) -> gpd.GeoDataFrame: - - ids = [] - left_boundaries = [] - right_boundaries = [] - outlines = [] - geometries = [] +def _extract_and_write_carparks( + lane_helper_dict: Dict[str, OpenDriveLaneHelper], map_writer: AbstractMapWriter +) -> List[CacheCarpark]: + carparks: List[CacheCarpark] = [] for lane_helper in lane_helper_dict.values(): if lane_helper.type == "parking": - ids.append(lane_helper.lane_id) - left_boundaries.append(shapely.LineString(lane_helper.inner_polyline_3d)) - right_boundaries.append(shapely.LineString(lane_helper.outer_polyline_3d)) - outlines.append(shapely.LineString(lane_helper.outline_polyline_3d)) - 
geometries.append(lane_helper.shapely_polygon) - - data = pd.DataFrame( - { - "id": ids, - "left_boundary": left_boundaries, - "right_boundary": right_boundaries, - "outline": outlines, - } - ) - return gpd.GeoDataFrame(data, geometry=geometries) + carpark = CacheCarpark( + object_id=lane_helper.lane_id, + outline=lane_helper.outline_polyline_3d, + geometry=None, + ) + carparks.append(carpark) + map_writer.write_carpark(carpark) + return carparks -def _extract_generic_drivable_dataframe(lane_helper_dict: Dict[str, OpenDriveLaneHelper]) -> gpd.GeoDataFrame: - ids = [] - left_boundaries = [] - right_boundaries = [] - outlines = [] - geometries = [] +def _extract_and_write_generic_drivables( + lane_helper_dict: Dict[str, OpenDriveLaneHelper], map_writer: AbstractMapWriter +) -> List[CacheGenericDrivable]: + generic_drivables: List[CacheGenericDrivable] = [] for lane_helper in lane_helper_dict.values(): if lane_helper.type in ["none", "border", "bidirectional"]: - ids.append(lane_helper.lane_id) - left_boundaries.append(shapely.LineString(lane_helper.inner_polyline_3d)) - right_boundaries.append(shapely.LineString(lane_helper.outer_polyline_3d)) - outlines.append(shapely.LineString(lane_helper.outline_polyline_3d)) - geometries.append(lane_helper.shapely_polygon) - - data = pd.DataFrame( - { - "id": ids, - "left_boundary": left_boundaries, - "right_boundary": left_boundaries, - "outline": outlines, - } - ) - return gpd.GeoDataFrame(data, geometry=geometries) + generic_drivable = CacheGenericDrivable( + object_id=lane_helper.lane_id, + outline=lane_helper.outline_polyline_3d, + geometry=None, + ) + generic_drivables.append(generic_drivable) + map_writer.write_generic_drivable(generic_drivable) + return generic_drivables -def _extract_intersections_dataframe( +def _write_intersections( junction_dict: Dict[str, Junction], lane_group_helper_dict: Dict[str, OpenDriveLaneGroupHelper], -) -> gpd.GeoDataFrame: + map_writer: AbstractMapWriter, +) -> None: + def _find_lane_group_helpers_with_junction_id(junction_id: int) -> List[OpenDriveLaneGroupHelper]: return [ lane_group_helper @@ -260,9 +188,6 @@ def _find_lane_group_helpers_with_junction_id(junction_id: int) -> List[OpenDriv if lane_group_helper.junction_id == junction_id ] - ids = [] - lane_group_ids = [] - geometries = [] for junction in junction_dict.values(): lane_group_helpers = _find_lane_group_helpers_with_junction_id(junction.id) lane_group_ids_ = [lane_group_helper.lane_group_id for lane_group_helper in lane_group_helpers] @@ -270,145 +195,124 @@ def _find_lane_group_helpers_with_junction_id(junction_id: int) -> List[OpenDriv logger.debug(f"Skipping Junction {junction.id} without lane groups!") continue + # TODO @DanielDauner: Create a method that extracts 3D outlines of intersections. 
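The TODO above could reuse the nearest-z idea from `road_edge_3d_utils` further below: take the 2D exterior of the merged intersection polygon and copy heights from the closest lane-group boundary points. A sketch with hypothetical inputs (a 2D shapely polygon plus an (M, 3) array of boundary points):

    import numpy as np
    from shapely.geometry import Polygon

    def lift_outline_to_3d(polygon_2d: Polygon, boundary_points_3d: np.ndarray) -> np.ndarray:
        # For each exterior vertex, copy z from the nearest (in 2D) boundary point.
        exterior = np.asarray(polygon_2d.exterior.coords, dtype=np.float64)
        outline_3d = np.zeros((len(exterior), 3), dtype=np.float64)
        outline_3d[:, :2] = exterior[:, :2]
        for idx, xy in enumerate(exterior[:, :2]):
            squared_dists = np.sum((boundary_points_3d[:, :2] - xy) ** 2, axis=1)
            outline_3d[idx, 2] = boundary_points_3d[np.argmin(squared_dists), 2]
        return outline_3d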
polygon = extract_exteriors_polygon(lane_group_helpers) - ids.append(junction.id) - lane_group_ids.append(lane_group_ids_) - geometries.append(polygon) - - data = pd.DataFrame({"id": ids, "lane_group_ids": lane_group_ids}) - return gpd.GeoDataFrame(data, geometry=geometries) + map_writer.write_intersection( + CacheIntersection( + object_id=junction.id, + lane_group_ids=lane_group_ids_, + outline=None, + geometry=polygon, + ) + ) -def _extract_crosswalk_dataframe(object_helper_dict: Dict[int, OpenDriveObjectHelper]) -> gpd.GeoDataFrame: - ids = [] - outlines = [] - geometries = [] +def _write_crosswalks(object_helper_dict: Dict[int, OpenDriveObjectHelper], map_writer: AbstractMapWriter) -> None: for object_helper in object_helper_dict.values(): - ids.append(object_helper.object_id) - outlines.append(shapely.LineString(object_helper.outline_3d)) - geometries.append(object_helper.shapely_polygon) - - data = pd.DataFrame({"id": ids, "outline": outlines}) - return gpd.GeoDataFrame(data, geometry=geometries) - - -def _convert_ids_to_int( - lane_df: gpd.GeoDataFrame, - walkways_df: gpd.GeoDataFrame, - carpark_df: gpd.GeoDataFrame, - generic_drivable_area_df: gpd.GeoDataFrame, - lane_group_df: gpd.GeoDataFrame, - intersections_df: gpd.GeoDataFrame, - crosswalk_df: gpd.GeoDataFrame, -) -> None: - - # NOTE: intersection and crosswalk ids are already integers - - # initialize id mappings - lane_id_mapping = IntIDMapping.from_series(lane_df["id"]) - walkway_id_mapping = IntIDMapping.from_series(walkways_df["id"]) - carpark_id_mapping = IntIDMapping.from_series(carpark_df["id"]) - generic_drivable_id_mapping = IntIDMapping.from_series(generic_drivable_area_df["id"]) - lane_group_id_mapping = IntIDMapping.from_series(lane_group_df["id"]) - - # Adjust cross reference in lane_df and lane_group_df - lane_df["lane_group_id"] = lane_df["lane_group_id"].map(lane_group_id_mapping.str_to_int) - lane_group_df["lane_ids"] = lane_group_df["lane_ids"].apply(lambda x: lane_id_mapping.map_list(x)) - - # Adjust predecessor/successor in lane_df and lane_group_df - for column in ["predecessor_ids", "successor_ids"]: - lane_df[column] = lane_df[column].apply(lambda x: lane_id_mapping.map_list(x)) - lane_group_df[column] = lane_group_df[column].apply(lambda x: lane_group_id_mapping.map_list(x)) - - for column in ["left_lane_id", "right_lane_id"]: - lane_df[column] = lane_df[column].apply( - lambda x: str(lane_id_mapping.str_to_int[x]) if pd.notna(x) and x is not None else x + map_writer.write_crosswalk( + CacheCrosswalk( + object_id=object_helper.object_id, + outline=object_helper.outline_polyline_3d, + geometry=None, + ) ) - lane_df["id"] = lane_df["id"].map(lane_id_mapping.str_to_int) - walkways_df["id"] = walkways_df["id"].map(walkway_id_mapping.str_to_int) - carpark_df["id"] = carpark_df["id"].map(carpark_id_mapping.str_to_int) - generic_drivable_area_df["id"] = generic_drivable_area_df["id"].map(generic_drivable_id_mapping.str_to_int) - lane_group_df["id"] = lane_group_df["id"].map(lane_group_id_mapping.str_to_int) - intersections_df["lane_group_ids"] = intersections_df["lane_group_ids"].apply( - lambda x: lane_group_id_mapping.map_list(x) - ) +def _write_road_lines(lanes: List[CacheLane], lane_groups: List[CacheLaneGroup], map_writer: AbstractMapWriter) -> None: + # NOTE @DanielDauner: This method of extracting road lines is very simplistic and needs improvement. + # The OpenDRIVE format provides lane boundary types that could be used here. 
+ # Additionally, the logic of inferring road lines is somewhat flawed, e.g, assuming constant types/colors of lines. -def _extract_road_line_df( - lane_df: gpd.GeoDataFrame, - lane_group_df: gpd.GeoDataFrame, -) -> None: - - lane_group_on_intersection = { - lane_group_id: str(intersection_id) != "nan" - for lane_group_id, intersection_id in zip(lane_group_df.id.tolist(), lane_group_df.intersection_id.tolist()) + lane_group_on_intersection: Dict[str, bool] = { + lane_group.object_id: lane_group.intersection_id is not None for lane_group in lane_groups } - ids = [] - road_line_types = [] - geometries = [] + + ids: List[int] = [] + road_line_types: List[RoadLineType] = [] + polylines: List[Polyline3D] = [] running_id = 0 - for lane_row in lane_df.itertuples(): - on_intersection = lane_group_on_intersection.get(lane_row.lane_group_id, False) + for lane in lanes: + + on_intersection = lane_group_on_intersection.get(lane.lane_group_id, False) if on_intersection: # Skip road lines on intersections continue - if str(lane_row.right_lane_id) in ["nan", "None"]: + + if lane.right_lane_id is None: # This is a boundary lane, e.g. a border or sidewalk ids.append(running_id) - road_line_types.append(int(RoadLineType.SOLID_WHITE)) - geometries.append(lane_row.right_boundary) + road_line_types.append(RoadLineType.SOLID_WHITE) + polylines.append(lane.right_boundary) running_id += 1 else: # This is a regular lane ids.append(running_id) - road_line_types.append(int(RoadLineType.DASHED_WHITE)) - geometries.append(lane_row.right_boundary) + road_line_types.append(RoadLineType.DASHED_WHITE) + polylines.append(lane.right_boundary) running_id += 1 - if str(lane_row.left_lane_id) in ["nan", "None"]: + if lane.left_lane_id is None: # This is a boundary lane, e.g. a border or sidewalk ids.append(running_id) - road_line_types.append(int(RoadLineType.DASHED_WHITE)) - geometries.append(lane_row.left_boundary) + road_line_types.append(RoadLineType.DASHED_WHITE) + polylines.append(lane.left_boundary) running_id += 1 - data = pd.DataFrame({"id": ids, "road_line_type": road_line_types}) - return gpd.GeoDataFrame(data, geometry=geometries) + for object_id, road_line_type, polyline in zip(ids, road_line_types, polylines): + map_writer.write_road_line( + CacheRoadLine( + object_id=object_id, + road_line_type=road_line_type, + polyline=polyline, + ) + ) -def _extract_road_edge_df( - lane_df: gpd.GeoDataFrame, - carpark_df: gpd.GeoDataFrame, - generic_drivable_area_df: gpd.GeoDataFrame, - lane_group_df: gpd.GeoDataFrame, +def _write_road_edges( + lanes: List[CacheLane], + lane_groups: List[CacheLaneGroup], + car_parks: List[CacheCarpark], + generic_drivables: List[CacheGenericDrivable], + map_writer: AbstractMapWriter, ) -> None: - road_edges = get_road_edges_3d_from_gdf(lane_df, carpark_df, generic_drivable_area_df, lane_group_df) - road_edges = split_line_geometry_by_max_length(road_edges, MAX_ROAD_EDGE_LENGTH) - ids = np.arange(len(road_edges), dtype=np.int64).tolist() - # TODO @DanielDauner: Figure out if other types should/could be assigned here. 
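`split_line_geometry_by_max_length`, used just above with MAX_ROAD_EDGE_LENGTH = 100 m and re-exported with a widened signature in road_edge_2d_utils.py further below, is not shown in full in this patch. The per-geometry core of such a splitter could look like this shapely sketch, assuming even subdivision:

    import numpy as np
    from shapely.geometry import LineString
    from shapely.ops import substring

    def split_by_max_length(line: LineString, max_length_meters: float) -> list:
        # Cut into the fewest equal pieces whose length does not exceed the maximum.
        num_pieces = max(1, int(np.ceil(line.length / max_length_meters)))
        breaks = np.linspace(0.0, line.length, num_pieces + 1)
        return [substring(line, start, end) for start, end in zip(breaks[:-1], breaks[1:])]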
- road_edge_types = [int(RoadEdgeType.ROAD_EDGE_BOUNDARY)] * len(road_edges) - geometries = road_edges - return gpd.GeoDataFrame(pd.DataFrame({"id": ids, "road_edge_type": road_edge_types}), geometry=geometries) + road_edges_ = get_road_edges_3d_from_drivable_surfaces( + lanes=lanes, + lane_groups=lane_groups, + car_parks=car_parks, + generic_drivables=generic_drivables, + ) + road_edge_linestrings = split_line_geometry_by_max_length( + [road_edges.linestring for road_edges in road_edges_], MAX_ROAD_EDGE_LENGTH + ) + + running_id = 0 + for road_edge_linestring in road_edge_linestrings: + # TODO @DanielDauner: Figure out if other types should/could be assigned here. + map_writer.write_road_edge( + CacheRoadEdge( + object_id=running_id, + road_edge_type=RoadEdgeType.ROAD_EDGE_BOUNDARY, + polyline=Polyline3D.from_linestring(road_edge_linestring), + ) + ) + running_id += 1 -# TODO: move this somewhere else and improve def extract_exteriors_polygon(lane_group_helpers: List[OpenDriveLaneGroupHelper]) -> shapely.Polygon: + # TODO @DanielDauner: Needs improvement !!! + # Fails if the intersection has several non overlapping parts. + # Does not provide 3D outline, just 2D shapely polygon. + # Step 1: Extract all boundary line segments all_polygons = [] for lane_group_helper in lane_group_helpers: all_polygons.append(lane_group_helper.shapely_polygon) # Step 2: Merge all boundaries and extract the enclosed polygons - # try: merged_boundaries = unary_union(all_polygons) - # except Exception as e: - # warnings.warn(f"Topological error during polygon union: {e}") - # print([(helper.lane_group_id, poly.is_valid) for poly, helper in zip(all_polygons, lane_group_helpers)]) - # merged_boundaries = unary_union([poly for poly in all_polygons if poly.is_valid]) # Step 3: Generate polygons from the merged lines polygons = list(polygonize(merged_boundaries)) diff --git a/src/py123d/conversion/utils/map_utils/opendrive/utils/lane_helper.py b/src/py123d/conversion/utils/map_utils/opendrive/utils/lane_helper.py index 5401859d..03f2cd0d 100644 --- a/src/py123d/conversion/utils/map_utils/opendrive/utils/lane_helper.py +++ b/src/py123d/conversion/utils/map_utils/opendrive/utils/lane_helper.py @@ -15,6 +15,7 @@ lane_group_id_from_lane_id, ) from py123d.geometry import StateSE2Index +from py123d.geometry.polyline import Polyline3D, PolylineSE2 from py123d.geometry.utils.units import kmph_to_mps, mph_to_mps @@ -66,7 +67,7 @@ def _lane_section_end_mask(self) -> npt.NDArray[np.float64]: return lane_section_end_mask @cached_property - def inner_polyline_se2(self) -> npt.NDArray[np.float64]: + def inner_polyline_se2(self) -> PolylineSE2: inner_polyline = np.array( [ self.inner_boundary.interpolate_se2(self.s_inner_offset + s - self.s_range[0], lane_section_end=end) @@ -74,10 +75,11 @@ def inner_polyline_se2(self) -> npt.NDArray[np.float64]: ], dtype=np.float64, ) - return np.flip(inner_polyline, axis=0) if self.id > 0 else inner_polyline + polyline_array = np.flip(inner_polyline, axis=0) if self.id > 0 else inner_polyline + return PolylineSE2.from_array(polyline_array) @cached_property - def inner_polyline_3d(self) -> npt.NDArray[np.float64]: + def inner_polyline_3d(self) -> Polyline3D: inner_polyline = np.array( [ self.inner_boundary.interpolate_3d(self.s_inner_offset + s - self.s_range[0], lane_section_end=end) @@ -85,10 +87,11 @@ def inner_polyline_3d(self) -> npt.NDArray[np.float64]: ], dtype=np.float64, ) - return np.flip(inner_polyline, axis=0) if self.id > 0 else inner_polyline + polyline_array = 
np.flip(inner_polyline, axis=0) if self.id > 0 else inner_polyline + return Polyline3D.from_array(polyline_array) @cached_property - def outer_polyline_se2(self) -> npt.NDArray[np.float64]: + def outer_polyline_se2(self) -> PolylineSE2: outer_polyline = np.array( [ self.outer_boundary.interpolate_se2(s - self.s_range[0], lane_section_end=end) @@ -96,10 +99,11 @@ def outer_polyline_se2(self) -> npt.NDArray[np.float64]: ], dtype=np.float64, ) - return np.flip(outer_polyline, axis=0) if self.id > 0 else outer_polyline + polyline_array = np.flip(outer_polyline, axis=0) if self.id > 0 else outer_polyline + return PolylineSE2.from_array(polyline_array) @cached_property - def outer_polyline_3d(self) -> npt.NDArray[np.float64]: + def outer_polyline_3d(self) -> Polyline3D: outer_polyline = np.array( [ self.outer_boundary.interpolate_3d(s - self.s_range[0], lane_section_end=end) @@ -107,32 +111,54 @@ def outer_polyline_3d(self) -> npt.NDArray[np.float64]: ], dtype=np.float64, ) - return np.flip(outer_polyline, axis=0) if self.id > 0 else outer_polyline + polyline_array = np.flip(outer_polyline, axis=0) if self.id > 0 else outer_polyline + return Polyline3D.from_array(polyline_array) @property - def center_polyline_se2(self) -> npt.NDArray[np.float64]: - return np.concatenate([self.inner_polyline_se2[None, ...], self.outer_polyline_se2[None, ...]], axis=0).mean( - axis=0 + def center_polyline_se2(self) -> PolylineSE2: + return PolylineSE2.from_array( + np.concatenate( + [ + self.inner_polyline_se2.array[None, ...], + self.outer_polyline_se2.array[None, ...], + ], + axis=0, + ).mean(axis=0) ) @property - def center_polyline_3d(self) -> npt.NDArray[np.float64]: - return np.concatenate([self.outer_polyline_3d[None, ...], self.inner_polyline_3d[None, ...]], axis=0).mean( - axis=0 + def center_polyline_3d(self) -> Polyline3D: + return Polyline3D.from_array( + np.concatenate( + [ + self.outer_polyline_3d.array[None, ...], + self.inner_polyline_3d.array[None, ...], + ], + axis=0, + ).mean(axis=0) ) @property - def outline_polyline_3d(self) -> npt.NDArray[np.float64]: - inner_polyline = self.inner_polyline_3d[::-1] - outer_polyline = self.outer_polyline_3d - return np.concatenate([inner_polyline, outer_polyline, inner_polyline[None, 0]], axis=0, dtype=np.float64) + def outline_polyline_3d(self) -> Polyline3D: + inner_polyline = self.inner_polyline_3d.array[::-1] + outer_polyline = self.outer_polyline_3d.array + return Polyline3D.from_array( + np.concatenate( + [ + inner_polyline, + outer_polyline, + inner_polyline[None, 0], + ], + axis=0, + dtype=np.float64, + ) + ) @property def shapely_polygon(self) -> shapely.Polygon: inner_polyline = self.inner_polyline_se2[..., StateSE2Index.XY][::-1] outer_polyline = self.outer_polyline_se2[..., StateSE2Index.XY] polygon_exterior = np.concatenate([inner_polyline, outer_polyline], axis=0, dtype=np.float64) - return shapely.Polygon(polygon_exterior) @@ -172,21 +198,37 @@ def _get_outer_lane_helper(self) -> OpenDriveLaneHelper: return self.lane_helpers[outer_lane_helper_idx] @cached_property - def inner_polyline_se2(self): + def inner_polyline_se2(self) -> PolylineSE2: return self._get_inner_lane_helper().inner_polyline_se2 @cached_property - def outer_polyline_se2(self): + def outer_polyline_se2(self) -> PolylineSE2: return self._get_outer_lane_helper().outer_polyline_se2 @cached_property - def inner_polyline_3d(self): + def inner_polyline_3d(self) -> Polyline3D: return self._get_inner_lane_helper().inner_polyline_3d @cached_property - def outer_polyline_3d(self): + def 
outer_polyline_3d(self) -> Polyline3D: return self._get_outer_lane_helper().outer_polyline_3d + @property + def outline_polyline_3d(self) -> Polyline3D: + inner_polyline = self.inner_polyline_3d.array[::-1] + outer_polyline = self.outer_polyline_3d.array + return Polyline3D.from_array( + np.concatenate( + [ + inner_polyline, + outer_polyline, + inner_polyline[None, 0], + ], + axis=0, + dtype=np.float64, + ) + ) + @property def shapely_polygon(self) -> shapely.Polygon: inner_polyline = self.inner_polyline_se2[..., StateSE2Index.XY][::-1] diff --git a/src/py123d/conversion/utils/map_utils/opendrive/utils/objects_helper.py b/src/py123d/conversion/utils/map_utils/opendrive/utils/objects_helper.py index 72104899..2e0117a4 100644 --- a/src/py123d/conversion/utils/map_utils/opendrive/utils/objects_helper.py +++ b/src/py123d/conversion/utils/map_utils/opendrive/utils/objects_helper.py @@ -7,12 +7,12 @@ from py123d.conversion.utils.map_utils.opendrive.parser.objects import Object from py123d.conversion.utils.map_utils.opendrive.parser.reference import ReferenceLine -from py123d.geometry import Point2D, Point3D, Point3DIndex, StateSE2 -from py123d.geometry.transform.tranform_2d import translate_along_yaw +from py123d.geometry import Point3D, Point3DIndex, StateSE2, Vector2D +from py123d.geometry.geometry_index import StateSE2Index +from py123d.geometry.polyline import Polyline3D +from py123d.geometry.transform.transform_se2 import translate_se2_along_body_frame from py123d.geometry.utils.rotation_utils import normalize_angle -# TODO: make naming consistent with group_collections.py - @dataclass class OpenDriveObjectHelper: @@ -24,6 +24,10 @@ def __post_init__(self) -> None: assert self.outline_3d.ndim == 2 assert self.outline_3d.shape[1] == len(Point3DIndex) + @property + def outline_polyline_3d(self) -> Polyline3D: + return Polyline3D.from_array(self.outline_3d) + @property def shapely_polygon(self) -> shapely.Polygon: return shapely.geometry.Polygon(self.outline_3d[:, Point3DIndex.XY]) @@ -39,23 +43,24 @@ def get_object_helper(object: Object, reference_line: ReferenceLine) -> OpenDriv object_3d: Point3D = Point3D.from_array(reference_line.interpolate_3d(s=object.s, t=object.t)) # Adjust yaw angle from object data - object_se2.yaw = normalize_angle(object_se2.yaw + object.hdg) + # TODO: Consider adding setters to StateSE2 to make this cleaner + object_se2._array[StateSE2Index.YAW] = normalize_angle(object_se2.yaw + object.hdg) if len(object.outline) == 0: outline_3d = np.zeros((4, len(Point3DIndex)), dtype=np.float64) # Fill XY - outline_3d[0, Point3DIndex.XY] = translate_along_yaw( - object_se2, Point2D(object.length / 2.0, object.width / 2.0) + outline_3d[0, Point3DIndex.XY] = translate_se2_along_body_frame( + object_se2, Vector2D(object.length / 2.0, object.width / 2.0) ).point_2d.array - outline_3d[1, Point3DIndex.XY] = translate_along_yaw( - object_se2, Point2D(object.length / 2.0, -object.width / 2.0) + outline_3d[1, Point3DIndex.XY] = translate_se2_along_body_frame( + object_se2, Vector2D(object.length / 2.0, -object.width / 2.0) ).point_2d.array - outline_3d[2, Point3DIndex.XY] = translate_along_yaw( - object_se2, Point2D(-object.length / 2.0, -object.width / 2.0) + outline_3d[2, Point3DIndex.XY] = translate_se2_along_body_frame( + object_se2, Vector2D(-object.length / 2.0, -object.width / 2.0) ).point_2d.array - outline_3d[3, Point3DIndex.XY] = translate_along_yaw( - object_se2, Point2D(-object.length / 2.0, object.width / 2.0) + outline_3d[3, Point3DIndex.XY] = 
translate_se2_along_body_frame( + object_se2, Vector2D(-object.length / 2.0, object.width / 2.0) ).point_2d.array # Fill Z @@ -66,8 +71,8 @@ def get_object_helper(object: Object, reference_line: ReferenceLine) -> OpenDriv assert len(object.outline) > 3, f"Object outline must have at least 3 corners, got {len(object.outline)}" outline_3d = np.zeros((len(object.outline), len(Point3DIndex)), dtype=np.float64) for corner_idx, corner_local in enumerate(object.outline): - outline_3d[corner_idx, Point3DIndex.XY] = translate_along_yaw( - object_se2, Point2D(corner_local.u, corner_local.v) + outline_3d[corner_idx, Point3DIndex.XY] = translate_se2_along_body_frame( + object_se2, Vector2D(corner_local.u, corner_local.v) ).point_2d.array outline_3d[corner_idx, Point3DIndex.Z] = object_3d.z + corner_local.z object_helper = OpenDriveObjectHelper(object_id=object.id, outline_3d=outline_3d) diff --git a/src/py123d/conversion/utils/map_utils/road_edge/road_edge_2d_utils.py b/src/py123d/conversion/utils/map_utils/road_edge/road_edge_2d_utils.py index f0eb580d..3eb87d0c 100644 --- a/src/py123d/conversion/utils/map_utils/road_edge/road_edge_2d_utils.py +++ b/src/py123d/conversion/utils/map_utils/road_edge/road_edge_2d_utils.py @@ -4,6 +4,7 @@ import shapely from shapely import LinearRing, LineString, Polygon, union_all + ROAD_EDGE_BUFFER: Final[float] = 0.05 @@ -34,7 +35,7 @@ def _polygon_to_linear_rings(polygon: Polygon) -> List[LinearRing]: def split_line_geometry_by_max_length( - geometries: List[Union[LineString, LinearRing]], + geometries: Union[LineString, LinearRing, List[Union[LineString, LinearRing]]], max_length_meters: float, ) -> List[LineString]: # TODO: move somewhere more appropriate or implement in Polyline2D, PolylineSE2, etc. diff --git a/src/py123d/conversion/utils/map_utils/road_edge/road_edge_3d_utils.py b/src/py123d/conversion/utils/map_utils/road_edge/road_edge_3d_utils.py index 837abc80..990bd0b3 100644 --- a/src/py123d/conversion/utils/map_utils/road_edge/road_edge_3d_utils.py +++ b/src/py123d/conversion/utils/map_utils/road_edge/road_edge_3d_utils.py @@ -2,85 +2,109 @@ from collections import defaultdict from typing import Dict, List, Set -import geopandas as gpd -import networkx as nx import numpy as np import numpy.typing as npt import shapely -from shapely.geometry import LineString +import shapely.geometry as geom +import networkx as nx from py123d.conversion.utils.map_utils.road_edge.road_edge_2d_utils import get_road_edge_linear_rings +from py123d.datatypes.maps.abstract_map_objects import ( + AbstractCarpark, + AbstractGenericDrivable, + AbstractLane, + AbstractLaneGroup, + AbstractSurfaceMapObject, + MapObjectIDType, +) from py123d.geometry import Point3DIndex from py123d.geometry.occupancy_map import OccupancyMap2D +from py123d.geometry.polyline import Polyline3D logger = logging.getLogger(__name__) -def get_road_edges_3d_from_gdf( - lane_df: gpd.GeoDataFrame, - carpark_df: gpd.GeoDataFrame, - generic_drivable_area_df: gpd.GeoDataFrame, - lane_group_df: gpd.GeoDataFrame, -) -> List[LineString]: +def get_road_edges_3d_from_drivable_surfaces( + lanes: List[AbstractLane], + lane_groups: List[AbstractLaneGroup], + car_parks: List[AbstractCarpark], + generic_drivables: List[AbstractGenericDrivable], +) -> List[Polyline3D]: + """Generates 3D road edges from drivable surfaces, i.e., lane groups, car parks, and generic drivables. + This method merges polygons in 2D and lifts them to 3D using the boundaries/outlines of elements. 
+    Conflicting lane groups (e.g., bridges) are merged/lifted separately to ensure correct Z-values.
+
+    :param lanes: A list of lanes in the map.
+    :param lane_groups: A list of lane groups in the map.
+    :param car_parks: A list of car parks in the map.
+    :param generic_drivables: A list of generic drivable areas in the map.
+    :return: A list of 3D interpolatable polylines representing the road edges.
     """

     # 1. Find conflicting lane groups, e.g. groups of lanes that overlap in 2D but have different Z-values (bridges)
-    conflicting_lane_groups = _get_conflicting_lane_groups(lane_group_df, lane_df)
+    conflicting_lane_groups = _get_conflicting_lane_groups(lane_groups, lanes)

     # 2. Extract road edges in 2D (including conflicting lane groups)
-    drivable_polygons = (
-        lane_group_df.geometry.tolist() + carpark_df.geometry.tolist() + generic_drivable_area_df.geometry.tolist()
-    )
+    drivable_polygons: List[shapely.Polygon] = []
+    for map_surface in lane_groups + car_parks + generic_drivables:
+        map_surface: AbstractSurfaceMapObject
+        drivable_polygons.append(map_surface.shapely_polygon)
     road_edges_2d = get_road_edge_linear_rings(drivable_polygons)

     # 3. Collect 3D boundaries of non-conflicting lane groups and other drivable areas
-    non_conflicting_boundaries: List[LineString] = []
-    for lane_group_id, lane_group_helper in lane_group_df.iterrows():
+    non_conflicting_boundaries: List[Polyline3D] = []
+    for lane_group in lane_groups:
+        lane_group_id = lane_group.object_id
         if lane_group_id not in conflicting_lane_groups.keys():
-            non_conflicting_boundaries.append(lane_group_helper["left_boundary"])
-            non_conflicting_boundaries.append(lane_group_helper["right_boundary"])
-    for outline in carpark_df.outline.tolist() + generic_drivable_area_df.outline.tolist():
-        non_conflicting_boundaries.append(outline)
+            non_conflicting_boundaries.append(lane_group.left_boundary)
+            non_conflicting_boundaries.append(lane_group.right_boundary)
+    for drivable_surface in car_parks + generic_drivables:
+        non_conflicting_boundaries.append(drivable_surface.outline)

     # 4. Lift road edges to 3D using the boundaries of non-conflicting elements
     non_conflicting_road_edges = lift_road_edges_to_3d(road_edges_2d, non_conflicting_boundaries)

     # 5. Add road edges from conflicting lane groups
-    resolved_road_edges = _resolve_conflicting_lane_groups(conflicting_lane_groups, lane_group_df)
+    resolved_road_edges = _resolve_conflicting_lane_groups(conflicting_lane_groups, lane_groups)
     all_road_edges = non_conflicting_road_edges + resolved_road_edges

     return all_road_edges

-def _get_conflicting_lane_groups(lane_group_df: gpd.GeoDataFrame, lane_df: gpd.GeoDataFrame) -> Dict[int, List[int]]:
-    """
-    Even more optimized version using vectorized operations where possible.
+def _get_conflicting_lane_groups(
+    lane_groups: List[AbstractLaneGroup], lanes: List[AbstractLane], z_threshold: float = 5.0
+) -> Dict[int, List[int]]:
+    """Identifies conflicting lane groups based on their 2D footprints and Z-values.
+    The z-values are inferred from the centerlines of the lanes within each lane group.
+
+    :param lane_groups: List of all lane groups in the map.
+    :param lanes: List of all lanes in the map.
+    :param z_threshold: Z-value threshold over which a 2D overlap is considered a conflict.
+    :return: A dictionary mapping lane group IDs to conflicting lane group IDs.
""" - Z_THRESHOLD = 5.0 # [m] Z-value threshold for conflict detection - # Convert to regular dictionaries for faster access - lane_group_dict = lane_group_df.set_index("id").to_dict("index") - lane_baseline_dict = dict(zip(lane_df.id.values, lane_df.baseline_path.values)) + # Convert to regular dictionaries for simpler access + lane_group_dict: Dict[MapObjectIDType, AbstractLaneGroup] = { + lane_group.object_id: lane_group for lane_group in lane_groups + } + lane_centerline_dict: Dict[MapObjectIDType, Polyline3D] = {lane.object_id: lane.centerline for lane in lanes} # Pre-compute all centerlines - centerlines_cache = {} - polygons = [] - ids = [] - - for lane_group_id, data in lane_group_dict.items(): - geometry = data["geometry"] - lane_ids = data["lane_ids"] + centerlines_cache: Dict[MapObjectIDType, npt.NDArray[np.float64]] = {} + polygons: List[geom.Polygon] = [] + ids: List[MapObjectIDType] = [] - # Vectorized centerline computation - centerlines = [np.array(lane_baseline_dict[lane_id].coords, dtype=np.float64) for lane_id in lane_ids] - centerlines_3d = np.concatenate(centerlines, axis=0) + for lane_group_id, lane_group in lane_group_dict.items(): + centerlines = [lane_centerline_dict[lane_id].array for lane_id in lane_group.lane_ids] + centerlines_3d_array = np.concatenate(centerlines, axis=0) - centerlines_cache[lane_group_id] = centerlines_3d - polygons.append(geometry) + centerlines_cache[lane_group_id] = centerlines_3d_array + polygons.append(lane_group.shapely_polygon) ids.append(lane_group_id) occupancy_map = OccupancyMap2D(polygons, ids) - conflicting_lane_groups: Dict[int, List[int]] = defaultdict(list) + conflicting_lane_groups: Dict[MapObjectIDType, List[MapObjectIDType]] = defaultdict(list) processed_pairs = set() for i, lane_group_id in enumerate(ids): @@ -110,13 +134,14 @@ def _get_conflicting_lane_groups(lane_group_df: gpd.GeoDataFrame, lane_df: gpd.G if intersection.is_empty: continue + # NOTE @DanielDauner: We query the centroid of the intersection polygon to get a representative point + # We cannot calculate the Z-difference at any area, e.g. due to arcs or complex shapes of bridges. intersection_centroid = np.array(intersection.centroid.coords[0], dtype=np.float64) intersecting_centerlines = centerlines_cache[intersecting_id] z_at_intersecting = _get_nearest_z_from_points_3d(intersecting_centerlines, intersection_centroid) z_at_lane_group = _get_nearest_z_from_points_3d(lane_group_centerlines, intersection_centroid) - - if np.abs(z_at_lane_group - z_at_intersecting) >= Z_THRESHOLD: + if np.abs(z_at_lane_group - z_at_intersecting) >= z_threshold: conflicting_lane_groups[lane_group_id].append(intersecting_id) conflicting_lane_groups[intersecting_id].append(lane_group_id) @@ -125,55 +150,112 @@ def _get_conflicting_lane_groups(lane_group_df: gpd.GeoDataFrame, lane_df: gpd.G def lift_road_edges_to_3d( road_edges_2d: List[shapely.LinearRing], - boundaries: List[LineString], + boundaries: List[Polyline3D], max_distance: float = 0.01, -) -> List[LineString]: - """ - Even faster version using batch processing and optimized data structures. +) -> List[Polyline3D]: + """Lift 2D road edges to 3D by querying elevation from boundary segments. + + :param road_edges_2d: List of 2D road edge geometries. + :param boundaries: List of 3D boundary geometries. + :param max_distance: Maximum 2D distance for edge-boundary association. + :return: List of lifted 3D road edge geometries. """ - if not road_edges_2d or not boundaries: - return [] - # 1. 
Build comprehensive spatial index with all boundary segments - boundary_segments = [] + road_edges_3d: List[Polyline3D] = [] + + if len(road_edges_2d) >= 1 and len(boundaries) >= 1: + + # 1. Build comprehensive spatial index with all boundary segments + # NOTE @DanielDauner: We split each boundary polyline into small segments. + # The spatial indexing uses axis-aligned bounding boxes, where small geometries lead to better performance. + boundary_segments = [] + for boundary in boundaries: + coords = boundary.array.reshape(-1, 1, 3) + segment_coords_boundary = np.concatenate([coords[:-1], coords[1:]], axis=1) + boundary_segments.append(segment_coords_boundary) + + boundary_segments = np.concatenate(boundary_segments, axis=0) + boundary_segment_linestrings = shapely.creation.linestrings(boundary_segments) + occupancy_map = OccupancyMap2D(boundary_segment_linestrings) - for boundary_idx, boundary in enumerate(boundaries): - coords = np.array(boundary.coords, dtype=np.float64).reshape(-1, 1, 3) - segment_coords_boundary = np.concatenate([coords[:-1], coords[1:]], axis=1) - boundary_segments.append(segment_coords_boundary) + for linear_ring in road_edges_2d: + points_2d = np.array(linear_ring.coords, dtype=np.float64) + points_3d = np.zeros((len(points_2d), len(Point3DIndex)), dtype=np.float64) + points_3d[..., Point3DIndex.XY] = points_2d - boundary_segments = np.concatenate(boundary_segments, axis=0) - boundary_segment_linestrings = shapely.creation.linestrings(boundary_segments) + # 3. Batch query for all points + query_points = shapely.creation.points(points_2d) + results = occupancy_map.query_nearest(query_points, max_distance=max_distance, exclusive=True) - occupancy_map = OccupancyMap2D(boundary_segment_linestrings) + for query_idx, geometry_idx in zip(*results): + query_point = query_points[query_idx] + segment_coords = boundary_segments[geometry_idx] + best_z = _interpolate_z_on_segment(query_point, segment_coords) + points_3d[query_idx, 2] = best_z - road_edges_3d = [] - for linear_ring in road_edges_2d: - points_2d = np.array(linear_ring.coords, dtype=np.float64) - points_3d = np.zeros((len(points_2d), 3), dtype=np.float64) - points_3d[:, :2] = points_2d + continuous_segments = _split_continuous_segments(np.array(results[0])) + for segment_indices in continuous_segments: + if len(segment_indices) >= 2: + segment_points = points_3d[segment_indices] + road_edges_3d.append(Polyline3D.from_array(segment_points)) + + return road_edges_3d + + +def _resolve_conflicting_lane_groups( + conflicting_lane_groups: Dict[MapObjectIDType, List[MapObjectIDType]], + lane_groups: List[AbstractLaneGroup], +) -> List[Polyline3D]: + """Resolve conflicting lane groups by merging their geometries. + + :param conflicting_lane_groups: A dictionary mapping lane group IDs to their conflicting lane group IDs. + :param lane_groups: A list of all lane groups. + :return: A list of merged 3D road edge geometries. + """ + + # Helper dictionary for easy access to lane group data + lane_group_dict: Dict[MapObjectIDType, AbstractLaneGroup] = { + lane_group.object_id: lane_group for lane_group in lane_groups + } + + # NOTE @DanielDauner: A non-conflicting set has overlapping lane groups separated into different layers (e.g., bridges). + # For each non-conflicting set, we can repeat the process of merging polygons in 2D and lifting to 3D. 
+ # For edge-continuity, we include the neighboring lane groups (predecessors and successors) as well in the 2D merging + # but only use the original lane group boundaries for lifting to 3D. + + # Split conflicting lane groups into non-conflicting sets for further merging + non_conflicting_sets = _create_non_conflicting_sets(conflicting_lane_groups) + + road_edges_3d: List[Polyline3D] = [] + for non_conflicting_set in non_conflicting_sets: - # 3. Batch query for all points - query_points = shapely.creation.points(points_2d) - results = occupancy_map.query_nearest(query_points, max_distance=max_distance, exclusive=True) + # Collect 2D polygons of non-conflicting lane group set and their neighbors + merge_lane_group_data: Dict[MapObjectIDType, geom.Polygon] = {} + for lane_group_id in non_conflicting_set: + merge_lane_group_data[lane_group_id] = lane_group_dict[lane_group_id].shapely_polygon + for neighbor_id in ( + lane_group_dict[lane_group_id].predecessor_ids + lane_group_dict[lane_group_id].successor_ids + ): + merge_lane_group_data[neighbor_id] = lane_group_dict[neighbor_id].shapely_polygon - for query_idx, geometry_idx in zip(*results): - query_point = query_points[query_idx] - segment_coords = boundary_segments[geometry_idx] - best_z = _interpolate_z_on_segment(query_point, segment_coords) - points_3d[query_idx, 2] = best_z + # Get 2D road edge linestrings for the non-conflicting set + set_road_edges_2d = get_road_edge_linear_rings(list(merge_lane_group_data.values())) - continuous_segments = _find_continuous_segments(np.array(results[0])) + # Collect 3D boundaries only of non-conflicting lane groups + set_boundaries_3d: List[Polyline3D] = [] + for lane_group_id in non_conflicting_set: + set_boundaries_3d.append(lane_group_dict[lane_group_id].left_boundary) + set_boundaries_3d.append(lane_group_dict[lane_group_id].right_boundary) - for segment_indices in continuous_segments: - if len(segment_indices) >= 2: - segment_points = points_3d[segment_indices] - road_edges_3d.append(LineString(segment_points)) + # Lift road edges to 3D using the boundaries of non-conflicting lane groups + lifted_road_edges_3d = lift_road_edges_to_3d(set_road_edges_2d, set_boundaries_3d) + road_edges_3d.extend(lifted_road_edges_3d) return road_edges_3d def _get_nearest_z_from_points_3d(points_3d: npt.NDArray[np.float64], query_point: npt.NDArray[np.float64]) -> float: + """Helper function to get the Z-value of the nearest 3D point to a query point.""" assert points_3d.ndim == 2 and points_3d.shape[1] == len( Point3DIndex ), "points_3d must be a 2D array with shape (N, 3)" @@ -183,7 +265,7 @@ def _get_nearest_z_from_points_3d(points_3d: npt.NDArray[np.float64], query_poin def _interpolate_z_on_segment(point: shapely.Point, segment_coords: npt.NDArray[np.float64]) -> float: - """Interpolate Z coordinate along a 3D line segment.""" + """Helper function to interpolate the Z-value on a 3D segment given a 2D point.""" p1, p2 = segment_coords[0], segment_coords[1] # Project point onto segment @@ -203,8 +285,8 @@ def _interpolate_z_on_segment(point: shapely.Point, segment_coords: npt.NDArray[ return p1[2] + t * (p2[2] - p1[2]) -def _find_continuous_segments(indices: np.ndarray) -> List[np.ndarray]: - """Vectorized version of finding continuous segments.""" +def _split_continuous_segments(indices: npt.NDArray[np.int64]) -> List[npt.NDArray[np.int64]]: + """Helper function to find continuous segments in a list of indices.""" if len(indices) == 0: return [] @@ -216,60 +298,25 @@ def _find_continuous_segments(indices:
np.ndarray) -> List[np.ndarray]: return [seg for seg in segments if len(seg) >= 2] -def _resolve_conflicting_lane_groups( - conflicting_lane_groups: Dict[int, List[int]], lane_group_df: gpd.GeoDataFrame -) -> List[LineString]: - - # Split conflicting lane groups into non-conflicting sets for further merging - non_conflicting_sets = _create_non_conflicting_sets(conflicting_lane_groups) - - road_edges_3d: List[LineString] = [] - for non_conflicting_set in non_conflicting_sets: - - # Collect 2D polygons of non-conflicting lane group set - set_lane_group_rows = lane_group_df[lane_group_df.id.isin(non_conflicting_set)] - connected_lane_group = [] - for row in set_lane_group_rows.itertuples(): - connected_lane_group.extend(row.predecessor_ids) - connected_lane_group.extend(row.successor_ids) - connected_lane_group_rows = lane_group_df[lane_group_df.id.isin(connected_lane_group)] - - set_polygons = set_lane_group_rows.geometry.tolist() + connected_lane_group_rows.geometry.tolist() - - # Get 2D road edge linestrings for the non-conflicting set - set_road_edges_2d = get_road_edge_linear_rings(set_polygons) +def _create_non_conflicting_sets(conflicts: Dict[MapObjectIDType, List[MapObjectIDType]]) -> List[Set[MapObjectIDType]]: + """Helper function to create non-conflicting sets from a conflict dictionary.""" - # Collect 3D boundaries of non-conflicting lane groups - set_boundaries_3d: List[LineString] = [] - for lane_group_id in non_conflicting_set: - lane_group_helper = lane_group_df[lane_group_df.id == lane_group_id] - set_boundaries_3d.append(lane_group_helper.left_boundary.values[0]) - set_boundaries_3d.append(lane_group_helper.right_boundary.values[0]) + # NOTE @DanielDauner: The conflict problem is a graph coloring problem. Map objects are nodes, conflicts are edges. + # https://en.wikipedia.org/wiki/Graph_coloring - # Lift road edges to 3D using the boundaries of non-conflicting lane groups - lifted_road_edges_3d = lift_road_edges_to_3d(set_road_edges_2d, set_boundaries_3d) - road_edges_3d.extend(lifted_road_edges_3d) - - return road_edges_3d - - -def _create_non_conflicting_sets(conflicts: Dict[int, List[int]]) -> List[Set[int]]: - """ - Creates sets of non-conflicting indices using NetworkX. 
- """ # Create graph from conflicts G = nx.Graph() for idx, conflict_list in conflicts.items(): for conflict_idx in conflict_list: G.add_edge(idx, conflict_idx) - result = [] + result: List[Set[MapObjectIDType]] = [] # Process each connected component for component in nx.connected_components(G): subgraph = G.subgraph(component) - # Try bipartite coloring first (most common case) + # Try bipartite coloring first if nx.is_bipartite(subgraph): sets = nx.bipartite.sets(subgraph) result.extend([set(s) for s in sets]) diff --git a/src/py123d/geometry/polyline.py b/src/py123d/geometry/polyline.py index 6d971682..3f3b7f65 100644 --- a/src/py123d/geometry/polyline.py +++ b/src/py123d/geometry/polyline.py @@ -138,21 +138,21 @@ def project( class PolylineSE2(ArrayMixin): """Represents a interpolatable SE2 polyline.""" - se2_array: npt.NDArray[np.float64] + _array: npt.NDArray[np.float64] linestring: Optional[geom.LineString] = None _progress: Optional[npt.NDArray[np.float64]] = None _interpolator: Optional[interp1d] = None def __post_init__(self): - assert self.se2_array is not None + assert self._array is not None if self.linestring is None: - self.linestring = geom_creation.linestrings(self.se2_array[..., StateSE2Index.XY]) + self.linestring = geom_creation.linestrings(self._array[..., StateSE2Index.XY]) - self.se2_array[:, StateSE2Index.YAW] = np.unwrap(self.se2_array[:, StateSE2Index.YAW], axis=0) - self._progress = get_path_progress(self.se2_array) - self._interpolator = interp1d(self._progress, self.se2_array, axis=0, bounds_error=False, fill_value=0.0) + self._array[:, StateSE2Index.YAW] = np.unwrap(self._array[:, StateSE2Index.YAW], axis=0) + self._progress = get_path_progress(self._array) + self._interpolator = interp1d(self._progress, self._array, axis=0, bounds_error=False, fill_value=0.0) @classmethod def from_linestring(cls, linestring: geom.LineString) -> PolylineSE2: @@ -196,6 +196,14 @@ def from_discrete_se2(cls, discrete_se2: List[StateSE2]) -> PolylineSE2: """ return PolylineSE2.from_array(np.array(discrete_se2, dtype=np.float64)) + @property + def array(self) -> npt.NDArray[np.float64]: + """Converts the polyline to a numpy array, indexed by :class:`~py123d.geometry.StateSE2Index`. + + :return: A numpy array of shape (N, 3) representing the polyline. + """ + return self._array + @property def length(self) -> float: """Returns the length of the polyline. @@ -275,7 +283,7 @@ def from_array(cls, array: npt.NDArray[np.float64]) -> Polyline3D: :class:`~py123d.geometry.Point3DIndex`. :return: A Polyline3D instance. 
""" - assert array.ndim == 2 and array.shape[1] == 3, "Array must be 3D with shape (N, 3)" + assert array.ndim == 2 and array.shape[1] == len(Point3DIndex), "Array must be 3D with shape (N, 3)" linestring = geom_creation.linestrings(*array.T) return Polyline3D(linestring) diff --git a/src/py123d/geometry/test/test_polyline.py b/src/py123d/geometry/test/test_polyline.py index 32e0acc7..a2614410 100644 --- a/src/py123d/geometry/test/test_polyline.py +++ b/src/py123d/geometry/test/test_polyline.py @@ -130,21 +130,21 @@ def test_from_linestring(self): linestring = geom.LineString(coords) polyline = PolylineSE2.from_linestring(linestring) self.assertIsInstance(polyline, PolylineSE2) - self.assertEqual(polyline.se2_array.shape, (3, 3)) + self.assertEqual(polyline.array.shape, (3, 3)) def test_from_array_2d(self): """Test creating PolylineSE2 from 2D array.""" array = np.array([[0.0, 0.0], [1.0, 0.0], [2.0, 0.0]], dtype=np.float32) polyline = PolylineSE2.from_array(array) self.assertIsInstance(polyline, PolylineSE2) - self.assertEqual(polyline.se2_array.shape, (3, 3)) + self.assertEqual(polyline.array.shape, (3, 3)) def test_from_array_se2(self): """Test creating PolylineSE2 from SE2 array.""" array = np.array([[0.0, 0.0, 0.0], [1.0, 0.0, 0.0], [2.0, 0.0, 0.0]], dtype=np.float32) polyline = PolylineSE2.from_array(array) self.assertIsInstance(polyline, PolylineSE2) - np.testing.assert_array_almost_equal(polyline.se2_array, array) + np.testing.assert_array_almost_equal(polyline.array, array) def test_from_array_invalid_shape(self): """Test creating PolylineSE2 from invalid array shape.""" @@ -157,7 +157,7 @@ def test_from_discrete_se2(self): states = [StateSE2(0.0, 0.0, 0.0), StateSE2(1.0, 0.0, 0.0), StateSE2(2.0, 0.0, 0.0)] polyline = PolylineSE2.from_discrete_se2(states) self.assertIsInstance(polyline, PolylineSE2) - self.assertEqual(polyline.se2_array.shape, (3, 3)) + self.assertEqual(polyline.array.shape, (3, 3)) def test_length_property(self): """Test length property.""" diff --git a/src/py123d/visualization/viser/viser_config.py b/src/py123d/visualization/viser/viser_config.py index b5923db5..e5822140 100644 --- a/src/py123d/visualization/viser/viser_config.py +++ b/src/py123d/visualization/viser/viser_config.py @@ -45,13 +45,13 @@ class ViserConfig: # Map map_visible: bool = True - map_radius: float = 100.0 # [m] + map_radius: float = 200.0 # [m] map_non_road_z_offset: float = 0.1 # small z-translation to place crosswalks, parking, etc. on top of the road map_requery: bool = True # Re-query map when ego vehicle moves out of current map bounds # Bounding boxes bounding_box_visible: bool = True - bounding_box_type: Literal["mesh", "lines"] = "lines" + bounding_box_type: Literal["mesh", "lines"] = "mesh" bounding_box_line_width: float = 4.0 # Cameras diff --git a/test_viser.py b/test_viser.py index 6a1b2cf0..1381639b 100644 --- a/test_viser.py +++ b/test_viser.py @@ -9,11 +9,10 @@ # splits = ["nuplan-mini_test", "nuplan-mini_train", "nuplan-mini_val"] # splits = ["nuplan_private_test"] - # splits = ["carla"] + splits = ["carla_test"] # splits = ["wopd_val"] # splits = ["av2-sensor_train"] # splits = ["pandaset_test", "pandaset_val", "pandaset_train"] - splits = ["pandaset_test"] log_names = None scene_uuids = None From a82576f827733bc102e77d1b25b7b9168dbcfd98 Mon Sep 17 00:00:00 2001 From: Daniel Dauner Date: Sun, 19 Oct 2025 21:53:14 +0200 Subject: [PATCH 100/145] Add a few ease of use features in geometry, configs, etc. 
--- .../conversion/log_writer/arrow_log_writer.py | 14 +- .../vehicle_state/vehicle_parameters.py | 12 ++ src/py123d/geometry/rotation.py | 5 +- src/py123d/geometry/se.py | 5 +- .../transform/test/test_transform_se3.py | 85 ++++++++++- .../geometry/transform/transform_se3.py | 142 +++++++++++++----- src/py123d/geometry/utils/rotation_utils.py | 41 ++++- .../utils/test/test_bounding_box_utils.py | 4 +- .../utils/test/test_rotation_utils.py | 95 ++++++++++++ .../visualization/viser/viser_config.py | 4 +- 10 files changed, 350 insertions(+), 57 deletions(-) diff --git a/src/py123d/conversion/log_writer/arrow_log_writer.py b/src/py123d/conversion/log_writer/arrow_log_writer.py index 7b623e61..e502e8cd 100644 --- a/src/py123d/conversion/log_writer/arrow_log_writer.py +++ b/src/py123d/conversion/log_writer/arrow_log_writer.py @@ -1,6 +1,8 @@ from pathlib import Path from typing import Any, Dict, List, Literal, Optional, Tuple, Union +import cv2 +import numpy as np import pyarrow as pa from py123d.common.utils.uuid_utils import create_deterministic_uuid @@ -159,13 +161,23 @@ def write( for camera_type in expected_cameras: camera_name = camera_type.serialize() - # NOTE: Missing cameras are allowed, e.g., for synchronization mismatches. + # NOTE @DanielDauner: Missing cameras are allowed, e.g., for synchronization mismatches. # In this case, we write None/null to the arrow table. camera_data: Optional[Any] = None camera_pose: Optional[StateSE3] = None if camera_type in provided_cameras: camera_data, camera_pose = cameras[camera_type] + # TODO: Refactor how camera data is handed to the writer. + # This should be combined with configurations to write to log, sensor_root, or sensor_root as mp4. + if isinstance(camera_data, Path) or isinstance(camera_data, str): + camera_data = str(camera_data) + elif isinstance(camera_data, bytes): + camera_data = camera_data + elif isinstance(camera_data, np.ndarray): + _, encoded_img = cv2.imencode('.jpg', camera_data) + camera_data = encoded_img.tobytes() + record_batch_data[f"{camera_name}_data"] = [camera_data] record_batch_data[f"{camera_name}_extrinsic"] = [camera_pose.array if camera_pose else None] diff --git a/src/py123d/datatypes/vehicle_state/vehicle_parameters.py b/src/py123d/datatypes/vehicle_state/vehicle_parameters.py index 2e177f74..92ce83b0 100644 --- a/src/py123d/datatypes/vehicle_state/vehicle_parameters.py +++ b/src/py123d/datatypes/vehicle_state/vehicle_parameters.py @@ -27,6 +27,18 @@ def from_dict(cls, data_dict: dict) -> VehicleParameters: def to_dict(self) -> dict: return asdict(self) + @property + def half_width(self) -> float: + return self.width / 2.0 + + @property + def half_length(self) -> float: + return self.length / 2.0 + + @property + def half_height(self) -> float: + return self.height / 2.0 + def get_nuplan_chrysler_pacifica_parameters() -> VehicleParameters: # NOTE: use parameters from nuPlan dataset diff --git a/src/py123d/geometry/rotation.py b/src/py123d/geometry/rotation.py index fb537717..f39e49e0 100644 --- a/src/py123d/geometry/rotation.py +++ b/src/py123d/geometry/rotation.py @@ -10,6 +10,7 @@ from py123d.geometry.geometry_index import EulerAnglesIndex, QuaternionIndex from py123d.geometry.utils.rotation_utils import ( get_euler_array_from_quaternion_array, + get_euler_array_from_rotation_matrix, get_quaternion_array_from_rotation_matrix, get_rotation_matrix_from_euler_array, get_rotation_matrix_from_quaternion_array, @@ -57,9 +58,7 @@ def from_rotation_matrix(cls, rotation_matrix: npt.NDArray[np.float64]) -> Euler """ assert
rotation_matrix.ndim == 2 assert rotation_matrix.shape == (3, 3) - quaternion = pyquaternion.Quaternion(matrix=rotation_matrix) - yaw, pitch, roll = quaternion.yaw_pitch_roll - return EulerAngles(roll=roll, pitch=pitch, yaw=yaw) + return EulerAngles.from_array(get_euler_array_from_rotation_matrix(rotation_matrix), copy=False) @property def roll(self) -> float: diff --git a/src/py123d/geometry/se.py b/src/py123d/geometry/se.py index 357a062d..2163370e 100644 --- a/src/py123d/geometry/se.py +++ b/src/py123d/geometry/se.py @@ -477,12 +477,11 @@ def euler_angles(self) -> EulerAngles: return EulerAngles.from_array(self.array[EulerStateSE3Index.EULER_ANGLES]) @property - def quaternion_se3(self) -> StateSE3: + def state_se3(self) -> StateSE3: quaternion_se3_array = np.zeros(len(StateSE3Index), dtype=np.float64) quaternion_se3_array[StateSE3Index.XYZ] = self.array[EulerStateSE3Index.XYZ] quaternion_se3_array[StateSE3Index.QUATERNION] = Quaternion.from_euler_angles(self.euler_angles) - - return StateSE3.from_array(quaternion_se3_array) + return StateSE3.from_array(quaternion_se3_array, copy=False) @property def quaternion(self) -> Quaternion: diff --git a/src/py123d/geometry/transform/test/test_transform_se3.py b/src/py123d/geometry/transform/test/test_transform_se3.py index 0303eecf..7176a654 100644 --- a/src/py123d/geometry/transform/test/test_transform_se3.py +++ b/src/py123d/geometry/transform/test/test_transform_se3.py @@ -10,11 +10,14 @@ convert_absolute_to_relative_se3_array, convert_relative_to_absolute_points_3d_array, convert_relative_to_absolute_se3_array, + convert_se3_array_between_origins, + convert_points_3d_array_between_origins, translate_se3_along_body_frame, translate_se3_along_x, translate_se3_along_y, translate_se3_along_z, ) + from py123d.geometry.utils.rotation_utils import ( get_rotation_matrices_from_euler_array, get_rotation_matrices_from_quaternion_array, @@ -49,9 +52,9 @@ def setUp(self): yaw=np.deg2rad(90), ) - quat_se3_a: StateSE3 = euler_se3_a.quaternion_se3 - quat_se3_b: StateSE3 = euler_se3_b.quaternion_se3 - quat_se3_c: StateSE3 = euler_se3_c.quaternion_se3 + quat_se3_a: StateSE3 = euler_se3_a.state_se3 + quat_se3_b: StateSE3 = euler_se3_b.state_se3 + quat_se3_c: StateSE3 = euler_se3_c.state_se3 self.euler_se3 = [euler_se3_a, euler_se3_b, euler_se3_c] self.quat_se3 = [quat_se3_a, quat_se3_b, quat_se3_c] @@ -82,6 +85,12 @@ def _convert_euler_se3_array_to_quat_se3_array( quat_se3_array[idx, StateSE3Index.QUATERNION] = quat.array return quat_se3_array + def _get_random_quat_se3_array(self, size: int) -> npt.NDArray[np.float64]: + """Generate random SE3 poses in Quaternion representation.""" + random_euler_se3_array = self._get_random_euler_se3_array(size) + random_quat_se3_array = self._convert_euler_se3_array_to_quat_se3_array(random_euler_se3_array) + return random_quat_se3_array + def test_sanity(self): for quat_se3, euler_se3 in zip(self.quat_se3, self.euler_se3): @@ -177,6 +186,76 @@ def test_convert_relative_to_absolute_se3_array(self): abs_se3_euler[..., EulerStateSE3Index.EULER_ANGLES] ) np.testing.assert_allclose(quat_rotation_matrices, euler_rotation_matrices, atol=1e-6) + + def test_convert_se3_array_between_origins(self): + for _ in range(10): + random_quat_se3_array = self._get_random_quat_se3_array(np.random.randint(1, 10)) + + from_se3 = StateSE3.from_array(self._get_random_quat_se3_array(1)[0]) + to_se3 =
StateSE3.from_array(self._get_random_quat_se3_array(1)[0]) + identity_se3_array = np.zeros(len(StateSE3Index), dtype=np.float64) + identity_se3_array[StateSE3Index.QW] = 1.0 + identity_se3 = StateSE3.from_array(identity_se3_array) + + # Check if consistent with absolute-relative-absolute conversion + converted_se3_quat = convert_se3_array_between_origins(from_se3, to_se3, random_quat_se3_array) + + abs_from_se3_quat = convert_relative_to_absolute_se3_array(from_se3, random_quat_se3_array) + rel_to_se3_quat = convert_absolute_to_relative_se3_array(to_se3, abs_from_se3_quat) + + np.testing.assert_allclose( + converted_se3_quat[..., StateSE3Index.XYZ], + rel_to_se3_quat[..., StateSE3Index.XYZ], + atol=1e-6, + ) + np.testing.assert_allclose( + converted_se3_quat[..., StateSE3Index.QUATERNION], + rel_to_se3_quat[..., StateSE3Index.QUATERNION], + atol=1e-6, + ) + + # Check if consistent with absolute conversion to identity origin + absolute_se3_quat = convert_se3_array_between_origins(from_se3, identity_se3, random_quat_se3_array) + np.testing.assert_allclose( + absolute_se3_quat[..., StateSE3Index.XYZ], + abs_from_se3_quat[..., StateSE3Index.XYZ], + atol=1e-6, + ) + + def test_convert_points_3d_array_between_origins(self): + random_points_3d = np.random.rand(10, 3) + for _ in range(10): + from_se3 = StateSE3.from_array(self._get_random_quat_se3_array(1)[0]) + to_se3 = StateSE3.from_array(self._get_random_quat_se3_array(1)[0]) + identity_se3_array = np.zeros(len(StateSE3Index), dtype=np.float64) + identity_se3_array[StateSE3Index.QW] = 1.0 + identity_se3 = StateSE3.from_array(identity_se3_array) + + # Check if consistent with absolute-relative-absolute conversion + converted_points_quat = convert_points_3d_array_between_origins(from_se3, to_se3, random_points_3d) + abs_from_se3_quat = convert_relative_to_absolute_points_3d_array(from_se3, random_points_3d) + rel_to_se3_quat = convert_absolute_to_relative_points_3d_array(to_se3, abs_from_se3_quat) + np.testing.assert_allclose(converted_points_quat, rel_to_se3_quat, atol=1e-6) + + # Check if consistent with se3 array conversion + random_se3_poses = np.zeros((random_points_3d.shape[0], len(StateSE3Index)), dtype=np.float64) + random_se3_poses[:, StateSE3Index.XYZ] = random_points_3d + random_se3_poses[:, StateSE3Index.QUATERNION] = np.array([1.0, 0.0, 0.0, 0.0]) # Identity rotation + converted_se3_quat_poses = convert_se3_array_between_origins(from_se3, to_se3, random_se3_poses) + np.testing.assert_allclose( + converted_se3_quat_poses[:, StateSE3Index.XYZ], + converted_points_quat, + atol=1e-6, + ) + + # Check if consistent with absolute conversion to identity origin + absolute_se3_quat = convert_points_3d_array_between_origins(from_se3, identity_se3, random_points_3d) + np.testing.assert_allclose( + absolute_se3_quat[..., StateSE3Index.XYZ], + abs_from_se3_quat[..., StateSE3Index.XYZ], + atol=1e-6, + ) def test_translate_se3_along_x(self): for _ in range(10): diff --git a/src/py123d/geometry/transform/transform_se3.py b/src/py123d/geometry/transform/transform_se3.py index bc6b5fca..8bf907ba 100644 --- a/src/py123d/geometry/transform/transform_se3.py +++ b/src/py123d/geometry/transform/transform_se3.py @@ -1,4 +1,4 @@ -from typing import Union +from typing import Tuple, Union import numpy as np import numpy.typing as npt @@ -12,6 +12,30 @@ ) +def _extract_rotation_translation_pose_arrays( + pose: Union[StateSE3, npt.NDArray[np.float64]], +) -> Tuple[npt.NDArray[np.float64], npt.NDArray[np.float64], npt.NDArray[np.float64]]: + """Helper function to 
extract rotation matrix and translation vector from a StateSE3 or np.ndarray. + + :param pose: A StateSE3 pose or np.ndarray, indexed by :class:`~py123d.geometry.StateSE3Index`. + :raises TypeError: If the pose is not a StateSE3 or np.ndarray. + :return: A tuple containing the rotation matrix, translation vector, and pose array. + """ + if isinstance(pose, StateSE3): + translation = pose.point_3d.array + rotation = pose.rotation_matrix + pose_array = pose.array + elif isinstance(pose, np.ndarray): + assert pose.ndim == 1 and pose.shape[-1] == len(StateSE3Index) + translation = pose[StateSE3Index.XYZ] + rotation = get_rotation_matrix_from_quaternion_array(pose[StateSE3Index.QUATERNION]) + pose_array = pose + else: + raise TypeError(f"Expected StateSE3 or np.ndarray, got {type(pose)}") + + return rotation, translation, pose_array + + def convert_absolute_to_relative_points_3d_array( origin: Union[StateSE3, npt.NDArray[np.float64]], points_3d_array: npt.NDArray[np.float64] ) -> npt.NDArray[np.float64]: @@ -23,15 +47,7 @@ def convert_absolute_to_relative_points_3d_array( :return: The 3D points in the relative frame, indexed by :class:`~py123d.geometry.Point3DIndex`. """ - if isinstance(origin, StateSE3): - t_origin = origin.point_3d.array - R_origin = origin.rotation_matrix - elif isinstance(origin, np.ndarray): - assert origin.ndim == 1 and origin.shape[-1] == len(StateSE3Index) - t_origin = origin[StateSE3Index.XYZ] - R_origin = get_rotation_matrix_from_quaternion_array(origin[StateSE3Index.QUATERNION]) - else: - raise TypeError(f"Expected StateSE3 or np.ndarray, got {type(origin)}") + R_origin, t_origin, _ = _extract_rotation_translation_pose_arrays(origin) assert points_3d_array.ndim >= 1 assert points_3d_array.shape[-1] == len(Point3DIndex) @@ -51,17 +67,7 @@ def convert_absolute_to_relative_se3_array( :raises TypeError: If the origin is not a StateSE3 or np.ndarray. :return: The SE3 array in the relative frame, indexed by :class:`~py123d.geometry.StateSE3Index`. """ - if isinstance(origin, StateSE3): - origin_array = origin.array - t_origin = origin.point_3d.array - R_origin = origin.rotation_matrix - elif isinstance(origin, np.ndarray): - assert origin.ndim == 1 and origin.shape[-1] == len(StateSE3Index) - origin_array = origin - t_origin = origin_array[StateSE3Index.XYZ] - R_origin = get_rotation_matrix_from_quaternion_array(origin_array[StateSE3Index.QUATERNION]) - else: - raise TypeError(f"Expected StateSE3 or np.ndarray, got {type(origin)}") + R_origin, t_origin, origin_array = _extract_rotation_translation_pose_arrays(origin) assert se3_array.ndim >= 1 assert se3_array.shape[-1] == len(StateSE3Index) @@ -94,15 +100,7 @@ def convert_relative_to_absolute_points_3d_array( :raises TypeError: If the origin is not a StateSE3 or np.ndarray. :return: The 3D points in the absolute frame, indexed by :class:`~py123d.geometry.Point3DIndex`. 
""" - if isinstance(origin, StateSE3): - t_origin = origin.point_3d.array - R_origin = origin.rotation_matrix - elif isinstance(origin, np.ndarray): - assert origin.ndim == 1 and origin.shape[-1] == len(StateSE3Index) - t_origin = origin[StateSE3Index.XYZ] - R_origin = get_rotation_matrix_from_quaternion_array(origin[StateSE3Index.QUATERNION]) - else: - raise TypeError(f"Expected QuaternionSE3 or np.ndarray, got {type(origin)}") + R_origin, t_origin, _ = _extract_rotation_translation_pose_arrays(origin) assert points_3d_array.shape[-1] == len(Point3DIndex) @@ -121,17 +119,7 @@ def convert_relative_to_absolute_se3_array( :return: The SE3 array in the absolute frame, indexed by :class:`~py123d.geometry.StateSE3Index`. """ - if isinstance(origin, StateSE3): - origin_array = origin.array - t_origin = origin.point_3d.array - R_origin = origin.rotation_matrix - elif isinstance(origin, np.ndarray): - assert origin.ndim == 1 and origin.shape[-1] == len(StateSE3Index) - origin_array = origin - t_origin = origin_array[StateSE3Index.XYZ] - R_origin = get_rotation_matrix_from_quaternion_array(origin_array[StateSE3Index.QUATERNION]) - else: - raise TypeError(f"Expected QuaternionSE3 or np.ndarray, got {type(origin)}") + R_origin, t_origin, origin_array = _extract_rotation_translation_pose_arrays(origin) assert se3_array.ndim >= 1 assert se3_array.shape[-1] == len(StateSE3Index) @@ -152,6 +140,78 @@ def convert_relative_to_absolute_se3_array( return abs_se3_array +def convert_se3_array_between_origins( + from_origin: Union[StateSE3, npt.NDArray[np.float64]], + to_origin: Union[StateSE3, npt.NDArray[np.float64]], + se3_array: npt.NDArray[np.float64], +) -> npt.NDArray[np.float64]: + """Converts an SE3 array from one origin frame to another origin frame. + + :param from_origin: The source origin state in the absolute frame, as a StateSE3 or np.ndarray. + :param to_origin: The target origin state in the absolute frame, as a StateSE3 or np.ndarray. + :param se3_array: The SE3 array in the source origin frame. + :raises TypeError: If the origins are not StateSE3 or np.ndarray. + :return: The SE3 array in the target origin frame, indexed by :class:`~py123d.geometry.StateSE3Index`. 
+ """ + # Parse from_origin & to_origin + R_from, t_from, from_origin_array = _extract_rotation_translation_pose_arrays(from_origin) + R_to, t_to, to_origin_array = _extract_rotation_translation_pose_arrays(to_origin) + + assert se3_array.ndim >= 1 + assert se3_array.shape[-1] == len(StateSE3Index) + + rel_positions = se3_array[..., StateSE3Index.XYZ] + rel_quaternions = se3_array[..., StateSE3Index.QUATERNION] + + # Compute relative transformation: T_to^-1 * T_from + R_rel = R_to.T @ R_from # Relative rotation matrix + t_rel = R_to.T @ (t_from - t_to) # Relative translation + + q_rel = multiply_quaternion_arrays( + conjugate_quaternion_array(to_origin_array[StateSE3Index.QUATERNION]), + from_origin_array[StateSE3Index.QUATERNION], + ) + + # Transform positions: rotate and translate + new_rel_positions = (R_rel @ rel_positions.T).T + t_rel + + # Transform orientations: quaternion multiplication + new_rel_quaternions = multiply_quaternion_arrays(q_rel, rel_quaternions) + + # Prepare output array + result_se3_array = np.zeros_like(se3_array) + result_se3_array[..., StateSE3Index.XYZ] = new_rel_positions + result_se3_array[..., StateSE3Index.QUATERNION] = new_rel_quaternions + + return result_se3_array + + +def convert_points_3d_array_between_origins( + from_origin: Union[StateSE3, npt.NDArray[np.float64]], + to_origin: Union[StateSE3, npt.NDArray[np.float64]], + points_3d_array: npt.NDArray[np.float64], +) -> npt.NDArray[np.float64]: + """Converts 3D points from one origin frame to another origin frame. + + :param from_origin: The source origin state in the absolute frame, as a StateSE3 or np.ndarray. + :param to_origin: The target origin state in the absolute frame, as a StateSE3 or np.ndarray. + :param points_3d_array: The 3D points in the source origin frame. + :raises TypeError: If the origins are not StateSE3 or np.ndarray. + :return: The 3D points in the target origin frame, indexed by :class:`~py123d.geometry.Point3DIndex`. + """ + # Parse from_origin & to_origin + R_from, t_from, _ = _extract_rotation_translation_pose_arrays(from_origin) + R_to, t_to, _ = _extract_rotation_translation_pose_arrays(to_origin) + + assert points_3d_array.ndim >= 1 + assert points_3d_array.shape[-1] == len(Point3DIndex) + + abs_points = points_3d_array @ R_from.T + t_from + new_rel_points = (abs_points - t_to) @ R_to + + return new_rel_points + + def translate_se3_along_z(state_se3: StateSE3, distance: float) -> StateSE3: """Translates an SE3 state along the Z-axis. diff --git a/src/py123d/geometry/utils/rotation_utils.py b/src/py123d/geometry/utils/rotation_utils.py index ea31dd0f..6032a3ba 100644 --- a/src/py123d/geometry/utils/rotation_utils.py +++ b/src/py123d/geometry/utils/rotation_utils.py @@ -84,8 +84,46 @@ def get_rotation_matrices_from_euler_array(euler_angles_array: npt.NDArray[np.fl return rotation_matrices +def get_euler_array_from_rotation_matrices(rotation_matrices: npt.NDArray[np.float64]) -> npt.NDArray[np.float64]: + """Convert rotation matrices to Euler angles using Tait-Bryan ZYX convention (yaw-pitch-roll). + + :param rotation_matrices: Rotation matrices of shape (..., 3, 3) + :return: Euler angles of shape (..., 3), indexed by EulerAnglesIndex + """ + assert rotation_matrices.ndim == 3 and rotation_matrices.shape[-2:] == (3, 3) + + original_shape = rotation_matrices.shape[:-2] + + # Flatten to 3D if needed, i.e. 
(N, 3, 3) + if rotation_matrices.ndim > 3: + R = rotation_matrices.reshape(-1, 3, 3) + else: + R = rotation_matrices + + batch_size = R.shape[0] + euler_angles = np.zeros((batch_size, len(EulerAnglesIndex)), dtype=np.float64) + + # Calculate yaw (rotation around Z-axis) + euler_angles[:, EulerAnglesIndex.YAW] = np.arctan2(-R[:, 0, 1], R[:, 0, 0]) + + # Calculate pitch (rotation around Y-axis) + # NOTE: Clip to avoid numerical issues with arcsin + sin_pitch = np.clip(R[:, 0, 2], -1.0, 1.0) + euler_angles[:, EulerAnglesIndex.PITCH] = np.arcsin(sin_pitch) + + # Calculate roll (rotation around X-axis) + euler_angles[:, EulerAnglesIndex.ROLL] = np.arctan2(-R[:, 1, 2], R[:, 2, 2]) + + # Reshape back to original batch dimensions + (3,) + if len(original_shape) > 1: + euler_angles = euler_angles.reshape(original_shape + (len(EulerAnglesIndex),)) + + return euler_angles + + def get_euler_array_from_rotation_matrix(rotation_matrix: npt.NDArray[np.float64]) -> npt.NDArray[np.float64]: - raise NotImplementedError + assert rotation_matrix.ndim == 2 and rotation_matrix.shape == (3, 3) + return get_euler_array_from_rotation_matrices(rotation_matrix[None, ...])[0] def get_quaternion_array_from_rotation_matrices(rotation_matrices: npt.NDArray[np.float64]) -> npt.NDArray[np.float64]: @@ -138,7 +176,6 @@ def get_quaternion_array_from_rotation_matrices(rotation_matrices: npt.NDArray[n quaternions[mask4, QuaternionIndex.QZ] = 0.25 * s4 # z assert np.all(mask1 | mask2 | mask3 | mask4), "All matrices should fall into one of the four cases." - return normalize_quaternion_array(quaternions) diff --git a/src/py123d/geometry/utils/test/test_bounding_box_utils.py b/src/py123d/geometry/utils/test/test_bounding_box_utils.py index 0cdb52e6..3b5718ca 100644 --- a/src/py123d/geometry/utils/test/test_bounding_box_utils.py +++ b/src/py123d/geometry/utils/test/test_bounding_box_utils.py @@ -198,7 +198,7 @@ def test_bbse3_array_to_corners_array_one_dim(self): def test_bbse3_array_to_corners_array_one_dim_rotation(self): for _ in range(self._num_consistency_checks): - se3_state = EulerStateSE3.from_array(self._get_random_euler_se3_array(1)[0]).quaternion_se3 + se3_state = EulerStateSE3.from_array(self._get_random_euler_se3_array(1)[0]).state_se3 se3_array = se3_state.array # construct a bounding box @@ -227,7 +227,7 @@ def test_bbse3_array_to_corners_array_n_dim(self): for _ in range(self._num_consistency_checks): N = np.random.randint(1, 20) se3_array = self._get_random_euler_se3_array(N) - se3_state_array = np.array([EulerStateSE3.from_array(arr).quaternion_se3.array for arr in se3_array]) + se3_state_array = np.array([EulerStateSE3.from_array(arr).state_se3.array for arr in se3_array]) # construct a bounding box bounding_box_se3_array = np.zeros((N, len(BoundingBoxSE3Index)), dtype=np.float64) diff --git a/src/py123d/geometry/utils/test/test_rotation_utils.py b/src/py123d/geometry/utils/test/test_rotation_utils.py index e69de29b..4c956b13 100644 --- a/src/py123d/geometry/utils/test/test_rotation_utils.py +++ b/src/py123d/geometry/utils/test/test_rotation_utils.py @@ -0,0 +1,95 @@ +import unittest + +import numpy as np +import numpy.typing as npt + + +from py123d.geometry.utils.rotation_utils import ( + conjugate_quaternion_array, + get_euler_array_from_quaternion_array, +) + +# TODO @DanielDauner: Add more tests for the remaining functions +# from py123d.geometry.utils.rotation_utils import ( +# conjugate_quaternion_array, +# get_euler_array_from_quaternion_array, +# get_euler_array_from_rotation_matrices, +# 
get_euler_array_from_rotation_matrix, +# get_q_bar_matrices, +# get_q_matrices, +# get_quaternion_array_from_euler_array, +# get_quaternion_array_from_rotation_matrices, +# get_quaternion_array_from_rotation_matrix, +# get_rotation_matrices_from_euler_array, +# get_rotation_matrices_from_quaternion_array, +# get_rotation_matrix_from_euler_array, +# get_rotation_matrix_from_quaternion_array, +# invert_quaternion_array, +# multiply_quaternion_arrays, +# normalize_angle, +# normalize_quaternion_array, +# ) + + +from pyquaternion import Quaternion as PyQuaternion + + +class TestRotationUtils(unittest.TestCase): + + def setUp(self): + pass + + def _get_random_quaternion(self) -> npt.NDArray[np.float64]: + random_quat: npt.NDArray[np.float64] = np.random.rand(4) + random_quat /= np.linalg.norm(random_quat) + return random_quat + + def _get_random_quaternion_array(self, n: int) -> npt.NDArray[np.float64]: + random_quat_array: npt.NDArray[np.float64] = np.random.rand(n, 4) + random_quat_array /= np.linalg.norm(random_quat_array, axis=1)[:, np.newaxis] + return random_quat_array + + def test_conjugate_quaternion_array(self): + for _ in range(10): + random_quat = self._get_random_quaternion() + conj_quat = conjugate_quaternion_array(random_quat) + + # Check if conjugation is correct + np.testing.assert_allclose( + conj_quat, + np.array([random_quat[0], -random_quat[1], -random_quat[2], -random_quat[3]]), + atol=1e-8, + ) + + # Check if double conjugation returns original quaternion + double_conj_quat = conjugate_quaternion_array(conj_quat) + np.testing.assert_allclose( + double_conj_quat, + random_quat, + atol=1e-8, + ) + + def test_get_euler_array_from_quaternion_array(self): + for _ in range(10): + random_quat_array = self._get_random_quaternion_array(np.random.randint(0, 10)) + pyquaternions = [PyQuaternion(array=q) for q in random_quat_array] + + # Convert to Euler angles using our function + euler_array = get_euler_array_from_quaternion_array(random_quat_array) + + # Test against pyquaternion results + for i, pyq in enumerate(pyquaternions): + # Convert to Euler angles using pyquaternion for comparison + yaw, pitch, roll = pyq.yaw_pitch_roll + euler_from_pyq = np.array([roll, pitch, yaw], dtype=np.float64) + + # Check if conversion is correct + np.testing.assert_allclose( + euler_array[i], + euler_from_pyq, + atol=1e-6, + ) + + +if __name__ == "__main__": + unittest.main() diff --git a/src/py123d/visualization/viser/viser_config.py b/src/py123d/visualization/viser/viser_config.py index e5822140..3d48803b 100644 --- a/src/py123d/visualization/viser/viser_config.py +++ b/src/py123d/visualization/viser/viser_config.py @@ -45,9 +45,9 @@ class ViserConfig: # Map map_visible: bool = True - map_radius: float = 200.0 # [m] + map_radius: float = 500.0 # [m] map_non_road_z_offset: float = 0.1 # small z-translation to place crosswalks, parking, etc. on top of the road - map_requery: bool = True # Re-query map when ego vehicle moves out of current map bounds + map_requery: bool = False # Re-query map when ego vehicle moves out of current map bounds # Bounding boxes bounding_box_visible: bool = True From b434ed4445e279f3d2ca1d89794571fc64c18082 Mon Sep 17 00:00:00 2001 From: Daniel Dauner Date: Mon, 20 Oct 2025 18:12:51 +0200 Subject: [PATCH 101/145] Fix some bugs in the euler angle - rotation matrix convention. Add tests for rotation utils. 
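The convention being fixed here is ZYX Tait-Bryan: intrinsic yaw about Z, then pitch about Y, then roll about X, i.e. R = R_z(yaw) @ R_y(pitch) @ R_x(roll). A standalone NumPy sanity check of this composition and its inverse (rot_zyx and euler_from_rot are helpers local to this sketch, independent of py123d; angles are sampled away from the pitch = +/-90 deg gimbal lock, where roll and yaw are not separately recoverable):

import numpy as np


def rot_zyx(roll, pitch, yaw):
    # R = Rz(yaw) @ Ry(pitch) @ Rx(roll), the ZYX Tait-Bryan convention.
    cr, sr = np.cos(roll), np.sin(roll)
    cp, sp = np.cos(pitch), np.sin(pitch)
    cy, sy = np.cos(yaw), np.sin(yaw)
    Rx = np.array([[1, 0, 0], [0, cr, -sr], [0, sr, cr]])
    Ry = np.array([[cp, 0, sp], [0, 1, 0], [-sp, 0, cp]])
    Rz = np.array([[cy, -sy, 0], [sy, cy, 0], [0, 0, 1]])
    return Rz @ Ry @ Rx


def euler_from_rot(R):
    # Inverse of rot_zyx away from the pitch = +/-90 deg singularity.
    yaw = np.arctan2(R[1, 0], R[0, 0])
    pitch = np.arcsin(np.clip(-R[2, 0], -1.0, 1.0))
    roll = np.arctan2(R[2, 1], R[2, 2])
    return roll, pitch, yaw


rng = np.random.default_rng(0)
roll, pitch, yaw = rng.uniform(-np.pi / 2 + 0.1, np.pi / 2 - 0.1, size=3)
np.testing.assert_allclose(euler_from_rot(rot_zyx(roll, pitch, yaw)), (roll, pitch, yaw), atol=1e-12)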
--- src/py123d/common/utils/mixin.py | 4 + .../datatypes/vehicle_state/ego_state.py | 5 +- src/py123d/geometry/bounding_box.py | 11 +- src/py123d/geometry/point.py | 3 +- src/py123d/geometry/rotation.py | 14 +- src/py123d/geometry/se.py | 5 +- src/py123d/geometry/test/test_rotation.py | 29 - .../test/test_transform_consistency.py | 20 +- .../transform/test/test_transform_se3.py | 15 +- .../geometry/transform/transform_euler_se3.py | 16 +- src/py123d/geometry/utils/rotation_utils.py | 62 +- .../utils/test/test_rotation_utils.py | 798 ++++++++++++++++-- .../viser/elements/sensor_elements.py | 20 +- .../visualization/viser/viser_config.py | 2 +- 14 files changed, 859 insertions(+), 145 deletions(-) diff --git a/src/py123d/common/utils/mixin.py b/src/py123d/common/utils/mixin.py index 138a0e57..2935f99f 100644 --- a/src/py123d/common/utils/mixin.py +++ b/src/py123d/common/utils/mixin.py @@ -54,3 +54,7 @@ def tolist(self) -> list: def copy(self) -> ArrayMixin: """Return a copy of the object with a copied array.""" return self.__class__.from_array(self.array, copy=True) + + def __repr__(self) -> str: + """String representation of the ArrayMixin instance.""" + return f"{self.__class__.__name__}(array={self.array})" diff --git a/src/py123d/datatypes/vehicle_state/ego_state.py b/src/py123d/datatypes/vehicle_state/ego_state.py index d8748658..2c2d21f0 100644 --- a/src/py123d/datatypes/vehicle_state/ego_state.py +++ b/src/py123d/datatypes/vehicle_state/ego_state.py @@ -2,7 +2,6 @@ from dataclasses import dataclass from enum import IntEnum -from functools import cached_property from typing import Final, Optional import numpy as np @@ -128,7 +127,7 @@ def rear_axle_se2(self) -> StateSE2: def rear_axle(self) -> StateSE3: return self.rear_axle_se3 - @cached_property + @property def bounding_box(self) -> BoundingBoxSE3: return BoundingBoxSE3( center=self.center_se3, @@ -216,7 +215,7 @@ def rear_axle_se2(self) -> StateSE2: def rear_axle(self) -> StateSE2: return self.rear_axle_se2 - @cached_property + @property def bounding_box(self) -> BoundingBoxSE2: return BoundingBoxSE2( center=self.center_se2, diff --git a/src/py123d/geometry/bounding_box.py b/src/py123d/geometry/bounding_box.py index 2e3b3a77..bc3e1b73 100644 --- a/src/py123d/geometry/bounding_box.py +++ b/src/py123d/geometry/bounding_box.py @@ -1,7 +1,6 @@ from __future__ import annotations from ast import Dict -from functools import cached_property from typing import Union import numpy as np @@ -92,7 +91,7 @@ def width(self) -> float: """ return self._array[BoundingBoxSE2Index.WIDTH] - @cached_property + @property def array(self) -> npt.NDArray[np.float64]: """Converts the BoundingBoxSE2 instance to a numpy array, indexed by :class:`~py123d.geometry.BoundingBoxSE2Index`. @@ -100,7 +99,7 @@ def array(self) -> npt.NDArray[np.float64]: """ return self._array - @cached_property + @property def shapely_polygon(self) -> geom.Polygon: """Return a Shapely polygon representation of the bounding box. @@ -116,7 +115,7 @@ def bounding_box_se2(self) -> BoundingBoxSE2: """ return self - @cached_property + @property def corners_array(self) -> npt.NDArray[np.float64]: """Returns the corner points of the bounding box as a numpy array. @@ -259,7 +258,7 @@ def shapely_polygon(self) -> geom.Polygon: """ return self.bounding_box_se2.shapely_polygon - @cached_property + @property def corners_array(self) -> npt.NDArray[np.float64]: """Returns the corner points of the bounding box as a numpy array, shape (8, 3). 
@@ -268,7 +267,7 @@ def corners_array(self) -> npt.NDArray[np.float64]: """ return bbse3_array_to_corners_array(self.array) - @cached_property + @property def corners_dict(self) -> Dict[Corners3DIndex, Point3D]: """Returns the corner points of the bounding box as a dictionary. diff --git a/src/py123d/geometry/point.py b/src/py123d/geometry/point.py index 29d7b00a..571567be 100644 --- a/src/py123d/geometry/point.py +++ b/src/py123d/geometry/point.py @@ -1,6 +1,5 @@ from __future__ import annotations -from functools import cached_property from typing import Iterable import numpy as np @@ -108,7 +107,7 @@ def from_array(cls, array: npt.NDArray[np.float64], copy: bool = True) -> Point3 object.__setattr__(instance, "_array", array.copy() if copy else array) return instance - @cached_property + @property def array(self) -> npt.NDArray[np.float64]: """The array representation of the point. diff --git a/src/py123d/geometry/rotation.py b/src/py123d/geometry/rotation.py index f39e49e0..1f54431a 100644 --- a/src/py123d/geometry/rotation.py +++ b/src/py123d/geometry/rotation.py @@ -1,7 +1,5 @@ from __future__ import annotations -from functools import cached_property - import numpy as np import numpy.typing as npt import pyquaternion @@ -11,6 +9,7 @@ from py123d.geometry.utils.rotation_utils import ( get_euler_array_from_quaternion_array, get_euler_array_from_rotation_matrix, + get_quaternion_array_from_euler_array, get_quaternion_array_from_rotation_matrix, get_rotation_matrix_from_euler_array, get_rotation_matrix_from_quaternion_array, @@ -97,7 +96,7 @@ def array(self) -> npt.NDArray[np.float64]: def quaternion(self) -> Quaternion: return Quaternion.from_euler_angles(self) - @cached_property + @property def rotation_matrix(self) -> npt.NDArray[np.float64]: """Returns the 3x3 rotation matrix representation of the Euler angles. NOTE: The rotation order is intrinsic Z-Y'-X'' (yaw-pitch-roll). @@ -164,8 +163,7 @@ def from_euler_angles(cls, euler_angles: EulerAngles) -> Quaternion: :param euler_angles: An EulerAngles instance representing the Euler angles. :return: A Quaternion instance. """ - rotation_matrix = euler_angles.rotation_matrix - return Quaternion.from_rotation_matrix(rotation_matrix) + return Quaternion.from_array(get_quaternion_array_from_euler_array(euler_angles.array), copy=False) @property def qw(self) -> float: @@ -208,7 +206,7 @@ def array(self) -> npt.NDArray[np.float64]: """ return self._array - @cached_property + @property def pyquaternion(self) -> pyquaternion.Quaternion: """Returns the pyquaternion.Quaternion representation of the quaternion. @@ -216,7 +214,7 @@ def pyquaternion(self) -> pyquaternion.Quaternion: """ return pyquaternion.Quaternion(array=self.array) - @cached_property + @property def euler_angles(self) -> EulerAngles: """Returns the Euler angles (roll, pitch, yaw) representation of the quaternion. NOTE: The rotation order is intrinsic Z-Y'-X'' (yaw-pitch-roll). @@ -225,7 +223,7 @@ def euler_angles(self) -> EulerAngles: """ return EulerAngles.from_array(get_euler_array_from_quaternion_array(self.array), copy=False) - @cached_property + @property def rotation_matrix(self) -> npt.NDArray[np.float64]: """Returns the 3x3 rotation matrix representation of the quaternion. 
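An aside on the cached_property -> property switches above (presumably to avoid stale values): these properties derive from a mutable underlying array, and functools.cached_property freezes the first computed result even if that array is later mutated in place. A toy illustration of the pitfall (CachedBox is a hypothetical class for this sketch, not an actual py123d type):

from functools import cached_property

import numpy as np


class CachedBox:
    def __init__(self, array: np.ndarray) -> None:
        self.array = array

    @cached_property
    def doubled(self) -> np.ndarray:
        # Computed once, then stored on the instance and never recomputed.
        return self.array * 2.0


box = CachedBox(np.array([1.0, 2.0]))
print(box.doubled)   # [2. 4.]
box.array[:] = 10.0  # mutate the underlying array in place
print(box.doubled)   # still [2. 4.]; a plain @property would return [20. 20.]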
diff --git a/src/py123d/geometry/se.py b/src/py123d/geometry/se.py index 2163370e..b8b30cc8 100644 --- a/src/py123d/geometry/se.py +++ b/src/py123d/geometry/se.py @@ -5,7 +5,6 @@ import numpy as np import numpy.typing as npt import shapely.geometry as geom -from pyparsing import cached_property from py123d.common.utils.mixin import ArrayMixin from py123d.geometry.geometry_index import EulerStateSE3Index, Point3DIndex, StateSE2Index, StateSE3Index @@ -248,7 +247,7 @@ def shapely_point(self) -> geom.Point: """ return self.point_3d.shapely_point - @cached_property + @property def quaternion(self) -> Quaternion: """Returns the quaternion (w, x, y, z) representation of the state's orientation. @@ -472,7 +471,7 @@ def transformation_matrix(self) -> npt.NDArray[np.float64]: transformation_matrix[:3, 3] = self.array[EulerStateSE3Index.XYZ] return transformation_matrix - @cached_property + @property def euler_angles(self) -> EulerAngles: return EulerAngles.from_array(self.array[EulerStateSE3Index.EULER_ANGLES]) diff --git a/src/py123d/geometry/test/test_rotation.py b/src/py123d/geometry/test/test_rotation.py index 133d056b..66f51c49 100644 --- a/src/py123d/geometry/test/test_rotation.py +++ b/src/py123d/geometry/test/test_rotation.py @@ -207,34 +207,5 @@ def test_hash(self): self.assertNotEqual(hash(quat1), hash(quat3)) -class TestRotationConversions(unittest.TestCase): - """Test conversions between EulerAngles and Quaternion.""" - - def test_euler_to_quaternion_to_euler(self): - """Test round-trip conversion from Euler to Quaternion and back.""" - original_euler = EulerAngles(0.1, 0.2, 0.3) - quaternion = Quaternion.from_euler_angles(original_euler) - converted_euler = quaternion.euler_angles - - self.assertAlmostEqual(original_euler.roll, converted_euler.roll, places=10) - self.assertAlmostEqual(original_euler.pitch, converted_euler.pitch, places=10) - self.assertAlmostEqual(original_euler.yaw, converted_euler.yaw, places=10) - - def test_rotation_matrix_consistency(self): - """Test that rotation matrix conversions are consistent.""" - euler = EulerAngles(0.1, 0.2, 0.3) - quat = Quaternion.from_euler_angles(euler) - - euler_from_matrix = EulerAngles.from_rotation_matrix(euler.rotation_matrix) - quat_from_matrix = Quaternion.from_rotation_matrix(quat.rotation_matrix) - self.assertAlmostEqual(euler.roll, euler_from_matrix.roll, places=10) - self.assertAlmostEqual(euler.pitch, euler_from_matrix.pitch, places=10) - self.assertAlmostEqual(euler.yaw, euler_from_matrix.yaw, places=10) - self.assertAlmostEqual(quat.qw, quat_from_matrix.qw, places=10) - self.assertAlmostEqual(quat.qx, quat_from_matrix.qx, places=10) - self.assertAlmostEqual(quat.qy, quat_from_matrix.qy, places=10) - self.assertAlmostEqual(quat.qz, quat_from_matrix.qz, places=10) - - if __name__ == "__main__": unittest.main() diff --git a/src/py123d/geometry/transform/test/test_transform_consistency.py b/src/py123d/geometry/transform/test/test_transform_consistency.py index 32805f60..a798fabd 100644 --- a/src/py123d/geometry/transform/test/test_transform_consistency.py +++ b/src/py123d/geometry/transform/test/test_transform_consistency.py @@ -24,6 +24,7 @@ translate_se2_along_y, translate_se2_array_along_body_frame, ) +from py123d.geometry.utils.rotation_utils import get_rotation_matrices_from_euler_array class TestTransformConsistency(unittest.TestCase): @@ -151,7 +152,24 @@ def test_se3_absolute_relative_conversion_consistency(self) -> None: relative_poses = convert_absolute_to_relative_euler_se3_array(reference, absolute_poses) 
recovered_absolute = convert_relative_to_absolute_euler_se3_array(reference, relative_poses) - np.testing.assert_array_almost_equal(absolute_poses, recovered_absolute, decimal=self.decimal) + np.testing.assert_array_almost_equal( + absolute_poses[..., EulerStateSE3Index.XYZ], + recovered_absolute[..., EulerStateSE3Index.XYZ], + decimal=self.decimal, + ) + + absolute_rotation_matrices = get_rotation_matrices_from_euler_array( + absolute_poses[..., EulerStateSE3Index.EULER_ANGLES] + ) + recovered_rotation_matrices = get_rotation_matrices_from_euler_array( + recovered_absolute[..., EulerStateSE3Index.EULER_ANGLES] + ) + + np.testing.assert_array_almost_equal( + absolute_rotation_matrices, + recovered_rotation_matrices, + decimal=self.decimal, + ) def test_se3_points_absolute_relative_conversion_consistency(self) -> None: """Test that converting absolute->relative->absolute returns original points""" diff --git a/src/py123d/geometry/transform/test/test_transform_se3.py b/src/py123d/geometry/transform/test/test_transform_se3.py index 7176a654..b035a04f 100644 --- a/src/py123d/geometry/transform/test/test_transform_se3.py +++ b/src/py123d/geometry/transform/test/test_transform_se3.py @@ -2,6 +2,8 @@ import numpy as np import numpy.typing as npt +from pyquaternion import Quaternion as PyQuaternion + import py123d.geometry.transform.transform_euler_se3 as euler_transform_se3 from py123d.geometry import EulerStateSE3, EulerStateSE3Index, Point3D, Quaternion, StateSE3, StateSE3Index @@ -19,6 +21,7 @@ ) from py123d.geometry.utils.rotation_utils import ( + get_quaternion_array_from_euler_array, get_rotation_matrices_from_euler_array, get_rotation_matrices_from_quaternion_array, ) @@ -79,10 +82,9 @@ def _convert_euler_se3_array_to_quat_se3_array( """Convert an array of SE3 poses from Euler angles to Quaternion representation""" quat_se3_array = np.zeros((euler_se3_array.shape[0], len(StateSE3Index)), dtype=np.float64) quat_se3_array[:, StateSE3Index.XYZ] = euler_se3_array[:, EulerStateSE3Index.XYZ] - rotation_matrices = get_rotation_matrices_from_euler_array(euler_se3_array[:, EulerStateSE3Index.EULER_ANGLES]) - for idx, rotation_matrix in enumerate(rotation_matrices): - quat = Quaternion.from_rotation_matrix(rotation_matrix) - quat_se3_array[idx, StateSE3Index.QUATERNION] = quat.array + quat_se3_array[:, StateSE3Index.QUATERNION] = get_quaternion_array_from_euler_array( + euler_se3_array[:, EulerStateSE3Index.EULER_ANGLES] + ) return quat_se3_array def _get_random_quat_se3_array(self, size: int) -> npt.NDArray[np.float64]: @@ -178,6 +180,11 @@ def test_convert_relative_to_absolute_se3_array(self): np.testing.assert_allclose( abs_se3_euler[..., EulerStateSE3Index.XYZ], abs_se3_quat[..., StateSE3Index.XYZ], atol=1e-6 ) + + # pyquat_rotation_matrices = [ + # PyQuaternion(array=q).rotation_matrix for q in abs_se3_quat[..., StateSE3Index.QUATERNION] + # ] + # We compare rotation matrices to avoid issues with quaternion sign ambiguity quat_rotation_matrices = get_rotation_matrices_from_quaternion_array( abs_se3_quat[..., StateSE3Index.QUATERNION] diff --git a/src/py123d/geometry/transform/transform_euler_se3.py b/src/py123d/geometry/transform/transform_euler_se3.py index e7c4d298..15bb48ce 100644 --- a/src/py123d/geometry/transform/transform_euler_se3.py +++ b/src/py123d/geometry/transform/transform_euler_se3.py @@ -5,6 +5,7 @@ from py123d.geometry import EulerAngles, EulerStateSE3, EulerStateSE3Index, Point3DIndex, Vector3D, Vector3DIndex from py123d.geometry.utils.rotation_utils import ( + 
get_euler_array_from_rotation_matrices, get_rotation_matrices_from_euler_array, get_rotation_matrix_from_euler_array, normalize_angle, @@ -81,9 +82,13 @@ def convert_absolute_to_relative_euler_se3_array( # Convert absolute rotation matrices to relative rotation matrices abs_rotation_matrices = get_rotation_matrices_from_euler_array(se3_array[..., EulerStateSE3Index.EULER_ANGLES]) rel_rotation_matrices = np.einsum("ij,...jk->...ik", R_origin.T, abs_rotation_matrices) + if se3_array.shape[0] != 0: - rel_euler_angles = np.array([EulerAngles.from_rotation_matrix(R).array for R in rel_rotation_matrices]) - rel_se3_array[..., EulerStateSE3Index.EULER_ANGLES] = normalize_angle(rel_euler_angles) + # rel_euler_angles = np.array([EulerAngles.from_rotation_matrix(R).array for R in rel_rotation_matrices]) + # rel_se3_array[..., EulerStateSE3Index.EULER_ANGLES] = normalize_angle(rel_euler_angles) + rel_se3_array[..., EulerStateSE3Index.EULER_ANGLES] = get_euler_array_from_rotation_matrices( + rel_rotation_matrices + ) return rel_se3_array @@ -118,11 +123,10 @@ def convert_relative_to_absolute_euler_se3_array( # Convert relative rotation matrices to absolute rotation matrices rel_rotation_matrices = get_rotation_matrices_from_euler_array(se3_array[..., EulerStateSE3Index.EULER_ANGLES]) abs_rotation_matrices = np.einsum("ij,...jk->...ik", R_origin, rel_rotation_matrices) - if se3_array.shape[0] != 0: - abs_euler_angles = np.array([EulerAngles.from_rotation_matrix(R).array for R in abs_rotation_matrices]) - abs_se3_array[..., EulerStateSE3Index.EULER_ANGLES] = normalize_angle(abs_euler_angles) - + abs_se3_array[..., EulerStateSE3Index.EULER_ANGLES] = get_euler_array_from_rotation_matrices( + abs_rotation_matrices + ) return abs_se3_array diff --git a/src/py123d/geometry/utils/rotation_utils.py b/src/py123d/geometry/utils/rotation_utils.py index 6032a3ba..2429ffda 100644 --- a/src/py123d/geometry/utils/rotation_utils.py +++ b/src/py123d/geometry/utils/rotation_utils.py @@ -37,7 +37,7 @@ def get_rotation_matrices_from_euler_array(euler_angles_array: npt.NDArray[np.fl Convert Euler angles to rotation matrices using Tait-Bryan ZYX convention (yaw-pitch-roll). 
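[Editor's example; a hedged, self-contained sketch verifying the corrected convention stated here, not part of the library. It composes the elementary rotations in the stated Z-Y-X order, checks the result against the closed-form entries added further down in this file, and inverts it with the patched extraction formulas:

import numpy as np

roll, pitch, yaw = 0.1, -0.4, 2.0
cr, sr = np.cos(roll), np.sin(roll)
cp, sp = np.cos(pitch), np.sin(pitch)
cy, sy = np.cos(yaw), np.sin(yaw)

R = (
    np.array([[cy, -sy, 0], [sy, cy, 0], [0, 0, 1]])    # R_z(yaw)
    @ np.array([[cp, 0, sp], [0, 1, 0], [-sp, 0, cp]])  # R_y(pitch)
    @ np.array([[1, 0, 0], [0, cr, -sr], [0, sr, cr]])  # R_x(roll)
)

# Closed-form entries, matching the block comment introduced below.
R_expected = np.array([
    [cy * cp, cy * sp * sr - sy * cr, cy * sp * cr + sy * sr],
    [sy * cp, sy * sp * sr + cy * cr, sy * sp * cr - cy * sr],
    [-sp,     cp * sr,                cp * cr],
])
np.testing.assert_allclose(R, R_expected, atol=1e-12)

# Inverse mapping used by the patched get_euler_array_from_rotation_matrices.
np.testing.assert_allclose(
    [np.arctan2(R[2, 1], R[2, 2]),                # roll
     np.arcsin(np.clip(-R[2, 0], -1.0, 1.0)),     # pitch
     np.arctan2(R[1, 0], R[0, 0])],               # yaw
    [roll, pitch, yaw],
    atol=1e-12,
)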
Convention: Intrinsic rotations in order Z-Y-X (yaw, pitch, roll) - Equivalent to: R = R_x(roll) @ R_y(pitch) @ R_z(yaw) + Equivalent to: R = R_z(yaw) @ R_y(pitch) @ R_x(roll) """ assert euler_angles_array.ndim >= 1 and euler_angles_array.shape[-1] == len(EulerAnglesIndex) @@ -56,26 +56,32 @@ def get_rotation_matrices_from_euler_array(euler_angles_array: npt.NDArray[np.fl yaw = euler_angles_array_[:, EulerAnglesIndex.YAW] # Compute sin/cos for all angles at once - cos_roll, sin_roll = np.cos(roll), np.sin(roll) - cos_pitch, sin_pitch = np.cos(pitch), np.sin(pitch) - cos_yaw, sin_yaw = np.cos(yaw), np.sin(yaw) + # NOTE: (c/s = cos/sin, r/p/y = roll/pitch/yaw) + cr, sr = np.cos(roll), np.sin(roll) + cp, sp = np.cos(pitch), np.sin(pitch) + cy, sy = np.cos(yaw), np.sin(yaw) # Build rotation matrices for entire batch batch_size = euler_angles_array_.shape[0] rotation_matrices = np.zeros((batch_size, 3, 3), dtype=np.float64) + # Formula for ZYX Tait-Bryan rotation matrix: + # R = | cy*cp cy*sp*sr - sy*cr cy*sp*cr + sy*sr | + # | sy*cp sy*sp*sr + cy*cr sy*sp*cr - cy*sr | + # | -sp cp*sr cp*cr | + # ZYX Tait-Bryan rotation matrix elements - rotation_matrices[:, 0, 0] = cos_pitch * cos_yaw - rotation_matrices[:, 0, 1] = -cos_pitch * sin_yaw - rotation_matrices[:, 0, 2] = sin_pitch + rotation_matrices[:, 0, 0] = cy * cp + rotation_matrices[:, 1, 0] = sy * cp + rotation_matrices[:, 2, 0] = -sp - rotation_matrices[:, 1, 0] = sin_roll * sin_pitch * cos_yaw + cos_roll * sin_yaw - rotation_matrices[:, 1, 1] = -sin_roll * sin_pitch * sin_yaw + cos_roll * cos_yaw - rotation_matrices[:, 1, 2] = -sin_roll * cos_pitch + rotation_matrices[:, 0, 1] = cy * sp * sr - sy * cr + rotation_matrices[:, 1, 1] = sy * sp * sr + cy * cr + rotation_matrices[:, 2, 1] = cp * sr - rotation_matrices[:, 2, 0] = -cos_roll * sin_pitch * cos_yaw + sin_roll * sin_yaw - rotation_matrices[:, 2, 1] = cos_roll * sin_pitch * sin_yaw + sin_roll * cos_yaw - rotation_matrices[:, 2, 2] = cos_roll * cos_pitch + rotation_matrices[:, 0, 2] = cy * sp * cr + sy * sr + rotation_matrices[:, 1, 2] = sy * sp * cr - cy * sr + rotation_matrices[:, 2, 2] = cp * cr # Reshape back to original batch dimensions + (3, 3) if len(original_shape) > 1: @@ -90,7 +96,7 @@ def get_euler_array_from_rotation_matrices(rotation_matrices: npt.NDArray[np.flo :param rotation_matrices: Rotation matrices of shape (..., 3, 3) :return: Euler angles of shape (..., 3), indexed by EulerAnglesIndex """ - assert rotation_matrices.ndim == 3 and rotation_matrices.shape[-2:] == (3, 3) + assert rotation_matrices.ndim >= 2 and rotation_matrices.shape[-2:] == (3, 3) original_shape = rotation_matrices.shape[:-2] @@ -104,15 +110,15 @@ def get_euler_array_from_rotation_matrices(rotation_matrices: npt.NDArray[np.flo euler_angles = np.zeros((batch_size, len(EulerAnglesIndex)), dtype=np.float64) # Calculate yaw (rotation around Z-axis) - euler_angles[:, EulerAnglesIndex.YAW] = np.arctan2(-R[:, 0, 1], R[:, 0, 0]) + euler_angles[:, EulerAnglesIndex.YAW] = np.arctan2(R[:, 1, 0], R[:, 0, 0]) # Calculate pitch (rotation around Y-axis) # NOTE: Clip to avoid numerical issues with arcsin - sin_pitch = np.clip(R[:, 0, 2], -1.0, 1.0) + sin_pitch = np.clip(-R[:, 2, 0], -1.0, 1.0) euler_angles[:, EulerAnglesIndex.PITCH] = np.arcsin(sin_pitch) # Calculate roll (rotation around X-axis) - euler_angles[:, EulerAnglesIndex.ROLL] = np.arctan2(-R[:, 1, 2], R[:, 2, 2]) + euler_angles[:, EulerAnglesIndex.ROLL] = np.arctan2(R[:, 2, 1], R[:, 2, 2]) # Reshape back to original batch dimensions + (3,) if 
len(original_shape) > 1: @@ -127,18 +133,23 @@ def get_euler_array_from_rotation_matrix(rotation_matrix: npt.NDArray[np.float64 def get_quaternion_array_from_rotation_matrices(rotation_matrices: npt.NDArray[np.float64]) -> npt.NDArray[np.float64]: - assert rotation_matrices.ndim == 3 + assert rotation_matrices.ndim >= 2 assert rotation_matrices.shape[-1] == rotation_matrices.shape[-2] == 3 # http://www.euclideanspace.com/maths/geometry/rotations/conversions/matrixToQuaternion/ # TODO: Update with: # https://d3cw3dd2w32x2b.cloudfront.net/wp-content/uploads/2015/01/matrix-to-quat.pdf - N = rotation_matrices.shape[0] - quaternions = np.zeros((N, 4), dtype=np.float64) + original_shape = rotation_matrices.shape[:-2] # Extract rotation matrix elements for vectorized operations - R = rotation_matrices + if rotation_matrices.ndim > 3: + R = rotation_matrices.reshape(-1, 3, 3) + else: + R = rotation_matrices + + N = R.shape[0] + quaternions = np.zeros((N, 4), dtype=np.float64) # Compute trace for each matrix trace = np.trace(R, axis1=1, axis2=2) @@ -176,7 +187,14 @@ def get_quaternion_array_from_rotation_matrices(rotation_matrices: npt.NDArray[n quaternions[mask4, QuaternionIndex.QZ] = 0.25 * s4 # z assert np.all(mask1 | mask2 | mask3 | mask4), "All matrices should fall into one of the four cases." - return normalize_quaternion_array(quaternions) + + quaternions = normalize_quaternion_array(quaternions) + + # Reshape back to original batch dimensions + (4,) + if len(original_shape) > 1: + quaternions = quaternions.reshape(original_shape + (len(QuaternionIndex),)) + + return quaternions def get_quaternion_array_from_rotation_matrix(rotation_matrix: npt.NDArray[np.float64]) -> npt.NDArray[np.float64]: diff --git a/src/py123d/geometry/utils/test/test_rotation_utils.py b/src/py123d/geometry/utils/test/test_rotation_utils.py index 4c956b13..30b3515b 100644 --- a/src/py123d/geometry/utils/test/test_rotation_utils.py +++ b/src/py123d/geometry/utils/test/test_rotation_utils.py @@ -1,94 +1,788 @@ +from typing import Tuple import unittest import numpy as np import numpy.typing as npt - +from py123d.geometry.geometry_index import EulerAnglesIndex, QuaternionIndex from py123d.geometry.utils.rotation_utils import ( conjugate_quaternion_array, get_euler_array_from_quaternion_array, + get_euler_array_from_rotation_matrices, + get_euler_array_from_rotation_matrix, + get_q_bar_matrices, + get_q_matrices, + get_quaternion_array_from_euler_array, + get_quaternion_array_from_rotation_matrices, + get_quaternion_array_from_rotation_matrix, + get_rotation_matrices_from_euler_array, + get_rotation_matrices_from_quaternion_array, + get_rotation_matrix_from_euler_array, + get_rotation_matrix_from_quaternion_array, + invert_quaternion_array, + multiply_quaternion_arrays, + normalize_angle, + normalize_quaternion_array, ) -# TODO @DanielDauner: Add more tests for the remaining functions -# from py123d.geometry.utils.rotation_utils import ( -# conjugate_quaternion_array, -# get_euler_array_from_quaternion_array, -# get_euler_array_from_rotation_matrices, -# get_euler_array_from_rotation_matrix, -# get_q_bar_matrices, -# get_q_matrices, -# get_quaternion_array_from_euler_array, -# get_quaternion_array_from_rotation_matrices, -# get_quaternion_array_from_rotation_matrix, -# get_rotation_matrices_from_euler_array, -# get_rotation_matrices_from_quaternion_array, -# get_rotation_matrix_from_euler_array, -# get_rotation_matrix_from_quaternion_array, -# invert_quaternion_array, -# multiply_quaternion_arrays, -# normalize_angle, -# 
normalize_quaternion_array, -# ) - from pyquaternion import Quaternion as PyQuaternion +def _get_rotation_matrix_helper(euler_array: npt.NDArray[np.float64]) -> npt.NDArray[np.float64]: + """Helper function to ensure ZYX (Yaw-Pitch-Roll) intrinsic Euler angle convention, aka Tait-Bryan angles. + + :param euler_array: Array of Euler angles [roll, pitch, yaw] in radians. + :type euler_array: npt.NDArray[np.float64] + :return: Rotation matrix corresponding to the given Euler angles. + """ + + R_x = np.array( + [ + [1, 0, 0], + [0, np.cos(euler_array[EulerAnglesIndex.ROLL]), -np.sin(euler_array[EulerAnglesIndex.ROLL])], + [0, np.sin(euler_array[EulerAnglesIndex.ROLL]), np.cos(euler_array[EulerAnglesIndex.ROLL])], + ], + dtype=np.float64, + ) + R_y = np.array( + [ + [np.cos(euler_array[EulerAnglesIndex.PITCH]), 0, np.sin(euler_array[EulerAnglesIndex.PITCH])], + [0, 1, 0], + [-np.sin(euler_array[EulerAnglesIndex.PITCH]), 0, np.cos(euler_array[EulerAnglesIndex.PITCH])], + ], + dtype=np.float64, + ) + R_z = np.array( + [ + [np.cos(euler_array[EulerAnglesIndex.YAW]), -np.sin(euler_array[EulerAnglesIndex.YAW]), 0], + [np.sin(euler_array[EulerAnglesIndex.YAW]), np.cos(euler_array[EulerAnglesIndex.YAW]), 0], + [0, 0, 1], + ], + dtype=np.float64, + ) + return R_z @ R_y @ R_x + + class TestRotationUtils(unittest.TestCase): def setUp(self): pass def _get_random_quaternion(self) -> npt.NDArray[np.float64]: - random_quat: npt.NDArray[np.float64] = np.random.rand(4) - random_quat /= np.linalg.norm(random_quat) - return random_quat + return PyQuaternion.random().q def _get_random_quaternion_array(self, n: int) -> npt.NDArray[np.float64]: - random_quat_array: npt.NDArray[np.float64] = np.random.rand(n, 4) - random_quat_array /= np.linalg.norm(random_quat_array, axis=1)[:, np.newaxis] + random_quat_array = np.zeros((n, len(QuaternionIndex)), dtype=np.float64) + for i in range(n): + random_quat_array[i] = self._get_random_quaternion() return random_quat_array + def _get_random_euler_array(self, n: int) -> npt.NDArray[np.float64]: + random_euler_array: npt.NDArray[np.float64] = np.zeros((n, 3), dtype=np.float64) + for i in range(n): + random_euler_array[i] = PyQuaternion.random().yaw_pitch_roll[ + ::-1 + ] # Convert (yaw, pitch, roll) to (roll, pitch, yaw) + return random_euler_array + def test_conjugate_quaternion_array(self): + """Test the conjugate_quaternion_array function.""" + + def _test_by_shape(shape: Tuple[int, ...]) -> None: + for _ in range(10): + N = np.prod(shape) + random_quat_flat = self._get_random_quaternion_array(N) + + random_quat = random_quat_flat.reshape(shape + (len(QuaternionIndex),)) + conj_quat = conjugate_quaternion_array(random_quat) + + np.testing.assert_allclose( + conj_quat[..., QuaternionIndex.QW], + random_quat[..., QuaternionIndex.QW], + atol=1e-8, + ) + np.testing.assert_allclose( + conj_quat[..., QuaternionIndex.QX], + -random_quat[..., QuaternionIndex.QX], + atol=1e-8, + ) + np.testing.assert_allclose( + conj_quat[..., QuaternionIndex.QY], + -random_quat[..., QuaternionIndex.QY], + atol=1e-8, + ) + np.testing.assert_allclose( + conj_quat[..., QuaternionIndex.QZ], + -random_quat[..., QuaternionIndex.QZ], + atol=1e-8, + ) + + # Check if double conjugation returns original quaternion + double_conj_quat = conjugate_quaternion_array(conj_quat) + np.testing.assert_allclose( + double_conj_quat, + random_quat, + atol=1e-8, + ) + + # Test single-dim shape + _test_by_shape((1,)) + + # Test multi-dim shape + _test_by_shape((2, 3)) + _test_by_shape((1, 2, 3)) + + # Test zero-dim shape + 
_test_by_shape((0,)) + + # Test invalid input + with self.assertRaises(AssertionError): + invalid_quat = np.zeros((0,)) # Zero quaternion (invalid) + conjugate_quaternion_array(invalid_quat) + + with self.assertRaises(AssertionError): + invalid_quat = np.zeros((len(QuaternionIndex), 8)) # Zero quaternion (invalid) + conjugate_quaternion_array(invalid_quat) + + def test_get_euler_array_from_quaternion_array(self): + """Test the get_euler_array_from_quaternion_array function.""" + + def _test_by_shape(shape: Tuple[int, ...]) -> None: + for _ in range(10): + N = np.prod(shape) + random_quat_array_flat = self._get_random_quaternion_array(N) + random_quat_array = random_quat_array_flat.reshape(shape + (len(QuaternionIndex),)) + + # Convert to Euler angles using our function + euler_array = get_euler_array_from_quaternion_array(random_quat_array) + + euler_array_flat = euler_array.reshape((N, 3)) + # Test against pyquaternion results + for i, q in enumerate(random_quat_array_flat): + pyq = PyQuaternion(array=q) + # Convert to Euler angles using pyquaternion for comparison + yaw, pitch, roll = pyq.yaw_pitch_roll + euler_from_pyq = np.array([roll, pitch, yaw], dtype=np.float64) + + # Check if conversion is correct + np.testing.assert_allclose(euler_array_flat[i], euler_from_pyq, atol=1e-6) + + # Test single-dim shape + _test_by_shape((1,)) + + # Test multi-dim shape + _test_by_shape((2, 3)) + _test_by_shape((1, 2, 3)) + + # Test zero-dim shape + _test_by_shape((0,)) + + # Test invalid input + with self.assertRaises(AssertionError): + invalid_quat = np.zeros((0,)) # Zero quaternion (invalid) + get_euler_array_from_quaternion_array(invalid_quat) + + with self.assertRaises(AssertionError): + invalid_quat = np.zeros((len(QuaternionIndex), 8)) # Zero quaternion (invalid) + get_euler_array_from_quaternion_array(invalid_quat) + + def test_get_euler_array_from_rotation_matrices(self): + """Test the get_euler_array_from_rotation_matrices function.""" + + def _test_by_shape(shape: Tuple[int, ...]) -> None: + for _ in range(10): + N = np.prod(shape) + rotation_matrices_flat: npt.NDArray[np.float64] = np.zeros((N, 3, 3), dtype=np.float64) + for i in range(N): + random_euler = self._get_random_euler_array(1)[0] + rotation_matrices_flat[i] = _get_rotation_matrix_helper(random_euler) + + rotation_matrices = rotation_matrices_flat.reshape(shape + (3, 3)) + + # Convert to Euler angles using our function + euler_array = get_euler_array_from_rotation_matrices(rotation_matrices) + + # Test against helper function results + euler_array_flat = euler_array.reshape((N, 3)) + for i in range(N): + expected_rotation_matrix = _get_rotation_matrix_helper(euler_array_flat[i]) + np.testing.assert_allclose( + rotation_matrices_flat[i], + expected_rotation_matrix, + atol=1e-8, + ) + + # Test single-dim shape + _test_by_shape((1,)) + + # Test multi-dim shape + _test_by_shape((2, 1)) + + # Test zero-dim shape + _test_by_shape((0,)) + + # Test invalid input + with self.assertRaises(AssertionError): + invalid_rot = np.zeros((0, 3)) # (0, 3) rotation matrix shape (invalid) + get_euler_array_from_rotation_matrices(invalid_rot) + + with self.assertRaises(AssertionError): + invalid_rot = np.zeros((3, 3, 8)) # (3, 3, 8) rotation matrix shape (invalid) + get_euler_array_from_rotation_matrices(invalid_rot) + + def test_get_euler_array_from_rotation_matrix(self): + """Test the get_euler_array_from_rotation_matrix function.""" for _ in range(10): - random_quat = self._get_random_quaternion() - conj_quat = 
conjugate_quaternion_array(random_quat) + random_euler = self._get_random_euler_array(1)[0] + rotation_matrix = _get_rotation_matrix_helper(random_euler) + + # Convert to Euler angles using our function + euler_array = get_euler_array_from_rotation_matrix(rotation_matrix) - # Check if conjugation is correct + # Check if conversion is correct np.testing.assert_allclose( - conj_quat, - np.array([random_quat[0], -random_quat[1], -random_quat[2], -random_quat[3]]), + euler_array, + random_euler, atol=1e-8, ) - # Check if double conjugation returns original quaternion - double_conj_quat = conjugate_quaternion_array(conj_quat) + # Test invalid input + with self.assertRaises(AssertionError): + invalid_rot = np.zeros((3,)) # (0, 3) rotation matrix shape (invalid) + get_euler_array_from_rotation_matrix(invalid_rot) + + with self.assertRaises(AssertionError): + invalid_rot = np.zeros((3, 8)) # (3, 8) rotation matrix shape (invalid) + get_euler_array_from_rotation_matrix(invalid_rot) + + def test_get_q_bar_matrices(self): + """Test the get_q_bar_matrices function.""" + + def _test_by_shape(shape: Tuple[int, ...]) -> None: + for _ in range(10): + N = np.prod(shape) + random_quat_array_flat = self._get_random_quaternion_array(N) + random_quat_array = random_quat_array_flat.reshape(shape + (len(QuaternionIndex),)) + + # Compute Q_bar matrices using our function + q_bar_matrices = get_q_bar_matrices(random_quat_array) + + q_bar_matrices_flat = q_bar_matrices.reshape((N, 4, 4)) + + # Test against pyquaternion results + for i, q in enumerate(random_quat_array_flat): + expected_q_bar = PyQuaternion(array=q)._q_bar_matrix() + + # Check if Q_bar matrix is correct + np.testing.assert_allclose( + q_bar_matrices_flat[i], + expected_q_bar, + atol=1e-8, + ) + + # Test single-dim shape + _test_by_shape((1,)) + + # Test multi-dim shape + _test_by_shape((3, 2)) + _test_by_shape((1, 2)) + + # Test zero-dim shape + _test_by_shape((0,)) + + # Test invalid input + with self.assertRaises(AssertionError): + invalid_quat = np.zeros((0,)) # Zero quaternion (invalid) + get_q_bar_matrices(invalid_quat) + + with self.assertRaises(AssertionError): + invalid_quat = np.zeros((len(QuaternionIndex), 8)) # Zero quaternion (invalid) + get_q_bar_matrices(invalid_quat) + + def test_get_q_matrices(self): + + def _test_by_shape(shape: Tuple[int, ...]) -> None: + for _ in range(10): + N = np.prod(shape) + random_quat_array_flat = self._get_random_quaternion_array(N) + random_quat_array = random_quat_array_flat.reshape(shape + (len(QuaternionIndex),)) + + # Compute Q matrices using our function + q_matrices = get_q_matrices(random_quat_array) + + q_matrices_flat = q_matrices.reshape((N, 4, 4)) + + # Test against pyquaternion results + for i, q in enumerate(random_quat_array_flat): + expected_q = PyQuaternion(array=q)._q_matrix() + + # Check if Q matrix is correct + np.testing.assert_allclose( + q_matrices_flat[i], + expected_q, + atol=1e-8, + ) + + # Test single-dim shape + _test_by_shape((1,)) + + # Test multi-dim shape + _test_by_shape((3, 2)) + _test_by_shape((1, 2)) + + # Test zero-dim shape + _test_by_shape((0,)) + + # Test invalid input + with self.assertRaises(AssertionError): + invalid_quat = np.zeros((0,)) # Zero quaternion (invalid) + get_q_matrices(invalid_quat) + + with self.assertRaises(AssertionError): + invalid_quat = np.zeros((len(QuaternionIndex), 8)) # Zero quaternion (invalid) + get_q_matrices(invalid_quat) + + def test_get_quaternion_array_from_euler_array(self): + """test the get_quaternion_array_from_euler_array 
function.""" + + def _test_by_shape(shape: Tuple[int, ...]) -> None: + for _ in range(10): + N = np.prod(shape) + random_euler_array_flat = self._get_random_euler_array(N) + random_euler_array = random_euler_array_flat.reshape(shape + (3,)) + + # Convert to quaternion array using our function + quat_array = get_quaternion_array_from_euler_array(random_euler_array) + + quat_array_flat = quat_array.reshape((N, len(QuaternionIndex))) + + # Test against pyquaternion results + for i in range(N): + roll = random_euler_array_flat[i][EulerAnglesIndex.ROLL] + pitch = random_euler_array_flat[i][EulerAnglesIndex.PITCH] + yaw = random_euler_array_flat[i][EulerAnglesIndex.YAW] + + pyquaternion = ( + PyQuaternion(axis=[0, 0, 1], angle=yaw) + * PyQuaternion(axis=[0, 1, 0], angle=pitch) + * PyQuaternion(axis=[1, 0, 0], angle=roll) + ) + + expected_quat = pyquaternion.q + + # Check if conversion is correct + np.testing.assert_allclose( + quat_array_flat[i], + expected_quat, + atol=1e-8, + ) + + # Test single-dim shape + _test_by_shape((1,)) + + # Test multi-dim shape + _test_by_shape((3, 5)) + _test_by_shape((1, 0, 2)) + + # Test zero-dim shape + _test_by_shape((0,)) + + # Test invalid input + with self.assertRaises(AssertionError): + invalid_euler = np.zeros((0,)) # Zero euler angles (invalid) + get_quaternion_array_from_euler_array(invalid_euler) + + with self.assertRaises(AssertionError): + invalid_euler = np.zeros((3, 8)) # Zero euler angles (invalid) + get_quaternion_array_from_euler_array(invalid_euler) + + def test_get_quaternion_array_from_rotation_matrices(self): + + def _test_by_shape(shape: Tuple[int, ...]) -> None: + for _ in range(10): + N = np.prod(shape) + rotation_matrices_flat: npt.NDArray[np.float64] = np.zeros((N, 3, 3), dtype=np.float64) + for i in range(N): + random_euler = self._get_random_euler_array(1)[0] + rotation_matrices_flat[i] = _get_rotation_matrix_helper(random_euler) + + rotation_matrices = rotation_matrices_flat.reshape(shape + (3, 3)) + + # Convert to quaternion array using our function + quat_array = get_quaternion_array_from_rotation_matrices(rotation_matrices) + + quat_array_flat = quat_array.reshape((N, len(QuaternionIndex))) + + # Test against pyquaternion results + for i in range(N): + expected_quaternion = PyQuaternion(matrix=rotation_matrices_flat[i]).q + actual_quaternion = quat_array_flat[i] + + # Check if quaternions are equivalent (considering sign ambiguity) + # Quaternions q and -q represent the same rotation + np.testing.assert_equal( + np.allclose(actual_quaternion, expected_quaternion, atol=1e-8) + or np.allclose(actual_quaternion, -expected_quaternion, atol=1e-8), + True, + ) + + # Test single-dim shape + _test_by_shape((1,)) + + # Test multi-dim shape + _test_by_shape((3, 5)) + _test_by_shape((1, 0, 2)) + + # Test zero-dim shape + _test_by_shape((0,)) + + # Test invalid input + with self.assertRaises(AssertionError): + invalid_rot = np.zeros((0, 3)) # (0, 3) rotation matrix shape (invalid) + get_quaternion_array_from_rotation_matrices(invalid_rot) + + with self.assertRaises(AssertionError): + invalid_rot = np.zeros((3, 3, 8)) # (3, 3, 8) rotation matrix shape (invalid) + get_quaternion_array_from_rotation_matrices(invalid_rot) + + def test_get_quaternion_array_from_rotation_matrix(self): + """Test the get_quaternion_array_from_rotation_matrix function.""" + for _ in range(10): + random_euler = self._get_random_euler_array(1)[0] + rotation_matrix = _get_rotation_matrix_helper(random_euler) + + # Convert to quaternion array using our function + quat_array 
= get_quaternion_array_from_rotation_matrix(rotation_matrix) + + expected_quaternion = PyQuaternion(matrix=rotation_matrix).q + actual_quaternion = quat_array + + # Check if quaternions are equivalent (considering sign ambiguity) + # Quaternions q and -q represent the same rotation + np.testing.assert_equal( + np.allclose(actual_quaternion, expected_quaternion, atol=1e-8) + or np.allclose(actual_quaternion, -expected_quaternion, atol=1e-8), + True, + ) + + # Test invalid input + with self.assertRaises(AssertionError): + invalid_rot = np.zeros((3,)) # (0, 3) rotation matrix shape (invalid) + get_quaternion_array_from_rotation_matrix(invalid_rot) + + with self.assertRaises(AssertionError): + invalid_rot = np.zeros((3, 8)) # (3, 8) rotation matrix shape (invalid) + get_quaternion_array_from_rotation_matrix(invalid_rot) + + def test_normalize_quaternion_array(self): + """Test the normalize_quaternion_array function.""" + + def _test_by_shape(shape: Tuple[int, ...]) -> None: + for _ in range(10): + N = np.prod(shape) + scale = np.random.uniform(0.1, 10.0) + random_quat_array_flat = self._get_random_quaternion_array(N) * scale # Scale to ensure non-unit norm + random_quat_array = random_quat_array_flat.reshape(shape + (len(QuaternionIndex),)) + + # Normalize using our function + normalized_quat_array = normalize_quaternion_array(random_quat_array) + + normalized_quat_array_flat = normalized_quat_array.reshape((N, len(QuaternionIndex))) + + # Check if each quaternion is normalized + for i in range(N): + norm = np.linalg.norm(normalized_quat_array_flat[i]) + np.testing.assert_allclose(norm, 1.0, atol=1e-8) + + # Test single-dim shape + _test_by_shape((1,)) + + # Test multi-dim shape + _test_by_shape((2, 3)) + _test_by_shape((1, 5, 3)) + + # Test zero-dim shape + _test_by_shape((0,)) + + # Test invalid input + with self.assertRaises(AssertionError): + invalid_quat = np.zeros((0,)) # Zero quaternion (invalid) + normalize_quaternion_array(invalid_quat) + + with self.assertRaises(AssertionError): + invalid_quat = np.zeros((len(QuaternionIndex), 8)) # Zero quaternion (invalid) + normalize_quaternion_array(invalid_quat) + + def test_get_rotation_matrices_from_euler_array(self): + """Test the get_rotation_matrices_from_euler_array function.""" + + def _test_by_shape(shape: Tuple[int, ...]) -> None: + for _ in range(10): + N = np.prod(shape) + random_euler_array_flat = self._get_random_euler_array(N) + random_euler_array = random_euler_array_flat.reshape(shape + (3,)) + + # Convert to rotation matrices using our function + rotation_matrices = get_rotation_matrices_from_euler_array(random_euler_array) + + rotation_matrices_flat = rotation_matrices.reshape((N, 3, 3)) + + # Test against helper function results + for i in range(N): + expected_rotation_matrix = _get_rotation_matrix_helper(random_euler_array_flat[i]) + np.testing.assert_allclose( + rotation_matrices_flat[i], + expected_rotation_matrix, + atol=1e-8, + ) + + # Test single-dim shape + _test_by_shape((1,)) + + # Test multi-dim shape + _test_by_shape((2, 1)) + + # Test zero-dim shape + _test_by_shape((0,)) + + # Test invalid input + with self.assertRaises(AssertionError): + invalid_euler = np.zeros((0, 5)) # Zero euler angles (invalid) + get_rotation_matrices_from_euler_array(invalid_euler) + + with self.assertRaises(AssertionError): + invalid_euler = np.zeros((3, 8)) # Zero euler angles (invalid) + get_rotation_matrices_from_euler_array(invalid_euler) + + def test_get_rotation_matrices_from_quaternion_array(self): + """Test the 
get_rotation_matrices_from_quaternion_array function.""" + + def _test_by_shape(shape: Tuple[int, ...]) -> None: + for _ in range(10): + N = np.prod(shape) + random_quat_array_flat = self._get_random_quaternion_array(N) + random_quat_array = random_quat_array_flat.reshape(shape + (len(QuaternionIndex),)) + + # Convert to rotation matrices using our function + rotation_matrices = get_rotation_matrices_from_quaternion_array(random_quat_array) + + rotation_matrices_flat = rotation_matrices.reshape((N, 3, 3)) + + # Test against pyquaternion results + for i, q in enumerate(random_quat_array_flat): + expected_rotation_matrix = PyQuaternion(array=q).rotation_matrix + + # Check if rotation matrix is correct + np.testing.assert_allclose( + rotation_matrices_flat[i], + expected_rotation_matrix, + atol=1e-8, + ) + + # Test single-dim shape + _test_by_shape((1,)) + + # Test multi-dim shape + _test_by_shape((2, 3)) + _test_by_shape((1, 5, 3)) + + # Test zero-dim shape + _test_by_shape((0,)) + + # Test invalid input + with self.assertRaises(AssertionError): + invalid_quat = np.zeros((0,)) # Zero quaternion (invalid) + get_rotation_matrices_from_quaternion_array(invalid_quat) + + with self.assertRaises(AssertionError): + invalid_quat = np.zeros((len(QuaternionIndex), 8)) # Zero quaternion (invalid) + get_rotation_matrices_from_quaternion_array(invalid_quat) + + def test_get_rotation_matrix_from_euler_array(self): + """Test the get_rotation_matrix_from_euler_array function.""" + for _ in range(10): + random_euler = self._get_random_euler_array(1)[0] + + # Convert to rotation matrix using our function + rotation_matrix = get_rotation_matrix_from_euler_array(random_euler) + + expected_rotation_matrix = _get_rotation_matrix_helper(random_euler) + + # Check if conversion is correct np.testing.assert_allclose( - double_conj_quat, - random_quat, + rotation_matrix, + expected_rotation_matrix, atol=1e-8, ) - def test_get_euler_array_from_quaternion_array(self): + # Test invalid input + with self.assertRaises(AssertionError): + invalid_euler = np.zeros((0,)) # Zero euler angles (invalid) + get_rotation_matrix_from_euler_array(invalid_euler) + + with self.assertRaises(AssertionError): + invalid_euler = np.zeros((8,)) # Zero euler angles (invalid) + get_rotation_matrix_from_euler_array(invalid_euler) + + def test_get_rotation_matrix_from_quaternion_array(self): + """Test the get_rotation_matrix_from_quaternion_array function.""" for _ in range(10): - random_quat_array = self._get_random_quaternion_array(np.random.randint(0, 10)) - pyquaternions = [PyQuaternion(array=q) for q in random_quat_array] + random_quat = self._get_random_quaternion() - # Convert to Euler angles using our function - euler_array = get_euler_array_from_quaternion_array(random_quat_array) + # Convert to rotation matrix using our function + rotation_matrix = get_rotation_matrix_from_quaternion_array(random_quat) - # Test against pyquaternion results - for i, pyq in enumerate(pyquaternions): - # Convert to Euler angles using pyquaternion for comparison - yaw, pitch, roll = pyq.yaw_pitch_roll - euler_from_pyq = np.array([roll, pitch, yaw], dtype=np.float64) + expected_rotation_matrix = PyQuaternion(array=random_quat).rotation_matrix - # Check if conversion is correct - np.testing.assert_allclose( - euler_array[i], - euler_from_pyq, - atol=1e-6, - ) + # Check if conversion is correct + np.testing.assert_allclose( + rotation_matrix, + expected_rotation_matrix, + atol=1e-8, + ) + + # Test invalid input + with self.assertRaises(AssertionError): + 
invalid_quat = np.zeros((0,)) # Zero quaternion (invalid) + get_rotation_matrix_from_quaternion_array(invalid_quat) + + with self.assertRaises(AssertionError): + invalid_quat = np.zeros((8,)) # Zero quaternion (invalid) + get_rotation_matrix_from_quaternion_array(invalid_quat) + + def test_invert_quaternion_array(self): + """Test the invert_quaternion_array function.""" + + def _test_by_shape(shape: Tuple[int, ...]) -> None: + for _ in range(10): + N = np.prod(shape) + random_quat_array_flat = self._get_random_quaternion_array(N) + random_quat_array = random_quat_array_flat.reshape(shape + (len(QuaternionIndex),)) + + # Invert using our function + inverted_quat_array = invert_quaternion_array(random_quat_array) + + inverted_quat_array_flat = inverted_quat_array.reshape((N, len(QuaternionIndex))) + + # Test against pyquaternion results + for i, q in enumerate(random_quat_array_flat): + pyq = PyQuaternion(array=q) + expected_inverse = pyq.inverse.q + + # Check if inversion is correct + np.testing.assert_allclose( + inverted_quat_array_flat[i], + expected_inverse, + atol=1e-8, + ) + + # Test single-dim shape + _test_by_shape((1,)) + + # Test multi-dim shape + _test_by_shape((2, 3)) + _test_by_shape((1, 5, 3)) + + # Test zero-dim shape + _test_by_shape((0,)) + + # Test invalid input + with self.assertRaises(AssertionError): + invalid_quat = np.zeros((0,)) # Zero quaternion (invalid) + invert_quaternion_array(invalid_quat) + + with self.assertRaises(AssertionError): + invalid_quat = np.zeros((len(QuaternionIndex), 8)) # Zero quaternion (invalid) + invert_quaternion_array(invalid_quat) + + def test_multiply_quaternion_arrays(self): + """Test the multiply_quaternion_arrays function.""" + + def _test_by_shape(shape: Tuple[int, ...]) -> None: + for _ in range(10): + N = np.prod(shape) + quat_array1_flat = self._get_random_quaternion_array(N) + quat_array2_flat = self._get_random_quaternion_array(N) + + quat_array1 = quat_array1_flat.reshape(shape + (len(QuaternionIndex),)) + quat_array2 = quat_array2_flat.reshape(shape + (len(QuaternionIndex),)) + + # Multiply using our function + multiplied_quat_array = multiply_quaternion_arrays(quat_array1, quat_array2) + + multiplied_quat_array_flat = multiplied_quat_array.reshape((N, len(QuaternionIndex))) + + # Test against pyquaternion results + for i in range(N): + pyq1 = PyQuaternion(array=quat_array1_flat[i]) + pyq2 = PyQuaternion(array=quat_array2_flat[i]) + expected_product = (pyq1 * pyq2).q + + # Check if multiplication is correct + np.testing.assert_allclose( + multiplied_quat_array_flat[i], + expected_product, + atol=1e-8, + ) + + # Test single-dim shape + _test_by_shape((1,)) + + # Test multi-dim shape + _test_by_shape((2, 3)) + _test_by_shape((1, 5, 3)) + + # Test zero-dim shape + _test_by_shape((0,)) + + # Test invalid input + with self.assertRaises(AssertionError): + invalid_quat1 = np.zeros((0,)) # Zero quaternion (invalid) + invalid_quat2 = np.zeros((0,)) # Zero quaternion (invalid) + multiply_quaternion_arrays(invalid_quat1, invalid_quat2) + + with self.assertRaises(AssertionError): + invalid_quat1 = np.zeros((len(QuaternionIndex), 8)) # Zero quaternion (invalid) + invalid_quat2 = np.zeros((len(QuaternionIndex), 4)) # Zero quaternion (invalid) + multiply_quaternion_arrays(invalid_quat1, invalid_quat2) + + def test_normalize_angle(self): + """Test the normalize_angle function.""" + + def _test_by_shape(shape: Tuple[int, ...]) -> None: + for _ in range(10): + N = np.prod(shape) + random_angles_flat = np.random.uniform(-10 * np.pi, 10 * np.pi, 
size=N) + random_angles = random_angles_flat.reshape(shape) + + # Normalize using our function + normalized_angles = normalize_angle(random_angles) + + normalized_angles_flat = normalized_angles.reshape((N,)) + + # Check if each angle is within [-pi, pi] + for i in range(N): + angle = normalized_angles_flat[i] + self.assertGreaterEqual(angle, -np.pi - 1e-8) + self.assertLessEqual(angle, np.pi + 1e-8) + + # Test single-dim shape + _test_by_shape((1,)) + + # Test multi-dim shape + _test_by_shape((2, 3)) + _test_by_shape((1, 5, 3)) + + # Test zero-dim shape + _test_by_shape((0,)) + + # Test float + with self.subTest("Test float input"): + angle = 4 * np.pi + normalized_angle = normalize_angle(angle) + self.assertGreaterEqual(normalized_angle, -np.pi - 1e-8) + self.assertLessEqual(normalized_angle, np.pi + 1e-8) if __name__ == "__main__": diff --git a/src/py123d/visualization/viser/elements/sensor_elements.py b/src/py123d/visualization/viser/elements/sensor_elements.py index d6901077..2f3a9c15 100644 --- a/src/py123d/visualization/viser/elements/sensor_elements.py +++ b/src/py123d/visualization/viser/elements/sensor_elements.py @@ -6,7 +6,6 @@ import numpy.typing as npt import viser -from py123d.common.utils.timer import Timer from py123d.visualization.viser.viser_config import ViserConfig from py123d.datatypes.scene.abstract_scene import AbstractScene from py123d.datatypes.sensors.camera.pinhole_camera import PinholeCamera, PinholeCameraType @@ -107,12 +106,9 @@ def add_lidar_pc_to_viser_server( ) -> None: if viser_config.lidar_visible: - timer = Timer() - timer.start() scene_center_array = initial_ego_state.center.point_3d.array ego_pose = scene.get_ego_state_at_iteration(scene_interation).rear_axle_se3.array ego_pose[StateSE3Index.XYZ] -= scene_center_array - timer.log("1. prepare ego pose") def _load_lidar_points(lidar_type: LiDARType) -> npt.NDArray[np.float32]: lidar = scene.get_lidar_at_iteration(scene_interation, lidar_type) @@ -129,13 +125,23 @@ def _load_lidar_points(lidar_type: LiDARType) -> npt.NDArray[np.float32]: for future in concurrent.futures.as_completed(future_to_lidar): lidar_points_3d_list.append(future.result()) - timer.log("2. load lidar points") points_3d_local = ( np.concatenate(lidar_points_3d_list, axis=0) if lidar_points_3d_list else np.zeros((0, 3), dtype=np.float32) ) points = convert_relative_to_absolute_points_3d_array(ego_pose, points_3d_local) colors = np.zeros_like(points) - timer.log("3. convert lidar points") + + # # TODO: remove: + # lidar = scene.get_lidar_at_iteration(scene_interation, LiDARType.LIDAR_TOP) + # lidar_extrinsic = convert_relative_to_absolute_se3_array( + # origin=ego_pose, se3_array=lidar.metadata.extrinsic.array + # ) + + # viser_server.scene.add_frame( + # "lidar_frame", + # position=lidar_extrinsic[StateSE3Index.XYZ], + # wxyz=lidar_extrinsic[StateSE3Index.QUATERNION], + # ) if lidar_pc_handle is not None: lidar_pc_handle.points = points @@ -148,8 +154,6 @@ def _load_lidar_points(lidar_type: LiDARType) -> npt.NDArray[np.float32]: point_size=viser_config.lidar_point_size, point_shape=viser_config.lidar_point_shape, ) - timer.log("4. 
add lidar to viser server") - timer.end() def _get_camera_values( diff --git a/src/py123d/visualization/viser/viser_config.py b/src/py123d/visualization/viser/viser_config.py index 3d48803b..f92d53c7 100644 --- a/src/py123d/visualization/viser/viser_config.py +++ b/src/py123d/visualization/viser/viser_config.py @@ -51,7 +51,7 @@ class ViserConfig: # Bounding boxes bounding_box_visible: bool = True - bounding_box_type: Literal["mesh", "lines"] = "mesh" + bounding_box_type: Literal["mesh", "lines"] = "lines" bounding_box_line_width: float = 4.0 # Cameras From 91b65a1b8f561657694abbfd4cb19870f2973b2c Mon Sep 17 00:00:00 2001 From: Daniel Dauner Date: Mon, 20 Oct 2025 18:19:42 +0200 Subject: [PATCH 102/145] Run pre-commit manually. --- README.md | 1 - docs/installation.md | 2 +- notebooks/bev_matplotlib.ipynb | 41 ++++--------------- scripts/conversion/av2_sensor_conversion.sh | 2 +- scripts/conversion/nuplan_mini_conversion.sh | 2 +- scripts/conversion/pandaset_conversion.sh | 2 +- .../conversion/dataset_converter_config.py | 2 +- .../conversion/log_writer/arrow_log_writer.py | 2 +- .../log_writer/utils/lidar_compression.py | 2 +- .../opendrive/opendrive_map_conversion.py | 1 - .../map_utils/road_edge/road_edge_2d_utils.py | 1 - .../map_utils/road_edge/road_edge_3d_utils.py | 2 +- .../scene/arrow/utils/arrow_getters.py | 3 +- .../transform/test/test_transform_se3.py | 11 +---- .../geometry/transform/transform_euler_se3.py | 1 - .../utils/test/test_rotation_utils.py | 6 +-- .../config/common/default_dataset_paths.yaml | 2 +- .../script/config/viser/default_viser.yaml | 2 +- src/py123d/script/run_viser.py | 2 +- src/py123d/script/utils/dataset_path_utils.py | 3 -- src/py123d/visualization/color/default.py | 6 +-- src/py123d/visualization/matplotlib/camera.py | 2 +- .../visualization/matplotlib/observation.py | 18 ++++---- src/py123d/visualization/matplotlib/plots.py | 2 +- src/py123d/visualization/matplotlib/utils.py | 2 +- .../viser/elements/detection_elements.py | 4 +- .../viser/elements/map_elements.py | 5 +-- .../viser/elements/sensor_elements.py | 2 +- .../visualization/viser/viser_viewer.py | 10 ++--- test_viser.py | 3 +- 30 files changed, 49 insertions(+), 95 deletions(-) diff --git a/README.md b/README.md index 192d794a..7f1d75db 100644 --- a/README.md +++ b/README.md @@ -3,4 +3,3 @@

      123D: One Library for 2D and 3D Driving Dataset

      - diff --git a/docs/installation.md b/docs/installation.md index 5b96008c..6d45b57b 100644 --- a/docs/installation.md +++ b/docs/installation.md @@ -41,4 +41,4 @@ export CARLA_SIMULATOR_ROOT="$HOME/carla_workspace/carla_garage/carla" # nuPlan export NUPLAN_DATA_ROOT="/path/to/nuplan/dataset" -export NUPLAN_MAPS_ROOT="/path/to/nuplan/dataset/maps" --> \ No newline at end of file +export NUPLAN_MAPS_ROOT="/path/to/nuplan/dataset/maps" --> diff --git a/notebooks/bev_matplotlib.ipynb b/notebooks/bev_matplotlib.ipynb index 3b13b455..7fb87a18 100644 --- a/notebooks/bev_matplotlib.ipynb +++ b/notebooks/bev_matplotlib.ipynb @@ -2,18 +2,10 @@ "cells": [ { "cell_type": "code", - "execution_count": 1, + "execution_count": null, "id": "0", "metadata": {}, - "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "Dataset paths not set. Using default config: /home/daniel/py123d_workspace/py123d/src/py123d/script/config/common/default_dataset_paths.yaml\n" - ] - } - ], + "outputs": [], "source": [ "from py123d.datatypes.scene.arrow.arrow_scene_builder import ArrowSceneBuilder\n", "from py123d.datatypes.scene.scene_filter import SceneFilter\n", @@ -24,7 +16,7 @@ }, { "cell_type": "code", - "execution_count": 2, + "execution_count": null, "id": "1", "metadata": {}, "outputs": [], @@ -39,18 +31,10 @@ }, { "cell_type": "code", - "execution_count": 3, + "execution_count": null, "id": "2", "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Found 82 scenes\n" - ] - } - ], + "outputs": [], "source": [ "\n", "# splits = [\"wopd_val\"]\n", @@ -85,21 +69,10 @@ }, { "cell_type": "code", - "execution_count": 4, + "execution_count": null, "id": "3", "metadata": {}, - "outputs": [ - { - "data": { - "image/png": 
"iVBORw0KGgoAAAANSUhEUgAABMkAAAGBCAYAAAB8Y+YyAAAAOnRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjEwLjcsIGh0dHBzOi8vbWF0cGxvdGxpYi5vcmcvTLEjVAAAAAlwSFlzAAAPYQAAD2EBqD+naQABAABJREFUeJzs3WVYlGnfx/EvHVImGNjdrYiAdGN3d2Bix+rummt3K9itIC2N3d2NBSYpPfO88F7umwcDFRji/BzHvGCu87zmN7PrxP86Q04qlUoRBEEQBEEQBEEQBEEQhCJMXtYBBEEQBEEQBEEQBEEQBEHWRJFMEARBEARBEARBEARBKPJEkUwQBEEQBEEQBEEQBEEo8kSRTBAEQRAEQRAEQRAEQSjyRJFMEARBEARBEARBEARBKPJEkUwQBEEQBEEQBEEQBEEo8kSRTBAEQRAEQRAEQRAEQSjyFGUdIKdJJBJev36NpqYmcnJyso4jCIJQ4EmlUuLi4ihXrhzy8uLaCojPGkEQhJwkPmeyEp8zgiAIOSu7nzWFrkj2+vVrFvgnAUmyjpLFm4eX8VzeH0laKlKpRNZxBEEQfsqLFy+oUKGCrGPkC69fv0ZfX1/WMQShyJGTV6CYThk6TjuARomyREc9Y99MSwDsxm2lUgOTr/ZLTU7Ea8VAop5cRSoR38HyK/E581/ic0YQhF8hJ69AiXI1aD9lLyrqmgRuceHheU/k5OQpXak+ejWacTNo55d6hFQq67gy8aPPmkJXJNPU1CQ/FsjePruJ75qhtGzejKNHj6Curi7rSIIgCNkSGxuLvr7+f95fBSDjtXjx4gVaWloyTiMIhd/79++xtbXjzftYnCbvQaNEWQCeXQtEQUkFjeJ6XPXZiJKKGnrVmyEvr5DRNz01mRPrRxH96i5+vr60bt1aVk9D+AbxOZOV+JwRBOFnPHv2DCtrG9IVNHCYuAMV9S/vIa07T6ZMlYZUa25HdORTfFYPxtrKkt27d6OkpCTj1Hkru581ha5Ilh+HI394eR+flYNoWL8O/v5+4guAIAgFUn58f5WVf18LLS0t8eNFEHJZdHQ0nTp34VXUBxwn70Gr9H9H1zy9FkSFuoZUa2bDuSNL8FjcG1WN4lgOX0mFOm1IT0slYPN4Ih9dxMfbG3Nzcxk+E+FHxOfMf4nPGUEQsuv+/fs4ODqRggqOE9xQ0yyRcUyjRFkaWgwg8vFV/NYOw9ioLceOHUNVVVWGiWXrR581YtJ/LouOfIrPyoHUqFYZfz9RIBMEQchNGzZsoGHDhhk/KgwMDPD19c04npSUhLOzMyVLlkRDQ4POnTsTFRUlw8SCIHxPXFwc1jY2PHj0FPsJbhTXq5px7HPsByIfXUa3amOSEqJR1y4DgCQ9DakkHYkknZDtk3lxK4yjR46IApkgCIJQ6GzYsIHatWvz6tUrVDVKoKxaLEub9xF38F09hObNmnLcw6NIF8iyQxTJclHs+5d4r+hPeb1SBAacoHjx4rKOJAiCUKhVqFCBRYsWcfnyZS5duoSZmRnt27fn9u3bAEyYMAFPT08OHTpEWFgYr1+/plOnTjJOLQjC13z+/Bl7B0du3LyD3fjtlKxQK9PxiBshIJVy4dhyzh1ZgmbJcliNXEP/5WepUMeQsJ2zeHzJl/379mFvby+jZyEIgiAIuWPVqlWMGjWKGq2cUFZR5+2zm3gu70/y59iMNp9eP8J75UDq1q6Jj7cXxYplLaIJmRW66Zb5RfynSLyX96OktjohwUGULl1a1pEEQRAKPUdHx0x/z58/nw0bNnDu3DkqVKjAtm3b2Lt3L2ZmZgC4urpSp04dzp07J9YpEoR8JDk5mQ4dO3L+/AXsxrtSpnKDLG306xtTrbkdFeq2oVozG1SKaQNfdq86tW8u908fYefOnXTu3Dmv4wuCIAhCrlq6dCmTJ0+mkfVgDLpM5d2zm3gt78+HF3fxXNIHexc3UhLj8FrRn8r6ZTnh74e2trasYxcIokiWCz7HvMd7eX+KKUNIcBBly5aVdSRBEIQiJz09nUOHDpGQkICBgQGXL18mNTUVCwuLjDa1a9emYsWKnD179rtFsuTkZJKTkzP+jo2N/WZbQRB+T2pqKt269yAkJAzbsZspW6PZV9sV0ymD1YhVme6TSqWcO7KEW8G72LRpE3369MmLyIIgCIKQZxYsWMDMmTNpajeSlh0nICcnR5kqDbEbvx3vFQOIjnrC8cW9SE9NQrekNkGBgZQsWVLWsQuMQjndUirDrUyT4qPxXjkARclnQkOCqVSpksyyCIIgFEU3b95EQ0MDFRUVRowYwbFjx6hbty6RkZEoKyujo6OTqb2uri6RkZHfPefChQvR1tbOuOnr63+3vSAIvyY9PZ1+/frj7e2N1cg1VKjT5qf6X/ZayzW/LaxYsYJhw4blUkpBEARByHtSqZQ///yTmTNn0txpbEaB7F961ZpgO3YryMmTGBOJlroiIcFB6OnpyTB1wVMoi2Thu/5AKpHk+eOmJMbhs3ow6QnvCQ4KpHr16nmeQRAEoairVasW165d4/z584wcOZL+/ftz586d3zrn9OnTiYmJybi9ePEih9IKgvAviUTC0KFDOXDwAOZDllGpoelP9b/mv42LHquZP38+48ePz52QgiAIgiADUqmUWbNm8ddff9Giw3iaOzh/dZdGVQ0dlFWL0aRJE549fSIu7P6CQjnd8k74AdJSkjAduAh5hbx5iqnJn/FdM4zP758TFhpCvXr18uRxBUEQhMyUlZUzLlI0a9aMixcvsmrVKrp3705KSgrR0dGZRpNFRUX98AqbiooKKioquRlbEIq8wMBAXF1dad15MtWa2/5U31shezh7aBHTp09nxowZuZRQEARBEPKeVCplypQpLF26FIOuUzl76B8uuq9kxJYHmQplH17cw3vFAKpUKof7saPIyxfKMVG5rtC+ao8veRO4eTzpaSm5/lhpqcn4rxtF9Kt7nPD3o0mTJrn+mIIgCEL2SCQSkpOTadasGUpKSgQFBWUcu3//PhERERgYGMgwoSAIAA0bNqRqtercDtnFp8gn2e537/RRTu75k7FjxzJ//vxcTCgIgiAIeUsqlTJ+/HiWLl2KYY9ZVGpklnHsRqAbSfGfAHgXcRvPZX2pXrUioSEhYuPA31Boi2R7du8m4mYIJ9Y7k5aa/OMOvyg9LYWAjWN4+/gyPt5eYnc0QRAEGZo+fTrh4eE8e/aMmzdvMn36dEJDQ+nduzfa2toMHjwYFxcXQkJCuHz5MgMHDsTAwEC8dwtCPqCnp8epk+FU0C2J55LefHhx74d9Hl3wJnTHdIYMGcLKlSu/OvVEEARBEAoiiUSCs7Mzq1evxqj3nzS06M+j814Zx88cWID7Pz15+/QGXsv6Ubd2dUKCg8Qi/b+p0BbJbG1t8fbyIvLBeXxXDyU1KSHHH0OSnkbQ1km8vHMKd/djmJiY5PhjCIIgCNn39u1b+vXrR61atTA3N+fixYv4+/tjaWkJwIoVK3BwcKBz584YGxujp6fH0aNHZZxaEIomHx8f+g8YSE
RERMZ9ZcuWJTw8lBpVK+K5rA9vn938Zv9n14II2jaJXj17sXHjxiJdIDt79ixjxozh1atXso4iCIIg5ACJRMLw4cPZuHEj7fovoL5pbwDePLwEgKKyKnrVm1HDoAPeKwbQqEFdggIDKV68uCxjFwpyUlluBZkLYmNj0dbWJiYmBi0tLcLDw7Gzs0erbE1sx25FRV0zRx5HKpEQ4jqNRxeOc/jwYTp06JAj5xUEQchv/v/7qiBeE0H4XQEBAdg7OJCeLqVcubKEhgRTrVq1jOPR0dHY2Npy/cZtbMZsoWyNZpn6v7h9Cr+1w3FydOTAgf0oKhbKZXaz5erVqxibtCMhPg79ipUICgwocJtHiffUrMRrIghFV3p6OoMHD2Hnzh20G7CI2oadMo7FfXjFu2e30K9vxPuIO/iuHkLzZk3x9fFGUzNnah2FVXbfVwvtSLJ/GRsbExwcRMK7J3gv75cxZ/d3SKVSwvf8yYNz7uzatUsUyARBEARBELLp5MmTODm1p1xtA3rO8+dzmiKGbY24e/duRhsdHR0CAwJo2aIZPisH8vLumYxjrx9cxH/9SCwtzNm3b2+RLpDdv38fSytripWuTLe/vIlPkaeNYVtu3Lgh62iCIAjCLxoxYgQ7d+3EbMjSTAUyAM2S5anazJq3T2/is3IQrVu1xN/PVxTIclChL5IBtGzZkvCwUNLio/Bc2ofPMe9/+VxSqZSzh/7hTtg+tm7dSs+ePXMwqSAIgiAIQuF14cIFbO3sKVWlMVYj1qJVWh/HSXtIV9LCyNiEa9euZbTV0NDAz9cHU1MTfFcP4/mNEKKeXMdvzTAM27ThyJEjKCsry+7JyNiVK1do3doAiZImdmO3UqJcDZym7EVevRRGxiacPXtW1hEFQRCEX3Dl2nU0i+tSofbXN5Z6cfsUvmuGYGRkiI+3F8WKFcvjhIXbLxfJwsPDcXR0pFy5csjJyeHu7p7puFQqZfbs2ZQtWxY1NTUsLCx4+PDhD8+7bt06KleujKqqKq1ateLChQu/GjGTRo0acTI8DMX0eDyX9iL+45tfOs/F46u5fmIba9asYdCgQTmSTRAEQRAEobC7fv06VtY2aOnVwMZ5A4rKqgCoa5fCceJuFDX1MGlnyvnz5zP6qKmp4eHujoO9HX7rRuGzchBNGjfA87gHampqsnoqMvfo0SPaGBoRHf0JqRTS01IBUNMsib3LLjR1a2BubkFAQICMkwqCIAg/6+D+fRRTkcdreX8+x37IdOz5zTD81o7AwswUL09P1NXVZZSy8PrlIllCQgKNGjVi3bp1Xz2+ePFiVq9ezcaNGzl//jzFihXD2tqapKSkb57zwIEDuLi4MGfOHK5cuUKjRo2wtrbm7du3vxozkzp16nDqZDjFlCQcX9KL2HcRP+70P676buKy51oWLVrE6NGjcySTIAiCIAhCYXf37l3MLSxR1SmP7ditKKlmvuqtqqGD/YQdaJSphrm5BeHh4RnHVFRUOHToIMOGDsHK0gw/X180NDTy+inkG2/evKFxk6ZIkMNqxGpSkxNw/6cHse9eAKCirontuG2UqdESO3t7jhw5IuPEgiAIwo+EhIRkXNyoVq0aoSHBKKTF4b2iP4lxH4EvG9acWD8KGxtrjh07hqqqqoxTF045snC/nJwcx44dy1ibSyqVUq5cOSZOnMikSZMAiImJQVdXFzc3N3r06PHV87Rq1YoWLVqwdu1a4MuODvr6+owZM4Zp06ZlK0t2FmN78eIFpqZmvP0Uj/0EN4qXrfbVdv/rZtBOTu2byx9//MHff/+drSyCIAiFgVg8OCvxmghC9j1+/Ji2RsakKWjgOGk3qhrf3nkrNfkz/utG8e7JFTw83LGyssrDpPlfdHQ0VatWIzY+AadJu9Cr1oTY9y/xWj6A1OREHF3cKFG+BgDpaSmEbJ/C40u+bN26lYEDB8o4/beJ99SsxGsiCEXH2bNnsbCwJB15JGnJHDl8GEdHR+7evYuRsQkKxUpTz7QvYbv+oEP79uzbt7dILzfwq2S6cP/Tp0+JjIzEwsIi4z5tbW1atWr1zfURUlJSuHz5cqY+8vLyWFhYfHdNheTkZGJjYzPdfkRfX59Tp05SQbcknkv78OHFve+2v3vqEKf2zcXFxYW//vrrh+cXBEEQBEEQ/nNh0sycFFSwn+D23QIZgJKKOjZjNqFXqzUOjo4cP348j5Lmf4mJidSpW5eYmFhsR29Er1oTALRKVaDD1H2oaZbAfXEvop5cB0BBURmzIcuobdSdQYMGsXnzZlnGFwRBEL7i6tWrWNvYUrxCXXovCqViA1M6durEgQMHqFOnDqEhwaTGRRHiNp2uXbpw4MB+USDLZblSJIuMjARAV1c30/26uroZx/6/9+/fk56e/lN9ABYuXIi2tnbGTV9fP1sZ9fT0CA8PpUbVingu68PbZze/2u7heU/CdsxkxIgRLF26FDk5uWydXxAEQRAEoSiLjIzE1NSM2M9p2E/Ygbp26Wz1U1RSwWrkWkrq16Nnz165nLJgSEtLo379BkRGRmI5fCX69dpmOq6uXZr2U/ZQXK8qnsv68/LulwvM8vIKNLEZgpKKGsEhIbKILgiCIHzDnTt3sLC0Qr1UJWzGbEJFXQuLYSup1tyeXr164erqSv369Tl96iQ7d+5k9+5dRXpH57xS4He3nD59OjExMRm3Fy9eZLtvqVKlCAkOomH9Ongt68ebh5cyHX9y5QTB2ybTt28/1q1bJwpkgiAIgiAI2fD+/XvMLSx4+zEOh4k70ShR9qf6P78RytunN+jatWsuJSw4pFIpLVq04MmTx7TrP5+qzay/2k5FXQsHF1f0qjfBZ9UQnl4NJCH6Ld4rBqJfoTyrVq7M2+CCIAjCNz169Agzcwvk1UtiN3YbymqaAMgrKGI2aDG1jboxaNAgdu3aRZ06dejbt68okOWRXCmS6enpARAVFZXp/qioqIxj/1+pUqVQUFD4qT7wZTFXLS2tTLefoaOjQ2BAAK1btcBn5SBe3jkNQMStcAI3j6dz585s27YVefkCX08UBEEQBEHIddHR0VhaWfP8RST2LjvQKl2R0/vnczN4V7b6P78Zluk7WFFnZmbGtWvXaNNtOnXafr9oqKSiju3ojVRqZIb/htEcW9gVNcV0goMCs8zWEARBEGQjIiICUzNzUuVUKVO1Kc+uB2c6LicvT83WHZCTl+fGjRsySll05Urlp0qVKujp6REUFJRxX2xsLOfPn8fAwOCrfZSVlWnWrFmmPhKJhKCgoG/2ySkaGhr4+nhjamqC75rhXPJcm7FrxJ49u0XFVhAEQRAEIRvi4+OxsbXl/sPHGZsjxbx9zo1ANy56rCY9LeW7/V/dO0fABmdsbW3Zs2c3CgoKeZQ8f+rcuTOhoaE0cxhFI6tB2eqjoKRCbcPOSCXppCVGExQYQKVKlXI5qSAIQtEUExODYRtDGjVslK1ZbZGRkZiZmROXJMGozzzuhO0jxHUqkvS0jDbvnt/Gb81Q2rY1Emuiy8AvF8ni4+O5du0a165dA74s1n/t2jUiIiKQk
5Nj/PjxzJs3j+PHj3Pz5k369etHuXLlMnbABDA3N8/YyRLAxcWFLVu2sGPHDu7evcvIkSNJSEjIk9141NTU8HB3x9HBnoseqzA2NuLwoUMoKSnl+mMLgiAIgiAUdImJiTg4OnHt+i3sxm2npH5t4MuW9XLyCiQnRPPi1slv9o98dAW/tcMxNjbi0MEDRf472NChQzl69Cj1zfrSov34TMcS4z5+s9+bh5fxX++MgqISIcHB1KlTJ5eTCoIgFE3Jyck42Dtw8/oNol6+wbCNIQ8fPvxm+w8fPmQsRWA/wS3TuuibhtfhVsgePr56iM/KgTSoXwdvL0/U1dXz4qkI/+OXh0hdunQJU1PTjL9dXFwA6N+/P25ubkyZMoWEhASGDRtGdHQ0bdu2xc/PD1VV1Yw+jx8/5v379xl/d+/enXfv3jF79mwiIyNp3Lgxfn5+eTY8XEVFhYMHD3Du3DmaNm2aKasgCIIgCILwdSkpKXTq3JkzZ89iN247Zao0zDj29FogFesbEffhDYFbJqJf34gqTSyo2tQaReUv37XePb+Nz+ohtGjejOMeHkX+O9i0adPYunUbNVo50bbHLOTk5Pgc+4GH54/z4MwxPr5+iP347VSok3m2xfuIO3ivGoxUKsHXx5tWrVrJ6BkIgiAUfp8+feLU6VO0qd0Cp5Y2TNs5lx07djBv3rwsbWNiYrC0subZizc4TdqDdplKPLsWiLyiEpK01C/ne/OEq97rqFa5Iv5+fmhqaub1UxIAOalUKpV1iJwUGxuLtrY2MTExP70+mSAIgpCVeF/NSrwmgvBfaWlpdOvWHU8vL2xGb8q082Ji3Ed2uBhg3PdvdKs1IdR1Om+ffVlfpdOMw+hWbcSHVw/wWtqHurWrExQYWOT/TS1ZsoSpU6dRsYEJFsNW8PL2Ke6fPUbEzTBADh29qnx8dZ+O0w+iV61JRr/oyKccW9SdlM+x7Nu3l27dusnuSfwk8Z6alXhNBKFg2Lp1K8OGDUNZUZkyumU4feY0+vr6mdokJCRgZW3DlWs3cJy4i1IV6wJwev983jy8RE2D9uhVb0rAhjHoldTi5MkwypQpI4unU6hl931VLLYlCIIgCIIg/JL09HT69x+Ax/HjWI9cm6lABl92qZRKJTy/EcLZQ/+QkhhHqYr1qNnaieJlqxId9QyfFf2pWlkffz+/Il8M2LZtG1OnTUevRjOsRq7h4bnjhO6YAUDlxuYY9/mLm8G7+RzzNtNovfiPbzi+rB/Jn2NZv25tgSqQCYIgFGRDhgxBR0eHPbv3sHjJ4iwFMoD+AwZw+cpV7Ma7ZhTIAAx7zAQgIfotnkt7U1xTheDgQFEgkzFRJBMEQRAEQRB+mlQqZcSIEezbvw/LYSup3Ng8S5v4j68B+PDyPvXN+lCzlRPFy1UHIPb9S7yX96dcmVIEBQZQokSJPM2f3zx48IDhI0ZSskIt7MZuRlFJhbI1W1DXpCcRN0N5di0IVY0SvHt2k4oNTJCX/7KpQWLcB44v68fnmHfM/fsvRowYIdsnIgiCUMR06dKFLl26fPN4UlIycnLySCWSLMcS4z7is3IgagpphASHU758+dyMKmSDKJIJgiAIgiAIP0UqlTJ+/Hi2bt2K6cB/qNbc9qvtmtmPolKDdpSqVA85ObmM++M/ReK9vD8lNFXFVXPgxYsXmJlboFVaH4cJriiragCgo1sZk75/I5VKOX9kKTeDd5KWkkRT+5EAJH+Ow2v5IGLfvWDc2DHMnDlTlk9DEARB+Ip9e/dg7+CIz8qBWI/eSIU6bQBI/hyLz6pBkBxN8MlwqlSpIuOkAvzG7paCIAiCIAhC0TRz5kxWr16NUe8/qW3Y6Zvt5OTlKV25fqYC2efYD3ivGEAxZSkhIUFF/qr527dvMTe3JD5ZioPLDtQ0s46ok5OTo45xN9JSkpCTV0C/XlvSUpLwXTOMD6/u06d3L5YvXy6D9IIgCMKPaGpq4ufrQ7t2xviuHsaza0GkJiXgu3ooydGvCQoMoFatWrKOKfyHGEkmCIIgCIIgZNv8+fNZuHAhBl2nUd+090/1TYqPxmflQBTS4gk+GU7lypVzJ2QB0q//AF5FvaPD1INoFNf7ZjvtMpWoadABBSUVFJXV8Fs3ishHV7Czs2XHjh2ZCpGCIAiCbERFRXHjxg3Mzc2Rl//vmCR1dXWOe3jQs2cvPDaMpkS5GiR+eklIcBANGzb8zhmFvCZGkgmCIAiCIAjZkpiYyKxZs6jc2JzG1oN/qm9KYhy+q4eQFv+W4KBAatasmUspC5aK+hVIio8h8vGVH7Y1H7wEkz5/E7x9KhG3wmjTpg0eHh6iQCYIgpAPREVFYWRkgpWVFYMHDyY9PT3TcRUVFQ4ePEDvXr0g6QM+3l60aNFCRmnzH6lUysqVK1m5ciVSqVRmOcRIMkEQBEEQBCFbVFVVGTVqFOvXr+dGoBsNLQZkq19qciJ+a4aT8P4ZoSHB1K9fP3eDFiAbNmwgPT2d7dunkJIYRwPzft9sK5VKObn3Lx5d8KJBwwYEBQWioKCQh2kFQRCEr/n48SPmFha8ef+J1p0ns3PnchITk9i1aydKSkoZ7RQVFdm5c4cMk+Zf8+bNY/bs2QDcuHGDzZs3o6iY9yUrUSQTBEEQBEEQskVOTo61a9eirq7O0qXzvywib/f93RTTUpPxXz+STy/vEBgYQNOmTfMobcGgoKDA1q1bKV68OMuWzSX5cwzNHEZ/dXTYhWMruB26lypVq3L2zBlUVFRkkFgQBEH4X7GxsVhZW/M04jWOk3ZTolwNtMpU5PAWFz4nJnLo4AHxfv0DW7duZfbs2bTsMB7NkhXY4TaNqKgoDh48SLFixfI0i5huKQiCIAiCIGSbnJwcixcvZs6cOZw/uowL7iu+OS0iPS2VgE1jefvoMl5enhgYGORx2oJBTk6OJUuWsGDBAi56rOb0gflIJZJMba75b+WKzwb09PS4fOlSnv9oEL5u0aJFyMnJMX78+Iz7kpKScHZ2pmTJkmhoaNC5c2eioqJkF1IQhFyTkJCArZ09t+8+wG78dkqUqwFAtWY2WI9aj6+vH45OTnz+/FnGSfMvT09Phg8fTr12vWhqP4qaBu2xG7uZwOBQTM3MeP/+fZ7mEUUyQRAEQRAE4afIycnx559/snjxYi57refMwYVZCmUSSTrB2ybz8vZJjh07iqmpqYzSFgxycnJMnz6d9evXcytoJ6Fu05GkpwFwJ/wgZw/9g4GBAVevXqV48eIyTisAXLx4kU2bNmVZdHvChAl4enpy6NAhwsLCeP36NZ06fXsXWEEQCqakpCSc2rfn8pWr2I7dSumK9TIdr9SwHa06TybgxAmCg4NllDJ/O3v2LN26dadyYwva9pqdMYpav54RjpN2c+feYwzbGudpJjHdUhAEQRAEQfglkydPplixYjg7O5OWkohx77+Qk5dHKpEQ5jaDp1f8OHjwILa2trKOWmCMHDkSbW1t+vXvT0pSHFWaWhO++w9GjRrF2rVrxSL9+UR8fDy9e/dmy5YtzJs3L+P+mJgYtm3bxt69
ezEzMwPA1dWVOnXqcO7cOVq3bv3V8yUnJ5OcnJzxd2xsbO4+AUEQfktKSgqdu3Th5MlTNLEbiZxc1vFHH17c44rXGlobGNCuXbu8D5nP3bt3Dzt7B0pWrI/50OXIy2deY1NHtzLq2qWJi4tFIpFk2i00N4mRZIIgCIIgCMIvGzVqFNu3b+feyYMEu05Fkp7GyX1/c//sMXbs2CFG0PyCXr164eHuzqvbJwnaOomePXqyZs0aUSDLR5ydnbG3t8fCwiLT/ZcvXyY1NTXT/bVr16ZixYqcPXv2m+dbuHAh2traGTd9ff1cyy4Iwu9JS0ujd+8+nDgRgHH/hVxwX8nRBV0ytfn05jHeKwZQu2Z1/Hx90dDQkFHa/On169dYWlmjWKwU1s4biHn7nA8v72ccT09Nxn+9M0kxbzjh75dnBTIQI8kEQRAEQRCE3zRw4EDU1dXp3acPUU+uERP1jC1bttC7d29ZRyuw7O3tCQkJ5tKlS4wYMSJPfyAI37d//36uXLnCxYsXsxyLjIxEWVkZHR2dTPfr6uoSGRn5zXNOnz4dFxeXjL9jY2NFoUwQ8iGJRMKgQYM5evQoViPXkhj73/WyNg6rjfXItZSsUBPv5f2ppK9HwAl/tLW1ZZg4/4mOjsbK2pqYhGTqmXbHa9Vg3j65jrKaBgNWnEdeXpHg7VN5++QKASdO5PmO2KJIJgiCIAiCIPy27t27o6amxrjxE5i7ejVDhgyRdaQCz8DAIN9sdnD//n3Onj1L7969UVJSknUcmXnx4gXjxo0jICAAVVXVHDuvioqK2P1OEPI5qVTKqFGj2L17F+ZDllGliQXeq4agoKhMeloKUkk6jy/5cPbgfHRLaRMUGEjJkiVlHTtf+bKOWwceP42gkc1wzh1ZknFMW7cK8gpKnD4wn8eXfDh8+DDGxnm7HhmIIpkgCIIgCIKQQ5ycnHBycpJ1DCGH3b17FyNjEz68f4e7x3EOHtiPsrKyrGPJxOXLl3n79i1NmzbNuC89PZ3w8HDWrl2Lv78/KSkpREdHZxpNFhUVhZ6engwSC4KQE6RSKZMmTWLTpk20G7CAGq0cAZBXUEJBSYWaBh2o2MCEC0eXoKWmQHBQoPg3//+kp6fTp09fzp07h/0ENzRLVSDu/UseX/IlOSEa/Xptuea/lZuBO1i/fr3MlmsQRTJBEARBEARBEL7qwYMHtDM1Q06tBGaDp+C9cyYdO3XiyOHDOTqSqqAwNzfn5s2bme4bOHAgtWvXZurUqejr66OkpERQUBCdO3cGvozCi4iIyDejAgVB+Hlz5sxh+fLltO01mzptu2bcbzViFVKplLTkz3gu7YOKXAqhISfFdOn/RyqVMm7cOI4eO4r1yLWUrdEcAJO+fyOVSrgbfgCpVMq5w4uZOXMmI0eOlFlWUSQTBEEQBEEoohITE9m5cyempqbUrFlT1nGEfObJkye0MzUjXUEDhwk7UNcqiZpmSU6sH4mjkxPHPTxQU1OTdcw8pampmWV9nGLFilGyZMmM+wcPHoyLiwslSpRAS0uLMWPGYGBg8M2dLQVByN927tzJ3Llzad15Mg3M+mY6pqCoTPLnOHxWDkKa9ImQk+FUrVpVRknzrwMHDrBu3TqMev9JlSaWmY4Zdp9J2RotCHGdysCBA5k7d66MUn4hVgAVBEEQBEEogpKSkujQsSMjRoygrZExt2/flnUkIR95/vw57UzNSJIoYe/ypUAGULG+ETZjthAefgo7ewcSEhJknDT/WbFiBQ4ODnTu3BljY2P09PQ4evSorGMJgvCLkpOTAZBKJVmOpSYl4Lt6CEnRrwgKDKB27dp5Ha9AKFWqFAoKCry6c5q0lKRMxz69ecSpPbOxtbVh8+bNMt/JWRTJBEEQBEEQipjk5GQ6d+5MSEgolsNXgWpxTNqZZplGJhRNL1++pF07M+KSpDi47KSYTplMxyvUMcB23DbOnjuPtY0tcXFxMkqaP4SGhrJy5cqMv1VVVVm3bh0fP34kISGBo0ePirWJBKEASUhIwNGpPZ06dSY+Pp4hQ4YwZ84czh9dxgX3FUilUgDSUpLwWzeS2MiHnPD3o1GjRjJOnjMOHDiAtk5xxowZk/Fcf5eFhQUeHh68vnsK7xUDSIqPBiAm6jl+q4fSuFEDDh08iKKi7Cc7iiKZIAiCIAhCEZKSkkLnLl05ERCEtfMGqreww8FlJwrFSmPSzpTr16/LOqIgQ2/evMHUzJyP8Uk4TNyJRomyX21XrmYL7Ma7cunKNSytrIiJicnjpIIgCDkvOTmZDh07EhAYjI/fCUzNzPj48SN//vknixcv5rLXes4cXEh6ajInNo7h/dNr+Pp407JlS1lHzxHbt2+nV+8+xMXFsXbtWpydnXOsUGZvb09ISDCJH55xfEkv3j67ic+qQZTTLYW3lxfq6uo58ji/SxTJBEEQBEEQiojU1FS6duuOv78/VqPWoV/PCABVjeLYT9iBslZZ2pmacfXqVRknFWTh7du3mJlbEPUhFseJu9AsWf677fWqNcHBZQfXb97BzNyCjx8/5lFSQRCEnPfv7ouhoeFYO2/EcdJu7t5/QlsjY169esXkyZNZt24dNwJc2f+HDa/vneH4cQ+MjIxkHT1HuLq6MnTYcIrp6FKuVkvkFZXZsGEDo0aNQiLJOtX0V7Ru3ZqzZ06jrpDCkXmdUJFP5cQJP0qVKpUj588JokgmCIIgCIJMxcXF0bRZc6pVr8GtW7dkHafQSk1NpUePnnh7e2M5Yg2VGphkOq6qoYO9yw5Ui+vTztSMS5cuySipIAsfPnzAzNycl2/e4eCyE63SFbPVr0zlBliNXMeVy5dYv359LqcUBEHIHVKplJEjR3L06FEshq2kfO1WlK5UD6cp+3j9LhqDNoY8ePCAUaNGsX37dsqV1uLwoUNYWlr++OQFgJubG0OGDqOYji4dpu7DbuwWytdujbyCEhs3bmTkyJE5ViirVasW586eYfr06QSc8Kdy5co5ct6cIopkgiAIgiDIjEQioU/ffty994DYZHlatWotFrjOBWlpafTp0xd3Dw8sR6yhciOzr7ZTUdfCbrwrxUpVwczcggsXLuRxUkEWPn36hLmFJc8i3mDvsgMdvSrZ7pv8OY7zRxZTvERJOnTokHshBUEQctGMGTPYsmULJv0XUKWJRcb9OnpVaD9lPwlpirQxbMubN28YOHAgd+/cxsnJSYaJc86uXbsYPGQo6tplaD9lLxolyqKopIKN83oq1GmDvIISmzdvZsSIETlWKCtXrhwLFiygQYMGOXK+nCSKZIIgCIIgyMzcuXPxPO6B6eCldJh+iLJ1jencuTN//PFHjn0RK+rS09Pp168/hw8fxnL4Kqo0Nv9uexV1TezGb0OjTDXMLSw5e/ZsHiUVZCE2NhYra2sePHqK/QQ3SpSrke2+KUnx+K4eTOLHFwQFBlC/fv1cTCoIgpA7li1bxqJFi2jTbTq1DTtlOV6suB6lKjUg+tNH4uPjZZAw9+zevZsBAwehrl2aDlP3olmyXMaxL4WydVSo2wZ5BUW2bNnC/fv3ZZg
LuSmJhYwEkFQSjK4uPjMTO34FX4W6wmO6BepvJHbVISYrh6eg1ly5XH/u+dlKnRDAX1iugbGHL79m0ZpM692bNmEhMeSuTTnDlrtjSkcr02zJg566urBRd17u7upKQk08p8BPKKOad8sV13hZotDHj96AJnzpwW81YKRVq+F8nWrFnDiBEjsLW1pUmTJmzbtg1VVVV27dr1yfbr16/H3NycadOm0bhxYxYtWkSbNm3YtGlTfkcVirC0tDQyMzMpoahQrIZY3gp/i62jN23atsPZ1bVA5m9o3Lgx/n6+SJOjcVk7hJSE9/l+zu8R9/Y5TqsGUlajBEFBAVSvXl3WkQRBEAoVZ2dnfv3tN2q3McdgyNKvzjdZrXEnLCft5Nz5C/TrP6CAUgqCUNSlpKTQrXsP7j94hMXkncjJK3DlzAbePLmW3SY9NQn3jSNJjQ0nKSmRsjWaYT5hB1ZTHFAo+aFQduvWLRlexZf98ssvtGrdhssnV+UYWimRSGjfayq3bt7g2LFjMkyY/6ZPn06Hjh1x3zQ6x79tVmYGQQf+4NWDs5w8cQIzMzMZphSEH5evRbK0tDSuXbuGsbHxfyeUk8PY2JgLFy58cp8LFy7kaA9gZmb22fapqanExcXl+BF+PoaGhvz9998cvPOIeT7nySoG8wLcjnjH4NPeNG/VGhc3N9TV1Qvs3E2aNMHfz5espHf/FMpiCuzcuREbGYbTqoGUL6VKUGAA1apVk3UkQRCEQsXDw4NevXtTo7k+hsNWIiefu7WalEqURF5BUcxNJghCrmRkZPBb336cO3eO5qbDcF0/ggOzDLnqtJGLx1cAkJmeiueWcbx/9ZCMzEzKVG+K+YQdKCqroqJeCqvJDihpVEbfwJCbN2/K9oI+Q05OjhXLl/Em+Dpht3yzX5dKpVRp0I5aLfSZPWfuV1cKLso0NDTw8vSkfTstXNcP5dXDS2RlZeK7ewZPr3tw9MgRrK2tZR1TEH5YvhbJ3r17R2ZmJhUrVszxesWKFQkPD//kPuHh4d/UfunSpWhqamb/iN4kP69hw4axa9cuDt97wuwiXii7G/mOwae9aNyiBW4eHpQsWbLAMzRr1gw/Xx8yEiILVaEsJuIZTqsGUqFMSQIDA6hataqsIwmCIBQqvr6+dO/eg6qNO2M8ch3yCoq52i/q1WNc1g6hUYN6HDl8KJ9TCoJQ1EmlUkaMGIGzkxPGI9cTGXqTpNi32durNe1CVlYmPjun8erRRSQSCerlatLceCiKyv9NH6KiXgrdwX8R8z6avXv3yuJScsXY2Bh9A0OunF5DVlYmUS8fcWiuGVccN9K2+2Sehoawc+dOWcfMV+rq6ri7udKlc0fcNg7HbeMoQi47c/DgQXr27CnreIKQJ4r86pazZs0iNjY2++fFixeyjiTI0JAhQ9izZw/H7wczw+scmUVwboAHb6OxOe1N/SZN8fD0RENDQ2ZZmjdvjp+vD+nx4biusyU1MVZmWQBiwp/ivGoglcpqEhjgT5UqVWSaRxB+Fjdv3mTFihVERRWNBT1+ZkFBQVhbd6Vi/XaYjt6IvIIS0qws3jy5RtCBhfjsnE5mRtpH+71/E4Lr2iHUrVkNby9PSpUqVfDhBUEoMqRSKVOnTsXBwQED2+XUamWEyagNWIzflt2mRjNdAvf/Qeg1d5QUlShdvQlvn9/HffOYHPeU8VGv8dwyllq16jBlyhRZXE6uSCQSli9byruXj3ly0ZHYyDBiI55y1XEDQQcXUrOlEX8sWEhSUpKso+YrVVVVXJydMTTQ58XdQPbs2cOvv/4q61iCkGfytUhWrlw55OXliYiIyPF6REQElSpV+uQ+lSpV+qb2ysrKaGho5PgRfm6DBg1i3759nHoYwnSvs0WqUPbwXTQDT3lSt2EjvLy90dTUlHUkWrRogZ+vD6mxr3FZP5TUJNkMaX4fHorT6oFULl+awEB/Klf+eEJYQRDyVkhICP369ad169bMmDGDefPmyTqS8AUXLlzA3MKSsrVaYjpmM+/fhHDh+Ar2zzTg9PK+3PU/wJNLjqQlJ+TYLzYiDOc1NlSvXAEfH2/KlCnzmTMIgiB8sGzZMtasWUOXfvNo0LE7AIrKJajVyogKtVoAEHrdkweBR1BWUaF0tcZ0/HVW9v5HFlhxw92e+KjXOK8eSGk1JQID/Qv9qKD27dvTq1dvrjmtzzEPckTIDZ7d9CYqKor169fLMGHBUFFRwcXZmfj4eAYOHCjrOIKQp/K1SKakpISWlhY+Pj7Zr2VlZeHj40PHjh0/uU/Hjh1ztAfw8vL6bHtB+JT+/ftz8OBBHB8/43fPIDKKQKHscdR7Bp3yolb9Bnh6exeqp/gtW7bE18eblOgXuK4bSmpSfIGe//3rYJxXDaRapXIEBvp/tmguCELeCA8PZ+zYsTRs1AgXD1/0bBbTvscU7O3tCQkJkXU84ROuXLmCqZk5pas2xnz8Ns4eWsSxP7tz22s3NZrp0H3afqo36UzlBu0oUfK/Iljc2xc4r7GhcrnS+Pp6U758eRlehSAIRYG/vz+zZ8+mjdUYmhvZfLS999wTdOwzg5vuO1BRKUHpqo2xmPQ3z+8EZrdJfB/BxeMrcF49CE1VBQIC/Ap9gexfS5YsJj7qDc/vBmW/1tp8JBYTttNYty9Lly0nOrpwrxCfF+Tl5QtkUTFBKGj5PtzSzs4Oe3t79uzZw4MHDxgzZgyJiYnY2toCYGNjw6xZ/z1VmDRpEu7u7qxevZqHDx+yYMECrl69yvjx4/M7qlDM/Pbbbxw+fBjX4DCmuAeSnll4C2XB0TEMPOVFtTp18fLxKZRP8Vu3bo2vjzdJUWG4rh9KWnLBFMqiXz/50MOhSgUC/P0+mrNQEIS8Exsby9y5c6lTpy679x6gXfcp9F3iTRPd32hhYkuJkmWZN2++rGMK/8/NmzcxNjGlZMW6mE/8MBl2mSr1UClZhqzMDMJu+yORU+DVw0vUaWOavV981Guc19hQVrMEfn4+4gGEIAi5UrZsWVRV1Qh/fPmTIwwenD3GhWPLUVZWoXS1RlhO+hslFXWkUily8gpUb6ZLxz4z0SxfDQ0VOQL8/ahRo4YMruT7NGrUiCFDhhB6zY2KdVtjNXkXHX6ZRq2WhmhZjSU1LZ1ly5bJOqYgCN9JIpXm/+zmmzZtYuXKlYSHh9OqVSs2bNiAtrY2APr6+tSqVQsHB4fs9seOHWPu3Lk8e/aM+vXrs2LFCiwtLXN1rri4ODQ1NYmNjRVDLwUATp06xa+/9sG4dnXWm+uhKF+4puILjY6l/ykPKtaoia9/AOXKlZN1pC+6du0ahkbGqJWvjeXkXSip5N+qm9GvPhTIalWvhJ+vr+jhICPic/Vjxe09SUlJYfPmzSxe8heJSUk0MxxMa/MRKKvlHPJ9P+AwAfvmcePGDVq1aiWbsEIOd+/eRVdPHyXNKlhN2YOy6n8LvWRlZRIefB2PLeNQVtMkNuIZg1YEol6mMokxETiuHICGsoSgoIAi9QVVKH6K22dqXijs78mlS5cwM7dASaMSlpN2oqr54R4t9JoH
HtsmoKSkTNlqjbGcvBOlEh8+l6RSKWlJcaSnJeO0aiAllSEwwJ9atWrJ8Eq+z8uXL6lbrx4tTEfSrvvEHNsun17HXa+dBAc/ESuwC0IhktvP1QKpFowfP56wsDBSU1O5dOlSdoEMPnTX/d8CGUCfPn149OgRqamp3L17N9cFMkH4lJ49e3LixEl8nr5kgps/aZmZso6U7en7WPqf8qRc1ep4+/oV+gIZgJaWFt5eniREhuK2fjhpKQlf3+k7RL16jPPqQdSuUVkUyAQhn23YsIGpU6dSpYUp/ZZ406H31I8KZAANO/emdKVazPyfHuCC7Dx8+BADQyMUS1bEctKuHAUyADk5eao0aEcdLXNiI55RoXYL1MtUJin2Hc6rbVBVyMTf31cUyARB+Gba2tqcOxuEfHosZ1b0I+7tC17eP4fP33bo6uiSlppC1SZdsgtk8GHi+/S0ZJxXDURdSVpkC2QA1apVY8L48dzx3kVSXM5FbVqZDUNeqQQLFy6UUTpBEH5E4epSIwj5pFu3bpw8dQr/sNeMdw0gNUP2hbKwmDgGnPKiTOXK+Pr7U6FCBVlHyrV27drh5elBXPgT3DaMID0lMU+PH/XiIc6rB1G3VnVRIBOEAtC8eXMAarU0RK3U54c0yyso0q6HHR7u7gQEBBRUPOETnjx5gr6BISiXwnLyblTUS322bX1tawBqtzYlOT4Kl7WDUZQmE+DvR+3atQsosSAIxU3Tpk25cP4cZdQVObOiLx5bxmJoaICXlyfz58/nqtNGrrv+t9plYkwEzqsHoaqYVaQLZP+aNWsWigryXHfdmuN1pRIlaajzG7t37+b58+cySicIwvcSRTLhp2Ftbc3pM2cIevGGsa5+pGZkyCzL89h4BpzyRKNCRXz9A4rkPFva2tp4eXoQ+/pRnhbK3r14gPMaG+rXqYmvr3eR6F0nCEWdubk5Orp6XD61mqysLz9EqNPGjIq1mjF9xgwKYMYG4TOmz5hB9PsYLCfvyjER/6dUrteWwWsu0rBTL1zW2kJqDP5+vtSrV6+A0gqCUFzVqlWL8+fO0qJJA/T1dDl54gRKSkosXLiQP/74g0snV3PNeUt2gayEfAaBAf7FokBftmxZZs2cwYOAQ8S9ewlA3LuX+Oycxk237dSoUUvcxwpCESSKZMJPxcLCgjOOjlx4FcloZ9kUyl7GxjPgpCeqZcvj6+9P5cqVCzxDXunQoQNenh7EvHqA28aRpKcm/dDx3j2/j/NqGxrUq42vjzdly5bNo6SCIHyJRCJh+bKlvHvxiOBLzl9uKydHu56/c/nSJRwdHQsoofD/9e/Xj6yMNC4cXUpmRvoX20rk5JBXUMRj00gyEiLx8/WhUaNGBZRUEITirmLFipw7G4SHu1uO1Q4XLFjAwoULuXx6LccWdkNFLp3AAH/q1Kkjw7R5a9KkSZQuXZoLx5Zx9tAiDs81JSb0Ips3b+bhw/uoqqrKOqIgCN9IFMmEn46ZmRlOzs5cDn/HCCdfUgqwUPYqLoEBpzxRLl0Gv4AAqlatWmDnzi8dO3bEw8Od9y/u4b5pFOmpyd91nLdh93BeY0OjBnXx9fEulCt8CkJx1rFjR7p2685Vx3VkZqR9sW31pl2o3qQTM2fNJrMQzfP4M+nTpw9Hjhwh9LoH3jsmf/HfLC0lAbf1w0mJeYWvjzfNmjUrwKSCIPzM5s+fz/Lly2mv1ZIAfz/q1q0r60h5Sk1NjQV/zCf0mgdPL5/mz4ULCA0JZsyYMSgpKck6niAI30EUyYSfkrGxMS6urlyPjGaEoy/J6flfKHsTn8iAU54oaJbCLyCgWK1207lzZ9zd3YgOu/NdhbK3z+7ivMaGJo3q4+vjTenSpfMpqSAIX7L0ryXER73mfsDhr7Zt3/N3Hj64z759+wogmfApvXv35tTJk7y464/ntglkpqd+1CY9NQm3DSOIjwzB28tTrEoqCEKBmz59erEe4j1q1Cj8/Px49jSU2bNn5+hNJwhC0SOKZMJPy8DAAFc3N26+e88wRx+S0r88XOVHhCd8KJBJ1DXwCwgsliuJ6ejo4ObmStSzW3hsHkNGWkqu9ot8ehvntYNp3rQRPt7elCpVKn+DCoLwWU2bNmXQIBtuuGz56sq1FWq3oK6WOfPm/0FKSu5+34W817VrVxzPnOHNg/O4/7/P3vTUZNw3jSL21UM8Pdxp27atDJMKgiAUT/Ly8ujr64tpQgShmBBFMuGnpqenh7uHB3ejY7A940NiWt4XyiITkxh4yovMEmr4BQRQs2bNPD9HYaGrq4ubmyvvnt7AffPorxbKIkJv4bLOlhbNGuPl6YmmpmYBJRUE4XP+/HMhaSnx3PZyyPG6NCuL4CuuxEaEZb/WrucUXr96xdatWxFkx9zcHBcXZyKDr2bPD5mRnornlrFEPbuNm5srHTt2lHVMQRCEfGVvb4+amjpz5syRdRRBEIowUSQTfnpdunTBw9OLhzFx2J7xJiEPC2VvE5MYcNKTVCUV/AIC8nUln9u3b3Pz5s18O35u6enp4eLizNuQ63hsGUvGJ4b/AISH3MBlnS0tmzcRBTJBKERq1KjBuLFjue25k+T4qOzXI57ewmv7JI7+2Y2HZ48jlUopXakODTv3ZtHiJcTFxckwtWBkZPTPsPfbuG0YjtfW8UQEX8XZ2QkdHR1ZxxMEQcg3UqmUJUuWMHLkSDSqNOKvv/5i3bp1so4lCEIRJYpkggB06tQJL28fHsclMOSMF3GpX560OjfeJSUz8JQXSQpK+AUE5OtEpe7u7rRt14627dpx/PjxfDtPbhkYGODs7ETEkyt4bhn30Tw54SE3cF03lDatWuDl6YmGhoaMkgqC8Clz5sxBQV7Cddft2a/JyysCUKFmM/wcZuG1fRIpCTG07TqehIREVq9eLau4wj/09PTw8vIk9vUjXj08z5kzpzE0NJR1LEEQhHyTlZXFpEmTmDt3Lu26T6THjEO0Mh/BlClTOHDggKzjCYJQBIkimSD8Q1tbG28fX0Ljkxlyxpu4lE/3gMqNqKQUBp3yIl5OAb+AAOrXr5+HSXPy8fGhe48eVGuiQx0tC/r27cvBgwfz7Xy5ZWRkhLOzE+GPL+G5dXx2oezNk2u4rrOlrVZrPNzdKFmypIyTCoLw/5UrV44Z06dx3/8A8VGvAJBT+FAk0+49FdPRG3hx/xxHF3YlNjKMpgYDWbVqNREREbKMLfDhoc+9u3e4f+8eZmZmso4jCIKQb9LS0hgwYCCbNm1Cd+BC2nadgEQioUPvaTTq3JvBQ4bg7u4u65iCIBQxokgmCP+jXbt2ePv6EpaYgs1pb2K/o1D2PjmFQac9eS+V4OvvT8OGDfMh6QcBAQFYW3elcoMOmIxaj+GwldTX7s7AgQPZs2dPvp03t4yNjXF0PMPrRxfw3DaBlw8u4LZhGO3aauHu5ioKZIJQiE2ZMoVSpUpx9cwGAOQVPixlHxFyk3NHlpCVkUbi+3A8tk6gldkIsiTyLF68WJaRhX/UqFGj2K4iJwiCAJCQkICVtTXHjh/HwHYZTfT6ZW+TSCTo2Sy
mepMu9OrVm/DwcBkmFQShqBFFMkH4f7S0tPD19+dVajqDTnsR8w2FspiUVAad9iIqE3z9/WncuHG+5Tx//jyWllaUr9MG07GbkVdURk5OHv0hS2ms8yu2trbY29vn2/lzy9TUFMczZ3j98DxOq23Qbt8OdzdX1NXVZR1NEIQvUFdX54/583h08TTRr54gr/ihSHbp5CqUVUvRtut4Ov06G4Mhf1FCowwtzUawbft2QkNDZZxcEARBKM7evXuHgaEhQWfPU0fLAt9dM3h5/1yONgnRb4iNfEZJDQ2UlZVllDT3pFKprCMIgvAPUSQThE9o1aoVvv7+hKdlMvCUJ9HJX16lESA2JZXBp72ISMvEx8+Ppk2b5lu+y5cvY2ZuQenqTTEbtxUFxf/++Evk5NAdtIimBgMZOXIkmzdvzrccuWVmZoabqyt//PEHri7OqKmpyTqSIAi5MHLkSKpXr8Hl02uQ+2dOMomcHKaj19PaYhQtTW2p3doEgOZGNqiolWL+/D9kGVkQBEEoxp4/f07nzjo8eBSKtd0+ol4+AuCOzx7eh394SBP14iFnlv9GaTUFLpw/R+nSpWUZ+YukUikrVqxAXb0ky5cvl3UcQRAQRTJB+KwWLVrgFxDA2wwpA095EpX0+UJZXGoaQ8548yolHR8/P5o3b55vua5fv46JqRkalepjMWEHisolPmojkUjo0m8eLU2GMn78eNauXZtveXLL0NCQBQsWiAKZIBQhSkpKLFm8iKc3vIkJf0pT/f7oDFhA6cofL0SiqKxKG+vxHDx4gNu3b8sgrSAIglCc3b9/n46dOhPxPoFuMw6DBKJffSiShd325/BcM14/voLjqgHUrVWN8+fOUqdOHRmn/rz4+Hj69PmVGTNmoFGlEbNmzcLDw0PWsQThpyeKZILwBc2aNcM/MJD3UjkGnPLkbWLyR23iU9OwPeNNWGIK3r6+tGzZMt/y3L59GyNjE1TL1sRioj2KKp8vOEkkEjr+OpPWlqOxs7Nj2bJl+ZZLEITiq3///jRr3pxrTuvRGbCARp17f7Ztoy59KFWxJrNmzSrAhIIgCEJxd+HCBTp30SFNTo3uMw5TqmItIp9+eCAjkchRvmYzqjfTxXWdLdrttAgM8KdixYoyTv15jx8/pr12B5xd3TEbu5ke0w9So7kev/XtJ6YtEAQZE0UyQfiKJk2a4B8YSLycwj+FsqTsbQlp6Qx19CE0PglvX19at26dbznu37+PoZExypqVsZy0E6USX5/0XiKRoN3TjrbdJjJr1iwWLlyYb/kEQSie5OTkWLZ0Ka8eXeH53cAvtpVXUKRtt8m4uroSFBRUQAkFQRCE4uzs2bMYGRmjWr4O3aYdRK3Uh+JXU71+WE60x3bdZZoZDODV/XNYW1nh7uaKhoaGjFN/nqOjI1pt2xEZk0LP2cep08YUiZwcRsNWIaesQY8ePUlKSvr6gQRByBeiSCYIudCoUSP8AwNJUlCi/0lPIhKSSExLZ6ijN09iE/D09kZLSyvfzv/48WMMDI2QUy2L5eTdKKtp5npfiURCu24TaN/TjgULFjBnzhwxOaggCN/E0tKSzl10uHJqNdKsrC+2rdvWggo1mzJj5kzxWSMIgiD8sKtXr5KcnEQr81Eoq/5X/JLIyVGzhT73A4/g5zCL4cOHcezYUVRUVGSY9vOysrKYP38+3bt3p0I9bXrOPp5j+gJlNU1Mxmzm0eMnjBs3ToZJBeHnJopkgpBLDRo0ICAoiFTlEvQ/6cFQRx8excTj4eVF+/bt8+28ISEh6BsYkqlQEsvJDqiof9/ko1pWY+jYZyZ//fUX06ZNE19eBUHINYlEwvJlS3n7/AHBV1y+3FZOjnY9f+fC+fM4OzsXUEJBEAShuBo7dizGJib42E/hbdi97NelWVmcP7qMiydWMnfuXLZt24a8vLwMk37e+/fvsba2ZvHixbTvaYfpmE1IJB9/FU+MiUBOXpHnL1/JIKUgCCCKZILwTerVq0dAUBCZqurcj47Fzd2DDh065Nv5wsLC0DcwJFWqhNUUB1Q1yv7Q8VqZDaNLv3msXr2aSZMmiUKZIAi51rlzZ6ysrbl6Zh2ZGWlfbFu9aReqNe7AzFmzyMzMLKCEgiAIQnGkpKTEqZMnad6sMW7rhxET8YzMjHT8HGZyy3MnGzZsYNGiRUgkEgAyMjIICgoiMTFRxsk/uHPnDlpt2+EXeI6aLY14G3aXfTP02D1Fm6gXDzpex7gAAFxpSURBVIEPBb9rzltwXT8cfb3OHD96RMapBeHnJYpkgvCN6tSpw70HD3jx8iWdO3fOt/O8fPkSfX1DElLBym4vaqUq5MlxmxvZoDdoERs3bmTMmDFkfWXolCAIwr+WLV1K7NsXPAg69sV278LukZGWyv1794iOji6gdIIgCEJxpa6ujpurK1UqlsN1nS0em8cQcsWZgwcPMmHChOx26enp9O3bD11dXaysu8p8bq/Dhw+jrd2BhAwlDIet4tlNb55e9yQxJoKsjHQUlEqQlhyP59bxXD69lrlz5+Li7Ezp0t83ckQQhB8nimSC8B3U1dXz9Y/Xmzdv0Nc34H1iKta/70W9dKU8PX4Tvb4YDFnKjh07GD58uOjpIQhCrjRr1oyBAwdxw2Uz6akff/GIiXiG1/ZJHF/cEw35ZE6dOkX58uVlkFQQBEEobsqVK8f/tXfXcVHk/x/AX4u0CBgIooCAioWFBRYlYefZiSgeNmdg15kc2Kh3it1n00gZWNiFYiICegZgkDu/P/zJ9zhCUJYlXs/HYx8Pd+Yzs6/5yM7svnfmM0FBAVCWE/Dm8VV4nz6NQYMGZc1PTU1Fn779cPzECbTqORkRERfRs1cvpKSkFHvWjIwMuLi4YNCgQajVxAo9Zx6EbuNOsHPeBIWK6oAgwLClPcSZ6Ti2rB9eR1/CyZMnsXjxYsjI8Cs6kTTJSjsAEWX3+vVrWFha4c2HT+j+2x5UqlpTIq9Tv30/yMjKYcf2GUhLS8eOHV6QleUugYjyt3jxIhw4eAC3AnfApNuvAL6OoXL11AY8OHsYWjVqYNu2bRg+fDj3KUREVKR0dXXx4P49pKamZvsR5suXL+jdpw/OnAmBrbMn9Iw7QauOCfzWO6Jv3744duwY5OXliy1n3379cfrUKbQbOAfGViOyLgXVb94ZFw6tQOqnD9CobYxjy/tBv7YeToReQb169YotHxHljWVqohLk7du3sLSyQmz8P+g6bRdUNXQl+nr12vaEtaMH9u/fj8GDhyA9PV2ir0dEpV/t2rUx3skJtwL+QuLr57j4txv2z7HGq1sBWLVqJR5HP8Lo0aNZICMiIolQVVXNViD7/PkzunXvjuCQUNhP3AI9404AgFoNTGHzqycCAoMwYMDAYv2c+/TpU6hV10WDDr9kFci+6TF9D+q27YmIwyvQrYs9rly+xAIZUQnCIhlRCfH+/XtYWXfGsxdx6DZtF9Q1axfL69Zp1QU249fj6LFj6P/LAKSl5T8gNxHR3LlzUUEkYP+czngQuhszp/+GZ0+fwMXFBYqKitKOR0RE5U
RycjJs7exx7nwEukz6C7UaZh8vWLdxB5j2d8Xx48fg7Z3/3ZmL0v59e5Ga9BphO2dnu1FWysf3CN85G48vn8KqVatw+PAhqKioFFsuIvo+FsmISoDExER0trHBo8fP0HXaTlTWrlOsr6/fvDNsf90Ib28f9O7TRypjNxBR6aGhoYGDBw9g5syZePLkMZYsWQI1NTVpxyIionLk2+fnq5HXoWnYAidWD0VGWvbPsO9fReO6zybUq1dfonek/69GjRphxw4vPLrsjVuBXgCAf17cw9Hf+yA57j78/f0xffr0HGeZEZH0sUhGJGXJycmws7fH3fsP0WWqF6rWMpJKDr0mFrCb4InAwCB079EDX758kUoOIiodunbtiuXLl0NLq2hvLEJERPQ9X6/AsMbN2/fQZfI2xNw9BwC4F34AmRlfL6t8G/MAp9yGQrdmdYSHhxb78ap///6YPn06Lh5ZhYt/u+H4ygHQr1Ud1yKvwtraulizEFHBsUhGJEWfPn1Cl67dcOPmHXSd4gUN3UZSzaPTqAPsJv6J8PBzsO/SFZ8+fZJqHiIiIiKif/vnn39gbmGJ+1GP0d1lNzLS/zdUyPkDv2OrU0O8eX4Xp/4YBkN9HYSFhkJTU1MqWZctW4ZO5ua47rsFgwYMwIXz51C7dm2pZCGigmGRjEhKvnz5gu49euLylauwn/QXqus3kXYkAF8HObWfvA0XL12GrZ09kpOTpR2JiIiIiAgJCQno2Mkcj5+9RLff9qCabkM8vR6QrY12vdY47T4cDevXQWhIMKpVqyaltICsrCxOnTyBmzdvYscOLygpKUktCxEVDItkRFKQmpqKXr1749z587Cf9Ce06rSQdqRstOu1QpcpXoi8fhPWnTsjKSlJ2pGIyoSMjAz8+uuvMDVrh5iYGGnHISIiKjVevXqFjh3NERP3Bt1/24OqNb/eEbKZ7Rg0txuLXjP3o+f0fXgbcw/NmjRC8JkzqFy5spRTAxUrVkSTJk04/hhRKcEiGVExS0tLQ99+/RASEga7CVugXa+1tCPlSsuwOZraOuLypUuIjIyUdhyiUu/Lly/o07cftmzZijv3H6FzZ1u8fftW2rGIiIhKBevONoh/m4juv+1F5RqGWdNVqtRA237TIc7MhO/6MWjdygSBAQFQVVWVYloiKq1YJCMqRhkZGRg0aDD8/QNg8+tG1GpgJu1IeXp24wyunFiLX34ZgA4dOkg7DlGp9uHDB9jY2sHP3x92EzzR/be9iIlLgH2XLhz7j4iIqADk5eUhFmfmOu/lvfPwXT8G7duZws/XByoqKsWcjojKChbJiIpJZmYmhg4dhuMnTqDzuHXQbdxR2pHy9OJOOAK3TELPHj2wZ89uyMrKSjsSUakVFxeHDh07IvL6TXSbuhN6TSygrqUP+0l/4dbtu+jTty/S09OlHZOIiKhE8/fzhbZGFXi7D0fSm/8NWfD8Vih814+DlaUFvE+fhrKyshRTElFpJ5Ei2bNnz+Dg4AB9fX0oKSnB0NAQCxYsQFpaWr7LmZubQyQSZXs4OTlJIiJRsRKLxRg1ajQOHz6MzmPXoHYzK2lHytPL+xcQsMkZtrY2OHBgP+Tk5KQdiajUevz4MczM2uN57Bv0mL4/2/iD1Wsbw+bXTQgODsHIkaMgFoulmJSIiKhk09TUREjIGVRVU8Zp9+FIfvsKT68Hwn/Tr7C3t8PxY8egqKgo7ZhEVMpJpEj24MEDiMVibNmyBXfv3oWHhwc2b96M2bNnf3dZR0dHxMXFZT1WrVoliYhExUYsFmPs2LHYs3cPLMe4wcDEVtqR8vTq4WX4bRgHc/OO+PvIEcjLy0s7ElGh/P777zAzM4OysjLU1dVzbfPixQt07doVysrKqF69OqZPn46MjIwiz3L9+nW0NTVDUqqAnjMPoErNujna1GpgBksHN+zfvw9Tp06FIAhFnoOIiKis0NbWRmhIMNSUZHFi5UAEbJ6E3r164e8jh6GgoCDteERUBkjkGio7OzvY2dllPTcwMEBUVBQ8PT3h5uaW77LKysrQ0tKSRCyiYicIAiZMmIDt27fDYvQq1G3dTdqR8hQffQ2+6xzRzswMJ44f5wcNKpXS0tLQv39/mJqaYtu2bTnmZ2ZmomvXrtDS0sKFCxcQFxeH4cOHQ05ODsuWLSuyHKGhoejWvQdUquuj28StUKpUNc+2hi3t8SX5HdatWwgtLS24uroWWQ4iIqKyRldXF6GhwejX/xc0b9YFmzdv5tAgRFRkim1vkpiYiCpVqny33d69e7Fnzx5oaWmhe/fumDdvXr7XlaempiI1NTXreVJSUpHkJfpZgiBg6tSp8PT0hPmIZTAy7SXtSHl6/fQWfNeNQauWJjh96iSUlJSkHYnohyxatAgAsGPHjlznBwQE4N69ewgKCoKmpiaaNWuGJUuWYObMmVi4cGGRnD159OhRDBw0CFp1W8F2/EbIKVb87jKNLYYg5eM7zJ49G40aNUKPHj1+OgcREVFZpa+vj8irV6Qdg4jKoGIZuD86Ohrr16/HuHHj8m03ePBg7NmzByEhIXB1dcXu3bsxdOjQfJdZvnw51NTUsh46OjpFGZ3ohwiCgJkzZ2Lt2rXoMGQhGnToL+1IeXrz4i6814xC0yaN4OvjjYoVv/+Fnqi0ioiIgLGxMTQ1NbOm2draIikpCXfv3s1zudTUVCQlJWV75ObPP/9E//79ode0M+wmbIHvhvE4u2/xd3OJxZn4kvQOwNez3YiIiIiIqPgVqkg2a9asHAPr//fx4MGDbMvExsbCzs4O/fv3h6OjY77rHzt2LGxtbWFsbIwhQ4Zg165dOHbsGB4/fpznMq6urkhMTMx6xMTE5NmWqLjMnz8fq1evRruBc9DYYshPrSsjLQWPr/rCb5MzQnbMzvPW1z/i7cso+HiMQqMG9eDv54dKlSoV2bqJSqL4+PhsBTIAWc/j4+PzXO57P8gIgoDff/8dY8eORUPzIbB2dMfrp7cQ+yACUeePIjMj7xvXZKan4syf03AvbB+2bt2K3r17/8QWEhERERHRjypUkczFxQX379/P92FgYJDV/tWrV7CwsICZmRm2bt1a6HBt2rQB8PVMtLwoKChAVVU124NImpYuXYqlS5eibb8ZaGI98ofWkZmRjue3w3Bm23TsmNYWAZsn4fmtEDy7eabIcr5/FQ1v9xGoY6CHwIAAqKmpFdm6iYrSj/xAU9Ty+0FGLBZj8uTJmDt3Llr1nIz2g+ZBJCODB+eOQE6xItJTPyHuUWSu601L+Qjf9ePw4tYZHDly5Ls/JpVHDx48QLv2HTB5yhTe2ICIiEql7du3w7hJUwQGBko7ChF9R6HGJNPQ0ICGhkaB2sbGxsLCwgImJibw8vKCjEzhr+y8ceMGAKBGjRqFXpZIGlavXo158+ahda8paG5XuC+7gliMuOhIPLp0Ck8i/ZDy8T3UtQzQ1MYBdVp3he+6sdA2agMZmQo/nfND/FOcdh8OPR0tnAkKROXKlX96nUSS4uLigpEjR+bb5t8/0ORHS0sLly9fzjYtISEha15eFBQUc
r2ZRVpaGoYMGYqDBw+g49BFaGQ++Ov0lI94fNUXze0dcSdkH3zXj4WBiR3qt+sHbaPWEIlE+JL8Fr7rHPHxzTP4+/nB3Ny8QNtQnhw/fhxDhw2HjLwKLpw/hzqGhpg4caK0YxERERXY1q1bMW7cOKhU1kL37j3g4+MNS0tLacciojxIZOD+2NhYmJubQ09PD25ubnjz5k3WvG9fQmJjY2FlZYVdu3ahdevWePz4Mfbt24cuXbqgatWquHXrFqZOnYqOHTuiSZMmkohJVKTWrl2LGTNmwKSbM0y6ORdoGUEQ8M+Le3h0+RSiL3vj0/t4qFSpgfrt+qJOm26optMQIpEIb2MfIvH1c7QbNO+ncya9eYHT7sOhXb0qQoKDUa1atZ9eJ5EkFeYHmu8xNTXF77//jtevX6N69eoAgMDAQKiqqqJhw4aFXl//Xwbg3Llz6DxuLQxb2mdNj77sg4z0FBiZ9YWGXmOE7Z6PhxHH8TDiOJy2RiH53Sv4rBkFmYxPOBsehubNmxfJ9pUVmZmZmD9/PpYtWwaDFjawGL0SV06sw9Rp09CsWTN06NBB2hGJiIi+a9OmTXB2doax1XC07Tsd/hvHo2vXbvDz80WnTp2kHY+IciGRIllgYCCio6MRHR2NWrVqZZv37VKJ9PR0REVF4fPnzwAAeXl5BAUFYc2aNfj06RN0dHTQt29fzJ07VxIRiYqUp6cnpkyZgmZ2jmjVc/J327+Pf4LoS6fx6PJpJCY8haJKZRi2tEed1t1Qo44JRP858/LpNX/IKVZErfptfypn8ttYnHYfjuqVVRAaGpxVJCAqK168eIF3797hxYsXyMzMzDojuU6dOlBRUYGNjQ0aNmyIYcOGYdWqVYiPj8fcuXPh7Oyc65li+RHJVEDExcuwn/QXajUwzTbvwfkjqCArj6PL+uFz4hvIKVRE7WbW0DXuiHdx0fBdMxpV1SoiMPw86tSpU1SbXya8e/cOgwYPRmBAANr0+Q3N7cdCJBKhbd/pePviLvr264/r1yJRs2ZNaUclIiLK07p16zB58mQ06TwKZr+4QiQSwdbZE34bnNClS1f4+fnyRx+iEkgklLEBPpKSkqCmpobExESOT0bFYtu2bRgzZgyMrUeg3YA5EIlEubb7+C4Ojy6fRvTl0/jnxT3IKVaEfnMb1G3TDTXrm6KCrFyer3FoUXdUrlEHncd6/HDOj+/jcWr1EKgpV8C5s+E5CthEeSlN+9WRI0di586dOaaHhIRkXc74/PlzjB8/HqGhoahYsSJGjBiBFStWQFa24L8bJSUloXoNXXR32QkNvUY55gdvn4E3z+9Bt3EH6Bp3gladFqggK4+4R5Hw2zAWdQ304e/vy+EE/uPmzZvo1asPEv55BytHd+g0yv7l4XPSWxxd2gsN6uohPCys0IVNIiqZSstx5tmzZ1iyZAmCg4MRHx8PbW1tDB06FHPmzIG8vHxWu1u3bsHZ2RlXrlyBhoYGJk6ciBkzZhTqtUpLn1DuPDw8MG3aNDSzHYO2/WZk+36QnvoFfhvG4v2LOzhzJihrHG4ikqyC7lclciYZUXmxe/duODo6opH54HwLZAGbJ+PxVR9UkJWHXlMLtOg6HnrG5pCVV/zuayS9eYG3MQ9g0vXXH8756cNreLsPh4oCEBoSzAIZlVk7duzAjh078m2jp6cHHx+fn36tPnOOQF2zdq7zLEatzLE/eH4rBIFbJqFN69Y4dfIE1NXVfzpDWbJv3z44OIyBqqY++sw5ClUNnRxtlFWrorPTBpxcPRiTJ0/G5s2bpZCUiMqrBw8eQCwWY8uWLahTpw7u3LkDR0dHfPr0CW5ubgC+fgmzsbGBtbU1Nm/ejNu3b2P06NFQV1fH2LFjpbwFVBxWr16NGTNmoHkXJ7TpPS3H5wE5BSWYdJ+IE6uGwMfHh0UyohKGRTKiH3Tw4EGMHDkS9dv3R4fBC/IskAGAWJwB4Ovlxukpn5H68QPSvnwsUJHsybVAVJCVh27jjj+U83PSW3h7jIQ8UhEaEo7atWv/0HqIKLu8CmQAcuwPoi4cQ+gOV3Tr3h0H9u+DkpKShNOVHunp6Zg+fTrWrl2Lem17ouOwJZBTyLt/NA2aot2gBdiyZQ5at26N0aNHF2NaIirP7OzsYGdnl/XcwMAAUVFR8PT0zCqS7d27F2lpadi+fTvk5eXRqFEj3LhxA+7u7iySlQPLly/H7NmzYdLNGU1tHBB9+TSS375CczvHrOFUEp7chP/G8Wjdpg2mTZsm5cRE9F8skhH9gKNHj2LIkCGo27YnOg1bkmMMsf+y+3UjPr6Lw9MbQXhyLQDhexcibM98aNUxgUFzG+i36AzVarmf3fX0WgB0GneAnGLFQudM+fge3h4jIJOehNDwMBgaGhZ6HUT0c274b0PE4RUYPXo0tmzZUqjLOsu6hIQE9P9lAM6fP4/2g+ahseWwfH9w+KZhx1/w+tktOI0fD2NjY7Rq1aoY0hIR5ZSYmIgqVapkPY+IiEDHjh2zXX5pa2uLlStX4v3793neUTw1NRWpqalZz5OSkiQXuph9/PgRSUlJ0NbWlnYUiVqyZAnmz5+PpjYOuBmwHZGnN2bN02nYDhq1GyP+8XX4rnVA82bG8PP15aW0RCVQ/t/siSiHU6dOYcDAgTAwsYP5yOXfLZB9o1KlBowth6Hnb7sx4o8LsBixDApKlXDpqBv2zrLA4cU98Tnxn2zLfPrwGvFPrkO/uU2hc6Z+SoS3xyiIP79FSPAZGBkZFXodRJS3xITn+c4XBAERR1Yh4vAKzJo1C3/99RcLZP9y6dIlNG9hguu37qG7y04YWw0vUIHsG4MWtkhPS8PRo0clmJKIKG/R0dFYv349xo0blzUtPj4empqa2dp9ex4fH5/nupYvXw41NbWsh45OzkvOS6O4uDi0adMWtWrVwsGDB6UdRyIEQcD8+fMxf/58tO41BfXb9UVG2pes+RUra6GabkPEPboKnzWj0NKkOQL8/VkgIyqhWCQjKgQ/Pz/07dcPusYWsBi9GjIyFX5oPUqVqqB++37oMmkrRq65hM5j1+Bd7CM8vpp9nKRnN4IgEsmgdlPLQq0/7UsyfNY6IC0pDsFngtCoUc6BxYno55z8Yxg+fXid6zxxZgZCd7jiht+f8PDwwPLlywtVACrr/vzzT3To2BFQqo7ec45Cu17rAi8rCAJuBnjBd50jLCyt4OLiIsGkRFQezJo1CyKRKN/HgwcPsi0TGxsLOzs79O/fH46Ojj+dwdXVFYmJiVmPmJiYn16ntD19+hRmZu0RE/8WtZtbY8jQoUUyJmhJIggC5s6diyVLlqBNn99g0s0ZVWrWxXC389A2+jrWWJ1WXREXfRU+ax3Qtk1r+Pn6QEVFRcrJiSgv/EmbqIDOnDmDXr16o1bD9rAe65Hv3SgLQ15RBXVad8XDiyfw6PJpGFsNz5r35FoAtI3aQFFFvcDrS0/5BJ+1Y/Dpn2cIDQlG06ZNiyQnEWX36cNr+K51QPfpe6Gg/L9fgzPSUhC4dTJi7oRjz549GDJkiBRTliypqamYMGEC
/vrrLzTsNAjtB86BIAiIj76GhKc38ebZbbx+dhvGVsNhbDksx/LpqZ8RtmsOHl06jd9++w3Lly/n2XlE9NNcXFwwcuTIfNsYGBhk/fvVq1ewsLCAmZkZtm7dmq2dlpYWEhISsk379lxLSyvP9SsoKJSpO/bev38fVtad8SVTFj1m7IdKZS0EbJ6IPn36IiDAHx07/thYuyWNu7s7li1bBtP+M9HMdkzW9Irq1WFk1huvoi5BVUMHvmvHoH17M5w6eRLKyspSTExE38NPlkQFEB4ejm7dukOzXmt0HrcOFWTlv79QIdVp0w1n/nRB0j8voVqtFlI/JeJV1CW0GzCnwOtIT/0C3/VjkRT/CGeCAtGiRYsiz0lEXwniTKQmxcN/gxO6TNkOWXlFpH5KhN9GJ7x7cRenTp6Evb19vuu4cuUKNm7chOnTfyvzZ3y+fPkSvfv0wY0bN2E+chm06pjg6LL+eBv7EII4ExVk5VG5hiESE57hXezDHMsnvn6OAE9nfH77EocOHUL//v2lsBVEVBZpaGhAQ0OjQG1jY2NhYWEBExMTeHl5QeY/w26Ymppizpw5SE9Ph5zc1x9UAwMDYWRklOd4ZGVNZGQkOtvYooJyVfSY5oWK6tUBANZj18Bv/Vh06doNYaEhMDExkXLSn5fw+jVkZCqgco2c4/7Wb9cXKpW14LfRCZ06dsDJEyd44x6iUoCXWxJ9x4ULF2Bv3wUaBs1hO34jKshJ5lc+/aZWkJVXRPRlbwDAs1shEGdmoHZz6wItn5GeCv+N4/E+5i78/Xx5O2miYvD3kcN4++IOgrZOwcd3cTjpNgSfXj9GcPCZ7xbILly4AAtLK+zeswcWllaIiooqptTFLywsDM1bmODhk5foOWM/GrTvj9RPifiQ8BSCOBPa9Vpj4FJ/NOg4ACKRDJrZjMm2/PPbYTj6ex+oymfi0qWLLJARkVTExsbC3Nwcurq6cHNzw5s3bxAfH59trLHBgwdDXl4eDg4OuHv3Lg4ePIi1a9eWm7sYhoeHo5O5BeTVa6Gby56sAhkAyMopwPbXTVBQ1US//r9IMWXR+X3pUnTr3h0BnhPw4s7ZbPNe3DkLvw1OsLQwx6mTJ1kgIyolWCQjyseVK1dga2ePyjoNYeu8GbLyihJ7LTnFiqjd1ArRl08D+HpXS02DZlCpnPep+d9kpqciYJMz3jy5Bm/v02jXrp3EchLR/7Ru3RpHj/6NF3fCsH9OZ8iLP+HC+XMwNTXNd7mwsDB07mwD9ZoNMGhpAMRyqjC3sMTjx4+LKXnxEAQBa9asgZWVFRSr6qP3nGOort8EAKBl2BxDV4bBpPsEvHl+F9d9NuOG/18wbGkPNU29r8uLxbh6agN81znC0rwDIq9eQePGjaW5SURUjgUGBiI6OhpnzpxBrVq1UKNGjazHN2pqaggICMDTp09hYmICFxcXzJ8/H2PHjpVi8uLh6+sLGxtbqNdqhIQnN7Fjas7xJh9H+uF9/BN07NBBCgmLnpycHA4fOghbWxv4bxyPl/fOAwCe3wqF/8bx6NzZCieOH4eiouS+QxBR0WKRjCgP169fh3VnG1TSrAO7iVshpyD5X3/qtO6Gty8f4PWz24i5exb6zTt/d5nMjDQEbpmMuIcXcfLkCZibm0s8JxH9j729Pfbv24d+ffsg4sJ5NGzYMN/2QUFBsLOzR9XazWA/6S+oauig27SdSIMCzC0s8fx5/nfNLC0+f/6MIUOGYurUqWhsNRJdp+6AsmrVbG2UKlVB656Tod+iMx5cOIrkf16imf3XAbBTPyfD39MZV06sxfz583Hq5Emoq6tLYUuIiL4aOXIkBEHI9fFvTZo0wdmzZ5GSkoKXL19i5syZUkpcfA4dOoQePXpAu0E7tO7zvxuq3As/iPSUTwCAW0E7EOI1C45jxmD79m3Silrk5OXl8feRI7C2toTfhnG4cnI9AjydYW9vh2NHj5apseaIygMWyYhycfv2bVhZd4ZSVV3YT/oL8orFcwca3cYdIK+sitCds5GRlgL9Fjb5thdnZuDMn9MQczccx44eRefO3y+qEVHR69+/P/bv3wddXd182/n4+KBrt27QrNsadhO3ZBXfldU00HXaLnxMBTqZW+Dly5fFEVtinjx5gjZtTfH30WOwHusBs19mQaZC3sOgVtdvAnFGOnQatYeGbiO8e/UIx5f3xT+Pr+DUqVNYuHBhjnF/iIioZNi2bRsGDRoE/ZZd0NlpPR7//9AhABC2ay58Nzjh6qn1OH/gd0yfPh2bN29GhQo/dof4kkpBQQHHjh5Fp04dcfXkOvTo3h1/HzkMefmiH8eYiCSLnziJ/uP+/fuwsLSCXCUtdJ28HQrKlYrttSvIKcCghS3exjxAlZpGUNesnWdbsTgTwdun49nNMzhy+DC6dOlSbDmJqPBOnDiBnr16oWbDDrD9dSNk/zO+oUplLXRz2YX3H1Nhbm6BuLg4KSX9ecuWLcO9u3fRY+Z+1G3d7bvta9U3hV4Tc7To+iseX/XFsWX9oKmujMirV9Ct2/eXJyIi6XB3d8eYMWPQoNMgWI1ejQqycoh7dBUAICuvhKo6DSCnoIIrJ9Zh2bJlWLlyJUQikZRTF96nT5+QlJSUbxtFRUWcPHEC586dw4ED+7Nu3EBEpQuLZET/8vDhQ5hbWEKkVAVdpmyHQkW1Ys9Qt83XL4T6LfI+K0wQixG6wxVPrvriwP796NmzZ3HFI6IfcPjwYfTt1w96Ta2/3iE3jxuAVKpaE91dduP1+4+wsLTC69evizlp0Rg2bBhEIuDRxVMFal9Zuw7sJmzGi1uhCNg8CT27d8PlyxdRt25dCSclIqIfNX/+fLi4uKB5Fyd0GLwAov8/49d6rAe6TtmGER4XUV2vMZ7dDMKGDRvg6upaKgtkDx8+RIOGjVCjhjauXLmSb1slJSW0a9eOBTKiUoxFMqL/9+TJE5hbWCKzggq6Tt0JpUpVpJJD26gNBizyQTOb0bnOF8RihO2eh4cRx7Fr1y7069evmBMSUWHs3bsXAwcOhEHLLrB2dEcF2fw/OKtq6KLbtF2Ijf8HllZWSE1NLaakRadTp074448/cDNgW9Yde/OT8vE9fNY44GbAX1i9ejUOHjwAFZXiucydiKi8unPnDrp374GTJ08WellBELB06VLUatgObfu4ZCt+qWvWRs36bRHqNQsPI45i165dcHZ2LsroxSYyMhJm7drjU4YsVGvUhZ19lzJ9N2oiYpGMCMD/39LbwhIpYjl0nbYzx+DSxUlGpgKq1KwLeaWcl3kKgoCz+xfjwbnD8PLywuDBg6WQkIgKysvLC8OGDUO9tr1gOXpVvuNy/ZtKlRpQ0zJAdHQ00tPTJZxSMiZNmoRBgwYjbKcr3r7M+wvFm+d3cXRpb3yMf4CAgAD89ttvpfJMAyKi0uTatWvo2Mkc/oFn0LdfP/j4+BRqeZFIhKVLl+LlvfO4FbQj27z01C/w2zgeL26dwZEjRzBs2LAiTF58goOD0amTOeTUaqLnjP2wn/gnRIqVYd3ZBrGxsdKOR0QSwiIZEb7
eyTLmxXPU7zAAFdWrSztOrgRBwIVDy3E3ZC+2bNmCESNGSDsSEeVjy5YtGD16NBp0HIBOI5bBZ60jLhxa8d3l0lM/w3e9I949vw0fb+9Se0aVSCTCX3/9CaN6dRHg6YzUzznHcom6cAwnVg6Aga4Wrl+LhJWVlRSSEhGVLxcvXoS5hSXk1Wti8PJg6DTuhN59+uDMmTOFWo+rqyumT5+O8wd+x4NzRwB8vTOx71oH/PM4Ej7e3ujVq5cEtkDyjhw5Ajt7e1TVb45uU3dAUaUyFFXU0WXyNiR9zkDnzrY57mpKRGUDi2REALp06QIHBwdcPLIK98IPSTtODoIg4NLfbrgV6IUNGzbA0dFR2pGIKB/r1q2Dk5MTjK2Go+PQxXjz/A5i7p7FvbADyEjP+/LJtJSP8F07Bu9f3IG/vx8sLS2LMXXRU1ZWxvHjxyCkJuHMX9MgiMUAgMyMNJzdtxjB22dg8KBBOH/uLPT09KScloio7AsPD4e1dWdU0qyDblN3QFm1KjqPXQOtem3QvXsPnD17tsDrEolEWLlyJcaOHYvQXXNwL/wgvN2HITnhEYKCAmFtbS3BLZGczZs345dffkHt5jZoP3hBth95xJnpkJGVQ1JSEs96JiqjWCQjAiAjI4OtW7fCyckJYbvm4E7wHmlHyubKyXW47rcV7u7upXZMB6LyYvXq1Zg8eTKa2Y5Bu4FzIRKJ8OjyKcgqKCM99RNi7p7LdbnUz0nw8RiNpPiHCAoKRIcOHYo5uWQYGBjgwP59eHE7HFdOrcfnxDc47T4CD84ewKZNm+DltR1KSkrSjklEVOYFBQXB1tYOCpW1Ydx5dNbQHhXkFGA7fiOUq+pgwMBBhVqnSCTCpk2bMOCXAQjbNReZn94gPCwUpqamktgEiRIEAUuWLMH48eOhrmWAf2KisG+2NU66DYMgFuNtzAOcWDkQVVXkcO5cuLTjEpGEFGxwFKJyQEZGBhs3boSCggLWrFmEzIx0NLUZJe1YiDy9CZGnNmD58uWYOnWqtOMQUT6WLl2KefPmwaTbr2jVcwpEIhHE4kw8vuKDBu374cWdcAR4TkCtBu1Qt013GLbqggqyckj5+AE+a0Yh5UMsQoLPwMTERNqbUqTs7OywdOlSzJkzB/fD9qOSsjzCQkNhZmYm7WhEROXC6dOn0bdfP1Q3NMHLexfgv8kZTlujsu5I+ejyKbyLfQgHB4dCr7tChQrYtWsnunSxh5mZGQwNDYs6vsSJxWJMmjQJGzduhFYdE8RHR36dIRJBrXptxEVHwn+jE+rXqwN/P19Ur14yh2chop/HIhnRv4hEIri7u0NBQQErVy6DODMNze3HSS3PDf+/cPm4BxYuXIhZs2ZJLQcR5U8QBMyfPx9Lly5Fq56T0bL7hKx5r6Iu43PiG9Rp3Q2GLbsgZIcrXtwJw4s7YajV0AwiGRl4e4xC5qc3CAsNQdOmTaW4JZIza9YsvIyNxYsXMfhz6xbUqFFD2pGIiMqFv//+GwMHDYJuY3PUatQeL+9dAABscWoIG6d1+Jz4Bmf3LoSTkxM2btz4Q68hJydXagfoT0tLw/DhI3Do0EF0HLYYBi1sEHXhOCIOrwAEAVVq1YPPmtFo184MJ44fg6qqqrQjE5EEsUhG9B8ikQjLly+HvLw8lixZgsyM9GxfeIvLraCdiDi8Eq6urpg/f36xvz4RFYwgCJg5cyZWr16Ntn2no7n92Gzzoy+fBkQihO6cjfevoiErrwT9FjYwaGEDcWYGfNwdIJOehPCwUDRq1EhKWyF5MjIy2PSDX76IiOjH7Nu3D8OHD4eBiR0sRq/G6TWjUUFWHpkZaRDEmbjuswWvn93ClClT4O7uXi7H2Zo0aTL+PnoUncethWFLewBAM1uHr0UyADf9t6F3797Yu3cPFBUVpRmViIoBi2REuRCJRFi8eDEUFBQwd+5cZGakoXWvqcX2weFu6D6cP7AULi4u+P3338vlBxai0kAQBEyZMgXr1q1Du4Fz0MR6ZI428sqVoKCkCg09Y7TpPQ21GraHnIISPr6Ph7f7cMgjFaHhYTAyMir+DSAiojJr+/btGDNmDIxMe6PTyGWQkamAimoaUFavDv1m1khL+YQH5w7D1dW1XH/erFhRGWKxGLIKytmmm3RzRuTpjXB0HINNmzahQoUKUkpIRMVJJJSxe9cmJSVBTU0NiYmJPBWWioSbmxumT5+OprYOMO03U+IfIO6fO4zQHbMxceJErF27ttx+YKGSg/vVnL71yajRo+G1fTs6Dl2ERuaDc20rCALEmRmoICuXNS357St4uw9HRXkBoSHBpXL8FiKiosLjTE4/2yevX7+GpqYmDExsYTNuXdbYY4JYDAHAlRNrcM3bE0uWLMHcuXOLOH3pkpGRgd59+sI/IADdpu2Cpn5TRPy9Cjf9t2Hu3LlYvHgxP48TlQEF3a/yTDKi7/jtt98gLy+PyZMnQ5yRnnW3Okl4GHECYTvnYOzYsSyQEZUCO7y8YD5yGRq0759nG5FIlK1AlvTmBU67D4d6RXmEhYZAT0+vOKISEVE5oqamhnbtOyDyegT+ibkPDb3/v5xfJMLFQytwM3A73Nzc4OLiIt2gJYCsrCwOHTyAzja28FvviBpGbfEk0h/r1q3DxIkTpR2PiIqZjLQDEJUGkyZNgqenJ26f2YXwPQsgiMVF/hrRl70R7DUDI0aMgKenJwtkRKVA+8EL8i2Q/deH+Kc4uXoIqqkq4dzZcBbIiIhIIhQUFODjfRrGjerD22Mk3r6MgiAW4+zehbgZuB0bNmzIUSA7deoUNm3aBLEEPueWdEpKSjh96iQM9XXx4uYZ7Nu3jwUyonKKZ5IRFZCTkxPk5eUxZswYiDPT0Wn4UsjIFM3YBE+uBeDMXy4YPGgw/vrrL8jIsH5NVNKJZCrg0cXjqGfaE/KKKt9t/+7VI3i7j0CtGhoIPhPEuzsSEZFEqaqqwt/PD+YWlvD2GAmtuq3xJNIX27Ztw+jRo7O13bRpE5ydnQEADx8+hIeHR7n7wVZdXR2XL13Cx48foaGhIe04RCQl/CZOVAijR4/Grl278PDCUYRsnwlxZsZPr/PZzWAEbZ2Cvn37YscOLw4KSlRKCOJMfHr9BL7rHJGe+jnftm9jHuC02zDo1dJEWGgIC2RERFQsKleujDNBgdDV1sSz6/7YvXt3jgLZ8uXL4ezsjCbWI9Fh8AKsXbsWixcvllJi6VJSUmKBjKic45lkRIU0dOhQyMnJYciQIRBnZsDSYXW28YYK48WdswjcPBHdu3XD3r17ICvLtyRRaXLs2FH07NkLfhucYD9xK2Tlc94a/s3zu/D2GAmjugYICgxA1apVpZCUiIjKq2rVquHy5YtISkqCtrZ21nRBEDBr1iysWrUKLXtMQsvuEyASiZCW8hELFy6Euro6Jk+eLMXkRETFj2eSEf2AAQMG4PDhw3h2IwBBWycjMyOt0Ot4eT8CAZvGw6azNQ4ePAA5uR8rtBGR9LRq1Qq+vj54++wm/Df9ioz01GzzE57cgLf7cDRuWA8hwWdYICMiIqlQUV
HJViATi8UYP348Vq1aBbMBs9Gqx8Ssyyub249DMztHTJkyBT4+PtKKTEQkFSySEf2g3r1749jRo4i5E4YAzwk5vhzn59XDK/DfMA6dOnXE33//DXl5eQkmJSJJ6tChA06fPoWER1cQuHliVtE87tFVeHuMQvNmTRAUGAh1dXXpBiUiIgKQnp6OoUOHYeuff0JeqRIuHFwGQRD+10AQkJmWktWWiKg8YZGM6Cd069YNp06eRNyDCPhvcELG/3+gyE/84+vwW+8IU9O2OHniBBQVc16eRUSli6WlJU6cOI7Y++cRtHUqYu6eg89aB7Rt0wr+fr5QVVWVdkQiIiKkpKSgd58+OHT4MDoMWYS0L8kAgKfXAiAWZ0KcmYGQHbNwJ2QPPD090bNnTyknJiIqXhwAiegn2drawsfHG926dYfvOkfYTdwCOQXlXNu+fnYbvmsd0NKkBbxPn4KSklIxpyUiSbG1tcXfR46gT58+eHItANadO+PE8eNQVs59f0BERFSckpOT0b1HT1yIiIDdhM14+zIqa56/5wRo6DVCparaeH4zGHv27MHgwYOlmJaISDokdiZZ7dq1IRKJsj1WrFiR7zIpKSlwdnZG1apVoaKigr59+yIhIUFSEYmKjKWlJfz9/fA+5g58145BWsrHHG3+eXEP3h4j0cS4IXx9vFGxYkUpJCUiSerevTtOnTqFxYsX49TJkyyQERFRifDu3TtYWlnh0uWr6DrFC7qNO+L5zeBsbdJSPuHl3XAcO3aMBTIiKrdEQrYL0ItO7dq14eDgAEdHx6xplSpVyrcwMH78eHh7e2PHjh1QU1PDhAkTICMjg/Pnzxf4dZOSkqCmpobExERe3kLFLiIiAja2dqikaQj7SdugoFwJAPA29iFOuw2FUV0DBJ8J4thEVKpwv5oT+4SIqOhwn5pTUfZJfHw8rKys8exlHLpM3g4NvUYAgLcxDxD36Cpq1GuFs3vmIynuIU6dOgkLC4ui2AQiohKloPtViV5uWalSJWhpaRWobWJiIrZt24Z9+/bB0tISAODl5YUGDRrg4sWLaNu2ba7LpaamIjX1fwOmJyUl/Xxwoh9kamqK4DNBsO5sA2+PEegyeTu+JL+Ft/sIGOrrIigwgAUyIiIiIio2J06cwL17d2Ezfn1WgQwAqurUh5KaBnzWjEJ6cgKCg8+gdevWUkxKRCR9Eh24f8WKFahatSqaN2+O1atXIyMjI8+2kZGRSE9Ph7W1dda0+vXrQ1dXFxEREXkut3z5cqipqWU9dHR0inQbiAqrVatWCAsNQVriK5z+YxhOuw+Hbk1NBJ8JQpUqVaQdj4iIiIjKkQEDBqBJk2a4sH8xEl8/z5qe/PYVTq0eBFHqe5wND2OBjIgIEiySTZo0CQcOHEBISAjGjRuHZcuWYcaMGXm2j4+Ph7y8fI6zbDQ1NREfH5/ncq6urkhMTMx6xMTEFNUmEP2wZs2aITwsFELKO2hrVEFI8BlUq1ZN2rGIiIiIqJxRV1dHYKA/tDQqw9tjJD6+j8eH+Kc4uWoQVOQFXDh/Do0bN5Z2TCKiEqFQl1vOmjULK1euzLfN/fv3Ub9+fUybNi1rWpMmTSAvL49x48Zh+fLlUFBQ+LG0uVBQUCjS9REVlcaNGyM+7hVEIhFkZCR60iYRERERUZ6qV6+O4DNBMGvXHt7uI5D6OQm1amjgTFAgatasKe14REQlRqGKZC4uLhg5cmS+bQwMDHKd3qZNG2RkZODZs2cwMjLKMV9LSwtpaWn48OFDtrPJEhISCjyuGVFJU6FCBWlHICIqce7cuYOKFStCX19f2lGIiMoNHR0dnAkKhLmFJWob1oafnw80NDSkHYuIqEQpVJFMQ0Pjh3ekN27cgIyMDKpXr57rfBMTE8jJyeHMmTPo27cvACAqKgovXryAqanpD70mERERlSwHDhzA0KFDoaRcEcFngtCqVStpRyIiKjfq1auHV7EvpR2DiKjEksg1YBEREVizZg1u3ryJJ0+eYO/evZg6dSqGDh2KypUrAwBiY2NRv359XL58GQCgpqYGBwcHTJs2DSEhIYiMjMSoUaNgamqa550tiYiIqPTYu3cvhgwZAsNW3VBJ0xDWnW1w7do1acciIiIiIgJQyDPJCkpBQQEHDhzAwoULkZqaCn19fUydOjXbOGXp6emIiorC58+fs6Z5eHhARkYGffv2RWpqKmxtbbFp0yZJRCQiIqJitHPnTowaNQpG7fqi0/ClSE/5DJ81o2BpZY3QkGA0a9ZM2hGJiIiIqJyTyJlkLVq0wMWLF/Hhwwd8+fIF9+7dg6ura7YB9mvXrg1BEGBubp41TVFRERs3bsS7d+/w6dMnHD16lOORERERlXLbt2/HqFGjUL/DLzAf/jtkZCpAQbkSukzZDkX1mrC0ssbt27elHZOIqFQQBAGurq6ooV0Lvr6+0o5TbIKCgtDJ3AJ+fn7SjkJEZRhvuUdEREQSs3XrVjg4OKBhp0HoNHQxRP+626+Csiq6TPGCXCUtWFha4e7du1JMSkRU8gmCgJkzZ2LFihVIr6CCXr17l4ui0aFDh2DfpQsuXb2OPn364uLFi9KORERlFItkREREJBGenp4YN24cGlsOQ4chC7MVyL5RVFFH16lekFGqCnMLS9y/f18KSYmISj5BEDBjxgysXr0a7QbOQd+5R1GzQXv07NUL/v7+0o4nMZ6enhg4cCAMTLpg6IoQVNZpCPsuXXHv3j1pRyOiMohFMiIiIipy69evx6+//gpj6xFoP2geRCJRnm0VVSqj67QdgII6zC0sERUVVXxBiYhKAUEQMH36dLi5uaHdwLloYj0SFWTl0dlpHWo2aIcePXuWuUKZIAhYuHDh12OJ1QhYjl4FeaVKsJuwBbIqGuhsY4sXL15IOyYRlTEskhEREVGR8vDwwKRJk9DUxgHtBszJt0D2jVKlqug6bRcyZSvB3MISKSkpxZCUiKjkEwQBv/32G/744w+Y/eIKY6vhWfMqyMqj87h1qFRND6NGj5FiyqKVmZkJZ2dnLFq0CG36uMBswOyss5EVlFXRZfJ2fEoDevTsJd2gRFTmsEhGRERlwrNnz+Dg4AB9fX0oKSnB0NAQCxYsQFpaWrZ2t27dQocOHaCoqAgdHR2sWrVKSonLJjc3N0ybNg3N7cbCtP/MAhXIsggCRCIRMjMzJReQiKgUEQQBLi4ucHd3R416rXHh0HIkPLmRrc2d0H14G/sQw4YOlk7IIpaamopBgwZj85Yt6DT8dxhbDUfy29hsbT7EP0HKxw9QU1OTUkoiKqtYJCMiojLhwYMHEIvF2LJlC+7evQsPDw9s3rwZs2fPzmqTlJQEGxsb6OnpITIyEqtXr8bChQuxdetWKSYvO1asWIHp06ejRdfxaNP3t0IVyD59eI1TfwxFhYxkhIeFQlFRUYJJiYhKB1dXV3h4eKDD4AVIT/kIALh6agPio69BEATcDPDChYPLMGPGDKxYsULKaX9ecnIyunTtiqPHjsOghR0eX/XB9sktcXhRd3x8FwcAeBLpD5+1Dmjfri28T5+ScmIiK
mtkpR2AiIioKNjZ2cHOzi7ruYGBAaKiouDp6Qk3NzcAwN69e5GWlobt27dDXl4ejRo1wo0bN+Du7o6xY8dKK3qZsHTpUsybNw8tu09Eyx4TswpkSW9e4HGkP2LunIXZL7NQTbdhjmU/vo+Ht/twKIjSEBYehrp16xZ3fCKiEsnT0xMdhiyEbuMOOLtvEQAg5k44Yu6Ew7TfTEQcWYmZM2di+fLlhTtztwR68+YN7Lt0wZ27D9CyxyRcOvr12C0SyUBGSR5KqlVxL+wAwvcuQP9+/bF79y7Iy8tLOTURlTUskhERUZmVmJiIKlWqZD2PiIhAx44ds32otrW1xcqVK/H+/XtUrlw51/WkpqYiNTU163lSUpLkQpcy3wZWXrx4MVr1nIyW3Sfg/atoPLnmjyeR/vgn5j5EMhUgiDMRc/dsjiLZx3dxOO0+HMqymQgLDYOhoaGUtoSIqOTpMGQhGlsMwcOLJ7KmVTdoigqyCog4shKzZs3CsmXLSn2B7Pnz5+jc2RavXr9F9+l7oF69NuQUlXHxbzdkpH6GkWkv3PD7C5ePe8DZ2Rnr1q2DTC53TCYi+lkskhERUZkUHR2N9evXZ51FBgDx8fHQ19fP1k5TUzNrXl5FsuXLl2PRokWSC1tKCYKAefPm4ffff0ebPi5o0cUJz2+FwGfd17PytI3awMZpHVI/JyNs1xwYtuySbfnkt69w+o9hqKQoQlhoWI7/GyKi8uxbgQwA6rbuDhkZWWjVaYHoK96IOLwSrq6u+P3330t9gSwhIQGmZu3wJUMGPWccgJqmHgDA2HIY7p89hLcxD/A58R/cDNiORYsWYd68/O+YTET0M1h+JyKiEm3WrFkQiUT5Ph48eJBtmdjYWNjZ2aF///5wdHT86Qyurq5ITEzMesTExPz0Oks7QRCyvqC17TcDLbo4AQBUq+tBr4k5ZCrIIe7hFbyPe4znt0JQ3aApVDV0spZP+uclTrkNgZpyBYSHhbJARkT0H98KZAAgkpFBndZd8ejyaUQcXonZs2eXiQIZAHz48AEJ8XGoYWQK1eq62eZ1nbwNuo07IvryKXh6emL+/PllYpuJqOTimWRERFSiubi4YOTIkfm2MTAwyPr3q1evYGFhATMzsxwD8mtpaSEhISHbtG/PtbS08ly/goICFBQUCpm87BIEAdOnT8cff/wBs19mo6nNqKx5lbUM0GXSn0j5+AHhe+bjbuh+pHx6j7Z9pme1SXrzAqf/GIYqlZQQGhoMXV3d3F6GiKhce34rFHpNzLOeX/fdiot/r8acOXOwZMmSMlMsMjIygqenJ8aNGweVKjXQsvsEAEB66meE7piFuIeXcOjQIfTr10/KSYmoPGCRjIiISjQNDQ1oaGgUqG1sbCwsLCxgYmICLy+vHOOVmJqaYs6cOUhPT4ecnBwAIDAwEEZGRnleaknZCYKAqVOnYu3atWg/aB6MrYbn2k5RRR2NzAfj8VVfAIBhy683VUhMeI7T7sOgoV4RoaEhqFWrVrFlJyIqTXw3jIf9xM3QM+6E675bcPFvN8ydOxeLFy8uMwWyb8aOHYs3b95g7ty5UKpUBYYt7eG7fiyS46Ph6+MDKysraUckonKCRTIiIioTYmNjYW5uDj09Pbi5ueHNmzdZ876dJTZ48GAsWrQIDg4OmDlzJu7cuYO1a9fCw8NDWrFLFUEQMHHiRGzcuDHbWDl5qVG3JRRVKqNyDUOoVKmBD/FPcfqPYdCqpo7Q0GBoa2sXU3IiotJHEDIRsMkZdVp3w4Pzf5fZAtk3s2fPRkJCAjZsWIgbflshhzSEhYXCxMRE2tGIqBxhkYyIiMqEwMBAREdHIzo6OsfZSYIgAADU1NQQEBAAZ2dnmJiYoFq1apg/fz7Gjh0rjcilzr1797Bx40YYW4/4boEMAGQqyGLYqnBAJML7uMc4/ccw1NSqhtCQ4HwvbyUiIgCCACsrC/j7/Y158+Zh0aJFZbZABgAikQhr1qxBSkoKIq9dx/59e1GvXj1pxyKickYkfPvmUEYkJSVBTU0NiYmJUFVVlXYcIqJSj/vVnMprn6SmpqJb9+4IDz8H+8l/Qbte6wIt9y72Ebw9RkC3ZnWEBAejevXqEk5KRKVJed2n5udbn/zzzz/4/PkzatWqVaYLZEREklbQYw3vbklEREQFoqCggJMnTqBdOzP4rnNE3KPI7y7z9mUUTv8xDLV1tBAaEsICGRFRIcjJyUFHR6fMFMhSU1Px+PFjlLHzNIioDGGRjIiIiApMSUkJp0+dRNs2reG7zgHxj6/n2fafmPs4/ccwGOrrIDQkpMA3YCAiorInNTUVPXr2RJ06dTB79mwWyoioRGKRjIiIiApFWVkZPt6n0dKkBXzXOiDhyc0cbd48v4vTfwyHUV0DhASfQdWqVaWQlIiISoLU1FT06dMHISGhaNhpIFasWIEZM2awUEZEJQ6LZERERFRoFStWhK+PN5o3M4bP2tF48+xO1rzXz27jtPtwNGpQF8FnglClShUpJiUiImlKS0tD/18GICDwDGydPdFp2BK0GzgXbm5umDZtGgtlRFSi8O6WRERE9EMqVaoEP19fdLaxgfeakeg6bSfEGenwWeuAJo0bIMDfH2pqatKOSUREUpKeno6BAwfB19cXNr9ugk6jDgCAJtYjICNTAWvWLEJmZibWrVsn5aRERF/xTDIiIiL6Yaqqqgjw90fD+nXh7T4S3h6j0LypMQIDAlggIyIqxzIyMjB48BCcPHUKnZ3WQ8+4U7b5Dc0HQdOgGbZv95JSQiKinFgkIyIiop+ipqaGoMBAmLZpCWtLcwT4++V7a20iIirbMjIyMHToMBw9dgzWY9dCqVIV3A3dB0EsBgAIYjHCdszGm2e3sG3bX1JOS0T0P7zckoiIiH6auro6zgQFSjsGERFJWWZmJkaOHIVDhw5Bo3ZjnNu3CJ8+JAAAVKrWhG6jDgjbPQ8PLx7H7t27MWDAACknJiL6H55JRkREVM5lZGRg//79ePTokbSjEBFRKSYWizF6tAP279+PjsMW4/XTW1kFMhlZOWgZtkD43gV4cO4wvLy8MHjwYCknJiLKjkUyIiKiciwlJQV9+/XH4MGD0aFjJzx79kzakYiISrzU1FQ0a9YMIpEIN27cyDbv1q1b6NChAxQVFaGjo4NVq1ZJJ2QxE4vFGDNmDHbv2Q1Lh9Vo0OEX/LLgFHQadwQA1GrQDpePueNe2AFs27YNw4cPl3JiIqKcWCQjIiIqpxITE2FjawdfPz9YjFqBVLEcrK074/Xr19KORkRUos2YMQPa2to5piclJcHGxgZ6enqIjIzE6tWrsXDhQmzdulUKKYvXpEmTsGPHDliOWoW6bbpDJBKhqk59NLMdAwBIT/2EOyF7sHXrVowaNUrKaYmIcsciGRERUTkUHx+PDh064UrkdXSdsgP12/VFlynbkfD2A2zt7JGUlCTtiEREJZKvry8CAgLg5uaWY97evXuRlpaG7du3o1GjRhg4cCAmTZoEd3d3KSQtXsEhoVCtVgu6xh2zTa9Zvy2adh6NuIdX4OnpCUdH
RyklJCL6PhbJiIiIypnHjx/D1LQdnr2MR48Z+1GjrgkAQK26HuwnbcP9qEfo2as3UlNTpZyUiKhkSUhIgKOjI3bv3g1lZeUc8yMiItCxY0fIy8tnTbO1tUVUVBTev3+f53pTU1ORlJSU7VHaHDywH7JCCk67j8CX5HcAAEEQcPHv1bgZuB3r16+Hk5OTlFMSEeWPRTIiIqJy5ObNmzA1a4fEFDF6zDyAqjXrZZtfTbch7CZsxrlz5zFkyFBkZmZKKSkRUckiCAJGjhwJJycntGzZMtc28fHx0NTUzDbt2/P4+Pg81718+XKoqallPXR0dIoueDExNjZGWGgIhC9vcdp9OD4nvcWlY+644fcnPDw8MGHCBGlHJCL6LhbJiIiIyonw8HC079ARMsoa6DHjAFSr1cq1nXa91rAeuwZHjx3F5MmTizklEVHxmjVrFkQiUb6PBw8eYP369UhOToarq2uRZ3B1dUViYmLWIyYmpshfozg0atQI4WGhkElLxKEFXXDdZzPc3NwwZcoUaUcjIioQWWkHICIiIsk7ceIEfhkwAJqGJrD5dSPkFVXyba+h1wiVKmsh/Oy5YkpIRCQdLi4uGDlyZL5tDAwMEBwcjIiICCgoKGSb17JlSwwZMgQ7d+6ElpYWEhISss3/9lxLSyvP9SsoKORYb2nVoEEDhIeFYpzTePTo3g3Tpk2TdiQiogKTSJEsNDQUFhYWuc67fPkyWrVqles8c3NzhIWFZZs2btw4bN68ucgzEhERlRfbt2+Ho6Mj9JvbwGqMGyrIKSAzIx0f4p+gai2jHO3fxz+B75pRUFWWxeFDB6WQmIio+GhoaEBDQ+O77datW4elS5dmPX/16hVsbW1x8OBBtGnTBgBgamqKOXPmID09HXJycgCAwMBAGBkZoXLlypLZgBLIyMgIoSHB0o5BRFRoEimSmZmZIS4uLtu0efPm4cyZM3lev/+No6MjFi9enPU8twExiYiI6PsEQcDq1asxc+ZMNOw0CC26OCEq4gRe3AlD7P0LSPvyEbbjN8DAxDZrmddPb8F3vSP0atZAYKA/atasKcUtICIqOXR1dbM9V1H5ekauoaEhatX6evn64MGDsWjRIjg4OGDmzJm4c+cO1q5dCw8Pj2LPS0REhSeRIpm8vHy204nT09Nx4sQJTJw4ESKRKN9llZWV8z0VmYiIiL5PLBZj+vTpcHd3R902PRAffQ17ZnaCSCSD6gZNoWnQDDF3z0FV43+DQ8fcPYcAT2c0b9YEPt7eqFKlihS3gIio9FFTU0NAQACcnZ1hYmKCatWqYf78+Rg7dqy0oxERUQEUy5hkJ0+exNu3bzFq1Kjvtt27dy/27NkDLS0tdO/eHfPmzcv3bLLU1NRst6gvjbdLJiIiKkrp6elwcBiDPXt2o/3g+QCAR5dOAgDqtO6Gtv2m49y+xaim2xDVdBsCAKKv+CB422/o3Nkafx85wjO5iYi+o3bt2hAEIcf0Jk2a4OzZs1JIREREP6tYimTbtm2Dra1t1mnIeRk8eDD09PSgra2NW7duYebMmYiKisLRo0fzXGb58uVYtGhRUUcmIiIqlT5//oz+/fvDPyAQVo7uqNu6GwCgVgMzPDj/N+4E74acghKe3wpBuwFzAAB3Qvbi3L5FGDx4CLy8tmeNo0NEREREVJ4Uqkg2a9YsrFy5Mt829+/fR/369bOev3z5Ev7+/jh06NB31//v05CNjY1Ro0YNWFlZ4fHjxzA0NMx1GVdX12x3TElKSoKOjk6ubYmIiMqy9+/fo0vXrrh2/SbsJ26BTqMOWfMq1zCEab8Z+PjuFe6FH0QFWXkYtu6KKyfW4eqp9Zg8eTLc3d0hIyMjxS0gIiIiIpKeQhXJCnp75H/z8vJC1apV0aNHj0KH+3aXmOjo6DyLZGXpdslEREQ/6vXr17CwsMSzmFfoNm0XNA2a5tpOt3EnRF/2Ru3m1rhyYi3uhuzF8uXLMXPmzO+OG0pEREREVJYVqkhW0NsjfyMIAry8vDB8+PAfunTjxo0bAIAaNWoUelkiIqLy5OrVq7h37y7aDZyTZ4EMAHSNO6FJ51F4FxuFp5F++PPPPzFmzJhiTEpEREREVDJJ9JqK4OBgPH36NNcP37Gxsahfvz4uX74MAHj8+DGWLFmCyMhIPHv2DCdPnsTw4cPRsWNHNGnSRJIxiYiISj1bW1v06dMXl4/+gVcPL+fZroKcPN7HPsTrx5H4+++/WSAjIiIiIvp/Ei2Sbdu2DWZmZtnGKPsmPT0dUVFR+Pz5MwBAXl4eQUFBsLGxQf369eHi4oK+ffvi1KlTkoxIRERUJlSoUAH79u1Fu3Zm8N84Hv/E3M/R5kvyW5z+Yzjex9xGgL8/evXqVfxBiYiIiIhKKIne3XLfvn15zvvvLZN1dHQQFhYmyThERERlmoKCAk4cP4ZO5hbwXeuAnjMPQFVDFwCQ/DYWPmtGoULmZ5wND0OzZs2kG5aIiIiIqIThLayIiIjKEFVVVfj7+aJ6VTV4rxmNz4n/4F3sI5xYMQCqCiJEXDjPAhkRERERUS4keiYZERERFb/q1asjKDAApmbtcNp9OD4nvkYdfT34+/vxZjhERERERHngmWRERERlkL6+PgID/CGTnoSWLZohPDyMBTIiIiIionzwTDIiIqIyytjYGK8T4qUdg4iIiIioVOCZZEREREREREREVO6xSEZEREREREREROUei2RERERERERERFTusUhGRERERERERETlHotkRERERERERERU7rFIRkRERERERERE5R6LZEREREREREREVO6xSEZEREREREREROUei2RERERERERERFTusUhGRERERERERETlHotkRERERERERERU7slKO0BREwQBAJCUlCTlJEREZcO3/em3/SvxWENEVJR4nMmJxxkioqJV0GNNmSuSJScnAwB0dHSknISIqGxJTk6GmpqatGOUCDzWEBEVPR5n/ofHGSIiyfjesUYklLGfbMRiMV69eoVKlSpBJBIV6bqTkpKgo6ODmJgYqKqqFum6ixqzSkZpyVpacgLMKglFnVMQBCQnJ0NbWxsyMrxKH5DssaY4lZa/6Z/BbSwbuI1lQ17byONMTtI6zpSHv8PCYH/kxD7Jjv2RU0ntk4Iea8rcmWQyMjKoVauWRF9DVVW1RP1n54dZJaO0ZC0tOQFmlYSizMlf9rMrjmNNcSotf9M/g9tYNnAby4bctpHHmeykfZwpD3+HhcH+yIl9kh37I6eS2CcFOdbwpxoiIiIiIiIiIir3WCQjIiIiIiIiIqJyj0WyQlBQUMCCBQugoKAg7SjfxaySUVqylpacALNKQmnJSdJXHv5WuI1lA7exbCgP21ja8f8oO/ZHTuyT7NgfOZX2PilzA/cTEREREREREREVFs8kIyIiIiIiIiKico9FMiIiIiIiIiIiKvdYJCMiIiIiIiIionKPRTIiIiIiIiIiIir3WCQjIiIiIiIiIqJyj0WyfISGhkIkEuX6uHLlSp7LmZub52jv5OQk8by1a9fO8borVqzId5mUlBQ4OzujatWqUFF
RQd++fZGQkCCxjM+ePYODgwP09fWhpKQEQ0NDLFiwAGlpafkuV1x9unHjRtSuXRuKiopo06YNLl++nG/7w4cPo379+lBUVISxsTF8fHyKPNN/LV++HK1atUKlSpVQvXp19OrVC1FRUfkus2PHjhz9p6ioKPGsCxcuzPG69evXz3cZafQpkPv7RyQSwdnZOdf2xdmn4eHh6N69O7S1tSESiXD8+PFs8wVBwPz581GjRg0oKSnB2toajx49+u56C/v3TmVHQffFt27dQocOHaCoqAgdHR2sWrVKSol/zO+//w4zMzMoKytDXV091zYvXrxA165doaysjOrVq2P69OnIyMgo3qA/oay9jyW1vyspCnIML+7PZkXN09MTTZo0gaqqKlRVVWFqagpfX9+s+aV9+8qD1NRUNGvWDCKRCDdu3Mg2r7QfFwqqvBwnC6usHXMKqjzsu3/WihUrIBKJMGXKlKxppbVPWCTLh5mZGeLi4rI9xowZA319fbRs2TLfZR0dHbMtV1w7zMWLF2d73YkTJ+bbfurUqTh16hQOHz6MsLAwvHr1Cn369JFYvgcPHkAsFmPLli24e/cuPDw8sHnzZsyePfu7y0q6Tw8ePIhp06ZhwYIFuHbtGpo2bQpbW1u8fv061/YXLlzAoEGD4ODggOvXr6NXr17o1asX7ty5U6S5/issLAzOzs64ePEiAgMDkZ6eDhsbG3z69Cnf5VRVVbP13/PnzyWa85tGjRple91z587l2VZafQoAV65cyZYzMDAQANC/f/88lymuPv306ROaNm2KjRs35jp/1apVWLduHTZv3oxLly6hYsWKsLW1RUpKSp7rLOzfO5UtBdkXJyUlwcbGBnp6eoiMjMTq1auxcOFCbN26VYrJCyctLQ39+/fH+PHjc52fmZmJrl27Ii0tDRcuXMDOnTuxY8cOzJ8/v5iT/piy+D6WxP6uJCnIMby4P5sVtVq1amHFihWIjIzE1atXYWlpiZ49e+Lu3bsASv/2lQczZsyAtrZ2jull4bhQUOXlOFkYZfGYU1DlYd/9M65cuYItW7agSZMm2aaX2j4RqMDS0tIEDQ0NYfHixfm269SpkzB58uTiCfUvenp6goeHR4Hbf/jwQZCTkxMOHz6cNe3+/fsCACEiIkICCXO3atUqQV9fP982xdGnrVu3FpydnbOeZ2ZmCtra2sLy5ctzbf/LL78IXbt2zTatTZs2wrhx4ySa879ev34tABDCwsLybOPl5SWoqakVX6j/t2DBAqFp06YFbl9S+lQQBGHy5MmCoaGhIBaLc50vrT4FIBw7dizruVgsFrS0tITVq1dnTfvw4YOgoKAg7N+/P8/1FPbvncq+/+6LN23aJFSuXFlITU3NmjZz5kzByMhIGvF+Sl7vVx8fH0FGRkaIj4/Pmubp6Smoqqpm2+6Sqqy/j4tqf1eS/fcYXlI+mxW1ypUrC3/99VeZ3b6yxMfHR6hfv75w9+5dAYBw/fr1rHll6bjwI8rycbIgyvoxpzDKy767IJKTk4W6desKgYGB2b6zl+Y+4ZlkhXDy5Em8ffsWo0aN+m7bvXv3olq1amjcuDFcXV3x+fPnYkj49TTHqlWronnz5li9enW+l4xERkYiPT0d1tbWWdPq168PXV1dREREFEdcAEBiYiKqVKny3XaS7NO0tDRERkZm6wsZGRlYW1vn2RcRERHZ2gOAra1tsfYd8LX/AHy3Dz9+/Ag9PT3o6Ohk+0VX0h49egRtbW0YGBhgyJAhePHiRZ5tS0qfpqWlYc+ePRg9ejREIlGe7aTVp//29OlTxMfHZ+s3NTU1tGnTJs9++5G/dyr7/rsvjoiIQMeOHSEvL581zdbWFlFRUXj//r00Iha5iIgIGBsbQ1NTM2uara0tkpKSpPJ+Lozy+D7+kf1dSfffY3hJ+WxWVDIzM3HgwAF8+vQJpqamZW77ypqEhAQ4Ojpi9+7dUFZWzjG/PBwX8lMej5PflMdjTn7K+r67MJydndG1a9cc3+FKc5/ISjtAabJt2zbY2tqiVq1a+bYbPHgw9PT0oK2tjVu3bmHmzJmIiorC0aNHJZpv0qRJaNGiBapUqYILFy7A1dUVcXFxcHd3z7V9fHw85OXlc4zToqmpifj4eIlm/SY6Ohrr16+Hm5tbvu0k3af//PMPMjMzs31RAr72xYMHD3JdJj4+Ptf2xdV3ACAWizFlyhS0a9cOjRs3zrOdkZERtm/fjiZNmiAxMRFubm4wMzPD3bt3v/v3/DPatGmDHTt2wMjICHFxcVi0aBE6dOiAO3fuoFKlSjnal4Q+BYDjx4/jw4cPGDlyZJ5tpNWn//WtbwrTbz/y905lW2774vj4eOjr62dr9+1vJj4+HpUrVy7WjJKQ1z7n27ySrDy+j39kf1eS5XYMLwmfzYrC7du3YWpqipSUFKioqODYsWNo2LAhbty4USa2rywSBAEjR46Ek5MTWrZsiWfPnuVoUx6OC3kpr8fJb8rjMScvZXnfXVgHDhzAtWvXch2vvTT3Sbk8k2zWrFl5Dsj/7fHfN/vLly/h7+8PBweH765/7NixsLW1hbGxMYYMGYJdu3bh2LFjePz4sUSzTps2Debm5mjSpAmcnJzwxx9/YP369UhNTS3060oy5zexsbGws7ND//794ejomO/6i7JPyxJnZ2fcuXMHBw4cyLedqakphg8fjmbNmqFTp044evQoNDQ0sGXLFonms7e3R//+/dGkSRPY2trCx8cHHz58wKFDhyT6uj9r27ZtsLe3z3U8jm+k1adE+ZH0vrgk+JFtJCqJCnoML42MjIxw48YNXLp0CePHj8eIESNw7949accqlwq6z1y/fj2Sk5Ph6uoq7cgSVR6OkyRZZXnfXRgxMTGYPHky9u7dWyw3hCtO5fJMMhcXl3zPEAEAAwODbM+9vLxQtWpV9OjRo9Cv16ZNGwBff4EwNDQs1LI/kvXfr5uRkYFnz57ByMgox3wtLS2kpaXhw4cP2Sq8CQkJ0NLSkmjOV69ewcLCAmZmZj80uOXP9GluqlWrhgoVKuS420Z+faGlpVWo9kVtwoQJOH36NMLDwwt95pKcnByaN2+O6OhoCaXLnbq6OurVq5fn60q7TwHg+fPnCAoKKvRZitLq0299k5CQgBo1amRNT0hIQLNmzXJd5kf+3ql0KMp9cV7vx2/zpOVnjov/paWllePOXCVhGwuiPL6Pf2R/V1LldQwvys9m0iQvL486deoAAExMTHDlyhWsXbsWAwYMKBPbV5oUdJ8ZHByMiIgIKCgoZJvXsmVLDBkyBDt37iyxx4XCKA/HSUkoj8ec3JT1fXdhREZG4vXr12jRokXWtMzMTISHh2PDhg3w9/cvvX0i7UHRSgOxWCzo6+sLLi4uP7T8uXPnBADCzZs3izhZ/vbs2SPIyMgI7969y3X+t8H0jhw5kjXtwYMHEh9M7+XLl0LdunWFgQMHChkZGT+0Dkn0aevWrYUJEyZkPc/MzBRq1qyZ78D93bp1yzbN1NRU4oPMi8ViwdnZWdDW1hYePnz4Q+vIyMgQjIyMhKlTpx
ZxuvwlJycLlStXFtauXZvrfGn16b8tWLBA0NLSEtLT0wu1XHH1KfIYyNrNzS1rWmJiYoEG7i/M3zuVPd/bF38bkDgtLS1rmqura6kckPh7A/cnJCRkTduyZYugqqoqpKSkFGPCH1PW38dFtb8rSb53DJfWZzNJs7CwEEaMGFFmt68seP78uXD79u2sh7+/vwBAOHLkiBATEyMIQtk6LhREeTpOFkRZP+bkp7zuu/OTlJSUbZ9x+/ZtoWXLlsLQoUOF27dvl+o+YZGsAIKCggQAwv3793PMe/nypWBkZCRcunRJEARBiI6OFhYvXixcvXpVePr0qXDixAnBwMBA6Nixo0QzXrhwQfDw8BBu3LghPH78WNizZ4+goaEhDB8+PM+sgiAITk5Ogq6urhAcHCxcvXpVMDU1FUxNTSWW8+XLl0KdOnUEKysr4eXLl0JcXFzWI6+cxdWnBw4cEBQUFIQdO3YI9+7dE8aOHSuoq6tn3fVs2LBhwqxZs7Lanz9/XpCVlRXc3NyE+/fvCwsWLBDk5OSE27dvF2mu/xo/frygpqYmhIaGZuu/z58/Z7X5b9ZFixYJ/v7+wuPHj4XIyEhh4MCBgqKionD37l2JZnVxcRFCQ0OFp0+fCufPnxesra2FatWqCa9fv841p7T69JvMzExBV1dXmDlzZo550uzT5ORk4fr168L169cFAIK7u7tw/fp14fnz54IgCMKKFSsEdXV14cSJE8KtW7eEnj17Cvr6+sKXL1+y1mFpaSmsX78+6/n3/t6pbCvIvvjDhw+CpqamMGzYMOHOnTvCgQMHBGVlZWHLli1STF44z58/F65fvy4sWrRIUFFRyXofJScnC4LwtbjduHFjwcbGRrhx44bg5+cnaGhoCK6urlJOXjBl8X1cFPu7kqwgx/Di/mxW1GbNmiWEhYUJT58+FW7duiXMmjVLEIlEQkBAgCAIpX/7younT5/muLtlWTguFFR5OU4WRlk85hRUedh3F4V/391SEEpvn7BIVgCDBg0SzMzMcp337QASEhIiCIIgvHjxQujYsaNQpUoVQUFBQahTp44wffp0ITExUaIZIyMjhTZt2ghqamqCoqKi0KBBA2HZsmXZfgn/b1ZBEIQvX74Iv/76q1C5cmVBWVlZ6N27d7adf1Hz8vISAOT6yCtncfbp+vXrBV1dXUFeXl5o3bq1cPHixax5nTp1EkaMGJGt/aFDh4R69eoJ8vLyQqNGjQRvb+8iz/RfefWfl5dXnlmnTJmStV2amppCly5dhGvXrkk864ABA4QaNWoI8vLyQs2aNYUBAwYI0dHReeYUBOn06TfffjWNiorKMU+afRoSEpLr//m3PGKxWJg3b56gqakpKCgoCFZWVjm2QU9PT1iwYEG2afn9vVPZVpB9sSAIws2bN4X27dsLCgoKQs2aNYUVK1ZIKfGPGTFiRK7b+O/j4LNnzwR7e3tBSUlJqFatmuDi4lLoM0mlqay9j4tif1eSFeQYXtyfzYra6NGjBT09PUFeXl7Q0NAQrKyssgpkglD6t6+8yK1IJgil/7hQUOXlOFlYZe2YU1DlYd9dFP5bJCutfSISBEH4mcs1iYiIiIiIiIiISrtyeXdLIiIiIiIiIiKif2ORjIiIiIiIiIiIyj0WyYiIiIiIiIiIqNxjkYyIiIiIiIiIiMo9FsmIiIiIiIiIiKjcY5GMiIiIiIiIiIjKPRbJiIiIiIiIiIio3GORjIiIiIiIiIiIyj0WyYiIiIiIiIiIqNxjkYyIiIiIiIiIiMo9FsmIiIiIiIiIiKjc+z8Bznu/uDAwKQAAAABJRU5ErkJggg==", - "text/plain": [ - "
      " - ] - }, - "metadata": {}, - "output_type": "display_data" - } - ], + "outputs": [], "source": [ "from typing import List, Optional, Tuple\n", "\n", diff --git a/scripts/conversion/av2_sensor_conversion.sh b/scripts/conversion/av2_sensor_conversion.sh index 9e493387..b386972e 100644 --- a/scripts/conversion/av2_sensor_conversion.sh +++ b/scripts/conversion/av2_sensor_conversion.sh @@ -1,2 +1,2 @@ py123d-conversion datasets=["av2_sensor_dataset"] \ -dataset_paths.av2_data_root="/media/nvme1/argoverse" \ No newline at end of file +dataset_paths.av2_data_root="/media/nvme1/argoverse" diff --git a/scripts/conversion/nuplan_mini_conversion.sh b/scripts/conversion/nuplan_mini_conversion.sh index 76fd599b..13ec7a53 100644 --- a/scripts/conversion/nuplan_mini_conversion.sh +++ b/scripts/conversion/nuplan_mini_conversion.sh @@ -1 +1 @@ -py123d-conversion datasets=["nuplan_mini_dataset"] \ No newline at end of file +py123d-conversion datasets=["nuplan_mini_dataset"] diff --git a/scripts/conversion/pandaset_conversion.sh b/scripts/conversion/pandaset_conversion.sh index e895fcbb..0131e60f 100644 --- a/scripts/conversion/pandaset_conversion.sh +++ b/scripts/conversion/pandaset_conversion.sh @@ -1 +1 @@ -py123d-conversion datasets=[pandaset_dataset] \ No newline at end of file +py123d-conversion datasets=[pandaset_dataset] diff --git a/src/py123d/conversion/dataset_converter_config.py b/src/py123d/conversion/dataset_converter_config.py index 8cd9396a..b1c3045f 100644 --- a/src/py123d/conversion/dataset_converter_config.py +++ b/src/py123d/conversion/dataset_converter_config.py @@ -1,7 +1,7 @@ from __future__ import annotations -from typing import Literal from dataclasses import dataclass +from typing import Literal @dataclass diff --git a/src/py123d/conversion/log_writer/arrow_log_writer.py b/src/py123d/conversion/log_writer/arrow_log_writer.py index e502e8cd..1fdb45b8 100644 --- a/src/py123d/conversion/log_writer/arrow_log_writer.py +++ b/src/py123d/conversion/log_writer/arrow_log_writer.py @@ -175,7 +175,7 @@ def write( elif isinstance(camera_data, bytes): camera_data = camera_data elif isinstance(camera_data, np.ndarray): - _, encoded_img = cv2.imencode('.jpg', camera_data) + _, encoded_img = cv2.imencode(".jpg", camera_data) camera_data = encoded_img.tobytes() record_batch_data[f"{camera_name}_data"] = [camera_data] diff --git a/src/py123d/conversion/log_writer/utils/lidar_compression.py b/src/py123d/conversion/log_writer/utils/lidar_compression.py index fffc0e76..2449d83f 100644 --- a/src/py123d/conversion/log_writer/utils/lidar_compression.py +++ b/src/py123d/conversion/log_writer/utils/lidar_compression.py @@ -1,8 +1,8 @@ import io +import laspy import numpy as np import numpy.typing as npt -import laspy from py123d.datatypes.sensors.lidar.lidar import LiDAR, LiDARMetadata diff --git a/src/py123d/conversion/utils/map_utils/opendrive/opendrive_map_conversion.py b/src/py123d/conversion/utils/map_utils/opendrive/opendrive_map_conversion.py index eb90efc0..5d3143d0 100644 --- a/src/py123d/conversion/utils/map_utils/opendrive/opendrive_map_conversion.py +++ b/src/py123d/conversion/utils/map_utils/opendrive/opendrive_map_conversion.py @@ -15,7 +15,6 @@ from py123d.conversion.utils.map_utils.opendrive.utils.objects_helper import OpenDriveObjectHelper from py123d.conversion.utils.map_utils.road_edge.road_edge_2d_utils import split_line_geometry_by_max_length from py123d.conversion.utils.map_utils.road_edge.road_edge_3d_utils import get_road_edges_3d_from_drivable_surfaces - from 
     CacheCarpark,
     CacheCrosswalk,
diff --git a/src/py123d/conversion/utils/map_utils/road_edge/road_edge_2d_utils.py b/src/py123d/conversion/utils/map_utils/road_edge/road_edge_2d_utils.py
index 3eb87d0c..8e1abc33 100644
--- a/src/py123d/conversion/utils/map_utils/road_edge/road_edge_2d_utils.py
+++ b/src/py123d/conversion/utils/map_utils/road_edge/road_edge_2d_utils.py
@@ -4,7 +4,6 @@
 import shapely
 from shapely import LinearRing, LineString, Polygon, union_all

-
 ROAD_EDGE_BUFFER: Final[float] = 0.05
diff --git a/src/py123d/conversion/utils/map_utils/road_edge/road_edge_3d_utils.py b/src/py123d/conversion/utils/map_utils/road_edge/road_edge_3d_utils.py
index 990bd0b3..986433e9 100644
--- a/src/py123d/conversion/utils/map_utils/road_edge/road_edge_3d_utils.py
+++ b/src/py123d/conversion/utils/map_utils/road_edge/road_edge_3d_utils.py
@@ -2,11 +2,11 @@
 from collections import defaultdict
 from typing import Dict, List, Set

+import networkx as nx
 import numpy as np
 import numpy.typing as npt
 import shapely
 import shapely.geometry as geom
-import networkx as nx

 from py123d.conversion.utils.map_utils.road_edge.road_edge_2d_utils import get_road_edge_linear_rings
 from py123d.datatypes.maps.abstract_map_objects import (
diff --git a/src/py123d/datatypes/scene/arrow/utils/arrow_getters.py b/src/py123d/datatypes/scene/arrow/utils/arrow_getters.py
index e3bf5106..634bc1ca 100644
--- a/src/py123d/datatypes/scene/arrow/utils/arrow_getters.py
+++ b/src/py123d/datatypes/scene/arrow/utils/arrow_getters.py
@@ -4,8 +4,8 @@
 import cv2
 import numpy as np
 import numpy.typing as npt
-from omegaconf import DictConfig
 import pyarrow as pa
+from omegaconf import DictConfig

 from py123d.conversion.datasets.pandaset.pandaset_sensor_loading import load_pandaset_lidars_pc_from_path
 from py123d.datatypes.detections.detection import (
@@ -27,7 +27,6 @@
 from py123d.geometry import BoundingBoxSE3, StateSE3, Vector3D
 from py123d.script.utils.dataset_path_utils import get_dataset_paths

-
 DATASET_PATHS: DictConfig = get_dataset_paths()
 DATASET_SENSOR_ROOT: Dict[str, Path] = {
     "nuplan": DATASET_PATHS.nuplan_sensor_root,
diff --git a/src/py123d/geometry/transform/test/test_transform_se3.py b/src/py123d/geometry/transform/test/test_transform_se3.py
index b035a04f..44e86504 100644
--- a/src/py123d/geometry/transform/test/test_transform_se3.py
+++ b/src/py123d/geometry/transform/test/test_transform_se3.py
@@ -2,24 +2,21 @@
 import numpy as np
 import numpy.typing as npt
-from pyquaternion import Quaternion as PyQuaternion
-
 import py123d.geometry.transform.transform_euler_se3 as euler_transform_se3
-from py123d.geometry import EulerStateSE3, EulerStateSE3Index, Point3D, Quaternion, StateSE3, StateSE3Index
+from py123d.geometry import EulerStateSE3, EulerStateSE3Index, Point3D, StateSE3, StateSE3Index
 from py123d.geometry.transform.transform_se3 import (
     convert_absolute_to_relative_points_3d_array,
     convert_absolute_to_relative_se3_array,
+    convert_points_3d_array_between_origins,
     convert_relative_to_absolute_points_3d_array,
     convert_relative_to_absolute_se3_array,
     convert_se3_array_between_origins,
-    convert_points_3d_array_between_origins,
     translate_se3_along_body_frame,
     translate_se3_along_x,
     translate_se3_along_y,
     translate_se3_along_z,
 )
-
 from py123d.geometry.utils.rotation_utils import (
     get_quaternion_array_from_euler_array,
     get_rotation_matrices_from_euler_array,
@@ -181,10 +178,6 @@ def test_convert_relative_to_absolute_se3_array(self):
             abs_se3_euler[..., EulerStateSE3Index.XYZ], abs_se3_quat[..., StateSE3Index.XYZ], atol=1e-6
         )

-        # pyquat_rotation_matrices = [
-        #     PyQuaternion(array=q).rotation_matrix for q in abs_se3_quat[..., StateSE3Index.QUATERNION]
-        # ]
-
         # We compare rotation matrices to avoid issues with quaternion sign ambiguity
         quat_rotation_matrices = get_rotation_matrices_from_quaternion_array(
             abs_se3_quat[..., StateSE3Index.QUATERNION]
diff --git a/src/py123d/geometry/transform/transform_euler_se3.py b/src/py123d/geometry/transform/transform_euler_se3.py
index 15bb48ce..398b2af4 100644
--- a/src/py123d/geometry/transform/transform_euler_se3.py
+++ b/src/py123d/geometry/transform/transform_euler_se3.py
@@ -8,7 +8,6 @@
     get_euler_array_from_rotation_matrices,
     get_rotation_matrices_from_euler_array,
     get_rotation_matrix_from_euler_array,
-    normalize_angle,
 )
diff --git a/src/py123d/geometry/utils/test/test_rotation_utils.py b/src/py123d/geometry/utils/test/test_rotation_utils.py
index 30b3515b..f298af7a 100644
--- a/src/py123d/geometry/utils/test/test_rotation_utils.py
+++ b/src/py123d/geometry/utils/test/test_rotation_utils.py
@@ -1,8 +1,9 @@
-from typing import Tuple
 import unittest
+from typing import Tuple

 import numpy as np
 import numpy.typing as npt
+from pyquaternion import Quaternion as PyQuaternion

 from py123d.geometry.geometry_index import EulerAnglesIndex, QuaternionIndex
 from py123d.geometry.utils.rotation_utils import (
@@ -26,9 +27,6 @@
 )


-from pyquaternion import Quaternion as PyQuaternion
-
-
 def _get_rotation_matrix_helper(euler_array: npt.NDArray[np.float64]) -> npt.NDArray[np.float64]:
     """Helper function to ensure ZYX (Yaw-Pitch-Roll) intrinsic Euler angle convention, aka Tait-Bryan angles.
diff --git a/src/py123d/script/config/common/default_dataset_paths.yaml b/src/py123d/script/config/common/default_dataset_paths.yaml
index b2dc2027..ded971a6 100644
--- a/src/py123d/script/config/common/default_dataset_paths.yaml
+++ b/src/py123d/script/config/common/default_dataset_paths.yaml
@@ -20,4 +20,4 @@ dataset_paths:

   # Pandaset defaults

-  pandaset_data_root: ${oc.env:PANDASET_DATA_ROOT,null}
\ No newline at end of file
+  pandaset_data_root: ${oc.env:PANDASET_DATA_ROOT,null}
diff --git a/src/py123d/script/config/viser/default_viser.yaml b/src/py123d/script/config/viser/default_viser.yaml
index 99059e83..4a1c88bd 100644
--- a/src/py123d/script/config/viser/default_viser.yaml
+++ b/src/py123d/script/config/viser/default_viser.yaml
@@ -2,7 +2,7 @@ hydra:
   run:
     dir: .
   output_subdir: null
-  searchpath: 
+  searchpath:
     - pkg://py123d.script.config
     - pkg://py123d.script.config.common
   job:
diff --git a/src/py123d/script/run_viser.py b/src/py123d/script/run_viser.py
index d1809fcd..cc89e024 100644
--- a/src/py123d/script/run_viser.py
+++ b/src/py123d/script/run_viser.py
@@ -3,11 +3,11 @@
 import hydra
 from omegaconf import DictConfig

-from py123d.visualization.viser.viser_viewer import ViserViewer
 from py123d.script.builders.scene_builder_builder import build_scene_builder
 from py123d.script.builders.scene_filter_builder import build_scene_filter
 from py123d.script.run_conversion import build_worker
 from py123d.script.utils.dataset_path_utils import setup_dataset_paths
+from py123d.visualization.viser.viser_viewer import ViserViewer

 logger = logging.getLogger(__name__)
diff --git a/src/py123d/script/utils/dataset_path_utils.py b/src/py123d/script/utils/dataset_path_utils.py
index 4cc71278..393c05f4 100644
--- a/src/py123d/script/utils/dataset_path_utils.py
+++ b/src/py123d/script/utils/dataset_path_utils.py
@@ -4,7 +4,6 @@
 from omegaconf import DictConfig, OmegaConf

-
 logger = logging.getLogger(__name__)

 _global_dataset_paths: Optional[DictConfig] = None
@@ -32,8 +31,6 @@ def get_dataset_paths() -> DictConfig:

     :return: global dataset paths configuration
     """
-    global _global_dataset_paths
-
     if _global_dataset_paths is None:
         dataset_paths_config_yaml = Path(__file__).parent.parent / "config" / "common" / "default_dataset_paths.yaml"
         logger.warning(f"Dataset paths not set. Using default config: {dataset_paths_config_yaml}")
diff --git a/src/py123d/visualization/color/default.py b/src/py123d/visualization/color/default.py
index 6a8f2f6c..317dda32 100644
--- a/src/py123d/visualization/color/default.py
+++ b/src/py123d/visualization/color/default.py
@@ -1,5 +1,8 @@
 from typing import Dict

+from py123d.datatypes.detections.detection import TrafficLightStatus
+from py123d.datatypes.detections.detection_types import DetectionType
+from py123d.datatypes.maps.map_datatypes import MapLayer
 from py123d.visualization.color.color import (
     BLACK,
     DARKER_GREY,
@@ -11,9 +14,6 @@
     Color,
 )
 from py123d.visualization.color.config import PlotConfig
-from py123d.datatypes.detections.detection import TrafficLightStatus
-from py123d.datatypes.detections.detection_types import DetectionType
-from py123d.datatypes.maps.map_datatypes import MapLayer

 HEADING_MARKER_STYLE: str = "^"  # "^": triangle, "-": line
diff --git a/src/py123d/visualization/matplotlib/camera.py b/src/py123d/visualization/matplotlib/camera.py
index 344a2b1c..fd3f4d5a 100644
--- a/src/py123d/visualization/matplotlib/camera.py
+++ b/src/py123d/visualization/matplotlib/camera.py
@@ -10,13 +10,13 @@
 # from PIL import ImageColor
 from pyquaternion import Quaternion

-from py123d.visualization.color.default import BOX_DETECTION_CONFIG
 from py123d.datatypes.detections.detection import BoxDetectionSE3, BoxDetectionWrapper
 from py123d.datatypes.detections.detection_types import DetectionType
 from py123d.datatypes.sensors.camera.pinhole_camera import PinholeCamera
 from py123d.datatypes.vehicle_state.ego_state import EgoStateSE3
 from py123d.geometry import BoundingBoxSE3Index, Corners3DIndex
 from py123d.geometry.transform.transform_euler_se3 import convert_absolute_to_relative_euler_se3_array
+from py123d.visualization.color.default import BOX_DETECTION_CONFIG

 # from navsim.common.dataclasses import Annotations, Camera, Lidar
 # from navsim.common.enums import BoundingBoxIndex, LidarIndex
diff --git a/src/py123d/visualization/color/default.py b/src/py123d/visualization/color/default.py
index 6a8f2f6c..317dda32 100644
--- a/src/py123d/visualization/color/default.py
+++ b/src/py123d/visualization/color/default.py
@@ -1,5 +1,8 @@
 from typing import Dict
 
+from py123d.datatypes.detections.detection import TrafficLightStatus
+from py123d.datatypes.detections.detection_types import DetectionType
+from py123d.datatypes.maps.map_datatypes import MapLayer
 from py123d.visualization.color.color import (
     BLACK,
     DARKER_GREY,
@@ -11,9 +14,6 @@
     Color,
 )
 from py123d.visualization.color.config import PlotConfig
-from py123d.datatypes.detections.detection import TrafficLightStatus
-from py123d.datatypes.detections.detection_types import DetectionType
-from py123d.datatypes.maps.map_datatypes import MapLayer
 
 HEADING_MARKER_STYLE: str = "^"  # "^": triangle, "-": line
diff --git a/src/py123d/visualization/matplotlib/camera.py b/src/py123d/visualization/matplotlib/camera.py
index 344a2b1c..fd3f4d5a 100644
--- a/src/py123d/visualization/matplotlib/camera.py
+++ b/src/py123d/visualization/matplotlib/camera.py
@@ -10,13 +10,13 @@
 # from PIL import ImageColor
 from pyquaternion import Quaternion
 
-from py123d.visualization.color.default import BOX_DETECTION_CONFIG
 from py123d.datatypes.detections.detection import BoxDetectionSE3, BoxDetectionWrapper
 from py123d.datatypes.detections.detection_types import DetectionType
 from py123d.datatypes.sensors.camera.pinhole_camera import PinholeCamera
 from py123d.datatypes.vehicle_state.ego_state import EgoStateSE3
 from py123d.geometry import BoundingBoxSE3Index, Corners3DIndex
 from py123d.geometry.transform.transform_euler_se3 import convert_absolute_to_relative_euler_se3_array
+from py123d.visualization.color.default import BOX_DETECTION_CONFIG
 
 # from navsim.common.dataclasses import Annotations, Camera, Lidar
 # from navsim.common.enums import BoundingBoxIndex, LidarIndex
diff --git a/src/py123d/visualization/matplotlib/observation.py b/src/py123d/visualization/matplotlib/observation.py
index 06173a88..15828cbb 100644
--- a/src/py123d/visualization/matplotlib/observation.py
+++ b/src/py123d/visualization/matplotlib/observation.py
@@ -4,6 +4,15 @@
 import numpy as np
 import shapely.geometry as geom
 
+from py123d.datatypes.detections.detection import BoxDetectionWrapper, TrafficLightDetectionWrapper
+from py123d.datatypes.detections.detection_types import DetectionType
+from py123d.datatypes.maps.abstract_map import AbstractMap
+from py123d.datatypes.maps.abstract_map_objects import AbstractLane
+from py123d.datatypes.maps.map_datatypes import MapLayer
+from py123d.datatypes.scene.abstract_scene import AbstractScene
+from py123d.datatypes.vehicle_state.ego_state import EgoStateSE2, EgoStateSE3
+from py123d.geometry import BoundingBoxSE2, BoundingBoxSE3, Point2D, StateSE2Index, Vector2D
+from py123d.geometry.transform.transform_se2 import translate_se2_along_body_frame
 from py123d.visualization.color.config import PlotConfig
 from py123d.visualization.color.default import (
     BOX_DETECTION_CONFIG,
@@ -19,15 +28,6 @@
     get_pose_triangle,
     shapely_geometry_local_coords,
 )
-from py123d.datatypes.detections.detection import BoxDetectionWrapper, TrafficLightDetectionWrapper
-from py123d.datatypes.detections.detection_types import DetectionType
-from py123d.datatypes.maps.abstract_map import AbstractMap
-from py123d.datatypes.maps.abstract_map_objects import AbstractLane
-from py123d.datatypes.maps.map_datatypes import MapLayer
-from py123d.datatypes.scene.abstract_scene import AbstractScene
-from py123d.datatypes.vehicle_state.ego_state import EgoStateSE2, EgoStateSE3
-from py123d.geometry import BoundingBoxSE2, BoundingBoxSE3, Point2D, StateSE2Index, Vector2D
-from py123d.geometry.transform.transform_se2 import translate_se2_along_body_frame
 
 
 def add_default_map_on_ax(
diff --git a/src/py123d/visualization/matplotlib/plots.py b/src/py123d/visualization/matplotlib/plots.py
index 41228303..3779aa7e 100644
--- a/src/py123d/visualization/matplotlib/plots.py
+++ b/src/py123d/visualization/matplotlib/plots.py
@@ -5,13 +5,13 @@
 import matplotlib.pyplot as plt
 from tqdm import tqdm
 
+from py123d.datatypes.scene.abstract_scene import AbstractScene
 from py123d.visualization.matplotlib.observation import (
     add_box_detections_to_ax,
     add_default_map_on_ax,
     add_ego_vehicle_to_ax,
     add_traffic_lights_to_ax,
 )
-from py123d.datatypes.scene.abstract_scene import AbstractScene
 
 
 def _plot_scene_on_ax(ax: plt.Axes, scene: AbstractScene, iteration: int = 0, radius: float = 80) -> plt.Axes:
diff --git a/src/py123d/visualization/matplotlib/utils.py b/src/py123d/visualization/matplotlib/utils.py
index 1ab1ff3d..3c552eac 100644
--- a/src/py123d/visualization/matplotlib/utils.py
+++ b/src/py123d/visualization/matplotlib/utils.py
@@ -7,8 +7,8 @@
 import shapely.geometry as geom
 from matplotlib.path import Path
 
-from py123d.visualization.color.config import PlotConfig
 from py123d.geometry import StateSE2, StateSE3
+from py123d.visualization.color.config import PlotConfig
 
 
 def add_shapely_polygon_to_ax(
diff --git a/src/py123d/visualization/viser/elements/detection_elements.py b/src/py123d/visualization/viser/elements/detection_elements.py
index 91e38764..a8c5b98a 100644
--- a/src/py123d/visualization/viser/elements/detection_elements.py
+++ b/src/py123d/visualization/viser/elements/detection_elements.py
@@ -3,8 +3,6 @@
 import trimesh
 import viser
 
-from py123d.visualization.color.default import BOX_DETECTION_CONFIG
-from py123d.visualization.viser.viser_config import ViserConfig
 from py123d.datatypes.detections.detection_types import DetectionType
 from py123d.datatypes.scene.abstract_scene import AbstractScene
 from py123d.datatypes.vehicle_state.ego_state import EgoStateSE3
@@ -14,6 +12,8 @@
     corners_array_to_3d_mesh,
     corners_array_to_edge_lines,
 )
+from py123d.visualization.color.default import BOX_DETECTION_CONFIG
+from py123d.visualization.viser.viser_config import ViserConfig
 
 
 def add_box_detections_to_viser_server(
diff --git a/src/py123d/visualization/viser/elements/map_elements.py b/src/py123d/visualization/viser/elements/map_elements.py
index b1e0e752..57eefe58 100644
--- a/src/py123d/visualization/viser/elements/map_elements.py
+++ b/src/py123d/visualization/viser/elements/map_elements.py
@@ -4,14 +4,13 @@
 import trimesh
 import viser
 
-from py123d.visualization.color.default import MAP_SURFACE_CONFIG
-from py123d.visualization.viser.viser_config import ViserConfig
 from py123d.datatypes.maps.abstract_map import MapLayer
 from py123d.datatypes.maps.abstract_map_objects import AbstractSurfaceMapObject
 from py123d.datatypes.scene.abstract_scene import AbstractScene
 from py123d.datatypes.vehicle_state.ego_state import EgoStateSE3
 from py123d.geometry import Point3D, Point3DIndex
-
+from py123d.visualization.color.default import MAP_SURFACE_CONFIG
+from py123d.visualization.viser.viser_config import ViserConfig
 
 last_query_position: Optional[Point3D] = None
diff --git a/src/py123d/visualization/viser/elements/sensor_elements.py b/src/py123d/visualization/viser/elements/sensor_elements.py
index 2f3a9c15..2876fa5e 100644
--- a/src/py123d/visualization/viser/elements/sensor_elements.py
+++ b/src/py123d/visualization/viser/elements/sensor_elements.py
@@ -6,7 +6,6 @@
 import numpy.typing as npt
 import viser
 
-from py123d.visualization.viser.viser_config import ViserConfig
 from py123d.datatypes.scene.abstract_scene import AbstractScene
 from py123d.datatypes.sensors.camera.pinhole_camera import PinholeCamera, PinholeCameraType
 from py123d.datatypes.sensors.lidar.lidar import LiDARType
@@ -16,6 +15,7 @@
     convert_relative_to_absolute_points_3d_array,
     convert_relative_to_absolute_se3_array,
 )
+from py123d.visualization.viser.viser_config import ViserConfig
 
 
 def add_camera_frustums_to_viser_server(
diff --git a/src/py123d/visualization/viser/viser_viewer.py b/src/py123d/visualization/viser/viser_viewer.py
index d42952b2..17adbb2b 100644
--- a/src/py123d/visualization/viser/viser_viewer.py
+++ b/src/py123d/visualization/viser/viser_viewer.py
@@ -5,6 +5,11 @@
 import viser
 from viser.theme import TitlebarButton, TitlebarConfig, TitlebarImage
 
+from py123d.datatypes.maps.map_datatypes import MapLayer
+from py123d.datatypes.scene.abstract_scene import AbstractScene
+from py123d.datatypes.sensors.camera.pinhole_camera import PinholeCameraType
+from py123d.datatypes.sensors.lidar.lidar import LiDARType
+from py123d.datatypes.vehicle_state.ego_state import EgoStateSE3
 from py123d.visualization.viser.elements import (
     add_box_detections_to_viser_server,
     add_camera_frustums_to_viser_server,
@@ -13,11 +18,6 @@
     add_map_to_viser_server,
 )
 from py123d.visualization.viser.viser_config import ViserConfig
-from py123d.datatypes.maps.map_datatypes import MapLayer
-from py123d.datatypes.scene.abstract_scene import AbstractScene
-from py123d.datatypes.sensors.camera.pinhole_camera import PinholeCameraType
-from py123d.datatypes.sensors.lidar.lidar import LiDARType
-from py123d.datatypes.vehicle_state.ego_state import EgoStateSE3
 
 logger = logging.getLogger(__name__)
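Aside: every hunk in this cleanup patch is a pure import reorder, and the resulting order (plain `import` statements before `from ... import`, first-party sections alphabetized) is consistent with isort's default profile, so the whole change could plausibly have been produced mechanically. A hypothetical sketch using isort's Python API (assumes the isort package is installed; not part of the patch):

    import isort

    snippet = (
        "from py123d.visualization.viser.viser_config import ViserConfig\n"
        "from py123d.datatypes.scene.abstract_scene import AbstractScene\n"
    )
    # isort.code() returns the reordered source as a string.
    print(isort.code(snippet), end="")
    # from py123d.datatypes.scene.abstract_scene import AbstractScene
    # from py123d.visualization.viser.viser_config import ViserConfig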
diff --git a/test_viser.py b/test_viser.py
index 1381639b..7446fb62 100644
--- a/test_viser.py
+++ b/test_viser.py
@@ -1,9 +1,8 @@
 from py123d.common.multithreading.worker_sequential import Sequential
-from py123d.visualization.viser.viser_viewer import ViserViewer
 from py123d.datatypes.scene.arrow.arrow_scene_builder import ArrowSceneBuilder
 from py123d.datatypes.scene.scene_filter import SceneFilter
 from py123d.datatypes.sensors.camera.pinhole_camera import PinholeCameraType
-
+from py123d.visualization.viser.viser_viewer import ViserViewer
 
 if __name__ == "__main__":

From 065d875176c488dd3fa308af19cca57d141e2cf5 Mon Sep 17 00:00:00 2001
From: Daniel Dauner
Date: Mon, 20 Oct 2025 18:39:17 +0200
Subject: [PATCH 103/145] Minor changes to log (#57)

---
 assets/logo/123D_logo_transparent_black.png  | Bin 2486726 -> 2486726 bytes
 assets/logo/123D_logo_transparent_white.png  | Bin 2486726 -> 2486726 bytes
 .../logo/svg/123D_logo_transparent_black.svg | 24 +++++++++---------
 .../logo/svg/123D_logo_transparent_white.svg | 13 +++++-----
 4 files changed, 19 insertions(+), 18 deletions(-)

diff --git a/assets/logo/123D_logo_transparent_black.png b/assets/logo/123D_logo_transparent_black.png
index 46614e00ee7886bf24885f8c1899c2cf6a38adab..6717f5f81f94a4adb7c3042fe6f5332891afecfc 100644
GIT binary patch
[base85-encoded binary deltas (22805 and 22430 bytes) omitted]
diff --git a/assets/logo/123D_logo_transparent_white.png b/assets/logo/123D_logo_transparent_white.png
index e6365d14cf2f3db08c191ab55d40174b6fd02985..16aec842680f35dc3210d983a6f0e3c745216058 100644
GIT binary patch
[base85-encoded binary deltas (11912 and 11512 bytes) omitted]
diff --git a/assets/logo/svg/123D_logo_transparent_black.svg b/assets/logo/svg/123D_logo_transparent_black.svg
index f2dacef0..2c07cc5c 100644
--- a/assets/logo/svg/123D_logo_transparent_black.svg
+++ b/assets/logo/svg/123D_logo_transparent_black.svg
@@ -10,9 +10,9 @@
    xml:space="preserve"
    sodipodi:docname="123D_logo_transparent_black.svg"
    inkscape:version="1.4.2 (1:1.4.2+202505120738+ebf0e940d0)"
-   inkscape:export-filename="123D_logo_transparent_black.png"
-   inkscape:export-xdpi="150"
-   inkscape:export-ydpi="150"
+   inkscape:export-filename="../123D_logo_transparent_black.png"
+   inkscape:export-xdpi="200"
+   inkscape:export-ydpi="200"
    xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
    xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
    xmlns="http://www.w3.org/2000/svg"
@@ -27,14 +27,14 @@
    inkscape:deskcolor="#d1d1d1"
    inkscape:document-units="mm"
    inkscape:zoom="1"
-   inkscape:cx="286"
-   inkscape:cy="70"
+   inkscape:cx="255"
+   inkscape:cy="250"
    inkscape:window-width="2560"
    inkscape:window-height="1313"
    inkscape:window-x="2560"
    inkscape:window-y="27"
    inkscape:window-maximized="1"
-   inkscape:current-layer="layer1"
+   inkscape:current-layer="layer4"
    showguides="false" />

Date: Mon, 20 Oct 2025 21:54:51 +0200
Subject: [PATCH 104/145] Fix intersections in OpenDRIVE, tested for CARLA (#59)

---
 notebooks/bev_matplotlib.ipynb                | 160 +++++++++++++-----
 .../opendrive/opendrive_map_conversion.py     |  74 +++++---
 .../map_utils/opendrive/utils/lane_helper.py  |  36 ++--
 .../map_utils/road_edge/road_edge_2d_utils.py |  19 ++-
 .../map_utils/road_edge/road_edge_3d_utils.py |  53 +++++-
 .../datatypes/maps/gpkg/gpkg_map_objects.py   |   5 +-
 .../visualization/viser/viser_viewer.py       |  56 +++---
 7 files changed, 287 insertions(+), 116 deletions(-)
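Aside before the diffs: this patch retires a unary_union/polygonize based exterior extraction in favor of linear-ring road edges lifted to 3D. The 2D core of the idea can be sketched with plain shapely (an illustration under assumed inputs, not the project's get_road_edge_linear_rings):

    import shapely
    from shapely.ops import unary_union

    # Two overlapping boxes stand in for the lane-group polygons of one intersection.
    patches = [shapely.box(0.0, 0.0, 2.0, 1.0), shapely.box(1.0, 0.0, 3.0, 1.0)]
    merged = unary_union(patches)

    # One exterior LinearRing per connected component; disconnected parts yield
    # several outlines, which is exactly the case the new code handles explicitly.
    polygons = list(merged.geoms) if merged.geom_type == "MultiPolygon" else [merged]
    outlines = [polygon.exterior for polygon in polygons]
    print([ring.length for ring in outlines])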
diff --git a/notebooks/bev_matplotlib.ipynb b/notebooks/bev_matplotlib.ipynb
index 7fb87a18..13eb3f5f 100644
--- a/notebooks/bev_matplotlib.ipynb
+++ b/notebooks/bev_matplotlib.ipynb
@@ -38,10 +38,10 @@
    "source": [
     "\n",
     "# splits = [\"wopd_val\"]\n",
-    "# splits = [\"carla\"]\n",
+    "splits = [\"carla_test\"]\n",
     "# splits = [\"nuplan-mini_test\"]\n",
     "# splits = [\"av2-sensor-mini_train\"]\n",
-    "splits = [\"pandaset_train\"]\n",
+    "# splits = [\"pandaset_train\"]\n",
     "# log_names = None\n",
     "\n",
     "\n",
@@ -159,13 +159,13 @@
     "    route_lane_group_ids: Optional[List[int]] = None,\n",
     ") -> None:\n",
     "    layers: List[MapLayer] = [\n",
-    "        MapLayer.LANE,\n",
-    "        MapLayer.LANE_GROUP,\n",
-    "        MapLayer.GENERIC_DRIVABLE,\n",
-    "        MapLayer.CARPARK,\n",
-    "        MapLayer.CROSSWALK,\n",
+    "        # MapLayer.LANE,\n",
+    "        # MapLayer.LANE_GROUP,\n",
+    "        # MapLayer.GENERIC_DRIVABLE,\n",
+    "        # MapLayer.CARPARK,\n",
+    "        # MapLayer.CROSSWALK,\n",
     "        MapLayer.INTERSECTION,\n",
-    "        MapLayer.WALKWAY,\n",
+    "        # MapLayer.WALKWAY,\n",
     "        MapLayer.ROAD_EDGE,\n",
     "        # MapLayer.ROAD_LINE,\n",
     "    ]\n",
@@ -186,6 +186,7 @@
     "        MapLayer.WALKWAY,\n",
     "    ]:\n",
     "        add_shapely_polygon_to_ax(ax, map_object.shapely_polygon, MAP_SURFACE_CONFIG[layer])\n",
+    "        print(f\"Added {layer.name} with id {map_object.object_id}\")\n",
     "\n",
     "    if layer in [MapLayer.LANE_GROUP]:\n",
     "        map_object: AbstractLaneGroup\n",
@@ -194,8 +195,8 @@
     "    if layer in [MapLayer.LANE]:\n",
     "        add_shapely_linestring_to_ax(ax, map_object.centerline.linestring, CENTERLINE_CONFIG)\n",
     "\n",
-    "    # if layer in [MapLayer.ROAD_EDGE]:\n",
-    "    #     add_shapely_linestring_to_ax(ax, map_object.polyline_3d.linestring, ROAD_EDGE_CONFIG)\n",
+    "    if layer in [MapLayer.ROAD_EDGE]:\n",
+    "        add_shapely_linestring_to_ax(ax, map_object.polyline_3d.linestring, ROAD_EDGE_CONFIG)\n",
     "\n",
     "    # if layer in [MapLayer.ROAD_LINE]:\n",
     "    #     # line_type = int(map_object.road_line_type)\n",
@@ -216,7 +217,7 @@
     "            print(f\"Error adding map object of type {layer.name} and id {map_object.id}\")\n",
     "            traceback.print_exc()\n",
     "\n",
-    "    ax.set_title(f\"Map: {map_api.map_name}\")\n",
+    "    # ax.set_title(f\"Map: {map_api.map_name}\")\n",
     "\n",
     "\n",
     "def _plot_scene_on_ax(ax: plt.Axes, scene: AbstractScene, iteration: int = 0, radius: float = 80) -> plt.Axes:\n",
@@ -226,9 +227,9 @@
     "    map_api = scene.get_map_api()\n",
     "\n",
     "    point_2d = ego_vehicle_state.bounding_box.center.state_se2.point_2d\n",
-    "    # add_debug_map_on_ax(ax, scene.get_map_api(), point_2d, radius=radius, route_lane_group_ids=None)\n",
-    "    if map_api is not None:\n",
-    "        add_default_map_on_ax(ax, map_api, point_2d, radius=radius, route_lane_group_ids=None)\n",
+    "    add_debug_map_on_ax(ax, scene.get_map_api(), point_2d, radius=radius, route_lane_group_ids=None)\n",
+    "    # if map_api is not None:\n",
+    "    #     add_default_map_on_ax(ax, map_api, point_2d, radius=radius, route_lane_group_ids=None)\n",
     "    # add_traffic_lights_to_ax(ax, traffic_light_detections, scene.get_map_api())\n",
     "\n",
     "    add_box_detections_to_ax(ax, box_detections)\n",
@@ -256,11 +257,12 @@
     "# scene_index = \n",
     "iteration = 1\n",
     "\n",
-    "fig, ax = plt.subplots(1, 3, figsize=(15, 5))\n",
+    "scale = 0.5\n",
+    "fig, ax = plt.subplots(3, 1, figsize=(5*scale, 15*scale))\n",
     "scene = np.random.choice(scenes)\n",
-    "_plot_scene_on_ax(ax[0], scene, iteration, radius=10)\n",
-    "_plot_scene_on_ax(ax[1], scene, iteration, radius=30)\n",
-    "_plot_scene_on_ax(ax[2], scene, iteration, radius=50)\n",
+    "_plot_scene_on_ax(ax[0], scene, iteration, radius=30)\n",
+    "_plot_scene_on_ax(ax[1], scene, iteration, radius=50)\n",
+    "_plot_scene_on_ax(ax[2], scene, iteration, radius=100)\n",
     "\n",
     "plt.show()\n"
    ]
   },
@@ -272,16 +274,15 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "scene_index = 17\n",
-    "iteration = 99\n",
     "\n",
-    "fig, ax = plt.subplots(1, 3, figsize=(15, 5))\n",
-    "scene = scenes[scene_index]\n",
-    "_plot_scene_on_ax(ax[0], scene, iteration, radius=20)\n",
-    "_plot_scene_on_ax(ax[1], scene, iteration, radius=50)\n",
-    "_plot_scene_on_ax(ax[2], scene, iteration, radius=100)\n",
+    "map_api = scene.get_map_api()\n",
+    "map_api: AbstractMap\n",
     "\n",
-    "plt.show()"
+    "intersection = map_api.get_map_object(\"562\", MapLayer.INTERSECTION)\n",
+    "\n",
+    "\n",
+    "lane_groups = intersection.lane_groups\n",
+    "\n"
    ]
   },
   {
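Aside: the next hunk adds lift_outlines_to_3d, which chops each 3D boundary into two-point segments (small axis-aligned bounding boxes index better) and batch-queries the nearest segment for every outline vertex. OccupancyMap2D is project-internal; the same lookup can be sketched with shapely 2.x's STRtree (an assumed equivalent, not the project's implementation):

    import numpy as np
    import shapely
    from shapely.strtree import STRtree

    # Two short segments act as the indexed boundary pieces.
    segments = shapely.linestrings(
        np.array([[[0.0, 0.0], [1.0, 0.0]], [[1.0, 0.0], [2.0, 1.0]]])
    )
    tree = STRtree(segments)

    points = shapely.points(np.array([[0.5, 0.2], [1.9, 0.8]]))
    point_idx, segment_idx = tree.query_nearest(points, max_distance=10.0)
    print(list(zip(point_idx, segment_idx)))  # each vertex paired with its closest segment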
@@ -291,36 +292,113 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "from py123d.dataset.conversion.map.road_edge.road_edge_2d_utils import get_road_edge_linear_rings\n",
-    "from py123d.dataset.maps.gpkg.gpkg_map import GPKGMap\n",
+    "import shapely\n",
+    "from py123d.conversion.utils.map_utils.road_edge.road_edge_2d_utils import get_road_edge_linear_rings\n",
+    "\n",
+    "# from py123d.conversion.utils.map_utils.road_edge.road_edge_3d_utils import lift_road_edges_to_3d\n",
+    "from py123d.conversion.utils.map_utils.road_edge.road_edge_3d_utils import (\n",
+    "    _interpolate_z_on_segment,\n",
+    "    _split_continuous_segments,\n",
+    ")\n",
+    "from py123d.geometry.geometry_index import Point3DIndex\n",
+    "from py123d.geometry.occupancy_map import OccupancyMap2D\n",
+    "from py123d.geometry.polyline import Polyline3D\n",
+    "\n",
+    "\n",
+    "fix, ax = plt.subplots()\n",
+    "\n",
+    "\n",
+    "def lift_outlines_to_3d(\n",
+    "    outlines_2d: List[shapely.LinearRing],\n",
+    "    boundaries: List[Polyline3D],\n",
+    "    max_distance: float = 10.0,\n",
+    ") -> List[Polyline3D]:\n",
+    "    \"\"\"Lift 2D road edges to 3D by querying elevation from boundary segments.\n",
+    "\n",
+    "    :param road_edges_2d: List of 2D road edge geometries.\n",
+    "    :param boundaries: List of 3D boundary geometries.\n",
+    "    :param max_distance: Maximum 2D distance for edge-boundary association.\n",
+    "    :return: List of lifted 3D road edge geometries.\n",
+    "    \"\"\"\n",
+    "\n",
+    "    outlines_3d: List[Polyline3D] = []\n",
+    "\n",
+    "    if len(outlines_2d) >= 1 and len(boundaries) >= 1:\n",
+    "\n",
+    "        # 1. Build comprehensive spatial index with all boundary segments\n",
+    "        # NOTE @DanielDauner: We split each boundary polyline into small segments.\n",
+    "        # The spatial indexing uses axis-aligned bounding boxes, where small geometries lead to better performance.\n",
+    "        boundary_segments = []\n",
+    "        for boundary in boundaries:\n",
+    "            coords = boundary.array.reshape(-1, 1, 3)\n",
+    "            segment_coords_boundary = np.concatenate([coords[:-1], coords[1:]], axis=1)\n",
+    "            boundary_segments.append(segment_coords_boundary)\n",
+    "\n",
+    "        boundary_segments = np.concatenate(boundary_segments, axis=0)\n",
+    "        boundary_segment_linestrings = shapely.creation.linestrings(boundary_segments)\n",
+    "        occupancy_map = OccupancyMap2D(boundary_segment_linestrings)\n",
+    "\n",
+    "        for linear_ring in outlines_2d:\n",
+    "            points_2d = np.array(linear_ring.coords, dtype=np.float64)\n",
+    "            points_3d = np.zeros((len(points_2d), len(Point3DIndex)), dtype=np.float64)\n",
+    "            points_3d[..., Point3DIndex.XY] = points_2d\n",
+    "\n",
+    "            # 3. Batch query for all points\n",
+    "            query_points = shapely.creation.points(points_2d)\n",
+    "            results = occupancy_map.query_nearest(query_points, max_distance=max_distance, exclusive=True)\n",
+    "\n",
+    "            for query_idx, geometry_idx in zip(*results):\n",
+    "                query_point = query_points[query_idx]\n",
+    "                segment_coords = boundary_segments[geometry_idx]\n",
+    "                best_z = _interpolate_z_on_segment(query_point, segment_coords)\n",
+    "                points_3d[query_idx, 2] = best_z\n",
+    "\n",
+    "            outlines_3d.append(Polyline3D.from_array(points_3d))\n",
+    "\n",
+    "    return outlines_3d\n",
+    "\n",
+    "\n",
+    "def _extract_intersection_outline(lane_groups: List[AbstractLaneGroup], junction_id: str = 0) -> Polyline3D:\n",
+    "    \"\"\"Helper method to extract intersection outline in 3D from lane group helpers.\"\"\"\n",
+    "\n",
+    "    # 1. Extract the intersection outlines in 2D\n",
+    "    intersection_polygons: List[shapely.Polygon] = [\n",
+    "        lane_group_helper.shapely_polygon for lane_group_helper in lane_groups\n",
+    "    ]\n",
+    "    # for intersection_polygon in intersection_polygons:\n",
+    "    #     ax.plot(*intersection_polygon.exterior.xy)\n",
+    "\n",
+    "    # for lane_group_helper in lane_groups:\n",
+    "    #     ax.plot(*lane_group_helper.outline.linestring.xy, color=\"blue\")\n",
+    "    intersection_edges = get_road_edge_linear_rings(intersection_polygons, add_interiors=False)\n",
+    "\n",
+    "    # for linear_ring in intersection_edges:\n",
+    "    #     ax.plot(*linear_ring.xy, color=\"blue\")\n",
+    "\n",
+    "    # 2. Lift the 2D outlines to 3D\n",
+    "    lane_group_outlines: List[Polyline3D] = [lane_group_helper.outline_3d for lane_group_helper in lane_groups]\n",
+    "    intersection_outlines = lift_outlines_to_3d(intersection_edges, lane_group_outlines)\n",
+    "\n",
+    "    print(len(intersection_outlines))\n",
+    "\n",
+    "    # NOTE: When the intersection has multiple non-overlapping outlines, we cannot return a single outline in 3D.\n",
+    "    # For now, we return the longest outline.\n",
+    "\n",
+    "    valid_outlines = [outline for outline in intersection_outlines if outline.array.shape[0] > 2]\n",
+    "    assert len(valid_outlines) > 0, f\"No valid intersection outlines found for Junction {junction_id}!\"\n",
+    "\n",
+    "    longest_outline = max(valid_outlines, key=lambda outline: outline.length)\n",
+    "\n",
+    "    # for linear_ring in intersection_outlines:\n",
+    "    #     ax.plot(*linear_ring.linestring.xy, color=\"red\")\n",
+    "\n",
+    "    # ax.plot(*longest_outline.linestring.xy, color=\"red\")\n",
+    "    # longest_outline.line\n",
+    "    print(longest_outline.array[:, 2])\n",
+    "    return longest_outline\n",
+    "\n",
+    "\n",
+    "_extract_intersection_outline(lane_groups)"
    ]
   },
   {
outlines of intersections. - polygon = extract_exteriors_polygon(lane_group_helpers) + outline = _extract_intersection_outline(lane_group_helpers, junction.id) map_writer.write_intersection( CacheIntersection( object_id=junction.id, lane_group_ids=lane_group_ids_, - outline=None, - geometry=polygon, + outline=outline, + geometry=None, ) ) @@ -299,27 +306,40 @@ def _write_road_edges( running_id += 1 -def extract_exteriors_polygon(lane_group_helpers: List[OpenDriveLaneGroupHelper]) -> shapely.Polygon: - - # TODO @DanielDauner: Needs improvement !!! - # Fails if the intersection has several non overlapping parts. - # Does not provide 3D outline, just 2D shapely polygon. +def _extract_intersection_outline(lane_group_helpers: List[OpenDriveLaneGroupHelper], junction_id: str) -> Polyline3D: + """Helper method to extract intersection outline in 3D from lane group helpers.""" - # Step 1: Extract all boundary line segments - all_polygons = [] - for lane_group_helper in lane_group_helpers: - all_polygons.append(lane_group_helper.shapely_polygon) - - # Step 2: Merge all boundaries and extract the enclosed polygons - merged_boundaries = unary_union(all_polygons) + # 1. Extract the intersection outlines in 2D + intersection_polygons: List[shapely.Polygon] = [ + lane_group_helper.shapely_polygon for lane_group_helper in lane_group_helpers + ] + intersection_edges = get_road_edge_linear_rings( + intersection_polygons, + buffer_distance=0.25, + add_interiors=False, + ) - # Step 3: Generate polygons from the merged lines - polygons = list(polygonize(merged_boundaries)) + # 2. Lift the 2D outlines to 3D + lane_group_outlines: List[Polyline3D] = [ + lane_group_helper.outline_polyline_3d for lane_group_helper in lane_group_helpers + ] + intersection_outlines = lift_outlines_to_3d(intersection_edges, lane_group_outlines) + + # NOTE: When the intersection has multiple non-overlapping outlines, we cannot return a single outline in 3D. + # For now, we return the longest outline. + valid_outlines = [outline for outline in intersection_outlines if outline.array.shape[0] > 2] + if len(valid_outlines) == 0: + logger.warning( + f"Could not extract valid outline for intersection {junction_id} with {len(intersection_edges)} edges!"
+ ) + longest_outline_2d = max(intersection_edges, key=lambda outline: outline.length) + average_z = sum(outline.array[:, 2].mean() for outline in intersection_outlines) / len(intersection_outlines) - # Step 4: Select the polygon that represents the intersection - # Usually it's the largest polygon - if len(polygons) == 1: - return polygons[0] + outline_3d_array = np.zeros((len(longest_outline_2d.coords), 3)) + outline_3d_array[:, Point3DIndex.XY] = np.array(longest_outline_2d.coords) + outline_3d_array[:, Point3DIndex.Z] = average_z + longest_outline = Polyline3D.from_array(outline_3d_array) else: - # Take the largest polygon if there are multiple - return max(polygons, key=lambda p: p.area) + longest_outline = max(valid_outlines, key=lambda outline: outline.length) + + return longest_outline diff --git a/src/py123d/conversion/utils/map_utils/opendrive/utils/lane_helper.py b/src/py123d/conversion/utils/map_utils/opendrive/utils/lane_helper.py index 03f2cd0d..5b8045d2 100644 --- a/src/py123d/conversion/utils/map_utils/opendrive/utils/lane_helper.py +++ b/src/py123d/conversion/utils/map_utils/opendrive/utils/lane_helper.py @@ -140,8 +140,8 @@ def center_polyline_3d(self) -> Polyline3D: @property def outline_polyline_3d(self) -> Polyline3D: - inner_polyline = self.inner_polyline_3d.array[::-1] - outer_polyline = self.outer_polyline_3d.array + inner_polyline = self.inner_polyline_3d.array + outer_polyline = self.outer_polyline_3d.array[::-1] return Polyline3D.from_array( np.concatenate( [ @@ -156,9 +156,17 @@ def outline_polyline_3d(self) -> Polyline3D: @property def shapely_polygon(self) -> shapely.Polygon: - inner_polyline = self.inner_polyline_se2[..., StateSE2Index.XY][::-1] - outer_polyline = self.outer_polyline_se2[..., StateSE2Index.XY] - polygon_exterior = np.concatenate([inner_polyline, outer_polyline], axis=0, dtype=np.float64) + inner_polyline = self.inner_polyline_se2[..., StateSE2Index.XY] + outer_polyline = self.outer_polyline_se2[..., StateSE2Index.XY][::-1] + polygon_exterior = np.concatenate( + [ + inner_polyline, + outer_polyline, + inner_polyline[None, 0], + ], + axis=0, + dtype=np.float64, + ) return shapely.Polygon(polygon_exterior) @@ -215,8 +223,8 @@ def outer_polyline_3d(self) -> Polyline3D: @property def outline_polyline_3d(self) -> Polyline3D: - inner_polyline = self.inner_polyline_3d.array[::-1] - outer_polyline = self.outer_polyline_3d.array + inner_polyline = self.inner_polyline_3d.array + outer_polyline = self.outer_polyline_3d.array[::-1] return Polyline3D.from_array( np.concatenate( [ @@ -231,9 +239,17 @@ def outline_polyline_3d(self) -> Polyline3D: @property def shapely_polygon(self) -> shapely.Polygon: - inner_polyline = self.inner_polyline_se2[..., StateSE2Index.XY][::-1] - outer_polyline = self.outer_polyline_se2[..., StateSE2Index.XY] - polygon_exterior = np.concatenate([inner_polyline, outer_polyline], axis=0, dtype=np.float64) + inner_polyline = self.inner_polyline_se2[..., StateSE2Index.XY] + outer_polyline = self.outer_polyline_se2[..., StateSE2Index.XY][::-1] + polygon_exterior = np.concatenate( + [ + inner_polyline, + outer_polyline, + inner_polyline[None, 0], + ], + axis=0, + dtype=np.float64, + ) return shapely.Polygon(polygon_exterior) diff --git a/src/py123d/conversion/utils/map_utils/road_edge/road_edge_2d_utils.py b/src/py123d/conversion/utils/map_utils/road_edge/road_edge_2d_utils.py index 8e1abc33..f4cbb094 100644 --- a/src/py123d/conversion/utils/map_utils/road_edge/road_edge_2d_utils.py +++ 
b/src/py123d/conversion/utils/map_utils/road_edge/road_edge_2d_utils.py @@ -1,24 +1,27 @@ -from typing import Final, List, Union +from typing import List, Union import numpy as np import shapely from shapely import LinearRing, LineString, Polygon, union_all -ROAD_EDGE_BUFFER: Final[float] = 0.05 - -def get_road_edge_linear_rings(drivable_polygons: List[Polygon]) -> List[LinearRing]: +def get_road_edge_linear_rings( + drivable_polygons: List[Polygon], + buffer_distance: float = 0.05, + add_interiors: bool = True, +) -> List[LinearRing]: def _polygon_to_linear_rings(polygon: Polygon) -> List[LinearRing]: assert polygon.geom_type == "Polygon" linear_ring_list = [] linear_ring_list.append(polygon.exterior) - for interior in polygon.interiors: - linear_ring_list.append(interior) + if add_interiors: + for interior in polygon.interiors: + linear_ring_list.append(interior) return linear_ring_list - union_polygon = union_all([polygon.buffer(ROAD_EDGE_BUFFER, join_style=2) for polygon in drivable_polygons]).buffer( - -ROAD_EDGE_BUFFER, join_style=2 + union_polygon = union_all([polygon.buffer(buffer_distance, join_style=2) for polygon in drivable_polygons]).buffer( + -buffer_distance, join_style=2 ) linear_ring_list = [] diff --git a/src/py123d/conversion/utils/map_utils/road_edge/road_edge_3d_utils.py b/src/py123d/conversion/utils/map_utils/road_edge/road_edge_3d_utils.py index 986433e9..42d01faf 100644 --- a/src/py123d/conversion/utils/map_utils/road_edge/road_edge_3d_utils.py +++ b/src/py123d/conversion/utils/map_utils/road_edge/road_edge_3d_utils.py @@ -191,7 +191,7 @@ def lift_road_edges_to_3d( query_point = query_points[query_idx] segment_coords = boundary_segments[geometry_idx] best_z = _interpolate_z_on_segment(query_point, segment_coords) - points_3d[query_idx, 2] = best_z + points_3d[query_idx, Point3DIndex.Z] = best_z continuous_segments = _split_continuous_segments(np.array(results[0])) for segment_indices in continuous_segments: @@ -202,6 +202,57 @@ def lift_road_edges_to_3d( return road_edges_3d +def lift_outlines_to_3d( + outlines_2d: List[shapely.LinearRing], + boundaries: List[Polyline3D], + max_distance: float = 10.0, +) -> List[Polyline3D]: + """Lift 2D outlines to 3D by querying elevation from boundary segments. + + :param outlines_2d: List of 2D outline geometries. + :param boundaries: List of 3D boundary geometries. + :param max_distance: Maximum 2D distance for outline-boundary association. + :return: List of lifted 3D outline geometries. + """ + + outlines_3d: List[Polyline3D] = [] + if len(outlines_2d) >= 1 and len(boundaries) >= 1: + boundary_segments = [] + for boundary in boundaries: + coords = boundary.array.reshape(-1, 1, 3) + segment_coords_boundary = np.concatenate([coords[:-1], coords[1:]], axis=1) + boundary_segments.append(segment_coords_boundary) + + boundary_segments = np.concatenate(boundary_segments, axis=0) + boundary_segment_linestrings = shapely.creation.linestrings(boundary_segments) + occupancy_map = OccupancyMap2D(boundary_segment_linestrings) + + for linear_ring in outlines_2d: + points_2d = np.array(linear_ring.coords, dtype=np.float64) + points_3d = np.zeros((len(points_2d), len(Point3DIndex)), dtype=np.float64) + points_3d[..., Point3DIndex.XY] = points_2d + + # 3. 
Batch query for all points + query_points = shapely.creation.points(points_2d) + results = occupancy_map.query_nearest(query_points, max_distance=max_distance, exclusive=True) + + found_nearest = np.zeros(len(points_2d), dtype=bool) + for query_idx, geometry_idx in zip(*results): + query_point = query_points[query_idx] + segment_coords = boundary_segments[geometry_idx] + best_z = _interpolate_z_on_segment(query_point, segment_coords) + points_3d[query_idx, Point3DIndex.Z] = best_z + found_nearest[query_idx] = True + + if not np.all(found_nearest): + logger.warning("Some outline points could not find a nearest boundary segment for Z-lifting.") + points_3d[~found_nearest, Point3DIndex.Z] = np.mean(points_3d[found_nearest, Point3DIndex.Z]) + + outlines_3d.append(Polyline3D.from_array(points_3d)) + + return outlines_3d + + def _resolve_conflicting_lane_groups( conflicting_lane_groups: Dict[MapObjectIDType, List[MapObjectIDType]], lane_groups: List[AbstractLaneGroup], diff --git a/src/py123d/datatypes/maps/gpkg/gpkg_map_objects.py b/src/py123d/datatypes/maps/gpkg/gpkg_map_objects.py index 642c97b7..97b11d73 100644 --- a/src/py123d/datatypes/maps/gpkg/gpkg_map_objects.py +++ b/src/py123d/datatypes/maps/gpkg/gpkg_map_objects.py @@ -2,7 +2,7 @@ import ast from functools import cached_property -from typing import List, Optional +from typing import List, Optional, Union import geopandas as gpd import numpy as np @@ -27,6 +27,7 @@ from py123d.datatypes.maps.gpkg.gpkg_utils import get_row_with_value, get_trimesh_from_boundaries from py123d.datatypes.maps.map_datatypes import RoadEdgeType, RoadLineType from py123d.geometry import Point3DIndex, Polyline3D +from py123d.geometry.polyline import Polyline2D class GPKGSurfaceObject(AbstractSurfaceMapObject): @@ -105,7 +106,7 @@ def _object_row(self) -> gpd.GeoSeries: return get_row_with_value(self._object_df, "id", self.object_id) @property - def polyline_3d(self) -> Polyline3D: + def polyline(self) -> Union[Polyline2D, Polyline3D]: """Inherited, see superclass.""" return Polyline3D.from_linestring(self._object_row.geometry) diff --git a/src/py123d/visualization/viser/viser_viewer.py b/src/py123d/visualization/viser/viser_viewer.py index 17adbb2b..60458b29 100644 --- a/src/py123d/visualization/viser/viser_viewer.py +++ b/src/py123d/visualization/viser/viser_viewer.py @@ -3,7 +3,6 @@ from typing import Dict, List, Optional import viser -from viser.theme import TitlebarButton, TitlebarConfig, TitlebarImage from py123d.datatypes.maps.map_datatypes import MapLayer from py123d.datatypes.scene.abstract_scene import AbstractScene @@ -19,6 +18,9 @@ ) from py123d.visualization.viser.viser_config import ViserConfig +# from viser.theme import TitlebarButton, TitlebarConfig, TitlebarImage + + logger = logging.getLogger(__name__) @@ -51,34 +53,34 @@ def _build_viser_server(viser_config: ViserConfig) -> viser.ViserServer: verbose=viser_config.server_verbose, ) - # TODO: Fix links and logo. 
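Review note on the `lift_outlines_to_3d` hunk above: each 2D outline vertex is snapped to its nearest 3D boundary segment via `OccupancyMap2D.query_nearest`, and its elevation is interpolated along that segment; vertices with no segment within `max_distance` fall back to the mean of the recovered heights. A minimal sketch of the interpolation step, assuming `_interpolate_z_on_segment` projects the 2D point onto the segment and linearly interpolates z (the actual helper in road_edge_3d_utils.py may differ in detail):

import numpy as np
import shapely


def interpolate_z_on_segment(query_point: shapely.Point, segment_coords: np.ndarray) -> float:
    # segment_coords has shape (2, 3): the 3D start and end of one boundary segment.
    start, end = segment_coords[0], segment_coords[1]
    direction_2d = end[:2] - start[:2]
    squared_length = float(np.dot(direction_2d, direction_2d))
    if squared_length == 0.0:
        # Degenerate segment: both endpoints coincide in 2D, so take the start height.
        return float(start[2])
    offset = np.array([query_point.x, query_point.y]) - start[:2]
    # Clamp the projection parameter so the query maps onto the segment, not its extension.
    t = float(np.clip(np.dot(offset, direction_2d) / squared_length, 0.0, 1.0))
    return float(start[2] + t * (end[2] - start[2]))


point = shapely.Point(1.0, 0.1)
segment = np.array([[0.0, 0.0, 10.0], [2.0, 0.0, 12.0]])
print(interpolate_z_on_segment(point, segment))  # -> 11.0, halfway between the endpoint heights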
- buttons = ( - TitlebarButton( - text="Getting Started", - icon=None, - href="https://nerf.studio", - ), - TitlebarButton( - text="Github", - icon="GitHub", - href="https://github.com/nerfstudio-project/nerfstudio", - ), - TitlebarButton( - text="Documentation", - icon="Description", - href="https://docs.nerf.studio", - ), - ) - image = TitlebarImage( - image_url_light="https://docs.nerf.studio/_static/imgs/logo.png", - image_url_dark="https://docs.nerf.studio/_static/imgs/logo-dark.png", - image_alt="NerfStudio Logo", - href="https://docs.nerf.studio/", - ) - titlebar_theme = TitlebarConfig(buttons=buttons, image=image) + # TODO: Add logos, once we are public + # buttons = ( + # TitlebarButton( + # text="Getting Started", + # icon=None, + # href="https://nerf.studio", + # ), + # TitlebarButton( + # text="Github", + # icon="GitHub", + # href="https://github.com/nerfstudio-project/nerfstudio", + # ), + # TitlebarButton( + # text="Documentation", + # icon="Description", + # href="https://docs.nerf.studio", + # ), + # ) + # image = TitlebarImage( + # image_url_light="https://docs.nerf.studio/_static/imgs/logo.png", + # image_url_dark="https://docs.nerf.studio/_static/imgs/logo-dark.png", + # image_alt="NerfStudio Logo", + # href="https://docs.nerf.studio/", + # ) + # titlebar_theme = TitlebarConfig(buttons=buttons, image=image) server.gui.configure_theme( - titlebar_content=titlebar_theme, + # titlebar_content=titlebar_theme, control_layout=viser_config.theme_control_layout, control_width=viser_config.theme_control_width, dark_mode=viser_config.theme_dark_mode, From b3eac8a8d7df6f1d50418165d0b3c161b762830f Mon Sep 17 00:00:00 2001 From: Daniel Dauner Date: Mon, 20 Oct 2025 21:59:16 +0200 Subject: [PATCH 105/145] Test adding docs workflow to GitHub. --- .github/workflows/deploy-docs.yaml | 58 ++++++++++++++++++++++++++++++ docs/requiremets.txt | 5 +++ 2 files changed, 63 insertions(+) create mode 100644 .github/workflows/deploy-docs.yaml create mode 100644 docs/requiremets.txt diff --git a/.github/workflows/deploy-docs.yaml b/.github/workflows/deploy-docs.yaml new file mode 100644 index 00000000..0dcdb195 --- /dev/null +++ b/.github/workflows/deploy-docs.yaml @@ -0,0 +1,58 @@ +name: Deploy Sphinx Documentation + +on: + push: + branches: + - main # Change this to your default branch name if different + workflow_dispatch: # Allows manual triggering + +permissions: + contents: read + pages: write + id-token: write + +concurrency: + group: "pages" + cancel-in-progress: false + +jobs: + build: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + + - name: Set up Python + uses: actions/setup-python@v4 + with: + python-version: '3.11' + + - name: Install dependencies + run: | + python -m pip install --upgrade pip + pip install sphinx + # Add any other Sphinx extensions you use, for example: + # pip install sphinx-rtd-theme + # pip install sphinxcontrib-napoleon + # Or if you have a requirements.txt: + # pip install -r docs/requirements.txt + + - name: Build Sphinx documentation + run: | + cd docs + make html + + - name: Upload artifact + uses: actions/upload-pages-artifact@v2 + with: + path: 'docs/_build/html' + + deploy: + environment: + name: github-pages + url: ${{ steps.deployment.outputs.page_url }} + runs-on: ubuntu-latest + needs: build + steps: + - name: Deploy to GitHub Pages + id: deployment + uses: actions/deploy-pages@v3 diff --git a/docs/requiremets.txt b/docs/requiremets.txt new file mode 100644 index 00000000..1bfb747f --- /dev/null +++ b/docs/requiremets.txt @@ -0,0 +1,5 
@@ +Sphinx +sphinx-rtd-theme +sphinx-autobuild +myst-parser +sphinx-copybutton From 1ebecc75ed2f8ab984af5a468edf0aa3f9887870 Mon Sep 17 00:00:00 2001 From: Daniel Dauner Date: Mon, 20 Oct 2025 22:04:45 +0200 Subject: [PATCH 106/145] Change branch in github workflow. --- .github/workflows/deploy-docs.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/deploy-docs.yaml b/.github/workflows/deploy-docs.yaml index 0dcdb195..4ac4b2e6 100644 --- a/.github/workflows/deploy-docs.yaml +++ b/.github/workflows/deploy-docs.yaml @@ -3,7 +3,7 @@ name: Deploy Sphinx Documentation on: push: branches: - - main # Change this to your default branch name if different + - dev_v0.0.7 # Change this to your branch name (e.g., docs, dev, etc.) workflow_dispatch: # Allows manual triggering permissions: From 4ae4f1255355a3746bae85424bbeb98b36f02060 Mon Sep 17 00:00:00 2001 From: Daniel Dauner Date: Mon, 20 Oct 2025 22:07:27 +0200 Subject: [PATCH 107/145] Remove typo and re-trigger github action. --- .github/workflows/deploy-docs.yaml | 2 +- docs/{requiremets.txt => requirements.txt} | 0 2 files changed, 1 insertion(+), 1 deletion(-) rename docs/{requiremets.txt => requirements.txt} (100%) diff --git a/.github/workflows/deploy-docs.yaml b/.github/workflows/deploy-docs.yaml index 4ac4b2e6..e13df3b4 100644 --- a/.github/workflows/deploy-docs.yaml +++ b/.github/workflows/deploy-docs.yaml @@ -24,7 +24,7 @@ jobs: - name: Set up Python uses: actions/setup-python@v4 with: - python-version: '3.11' + python-version: '3.12' - name: Install dependencies run: | diff --git a/docs/requiremets.txt b/docs/requirements.txt similarity index 100% rename from docs/requiremets.txt rename to docs/requirements.txt From 6128652cd47584beccc71ad2f411ba042c5bc7b9 Mon Sep 17 00:00:00 2001 From: Daniel Dauner Date: Mon, 20 Oct 2025 22:10:33 +0200 Subject: [PATCH 108/145] Change to non-deprecated `actions/upload-pages-artifact...` version. 
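Review note on the docs workflow commits above: the build step is a plain Sphinx invocation, so it can be reproduced locally before re-triggering the action. A minimal sketch using Sphinx's Python entry point, assuming the sources live in docs/ with a conf.py as in this series:

import sys

from sphinx.cmd.build import build_main

# Local equivalent of the CI "Build Sphinx documentation" step; -b html selects
# the HTML builder (a later commit switches CI to `sphinx-build docs docs/build -b dirhtml`).
exit_code = build_main(["-b", "html", "docs", "docs/_build/html"])
sys.exit(exit_code)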
--- .github/workflows/deploy-docs.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/deploy-docs.yaml b/.github/workflows/deploy-docs.yaml index e13df3b4..abc8f877 100644 --- a/.github/workflows/deploy-docs.yaml +++ b/.github/workflows/deploy-docs.yaml @@ -42,7 +42,7 @@ jobs: make html - name: Upload artifact - uses: actions/upload-pages-artifact@v2 + uses: actions/upload-pages-artifact@v4 with: path: 'docs/_build/html' From fd1e578694e366a53bcacf9f10b9f83f9ee24d83 Mon Sep 17 00:00:00 2001 From: Daniel Dauner Date: Mon, 20 Oct 2025 22:16:56 +0200 Subject: [PATCH 109/145] Test `requirements.txt` compiled with `pip-compile docs/requirements.in` --- docs/requirements.in | 5 ++ docs/requirements.txt | 115 ++++++++++++++++++++++++++++++++++++++++-- 2 files changed, 115 insertions(+), 5 deletions(-) create mode 100644 docs/requirements.in diff --git a/docs/requirements.in b/docs/requirements.in new file mode 100644 index 00000000..bd564dda --- /dev/null +++ b/docs/requirements.in @@ -0,0 +1,5 @@ +sphinx +sphinx-copybutton +sphinx-rtd-theme +sphinx-autobuild +myst-parser diff --git a/docs/requirements.txt b/docs/requirements.txt index 1bfb747f..bf7b4c72 100644 --- a/docs/requirements.txt +++ b/docs/requirements.txt @@ -1,5 +1,110 @@ -Sphinx -sphinx-rtd-theme -sphinx-autobuild -myst-parser -sphinx-copybutton +# +# This file is autogenerated by pip-compile with Python 3.12 +# by the following command: +# +# pip-compile docs/requirements.in +# +--extra-index-url https://pypi.ngc.nvidia.com +--trusted-host pypi.ngc.nvidia.com + +alabaster==1.0.0 + # via sphinx +anyio==4.11.0 + # via + # starlette + # watchfiles +babel==2.17.0 + # via sphinx +certifi==2025.10.5 + # via requests +charset-normalizer==3.4.4 + # via requests +click==8.3.0 + # via uvicorn +colorama==0.4.6 + # via sphinx-autobuild +docutils==0.21.2 + # via + # myst-parser + # sphinx + # sphinx-rtd-theme +h11==0.16.0 + # via uvicorn +idna==3.11 + # via + # anyio + # requests +imagesize==1.4.1 + # via sphinx +jinja2==3.1.6 + # via + # myst-parser + # sphinx +markdown-it-py==3.0.0 + # via + # mdit-py-plugins + # myst-parser +markupsafe==3.0.3 + # via jinja2 +mdit-py-plugins==0.5.0 + # via myst-parser +mdurl==0.1.2 + # via markdown-it-py +myst-parser==4.0.1 + # via -r docs/requirements.in +packaging==25.0 + # via sphinx +pygments==2.19.2 + # via sphinx +pyyaml==6.0.3 + # via myst-parser +requests==2.32.5 + # via sphinx +roman-numerals-py==3.1.0 + # via sphinx +sniffio==1.3.1 + # via anyio +snowballstemmer==3.0.1 + # via sphinx +sphinx==8.2.3 + # via + # -r docs/requirements.in + # myst-parser + # sphinx-autobuild + # sphinx-copybutton + # sphinx-rtd-theme + # sphinxcontrib-jquery +sphinx-autobuild==2025.8.25 + # via -r docs/requirements.in +sphinx-copybutton==0.5.2 + # via -r docs/requirements.in +sphinx-rtd-theme==3.0.2 + # via -r docs/requirements.in +sphinxcontrib-applehelp==2.0.0 + # via sphinx +sphinxcontrib-devhelp==2.0.0 + # via sphinx +sphinxcontrib-htmlhelp==2.1.0 + # via sphinx +sphinxcontrib-jquery==4.1 + # via sphinx-rtd-theme +sphinxcontrib-jsmath==1.0.1 + # via sphinx +sphinxcontrib-qthelp==2.0.0 + # via sphinx +sphinxcontrib-serializinghtml==2.0.0 + # via sphinx +starlette==0.48.0 + # via sphinx-autobuild +typing-extensions==4.15.0 + # via + # anyio + # starlette +urllib3==2.5.0 + # via requests +uvicorn==0.38.0 + # via sphinx-autobuild +watchfiles==1.1.1 + # via sphinx-autobuild +websockets==15.0.1 + # via sphinx-autobuild From 3c29af9e77da3437bc60f33adce037c64cc0649b Mon Sep 17 00:00:00 
2001 From: Daniel Dauner Date: Tue, 21 Oct 2025 19:09:00 +0200 Subject: [PATCH 110/145] Fix some plots, add some notebooks demos (need to be cleaned before release). --- notebooks/bev_matplotlib.ipynb | 49 ++---- notebooks/bev_render.ipynb | 130 ++++++++++++++ notebooks/camera_matplotlib.ipynb | 162 ++++++++++++++++++ src/py123d/datatypes/scene/abstract_scene.py | 2 +- .../scene/arrow/utils/arrow_getters.py | 33 ++-- .../conversion/datasets/pandaset_dataset.yaml | 4 +- src/py123d/visualization/matplotlib/camera.py | 26 +-- src/py123d/visualization/matplotlib/plots.py | 10 +- .../visualization/viser/viser_config.py | 3 +- .../visualization/viser/viser_viewer.py | 4 +- 10 files changed, 351 insertions(+), 72 deletions(-) create mode 100644 notebooks/bev_render.ipynb create mode 100644 notebooks/camera_matplotlib.ipynb diff --git a/notebooks/bev_matplotlib.ipynb b/notebooks/bev_matplotlib.ipynb index 13eb3f5f..16125a71 100644 --- a/notebooks/bev_matplotlib.ipynb +++ b/notebooks/bev_matplotlib.ipynb @@ -11,7 +11,7 @@ "from py123d.datatypes.scene.scene_filter import SceneFilter\n", "\n", "from py123d.common.multithreading.worker_sequential import Sequential\n", - "from py123d.datatypes.sensors.camera.pinhole_camera import PinholeCameraType " + "# from py123d.datatypes.sensors.camera.pinhole_camera import PinholeCameraType " ] }, { @@ -20,28 +20,13 @@ "id": "1", "metadata": {}, "outputs": [], - "source": [ - "from py123d.geometry import Point2D\n", - "import numpy as np\n", - "\n", - "# import torch\n", - "\n", - "from py123d.geometry.polyline import Polyline2D" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "2", - "metadata": {}, - "outputs": [], "source": [ "\n", "# splits = [\"wopd_val\"]\n", - "splits = [\"carla_test\"]\n", + "# splits = [\"carla_test\"]\n", "# splits = [\"nuplan-mini_test\"]\n", "# splits = [\"av2-sensor-mini_train\"]\n", - "# splits = [\"pandaset_train\"]\n", + "splits = [\"pandaset_train\"]\n", "# log_names = None\n", "\n", "\n", @@ -70,7 +55,7 @@ { "cell_type": "code", "execution_count": null, - "id": "3", + "id": "2", "metadata": {}, "outputs": [], "source": [ @@ -227,9 +212,9 @@ " map_api = scene.get_map_api()\n", "\n", " point_2d = ego_vehicle_state.bounding_box.center.state_se2.point_2d\n", - " add_debug_map_on_ax(ax, scene.get_map_api(), point_2d, radius=radius, route_lane_group_ids=None)\n", - " # if map_api is not None:\n", - " # add_default_map_on_ax(ax, map_api, point_2d, radius=radius, route_lane_group_ids=None)\n", + " if map_api is not None:\n", + " add_debug_map_on_ax(ax, scene.get_map_api(), point_2d, radius=radius, route_lane_group_ids=None)\n", + " add_default_map_on_ax(ax, map_api, point_2d, radius=radius, route_lane_group_ids=None)\n", " # add_traffic_lights_to_ax(ax, traffic_light_detections, scene.get_map_api())\n", "\n", " add_box_detections_to_ax(ax, box_detections)\n", @@ -257,12 +242,12 @@ "# scene_index = \n", "iteration = 1\n", "\n", - "scale = 0.5\n", - "fig, ax = plt.subplots(3, 1, figsize=(5*scale, 15*scale))\n", + "scale = 10\n", + "fig, ax = plt.subplots(1, 1, figsize=(scale, scale))\n", "scene = np.random.choice(scenes)\n", - "_plot_scene_on_ax(ax[0], scene, iteration, radius=30)\n", - "_plot_scene_on_ax(ax[1], scene, iteration, radius=50)\n", - "_plot_scene_on_ax(ax[2], scene, iteration, radius=100)\n", + "_plot_scene_on_ax(ax, scene, iteration, radius=30)\n", + "# _plot_scene_on_ax(ax[1], scene, iteration, radius=50)\n", + "# _plot_scene_on_ax(ax[2], scene, iteration, radius=100)\n", "\n", "plt.show()\n" ] @@ 
-270,7 +255,7 @@ { "cell_type": "code", "execution_count": null, - "id": "4", + "id": "3", "metadata": {}, "outputs": [], "source": [ @@ -288,7 +273,7 @@ { "cell_type": "code", "execution_count": null, - "id": "5", + "id": "4", "metadata": {}, "outputs": [], "source": [ @@ -404,7 +389,7 @@ { "cell_type": "code", "execution_count": null, - "id": "6", + "id": "5", "metadata": {}, "outputs": [], "source": [] @@ -412,7 +397,7 @@ { "cell_type": "code", "execution_count": null, - "id": "7", + "id": "6", "metadata": {}, "outputs": [], "source": [] @@ -420,7 +405,7 @@ { "cell_type": "code", "execution_count": null, - "id": "8", + "id": "7", "metadata": {}, "outputs": [], "source": [] diff --git a/notebooks/bev_render.ipynb b/notebooks/bev_render.ipynb new file mode 100644 index 00000000..6e84c122 --- /dev/null +++ b/notebooks/bev_render.ipynb @@ -0,0 +1,130 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "id": "0", + "metadata": {}, + "outputs": [], + "source": [ + "from py123d.datatypes.scene.arrow.arrow_scene_builder import ArrowSceneBuilder\n", + "from py123d.datatypes.scene.scene_filter import SceneFilter\n", + "\n", + "from py123d.common.multithreading.worker_sequential import Sequential\n", + "# from py123d.datatypes.sensors.camera.pinhole_camera import PinholeCameraType " + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "1", + "metadata": {}, + "outputs": [], + "source": [ + "\n", + "# splits = [\"wopd_val\"]\n", + "splits = [\"carla_test\"]\n", + "# splits = [\"nuplan-mini_test\"]\n", + "# splits = [\"av2-sensor-mini_train\"]\n", + "# splits = [\"pandaset_train\"]\n", + "# log_names = None\n", + "\n", + "\n", + "\n", + "log_names = None\n", + "scene_uuids = None\n", + "\n", + "scene_filter = SceneFilter(\n", + " split_names=splits,\n", + " log_names=log_names,\n", + " scene_uuids=scene_uuids,\n", + " duration_s=20.0,\n", + " history_s=0.0,\n", + " timestamp_threshold_s=20,\n", + " shuffle=True,\n", + " # camera_types=[CameraType.CAM_F0],\n", + ")\n", + "scene_builder = ArrowSceneBuilder()\n", + "worker = Sequential()\n", + "# worker = RayDistributed()\n", + "scenes = scene_builder.get_scenes(scene_filter, worker)\n", + "\n", + "print(f\"Found {len(scenes)} scenes\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "2", + "metadata": {}, + "outputs": [], + "source": [ + "from py123d.visualization.matplotlib.plots import render_scene_animation\n", + "\n", + "for i in [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]:\n", + " render_scene_animation(scenes[i], output_path=\"test\", format=\"mp4\", fps=20, step=1, radius=50)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "3", + "metadata": {}, + "outputs": [], + "source": [] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "4", + "metadata": {}, + "outputs": [], + "source": [] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "5", + "metadata": {}, + "outputs": [], + "source": [] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "6", + "metadata": {}, + "outputs": [], + "source": [] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "7", + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "py123d", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + 
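Review note on bev_render.ipynb above: `render_scene_animation` (revised in plots.py further down in this commit) is a thin wrapper around matplotlib's `FuncAnimation` with an ffmpeg writer. A self-contained sketch of the same save pattern, animating a dummy marker instead of a scene:

import numpy as np
from matplotlib import animation
from matplotlib import pyplot as plt

fig, ax = plt.subplots()
ax.set_xlim(-1.5, 1.5)
ax.set_ylim(-1.5, 1.5)
(marker,) = ax.plot([], [], "o")


def update(i: int):
    # Redraw one frame; render_scene_animation does the same via _plot_scene_on_ax.
    angle = 2 * np.pi * i / 50
    marker.set_data([np.cos(angle)], [np.sin(angle)])
    return (marker,)


ani = animation.FuncAnimation(fig, update, frames=50, repeat=False)
ani.save("circle.mp4", writer="ffmpeg", fps=20)  # requires ffmpeg on PATH
plt.close(fig)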
"pygments_lexer": "ipython3", + "version": "3.12.12" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/notebooks/camera_matplotlib.ipynb b/notebooks/camera_matplotlib.ipynb new file mode 100644 index 00000000..f9a0433a --- /dev/null +++ b/notebooks/camera_matplotlib.ipynb @@ -0,0 +1,162 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "id": "0", + "metadata": {}, + "outputs": [], + "source": [ + "from py123d.datatypes.scene.arrow.arrow_scene_builder import ArrowSceneBuilder\n", + "from py123d.datatypes.scene.scene_filter import SceneFilter\n", + "\n", + "from py123d.common.multithreading.worker_sequential import Sequential\n", + "from py123d.datatypes.sensors.camera.pinhole_camera import PinholeCameraType" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "1", + "metadata": {}, + "outputs": [], + "source": [ + "# splits = [\"wopd_val\"]\n", + "# splits = [\"carla_test\"]\n", + "# splits = [\"nuplan-mini_test\"]\n", + "# splits = [\"av2-sensor-mini_train\"]\n", + "\n", + "\n", + "splits = [\"pandaset_train\"]\n", + "# log_names = None\n", + "\n", + "\n", + "log_names = None\n", + "scene_uuids = None\n", + "\n", + "scene_filter = SceneFilter(\n", + " split_names=splits,\n", + " log_names=log_names,\n", + " scene_uuids=scene_uuids,\n", + " duration_s=6.0,\n", + " history_s=0.0,\n", + " timestamp_threshold_s=20,\n", + " shuffle=True,\n", + " camera_types=[PinholeCameraType.CAM_F0],\n", + ")\n", + "scene_builder = ArrowSceneBuilder()\n", + "worker = Sequential()\n", + "# worker = RayDistributed()\n", + "scenes = scene_builder.get_scenes(scene_filter, worker)\n", + "\n", + "print(f\"Found {len(scenes)} scenes\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "2", + "metadata": {}, + "outputs": [], + "source": [ + "from matplotlib import pyplot as plt\n", + "from py123d.datatypes.scene.abstract_scene import AbstractScene\n", + "from py123d.visualization.matplotlib.camera import add_box_detections_to_camera_ax, add_camera_ax\n", + "\n", + "iteration = 0\n", + "scene = scenes[0]\n", + "\n", + "scene: AbstractScene\n", + "print(scene.uuid, scene.available_camera_types)\n", + "\n", + "scale = 3.0\n", + "fig, ax = plt.subplots(2, 3, figsize=(scale * 6, scale * 2.5))\n", + "\n", + "\n", + "camera_ax_mapping = {\n", + " PinholeCameraType.CAM_L0: ax[0, 0],\n", + " PinholeCameraType.CAM_F0: ax[0, 1],\n", + " PinholeCameraType.CAM_R0: ax[0, 2],\n", + " PinholeCameraType.CAM_L1: ax[1, 0],\n", + " PinholeCameraType.CAM_B0: ax[1, 1],\n", + " PinholeCameraType.CAM_R1: ax[1, 2],\n", + "}\n", + "\n", + "\n", + "for camera_type, ax_ in camera_ax_mapping.items():\n", + " camera = scene.get_camera_at_iteration(iteration, camera_type)\n", + " box_detections = scene.get_box_detections_at_iteration(iteration)\n", + " ego_state = scene.get_ego_state_at_iteration(iteration)\n", + "\n", + " add_box_detections_to_camera_ax(\n", + " ax_,\n", + " camera,\n", + " box_detections,\n", + " ego_state,\n", + " )\n", + " ax_.set_title(f\"Camera: {camera_type.name}\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "3", + "metadata": {}, + "outputs": [], + "source": [] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "4", + "metadata": {}, + "outputs": [], + "source": [] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "5", + "metadata": {}, + "outputs": [], + "source": [] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "6", + "metadata": {}, + "outputs": [], + 
"source": [] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "7", + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "py123d", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.12.12" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/src/py123d/datatypes/scene/abstract_scene.py b/src/py123d/datatypes/scene/abstract_scene.py index ca302047..45f5ffc2 100644 --- a/src/py123d/datatypes/scene/abstract_scene.py +++ b/src/py123d/datatypes/scene/abstract_scene.py @@ -94,7 +94,7 @@ def scene_extraction_metadata(self) -> SceneExtractionMetadata: return self.get_scene_extraction_metadata() @property - def token(self) -> str: + def uuid(self) -> str: return self.scene_extraction_metadata.initial_uuid @property diff --git a/src/py123d/datatypes/scene/arrow/utils/arrow_getters.py b/src/py123d/datatypes/scene/arrow/utils/arrow_getters.py index 634bc1ca..6a4bba72 100644 --- a/src/py123d/datatypes/scene/arrow/utils/arrow_getters.py +++ b/src/py123d/datatypes/scene/arrow/utils/arrow_getters.py @@ -77,20 +77,24 @@ def get_box_detections_from_arrow_table(arrow_table: pa.Table, index: int) -> Bo def get_traffic_light_detections_from_arrow_table(arrow_table: pa.Table, index: int) -> TrafficLightDetectionWrapper: timepoint = get_timepoint_from_arrow_table(arrow_table, index) - traffic_light_detections: List[TrafficLightDetection] = [] + traffic_light_detections: Optional[List[TrafficLightDetection]] = None + + if "traffic_light_ids" in arrow_table.schema.names and "traffic_light_types" in arrow_table.schema.names: + traffic_light_detections: List[TrafficLightDetection] = [] + for lane_id, status in zip( + arrow_table["traffic_light_ids"][index].as_py(), + arrow_table["traffic_light_types"][index].as_py(), + ): + traffic_light_detection = TrafficLightDetection( + timepoint=timepoint, + lane_id=lane_id, + status=TrafficLightStatus(status), + ) + traffic_light_detections.append(traffic_light_detection) - for lane_id, status in zip( - arrow_table["traffic_light_ids"][index].as_py(), - arrow_table["traffic_light_types"][index].as_py(), - ): - traffic_light_detection = TrafficLightDetection( - timepoint=timepoint, - lane_id=lane_id, - status=TrafficLightStatus(status), - ) - traffic_light_detections.append(traffic_light_detection) + traffic_light_detections = TrafficLightDetectionWrapper(traffic_light_detections=traffic_light_detections) - return TrafficLightDetectionWrapper(traffic_light_detections=traffic_light_detections) + return traffic_light_detections def get_camera_from_arrow_table( @@ -161,10 +165,7 @@ def get_lidar_from_arrow_table( lidar = load_nuplan_lidar_from_path(full_lidar_path, lidar_metadata) elif log_metadata.dataset == "carla": - from py123d.conversion.datasets.carla.carla_load_sensor import load_carla_lidar_from_path - - lidar = load_carla_lidar_from_path(full_lidar_path, lidar_metadata) - + raise NotImplementedError("Loading LiDAR data for Carla dataset is not implemented.") elif log_metadata.dataset == "av2-sensor": from py123d.conversion.datasets.av2.utils.av2_sensor_loading import load_av2_sensor_lidar_pc_from_path diff --git a/src/py123d/script/config/conversion/datasets/pandaset_dataset.yaml b/src/py123d/script/config/conversion/datasets/pandaset_dataset.yaml 
index 6acdd2df..9f2f8e3e 100644 --- a/src/py123d/script/config/conversion/datasets/pandaset_dataset.yaml +++ b/src/py123d/script/config/conversion/datasets/pandaset_dataset.yaml @@ -26,11 +26,11 @@ pandaset_dataset: # Cameras include_cameras: true - camera_store_option: "path" # "path", "binary", "mp4" + camera_store_option: "binary" # "path", "binary", "mp4" # LiDARs include_lidars: true - lidar_store_option: "path" # "path", "binary" + lidar_store_option: "binary" # "path", "binary" # Scenario tag / Route # NOTE: These are only supported for nuPlan. Consider removing or expanding support. diff --git a/src/py123d/visualization/matplotlib/camera.py b/src/py123d/visualization/matplotlib/camera.py index fd3f4d5a..57b708be 100644 --- a/src/py123d/visualization/matplotlib/camera.py +++ b/src/py123d/visualization/matplotlib/camera.py @@ -12,10 +12,10 @@ from py123d.datatypes.detections.detection import BoxDetectionSE3, BoxDetectionWrapper from py123d.datatypes.detections.detection_types import DetectionType -from py123d.datatypes.sensors.camera.pinhole_camera import PinholeCamera +from py123d.datatypes.sensors.camera.pinhole_camera import PinholeCamera, PinholeIntrinsics from py123d.datatypes.vehicle_state.ego_state import EgoStateSE3 from py123d.geometry import BoundingBoxSE3Index, Corners3DIndex -from py123d.geometry.transform.transform_euler_se3 import convert_absolute_to_relative_euler_se3_array +from py123d.geometry.transform.transform_se3 import convert_absolute_to_relative_se3_array from py123d.visualization.color.default import BOX_DETECTION_CONFIG # from navsim.common.dataclasses import Annotations, Camera, Lidar @@ -98,12 +98,12 @@ def add_box_detections_to_camera_ax( box_detection_array[idx] = box_detection.bounding_box_se3.array # FIXME - box_detection_array[..., BoundingBoxSE3Index.STATE_SE3] = convert_absolute_to_relative_euler_se3_array( + box_detection_array[..., BoundingBoxSE3Index.STATE_SE3] = convert_absolute_to_relative_se3_array( ego_state_se3.rear_axle_se3, box_detection_array[..., BoundingBoxSE3Index.STATE_SE3] ) # box_detection_array[..., BoundingBoxSE3Index.XYZ] -= ego_state_se3.rear_axle_se3.point_3d.array detection_positions, detection_extents, detection_yaws = _transform_annotations_to_camera( - box_detection_array, camera.extrinsic + box_detection_array, camera.extrinsic.transformation_matrix ) corners_norm = np.stack(np.unravel_index(np.arange(len(Corners3DIndex)), [2] * 3), axis=1) @@ -131,7 +131,7 @@ def _transform_annotations_to_camera( boxes: npt.NDArray[np.float32], # sensor2lidar_rotation: npt.NDArray[np.float32], # sensor2lidar_translation: npt.NDArray[np.float32], - extrinsic: npt.NDArray[np.float64], + extrinsic: npt.NDArray[np.float32], ) -> npt.NDArray[np.float32]: """ Helper function to transform bounding boxes into camera frame @@ -144,17 +144,17 @@ def _transform_annotations_to_camera( sensor2lidar_rotation = extrinsic[:3, :3] sensor2lidar_translation = extrinsic[:3, 3] - locs, rots = ( + locs, quaternions = ( boxes[:, BoundingBoxSE3Index.XYZ], - boxes[:, BoundingBoxSE3Index.YAW], + boxes[:, BoundingBoxSE3Index.QUATERNION], ) dims_cam = boxes[ :, [BoundingBoxSE3Index.LENGTH, BoundingBoxSE3Index.HEIGHT, BoundingBoxSE3Index.WIDTH] ] # l, w, h -> l, h, w - rots_cam = np.zeros_like(rots) - for idx, rot in enumerate(rots): - rot = Quaternion(axis=[0, 0, 1], radians=rot) + rots_cam = np.zeros_like(quaternions[..., 0]) + for idx, quaternion in enumerate(quaternions): + rot = Quaternion(array=quaternion) rot = Quaternion(matrix=sensor2lidar_rotation).inverse 
* rot rots_cam[idx] = -rot.yaw_pitch_roll[0] @@ -261,7 +261,7 @@ def _plot_rect_3d_on_img( def _transform_points_to_image( points: npt.NDArray[np.float32], - intrinsic: npt.NDArray[np.float32], + intrinsics: PinholeIntrinsics, image_shape: Optional[Tuple[int, int]] = None, eps: float = 1e-3, ) -> Tuple[npt.NDArray[np.float32], npt.NDArray[np.bool_]]: @@ -276,8 +276,10 @@ def _transform_points_to_image( """ points = points[:, :3] + K = intrinsics.camera_matrix + viewpad = np.eye(4) - viewpad[: intrinsic.shape[0], : intrinsic.shape[1]] = intrinsic + viewpad[: K.shape[0], : K.shape[1]] = K pc_img = np.concatenate([points, np.ones_like(points)[:, :1]], -1) pc_img = viewpad @ pc_img.T diff --git a/src/py123d/visualization/matplotlib/plots.py b/src/py123d/visualization/matplotlib/plots.py index 3779aa7e..cbbdca61 100644 --- a/src/py123d/visualization/matplotlib/plots.py +++ b/src/py123d/visualization/matplotlib/plots.py @@ -23,8 +23,9 @@ def _plot_scene_on_ax(ax: plt.Axes, scene: AbstractScene, iteration: int = 0, ra map_api = scene.get_map_api() point_2d = ego_vehicle_state.bounding_box.center.state_se2.point_2d - add_default_map_on_ax(ax, map_api, point_2d, radius=radius, route_lane_group_ids=route_lane_group_ids) - add_traffic_lights_to_ax(ax, traffic_light_detections, map_api) + if map_api is not None: + add_default_map_on_ax(ax, map_api, point_2d, radius=radius, route_lane_group_ids=route_lane_group_ids) + add_traffic_lights_to_ax(ax, traffic_light_detections, map_api) add_box_detections_to_ax(ax, box_detections) add_ego_vehicle_to_ax(ax, ego_vehicle_state) @@ -60,8 +61,6 @@ def render_scene_animation( output_path = Path(output_path) output_path.mkdir(parents=True, exist_ok=True) - scene.open() - if end_idx is None: end_idx = scene.number_of_iterations end_idx = min(end_idx, scene.number_of_iterations) @@ -78,6 +77,5 @@ def update(i): pbar = tqdm(total=len(frames), desc=f"Rendering {scene.log_name} as {format}") ani = animation.FuncAnimation(fig, update, frames=frames, repeat=False) - ani.save(output_path / f"{scene.log_name}_{scene.token}.{format}", writer="ffmpeg", fps=fps, dpi=dpi) + ani.save(output_path / f"{scene.log_name}_{scene.uuid}.{format}", writer="ffmpeg", fps=fps, dpi=dpi) plt.close(fig) - scene.close() diff --git a/src/py123d/visualization/viser/viser_config.py b/src/py123d/visualization/viser/viser_config.py index f92d53c7..330e4504 100644 --- a/src/py123d/visualization/viser/viser_config.py +++ b/src/py123d/visualization/viser/viser_config.py @@ -3,6 +3,7 @@ from py123d.datatypes.sensors.camera.pinhole_camera import PinholeCameraType from py123d.datatypes.sensors.lidar.lidar import LiDARType +from py123d.visualization.color.color import ELLIS_5 all_camera_types: List[PinholeCameraType] = [ PinholeCameraType.CAM_F0, @@ -41,7 +42,7 @@ class ViserConfig: theme_dark_mode: bool = False theme_show_logo: bool = True theme_show_share_button: bool = True - theme_brand_color: Optional[Tuple[int, int, int]] = None + theme_brand_color: Optional[Tuple[int, int, int]] = ELLIS_5[0].rgb # Map map_visible: bool = True diff --git a/src/py123d/visualization/viser/viser_viewer.py b/src/py123d/visualization/viser/viser_viewer.py index 60458b29..dd85dfad 100644 --- a/src/py123d/visualization/viser/viser_viewer.py +++ b/src/py123d/visualization/viser/viser_viewer.py @@ -78,9 +78,10 @@ def _build_viser_server(viser_config: ViserConfig) -> viser.ViserServer: # href="https://docs.nerf.studio/", # ) # titlebar_theme = TitlebarConfig(buttons=buttons, image=image) + titlebar_theme = None 
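Review note on the `_transform_points_to_image` hunk earlier in this commit: projection follows the standard pinhole model, with the 3x3 camera matrix K padded into a 4x4 "viewpad", points multiplied through, and pixel coordinates obtained by dividing by depth. A worked sketch with assumed example intrinsics (fx = fy = 1000, cx = 800, cy = 450; not values from any dataset):

import numpy as np

K = np.array(
    [
        [1000.0, 0.0, 800.0],
        [0.0, 1000.0, 450.0],
        [0.0, 0.0, 1.0],
    ]
)

# Points already in the camera frame: x right, y down, z forward (depth).
points_cam = np.array([[2.0, -1.0, 10.0], [0.5, 0.2, 4.0]])

viewpad = np.eye(4)
viewpad[: K.shape[0], : K.shape[1]] = K

pc_img = np.concatenate([points_cam, np.ones((len(points_cam), 1))], axis=-1)
pc_img = (viewpad @ pc_img.T).T

eps = 1e-3
in_front = pc_img[:, 2] > eps  # drop points behind (or on) the image plane
uv = pc_img[:, :2] / np.maximum(pc_img[:, 2:3], eps)
print(uv[in_front])  # e.g. (1000.0, 350.0) for the first point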
server.gui.configure_theme( - # titlebar_content=titlebar_theme, + titlebar_content=titlebar_theme, control_layout=viser_config.theme_control_layout, control_width=viser_config.theme_control_width, dark_mode=viser_config.theme_dark_mode, @@ -88,7 +89,6 @@ def _build_viser_server(viser_config: ViserConfig) -> viser.ViserServer: show_share_button=viser_config.theme_show_share_button, brand_color=viser_config.theme_brand_color, ) - return server From d3a711cddcd95fe6800e3407ffb8ecb6cfb1ec40 Mon Sep 17 00:00:00 2001 From: Daniel Dauner Date: Tue, 21 Oct 2025 19:55:00 +0200 Subject: [PATCH 111/145] Adjusting requirements for docs. --- docs/requirements.in | 9 ++++----- docs/requirements.txt | 39 +++++---------------------------------- 2 files changed, 9 insertions(+), 39 deletions(-) diff --git a/docs/requirements.in b/docs/requirements.in index bd564dda..d39cc178 100644 --- a/docs/requirements.in +++ b/docs/requirements.in @@ -1,5 +1,4 @@ -sphinx -sphinx-copybutton -sphinx-rtd-theme -sphinx-autobuild -myst-parser +sphinx==8.0.2 +sphinx-copybutton==0.5.2 +sphinx-rtd-theme==3.0.2 +myst-parser==4.0.1 diff --git a/docs/requirements.txt b/docs/requirements.txt index bf7b4c72..ceb2e009 100644 --- a/docs/requirements.txt +++ b/docs/requirements.txt @@ -1,5 +1,5 @@ # -# This file is autogenerated by pip-compile with Python 3.12 +# This file is autogenerated by pip-compile with Python 3.10 # by the following command: # # pip-compile docs/requirements.in @@ -9,31 +9,19 @@ alabaster==1.0.0 # via sphinx -anyio==4.11.0 - # via - # starlette - # watchfiles babel==2.17.0 # via sphinx certifi==2025.10.5 # via requests charset-normalizer==3.4.4 # via requests -click==8.3.0 - # via uvicorn -colorama==0.4.6 - # via sphinx-autobuild docutils==0.21.2 # via # myst-parser # sphinx # sphinx-rtd-theme -h11==0.16.0 - # via uvicorn idna==3.11 - # via - # anyio - # requests + # via requests imagesize==1.4.1 # via sphinx jinja2==3.1.6 @@ -60,22 +48,15 @@ pyyaml==6.0.3 # via myst-parser requests==2.32.5 # via sphinx -roman-numerals-py==3.1.0 - # via sphinx -sniffio==1.3.1 - # via anyio snowballstemmer==3.0.1 # via sphinx -sphinx==8.2.3 +sphinx==8.0.2 # via # -r docs/requirements.in # myst-parser - # sphinx-autobuild # sphinx-copybutton # sphinx-rtd-theme # sphinxcontrib-jquery -sphinx-autobuild==2025.8.25 - # via -r docs/requirements.in sphinx-copybutton==0.5.2 # via -r docs/requirements.in sphinx-rtd-theme==3.0.2 @@ -94,17 +75,7 @@ sphinxcontrib-qthelp==2.0.0 # via sphinx sphinxcontrib-serializinghtml==2.0.0 # via sphinx -starlette==0.48.0 - # via sphinx-autobuild -typing-extensions==4.15.0 - # via - # anyio - # starlette +tomli==2.3.0 + # via sphinx urllib3==2.5.0 # via requests -uvicorn==0.38.0 - # via sphinx-autobuild -watchfiles==1.1.1 - # via sphinx-autobuild -websockets==15.0.1 - # via sphinx-autobuild From 5861870c38bebcce46bc9f97b3a745e07ae44f86 Mon Sep 17 00:00:00 2001 From: Daniel Dauner Date: Tue, 21 Oct 2025 20:36:50 +0200 Subject: [PATCH 112/145] Update github workflow for docs --- .github/workflows/deploy-docs.yaml | 16 +++++---------- .pre-commit-config.yaml | 2 +- docs/_static/logo_black.png | Bin 0 -> 2486726 bytes docs/_static/logo_white.png | Bin 0 -> 2486726 bytes docs/conf.py | 17 +++++++++++++-- src/py123d/geometry/transform/__init__.py | 24 ++++++++++++++++++++++ 6 files changed, 45 insertions(+), 14 deletions(-) create mode 100644 docs/_static/logo_black.png create mode 100644 docs/_static/logo_white.png diff --git a/.github/workflows/deploy-docs.yaml b/.github/workflows/deploy-docs.yaml index 
abc8f877..3ef75f9a 100644 --- a/.github/workflows/deploy-docs.yaml +++ b/.github/workflows/deploy-docs.yaml @@ -1,4 +1,4 @@ -name: Deploy Sphinx Documentation +name: docs on: push: @@ -24,22 +24,16 @@ jobs: - name: Set up Python uses: actions/setup-python@v4 with: - python-version: '3.12' + python-version: '3.11' - name: Install dependencies run: | - python -m pip install --upgrade pip - pip install sphinx - # Add any other Sphinx extensions you use, for example: - # pip install sphinx-rtd-theme - # pip install sphinxcontrib-napoleon - # Or if you have a requirements.txt: - # pip install -r docs/requirements.txt + pip install uv + uv pip install --system -e ".[docs]" - name: Build Sphinx documentation run: | - cd docs - make html + sphinx-build docs docs/build -b dirhtml - name: Upload artifact uses: actions/upload-pages-artifact@v4 diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index cfcbd787..18eca0ba 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -3,7 +3,7 @@ repos: rev: v5.0.0 hooks: - id: check-added-large-files # prevents giant files from being committed. - args: ['--maxkb=1024'] + args: ['--maxkb=5120'] - id: check-case-conflict # checks for files that would conflict in case-insensitive filesystems. - id: check-merge-conflict # checks for files that contain merge conflict strings. - id: check-yaml # checks yaml files for parseable syntax. diff --git a/docs/_static/logo_black.png b/docs/_static/logo_black.png new file mode 100644 index 0000000000000000000000000000000000000000..6717f5f81f94a4adb7c3042fe6f5332891afecfc GIT binary patch literal 2486726
z@?9c*Tu!buK+C9Nro#!i(ovC|qh}81%`GGl7y+~;Z9`$w@bA;$h6njyE4ovJQ54bf z3Y!}zU_E3?`3Q<^Q2JBnAcDfhMJc4SB+^AGB!Xhc4!5Qdl?aMA-gqPI`5i-sB{=4p z7>*0{5L~x12X1HWLL0R2V=C5atgV*%5=G#{<0n*$c`QJpjCQNsO{XgM@;YJh#jHKaHXT-=THWw`1Tc!_|J|CM=FA5Bo_g^FYJRy=RxlG#50*@_dZW- zb;3+`ROna3wNP11U+te7@5P1b{CX7l^CJ-y?f$e{=}X6s9ZmiERvKzzwooD{8Z?M( zUUXTHV$mX3rpi3%@{2FNXfuLh@4=Fs{U+C~ZbGM8n?C32>ZCRj@Q}dVSMSAg6#9Ww zlgB4_1Vv6&k2<-wsaaJPyjD*W@vP>bXmzYqNCHk1XbtPEgl5Iy^|QH;dA#|Vw#42msf^DqLCM=~qYU%QZ8$I${uCv>d4jnvr}2@XV6l^$8GP zdI5&w&)4AOs|hH=2-&6)RSl zojZ3r<-^vkTg~gQziz`O8PeiXEP1{@;=1jM9mnOnGmed!ZF-nWwVTADfrpZS{{*(I zU11h~F;m@dYaG4@O4ey8N(win218|wOWv#^1tIb=gNo?J+CZiVn>B8RB4>I8B%c%E=C_hDozH6_PS_|Ir`|M zBVGVPyX;W}B8yV^5X*m^4fqVHutAkNeIp=?Qe^Fy z5MAAL*;$c=vgwwZIBjPRtYfNIPPbve#du*ZZ$mM4k^l)fNx+qn>a#t0M|83hm5q8& zSzoN`B!1B4hr0_M6{~UG=*OwXT4Qb<=SoL~%&W&l0wiFO0PcelO>rT_hYpH*u=6sD zIFd#+J13N?5`XUNK-Fj^b@{202ku?@?Qn3LmBwD*7o}Lfe7UVofS6B#-+uee%$PAF z=Hm-Ea3%IVQNaGVc1>yfCgusqFul8X9TJzKTnFPCmXU=LQK6bLjylm)snMj*Sh;v%^HoCmI7 ziFT*j9@v>s6WLLrhyQA*viUIbC4!?()kygS^3n(wtE-No>p$g-65=dnNcz#KQ*_jYmo?)S?qa=3R9%?7I z|3QEq6$)npUiKxLVvK^SP%0#v!k6_Zt`FrzJlkE3rbthZ>^;cEXo~IIx0~tHr^iz* zp^jaJ9gm0F>B@Fow@1U49@_TQJ@N!oHKV1rTIwSKuL%6U@mI6p2;hd z>sG3}SD?ZY#fzBpMr0J2C&rzXaNKz@9?Qc>fCT&@AS2X^IAHDyG$<@^zz&*lkih!~ zzb5os!-SSzaN#aAI=d1nP%x>)>@A&`Vh<~I?_paG+3>}AcXUteSb!;6HymOSe~>^@ z5x@(E^x~cZneVt`BhVmqoZ~ho)I9c9=-|H+Y9ed4j;*}AanU&+d>KsY1GJ-kzaq<8SSDJE{0UR{PN3Y+cxP4Q%|?!eD&dN`Z{&bp^%GG*q}56d;P2s{R7*mmr@$6ysY=u@_j%T%@h1U@@_Q$n`LB;Ky3q7E1)jc`o)6L2BaMJ)4ST7motaw`dVN&sz78)14M$Qp-T zkUy|@;r|xf;6qdr!Gr)4>1Lc@%x55;cVZ-Ta+)JFo|5tujgVY6?q{vfhRV;iJc1jXGY7Y+(F7T}N5;_OrF znet_vAJ$N}nKfKKTitP3lv4ImvNPuf=oDg$O<8FtMW|jw-=PCD9aT z#i1hHq4JXW&v2O>*>5F+V)*dkj<|Qw4C$osIf7#6&Yeb9&$f^@K<9%N^s1`Fbt|I? zPgYe5Wi^`|ZE7}FUqD{lOzI;6s|2=hS!d>dI90uj`n5}%Q@U4I*XyIgGNp=|@pd99 z1n@W?nb2kuAc2SoxX?L~1xvq;$XaF!i~t&Rq+VQzq>!HXU{;_yztA*<59U8FKzQ;l zOpoP)4dF>8CWj06I`N#HNr|a{mP!H#2;k9nAHF=}Fph?>&AkRs2Y|7oBECwv5{KIy zap+4lg{(w;YgXNF_2?Rp`*R|Y0U1+>pig!gTA-%znOzKh=IpSReK{wB;4d5-v2XSA0DY!Q$tZ0FljuAuiCyq2lAM=98)rs99smAO6ku>fj~ zbyKor9+i|!IJ>ID>G2jF_bH^}S{odO4I5@dZCpBKQHo;4ibbSj*|KG3*DklhK6mfl zZDjRq3rSWky4QkURh77In?fOGRMAb<8niaGj_9PTl-fwZGXgt*-(-Gx|58ThyxQ-1MBwTc~4aL++0wj=70&;DaOsEBa6#lgXHqeCp3H+4*=D@dB z47|h$3Pdm4FUWQRqOYFBd9_>EJubXyO7qoU<7R-fm9}tJt|@d z7;JNt!x6A+sH3(e%q&iX6+8OLIMiks#lLV-Isb4&V{dnGI4*XK`5)P)O&im+sk9@z zlcEiySh{qnJH}^QGIQokvuV>N8+QFO`ypUMx2DePpiI5nx_$qNnl@6GYXr8f`UGz_ zvCa$Nnm2*iBOw)gcImHEbMcv(45`Sc2G1W^-#qf%Pv-aSd-K^F*Cq+AcntS<7bfD4 z%Ml(-0wnM!fqH*_OY9eRR3w&vL8ZGA?&4}#g~4<22LL<)&&Ue+6bT}&MY%qNtp~v( z-!f1i5;oR8-<`p4WLQP;%vmA{97X{5&F^t*n1me<9=6ks4H8=M9PA9TzApf$ZTv@L){HiAxBPcR5GQ5tU*t2JkdFiE>)awcgw?KS| zppelu>Y+JR>a{R+nyF8DW}}{{lLV|0*!lZr^WE&H)b-b@R@|K0hY=L{fvs3B&5RjT z$K3PZW%=!nXIotyd^+Uc@hIW3BtQaA5s>I_7t+GKMm)QI3PyWyEI?i^3I6R$V|Y*< z05Ri6yON9mvoZ=SW}{O3z-DmYl@6|zsa+NHo&=mC@Gz{A$b}P~vg!|Gj>N}6h(ydW zfq?qR;uN8J1^l!bGA*OieTb%zi~M-)I&a;&wQ1H&-Z9)tb?VfK$e_J@_nIY3mPC{{ z$V_j(`KH;hVS|k#CHlvQ7xe4kd=YH`?FU?F%z?o>+M2141pFcJ&8)}Go}Gw0QBD#; zQ7nzGDxt7mFsi;OQC$7DCdFNiK^Yv(5(+%GlYoB&TuEWllLY zN7dUS;;_uUDHY;yhKG7jpxKlewK*05)rj^giB=c~aphT;_vErNg?`>beEH3P|54Ty z{$iR(5K`bKK%6kV0;G&S{(?4qrf`bfYl0=swoX1;s(zv?zutDJ5|os5@j zrjP`xR4iuBJSDPD^pHyO-(lsRnSW8-N&+Mh00CDzDr9bdCN~MV(s+~ZCLUVlFoAB4 z*tQRLEO*2fnn40200QWrSdI^ww%^6n8tqgkxM)}+Y+~m`^wj0Vj;xhBdfjrw-MD~c z0b-{UE~2h5P<_Ngt5&T{^XAQ+^IbY!_wU~y)-G2gC}z!?W!A1;8`l09GTZ|;!Opq) zQ0b=;VieE309V6LozcNlvTitpS-X=bFr?xTBK(nx1iqQ|e`eQq>1fwY7oU-7(u?uE zB^=d@&ZuvSr&|fFxDF40ottMkC%B&kNWeA$S9&WLR$;rQW7n_0x>Rho6pQUJ9+ip& 
z@SY+KHGLfM{b&49c5BF@BVcF-2_%@n11NuXf^7&@{JEiaq}q1=>yM(w@$i!{3Y#a7 zPhk{?w$etyv7oHOhYv4kcf$5bZ$%{YB`sUF%*il{7hZV520NMH<)1bbYwE;xTNnCH z*VHMxD%NadGFo@nR!e;(;1z-0JAOCcy#0i_%@QJT#wiS=hy-laN)mEXE0V0y=4prn z)ED%L)(GYgjKItPy`w>VV*vs)l?BB~ph}#_YaG52@76ms6j6s20eJP$AwzbA#sh)FbfSJO44ewrx>&eBx=Dwju-6ZFNE6MfM_2uEh&8 zZ*(qHGHoD%_z0AZ?{Jl)Ip32CMr%sSSkp|!Bw&xg2z#od*8FAYBQpP28RL55nHM0m zZtfug5{QNX+OQTO|J~6X6Hs1hvl<&vSxy(jKc5j+5B{iELe}#l%pOZNiKcLVbb7X9 zH^<^&xMN(6rpU2-XO*RBik)!K`9)N4 z-O7;c(b|SP z;SZ-QETbO2-nQQbjHYnyWT!oW6Zrn!|Ct@X=?4K`Fsi;ORl>?T3xP8VvcOA5H!x34 z|HbUy6Mj*pDDAodFV8RFp0BPrN;{aH1V|uu0_9>qRPD$OYMQ9X1_5?d*q}Lvjah>rmN%WT1an(LuO)~RqQ)rT?C={nOTqk^`DQ4|pn#|bPE6cz+u^k%RQMhhpfylG8ODtElo@vnDm9^Qly`w%7h>XC#J-f`;vmT2qa}>EM zlua|^hSrVZh@{Q>-k7ZuV;&1oyLxdmv{y}4I8(9?E=ScIn)|D2r!o>CfuISv5LO`} zxlHa7aG^&_=rF9p-E+hl%kalfLg&>NB8Jz>58s^d zEjAZKPPlWC7{K0&NGKU|DY7x7qEMk@z@b}Y$Rf|xv}se*qD70y4`9xS2z>bAhql5f z{_V-jmkh5d5fQ8?GPUh@zA0STrBORk7|*OEphV#N*-xA8Th=LQJE-Q|;TfiM$)X20 z@LN;_E*YI^iWVsp)iDufz8(z$Ruo5AKC_Vk3B*Uhg<bD>Of#GKq_^u^yUE0g)= zF2v>U?5GG+J2*kgBg^?=vcl^HFtI%a$%`#t&miMLb~Z*Dh%W^{5e# zicrV4#tz1lg`&glB;Y=QGzU!F;eZV^AvA#op?3wkXIr49u`urlpnc|87(LoC!`V6E zScT9W5(q^AZDq5tZ%(NF3ED0rVTUHDY_(#$4102l##jJ$RD`b*>0y)33R_7QsMusn zxt3nM5OjjB?ppeC^hh*?EKI?qOW>oAJ~As-tk6~ZhqlLYzAm?+SXt-g(5F6nuuY$H z8BL*FQB>z2fgeA3&TQMXM&0mpPR%go%DB*$rtVsx3NJsap(#?>O7O#tco(puI8Z<= zj094XfD55&jHqz?+;gR)V!hj#Xk8)+;59_AAc#Tuqbr-9mPsn2-=ogY8*6pgU}&N~{lf&2IEHDA51 zA59^vW;2>1>gve%fXq52&43fD=esY?jcsv!k2rPka1tPafC=CoN&frENWDZSMpPua z211mNbEFz}*_`G$M9KMX)}L20-(tAYQvx@^Zr4URzK#Fln?2=8vq>N&3E-NQ6|3hu z=6okSk83+-Q()$>qoQEcvK|E^C<+zo2oCBE5=R_yglXBbWx+MTy-^eR?6c3z;>C-j zJ}!a-{)?OVvIz1eI*XNcWDZ176y*r-L{~1$Pb`7?bDlT9ZTLmqi1EYfnM&ndno?NZ z#XuEac2)yKd|4S_cyKB0Ks{n%BtQaj5O5)aLd-urieh)Qwo}|23os>%cRJHNjuTG% z(6!nMsV_MQpcSt_Jh{?AZ>BnE6HOuk5(rP=mhf2u%_0Bhh6GxoLxr)oB1C22;U^&# z6GQCQ*z8kCg_U=9hE!;rMu+Qo?z!g<-x&WESz*YBkP5kO)fbZ~S+0sXqVwSRDN+Lo zcuYXTDCW(0NZo^y2%|V}M25QFR94ueereOMi;Rp_Pcp=(o4QsiBmokzNx(|iWef&$ z(SaDC1$4-<0hJT3nElAGAzUWzPdx&7_hKg+2EneXYPoFDuo0Y?bDIpB^e z?u`X-1WpJuUdHxsL+ne`=3$8{IML;@qat(-T#Q{6Lhp`y&s`Xz?!%%KGRH&DxV_Dz zKtd{7wQ7}TD}TmI;EON5FpCx~idRj3$3Knpbzy!x9NTsY>`@Pel8}lbY3Yu+N^?jc zSON<_dD(1T|Fe3bV+YkWl`Al$LL2O)u?>u+r6jJ?ww3xwfCTIj!28=SX#dGx2@z}d z;XN{9XOb+VD;*W}&W!ZJQ;UqUmud+nKG6llnX*khOoGm1v6eVuj6l zBQrdu;8kI4!HSR-(+HO-MJmQjlDMV6Jhf+T?Zxy%pN zDabMlD9g$Owgr^OU5*pL`B@z%z6Gi8sHqP+Y8TBS0TPIefJ{f?QY4?W!=~hvPwc43 z|AduJ3Y!rW|L~!=9WMhCLD4IJM6qve(xi!L-MV$`3V39`1m?|~XMX(g$9(t2xp6Aa z*YD!g5yaurF`<6l9@TT4DO#*}5cabS5^$ElqEBBno7XH;w{P^oI;MJMMkuL+%~p8D zxQ4bODXz6;Gxa8h!1Q-3l!$OFKw=DJshR}-4rS6=A)^O_PVzoE0PmGDjVP0Q1mq7| zX-CXr)Quw*;er`xm&k-2??4*6U{5|a%*VEO^V!Z#BtQb55Wu_1PMG?*W9ANa%$`8Z zab;DtK%9-F5Ih7WnnI>}vX!<#u`kgSH^Nrw#Zyg;rclNCr!q_HQOKXzTmLM?(J#1e zOTjet2#VsRE0|^{40qIPnneP^5Xj2fZ|2Q>SiLZbpqO~NtApay-A#zXmQBiLURX&?kUvRia}j;<}X%ip~&BSZzJ+KnXuUW}|#g;bR?l z%mws~`AGc(7wD^`Mjr{_UeyJ5%TxiC9k%~I+}k4Q@SQXH*gcMp1(=d`gTsJn8VMwd zz(Xi_5=6Z*G)9Mx#!ay-?l)UwJl@O0*jsUUwaLoa6%XI6wnaiJHmhl~qzD&78>sH1 zt-fKyhR%jmz-N=5o^JN-+h_Ld!5f7~>DzC=HQ#^#y$y?};e7qZhGJ!%6QEE1pSOfm zq@|ZsHkIl~z%K$zKA&MW{<=ur=+pYwHZ`g+WJw)t9)*{WYha_dLQ1(B;!-PD9?$uc z1V|t+0ld>mYcN}c^MXn3-$Na8pP>{ERYoFA!T)h-9F+DLnJ_*r5jI3v<|^d-cZ7MF zjRZ)*VFGwJT8vxIEST8C5i_Noz6<0PN9+m240cZB!i*DAVq9%5N+F#Rcjqe7<*!p< zT)sVrz5DFI(5MX4yL;Cm@i;JQ)F{kFk#7<4sEzB`B1MXr#*G`Bx^?TCV#WSg$#wJQ z&1S)Z1!nEqwQ=bV>fmRddB#>zf3>ApOE1nR=}EX$OJ9zj^it(ai>{+`e)5w9Qj-8i zi+wpmf8O1+qJ_+a)9R-t(}2Bh+q}H#(xH->{h9i_t9@|)ZI74x&jWUe#gPCBkN^oN z5WqdWIn*>$P^G0p=FnkEQ35z0<;74M|GRpC;7+{%Naz?736MZ?5RgB-u5?rg(Gwvl z)ltF4DWuvDCh4(PABlZXz40Z2LMA5dm&^1PDX6Y>6j4R 
z<;#~hM<0E($;imiD$1Ik`Sa%+iK4KOUcpP(Tnl=&Rh|ol>dV8nI%%v)D_%<5R_Y@G zuLvyt@=dehXZ-=UBl^`gb!%WQe5SkvicTHWO=B!TUcG<*o;<$MpWl_gaKQ>?pj1Z! zBtQZI5^yP+Vk`V+M@4{@?@CeVph$;Y0(-uJ_|QoqBN=6S7$y=Rf#f0ZDqLCbkUQNS zvLz5x*in&<84pBhk(7s`tt8PD_t;Xdtal3Z6;jq|MvffmTm*%z=v|>gg^;c`Z{FOL zD&=-#_0vy39n$_dcia-^@zxK=bt`kl$h?87saT0JrbV~YRCQ7r3HV6BMg&EX!iCI4 zMoCQEUDT1EgdT6^kit z+IMxWjs>u2dvdGB=~x6Z242eOn14WIrVAg^wK#9L`Cuq*BmokLi~z1_Sy1c6$Z`jg zt10e}H3KQhkwVx}aj+sL;H%AOiofH8_hCJXqrluLp}n!y5~5T7 zNp#tn+gAc;a4QFqwwLcAk3s5rRADrg1fn4DToie2XO;h0GUby$>BW2Q z!sPk!uMq%`t#njO$-2QGm^qf>X({98FU|RF;Zu4k?zN!ThDxbHxiVDKh!G>43#pJf zpJe{0aH&k0GU2i$Y`?`tDQ-=eQxO)7>lQad8zB{?P0JI{jL-^ZBZ1^4Fn9X>>dr|> z#l>f2s_Uh~@C1%)SIKlbDs&iOcnu+Q48r~R$dG%vg9J!`1W3R|0v9@F6niTis}N0d z@SY%H6URaxh2CEvGTlTI%`yVem4z}c!iYZ0;XeU*!Xik31o9&=8ynZ(Q0X(XA?&Pru%zZHF{I+=6G4%-_I70wh2J1rsIl=2-FsNwSYQekuiEd)EPOMXhxsy%#A` z<lfl9`jtqUqnNTY1Jw2bsg`lvKwhU~&xn>&jZm zMa(^x%p{Anf{qN30Wv@a9Ap3=f%Rb4m5!M<7at@)Ic8HjnsYlcmDfx~I_5joJjvmW z?AxTuBIA<*zZe+pkhvnIh=q&oYy~=JvV_Furh=l8^hHJ?W31|R!No%<>ej7m^5>WF zlf0d)GKxNZ`q(;@LgX|~JG5IVynD5c%wM38X>{r}+OWhU1Ku(4?ckTJ*lcMq+={Jn z0BzgiAk(nEtdvxDm*F{GQyrCn))+W;`;Av*yB5G&iUcMDWWW&yq~Gt_{?D;kF6uz} zH*mlP-%R)&26wic&*a49BwP6o!(ZLiidyqCP@yLKz4E#R(2eWXBzGUmP6DpV^50!n4>0Dtz_TZ93 zDl9nJwE7+kxP&AFWPl8iffN|PH{bDyF8yfNTo2a_N>h8}{Y)2}flUb74&ynCf=Age zWS@bbV^K(kY1^h%yZvtR0vR9!?lXXAz)V;<#}P{_<6UlZ4BL@xT3AKzOwMvK1cfVV ziaT=&lIks=)D*HosrrPK?`nz}GiEq>D21pgcI=RY%+y^b9>P1-Q56BvZl#{8t10s3 zFJO*5jcN+Tq9T;13``tiM+ErKD{6bnNP3%nPU}kMu-aC(;JOOW>l*3J9&<+q$N(Ae zhXFj3MLF?0Jf^9gazEkoLl#TXkpY(&kdN&vP&cit4s2T%^B@IgUwC``U+kJ$l!t+=w(*$J!#5H&^f=FI~DhZm%1(V~G5 zsMxe=lbJt%et5w7D20T%wp786X2> zz<&n*i7Xu9U^d=y&?cHh2JXhVPG8bM2FO6H85rkK?wdGdi#1bXH&KCAByw2PUDg;i zW(BZcv52n;{Q$t!>p-VYoeX%^v@>NaIgcV=zF=8(^*oBcef!!{c07y@nCl7(sZjT& zToLvG5Cz3iZDr>-Ix-M%1||-{c@!2N1OK{;3W`K()+0Y&*(`yz0Es~78TPz3l}w!) 
z#q$jJXMoI|odbsaE7-N@7;qZ+FE;o;86X2>AR-3vESAl06McqG1sb$Wsg->N4YDd4q8y_!V}p2Z8z2T4U;aL`_x z2RZ!VxZ{q~hD`}iWE4RUrEpb7v258gGj80tlz7M?@-MhPIEQ;KT|O7aG}1PxUi0%z zsq!`-2ig`Bj|}KDuzbN3Gk5YQ>cA&9EoYh^SymmFfO-t%&v$^abi%o;4EeX7r9>hF zWPl8ifg~At4e?4O8P{gC5An^s)Mf&EuxHJE|JSJ20_?GZzr14LY1B{ky@tJFfX0#m zGVm7zv+!f(Uw=gWf1QZa4<-3nO;lhNNg=Rma~}ecRJs4zo8x$BrGn zmQ=WUC`G@1{miy)+x8|~=P!{|SUK%N$GpE${`2=Qr5^|6%MVG#^-5@jAp`z0FzG`m z0WC->?!1PQ3Lk1kyVI+fTGfl$F!e_CyDHlNCMFpm17v^<b}pS!hhO;%!FZQilI@>+mcO;mu( zYv5~SQK{d5uiAM&!V&IQOGtHdu$Ee#Z2T~m0_&{}B0_rm$s*2k$ucZ%c zOc8+opABGQk^wS62FO6}450rks|8%04EPjdKwssa1KuSAp)eqWkq@Ch-W5t}OoR-O z0iPI{=!mf;V8=m@*ka9$*i25KgC-|A+I;Ewq@Z{ZToll*RW2$h>eZ_k_)rR06%_sZ z_cvR&%66^l?oo8ad{9uROKn9#(d5jV)CnbE+A#3aao;?lycWQHZYDxaVdIf-_jR(n zAe}D^bU34`sZph<4RdcsU#y}HU}BO1GC&5%K=usaxhvm%J>g4cA0qOru1=stP7DnQ zl>ynV;97heXNH<86C(p;z!wHwnWzxH(tgXQ*i2NQFt3KsfVbsDBIv_;6jG@>YLif< zN)>bb@yBb!ri3R7ifYxW1;W_4aif_(e}15_VFvo+k3Y=l(W7l7ITN6LZ2)Ux;^XQx z;He*UQm1KKQ>IcaO;d?Q2E1cn)sop}*2EF&b~ib^jA_}doH{N6?J-cWzyapA%d9*T z*dz*P=+1o4X|yHa8m-NyTfP(DeZ9l;t(5u2ZK@ zf#nOT3FiWqsH9@Y4)+uW0|pE*TefV;C6`Ix5=n(DA93B?r*0tj^*ic}8`WV6NCtdk zVA9antjH+tyf*FoPQDd&AbT%8r<$o=$%>rfHuS^F2Qq?z$N(8217sj226nIXk?%cM z-Q)NMk$raP$UsyK$cebep&D|MEUFSgu?s!aQ@Jz5K3g#(MAvN`-Zi};B^Q&y1( zTMmc461Lggh5l*0=zNe=$mr3<+T|pY3KtKhNJLWM;-M64)~qq3Mvc-gF2S1(|34eR znwa>|Y7M*-H8BONcH`4bxoSA)hmH)yn1MCR=9`({=zpIztY6BsX`*xZu)$C1Bb=knz%yLn+iZx6e{Lm*e;AS^jqUrxgW-97>Tte}1R( zUB7<)z^abv)2AC(6%?B`ZL-1Z6L`@2prAl~?c2N5H{3(QV1r*`k^!$65H-b) z9oyAyzWcg52F;AlQwACDSyvrLf z-h=NE_ayABLvdrEG&1}u{Fi7qjpCMh=9dhRfrJ>Ckx*!RfvKXf$8BiHmm?)B%K~@j zz?VZQWY+BxZPqJSu53;`QGKC%N@f->UOb^gDHbkVm{NK+BClP$){GdTziB*dT>#(1 zhRvFo=qSp86fHF|1*>MGlTC#h4YXm2M+UrNVBN|^X8PAd)NO81x1>4k1Rj#CPHc#P zg$v~~w_K88BSjeqppwD{v#DT~s#I!;rL_R5n8mncfDEL}z$-v#k}`@tfR8arVMQu# z&txK}IRnQae-rTy`V71rtVKr#$N(AGmjPEMDyr;jSi1e4)`|+eWMrAY&GRUHQd7wO z{_5vZbm-8*IW>SJf1Q1`RTDvbO~%YKkouaJ3;bwE+dM@mV+7fF&jw z@QQ&+Lti&Lwry3n`QO)2L7`4+1b|mxc(6IBd|?|Y{}b=#k~V;eNe0LO8Hh6jc(&e- z{9ofR2XYRD&2Mstvrf|-XF$eB{tf>#+R+J`w$9Bz7ONpMOxrfC+G*n2#LC>+*(N-( z$v|2#;L2=8+HbI&*0U983L>efYq3fAawtU&aBzt>50xraGAEsMk~VBgcec&*7dkm(f!^7RH>SZP39-v@{hn8fwE5j|_Olz`B)-&D1aZtJ_?^ zW(jlVNt9Hm6B_}bNGopam|-L30|9!A4PauD0Wv@a;>rN}&1WF*&pDp|9#}0k$#ENL zUc4E|K=#L>Jv|JszQQx!)H3&Ez-tCvIh>*po=qZCPg|F^lM^U7kx|@ZvE+P`QOKbb zg;L^T%oi`s%VjNqiw9JsgQQ~c;K647`t>OlT@H~&QeoxfiyYER_VR(F z6k^whAAYD^T!M%Fr%hKIz?zt-uZ@9sx+bPzRX_X$Q>l*2d-Q7On0RDhe+D+JUS_6# z@xlH#QoK^DdNFhE$(2$>iFfpG7eDY&E^7fIC8t=?{N{!Z(v?tma=Pmc>Zk-H17v^< z#Eb#-pXDVi9}p!S&-z{XeqQFdjWmx8WMx1Ot@s{oOvaYz$N(9LHv>zN{hf|bkk(s1 zrR78gUL-ODey_y}Bx;H`EaGcI-w!bLrI790w|DMPiqfS^2e!%8Ln&mEV%<6`^Ivk> zyOq-}G%G8xI>W0k#cpyIcNkGFC&GEi!1p8HGTXN33#snJp%nS4pzyFpsGMTS2GR(D0bWT3Y0X!Vk-YXOqU z595T-fJ^|shj#bu?s2B@EntddfDA;=0LIg{!HAWS8}g?CoTzZ5AQCa-Vg#`Yu1{E@~_LdD`O-ywB8UgQY zO-#Y6ddRV+a^0rdu*4$+UNNw7&2sbW_&(}3*Q{2|TySb-bzA~Q&p^rI1Hb~E1Ei_}s4C7+QdDyUFX7%dTHfVho`L&|eP&QUJ167w)?A*D-eDuaY zluacZ8SsmN6$__AQlYP``oAk{nF9IMo2_5urm?j*b;>Yr_L*l^uidJh%X`pY>w!*JmaV27J&E2fF}&d-oz8o{?0|6p5}?o zG?NUF0X+s}-={tRUf zd>e@^n)ZziSYoCp0|g#$*PHtSBvH22DitvoogVCPpd{cK#Q_FNl_+4Y!HL?>cb}zg zjc9SMf_MI1rJZr&kpVJ522x<)1;p)?Lj16zeuadd3@b6FLk2=%K*pM6vf>KZmyS3C`fo&$e|Q+7=tY*3W_&vfh*(w2YBiV3OSU* zMFquQ+)bG>#VlVg#k9ZMxN)PIIB}wr3W^aUM%YqNynqhW+WpyZ?#hF(D6O7H&M{1b z$bd2f|953=Q>Y-Dtun37!HJThL@_JtT0}{q-cNCkKpI2_$iQv}DlMNef@=XZsso;# zqLlYRA}lKI%keED8+y}`fplU(_JA6Rw%9hEGR3@*0Wy#$1FlR|l*rRkj{~IJ2t#|6 zg-sdG6CSB4WObsdt5BhWY2CWDDyBBr_U+rvgb5SOzx&#Q4xX$^k+w@m2FQRX3{L+&nt4c$IE1Z43Ggbu$uw&m1R=mQBMf@7rvKgdSYvw%;X^vh;J(W@VSl|0)3!~kc1nYv#18QH4HMsEKeJ{H9H|lE(*mP75cC|liOjm!`0%Y#&oI}2NnGB>C 
z0|%8aY%V*uT6$%QxyhA*a%BpcE6x`cgu1H%#7ops2}lOW02xRF2GDW77}=1$hkaPp z^Be5EE)8f;*DUl&*NjD!FWGmz3)-m6Ceo1sGLYU3aJC|Z@=c4`3cM7g!*-uVzLo&n z7a4`TAl0YX+O=!v^q~|M%ib#V7himl<6#;pCj1I!*ZxQmBaG;B0w5h3AOjvTaNDId zDWmYHytGV}axYR?zQxtuE$WB_%$SMl+)1TsJdB4eO@*+S+@JFDp<%M%k0#z4h#f2wD?^qm(hLhw*c@jf^d zgId^AmyQgO0Wv@a@Ju`hEXb;NU%u7v!ZZ9!mYll`pnT+Tipx;9`s4p|;bkiZ8C;jc z7XBLWDzp2V>}gaSUYwlNT5#7d{*xU8vVx-*cs&CYkCg!R8y0$oYXP#Oir2_MS}@>> zqC%e2Y3qFH{Nz;t_vaNLh5O#9De!`*2(fwW}+-*e|9bMM3ZG6>NZ-yOajo{_foCTPY`R)Y{$d_oTO zvMeIVq=xuXyS@CDiHwTy2f@qahy1PrUm0Gm#lgu$kDRRC8vIU-Lx!10G7vKcT$!Bs zYf?Ea>Gyte0xtnkPc*bxI#1yxv(X~HCiKCYV1iY)Y*{EMPF&oi!e46vsHV{7SnxJ& z+GN(QU8@Z(QZ(n)6uUFsSUdB?BLif>9s>uKE@-a4(8}2a_KYU7Dg#w27BLr}Rn>GK zus{_TVCsDtZ3Br%2FL&zu+0FzaW6y~z2SXXAto!XuC>hz@uOz|VhpmJ zd^r4JyL}`4-yGhTNjR7q*p)=c;mGsR;7VP38rTX3Bm-o?1_PX^u)*p+nBGrRAU`ro zZ_^F239gK--jS8lvkMhU$Nqyd^GU#M+qNx_Y+2Mlz5H1GTCb zHzkUtL$@K3T!#?0b^F1u|Fsq%gyb}ns8LlGP#TlDv$H1bE-dv9jk<*dBm-oC3|M3U zqnsB3z8AbNZ{9Vq^+JrysxQ5=NHd{BWB_$zC6fDWx8IdA>bdMI)e8Q!-M;Rw8|m>6 z)_R%&$m0pfyDuwW(}Q{Df(+PYKwVFsq`7H5pq38%txQ9~%4UnU+&^pqTfkkWjX8b# zbo1SJ-)X}V4-6D9UfjmX? znrkEbKI~IyBj3a%17v^<7z}211Pwk-Ol*g}7h?eSSBH!5_mk%0lPJTqZPQB5v7~d2 z0o0%M@I&EmgRcksn!vvd@5{=&yeO4z1K!9h5bu)#GT;>hu1pZ5!&rWf1@C?4@KT$L z+>OdHhpFGV0i8$N(v8%u{1ag&{!?Gc(z|zWE&GU*g#pwnQ?q8xELYTi-V1CWv@pZf za!epHKn6l#;HfV1nxTs;1Mdu6V3w}1B76QmuADO8WPl9Bn*sFMF9riW;eAn0NP)?z z*~8=q9T`Z40o0@K;ALT`jJL>)(l~hE-Q{T4xB8w5+D`_^z~2nGGPbt$Z;K-TF=lMf zo9wLx@P49VclkVIv7Cwk`yPw69z<4K~;q(g{5q!oSgmgdvlagcftLRe?PYYOiVIB z2FQTR3}7(xGT7GxKEKO$?qgdw^x>c1=NkVc11T|pI<*D9KYS|yod7=~CBWz-Kaci( zT=Ztp0y01b6c}(tMd8pyg#xSlK%~Xw1YTkjkgt*Z>+y1_rU_^f>n$7Dy?XVs z0ULYFlTSX`#+FaQ>wi#Zec0Rk)!0`865tF2=Uwr`M0;xioXMuDAy0RuszQ}ePryV? zv1GXwHN`vV)1`IC+mj-unPh+rgvbE;?6Rn+JG@Vm6raN0n?t0DNyU%>^a;LzKLc5p zt)M>glmhuC?}2tM=Wx)G0W#n>15{LmP=0AKQL&q+2k`$+OPtB9*}ayqRblZWmz9X) zRY3tJ=Tl6YG)Wsa_ITyXmp5(Nw6VcM20-Pb$7zT9`X~%s?X;0Jjtr22y%_j>(n|B? z_dK6sFH+L{4~q(lO`z_jG^dZ*BLif>n1N@PTbd4Q0Wx=XwosfH!MqxaO`$Em%FM#_OYQw~5+&7aJk^wT12LqH<5pZVXYogaUD9685*RAfN{T#H2(L${~7RX16mz3U%q^%R;^m5MT-{Z^wUo_ty{M? z4H`5sMT*E4UT(RIF1pB;wBml$Sy!fgb0htqx!vSFGC&60W8j%?v)nU^7FlE9UC1aF zFO!J{b@v+j6^qnS2}lOW02y$U0rbspglS#iefkbx276D(^Ln|Xw9>3JW&riBKeBfy z{A2LGNiT0in?E&;i-%c@nt{R>EF z%OMpatr!=CutdVloH^4AX+?no1x)ki&CTJ5A8tyQE^P`HENF@sFTU%JIQZa$a}5*rO;J+FmQ)s8Enr@10aqJx5CD1li8iamBLfy0821g86c*`q0$LOl z|Lq~$kE=WBT+1LGUFZzlb?9qdxfURF+L<63*v$YYDW1a5hdd)h78PBO@5>RM*-dlP zoB`CyweVLWo0r3H_9VO4<2i7IC$`c|GC&6QX26w+idB1?m0o{2z0x`vs_~+lga^N< zD%vI!KR}!-fd2&2vGU~}ApJyw$fS*I{iN!8_3E`tl>i=8aW%k7mMj@aEm5~_T{D0F ze6wT64vl10Fho@`cI;TQZQC{#yeuGcK1EN|S#{tnX{7WMw(W$Cvbsd}9aVK_)~jh= zKBJ{7Cc(&nZw!33V6nNRzrNt`xwmJT4_<2O8y&PaAOk&zEHsOjT3N93CLa601vG=f z$N(82170$KzO8&Ly$pYghZ(pJ&*^tOw3{Z!gaOpg_t3=VAiJ{Q&5gms%f4#*R^Dp? 
z^26R2;M>4E>@s(DcGxNovF{`5_!FJl|D60_8Y)dQdw_PwA_;`bhwBln8 ztgIL!t@!p^kz4xfqVx8~w@G%guxz*}l|?bpUY7)*a?$dxugy<3JW^0vR9!WWX8& z7)6y4$!_pI&Q!dF@5m>tkwaiIpu_;`sjSRw4nIQ)(k9HwcrKi4GXY|g0Wx5P0aqp` z(ppN9mXi~BNy*+Z`fEn_)bq>nVyVC9Pp5x9n7noK4=;fogYr5fK%^DmJo#iS#az95 zbzoV7NGtN^mv0DnT!#)FY#mtf0P3&PGAh(3sZftx&WVaWTzK`% zrjkNNw0$|G;!D^%TD#k2r36sxVIWDcpexs zh76#}u0Xb1!++;^_UpjDD;&3x=8*w1u!n)l_*r<*pOOCJL`9_ZCz&{2P@<}^dD7P< z=zJcMque*zo;`aysic@OV@BXZCqzk6pnzNA0y(6@lDgwT)LFMOyHCa3{V`-k zN%878>hmA_SkFJnfcp&GcFd7hjC}qbAI@vClJ&F%6$Qoo#a0v)UGbb>n3kk5Q)GY) z#GV24vx|d-q44K>NX9VO+zHQUXAiSWgwtz^&A7>fwE&4^iD4690QFc_sGSb~a{|$# z2znQu8*Xju5CscNlMML5Kn;h?ZgR)Wgce(-x4(^C)6d zlbBI55HAKW*jNfVmPK->c$o7~VDm*7+?D4v9U1VJ0o37TF#cqCD;*gnV&t2=y%H8- z$N(9z%Ro)L@Cl)dTFeFaMB@`rv$_^Q_Sg#5W$v`HxIgDS;B94FTG0tT(*`+P;M5%} z?7MnMg-9!8FQ_1{cJ12ENh{=#ikM3)9t%=-iG-0uDqjCqeG()Q6Bw2ZIKjZJmJX?S zrYjGraH4=yG@{o~tCEU&DVoRVWPl8i0p}RN_h4BVH3t4f=ZwqSpsx|+TzofM(bDCe zJVTHHT?SB>mjdw|_%*u7s%ZD%xp9ChBEiT28L+`Xh7HgpF@tS%pCowH(L$f0h#JiX`06WBYPjV#ZQ7KOoT6q;mu5(f5jn+4C!J)2)Vcum zVd+Lc*r`5~Fm&2%Gizm9%j0a+nY70|;h$HazGaxUZCbTU`^@f>!4@^DT9Hk1IixYz%Eer;EZu4d+_NlU6P65+0X+sX^q^8g+JV?hQbKS%a@tQ+ z;Kd|Lis2T^p*=dK@^bOPJpdzjY8#p^N{WI73u;r75}rjRMJDR3)0<$T-O9uF@stS6 zCh~SDDPH~N$87HK5*dgW1GhA5WF^DTzCF_lE0JIdZ{BP~@e#zyB!z3LiprHMn-(ov1TjC+FutfN`XXcMC!c)?RmJQ-xY9e( zO2hE!#lT4!Rn3u=WZ|c-8}<1z^Zl=EU%IS@gC+`!7wsu1o(d-yOoa@P0WuH{1DKeo zj+Dm19~MrEs;T?{G?|!KsftQ4GTLdnh z0bD8NBOEd0H%IKSW=1R~D$ohDt*W>XFBv<_Q}v79imIY_@7@MH>sy;5qMT2$ZQHg$ zD79+UGOnpAI(F=6>wJm_P-mTP|EN!*s(2z$;Uy4AR28p%GdY163`z#llYxI6V`tIl zvv1EzPr}kXSAB;6W=l=+HlF8m)0{qLj|`B37%_l8bWLP;JbXP5vn`VpZSY*?B!!2C zN0Sp{0QFo3D6WQ=)3sf5vcthut{Fvp$N(AmlL5TztK!#zj_|R_5j(7z;q>67a;|ME zDGq?Gg73V;``OAwjVLL;_~Hxa^1o%vmV}fPGC|>z6D7roC!T1-q(cFEkqzKfF)zQl z(efPD0`vj))Ku~JjX!wmtT0p+nLE9BLhQ>9|Be*{Eo)XYjVwqjMt-)G(hARN()JxV zl%mIM8&-=pnUV?{%o39fkby`T!03g{P<#q6GZwx%*#uQ)DpvSrIPE0^$ufZY{yXAc zmu!5?@vg!%B~F8Mmgyus8Bk@Qp(}r$>Di;vG{Ug+JmrLmKCTv zSLGBlXU_CqPLVI4l|IXZu6KseZnvY7KBihx0VZ;a*T1ElLYr6O`NzONj!k$LH7pCT5}x98vS$VL7Y?NEC|U$p9HJ=tCb48bnS}%|kk* z6U<2p4-1Yar_KQSC4+(TZt6(t2pK?Zt&U1SGC&427;t63C&|lPjSRVrVP*8w;S5_D zpx>QekyhOAaNhqm4KL^K@CQCub=`sm3(V)Af36FvfF{z4Idg)IlN2aWz|^i?TOn0F zh$>a8m=jJoK@V^bl5D2iZqJ|2{^||OrfFL@Xv!=z=Z`hou*4$+UNO+BW_8o>pz`WA zi<07pDLkY?omdNiqM)#ND24jIeHJJnBpDzBWFSulFfkz;C4K^5DbK*}1(1E7PR2JN zq|0<(9cG*ixXpk|6BX)XW3EvjtBDF*1N2!6^&E6! 
zn`Qai<)58l+)ix+-+%voCl9HZJ$rUwRfU|UU8qnYZE8}&>(HTttlUZoBZtVHf$Q{h z0PWVEs$wEl6*<%kUXBq1w^=x(Lbd{auDg}(gJM)N;bgTh)D*MkueXudyLf)ju>nj> zGC&5%K$r}muiOObje)NaX1Ypg$pSS|PjHe#iBQ7$$pG5GQds`5gVvwz`uDhloY5pQ z5C#J-jqP%BB8(CbISng=@>%jAN)6{7Ue3}{{V*3pFL|YP<&@R}h^peV&pyi~iD+(# zszN3!f;bmd71gU(x2dYQAP6rJhv|j%)gR6Sf1PROuA-_U;<9D(5i@XdEjy};5ug5U zew@no6>;(An!KnfUU)ayyD?Xw{cp;76jW2}&kL`R0W#nj0~kO&9wv{4FXx%*V(ypl zt?>=Wb0|D4Jeq8UftP{$s}Cv=U)p6|ZQLaBwjrLC54I%j zu@cSc)zda|ynBFNeXFA^ww{TK?90m~6LzaBDSGwlWp?b?k%baBe#+#;)~zyLlD88j zMd8AQ^9~qdh>jgQ+EP+vy1sG)?N*c&>H}RopCZHx%%q%V;8qLgQ;3q{`FD6eMfL@M z^*is^Nog%W_OK~j{a~b)nj&+jtO(iHy@v;R+V`sW%{2ce17sk*7(j=)HF7ixzLbYK zlS4F4!?W1Vem5QxLX*h=+JlS^-0dI}=QwB+O(FwvWvujHd@w|!> zyV;7}rDI!Kk?BJ%Kn4FEb`H_Db3?id={v9jx z2Qs_-q>Fi`O=N%!kb$5KU}EBQgdGZBJZKz;!i|P4XW(0JokKQ-V#+B+i{izWVcNE7 z<--Adp>V=Pa%DhPvC82sF1d4EGKRL00Wx5$48kSJtNM6Yl6F{)7R!kWblPO^Bk6eB za;Kwnd!j9HW!wjVr>=qM-MhEhzFo#D+;WLZD~c2;;#3AYb?RhGTJaF-tkcf}v|FiA z>e7mBkN~~@?PRCQnZ}TTC>XelvwA zNCYxK2FO4(44_Xe1BHX&i$-HXN~sP<>~ruO-jEUsBa?wNWdLmhy;JkJW7#_t&z%~M z*+g^5K%5y66@@EKeed%mGxlu0E!{#{3*hv?WRm0sjh2ak@{+RUWCFl>hxhbPFmkZA zp{}Ybwr$&HX3d%vD7*Rc<#SS1aolmo1+qWEK%%PfVfDs)uv2{itUnH^Sg?kxz7wo7 z44!5ToKoA4s$wWq6~D}6`*Nweh??TL?!n%L3EI6K;i;xbkTeD-17yG*1~6FI9@g}O zcWWcqU=$5Ru=DYKw<%a~h9U#8WuQNDKGm_@yEHKB7&SD941~u3`T-@7+EL-9m}g2q z<{2Ul0>pN50-ZM7N{UlZsatqbM;?Tou~AaYnKQ@8N!meNMn;B-k&@y;)LB>d+sn3i z&~B{^e2J3cjc;Y1jxKf#JleV03!$t9@QYb_5ZrupLv^EN-P&_+TNxBrw>2(+13t2* zrs#=hb}S^xaj7fJCmDzn1LzW8j{Nq5FX&;u#qRca1_%4#@i2olnG9$#fOaC|AkS&x zDdD)Zy^K-;vse_|V9dZXBe6VTWytxgFjs&O=2X={)VzxE`{eGE2C51Mr z#Pg1UQ|r_)hgLXH-R>b{mrzNe&T4XimK7B0Tk<8BOU5DtWWWUmZq2;;Q6JX=V3OiG z@Yfwaf37@c?sWEBuNw@%3?F&ha?Jp5k^wT1m4Wy1hf9aKaJC|A>F_rhh>n3XqDy+e zlw~i+Df?Yb!wa#Vtw0CPwzQ%(db=$>sW;NwR+m=v>C?x_Ln=gCv3c|6Kt)rdMh)kr z6&GK8v8_WYI-}0IF0I&#f>Ym6RHPMeQd$wHO)yaUG9ZorwqqKpXHeu6&)eB!NZr@a|V!MlJggZ(~!7e9q>xSQcQNnxjY5Hfoy>6(%@e#_Ir|g;bSLEwLYUW_dNhVR0Z7|?08i0)_Y%a_ODD>rrx=I z|NZx!R8q{HJ2$YBqI~)C&J~(PB}FFcZ0JggJL=rOF`2ahXv3naP~T@{m#ShS53F#m z;?tl|8IT9TtAGC&5RW5A`Yo`eNXR76)7 zB1t(DsoGRhJQ7K+5=;~?XHimokYKbtgD+XK#C-C}CwYboJAf!DX3m@$DA@?c?enxt zwxK%am}Bw`mqY+jQkiRaco67#1=4d$p9G$#K8H1LWLLT+whW#Hl^4rDujM??rf_njzb6dlxT}O zaO%=Yx1y?$)2m%`qN>=mX;YqNty!}s&oEI0=-9ENt@9}!LY)m=RgnmXXt$!IcruYV zLBj4*QcR+fA_!LulfDc{E?xb)zIkB5Il)1<$Y-;?PqeJY{@$flSnAd9_j>En)OxrfC+DRCh zREdqWLM9;=E?k%sH|CdLelf1fD#YkcojS!`TA{wF@D}V1_QtoAR^-?ycq0}J4l^%IiwuwfGLQ@d_$JGbc-`P{NG5*7aYRadF}_{j zjo1z*PX^MC0kkXG`(czL87qrtRV7Dkp&4W#ZVb3KQ86w~1;B}ly-QmLHE7&~^XHmnNX_uqeSe*E#rU6T;VzY%rC^5x6TS6_W)=FFL+ zl7=l1QBoXz^wBopn*h{@$u;@VoTp8|`#7Y6N(yaOiRT*wXFy3&ubloN6$3t6Y-Y@j zqf)8c5_3V;wE*GP2w8{o+}l%$g0!b)fO?eas@_36{cxOhl~9Cq}mxAkD~( zb#`1jskU;gOM;mv1JN*WX*9{^n5qm)#_F4k6BYZ{1>35Mhg~1|6-v+{o`L_=RTayY zEi)f~{PF&*J9GtkcgUJxrqr%o+ca(3G+5xoLdoRXX^BONBy?A#YDrZg--vW%AifNo zS+AxQRYm_%i_Oe=aa2`=SC`~<_H4JAHWCyC#bY*riAe^?02$C?0N-3ifHe^Qd@aln zan>NM)A3z0I>a=Y6d8ys1Cx<)Q7pOO4syW=+CT>4!T|aTMUmGl9LukBXHq=Y^7jT| ztp$j9q9SvrEZcX^p+{(2TG1GpKHIsBXKzp@F3V<(s;+O}zRpT3R5Kbf*u@uLZ0nE; zC);XPdeLsT;O}R$Ueox!dE0i=WfG+o8e9^?Uj{^4ar4o(q!qLMCB_5Ghe1;D^9(DJ ziVyJI<-^f~IzSW2KwKHX=tFVjeJH#ylJ=}c^#}eq1<%>fvR>qGGC&6GFo3ouGZivp z;ezAg6fTqkZHN;C9gx{_j%9eHWA<1#C*m0j>-kGIflNfi0z_*r(<3>>``RpsoMOzF zG1{=P#}he4lO|1U@Ng{Z>!~(?l`%z5k@FGgNha3or)fB(f^v!^>2o*QrrPJV3u`Ta zyDTJS*_rign%W1JPAYD0kvyn(xa-J5LWgOW9%Zid>>sC2afKHPEi%N=2O*?I1?Oy%zv^KUV zDY{PjSsRvkWWXy1@>(poyRa4jlN4o;8`(4I#ME;YI>I8P*b>j%A45+$CJCZ`l!mVrg0F@& z4nNQLwKqfyKR zMZI#SDY-jxIyF@l$#IuktU7h-n4^x8c`$X?91reO)KT>T-vZ=s+OYroX{z~sJyjLj zyb{kZ2F|Wu%hW2XucYWVVv(6OpNCXv)&@~fJlEaI1du2w9@Vs!SY&_E0E^%@N!5Bo%;;Tb;Ly1 
zww-Z=7Mih}fl6y{&U9fdKspx4Ee>YtLkDfLZj$o@<<=Rhf0h_9d7pbaHtrjCc zTWWrof(6a`ZUCOMT-~eBD-p>68Ay=<36e)-+az&I_&-0{kC*&%6Tiz7q`0bHWN^(@7p8(Zmni;}|5p1wz1 zF!T*OYXPjNDtbs~gS+mTEMFt{c;G9G3NGtjbUu0%4pma&AV4v%5Wxz`$ z6|#DPjtr0iGT;;g=&)9WLF3_NHJESi7Z@xv6|xMU&JF|e?ut^{a3Sibk5V5yrQt5> zTt`eR?1&vSgAAlQ19u{Wg&fP|ytu9mbAF^e%8G(Y@e|S?Y|)0pfqjuhd`)QS)TzrU z`t<2zwrts=X(zG3fJHgQ!>$vIcDo5SKC5k~98%HkJNbU3BLneeK;#rR9o={4%k2wa%W(9?zcm`8|9>0XqjX}WFSfg(033i=Cx7U zkVD%2bGV-NmpM_9cM;o`Ry>S`kw0%Uq78vgvnC(?=|k37-> zvN;a*wY3dkWlX3vjGR8G>U#b-)vVYc^B{C&AifNocW7->v#e}mpzHdM_|44yjY^7C zS$+0xYYK|TQ?-rp$p9H31DXt=Pg)arau`J|P27;MzC}8x;2X=80nw1M9Z51StV%Bt zNzR#QU21Ad1*%y8xQ!!Sf${?NB?U#coBl<>tDUu*$&7o^k!rybg@6;;K?ZQD$jN%Bab zBLneez?Q0F>o%$?Qms{^K3`@_O)(hH)rqOv$M|G`43GgW1~5r+D6l?+&CEA z8qd|0z8UVRz3ShuN$^lGi8zcBJ_GB*PjbH$IXST(CA>fe^cX<@p*+xT)I(H5nu318 z_iAXK0daC7Sb^DAQph0{1%jDkH`GbjJ~SrCS^zYfxv)fiR!>wFAAR(Z-Au+1{g6Wr zF^w8EvXS%SQFmL~09M9)9eDbxipf)`s!%Q{!uiL*`G?jq8D;cU74HxG&CFZ0(LX{Q zvH01y9e9b%Eb0 zfGI8nv7)5t*RP*fN{Zsei<={kIKs4O(PEcx*s!4~S<=1d+a;G=Vyn(Le+o3(ttcs; z*Ct@Y)@?YSLLLcpWFWo_P)U(g*-sf=c3P0N07-#ojWqhpW#+qItej0T1kcul)D)RJ zoqt_qrH;SJ02%O&0nAi1h0UM9SM<$x*gFvZTs&AgEgx>-C9tQDHNz;Yutu#aunV&l zF4R|5QVB)|Twwrx2pNpM$rXFD*s&A8du91Mc0V~=5xmH4ODi5kLveoE9G&TTpsYu5 z+Jc1?ImM_^qk;vFI8>D?RZPp4Eloy7hACaT^e$hcMvYyw76%`EaKzT_oxI2?WG2Oe zYl-@Lk_B9CNRdCm+-}_*wuL_;+uKTbSkL zPg*ViMx@ajG@Q#5eV6lr6$6uhYFxNXa6%FP@ML;r9QDhzUpn$=X304P@E=?w4 z1aTfgn(Cdr4?g(7Y~JklySPM&66UC*jxzc3?ca3dnoJ6mEn7B%BFQ;Pi_!|2ymQ*2 z-O5UsXSI#oC9U}W7j0PLkpZt55NXAYM<1bXvm8?KQjgi{xakZy1~Q6CzpS#6#i4kP z%1N(uWPl8ifk+v^Bt=UYAgh{6M`}oNiFZS^%kVtimRxkkN|AweDaO}`?qj{x$hU78 zE}V9*KF;D>A<$kjV1ohlBaR2|g*LDiFuN;YSb=!qu}wz?UkjjP!v3SJI=JbI(is`s zVKH-^(Rd13#8-hvCwo4C-c@x!lXf>hf~s%9--z%STx@`=x#9rPc&G5AK(kV;9hF*Tbaew9OJF#oJU; zXww=J-ZS?+_e0og0Yb`Jgvk_ulI*5t`rFGU$4*#ozW&k57|bv{OTP(j2}6Ha;ib4I$JzWs;}Io= zx+F=I6eC8A$R>-U=ldyC zQYaS`;rwHu9nPn)p`_?JWT9EGB=-HZJS)U=Z_mmzOp*Z}PBI>&kpVIgX9k|Q`}jL8 ztOY=SQzj|;!OH-mi!O7gd?n`MWTxVtT<-H$cnr8OTTvyv>@uY!8E`>TA*L@+(tbuG z17si@2GF6XjLWVbyeHfCyq1mGvAe|CiUf-solKEd3`{UuBEf4Q@Rf-~h%oG9NL&3h z2|1)<(gb=v=;&XosrVCLi$0%dw+fDHW200t8- z!5;(QU7ely+tz)4?8M*y=ETH4tVH{#3zHL-P^UTe5^X)mD|J`)<&ynh=*Ymn3^aVb zJXW7(n6_ljJLPCZ z3qU}3Z4%b3Sz|_y9H|Yff;WHu{J=P_Nh{=l3QN+8Q&3-hkXFd%z{>&`V8DRy;*g3} zo7Cs)0;bMjWWWsuE;_ufsa8sV@!5NWDXqw@ge^)cWFZ0_86X2>AQA@9ue=5c_kb@D zi2=zcD&L2<;2TMO-_hhvVoWfw!~qrx!Unfiu{l7BVS> zW$O+@RYmr+j{jzb`RWHNi$q7_dHF^5sq!irAOmE;E&~`;xCJS6f%om3bvtan8qd$y z?J`94Bp6tjL>wDYYT5uLrWylv)R0mGQs1*UHFFr74EV_a`V%rRCkhHzos2P$a@Dx7 z?eeYD7dAh+rDt17aX%VU(cBDh;T}5RqNM1iZHp)=h7TXE4Xc7Tb?Vf>IENf^NZ_zx z1Z>!_p*j5U!)+vWD(dSAHh`5ettctF<1}q5DU?f!aNaS{p+P+>N(wooV$o7Jw*ZW? zke;=7C@^9q}@xE z6BS983F?q-RfQa^B~oV}+@mm3{ixFc0|uCl8`WV`=5yuBm1gUJIR+Lc7fn4DCnwY? zRRye*09w4omyQhh#DGXN&vw}08L+Ld!}f)48fPm~qujQ% z;y>uQ`yj1Y3>sr4t(Z1#THu;fubx{S(?*RN*^*YAj{16hU}B;RD6l6&vcvh*H7*LAJqmV9vL75WFRjF z(0`O?&m(z-bMXEK*mO2#Dn4}3Hk!1D0hz6EVY#YXdiy>6@YkOVBr;oJlQT*wY_d!2 zv}XW)3fV{GX~#4AB>E1rS0vqT1~@s9LRCOz5;?_y6d>}7S{VUu$SaV>{Z1E5fpkOd zi~u>XLR|ylvb18^vSopaL8KLxD@(Duw(01>ZuS>Om68G6PdA;|GDSiE3XaNI-@Ooq^+!Vu8?8-9N$E zc&=>T|1z(T0Wy#m1H1bbZzJ3hd4-Mm{!?h@TO+o?x#T%fkz$dcQrT8j$jJsbrD$eY z(Vsv<>Pm`%0|z>JNX4pEtIX1+O9OFRvSi6PDJdQdL`sB#&~9broBAOYyOb2z_KA)R z#F>FY`SY6_kJj%Kijv}$o^#_&t@--LKWr%|KE`wLvsjbM%##5!5E=s*K**28WP#Jo zp(U!9kaYfA;hFi39wd>HXQ0CwkM>M>Er1IX6*4(TRYmdzZ!_kJHo+6XmT@LJGC&5B zW8hvyv$l0eawhE;OZM;Ww%=7=Fr28+sRWsTv8k%K2OatnI;Pq~L5E#b6>n*$AgYR? 
zLx*a^s^BG}s*v+3P>w3;YJfCu+}Ip?=%E@=`(m7leEOiOkckRe1*ht|eMeP=Dxn1P zl7Wu9R29Y4ZSOu{zFD$7zKhEqe{+U9r@H|kwgpUFGC&5%Kvo9ON0gPyJ>aj-dfk)X zi(qFeZ{wk|j$5;fS=NN)?<3;*hg=NW}vVWiwk-&~EYIwIi+Q@uMprMA>GY7s-Hi z21HtMeN$V~iaFNpcQFBxRLH>;7Tj1o4?nelOGq+62FSo(3}7^&5H7qAfAL;7{P<%Y z%xr<@B|CY3l-Isix-tsv%Bc9h{KWQeJeP_f^RBG4`#H7+!HkoEBpE;(mL<3l*g3x% z|4Z0tbKE#d2Hl9})C`3ilq7A%Lx^cpPH`6+UCE@a3n?Nx>@rdxD>=pV>C*$V>)Ig| zO`0?@BJr@`&O&`X+5)aN4qjQG=nLH}&h&kUmVh7D2170wI=V>(u8AEb#cRFd zrKI>~@+$M$#1%Gb(l|T^$J+oVCK(_DF=OD67AGDbU@ZU!5@gnTFud%~;F}W#h3xq> z-#5c)?;ZwFe)65_f~rE=!by89<1Ysou=Zh`VBVzDXKka;1Z`n(GT=M|=v&C-glrUl zu=BfTeUz)<;956II0<+Q>3V;p!n3)22-`$akPD zyLL!L)22;rsVdGveQg?ulxPDzkK|Wrn;?f&tlzR#8;cP)W>HTpvX=yrLuBCF5Yx&nsWWrC z$$Q~3fajAaTFwhE#avVR8qbT{`A3l{}#x%cSoJ z_(rMVgcNrp()mwFX?dFrCn^++4)2vPR2BbFG}$K9698YQoq(&V3OVUJ5miOCYNASW z$93q?!Im-i*#g$WMZ2}4s#vvYv*Gy^TBSrBUl|Zp#kEZusGGle%MSBeues{DE&_i0 z(<(Fm8!KvxFYx>u=VG2|6B!@_>A?U756U1Xqu_m9H7=_iPsVd{d3uxva{&finW*ql zO_Z(`=rm-ktRtD@3TsCSf@ZkQ!0rjgez3B++m`-q-J|GB%>Mf?{>YXAPEKS?OG+0r z5!>eZ6r!Y%j=s}wcSrtBZ6oF2><>TuP#abSPtK=6z5`*&`4su`xzqfPKKf`=w{G1) zwkH_qT-4VbloVH&yz)gS*8*5kRmfQtIjAZ!cXm#YRR$*mJ~MF15r>$Aid#6OV!(W} z{Esa@QxnMcC*QIrsdzAu0SrV2$N(8gf&p|0W$)TC@V<;DWX>VF1r5kyD(z&*iB8T#tk)tB8tyrt1X*XtPCO=n!~c zq-nCE{RZ?W(tl;0LJi_Xg#wA$Ks z6)s6Dnl)=?OImRr>Z=bM1&W+vZI-I0^0UjtpK_8@sNfTX4EVsnr48*UDZ2GfZzaW3 zU1r)U-Un>~6PFB-fp{~3K3^3uFdn|4hfEBD&FA48W}}C;)8srDK)H(kT_(+3a2?TB z3c6rOjBU69c`uP*{%@^y55`9At_2v6s=7HryBM4d#D)PpJEh$YgU7-gZ`@mG*I#;L zZN!FhqQXJ}paR-fRg{L=|FmGb4albfsy_HTc<^8+RTXm5_iw-b7KletRaCEDJy6)N z0<~}7-bPXf0Q3PHz?zt7w=047vL>criK?O}RTbK_63;URL{)K3Zk-H17slf3}Eo!V2~h_6o+_7MsL`B5uT46AoQ^C5-{17iHd_@ z^LYu_&7g`5;8|4|aQ{$*OciQ$ssM~n2GW%Qv{z9)iNdj!M|oTfTkrMA-pEbm(r;c`o=@>~ zS8Hkt9*v`33kgUD$N>6zwZX!t@K!pKmKlB<_?O|kWt-)62p>HIa#o5f?j~0ZiJ={` z-Bx9X@-2g~bYy@G?85;17PW9mlnBS~bIavFMZI@5`V^~NHqtfQI8kA#L{Je$Ng>D1 z+j6A<{4ZPJ%DB$}PhCkNlM`|lglkTe6blz(>L-9JQlyAE_+VLZ=7z(=-&j;q-0wy% zbG8EQb_MS0pHH!IllkA|X*nm%J7gdR3=}O;z+Bg)fqG6wN%4B`x$3xDfLE8fV^k_@ z0e+mi#-^GgB9iZ zc|LOUgCjX=iMHdzYA#3U(lG;^9{{8~bkI~#enq|H$%ZOa5+t1%KvO*y`S}XoH>u`J zXw#g5Ot3O?wjx2o5)A$@!rPQq+>QoYAwio$4~~XuWdzt|IfWcjv13QDqOV!AX6R*- zTY|?NbBrxH#f7M^K5SI{Ecj8M273dCR8UTlTg~IWI52Q&!$YmeDc2(@rap%36TGb2{kn zU(x=M0dwvT5`Gzoq~fsflg%;3{yE;^4Kff@2GGyA5Lq7!U(quPCd1x)JhRyeb2(9A zu^77j6qAGAxN)OduwX%;$i<5nH^qt-3luhtKzI?D zi!QpzMpDuNzt0A+CMMdgOsuKzQ}zd>6}>2}&@3uqdB{Mqf(6XAjqOM)-gtkmhm7oR zvdAdLe6?)<8>wHJfalvsslyO~43Ggb@FxT4Gd9MrPvB*vecxPXJQMHs&1l-I!hkE2 z6Ox&BXh-!_az&6l835PC5$MT01BM%blM~_c&D0!b0Btlsye#Q?oN|BQ9Qo zzQv~OZhCdC^qD_`{0!5!O{;bh7|w|b`=r}eRosS#?EK6k8mkQczN}66rcIm7haY~Z z4Xc7TW5$fYs)_>G7sVxa?6JohOG=N6P+yO5DX%%$Vr9=VIizCaHtu_z1LwRP69z78 zWJy)gCp}aZPrhYMP4Q4nsu;6L2FO4N44^+J+c10#UoM2?G?NhZ#GUvyk#97*7&3se z{}I`q<4D%!l)^_Hu_X>>h-$?p_1E;>^)U|IGmm5-R|e3psEhz|fY1Z*K8{u2h4%b& zu9@V`92xMjz;n!ui#Jd?Z7V5cCBVO3%zHLAJ&$W^vbmnpB{`(RXC;Mf3nd3yxa9C+ zFdaH{uwjxX}}T11sLd zWQEUe^0YN(^q0$QWML9OabN}=86X2>Aaw@NXKVo!QD~J(9W7J@nWVS{-)f@tri(WN zy&THEs49+e$ey$`1srD}V|O@~xgL($LvzSLBn+T^wn4(*!?%pYfYcN12MF~8Pg6I` z9|TTRgi}_saxvH;1i@(dG2 z-kLXWZZa}5Y$Pp83jO_F^eufFkn6Nz-<UMX*`4oSw z*{Y5k7T{A|X4*QB;sLyaWl)Ze43GgbkP-vC2M|U8q~so;t2VW~%&WBk(mB5xkHy#b zvq2OzI6qOozX#%`g%2zdS8X!178fRRh}F!`J?CK*VQ0X+AL z!AskeRaFNi2@`cRIgsKiete}?4158m#$TY=sH9U6P&QYyB&uPnhQ11o!|U7t)u#>txjJiD$# zsB`lU8ES|zsE5H$vkcWNCgu_Y<;om@{f{C$BOo*?&_+){VzM9K?eM;iQ?G>W=c8S_ zwPy;9^3C~L0es_G?0p#CrnKT_G;WtRTFkbL&}e0n;^k~_rf^Xv1YDI?EL^zIOqeji ztXj2aer6jED3~{Ip85Rq&&`%CTT;lm-KfVMcbutFqlS${I-tJ#Agz#7*7T(n6Mr(B zw&gCju|Ya9$$(1?Tyf-~R-_egzVD~B;_0r|Bo&?U?#+I-+@%UZTgU(zaGC-1+b)2C 
z1K?d+g&c&l%?Q*EkHJ1c0y9jk80g_xZlCbsd)_h5@-zpW)<>Wh@(dV$fNtR@$rQ;z z^bDX)mV}o!IUc@F^cE#;feh$$ME_!T(g=+BHv^oU_?rs5KTu(9D=Fkd;ZD1`P(yzX zNP9-=m0(>}Qb_ieFJEpx`|L9_a^y%eX3Q8fYSgG*>WZyf-QJr7FAdYaeS4)6`U~dT zm(Ig6C0W)2$aCo4zwWyFKiaUUD&EqzPgE7}|4dbdHm}61)i$&#b5^z5@@#ed2<)k^wT{9s}qbc7jzh&|kvFYd1MZ%K{@jpUL-A%@ z8IS`i);g5=GO(pP+S>sR+2pk;pi-V$QqIL4%g935Gte=cXbu^$%78rE;4ViZG7$1l zc;CmS??L}!v{lk#1nzrf@U5sGhQT(K6gQx;S96^dG~m^+@g;3LMM*Jmpxf_wnV8tP zaifuwvwd+VoN$6oCB-2g@bENjl#g3g_r|wWQm7J2FfSR9$%(QS4ykyv&s?)=t=kj9 zSqtpxuCr_%O3@kbTq-EClAZ3qn|EE^d9Sqq>7HTcFH8pTopBY?dk?-qnCU8{B?HiH z@i_cQ0gDi6$N=ib8f2@7V_7=|_Q(Jpoel${j&H03rHb+@>YqQIR@JN%ERGDIFVO_~ z{v7^Y_{wR(K^L@P+18#;i2+VjC=nS129;M-6*i^DqN=zjh%F9;!J2yWhqfKVhYxr1 zd zwwbs3_@ShjGHabJ1;zKMD8tgGvM?iLfDDAr0Q!K}Bhjw#(r5F@iE{f?^x;1B$!OXd z2?PH{s;*7wKZUl}A`&*zC{b{DEwb}}nptXgV#dkJYXM|2To+9nqrl4Ci6s(rWFQCw zXqXxBZzAm1@W%y-<3X4YVdKpn*y)vtoUI7IxE?{0HrLc&Cl7$|vz1ti_<9g&h4c

      ct_Nibt9N-H{{zRHH7uDhpTpt`i;^=~KhkP6pJp7vR1;L0NpGi8btR!`^k z-hN0cp6oIMQadXHC7F2FS^-usECP}NGT+yqX9Dc*&S{?b;<0C1_ZAVci!ra8%*YhL<4iF52mfw4&cHlvZf- zN<6<9C{d`OxzfTR6(X(Z(${Ti#njpB%<%D64y6!Dg{<(QBLifB3?$3|`hE8zl1QKr zNH}t|5f&l&lhAMbAzCw-x@QcyxNnE-i835*v9xDgMs6-RI|@ehg?DkEx-GCv7QoSw zfwW`*?eB1S88?wZj;rBaY=_Bm|1%<g8j)S-5uE7JL zrY&$STr_G?Qpf~_s@uGIGnEvojQ$CB;)y4kYSpZqeR47Cs}D*F+0az|MfB=7lg;LB zHXjETDgz`Q|MML zqLG1EFo6Eq!^r649%fc%rd#0ACkw>r$UtBQP-jLXu!{I<#aEc6_R z6BQ8@AS$%XD9X&fEhiFDS^eaLlZlFz+J?#@6)RV&!)lx96<)r4`D`9i;e35XyA>sc zY|yOgL`m_%uQOCJ2}TBdW1u7^DXu)~aCNgqNzt|6e05w6Kv7T(v!kGp)x$$HY$OI5 zAOrDY00Rq8BgYSTm}}`s$;8Ag4{fK(i8CvKAoG6dp-5LP+6TBb|Y0r<|8B~D+hl&a3{lm1b;gG0r2VOWRgO*l>9B- zGUif#I8hN@X~C*=(7{7(*|Nn995^s|1cnU6K#LYF zOx3DY1BFd6(50xa^%9H|MR4g*s;eqq!}%0jw%dFhL_rnPBm;>ta5c7kvZ1PYwO4ei zirg4@y6a3^=TSV2cPkYXxslI%WPl9p#Q^$X;$FtZJNLTb#~)Cy?b_mLz8~h&N*5Wx zYjZg4n&_f!ut^5|2BD1>chtVf%>p;a!-!AdD@JZi&IY`RdM1OMbY#F=2GG{>!=DZ> z3$JCcUKSoV@HRtv8$Jg?TB7aB|1oimzNl?=EkNEhIy_O4xwCWL_WKv2ehJQC>0 zKztbxX~h-T2TI$?{!iULaBC22+MIRf!%u$K&Z^+asNG;2z{Dg2WFS2mz=(r<6ut$2 ztA{!K9yZG;g7nYm$Uv$L{3lfai}6nb_E@yn3KrAw5?V%1VYn>gF5_?pWZ&zD9JY_9 zk%1f;KpT@8idOKiAdu`?CVM+w1Ya0Fot;QErH?T;oipi5-W;BxaD^hPc4PwG=KAR? z@Bndsc{&>VI`HscZKf%w&}Q^cypv8k$(Eera@5zlcC1!Kl}(zh$SJmN-=S(K!FDll zdhr|1u(>Y)X|}|`HH{B9r7Rs-v2MdQOI9ZiYf)0s8Shs2pqx-=S=jdatxT~;;2zZ1}Ik8s$=&`bjl;_gO*AHo+4 zE#c%6dYsRy5X2@7;3JPv^=v$?yy<{LF1~A|titFC+mnLt*s&Osw z*`8tAwrSNad6X1mjzj!Y@m;klMI1&a1N$;?Cob4rRJd_-n@c-o)BpO>XES#T(C|f7VdapD0Rsl)o;dF&%s|VQEp4ePE<=5l zPPgk$I>MW^jePZ+9}QI%+N=`KHwJ7eDPH~G9OHtL;+d|qY#mCGiFYX#6uwn>+Dir^ zU;uqB*`nbCcq=PUEN5vH@Xx^WZms2X2p>ZRP_KSQw%>L<>m^`cZ?xn0;mbL0tGao} z^HBhu1b>b?xE;WMpk8gV15MPlX5gr^+Aj)WEdbh75%?3}?}V2{PfOrsJH@BqPllI1 zLmZt<(wvC)i(;S)lL1atgvoX`X`#}Gp{lq)o1|U5B&!RSY8y6c)F`ujxnD}~%9Sgd zBab}N9DD4sreVW|rd&C<`kwn=kC;VJRrH@SQ`=nP zkpZt5knNr>Yjmi(%_~<&uc{C=#h{O+@2~ED1tMqV)oCRl8Av|{FbGi;xf%@b%OGFY zLXcI{=b|5`z6EF2MEE=Q49JcIK72fOfSo_1O`jcy5|d{a{A0-L1b817wn?!J!1HRT z{GcNPQ89paBNGu#;je*z3H~eCvKsy~_^08|gLnA^dYBiAlkG2MA%t*47cv8!oCull z9Fmg*lx!*~I^hANKhl#!W|F;(hroKoeLmUvImMInDSS~<6ev)@G;iMAG;P|{RIgs$ zRH#tF)TmLz9CzGtyCy48jyYvB^y{r!wMs@wF`@aXwH2r8W~7C2GGfp z<=C=^!)YF7t~YGH2+um54w#1o(ByC!Kpm5riaWwdE#*`y0bvN*c2D>kDWf?GfV{PZ zX(D-%nWRFFnz^r8BK^93-;4a44A^1-?W81pWBB&)_rZ(&N>(&UUHlflEBtNn$HM#4 zj$mpi?qQ^I6`ucFLrIN^?8g9SEB2!#{0nFRVaO>S2tQ{@rT7YB#a2!s@`>{0%V$fR z%wE*5Uq9P>QCt)`#f2AMXd`K#9zxiEmiiqg0}624U~!Cbv1 zQr64x{bX~cm#cZFUC}V`9+Dj!P0~rFdI@4pLpy%~KKl>Lq+p|sguK*-?*}V}!}}to z_=~iA@XX@o_J6U#|5IiFZJ{*$;qYzYuZ7Qqe;a;0{9+`q2440?>jVENd`EcsxGDfo zmpubpaQSMq)z0ucvk$_nVKBgniZECXmc~N}X0yZH0S~Mrf|(LsC_E2DTJfB=1zWdn zHU0bd*M?QaD^;qLk-eUBbG2*NHbsjT%`Hf%_gc4ZZK_nMVk6BfQD1$KQ#_8$%L`4_ zz5F!~sZb@9U|uqCP2&cpxCJG}D?Ot*q~htWGtKrLR{FdT4ASx7W=G{+Ql$-CZfH1sDaBUys0SCg}kKsON>@%fr`#7X`#g@Mpna z4*xIs$Kkue4}$+3ek#1EB38lw3@;ORUEn*zUj^R+z7jlLVhqSc;z@Y)cTX%DL)&2B zfIr{CHlT@_C`%IXU4_@!_E_Bay$t+mc|(sK`-hfIk}Z$(9%Xo5EUv5P1VH|3-4pXeZgWZCi8G zO*d)7s^HbBQ^y>7=%IOH?bxxyeDcXBX2S;Q+eh!VZ{Kciyzxf!`|rPN8?zOF^-y=` zY6ENHp{^bbyczJSdpjOnx}^E|iVIABY_Ua02I9)VADcFt7H{`6>$k|`K+lycQ^7RDI|iYvmHPVkq*H+IIb z5U(&hq<$Q|QV5|CQ_eURnWR@;2iY*@8r0VsHh`5e9|fMe z{`3_bQn4M<89Fi$X9liodW4ljDqiX_$E@FIe~XD{y3Mk&*OOFDLlO*U%*a*LvS5AR)B_9n~zZGNxo8l5wm zj0H7=e+2#q@c$e9K=}LMPleBJKbxG<0^R{`MfmgKUxxn$(L`(76aH-Yyy^ggc;UW< zm3MeyDUBop@nc{-vMl?u4TxXX{m3yVDjX{(G?p-=6~3%vd=x5Y)-d~F+XI$a|WbM-U;7&_n3|6*|BN<$a;6+ zjgVp5wrLgdG8ax%*evJdv2Cj=&P9je=;X}{Cng>wA}>&v{)(#NgAd$PVa}gFKk%Uw zqN*rSqC_}_5g-*&RaiQq{94pkA5<06Zq-#4FMs_5RTTm1AA_Vf1J^fcV2TygA8&i< zz1e2tCVQfU=ek={Q#_1!WIC+oOz(=z+>wE38Q^zbwEQ#m#2IkC&dPUb|HQN9U5uKr 
zxE1^r@UpG0rL}X3m@$D7zT2ckYmi3opDdknITuDunPd^X;@lyPXFE|EF!FC@F?ao1+aI z8@%J&UUjM)YXM?Ix%G^eEn37}(y+dII?GmWG4EJXRfw9RpPiGl<%MDMP%Cww2}lOW z02v?yWWaX@(C^=gJX{6;gYS8v{i+Pyf~xVcDk8zi02xSy0a?uTB>XXGn?I!j#JJ%x zz=?|RxK1>shY`W%3dyrE&@KB=I_>ZW#&cKZ^Q6#Eg)3-M#%Mx0M!M@ccqijv}aDk$PtNtt6Z5HAL5w5f&h zI2ZN>aG&?gow634F8vt5$Ld<->MVG>8%U-fb?KU;=TRrRq-*AwFEZdE17E`CCTN%U z!gD`1j|z~p6^<4b8q!RdW>Z=rIrpXC{03}QmsWiI@yBM#k|n}}Q!bH1DnwdQuUxRA(1d3a*B&B$SIbt*lcY|lMIjnGC&5%Kw2<>?{xVtJrjOST4acsNS1-Vi0HiZ zKiSMNUW5#M*yxV(Tnms&0mxo-*TJ8F=k?@NfEYI#1~^d>4evoywJohU0|VW%VWiUz z4;PVEJgse{98%%3v||1G^?|ZnrAif3s#K{!VZ#V?$|0V*>1DUmaSIyKz#)7MD^#3TE+=P z2FL&zAOmC|1`Oa`Ec=L@4=>-Tbn#w{G46 zLkdv<1Bd6IFMajB+@<&bW2oj@fFeNmMOD#jFD4ZJm z;<>l2DJUMryO0VBuWD;p#-7peUzcgF1qjP-rb7nE02xRh2Jns^f!wr*-i7dDx4~g{f$9m6IF%!itK$- zdT^h=UH#LVs^YibWP4G!9LhehszNNUU%$RH87Nq=psn*ML`m^)XEK_DA-mh{`#IcB z`m!h~hEqwAG=Gek9t>0{4kg7A`p>)Hmv1&bhotr{Sh!@9=`(zh4WefQw5JVVVv+$e zKnBPF86X46Gk|{oV8reS&jVVMr_tS*4-Yx`u>IEpeER?G-3go)#hJ(Po?uWx7DWM% zL;*!Hf>#t{jENIbgNTU1H8IJ?1EVO$s2JDPbp|}MhC|INiOT80CB}1&8V}-u8ig27 zKp9XGK@gB2qB#41P!MpA_o#Z`s_tKZJ_R%Lc0c{pxB9)hs=Af%;cELO;U-?L%&Kc+ zx%p|igxIe;f%yv6Po=Y-jFVNk){{HivPYt~qU=qOQvzNtcE8@9yJ@KU57zrV$RG9E zW1j%8a-E#rwR+N|Nv^{>iD$hixlXOvcH3=}Ew|j#$$nh~v8feXZ@smPP#ecMUB~P8 z8~-uD-@QzUy6&0j$!EL|2iV-CR7!!<_Un>twn-D`yBsp=jiPQ+F=V8-DHWINJ&-9C z&Rd*-2q1s}0tgf@p!ev>iqcbsA@;>{h8W>tC3U3Q`NI&CBPRq>E?{@Xwh`95=s7r? z??mZrZ8|3+XjeV?98RbB^X$`g9==`Dy<26UdMkffy^nP|0j_Z^jee}^J)TtFS zX3WUBy1VYWYtHNXUeTsj9DA%?uhsctQ!CDM9@TL`J%{a1BjX*1O=c}#lH5PD#&uZY zArO^-O|AGzXFpRbZoQ|lsTDS*;`aLr+qMyWd(UG^MI%T&i~s@%Ab>zQ1@zhfA??4L zik(zrNsoYipF2wJOD9x%+LW5aA@Hu!JYQueJ^wD{IT%Ny6`Ca-6@|)`emmbYDZZzX z>4%iQTKVl!FGzNWs^8_eRpV>hOp1vUCpNxf8P=FLZ(g!&*|MDW+_r7opk`7Wal{e6 zW>TCH$xMoYYPi)oHC`SoT{o4P6nSv5YDxu8-`~$nieaPQOg>)qS>9?xH3MVSj?aHz zy{Dg#1^6WY7&+|JZct(GgDeOjfB*srAP}&C-mj-BJ-Z2{UAK`Xeu2TN!U<|uuI&|n zTT+8~1ZFGMGgLb0x%YdO-0t2QPyNWW4goqU>OcwcPkKVytcqJb=N8LMiksw6@^`5u$%IKZI|4@6))M?zG!gIqBK74_G`|s)H{uKGns>pZ!JY<%lb_86bGkhK_~XgjZ|66h z)@cF_1bhAU*Bf|r!-qn;Nrg?V=-s>bh6%d)t(n3Ma1+X36g_WWRdnYU7n`|gN%9w_ zR`_c;B1a)$Q!9?!!~N6>n+9M@$v1Q0*~0R#{TNI>5K zR;lz=N~frN6i{lUQoq17J+zX+Q+d)sOhQB~5KRw7 z#Kx{uw_({nyB5Ez;pvCFPTF-my?EJlimP15wVPCgI-O$v{Q1d}B};Oua_iQueccoy zr~PldQuprNeNCsZn^eRwox(c(oKL41#&n8}54Cl_AGzuNhXykiU|m-H;9KC#u6rh% zZqmg0Zr0p*B8QtzTr;My8(ZX%@D+OB< zwMO7Z{dk-)rtxu~Dq;^<2AL z0nJF|=zJ!{lqpk!ok@|;O)5g0NpZ|E$M`s;nk(peJ_5UpspqX-n0AuOm=?=sQatpA z4YX!KAiDye&!p(+93Yn)PQ3U2$C^Qr+iZ(G6SR><_8!NA00IbvCa}qbrusV4F6rK_ zvQKEakq-jF3+Q{nKb7SEDmSUr2A^=Y68u2bJx*=(D-~wGWUK9|t$zV~y?aXKOqK2R z9P6d>XO-3dcg-GR5}>0Zrq=p0SKqTL_S3F@^yTit{7ElpHml-F*V);uit*#eyAJCl zUOuxbw%KNzpk`GZdE}AFR$Fc5#9^Z#XGSusVxaPNKC5Er)7+$DqvL^JGbqq+zde(t zP2A6<7&7vWWK{z*DTa)CBU$;em(SMBpvd5HOqCHp009KjE1>VL%T-QQ4UbguGRh&n zdfsaGxFXvH@tqGS^OiAzQ!a4JSvyQl-B^GbYP2eqj(V=yOr`5p>?SZQ2-GP+M@5}1 zVgFH2YMWJYi{~sajbv8EC^@|8+QMd4%$YOCbyz3y^0_}nr%s)m?9)XMyFZ1;SrzBF z$kAsEJ#TGRg+6w;U1lu$fLRr8Nr@Jrz;-Q~>;4q(XH~pCe`PZ6zPisP*ldbh?ymc> zA_3U-Cdl$p=_Q+!+TdjwniFA~#>oz0}U!gZdC z8|?il7A#nheDHzIL~VR&)v8s}vSrK0SMma zAdStcsIIl~2Kg@6Ju{uJI{Bipe(D6y*niLD>rLE$&KX?K)QVxcDTSvg6<6s^>EqNj z5EViI0R#|00D(0EdY@mSaKRiFD+PE+}Y zo=#m=?1mG(rlqW*OHm#7-{MP=HTy{PgfGQ(ih+?#rx>F{!ufQHC!TmB71Jr&wrv~C zGiI~RHcNW+y7sRuT%0`2O)6Zo5-&1=HPb1+#4 zq$1CO*jIMimnMv{cn>V~k#jw%dW>DD3BktOmzC)85 zA%Fk^2p~|WfZq2%Rk4$OyQmCSv60Cv?gTzmsQ*;yEa$s(4m;)Aw!R9CDq=WJG^Q}I z7fz>(&8n!Wsc{<9Rj^!dQn6yi3Rk;y631p$c)Cf&7E17JCrNq?qUWv6s<_MbE?!=+ zD0$>fyQV!00@)SVUN@=u@$U9L-|h0&{Ew1}4}3m2Xw7VjySxm(v)L3Q+-6QR1Q0*~ z0R#|;K|tRT7O4zSuDhw&>`EJq$5Oa}4g46Qva?)&r^2N)3%6v;YzSCq!xRFV0#+Oe~fk5<}Sj`QV8 
zz2`8S!Z|Ae9Sb;r?{b_U;l320t-iG2Cj_Dq&}V_UDkm%9T~+>9h0g~0wVriIT(7c= z9M4nXiCj)`~lHx>j+B0xt)NtpfZL@yYn zm`O1xl9?1XqvUnh+D|?8R5E+^Y}a9(#C!3@7n4=1?0u!~<&{@nNmi~5Z^p3Ar0CVF zSKUb$_t#ivQrHIp=QAl@(oBj+XET$cxI>7oV;8WQ6vuj)Nm298ie$v3xyh(I>wAvv zc<8elHce{Nz)HPid4dJYhorzkSE>zh(xpq6q;>1o>*9U% z(MO5(RD?K@_a$1Vy%gb|2wzYI=SnF zqumB|kr*)$D51bR%a$iU9DPUf$!fRvfoZwLCdty}xz1$FtHNjNHS5~EwqRuh${?Wk zt`^$uT$L@$u=};UoTTT=ach6!|M~^gUXSR}%KF#f5d?w}xL42Xo^@sO!f$)%VvY{R z0@M{e(!c4sy{S^S>m;11a$uy@@R9aR1-(J#CLK?9QwkQx0y_47s*txi4of(90ygz( zwo1)L|J!rpH9artAL^Rp7QI+f2umO|3ZE^$th$IxIP){Wn~PB_0A%30&~d6UoGv?K64s z%Y0?nL9bc%#exNa;05&F^AHiP9|6`Mu1{?G5}_ZllY{O!903j&1*>^ywMOTmo=prfLYrVMZ! zy?5-SwCpa@O+BV}f?fn~_ZZ(vXj#~YwlPkEtrz6L0}o6tyzs*H{=$#N3fQcQzJ2>9 z%a++B2&aop)38|;A2^NbIGB#reH8L}75nh#Y++V~GfDzQEMT)LDo5RseEM0?{p%)t zj$W&VMNCcV85Gd_m(2>ak-Xnh*|Y=%_P+jv%COMi+e=`P*Dlmn)=BXjmG7w7j2IRK zLJ_bp08gn5)$@8$C`rYXho0YdJ?}$Qti!@)q@=l1(k?~wsEVC1pR8kMvHoW%k-!(n z+RwB*8?0cRV@|)nmZ%!OcVpA8*PqE_vlsrg-cD^$l65}V-KJ!^l0U1v=uC>Se?9Ax;Kl+tZC-*EAn;qws+bh^tO^?qWwR+(6i_`jK>z^+5I_I{ z1l9=X`-OE{bXBpt0Uo8Yhf3@xlkM}-V=DKkSmy-y<`4u60tg_GHUWDBV@Z#|AXUZZ zb>=!M{!c>Z(uWosVR4!3JQX(?H_onZz~WM%XV0EVixyr6_5DU4(zkM%tEX7iitBue z^0iZ^drFtE2*e}Mzw2H}ldlE+QRjc?n26_iB2xqqNRn?4IPW3G0u*^9REK{_(jsff z<{=2^1IA}6Q&lch>7tyrQ?W1e!&P2WasCBFF%3r$Qhs z2q1t!8U=#>3ZF)BpLNn3_8i5(#pga0yGmz(%W*N{e5+;M3ict)>GI4o&m^;E&2kzu z>%p2eYnB{+wDbEj7;8(FvqJaP^Y#Vhc)#mBr_W!QOqpZX`ei{Ny8=75Y>^z(sY9p$ zHjTng7_cCK00IagfB*tc1@s-`J(ck){Zw{W@K!2^sr;9U-Ppmt)LLgiIhPvk{9cuT zDu=6VrCj$_IalRDm6iG%3j!GsSb6diY8QA6 zcG0o=Z6)`dN`=cL_t~y>a>wyD<(mb8>>_y_Y>=ASfo<3UYV_*Z9w5m`W$56 z)mRWf009ItEI>lqv-camL`Y`dS$9)tXS`Ic#=pQPbiw?F?fWd?%K6$a`D9%hhF`t^7A;8!e&s|Nx)3bq}X@6Hp!&pdqj0CkSqeO z1?DVSmUJ6)XGk+Ceyi7{t6kTVc$pFyIcvpTg^UHr6cUw1009ILKmY**5I_Kdcm&q= zfX93)Z68X`HiP1Z zP?8}J1Q0*~0R#|00D<)dnst8iSjGaZFH-2w)wNZHZpwBDAdqDNdMnb~`gV<*RUWHn zQ#XF;F}{<~Is^Qf0$uJT*hWFdjk6ndv$z!K)vH(1ym@n%A=kz_SG{X3*8URk{{wyG zHuULfKEe_cfv5z2{hhs&CSMEUW6#xkZ{k`{Q5`)bivR)$Ab?)uzH zu>C{F)pK~Na(vKro;J1Oskh#B9hP_qL?zHpQ!9=t|I`Y*STj>9qB?R&76AkhKmY** z5I_I{1Q0;LmjE}X@Fg2STy-3j;wBaSbV7A_0M%_U1z8-Y73}&?PL~~a*db}xuAS4E zSr2A6shBp+%cy@lDQGvTusgd2yXaW`u9ADwb%Hy#Y>{+sv$gB6#6ut|fq#ASNiwD8 zout-j)>S`0X+=R}0WQ?*(N#`UAs7M(AbuPkURfa^qvhX4WyAbU?>;Wm?pJwC z;hT!>vm?Yw>gq_b0kmepl5K$j>I8`4{uEW&ZYFAvKq>_W>#Zp08C|JVr&0(YfB*sr zAb;g#r0?uzuYI@#2tsnjxdY#US^Qi5Qu{`H@VgxFj;y z@~8X#*B@5TQ@f;lx5_@{&xQ;Ts83*s-j0}3QJ)TfM*sl?5I_I{1Q0*~0R#}pqyQZi z8EAH$5V4sQqdiykLY@3==6N43!r!SN(_I82MzjLkDD%@p&tA{lry$1n8(pK_li^g)6k)CpoTL z$ZFeaqtgo6u$0^A=gk75I_I{1Q0*~0R#|00D*7?Vi-jnu3mhZ*-VO69@B2E*e838?e3{yUzr02ycys)zsr2q1s}0tg_000M3V=%{ex z7*iDAQ!AQBgmGy^xss}Tfv>v{=`A8NDZJ4V7y)kr!_?9Efw$-cMgRc>5I_I{1Q2j2 zaOG#iMlu$_At<2`KmdWf1?Z^An=I}PbkbywYB0)UEuQum+jZ!B74Yw_gAy+qf%jv{ zRL@(xLB*6<@+D6M5J*0f!8JG_fB*srAbY%APSkIcnn!-N*V{KY zFc#nvRrFmCm3>b!ul5kvOGusk<%ShrWTOc>J(^5EPjpaZ`I0UISr)iWZ!yfK$np_D z-4Q?l0R#|0009ILKmY**8WpJc{QPXA>#;)01S%>jc2<>iljGzFl_o`2?=x-xpvtdx zJk2aJR>?P)#WUJz*OKp;9hOtT?k2yzj@L!yOd{WX>sYm!6bI+K9qS{2fH#4fTW`^X ziFQf%Zk2uN+KF|7x7BOS!nz*hZ}kgQ*H+d4Adet`00IagfB*srAbW`xT9WQd|X#C z1AJ5w0R#|0009ILKmY**5J13}z{kJuyKl&20SX;O@5_t02$W0U6z#aCTsvpKB@?h2 zKz(EFr1(PVH2wcZ$;8+Z0tnCqMyu2djN{~AO z2p~|ez~qTTM=%zkUY;T!sIILlG6I_;fB*srAbf%g5Hvrg0!R$Iv zHhg_|m1Zg|@d&I|s&A?MO=XE>;3ME=m;Qy00IagfB*srAdoVFmC59@84HlIMx{E%3(!$fyde=XAfS`M)={x( z2CABdD(bvp?UFQ9q>wsrtQMliHVEV_uv)J#%W__u6%jxH0R#|0009ILKmY**5I~?_ z0Xiq@rQrbt5GY9C>IcTmU@SmEbZmqG0tg_000IagfB*srAbz^+5I_I{ z1Q0*~0R#|0009ILKmY**5I`Uf0Xiq*Xs3d*96a&H_KXE6h>ndAKmY**5I_I{1Q0*~ z0R#|0009ILKmY**N+du>MTwx<4FU)tfB*srq()%3eJlUYSb)?tClx^e0R#|0009IL 
zKmY**5Gc0*9Tnwn0rrmo0tg_000IagfB*srAb**Ku6 zB?1T_fB*srAb5kf1F0|E#jfB*srAbz^+5I_I{1X3+PM@6cem&zf400IagfB*srAboWU7cj=>*!g>Aw|Y0ZQauU0YS6J+T`E5I_I{1Q0*~0R#|0009ILKmY**d<)Q9 z;aiya2q1s}0tg_000IaUA#iZ@4KFhmpa?)VMF0T=5I_I{1Q0*~f#3z`s0hA^NDu)8 z5I_I{1Q0*~0R#|0009ILK)|uU?ib(nDq{g0qY@4Q1P~}xfR2hn<=74Z1Q0*~0R#|0 z009ILKmY**5I_I{1Q0*~0R##XpmU-iwg@(w)MCnT#sWmpP^5?e0tg_000IagfB*sr zAbeaPXMQzW<2q1s} z0tg_000IagfB*tf2+&&*MKh5k0tg_000IagfB*srAbzj0)wXiv~MJ10qCfR z>A)ga1Q0*~0R#|0009ILKmY**5I_I{1Q0*~0R&PYK<7jX8j&g>fB*s+6?k`L=My3s z3y{$kqS^=`fB*srAb!21hzlv z(&3B+D0zEV*H)E$pX?9;1Q0*~0R#{TNr2vpkXnOW5I_I{1Q0*~0R#|0009ILKmY** z5I`Vx0`u?saVN$Cq^?=1kV64FDjZ@G3IPNVKmY**5I_I{1Q0*~0R#|0009ILKmY** z8W*5*qH!bEKmdV81%B6}>E9R&&?pltAbQ3 zfB*srAbcLR;4}&Abz>1?Z?qZPQXQ1Q0*~0R#|mDX{v4C+}q}fJ;$gA%Fk^2q1s} z0tg_000Iag@VNjT6&wi&Abq#sZW}pZy|$00Iag z5QqRB6@fGaX&`_A0tg_000IagfB*srAbm{2t+QhaGw+Zc;gm-dfckf{jj6EP5$ADgZ>{mQ`ss2 literal 0 HcmV?d00001 diff --git a/docs/_static/logo_white.png b/docs/_static/logo_white.png new file mode 100644 index 0000000000000000000000000000000000000000..16aec842680f35dc3210d983a6f0e3c745216058 GIT binary patch literal 2486726 zcmeF42bfgV`Nq$k*=1SU(whi~QU#S_Ma5MjV2uWdDH=tDrHEkfEVfu;OhN=fEYYa> zYhn_OqOk-EDguHHQ4moqAXR$V-TA*CE`4Wr=GJr0eb4iJ%*?&_ob!IaGwjUV?|kRn z@zXQ<)UMIIhEl3_zrMZCR;pTirJOsfSHqE^Oa8PL5Ba0}{^ByFTD~skjGBAJ@i^G* z@}Hb@`OhvHa`{ykTzZkZ>Z+>_88&LfWfxvB`l3TFxpe6LtB!51RBP3*cVYjl9rg71 zvqn0pR0{Q2NPq-LfCNZ@1W14cNPq-LfCNZ@1W14cNPq-LfCNZ@1V|tp0*P?CVJ{>= z0wh2JBoG?{M?bI%69#^q3lJNp=_m=1011!)36KB@kN^pg011!)36KB@xFx_qgKyZL?21<-6z;wOiJiujE=JthGX zAOR8}0TLhq5+DH*AOR8}0TLhq5+DH*AOR8}fk+83I1wp+c1{8$5CVbJ&mUaNxd0)6 zW)~zt0wh2JBtQZrKmsH{0wh2JBtQZrKmsJ-ionz3&l<^Sg)4NdL;@s00wh2JBtQZr zKmsu)u=cMVF63N*7{3ANIth>f36KB@kN^pg015a@fPo5sd9pPUAOR8}0TLhq5+DH* zAOR8}0TLhq5+DH*a7AFq%IRY{7r+%dRw4lsFq{Ab6^0KW<&yvjkN^pg011!)36KB@ zkN^pg011!)36KB@kN^pgfN2C6oG@*~jMjU7t1q78TmYkokm5;z1W14cNPq-LfCNZ@ z1W14cNPq-LfCNZ@1W3SV0t{6643y20011!)36KB@kU;1J#@9$4$hiQaV`qmXKmsH{ z0wh2JBtQZrKmsH{0{#$Spu!)HY=s0!fCNZ@1W14cNPq-LfCNZ@1W3R#0^fYNup#FH zST?A1hXhD~1V}(n0#A=WYb2u;dX5cMkpKyh011!)36KB@kN^pg011!)36KB@kN^pg z011$QWd!n0xU}8Aa{(Bruxyy=4hfI|36KB@kN^pg011!)36KB@kN^pg011!)36KB@ z=uUva3EfAC`bdBTNPq;4BJlJrwdd?R7r-bND3%0BfCNZ@1W14cNPq-LfCNZ@1W14c zLM6aJMX0#h83~X836KB@kN^pg011!)36Ovb0#DYO@G$2BxB$m0BtQZrKmsH{0wiDp z0R}2e7$s^X0TLhq5+DH*AOR8}0TLhq5+DH*AOR8}0TLhq-w7<7^uhg{3*b9eHkdU5 z1}d^f#tI}r0wh2JBtQZrKmsH{0wh2JBtQZrKmsH{0wh2JBtQb52s}OhtdWdRctXg^ zBtQZh6Zqz`&DA*!aKmsH{ z0wh2JBtQZrKmsI?1%W39?BA1f0kQzbOG$tPNPq-LfCNZ@1W14csvy8XMHPT}ngmFI z1W14cNPq-LfCNZ@1W14cNPq-LfCK_3@aNzEbP4AI1df>O%*A^{R00TQs700R{k4=!CJ0TLhq5+DH*AOR8}0TLhq z5+DH*AOR8}0TLhq5+DIh2;}AuSX1F#00t^F86nCb0TLhq5+DH*AOR8}0TLhq5+DH* zAOR8}0TLhq5+H%N5MXd3E+b7pNq_`MfCPdkFr>|khgLWjAo#A?0|}4-36KB@kN^pg z011!)36KB@kN^poN`Qe1Q%8=vNq_`MfCNZ@1W14cNPq-LASML*x45P~=K{oJu<0fV zkN^pg011#lED11B5zAqx(fqEtg)# zxd1VL6KLc9)8o$?$!LW(!$Bb=KmsH{0wh2JBtQZrKmsH{0wh2JBtQZrKmsH{0wfR* z0t`;XW1Q(F3B-=T!e{#3%DDis8+JNO0wh2JBtQZrKmsH{0wh2JBtQZrKmsISAOQv{ z3>-8{CIJ#40TLhq5+DH*Ac2SooW6ebHqHfz2tK?2t(*%Gvq7i3BtQZrKmsH{0wh2JBtQZrU?qVZE4`vq*%G)C6$X}9T7$<_ zmJM8l>lbHP$EeFEPMkQ-ahy|)5*=T$|12sh!mYb_mSIyC+<^D$nep_VUXwtu1o}1m z*Ywj*xneXvCbRvV3|1IhCjk;50TLhq67ZKm!T%1Kz_|eaGG%KdKmsH{0whoc0R}3n z02G7M4dCPTu-YzteFgOvyR2(OwaJqwH^Yrz3FcHcLU6oA%FV;`NZFrz1);}S7u(BN~NxYat0|37(N7){*G*lj|`9+ zh{Rl{-XOmaJTEn?)IEdNq_`MfCPdjPc6FG=lrm*q?Ed0--A3t0wh2JBtQZr zKmsH{0wh2JBtQZrkSPHMCo&~v*L4MO=VA{oNe0Cm9_njfb;5)RO(F6k?S)d{b^_Qy z`T0a$q1aApsTd_e*vu!}&BQ(93r#r6xZjdrg!GfzE!*VTMR z0wh2JBtQZrKmsH{0wh2JBtQZrkPQI_Dzc$v&$VOV3Nt7w8P8-gc@<7GgQ5~f8l8Hv 
zsHkYVMtVY(l1P+}LWlK2?UbF7Kuid9tM|65o4YElx5&0*CPi90JYg$=25-;#Ack`R zY#mPeMFJ#10wh2JBtQZrKmytjV4y;qp$JWgWK+nEO}FL0sAzRI#gr*inqXG?5VwF> zFI)l`t6RhEQJ>yMOqf>-OI>;S|E!NQR6+up5lA9~;YB{bJCO?OQ7zB+8!Fsq|ym5pmEubQmm*08c`c#Z@}z#W0E z^=7Dg)mBznIi*t9R(TOGAOR8}0TLhq5+DH*AOR8}0TLhq64-|TgA@B8vHCzAcqhRL zx8+4t%<4w)%=Zt#V%=HK1#qj+*LssDPi}~5>P)Hdm1>}k-5hA^8aIB$qaV~%j*{K8 zSxA5cd?t`kWk{)bC9_r?r{Bt%{SMFUVqQf8BtQZrKmsH{0wh2JBtQZrKmsJNg8%~+ zJ4nRiQPze*5~1s2eFjBoX{l^VQOh+URt%HCTiBFhT9_T`)YrzXHHWE`YOIqcY9Rqz z2pn2(rfN{_`;6Vem#OPBzSzGDdsH8LkEwG3{G-5@NPq-LfCNZ@1W14cNPq-LfCQ`{ zz(9o+!$Ij_;hc9?|adj*)GbXP7({B<8 zl7M7T^lBvgFlAm+DRst*cTeq+c@17k0wh2JBtQZrpf7=AYJL1J=K|FLl5~;Flc0A`J36KB@kN^pg011!)36KB@kN^pgz+MCn zz4Fb!RWcW#(m+LVYV2OE@ukTGYC^o1MeJWdeXCO{?!EWk2Dk&xlnO)M<%vY%CPSq9 zE_2lQg?XuzbA|5>vq2Id0p&O;b$lb2flx=CwR%RsLx_KaisCnra4_SQB+EJe;i8pYJOVkoVlF2s)6 z`^D9NJ^g;{ythuRWG;Z7qN%D10teTbtD02*wu-CLo<^7>b#2F@r8{9B!j}sBKC4a*!m2LWHLG2#=*DICu~YF4t}L<^sOqN zhEATHn@rY4P%W13SA~b?Nx%~U2mh~nHGa`k8%V<#uzFUXTk7?A@gr{yu{H^i011%5 zb^@o){oUc=&IL%K-zKpAVKOnK1*~ql)hmD8u|{kGUhh5eZwroXgq6Wo?`6yIdI{{C z^2behWFY}v3CRD0mN5B$AdgL9JN^&IV=heo5A0p!{V)Fy*1%Sko4ohGF1N4n=z1my z>avgk36KB@SWJL{3X2D~+_e#SZ0x~JxhZUQ!O}sO!9iwIm@&%lBb(wGGo<>jGMT7* z0fH2*{co8qk^l*Gtn-0tR$cBLc`Avr(!^D0;apFxurdjd011#l5Cr7={$a3=u=cP{ z<&P4S2(lDVG*c-j=I7S2UJ~P@_(TNOoDPjm9=R2Q`kJ% zhp=~H^4{M7V<7<&h=4%iqPo8dcP>B#PO?km2sl`a-#F0}oG}5(rdR}X8>pCyx~F8^ zn}HX~eiX>2_yW=ys4!#v`WF=y{lg5Y{;K>jo1#Fz+#ld?%WRDVNI(b-JLoR8U%pK8 z^IS^HRH9SEBcA)rb4zT11W14cNWd)t$&8R2u{~iu%B>w9_s)R8AuY;csF+!9)A1-b zaZ6wk756Vy#jj;CIJl=5+DH*Ab}VW(BT6q zMz}+Fbr^bd8<-HW)@M*4FmWmTWMD!@TmLd|*|KH-tXk%EAAgVJ`qrvn@5267UtPf! z#i_9s9M$bmGEwK((8NfEZUaL-RuJe==Oe!&6~ZH#a8g&}agY^o=mZIn0122wKvE_8 zzzSi#V6sMyDWx@piVc8CSr2<1CUT_dXf;SYbPo1dTm z1=5Lv%xyJ-g&`t;fi#N08KTu!nFF6v$%bneN`ACV0c0To67ZS8kT&rM zsmj_H9PuK5RQrZB77ew5m#tw59ld?^s*@LbnhW5cQybCLT$ntvkiZTC^8e;2*p8dA z5~#4X$Oo3(yp?rO-+;+7am z0$<#^AZb6l|NWKq%eL;;*k(N6wA<@#crJlw77`$VN(eBmq7pcUpBfJRZi5u!`b~!W zU!{BmDK4`iNKsHwpjxzOp{iA@rq--kqn0dLqDo4vOA%ktph1JDsvJ~HFKGC~!e1zd zK?+NUmu^K&pl$6>{0>rxN4cd=LNY0aM(iHDH-kV6$dsEeCzv5OGL`Gl^L4QCkttxu zx)6|?V{-HLSFq-~hzeLo8?-;B+?L`|Chz6mykP+4+aMdF)n*+ibf$(tAj-Z8nw~=6m1Tf^e1UKaI z2N)KM*lw~L{f_7ID1o}~VY>%G@REfD>>$953OmMeZ!aX9;&YhWFYfXs;IzFPwdwWb z$&(8Zq*!cnpaN@Gsh&N1s*W8yD#wxUr$3gB8#k(H)26AfzWQp%AsZggLpFun#ED%Q zH%=v%wOIH*9G4&EEF?ezJ`)(+<`1fMP5H?5y=+UAsdjabn!4Ecmf4_91oGgF>>}F9 zCZ_`SWf0nbDBwQWynX~^y;ccM$oO~DPhMn-K0(h9mYdwnVu=p{`OSX;Y!s|ReB9o? 
Date: Tue, 21 Oct 2025 20:43:12 +0200
Subject: [PATCH 113/145] Update workflow file to host `docs`.

---
 .github/workflows/deploy-docs.yaml | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/.github/workflows/deploy-docs.yaml b/.github/workflows/deploy-docs.yaml
index 3ef75f9a..da18b59a 100644
--- a/.github/workflows/deploy-docs.yaml
+++ b/.github/workflows/deploy-docs.yaml
@@ -33,12 +33,12 @@ jobs:
 
       - name: Build Sphinx documentation
         run: |
-          sphinx-build docs docs/build -b dirhtml
+          sphinx-build docs docs/_build -b dirhtml
 
       - name: Upload artifact
         uses: actions/upload-pages-artifact@v4
         with:
-          path: 'docs/_build/html'
+          path: 'docs/_build'
 
   deploy:
     environment:

From 4e4d3e53a83fc77362cc3dcf64f1d8e962ad0f56 Mon Sep 17 00:00:00 2001
From: Daniel Dauner
Date: Tue, 21 Oct 2025 21:13:37 +0200
Subject: [PATCH 114/145] Adjust workflow file.

---
 .github/workflows/deploy-docs.yaml | 12 +++++++-----
 1 file changed, 7 insertions(+), 5 deletions(-)

diff --git a/.github/workflows/deploy-docs.yaml b/.github/workflows/deploy-docs.yaml
index da18b59a..e9598cce 100644
--- a/.github/workflows/deploy-docs.yaml
+++ b/.github/workflows/deploy-docs.yaml
@@ -41,12 +41,14 @@ jobs:
         path: 'docs/_build'
 
   deploy:
-    environment:
-      name: github-pages
-      url: ${{ steps.deployment.outputs.page_url }}
-    runs-on: ubuntu-latest
     needs: build
+    runs-on: ubuntu-latest
+    permissions:
+      pages: write
+      id-token: write
     steps:
       - name: Deploy to GitHub Pages
-        id: deployment
         uses: actions/deploy-pages@v3
+        with:
+          branch: gh-pages
+          folder: docs/_build

From a77f315b15fa84a76c8cca4922f66218eb8c7a5d Mon Sep 17 00:00:00 2001
From: Daniel Dauner <50077664+DanielDauner@users.noreply.github.com>
Date: Tue, 21 Oct 2025 21:15:28 +0200
Subject: [PATCH 115/145] Change deployment branch to dev_v0.0.7

---
 .github/workflows/deploy-docs.yaml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/.github/workflows/deploy-docs.yaml b/.github/workflows/deploy-docs.yaml
index e9598cce..d2ebb719 100644
--- a/.github/workflows/deploy-docs.yaml
+++ b/.github/workflows/deploy-docs.yaml
@@ -50,5 +50,5 @@ jobs:
       - name: Deploy to GitHub Pages
         uses: actions/deploy-pages@v3
         with:
-          branch: gh-pages
+          branch: dev_v0.0.7
           folder: docs/_build

From b2e6ef5840b6ebf8e8dab7b65ae8ea81cf64438f Mon Sep 17 00:00:00 2001
From: Daniel Dauner
Date: Tue, 21 Oct 2025 21:25:48 +0200
Subject: [PATCH 116/145] Test another modification to the workflow file.

---
 .github/workflows/deploy-docs.yaml | 14 +++++++-------
 1 file changed, 7 insertions(+), 7 deletions(-)

diff --git a/.github/workflows/deploy-docs.yaml b/.github/workflows/deploy-docs.yaml
index d2ebb719..1bf5e6ab 100644
--- a/.github/workflows/deploy-docs.yaml
+++ b/.github/workflows/deploy-docs.yaml
@@ -41,14 +41,14 @@ jobs:
         path: 'docs/_build'
 
   deploy:
-    needs: build
+    environment:
+      name: github-pages
+      url: ${{steps.deployment.outputs.page_url}}
     runs-on: ubuntu-latest
-    permissions:
-      pages: write
-      id-token: write
+    needs: build
     steps:
       - name: Deploy to GitHub Pages
-        uses: actions/deploy-pages@v3
+        id: deployment
+        uses: actions/deploy-pages@v4
         with:
-          branch: dev_v0.0.7
-          folder: docs/_build
+          publish_dir: './docs/_build'

From 635521d151aaec58587231fd117a680d53b2f2e7 Mon Sep 17 00:00:00 2001
From: Daniel Dauner
Date: Thu, 23 Oct 2025 20:52:25 +0200
Subject: [PATCH 117/145] Move tests out of `src`. Refactor detection types.

---
 notebooks/bev_matplotlib.ipynb                 | 10 +--
 .../datasets/av2/av2_sensor_converter.py       |  8 +-
 .../datasets/av2/utils/av2_constants.py        | 62 +++++++-------
 .../datasets/nuplan/nuplan_converter.py        |  8 +-
 .../datasets/nuplan/utils/nuplan_constants.py  | 18 ++--
 .../nuplan/utils/nuplan_sql_helper.py          |  6 +-
 .../datasets/pandaset/pandaset_constants.py    | 58 ++++++-------
 .../datasets/pandaset/pandaset_converter.py    |  4 +-
 .../datasets/wopd/utils/wopd_constants.py      | 14 +--
 .../datasets/wopd/wopd_converter.py            |  4 +-
 .../log_writer/abstract_log_writer.py          |  3 +-
 .../conversion/log_writer/arrow_log_writer.py  |  5 +-
 .../detections/box_detection_types.py          | 72 ++++++++++++++++
 .../{detection.py => box_detections.py}        | 63 ++------------
 .../datatypes/detections/detection_types.py    | 41 ---------
 .../detections/traffic_light_detections.py     | 48 +++++++++++
 src/py123d/datatypes/scene/abstract_scene.py   |  7 +-
 .../datatypes/scene/arrow/arrow_scene.py       |  9 +-
 .../scene/arrow/utils/arrow_getters.py         |  8 +-
 src/py123d/datatypes/sensors/__init__.py       |  9 ++
 .../datatypes/sensors/lidar/lidar_index.py     | 85 +++++++++++++++++++
 .../datatypes/vehicle_state/ego_state.py       |  8 +-
 src/py123d/visualization/color/default.py      | 24 +++---
 src/py123d/visualization/matplotlib/camera.py  |  8 +-
 .../visualization/matplotlib/observation.py    | 15 ++--
 .../viser/elements/detection_elements.py       |  6 +-
 .../geometry/test => tests/unit}/__init__.py   |  0
 .../test => tests/unit/common}/__init__.py     |  0
 .../unit/common/multithreading}/__init__.py    |  0
 .../unit/common/utils/__init__.py              |  0
 tests/unit/conversion/__init__.py              |  0
 tests/unit/datatypes/.gitkeep                  |  0
 tests/unit/datatypes/__init__.py               |  0
 tests/unit/datatypes/detections/__init__.py    |  0
 tests/unit/datatypes/maps/__init__.py          |  0
 tests/unit/datatypes/sensors/__init__.py       |  0
 tests/unit/datatypes/time/__init__.py          |  0
 .../unit/datatypes/vehicle_state/__init__.py   |  0
 tests/unit/geometry/__init__.py                |  0
 .../unit/geometry}/test_bounding_box.py        |  0
 .../unit/geometry}/test_occupancy_map.py       |  0
 .../unit/geometry}/test_point.py               |  0
 .../unit/geometry}/test_polyline.py            |  0
 .../unit/geometry}/test_rotation.py            |  0
 .../unit/geometry}/test_vector.py              |  0
 tests/unit/geometry/transform/__init__.py      |  0
 .../transform}/test_transform_consistency.py   |  0
 .../transform}/test_transform_euler_se3.py     |  0
 .../geometry/transform}/test_transform_se2.py  |  0
 .../geometry/transform}/test_transform_se3.py  |  0
 tests/unit/geometry/utils/__init__.py          |  0
 .../utils}/test_bounding_box_utils.py          |  0
 .../geometry/utils/test_polyline_utils.py      |  0
 .../geometry/utils}/test_rotation_utils.py     |  0
 tests/unit/visualization/.gitkeep              |  0
 55 files changed, 359 insertions(+), 244 deletions(-)
 create mode 100644 src/py123d/datatypes/detections/box_detection_types.py
 rename src/py123d/datatypes/detections/{detection.py => box_detections.py} (67%)
 delete mode 100644 src/py123d/datatypes/detections/detection_types.py
 create mode 100644 src/py123d/datatypes/detections/traffic_light_detections.py
 create mode 100644 src/py123d/datatypes/sensors/lidar/lidar_index.py
 rename {src/py123d/geometry/test => tests/unit}/__init__.py (100%)
 rename {src/py123d/geometry/transform/test => tests/unit/common}/__init__.py (100%)
 rename {src/py123d/geometry/utils/test => tests/unit/common/multithreading}/__init__.py (100%)
 rename src/py123d/geometry/utils/test/test_polyline_utils.py => tests/unit/common/utils/__init__.py (100%)
 create mode 100644 tests/unit/conversion/__init__.py
 create mode 100644 tests/unit/datatypes/.gitkeep
 create mode 100644 tests/unit/datatypes/__init__.py
 create mode 100644 tests/unit/datatypes/detections/__init__.py
 create mode 100644 tests/unit/datatypes/maps/__init__.py
 create mode 100644 tests/unit/datatypes/sensors/__init__.py
 create mode 100644 tests/unit/datatypes/time/__init__.py
 create mode 100644 tests/unit/datatypes/vehicle_state/__init__.py
 create mode 100644 tests/unit/geometry/__init__.py
 rename {src/py123d/geometry/test => tests/unit/geometry}/test_bounding_box.py (100%)
 rename {src/py123d/geometry/test => tests/unit/geometry}/test_occupancy_map.py (100%)
 rename {src/py123d/geometry/test => tests/unit/geometry}/test_point.py (100%)
 rename {src/py123d/geometry/test => tests/unit/geometry}/test_polyline.py (100%)
 rename {src/py123d/geometry/test => tests/unit/geometry}/test_rotation.py (100%)
 rename {src/py123d/geometry/test => tests/unit/geometry}/test_vector.py (100%)
 create mode 100644 tests/unit/geometry/transform/__init__.py
 rename {src/py123d/geometry/transform/test => tests/unit/geometry/transform}/test_transform_consistency.py (100%)
 rename {src/py123d/geometry/transform/test => tests/unit/geometry/transform}/test_transform_euler_se3.py (100%)
 rename {src/py123d/geometry/transform/test => tests/unit/geometry/transform}/test_transform_se2.py (100%)
 rename {src/py123d/geometry/transform/test => tests/unit/geometry/transform}/test_transform_se3.py (100%)
 create mode 100644 tests/unit/geometry/utils/__init__.py
 rename {src/py123d/geometry/utils/test => tests/unit/geometry/utils}/test_bounding_box_utils.py (100%)
 create mode 100644 tests/unit/geometry/utils/test_polyline_utils.py
 rename {src/py123d/geometry/utils/test => tests/unit/geometry/utils}/test_rotation_utils.py (100%)
 create mode 100644 tests/unit/visualization/.gitkeep

diff --git a/notebooks/bev_matplotlib.ipynb b/notebooks/bev_matplotlib.ipynb
index 16125a71..bddd36d6 100644
--- a/notebooks/bev_matplotlib.ipynb
+++ b/notebooks/bev_matplotlib.ipynb
@@ -24,9 +24,9 @@
     "\n",
     "# splits = [\"wopd_val\"]\n",
     "# splits = [\"carla_test\"]\n",
-    "# splits = [\"nuplan-mini_test\"]\n",
+    "splits = [\"nuplan-mini_test\"]\n",
     "# splits = [\"av2-sensor-mini_train\"]\n",
-    "splits = [\"pandaset_train\"]\n",
+    "# splits = [\"pandaset_train\"]\n",
     "# log_names = None\n",
     "\n",
     "\n",
@@ -217,8 +217,8 @@
     "    add_default_map_on_ax(ax, map_api, point_2d, radius=radius, route_lane_group_ids=None)\n",
     "    # add_traffic_lights_to_ax(ax, traffic_light_detections, scene.get_map_api())\n",
     "\n",
-    "    add_box_detections_to_ax(ax, box_detections)\n",
-    "    add_ego_vehicle_to_ax(ax, ego_vehicle_state)\n",
+    "    # add_box_detections_to_ax(ax, box_detections)\n",
+    "    # add_ego_vehicle_to_ax(ax, ego_vehicle_state)\n",
     "\n",
     "    zoom = 1.0\n",
     "    ax.set_xlim(point_2d.x - radius * zoom, point_2d.x + radius * zoom)\n",
@@ -245,7 +245,7 @@
     "scale = 10\n",
     "fig, ax = plt.subplots(1, 1, figsize=(scale, scale))\n",
    "scene = np.random.choice(scenes)\n",
-    "_plot_scene_on_ax(ax, scene, iteration, radius=30)\n",
+    "_plot_scene_on_ax(ax, scene, iteration, radius=80)\n",
     "# _plot_scene_on_ax(ax[1], scene, iteration, radius=50)\n",
     "# _plot_scene_on_ax(ax[2], scene, iteration, radius=100)\n",
     "\n",
diff --git a/src/py123d/conversion/datasets/av2/av2_sensor_converter.py b/src/py123d/conversion/datasets/av2/av2_sensor_converter.py
index 233cba1a..bf48f59d 100644
--- a/src/py123d/conversion/datasets/av2/av2_sensor_converter.py
+++ b/src/py123d/conversion/datasets/av2/av2_sensor_converter.py
@@ -24,8 +24,8 @@
 from py123d.conversion.log_writer.abstract_log_writer import AbstractLogWriter
 from py123d.conversion.map_writer.abstract_map_writer import AbstractMapWriter
 from py123d.conversion.utils.sensor_utils.lidar_index_registry import AVSensorLidarIndex
-from py123d.datatypes.detections.detection import BoxDetectionMetadata, BoxDetectionSE3, BoxDetectionWrapper
-from py123d.datatypes.detections.detection_types import DetectionType
+from py123d.datatypes.detections.box_detection_types import BoxDetectionType
+from py123d.datatypes.detections.box_detections import BoxDetectionMetadata, BoxDetectionSE3, BoxDetectionWrapper
 from py123d.datatypes.maps.map_metadata import MapMetadata
 from py123d.datatypes.scene.scene_metadata import LogMetadata
 from py123d.datatypes.sensors.camera.pinhole_camera import (
@@ -261,7 +261,7 @@ def _extract_av2_sensor_box_detections(
     detections_state = np.zeros((num_detections, len(BoundingBoxSE3Index)), dtype=np.float64)
     detections_velocity = np.zeros((num_detections, len(Vector3DIndex)), dtype=np.float64)
     detections_token: List[str] = annotations_slice["track_uuid"].tolist()
-    detections_types: List[DetectionType] = []
+    detections_types: List[BoxDetectionType] = []
 
     for detection_idx, (_, row) in enumerate(annotations_slice.iterrows()):
         row = row.to_dict()
@@ -283,7 +283,7 @@ def _extract_av2_sensor_box_detections(
         box_detections.append(
             BoxDetectionSE3(
                 metadata=BoxDetectionMetadata(
-                    detection_type=detections_types[detection_idx],
+                    box_detection_type=detections_types[detection_idx],
                     timepoint=None,
                     track_token=detections_token[detection_idx],
                     confidence=None,
diff --git a/src/py123d/conversion/datasets/av2/utils/av2_constants.py b/src/py123d/conversion/datasets/av2/utils/av2_constants.py
index 56a6285f..7f81f48c 100644
--- a/src/py123d/conversion/datasets/av2/utils/av2_constants.py
+++ b/src/py123d/conversion/datasets/av2/utils/av2_constants.py
@@ -1,7 +1,7 @@
 from typing import Dict, Final, Set
 
 from py123d.common.utils.enums import SerialIntEnum
-from py123d.datatypes.detections.detection_types import DetectionType
+from py123d.datatypes.detections.box_detection_types import BoxDetectionType
 from py123d.datatypes.maps.map_datatypes import RoadLineType
 from py123d.datatypes.sensors.camera.pinhole_camera import PinholeCameraType
 
@@ -46,36 +46,36 @@ class AV2SensorBoxDetectionType(SerialIntEnum):
 # Mapping from AV2SensorBoxDetectionType to general DetectionType
 # TODO: Change the detection types. Multiple mistakes, e.g. animals/dogs are not generic objects.
 AV2_TO_DETECTION_TYPE = {
-    AV2SensorBoxDetectionType.ANIMAL: DetectionType.GENERIC_OBJECT,
-    AV2SensorBoxDetectionType.ARTICULATED_BUS: DetectionType.VEHICLE,
-    AV2SensorBoxDetectionType.BICYCLE: DetectionType.BICYCLE,
-    AV2SensorBoxDetectionType.BICYCLIST: DetectionType.PEDESTRIAN,
-    AV2SensorBoxDetectionType.BOLLARD: DetectionType.BARRIER,
-    AV2SensorBoxDetectionType.BOX_TRUCK: DetectionType.VEHICLE,
-    AV2SensorBoxDetectionType.BUS: DetectionType.VEHICLE,
-    AV2SensorBoxDetectionType.CONSTRUCTION_BARREL: DetectionType.BARRIER,
-    AV2SensorBoxDetectionType.CONSTRUCTION_CONE: DetectionType.TRAFFIC_CONE,
-    AV2SensorBoxDetectionType.DOG: DetectionType.GENERIC_OBJECT,
-    AV2SensorBoxDetectionType.LARGE_VEHICLE: DetectionType.VEHICLE,
-    AV2SensorBoxDetectionType.MESSAGE_BOARD_TRAILER: DetectionType.VEHICLE,
-    AV2SensorBoxDetectionType.MOBILE_PEDESTRIAN_CROSSING_SIGN: DetectionType.CZONE_SIGN,
-    AV2SensorBoxDetectionType.MOTORCYCLE: DetectionType.BICYCLE,
-    AV2SensorBoxDetectionType.MOTORCYCLIST: DetectionType.BICYCLE,
-    AV2SensorBoxDetectionType.OFFICIAL_SIGNALER: DetectionType.PEDESTRIAN,
-    AV2SensorBoxDetectionType.PEDESTRIAN: DetectionType.PEDESTRIAN,
-    AV2SensorBoxDetectionType.RAILED_VEHICLE: DetectionType.VEHICLE,
-    AV2SensorBoxDetectionType.REGULAR_VEHICLE: DetectionType.VEHICLE,
-    AV2SensorBoxDetectionType.SCHOOL_BUS: DetectionType.VEHICLE,
-    AV2SensorBoxDetectionType.SIGN: DetectionType.SIGN,
-    AV2SensorBoxDetectionType.STOP_SIGN: DetectionType.SIGN,
-    AV2SensorBoxDetectionType.STROLLER: DetectionType.PEDESTRIAN,
-    AV2SensorBoxDetectionType.TRAFFIC_LIGHT_TRAILER: DetectionType.VEHICLE,
-    AV2SensorBoxDetectionType.TRUCK: DetectionType.VEHICLE,
-    AV2SensorBoxDetectionType.TRUCK_CAB: DetectionType.VEHICLE,
-    AV2SensorBoxDetectionType.VEHICULAR_TRAILER: DetectionType.VEHICLE,
-    AV2SensorBoxDetectionType.WHEELCHAIR: DetectionType.PEDESTRIAN,
-    AV2SensorBoxDetectionType.WHEELED_DEVICE: DetectionType.GENERIC_OBJECT,
-    AV2SensorBoxDetectionType.WHEELED_RIDER: DetectionType.BICYCLE,
+    AV2SensorBoxDetectionType.ANIMAL: BoxDetectionType.GENERIC_OBJECT,
+    AV2SensorBoxDetectionType.ARTICULATED_BUS: BoxDetectionType.VEHICLE,
+    AV2SensorBoxDetectionType.BICYCLE: BoxDetectionType.BICYCLE,
+    AV2SensorBoxDetectionType.BICYCLIST: BoxDetectionType.PEDESTRIAN,
+    AV2SensorBoxDetectionType.BOLLARD: BoxDetectionType.BARRIER,
+    AV2SensorBoxDetectionType.BOX_TRUCK: BoxDetectionType.VEHICLE,
+    AV2SensorBoxDetectionType.BUS: BoxDetectionType.VEHICLE,
+    AV2SensorBoxDetectionType.CONSTRUCTION_BARREL: BoxDetectionType.BARRIER,
+    AV2SensorBoxDetectionType.CONSTRUCTION_CONE: BoxDetectionType.TRAFFIC_CONE,
+    AV2SensorBoxDetectionType.DOG: BoxDetectionType.GENERIC_OBJECT,
+    AV2SensorBoxDetectionType.LARGE_VEHICLE: BoxDetectionType.VEHICLE,
+    AV2SensorBoxDetectionType.MESSAGE_BOARD_TRAILER: BoxDetectionType.VEHICLE,
+    AV2SensorBoxDetectionType.MOBILE_PEDESTRIAN_CROSSING_SIGN: BoxDetectionType.CZONE_SIGN,
+    AV2SensorBoxDetectionType.MOTORCYCLE: BoxDetectionType.BICYCLE,
+    AV2SensorBoxDetectionType.MOTORCYCLIST: BoxDetectionType.BICYCLE,
+    AV2SensorBoxDetectionType.OFFICIAL_SIGNALER: BoxDetectionType.PEDESTRIAN,
+    AV2SensorBoxDetectionType.PEDESTRIAN: BoxDetectionType.PEDESTRIAN,
+    AV2SensorBoxDetectionType.RAILED_VEHICLE: BoxDetectionType.VEHICLE,
+    AV2SensorBoxDetectionType.REGULAR_VEHICLE: BoxDetectionType.VEHICLE,
+    AV2SensorBoxDetectionType.SCHOOL_BUS: BoxDetectionType.VEHICLE,
+    AV2SensorBoxDetectionType.SIGN: BoxDetectionType.SIGN,
+    AV2SensorBoxDetectionType.STOP_SIGN: BoxDetectionType.SIGN,
+    AV2SensorBoxDetectionType.STROLLER: BoxDetectionType.PEDESTRIAN,
+    AV2SensorBoxDetectionType.TRAFFIC_LIGHT_TRAILER: BoxDetectionType.VEHICLE,
+    AV2SensorBoxDetectionType.TRUCK: BoxDetectionType.VEHICLE,
+    AV2SensorBoxDetectionType.TRUCK_CAB: BoxDetectionType.VEHICLE,
+    AV2SensorBoxDetectionType.VEHICULAR_TRAILER: BoxDetectionType.VEHICLE,
+    AV2SensorBoxDetectionType.WHEELCHAIR: BoxDetectionType.PEDESTRIAN,
+    AV2SensorBoxDetectionType.WHEELED_DEVICE: BoxDetectionType.GENERIC_OBJECT,
+    AV2SensorBoxDetectionType.WHEELED_RIDER: BoxDetectionType.BICYCLE,
 }
 
 
diff --git a/src/py123d/conversion/datasets/nuplan/nuplan_converter.py b/src/py123d/conversion/datasets/nuplan/nuplan_converter.py
index da4b4cc9..120d66b8 100644
--- a/src/py123d/conversion/datasets/nuplan/nuplan_converter.py
+++ b/src/py123d/conversion/datasets/nuplan/nuplan_converter.py
@@ -24,12 +24,8 @@
 from py123d.conversion.log_writer.abstract_log_writer import AbstractLogWriter
 from py123d.conversion.map_writer.abstract_map_writer import AbstractMapWriter
 from py123d.conversion.utils.sensor_utils.lidar_index_registry import NuPlanLidarIndex
-from py123d.datatypes.detections.detection import (
-    BoxDetectionSE3,
-    BoxDetectionWrapper,
-    TrafficLightDetection,
-    TrafficLightDetectionWrapper,
-)
+from py123d.datatypes.detections.box_detections import BoxDetectionSE3, BoxDetectionWrapper
+from py123d.datatypes.detections.traffic_light_detections import TrafficLightDetection, TrafficLightDetectionWrapper
 from py123d.datatypes.maps.map_metadata import MapMetadata
 from py123d.datatypes.scene.scene_metadata import LogMetadata
 from py123d.datatypes.sensors.camera.pinhole_camera import (
diff --git a/src/py123d/conversion/datasets/nuplan/utils/nuplan_constants.py b/src/py123d/conversion/datasets/nuplan/utils/nuplan_constants.py
index 5190e84e..bf385f15 100644
--- a/src/py123d/conversion/datasets/nuplan/utils/nuplan_constants.py
+++ b/src/py123d/conversion/datasets/nuplan/utils/nuplan_constants.py
@@ -1,8 +1,8 @@
 from enum import IntEnum
 from typing import Dict, Final, List, Set
 
-from py123d.datatypes.detections.detection import TrafficLightStatus
-from py123d.datatypes.detections.detection_types import DetectionType
+from py123d.datatypes.detections.box_detection_types import BoxDetectionType
+from py123d.datatypes.detections.traffic_light_detections import TrafficLightStatus
 from py123d.datatypes.maps.map_datatypes import RoadLineType
 from py123d.datatypes.time.time_point import TimePoint
 
@@ -28,13 +28,13 @@ class NuPlanBoxDetectionType(IntEnum):
 
 
 NUPLAN_DETECTION_NAME_DICT = {
-    "vehicle": DetectionType.VEHICLE,
-    "bicycle": DetectionType.BICYCLE,
-    "pedestrian": DetectionType.PEDESTRIAN,
-    "traffic_cone": DetectionType.TRAFFIC_CONE,
-    "barrier": DetectionType.BARRIER,
-    "czone_sign": DetectionType.CZONE_SIGN,
-    "generic_object": DetectionType.GENERIC_OBJECT,
+    "vehicle": BoxDetectionType.VEHICLE,
+    "bicycle": BoxDetectionType.BICYCLE,
+    "pedestrian": BoxDetectionType.PEDESTRIAN,
+    "traffic_cone": BoxDetectionType.TRAFFIC_CONE,
+    "barrier": BoxDetectionType.BARRIER,
+    "czone_sign": BoxDetectionType.CZONE_SIGN,
+    "generic_object": BoxDetectionType.GENERIC_OBJECT,
 }
 
 
diff --git a/src/py123d/conversion/datasets/nuplan/utils/nuplan_sql_helper.py b/src/py123d/conversion/datasets/nuplan/utils/nuplan_sql_helper.py
index 99d044ec..3f2089e2 100644
--- a/src/py123d/conversion/datasets/nuplan/utils/nuplan_sql_helper.py
+++ b/src/py123d/conversion/datasets/nuplan/utils/nuplan_sql_helper.py
@@ -2,7 +2,7 @@
 
 from py123d.common.utils.dependencies import check_dependencies
 from py123d.conversion.datasets.nuplan.utils.nuplan_constants import NUPLAN_DETECTION_NAME_DICT
-from py123d.datatypes.detections.detection import BoxDetectionMetadata, BoxDetectionSE3
+from py123d.datatypes.detections.box_detections import BoxDetectionMetadata, BoxDetectionSE3
 from py123d.geometry import BoundingBoxSE3, EulerAngles, StateSE3, Vector3D
 from py123d.geometry.utils.constants import DEFAULT_PITCH, DEFAULT_ROLL
@@ -57,10 +57,8 @@ def get_box_detections_for_lidarpc_token_from_db(log_file: str, token: str) -> L
         )
         box_detection = BoxDetectionSE3(
             metadata=BoxDetectionMetadata(
-                detection_type=NUPLAN_DETECTION_NAME_DICT[row["category_name"]],
-                timepoint=None,  # NOTE: Timepoint is not needed during writing, set to None
+                box_detection_type=NUPLAN_DETECTION_NAME_DICT[row["category_name"]],
                 track_token=row["track_token"].hex(),
-                confidence=None,  # NOTE: Not currently written, requires refactoring
             ),
             bounding_box_se3=bounding_box,
             velocity=Vector3D(x=row["vx"], y=row["vy"], z=row["vz"]),
diff --git a/src/py123d/conversion/datasets/pandaset/pandaset_constants.py b/src/py123d/conversion/datasets/pandaset/pandaset_constants.py
index bae1c61c..93ef4bc8 100644
--- a/src/py123d/conversion/datasets/pandaset/pandaset_constants.py
+++ b/src/py123d/conversion/datasets/pandaset/pandaset_constants.py
@@ -1,7 +1,7 @@
 from typing import Dict, List
 
 from py123d.common.utils.enums import SerialIntEnum
-from py123d.datatypes.detections.detection_types import DetectionType
+from py123d.datatypes.detections.box_detection_types import BoxDetectionType
 from py123d.datatypes.sensors.camera.pinhole_camera import PinholeCameraType, PinholeDistortion, PinholeIntrinsics
 from py123d.datatypes.sensors.lidar.lidar import LiDARType
 from py123d.geometry import StateSE3
@@ -82,34 +82,34 @@ class PandasetBoxDetectionType(SerialIntEnum):
 }
 
 
-PANDASET_BOX_DETECTION_TO_DEFAULT: Dict[PandasetBoxDetectionType, DetectionType] = {
-    PandasetBoxDetectionType.ANIMALS_BIRD: DetectionType.GENERIC_OBJECT,  # TODO: Adjust default types
-    PandasetBoxDetectionType.ANIMALS_OTHER: DetectionType.GENERIC_OBJECT,  # TODO: Adjust default types
-    PandasetBoxDetectionType.BICYCLE: DetectionType.BICYCLE,
-    PandasetBoxDetectionType.BUS: DetectionType.VEHICLE,
-    PandasetBoxDetectionType.CAR: DetectionType.VEHICLE,
-    PandasetBoxDetectionType.CONES: DetectionType.TRAFFIC_CONE,
-    PandasetBoxDetectionType.CONSTRUCTION_SIGNS: DetectionType.CZONE_SIGN,
-    PandasetBoxDetectionType.EMERGENCY_VEHICLE: DetectionType.VEHICLE,
-    PandasetBoxDetectionType.MEDIUM_SIZED_TRUCK: DetectionType.VEHICLE,
-    PandasetBoxDetectionType.MOTORCYCLE: DetectionType.BICYCLE,
-    PandasetBoxDetectionType.MOTORIZED_SCOOTER: DetectionType.BICYCLE,
-    PandasetBoxDetectionType.OTHER_VEHICLE_CONSTRUCTION_VEHICLE: DetectionType.VEHICLE,
-    PandasetBoxDetectionType.OTHER_VEHICLE_PEDICAB: DetectionType.BICYCLE,
-    PandasetBoxDetectionType.OTHER_VEHICLE_UNCOMMON: DetectionType.VEHICLE,
-    PandasetBoxDetectionType.PEDESTRIAN: DetectionType.PEDESTRIAN,
-    PandasetBoxDetectionType.PEDESTRIAN_WITH_OBJECT: DetectionType.PEDESTRIAN,
-    PandasetBoxDetectionType.PERSONAL_MOBILITY_DEVICE: DetectionType.BICYCLE,
-    PandasetBoxDetectionType.PICKUP_TRUCK: DetectionType.VEHICLE,
-    PandasetBoxDetectionType.PYLONS: DetectionType.TRAFFIC_CONE,
-    PandasetBoxDetectionType.ROAD_BARRIERS: DetectionType.BARRIER,
-    PandasetBoxDetectionType.ROLLING_CONTAINERS: DetectionType.GENERIC_OBJECT,
-    PandasetBoxDetectionType.SEMI_TRUCK: DetectionType.VEHICLE,
-    PandasetBoxDetectionType.SIGNS: DetectionType.SIGN,
-    PandasetBoxDetectionType.TEMPORARY_CONSTRUCTION_BARRIERS: DetectionType.BARRIER,
-    PandasetBoxDetectionType.TOWED_OBJECT: DetectionType.VEHICLE,
-    PandasetBoxDetectionType.TRAIN: DetectionType.GENERIC_OBJECT,  # TODO: Adjust default types
-    PandasetBoxDetectionType.TRAM_SUBWAY: DetectionType.GENERIC_OBJECT,  # TODO: Adjust default types
+PANDASET_BOX_DETECTION_TO_DEFAULT: Dict[PandasetBoxDetectionType, BoxDetectionType] = {
+    PandasetBoxDetectionType.ANIMALS_BIRD: BoxDetectionType.GENERIC_OBJECT,  # TODO: Adjust default types
+    PandasetBoxDetectionType.ANIMALS_OTHER: BoxDetectionType.GENERIC_OBJECT,  # TODO: Adjust default types
+    PandasetBoxDetectionType.BICYCLE: BoxDetectionType.BICYCLE,
+    PandasetBoxDetectionType.BUS: BoxDetectionType.VEHICLE,
+    PandasetBoxDetectionType.CAR: BoxDetectionType.VEHICLE,
+    PandasetBoxDetectionType.CONES: BoxDetectionType.TRAFFIC_CONE,
+    PandasetBoxDetectionType.CONSTRUCTION_SIGNS: BoxDetectionType.CZONE_SIGN,
+    PandasetBoxDetectionType.EMERGENCY_VEHICLE: BoxDetectionType.VEHICLE,
+    PandasetBoxDetectionType.MEDIUM_SIZED_TRUCK: BoxDetectionType.VEHICLE,
+    PandasetBoxDetectionType.MOTORCYCLE: BoxDetectionType.BICYCLE,
+    PandasetBoxDetectionType.MOTORIZED_SCOOTER: BoxDetectionType.BICYCLE,
+    PandasetBoxDetectionType.OTHER_VEHICLE_CONSTRUCTION_VEHICLE: BoxDetectionType.VEHICLE,
+    PandasetBoxDetectionType.OTHER_VEHICLE_PEDICAB: BoxDetectionType.BICYCLE,
+    PandasetBoxDetectionType.OTHER_VEHICLE_UNCOMMON: BoxDetectionType.VEHICLE,
+    PandasetBoxDetectionType.PEDESTRIAN: BoxDetectionType.PEDESTRIAN,
+    PandasetBoxDetectionType.PEDESTRIAN_WITH_OBJECT: BoxDetectionType.PEDESTRIAN,
+    PandasetBoxDetectionType.PERSONAL_MOBILITY_DEVICE: BoxDetectionType.BICYCLE,
+    PandasetBoxDetectionType.PICKUP_TRUCK: BoxDetectionType.VEHICLE,
+    PandasetBoxDetectionType.PYLONS: BoxDetectionType.TRAFFIC_CONE,
+    PandasetBoxDetectionType.ROAD_BARRIERS: BoxDetectionType.BARRIER,
+    PandasetBoxDetectionType.ROLLING_CONTAINERS: BoxDetectionType.GENERIC_OBJECT,
+    PandasetBoxDetectionType.SEMI_TRUCK: BoxDetectionType.VEHICLE,
+    PandasetBoxDetectionType.SIGNS: BoxDetectionType.SIGN,
+    PandasetBoxDetectionType.TEMPORARY_CONSTRUCTION_BARRIERS: BoxDetectionType.BARRIER,
+    PandasetBoxDetectionType.TOWED_OBJECT: BoxDetectionType.VEHICLE,
+    PandasetBoxDetectionType.TRAIN: BoxDetectionType.GENERIC_OBJECT,  # TODO: Adjust default types
+    PandasetBoxDetectionType.TRAM_SUBWAY: BoxDetectionType.GENERIC_OBJECT,  # TODO: Adjust default types
 }
 
 # https://github.com/scaleapi/pandaset-devkit/blob/master/docs/static_extrinsic_calibration.yaml
diff --git a/src/py123d/conversion/datasets/pandaset/pandaset_converter.py b/src/py123d/conversion/datasets/pandaset/pandaset_converter.py
index ce9bc588..fa0c50c7 100644
--- a/src/py123d/conversion/datasets/pandaset/pandaset_converter.py
+++ b/src/py123d/conversion/datasets/pandaset/pandaset_converter.py
@@ -27,7 +27,7 @@
 from py123d.conversion.log_writer.abstract_log_writer import AbstractLogWriter
 from py123d.conversion.map_writer.abstract_map_writer import AbstractMapWriter
 from py123d.conversion.utils.sensor_utils.lidar_index_registry import PandasetLidarIndex
-from py123d.datatypes.detections.detection import BoxDetectionMetadata, BoxDetectionSE3, BoxDetectionWrapper
+from py123d.datatypes.detections.box_detections import BoxDetectionMetadata, BoxDetectionSE3, BoxDetectionWrapper
 from py123d.datatypes.scene.scene_metadata import LogMetadata
 from py123d.datatypes.sensors.camera.pinhole_camera import (
     PinholeCameraMetadata,
@@ -322,7 +322,7 @@ def _extract_pandaset_box_detections(
 
         box_detection_se3 = BoxDetectionSE3(
             metadata=BoxDetectionMetadata(
-                detection_type=box_detection_type,
+                box_detection_type=box_detection_type,
                 track_token=box_uuids[box_idx],
             ),
             bounding_box_se3=BoundingBoxSE3.from_array(box_se3_array[box_idx]),
diff --git a/src/py123d/conversion/datasets/wopd/utils/wopd_constants.py b/src/py123d/conversion/datasets/wopd/utils/wopd_constants.py
index 2edf67e7..963a056d 100644
--- a/src/py123d/conversion/datasets/wopd/utils/wopd_constants.py
+++ b/src/py123d/conversion/datasets/wopd/utils/wopd_constants.py
@@ -1,6 +1,6 @@
 from typing import Dict, List
 
-from py123d.datatypes.detections.detection_types import DetectionType
+from py123d.datatypes.detections.box_detection_types import BoxDetectionType
 from py123d.datatypes.maps.map_datatypes import LaneType, RoadEdgeType, RoadLineType
 from py123d.datatypes.sensors.camera.pinhole_camera import PinholeCameraType
 from py123d.datatypes.sensors.lidar.lidar import LiDARType
@@ -12,12 +12,12 @@
 ]
 
 # https://github.com/waymo-research/waymo-open-dataset/blob/master/src/waymo_open_dataset/label.proto#L63
-WOPD_DETECTION_NAME_DICT: Dict[int, DetectionType] = {
-    0: DetectionType.GENERIC_OBJECT,  # TYPE_UNKNOWN
-    1: DetectionType.VEHICLE,  # TYPE_VEHICLE
-    2: DetectionType.PEDESTRIAN,  # TYPE_PEDESTRIAN
-    3: DetectionType.SIGN,  # TYPE_SIGN
-    4: DetectionType.BICYCLE,  # TYPE_CYCLIST
+WOPD_DETECTION_NAME_DICT: Dict[int, BoxDetectionType] = {
+    0: BoxDetectionType.GENERIC_OBJECT,  # TYPE_UNKNOWN
+    1: BoxDetectionType.VEHICLE,  # TYPE_VEHICLE
+    2: BoxDetectionType.PEDESTRIAN,  # TYPE_PEDESTRIAN
+    3: BoxDetectionType.SIGN,  # TYPE_SIGN
+    4: BoxDetectionType.BICYCLE,  # TYPE_CYCLIST
 }
 
 # https://github.com/waymo-research/waymo-open-dataset/blob/master/src/waymo_open_dataset/dataset.proto#L50
diff --git a/src/py123d/conversion/datasets/wopd/wopd_converter.py b/src/py123d/conversion/datasets/wopd/wopd_converter.py
index 801421de..74cc3861 100644
--- a/src/py123d/conversion/datasets/wopd/wopd_converter.py
+++ b/src/py123d/conversion/datasets/wopd/wopd_converter.py
@@ -21,7 +21,7 @@
 from py123d.conversion.map_writer.abstract_map_writer import AbstractMapWriter
 from py123d.conversion.utils.sensor_utils.camera_conventions import CameraConvention, convert_camera_convention
 from py123d.conversion.utils.sensor_utils.lidar_index_registry import DefaultLidarIndex, WOPDLidarIndex
-from py123d.datatypes.detections.detection import BoxDetectionMetadata, BoxDetectionSE3, BoxDetectionWrapper
+from py123d.datatypes.detections.box_detections import BoxDetectionMetadata, BoxDetectionSE3, BoxDetectionWrapper
 from py123d.datatypes.maps.map_metadata import MapMetadata
 from py123d.datatypes.scene.scene_metadata import LogMetadata
 from py123d.datatypes.sensors.camera.pinhole_camera import (
@@ -356,7 +356,7 @@ def _extract_wopd_box_detections(
         box_detections.append(
             BoxDetectionSE3(
                 metadata=BoxDetectionMetadata(
-                    detection_type=detections_types[detection_idx],
+                    box_detection_type=detections_types[detection_idx],
                     timepoint=None,
                     track_token=detections_token[detection_idx],
                    confidence=None,
diff --git a/src/py123d/conversion/log_writer/abstract_log_writer.py b/src/py123d/conversion/log_writer/abstract_log_writer.py
index dc67495a..87c741fb 100644
--- a/src/py123d/conversion/log_writer/abstract_log_writer.py
+++ b/src/py123d/conversion/log_writer/abstract_log_writer.py
@@ -2,7 +2,8 @@
 from typing import Any, Dict, List, Optional, Tuple
 
 from py123d.conversion.dataset_converter_config import DatasetConverterConfig
-from
py123d.datatypes.detections.detection import BoxDetectionWrapper, TrafficLightDetectionWrapper +from py123d.datatypes.detections.box_detections import BoxDetectionWrapper +from py123d.datatypes.detections.traffic_light_detections import TrafficLightDetectionWrapper from py123d.datatypes.scene.scene_metadata import LogMetadata from py123d.datatypes.sensors.camera.pinhole_camera import PinholeCameraType from py123d.datatypes.sensors.lidar.lidar import LiDARType diff --git a/src/py123d/conversion/log_writer/arrow_log_writer.py b/src/py123d/conversion/log_writer/arrow_log_writer.py index 1fdb45b8..6177d821 100644 --- a/src/py123d/conversion/log_writer/arrow_log_writer.py +++ b/src/py123d/conversion/log_writer/arrow_log_writer.py @@ -8,7 +8,8 @@ from py123d.common.utils.uuid_utils import create_deterministic_uuid from py123d.conversion.abstract_dataset_converter import AbstractLogWriter, DatasetConverterConfig from py123d.conversion.log_writer.utils.lidar_compression import compress_lidar_with_laz -from py123d.datatypes.detections.detection import BoxDetectionWrapper, TrafficLightDetectionWrapper +from py123d.datatypes.detections.box_detections import BoxDetectionWrapper +from py123d.datatypes.detections.traffic_light_detections import TrafficLightDetectionWrapper from py123d.datatypes.scene.arrow.utils.arrow_metadata_utils import add_log_metadata_to_arrow_schema from py123d.datatypes.scene.scene_metadata import LogMetadata from py123d.datatypes.sensors.camera.pinhole_camera import PinholeCameraType @@ -124,7 +125,7 @@ def write( box_detection_state.append(box_detection.bounding_box.array) box_detection_velocity.append(box_detection.velocity.array) # TODO: make optional box_detection_token.append(box_detection.metadata.track_token) - box_detection_type.append(int(box_detection.metadata.detection_type)) + box_detection_type.append(int(box_detection.metadata.box_detection_type)) # Add to record batch data record_batch_data["box_detection_state"] = [box_detection_state] diff --git a/src/py123d/datatypes/detections/box_detection_types.py b/src/py123d/datatypes/detections/box_detection_types.py new file mode 100644 index 00000000..cca6fa5e --- /dev/null +++ b/src/py123d/datatypes/detections/box_detection_types.py @@ -0,0 +1,72 @@ +from __future__ import annotations + +from py123d.common.utils.enums import SerialIntEnum + +BOX_DETECTION_TYPE_REGISTRY = {} + + +def register_box_detection_type(enum_class): + BOX_DETECTION_TYPE_REGISTRY[enum_class.__name__] = enum_class + return enum_class + + +class BoxDetectionType(SerialIntEnum): + """ + Enum for agents in py123d. + """ + + # TODO: + # - Add detection types compatible with other datasets + # - Add finer detection types (e.g. bicycle, motorcycle) and add generic types (e.g. two-wheeled vehicle) for general use. + + # NOTE: Current types strongly aligned with nuPlan. + + VEHICLE = 0 # Includes all four or more wheeled vehicles, as well as trailers. + BICYCLE = 1 # Includes bicycles, motorcycles and tricycles. + PEDESTRIAN = 2 # Pedestrians, incl. strollers and wheelchairs. + + TRAFFIC_CONE = 3 # Cones that are temporarily placed to control the flow of traffic. + BARRIER = 4 # Solid barriers that can be either temporary or permanent. + CZONE_SIGN = 5 # Temporary signs that indicate construction zones. + GENERIC_OBJECT = 6 # Animals, debris, pushable/pullable objects, permanent poles. 
+
+    EGO = 7
+    SIGN = 8  # TODO: Remove or extend
+
+
+DYNAMIC_DETECTION_TYPES: set[BoxDetectionType] = {
+    BoxDetectionType.VEHICLE,
+    BoxDetectionType.BICYCLE,
+    BoxDetectionType.PEDESTRIAN,
+}
+
+STATIC_DETECTION_TYPES: set[BoxDetectionType] = {
+    BoxDetectionType.TRAFFIC_CONE,
+    BoxDetectionType.BARRIER,
+    BoxDetectionType.CZONE_SIGN,
+    BoxDetectionType.GENERIC_OBJECT,
+}
+
+
+# @register_box_detection_type
+# class NuPlanBoxDetectionType(SerialIntEnum):
+
+#     VEHICLE = 0
+#     BICYCLE = 1
+#     PEDESTRIAN = 2
+#     TRAFFIC_CONE = 3
+#     BARRIER = 4
+#     CZONE_SIGN = 5
+#     GENERIC_OBJECT = 6
+
+#     def to_default_type(self) -> BoxDetectionType:
+#         mapping = {
+#             NuPlanBoxDetectionType.VEHICLE: BoxDetectionType.VEHICLE,
+#             NuPlanBoxDetectionType.BICYCLE: BoxDetectionType.BICYCLE,
+#             NuPlanBoxDetectionType.PEDESTRIAN: BoxDetectionType.PEDESTRIAN,
+#             NuPlanBoxDetectionType.TRAFFIC_CONE: BoxDetectionType.GENERIC_OBJECT,
+#             NuPlanBoxDetectionType.BARRIER: BoxDetectionType.GENERIC_OBJECT,
+#             NuPlanBoxDetectionType.CZONE_SIGN: BoxDetectionType.GENERIC_OBJECT,
+#             NuPlanBoxDetectionType.GENERIC_OBJECT: BoxDetectionType.GENERIC_OBJECT,
+#         }
+#         return mapping[self]
diff --git a/src/py123d/datatypes/detections/detection.py b/src/py123d/datatypes/detections/box_detections.py
similarity index 67%
rename from src/py123d/datatypes/detections/detection.py
rename to src/py123d/datatypes/detections/box_detections.py
index 415510e9..2cff36a8 100644
--- a/src/py123d/datatypes/detections/detection.py
+++ b/src/py123d/datatypes/detections/box_detections.py
@@ -4,8 +4,7 @@
 import shapely
 
-from py123d.common.utils.enums import SerialIntEnum
-from py123d.datatypes.detections.detection_types import DetectionType
+from py123d.datatypes.detections.box_detection_types import BoxDetectionType
 from py123d.datatypes.time.time_point import TimePoint
 from py123d.geometry import BoundingBoxSE2, BoundingBoxSE3, OccupancyMap2D, StateSE2, StateSE3, Vector2D, Vector3D
 
@@ -13,11 +12,11 @@
 @dataclass
 class BoxDetectionMetadata:
 
-    detection_type: DetectionType
+    box_detection_type: BoxDetectionType
     track_token: str
-    timepoint: Optional[TimePoint] = None  # TimePoint when the detection was made, if available
     confidence: Optional[float] = None  # Confidence score of the detection, if available
     num_lidar_points: Optional[int] = None  # Number of LiDAR points within the bounding box
+    timepoint: Optional[TimePoint] = None  # TimePoint when the detection was made, if available
 
 
 @dataclass
@@ -93,8 +92,10 @@ def __len__(self) -> int:
     def __iter__(self):
         return iter(self.box_detections)
 
-    def get_box_detections_by_types(self, detection_types: Iterable[DetectionType]) -> List[BoxDetection]:
-        return [detection for detection in self.box_detections if detection.metadata.detection_type in detection_types]
+    def get_box_detections_by_types(self, detection_types: Iterable[BoxDetectionType]) -> List[BoxDetection]:
+        return [
+            detection for detection in self.box_detections if detection.metadata.box_detection_type in detection_types
+        ]
 
     def get_detection_by_track_token(self, track_token: str) -> BoxDetection | None:
         box_detection: BoxDetection | None = None
@@ -109,53 +110,3 @@ def occupancy_map(self) -> OccupancyMap2D:
         ids = [detection.metadata.track_token for detection in self.box_detections]
         geometries = [detection.bounding_box.shapely_polygon for detection in self.box_detections]
         return OccupancyMap2D(geometries=geometries, ids=ids)
-
-
-class TrafficLightStatus(SerialIntEnum):
-    """
-    Enum for TrafficLightStatus.
- """ - - GREEN = 0 - YELLOW = 1 - RED = 2 - OFF = 3 - UNKNOWN = 4 - - -@dataclass -class TrafficLightDetection: - - timepoint: TimePoint # TODO: Consider removing or making optional - lane_id: int - status: TrafficLightStatus - - -@dataclass -class TrafficLightDetectionWrapper: - - traffic_light_detections: List[TrafficLightDetection] - - def __getitem__(self, index: int) -> TrafficLightDetection: - return self.traffic_light_detections[index] - - def __len__(self) -> int: - return len(self.traffic_light_detections) - - def __iter__(self): - return iter(self.traffic_light_detections) - - def get_detection_by_lane_id(self, lane_id: int) -> Optional[TrafficLightDetection]: - traffic_light_detection: Optional[TrafficLightDetection] = None - for detection in self.traffic_light_detections: - if int(detection.lane_id) == int(lane_id): - traffic_light_detection = detection - break - return traffic_light_detection - - -@dataclass -class DetectionRecording: - - box_detections: BoxDetectionWrapper - traffic_light_detections: TrafficLightDetectionWrapper diff --git a/src/py123d/datatypes/detections/detection_types.py b/src/py123d/datatypes/detections/detection_types.py deleted file mode 100644 index 69754140..00000000 --- a/src/py123d/datatypes/detections/detection_types.py +++ /dev/null @@ -1,41 +0,0 @@ -from __future__ import annotations - -from py123d.common.utils.enums import SerialIntEnum - - -class DetectionType(SerialIntEnum): - """ - Enum for agents in py123d. - """ - - # TODO: - # - Add detection types compatible with other datasets - # - Add finer detection types (e.g. bicycle, motorcycle) and add generic types (e.g. two-wheeled vehicle) for general use. - - # NOTE: Current types strongly aligned with nuPlan. - - VEHICLE = 0 # Includes all four or more wheeled vehicles, as well as trailers. - BICYCLE = 1 # Includes bicycles, motorcycles and tricycles. - PEDESTRIAN = 2 # Pedestrians, incl. strollers and wheelchairs. - - TRAFFIC_CONE = 3 # Cones that are temporarily placed to control the flow of traffic. - BARRIER = 4 # Solid barriers that can be either temporary or permanent. - CZONE_SIGN = 5 # Temporary signs that indicate construction zones. - GENERIC_OBJECT = 6 # Animals, debris, pushable/pullable objects, permanent poles. - - EGO = 7 - SIGN = 8 # TODO: Remove or extent - - -DYNAMIC_DETECTION_TYPES: set[DetectionType] = { - DetectionType.VEHICLE, - DetectionType.BICYCLE, - DetectionType.PEDESTRIAN, -} - -STATIC_DETECTION_TYPES: set[DetectionType] = { - DetectionType.TRAFFIC_CONE, - DetectionType.BARRIER, - DetectionType.CZONE_SIGN, - DetectionType.GENERIC_OBJECT, -} diff --git a/src/py123d/datatypes/detections/traffic_light_detections.py b/src/py123d/datatypes/detections/traffic_light_detections.py new file mode 100644 index 00000000..c4f1c57a --- /dev/null +++ b/src/py123d/datatypes/detections/traffic_light_detections.py @@ -0,0 +1,48 @@ +from dataclasses import dataclass +from typing import List, Optional + +from py123d.common.utils.enums import SerialIntEnum +from py123d.datatypes.time.time_point import TimePoint + + +class TrafficLightStatus(SerialIntEnum): + """ + Enum for TrafficLightStatus. 
+ """ + + GREEN = 0 + YELLOW = 1 + RED = 2 + OFF = 3 + UNKNOWN = 4 + + +@dataclass +class TrafficLightDetection: + + lane_id: int + status: TrafficLightStatus + timepoint: Optional[TimePoint] = None + + +@dataclass +class TrafficLightDetectionWrapper: + + traffic_light_detections: List[TrafficLightDetection] + + def __getitem__(self, index: int) -> TrafficLightDetection: + return self.traffic_light_detections[index] + + def __len__(self) -> int: + return len(self.traffic_light_detections) + + def __iter__(self): + return iter(self.traffic_light_detections) + + def get_detection_by_lane_id(self, lane_id: int) -> Optional[TrafficLightDetection]: + traffic_light_detection: Optional[TrafficLightDetection] = None + for detection in self.traffic_light_detections: + if int(detection.lane_id) == int(lane_id): + traffic_light_detection = detection + break + return traffic_light_detection diff --git a/src/py123d/datatypes/scene/abstract_scene.py b/src/py123d/datatypes/scene/abstract_scene.py index 45f5ffc2..0270db09 100644 --- a/src/py123d/datatypes/scene/abstract_scene.py +++ b/src/py123d/datatypes/scene/abstract_scene.py @@ -3,7 +3,8 @@ import abc from typing import List, Optional -from py123d.datatypes.detections.detection import BoxDetectionWrapper, DetectionRecording, TrafficLightDetectionWrapper +from py123d.datatypes.detections.box_detections import BoxDetectionWrapper +from py123d.datatypes.detections.traffic_light_detections import TrafficLightDetectionWrapper from py123d.datatypes.maps.abstract_map import AbstractMap from py123d.datatypes.scene.scene_metadata import LogMetadata, SceneExtractionMetadata from py123d.datatypes.sensors.camera.pinhole_camera import PinholeCamera, PinholeCameraType @@ -47,10 +48,6 @@ def get_box_detections_at_iteration(self, iteration: int) -> Optional[BoxDetecti def get_traffic_light_detections_at_iteration(self, iteration: int) -> Optional[TrafficLightDetectionWrapper]: raise NotImplementedError - @abc.abstractmethod - def get_detection_recording_at_iteration(self, iteration: int) -> Optional[DetectionRecording]: - raise NotImplementedError - @abc.abstractmethod def get_route_lane_group_ids(self, iteration: int) -> Optional[List[int]]: raise NotImplementedError diff --git a/src/py123d/datatypes/scene/arrow/arrow_scene.py b/src/py123d/datatypes/scene/arrow/arrow_scene.py index 21d65d8f..fc0b7c15 100644 --- a/src/py123d/datatypes/scene/arrow/arrow_scene.py +++ b/src/py123d/datatypes/scene/arrow/arrow_scene.py @@ -4,7 +4,8 @@ import pyarrow as pa from py123d.common.utils.arrow_helper import get_lru_cached_arrow_table -from py123d.datatypes.detections.detection import BoxDetectionWrapper, DetectionRecording, TrafficLightDetectionWrapper +from py123d.datatypes.detections.box_detections import BoxDetectionWrapper +from py123d.datatypes.detections.traffic_light_detections import TrafficLightDetectionWrapper from py123d.datatypes.maps.abstract_map import AbstractMap from py123d.datatypes.maps.gpkg.gpkg_map import get_global_map_api, get_local_map_api from py123d.datatypes.scene.abstract_scene import AbstractScene @@ -112,12 +113,6 @@ def get_traffic_light_detections_at_iteration(self, iteration: int) -> Optional[ self._get_recording_table(), self._get_table_index(iteration) ) - def get_detection_recording_at_iteration(self, iteration: int) -> Optional[DetectionRecording]: - return DetectionRecording( - box_detections=self.get_box_detections_at_iteration(iteration), - traffic_light_detections=self.get_traffic_light_detections_at_iteration(iteration), - ) - def 
get_route_lane_group_ids(self, iteration: int) -> Optional[List[int]]: route_lane_group_ids: Optional[List[int]] = None table = self._get_recording_table() diff --git a/src/py123d/datatypes/scene/arrow/utils/arrow_getters.py b/src/py123d/datatypes/scene/arrow/utils/arrow_getters.py index 6a4bba72..edf9f86a 100644 --- a/src/py123d/datatypes/scene/arrow/utils/arrow_getters.py +++ b/src/py123d/datatypes/scene/arrow/utils/arrow_getters.py @@ -8,16 +8,18 @@ from omegaconf import DictConfig from py123d.conversion.datasets.pandaset.pandaset_sensor_loading import load_pandaset_lidars_pc_from_path -from py123d.datatypes.detections.detection import ( +from py123d.datatypes.detections.box_detection_types import BoxDetectionType +from py123d.datatypes.detections.box_detections import ( BoxDetection, BoxDetectionMetadata, BoxDetectionSE3, BoxDetectionWrapper, +) +from py123d.datatypes.detections.traffic_light_detections import ( TrafficLightDetection, TrafficLightDetectionWrapper, TrafficLightStatus, ) -from py123d.datatypes.detections.detection_types import DetectionType from py123d.datatypes.scene.scene_metadata import LogMetadata from py123d.datatypes.sensors.camera.pinhole_camera import PinholeCamera, PinholeCameraType from py123d.datatypes.sensors.lidar.lidar import LiDAR, LiDARType @@ -63,7 +65,7 @@ def get_box_detections_from_arrow_table(arrow_table: pa.Table, index: int) -> Bo ): box_detection = BoxDetectionSE3( metadata=BoxDetectionMetadata( - detection_type=DetectionType(detection_type), + box_detection_type=BoxDetectionType(detection_type), timepoint=timepoint, track_token=detection_token, confidence=None, diff --git a/src/py123d/datatypes/sensors/__init__.py b/src/py123d/datatypes/sensors/__init__.py index e69de29b..89175f33 100644 --- a/src/py123d/datatypes/sensors/__init__.py +++ b/src/py123d/datatypes/sensors/__init__.py @@ -0,0 +1,9 @@ +from py123d.datatypes.sensors.camera.pinhole_camera import ( + PinholeCameraType, + PinholeCamera, + PinholeIntrinsicsIndex, + PinholeIntrinsics, + PinholeDistortionIndex, + PinholeDistortion, + PinholeCameraMetadata, +) diff --git a/src/py123d/datatypes/sensors/lidar/lidar_index.py b/src/py123d/datatypes/sensors/lidar/lidar_index.py new file mode 100644 index 00000000..7684b685 --- /dev/null +++ b/src/py123d/datatypes/sensors/lidar/lidar_index.py @@ -0,0 +1,85 @@ +from enum import IntEnum + +from py123d.common.utils.enums import classproperty + +LIDAR_INDEX_REGISTRY = {} + + +def register_lidar_index(enum_class): + LIDAR_INDEX_REGISTRY[enum_class.__name__] = enum_class + return enum_class + + +class LiDARIndex(IntEnum): + + @classproperty + def XY(self) -> slice: + """ + Returns a slice for the XY coordinates of the LiDAR point cloud. + """ + return slice(self.X, self.Y + 1) + + @classproperty + def XYZ(self) -> slice: + """ + Returns a slice for the XYZ coordinates of the LiDAR point cloud. + """ + return slice(self.X, self.Z + 1) + + +@register_lidar_index +class DefaultLidarIndex(LiDARIndex): + X = 0 + Y = 1 + Z = 2 + + +@register_lidar_index +class NuPlanLidarIndex(LiDARIndex): + X = 0 + Y = 1 + Z = 2 + INTENSITY = 3 + RING = 4 + ID = 5 + + +@register_lidar_index +class CARLALidarIndex(LiDARIndex): + X = 0 + Y = 1 + Z = 2 + INTENSITY = 3 + + +@register_lidar_index +class WOPDLidarIndex(LiDARIndex): + RANGE = 0 + INTENSITY = 1 + ELONGATION = 2 + X = 3 + Y = 4 + Z = 5 + + +@register_lidar_index +class AVSensorLidarIndex(LiDARIndex): + """Argoverse Sensor LiDAR Indexing Scheme. 
+
+    NOTE: The LiDAR files also include 'offset_ns', which we do not currently include.
+    """
+
+    X = 0
+    Y = 1
+    Z = 2
+    INTENSITY = 3
+
+
+@register_lidar_index
+class PandasetLidarIndex(LiDARIndex):
+    """Pandaset LiDAR Indexing Scheme."""
+
+    X = 0
+    Y = 1
+    Z = 2
+    INTENSITY = 3
diff --git a/src/py123d/datatypes/vehicle_state/ego_state.py b/src/py123d/datatypes/vehicle_state/ego_state.py
index 2c2d21f0..fcc6f8fe 100644
--- a/src/py123d/datatypes/vehicle_state/ego_state.py
+++ b/src/py123d/datatypes/vehicle_state/ego_state.py
@@ -8,8 +8,8 @@
 import numpy.typing as npt
 
 from py123d.common.utils.enums import classproperty
-from py123d.datatypes.detections.detection import BoxDetectionMetadata, BoxDetectionSE2, BoxDetectionSE3
-from py123d.datatypes.detections.detection_types import DetectionType
+from py123d.datatypes.detections.box_detection_types import BoxDetectionType
+from py123d.datatypes.detections.box_detections import BoxDetectionMetadata, BoxDetectionSE2, BoxDetectionSE3
 from py123d.datatypes.time.time_point import TimePoint
 from py123d.datatypes.vehicle_state.vehicle_parameters import (
     VehicleParameters,
@@ -148,7 +148,7 @@ def bounding_box_se2(self) -> BoundingBoxSE2:
     def box_detection(self) -> BoxDetectionSE3:
         return BoxDetectionSE3(
             metadata=BoxDetectionMetadata(
-                detection_type=DetectionType.EGO,
+                box_detection_type=BoxDetectionType.EGO,
                 timepoint=self.timepoint,
                 track_token=EGO_TRACK_TOKEN,
                 confidence=1.0,
@@ -231,7 +231,7 @@ def bounding_box_se2(self) -> BoundingBoxSE2:
     def box_detection(self) -> BoxDetectionSE2:
         return BoxDetectionSE2(
             metadata=BoxDetectionMetadata(
-                detection_type=DetectionType.EGO,
+                box_detection_type=BoxDetectionType.EGO,
                 timepoint=self.timepoint,
                 track_token=EGO_TRACK_TOKEN,
                 confidence=1.0,
diff --git a/src/py123d/visualization/color/default.py b/src/py123d/visualization/color/default.py
index 317dda32..df0691f9 100644
--- a/src/py123d/visualization/color/default.py
+++ b/src/py123d/visualization/color/default.py
@@ -1,7 +1,7 @@
 from typing import Dict
 
-from py123d.datatypes.detections.detection import TrafficLightStatus
-from py123d.datatypes.detections.detection_types import DetectionType
+from py123d.datatypes.detections.box_detection_types import BoxDetectionType
+from py123d.datatypes.detections.traffic_light_detections import TrafficLightStatus
 from py123d.datatypes.maps.map_datatypes import MapLayer
 from py123d.visualization.color.color import (
     BLACK,
@@ -83,8 +83,8 @@
     ),
 }
 
-BOX_DETECTION_CONFIG: Dict[DetectionType, PlotConfig] = {
-    DetectionType.VEHICLE: PlotConfig(
+BOX_DETECTION_CONFIG: Dict[BoxDetectionType, PlotConfig] = {
+    BoxDetectionType.VEHICLE: PlotConfig(
         fill_color=ELLIS_5[4],
         fill_color_alpha=1.0,
         line_color=BLACK,
@@ -95,7 +95,7 @@
         marker_size=1.0,
         zorder=3,
     ),
-    DetectionType.PEDESTRIAN: PlotConfig(
+    BoxDetectionType.PEDESTRIAN: PlotConfig(
         fill_color=NEW_TAB_10[6],
         fill_color_alpha=1.0,
         line_color=BLACK,
@@ -106,7 +106,7 @@
         marker_size=1.0,
         zorder=2,
     ),
-    DetectionType.BICYCLE: PlotConfig(
+    BoxDetectionType.BICYCLE: PlotConfig(
        fill_color=ELLIS_5[3],
         fill_color_alpha=1.0,
         line_color=BLACK,
@@ -117,7 +117,7 @@
         marker_size=1.0,
         zorder=2,
    ),
-    DetectionType.TRAFFIC_CONE: PlotConfig(
+    BoxDetectionType.TRAFFIC_CONE: PlotConfig(
         fill_color=NEW_TAB_10[5],
         fill_color_alpha=1.0,
         line_color=BLACK,
@@ -127,7 +127,7 @@
         marker_style=None,
         zorder=2,
     ),
-    DetectionType.BARRIER: PlotConfig(
+    BoxDetectionType.BARRIER: PlotConfig(
         fill_color=NEW_TAB_10[5],
         fill_color_alpha=1.0,
         line_color=BLACK,
@@ -137,7 +137,7 @@
         marker_style=None,
zorder=2, ), - DetectionType.CZONE_SIGN: PlotConfig( + BoxDetectionType.CZONE_SIGN: PlotConfig( fill_color=NEW_TAB_10[5], fill_color_alpha=1.0, line_color=BLACK, @@ -147,7 +147,7 @@ marker_style=None, zorder=2, ), - DetectionType.GENERIC_OBJECT: PlotConfig( + BoxDetectionType.GENERIC_OBJECT: PlotConfig( fill_color=NEW_TAB_10[5], fill_color_alpha=1.0, line_color=BLACK, @@ -157,7 +157,7 @@ marker_style=None, zorder=2, ), - DetectionType.SIGN: PlotConfig( + BoxDetectionType.SIGN: PlotConfig( fill_color=NEW_TAB_10[8], fill_color_alpha=1.0, line_color=BLACK, @@ -167,7 +167,7 @@ marker_style=None, zorder=2, ), - DetectionType.EGO: PlotConfig( + BoxDetectionType.EGO: PlotConfig( fill_color=ELLIS_5[0], fill_color_alpha=1.0, line_color=BLACK, diff --git a/src/py123d/visualization/matplotlib/camera.py b/src/py123d/visualization/matplotlib/camera.py index 57b708be..9126655d 100644 --- a/src/py123d/visualization/matplotlib/camera.py +++ b/src/py123d/visualization/matplotlib/camera.py @@ -10,8 +10,8 @@ # from PIL import ImageColor from pyquaternion import Quaternion -from py123d.datatypes.detections.detection import BoxDetectionSE3, BoxDetectionWrapper -from py123d.datatypes.detections.detection_types import DetectionType +from py123d.datatypes.detections.box_detection_types import BoxDetectionType +from py123d.datatypes.detections.box_detections import BoxDetectionSE3, BoxDetectionWrapper from py123d.datatypes.sensors.camera.pinhole_camera import PinholeCamera, PinholeIntrinsics from py123d.datatypes.vehicle_state.ego_state import EgoStateSE3 from py123d.geometry import BoundingBoxSE3Index, Corners3DIndex @@ -89,7 +89,7 @@ def add_box_detections_to_camera_ax( box_detection_array = np.zeros((len(box_detections.box_detections), len(BoundingBoxSE3Index)), dtype=np.float64) detection_types = np.array( - [detection.metadata.detection_type for detection in box_detections.box_detections], dtype=object + [detection.metadata.box_detection_type for detection in box_detections.box_detections], dtype=object ) for idx, box_detection in enumerate(box_detections.box_detections): assert isinstance( @@ -218,7 +218,7 @@ def _rotation_3d_in_axis(points: npt.NDArray[np.float32], angles: npt.NDArray[np def _plot_rect_3d_on_img( image: npt.NDArray[np.float32], box_corners: npt.NDArray[np.float32], - detection_types: List[DetectionType], + detection_types: List[BoxDetectionType], thickness: int = 3, ) -> npt.NDArray[np.uint8]: """ diff --git a/src/py123d/visualization/matplotlib/observation.py b/src/py123d/visualization/matplotlib/observation.py index 15828cbb..03c0d711 100644 --- a/src/py123d/visualization/matplotlib/observation.py +++ b/src/py123d/visualization/matplotlib/observation.py @@ -4,8 +4,9 @@ import numpy as np import shapely.geometry as geom -from py123d.datatypes.detections.detection import BoxDetectionWrapper, TrafficLightDetectionWrapper -from py123d.datatypes.detections.detection_types import DetectionType +from py123d.datatypes.detections.box_detection_types import BoxDetectionType +from py123d.datatypes.detections.box_detections import BoxDetectionWrapper +from py123d.datatypes.detections.traffic_light_detections import TrafficLightDetectionWrapper from py123d.datatypes.maps.abstract_map import AbstractMap from py123d.datatypes.maps.abstract_map_objects import AbstractLane from py123d.datatypes.maps.map_datatypes import MapLayer @@ -84,7 +85,7 @@ def add_box_detections_to_ax(ax: plt.Axes, box_detections: BoxDetectionWrapper) # TODO: Optionally, continue on boxes outside of plot. 
# if box_detection.metadata.detection_type == DetectionType.GENERIC_OBJECT: # continue - plot_config = BOX_DETECTION_CONFIG[box_detection.metadata.detection_type] + plot_config = BOX_DETECTION_CONFIG[box_detection.metadata.box_detection_type] add_bounding_box_to_ax(ax, box_detection.bounding_box, plot_config) @@ -95,7 +96,7 @@ def add_box_future_detections_to_ax(ax: plt.Axes, scene: AbstractScene, iteratio agents_poses = { agent.metadata.track_token: [agent.center_se3] for agent in initial_agents - if agent.metadata.detection_type == DetectionType.VEHICLE + if agent.metadata.box_detection_type == BoxDetectionType.VEHICLE } frequency = 1 for iteration in range(iteration + frequency, scene.number_of_iterations, frequency): @@ -114,10 +115,10 @@ def add_box_future_detections_to_ax(ax: plt.Axes, scene: AbstractScene, iteratio ax.plot( poses[i : i + 2, 0], poses[i : i + 2, 1], - color=BOX_DETECTION_CONFIG[DetectionType.VEHICLE].fill_color.hex, + color=BOX_DETECTION_CONFIG[BoxDetectionType.VEHICLE].fill_color.hex, alpha=alphas[i + 1], - linewidth=BOX_DETECTION_CONFIG[DetectionType.VEHICLE].line_width * 5, - zorder=BOX_DETECTION_CONFIG[DetectionType.VEHICLE].zorder, + linewidth=BOX_DETECTION_CONFIG[BoxDetectionType.VEHICLE].line_width * 5, + zorder=BOX_DETECTION_CONFIG[BoxDetectionType.VEHICLE].zorder, ) diff --git a/src/py123d/visualization/viser/elements/detection_elements.py b/src/py123d/visualization/viser/elements/detection_elements.py index a8c5b98a..bdd76e00 100644 --- a/src/py123d/visualization/viser/elements/detection_elements.py +++ b/src/py123d/visualization/viser/elements/detection_elements.py @@ -3,7 +3,7 @@ import trimesh import viser -from py123d.datatypes.detections.detection_types import DetectionType +from py123d.datatypes.detections.box_detection_types import BoxDetectionType from py123d.datatypes.scene.abstract_scene import AbstractScene from py123d.datatypes.vehicle_state.ego_state import EgoStateSE3 from py123d.geometry.geometry_index import BoundingBoxSE3Index, Corners3DIndex, StateSE3Index @@ -63,7 +63,7 @@ def _get_bounding_box_meshes(scene: AbstractScene, iteration: int, initial_ego_s # Load boxes to visualize, including ego vehicle at the last position boxes = [bd.bounding_box_se3 for bd in box_detections.box_detections] + [ego_vehicle_state.bounding_box_se3] - boxes_type = [bd.metadata.detection_type for bd in box_detections.box_detections] + [DetectionType.EGO] + boxes_type = [bd.metadata.box_detection_type for bd in box_detections.box_detections] + [BoxDetectionType.EGO] # create meshes for all boxes box_se3_array = np.array([box.array for box in boxes]) @@ -124,7 +124,7 @@ def _get_bounding_box_outlines( # Load boxes to visualize, including ego vehicle at the last position boxes = [bd.bounding_box_se3 for bd in box_detections.box_detections] + [ego_vehicle_state.bounding_box_se3] - boxes_type = [bd.metadata.detection_type for bd in box_detections.box_detections] + [DetectionType.EGO] + boxes_type = [bd.metadata.box_detection_type for bd in box_detections.box_detections] + [BoxDetectionType.EGO] # Create lines for all boxes box_se3_array = np.array([box.array for box in boxes]) diff --git a/src/py123d/geometry/test/__init__.py b/tests/unit/__init__.py similarity index 100% rename from src/py123d/geometry/test/__init__.py rename to tests/unit/__init__.py diff --git a/src/py123d/geometry/transform/test/__init__.py b/tests/unit/common/__init__.py similarity index 100% rename from src/py123d/geometry/transform/test/__init__.py rename to tests/unit/common/__init__.py diff 
--git a/src/py123d/geometry/utils/test/__init__.py b/tests/unit/common/multithreading/__init__.py similarity index 100% rename from src/py123d/geometry/utils/test/__init__.py rename to tests/unit/common/multithreading/__init__.py diff --git a/src/py123d/geometry/utils/test/test_polyline_utils.py b/tests/unit/common/utils/__init__.py similarity index 100% rename from src/py123d/geometry/utils/test/test_polyline_utils.py rename to tests/unit/common/utils/__init__.py diff --git a/tests/unit/conversion/__init__.py b/tests/unit/conversion/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/tests/unit/datatypes/.gitkeep b/tests/unit/datatypes/.gitkeep new file mode 100644 index 00000000..e69de29b diff --git a/tests/unit/datatypes/__init__.py b/tests/unit/datatypes/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/tests/unit/datatypes/detections/__init__.py b/tests/unit/datatypes/detections/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/tests/unit/datatypes/maps/__init__.py b/tests/unit/datatypes/maps/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/tests/unit/datatypes/sensors/__init__.py b/tests/unit/datatypes/sensors/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/tests/unit/datatypes/time/__init__.py b/tests/unit/datatypes/time/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/tests/unit/datatypes/vehicle_state/__init__.py b/tests/unit/datatypes/vehicle_state/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/tests/unit/geometry/__init__.py b/tests/unit/geometry/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/src/py123d/geometry/test/test_bounding_box.py b/tests/unit/geometry/test_bounding_box.py similarity index 100% rename from src/py123d/geometry/test/test_bounding_box.py rename to tests/unit/geometry/test_bounding_box.py diff --git a/src/py123d/geometry/test/test_occupancy_map.py b/tests/unit/geometry/test_occupancy_map.py similarity index 100% rename from src/py123d/geometry/test/test_occupancy_map.py rename to tests/unit/geometry/test_occupancy_map.py diff --git a/src/py123d/geometry/test/test_point.py b/tests/unit/geometry/test_point.py similarity index 100% rename from src/py123d/geometry/test/test_point.py rename to tests/unit/geometry/test_point.py diff --git a/src/py123d/geometry/test/test_polyline.py b/tests/unit/geometry/test_polyline.py similarity index 100% rename from src/py123d/geometry/test/test_polyline.py rename to tests/unit/geometry/test_polyline.py diff --git a/src/py123d/geometry/test/test_rotation.py b/tests/unit/geometry/test_rotation.py similarity index 100% rename from src/py123d/geometry/test/test_rotation.py rename to tests/unit/geometry/test_rotation.py diff --git a/src/py123d/geometry/test/test_vector.py b/tests/unit/geometry/test_vector.py similarity index 100% rename from src/py123d/geometry/test/test_vector.py rename to tests/unit/geometry/test_vector.py diff --git a/tests/unit/geometry/transform/__init__.py b/tests/unit/geometry/transform/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/src/py123d/geometry/transform/test/test_transform_consistency.py b/tests/unit/geometry/transform/test_transform_consistency.py similarity index 100% rename from src/py123d/geometry/transform/test/test_transform_consistency.py rename to tests/unit/geometry/transform/test_transform_consistency.py diff --git a/src/py123d/geometry/transform/test/test_transform_euler_se3.py 
b/tests/unit/geometry/transform/test_transform_euler_se3.py similarity index 100% rename from src/py123d/geometry/transform/test/test_transform_euler_se3.py rename to tests/unit/geometry/transform/test_transform_euler_se3.py diff --git a/src/py123d/geometry/transform/test/test_transform_se2.py b/tests/unit/geometry/transform/test_transform_se2.py similarity index 100% rename from src/py123d/geometry/transform/test/test_transform_se2.py rename to tests/unit/geometry/transform/test_transform_se2.py diff --git a/src/py123d/geometry/transform/test/test_transform_se3.py b/tests/unit/geometry/transform/test_transform_se3.py similarity index 100% rename from src/py123d/geometry/transform/test/test_transform_se3.py rename to tests/unit/geometry/transform/test_transform_se3.py diff --git a/tests/unit/geometry/utils/__init__.py b/tests/unit/geometry/utils/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/src/py123d/geometry/utils/test/test_bounding_box_utils.py b/tests/unit/geometry/utils/test_bounding_box_utils.py similarity index 100% rename from src/py123d/geometry/utils/test/test_bounding_box_utils.py rename to tests/unit/geometry/utils/test_bounding_box_utils.py diff --git a/tests/unit/geometry/utils/test_polyline_utils.py b/tests/unit/geometry/utils/test_polyline_utils.py new file mode 100644 index 00000000..e69de29b diff --git a/src/py123d/geometry/utils/test/test_rotation_utils.py b/tests/unit/geometry/utils/test_rotation_utils.py similarity index 100% rename from src/py123d/geometry/utils/test/test_rotation_utils.py rename to tests/unit/geometry/utils/test_rotation_utils.py diff --git a/tests/unit/visualization/.gitkeep b/tests/unit/visualization/.gitkeep new file mode 100644 index 00000000..e69de29b From 3fc820ed8f1654dc610dd1081ec4d31d092bd3df Mon Sep 17 00:00:00 2001 From: Daniel Dauner Date: Thu, 23 Oct 2025 22:00:01 +0200 Subject: [PATCH 118/145] Add optional draco lidar compression. 
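
Draco compression is exposed through a new `lidar_compression` option on
`ArrowLogWriter` ("laz" stays the default, "draco" is opt-in). At read time
the codec is inferred from the buffer's magic bytes (`DRACO` vs. `LASF`);
since Draco only encodes XYZ, decoded clouds fall back to
`DefaultLidarIndex`. Below is a minimal roundtrip sketch of the underlying
DracoPy calls (illustrative only; the keyword values mirror the constants
introduced in draco_lidar_compression.py, and the random points are a
stand-in for a real point cloud):

    import DracoPy
    import numpy as np

    points = np.random.rand(1000, 3).astype(np.float32)
    binary = DracoPy.encode(
        points,
        quantization_bits=14,  # DRACO_QUANTIZATION_BITS
        compression_level=10,  # DRACO_COMPRESSION_LEVEL
    )
    # The reader dispatches on these magic bytes (see arrow_getters.py).
    assert binary.startswith(b"DRACO")
    decoded = np.asarray(DracoPy.decode(binary).points, dtype=np.float32)
    # NOTE: coordinates are quantized, so `decoded` is close to, but not
    # bit-identical with, `points`.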
--- pyproject.toml | 1 + .../datasets/av2/utils/av2_map_conversion.py | 3 +- .../conversion/log_writer/arrow_log_writer.py | 10 +++- .../utils/draco_lidar_compression.py | 52 +++++++++++++++++++ ...ompression.py => laz_lidar_compression.py} | 0 .../datatypes/scene/abstract_scene_builder.py | 2 - .../scene/arrow/utils/arrow_getters.py | 19 +++++-- test_viser.py | 4 +- 8 files changed, 81 insertions(+), 10 deletions(-) create mode 100644 src/py123d/conversion/log_writer/utils/draco_lidar_compression.py rename src/py123d/conversion/log_writer/utils/{lidar_compression.py => laz_lidar_compression.py} (100%) diff --git a/pyproject.toml b/pyproject.toml index 09a4c7f9..2b7acd84 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -48,6 +48,7 @@ dependencies = [ "trimesh", "viser", "laspy[lazrs]", + "DracoPy", ] [project.scripts] diff --git a/src/py123d/conversion/datasets/av2/utils/av2_map_conversion.py b/src/py123d/conversion/datasets/av2/utils/av2_map_conversion.py index 437dde09..41851c58 100644 --- a/src/py123d/conversion/datasets/av2/utils/av2_map_conversion.py +++ b/src/py123d/conversion/datasets/av2/utils/av2_map_conversion.py @@ -189,7 +189,8 @@ def _write_av2_road_edge(drivable_areas: Dict[int, Polyline3D], map_writer: Abst drivable_polygons = [geom.Polygon(drivable_area.array[:, :2]) for drivable_area in drivable_areas.values()] road_edges_2d = get_road_edge_linear_rings(drivable_polygons) non_conflicting_road_edges = lift_road_edges_to_3d(road_edges_2d, list(drivable_areas.values())) - road_edges = split_line_geometry_by_max_length(non_conflicting_road_edges, MAX_ROAD_EDGE_LENGTH) + non_conflicting_road_edges_linestrings = [polyline.linestring for polyline in non_conflicting_road_edges] + road_edges = split_line_geometry_by_max_length(non_conflicting_road_edges_linestrings, MAX_ROAD_EDGE_LENGTH) for idx, road_edge in enumerate(road_edges): diff --git a/src/py123d/conversion/log_writer/arrow_log_writer.py b/src/py123d/conversion/log_writer/arrow_log_writer.py index 6177d821..53d6b03f 100644 --- a/src/py123d/conversion/log_writer/arrow_log_writer.py +++ b/src/py123d/conversion/log_writer/arrow_log_writer.py @@ -7,7 +7,8 @@ from py123d.common.utils.uuid_utils import create_deterministic_uuid from py123d.conversion.abstract_dataset_converter import AbstractLogWriter, DatasetConverterConfig -from py123d.conversion.log_writer.utils.lidar_compression import compress_lidar_with_laz +from py123d.conversion.log_writer.utils.draco_lidar_compression import compress_lidar_with_draco +from py123d.conversion.log_writer.utils.laz_lidar_compression import compress_lidar_with_laz from py123d.datatypes.detections.box_detections import BoxDetectionWrapper from py123d.datatypes.detections.traffic_light_detections import TrafficLightDetectionWrapper from py123d.datatypes.scene.arrow.utils.arrow_metadata_utils import add_log_metadata_to_arrow_schema @@ -26,11 +27,13 @@ def __init__( logs_root: Union[str, Path], ipc_compression: Optional[Literal["lz4", "zstd"]] = None, ipc_compression_level: Optional[int] = None, + lidar_compression: Optional[Literal["draco", "laz"]] = "laz", ) -> None: self._logs_root = Path(logs_root) self._ipc_compression = ipc_compression self._ipc_compression_level = ipc_compression_level + self._lidar_compression = lidar_compression # Loaded during .reset() and cleared during .close() self._dataset_converter_config: Optional[DatasetConverterConfig] = None @@ -201,7 +204,10 @@ def write( # Possible compression step if self._dataset_converter_config.lidar_store_option == "binary": 
lidar_metadata = self._log_metadata.lidar_metadata[lidar_type] - lidar_data = compress_lidar_with_laz(lidar_data, lidar_metadata) + if self._lidar_compression == "draco": + lidar_data = compress_lidar_with_draco(lidar_data, lidar_metadata) + elif self._lidar_compression == "laz": + lidar_data = compress_lidar_with_laz(lidar_data, lidar_metadata) record_batch_data[f"{lidar_name}_data"] = [lidar_data] diff --git a/src/py123d/conversion/log_writer/utils/draco_lidar_compression.py b/src/py123d/conversion/log_writer/utils/draco_lidar_compression.py new file mode 100644 index 00000000..2a934766 --- /dev/null +++ b/src/py123d/conversion/log_writer/utils/draco_lidar_compression.py @@ -0,0 +1,52 @@ +from typing import Final + +import DracoPy +import numpy as np +import numpy.typing as npt + +from py123d.datatypes.sensors.lidar.lidar import LiDAR, LiDARMetadata + +# TODO: add to config +DRACO_QUANTIZATION_BITS: Final[int] = 14 +DRACO_COMPRESSION_LEVEL: Final[int] = 10 # Range: 0 (fastest) to 10 (slowest, best compression) +DRACO_QUANTIZATION_RANGE: Final[int] = -1 # Use default range +DRACO_PRESERVE_ORDER: Final[bool] = False + + +def compress_lidar_with_draco(point_cloud: npt.NDArray[np.float32], lidar_metadata: LiDARMetadata) -> bytes: + """Compress LiDAR point cloud data using Draco format. + + :param point_cloud: The LiDAR point cloud data to compress, as numpy array. + :param lidar_metadata: Metadata associated with the LiDAR data. + :return: The compressed Draco binary data. + """ + lidar_index = lidar_metadata.lidar_index + + binary = DracoPy.encode( + point_cloud[:, lidar_index.XYZ], + quantization_bits=DRACO_QUANTIZATION_BITS, + compression_level=DRACO_COMPRESSION_LEVEL, + quantization_range=DRACO_QUANTIZATION_RANGE, + quantization_origin=None, + create_metadata=False, + preserve_order=DRACO_PRESERVE_ORDER, + ) + + return binary + + +def decompress_lidar_from_draco(draco_binary: bytes, lidar_metadata: LiDARMetadata) -> LiDAR: + """Decompress LiDAR point cloud data from Draco format. + + :param draco_binary: The compressed Draco binary data. + :param lidar_metadata: Metadata associated with the LiDAR data. + :raises ValueError: If the LiDAR features are not found in the decompressed data. + :return: The decompressed LiDAR point cloud data as a LiDAR object. + """ + + # Decompress using Draco + mesh = DracoPy.decode(draco_binary) + points = mesh.points + points = np.array(points, dtype=np.float32) + + return LiDAR(point_cloud=points, metadata=lidar_metadata) diff --git a/src/py123d/conversion/log_writer/utils/lidar_compression.py b/src/py123d/conversion/log_writer/utils/laz_lidar_compression.py similarity index 100% rename from src/py123d/conversion/log_writer/utils/lidar_compression.py rename to src/py123d/conversion/log_writer/utils/laz_lidar_compression.py diff --git a/src/py123d/datatypes/scene/abstract_scene_builder.py b/src/py123d/datatypes/scene/abstract_scene_builder.py index 87327c05..17652549 100644 --- a/src/py123d/datatypes/scene/abstract_scene_builder.py +++ b/src/py123d/datatypes/scene/abstract_scene_builder.py @@ -5,8 +5,6 @@ from py123d.datatypes.scene.abstract_scene import AbstractScene from py123d.datatypes.scene.scene_filter import SceneFilter -# TODO: Expand lazy implementation for scene builder. 
- class SceneBuilder(abc.ABC): """ diff --git a/src/py123d/datatypes/scene/arrow/utils/arrow_getters.py b/src/py123d/datatypes/scene/arrow/utils/arrow_getters.py index edf9f86a..440d12fa 100644 --- a/src/py123d/datatypes/scene/arrow/utils/arrow_getters.py +++ b/src/py123d/datatypes/scene/arrow/utils/arrow_getters.py @@ -7,7 +7,6 @@ import pyarrow as pa from omegaconf import DictConfig -from py123d.conversion.datasets.pandaset.pandaset_sensor_loading import load_pandaset_lidars_pc_from_path from py123d.datatypes.detections.box_detection_types import BoxDetectionType from py123d.datatypes.detections.box_detections import ( BoxDetection, @@ -23,6 +22,7 @@ from py123d.datatypes.scene.scene_metadata import LogMetadata from py123d.datatypes.sensors.camera.pinhole_camera import PinholeCamera, PinholeCameraType from py123d.datatypes.sensors.lidar.lidar import LiDAR, LiDARType +from py123d.datatypes.sensors.lidar.lidar_index import DefaultLidarIndex from py123d.datatypes.time.time_point import TimePoint from py123d.datatypes.vehicle_state.ego_state import EgoStateSE3 from py123d.datatypes.vehicle_state.vehicle_parameters import VehicleParameters @@ -182,6 +182,9 @@ def get_lidar_from_arrow_table( raise NotImplementedError elif log_metadata.dataset == "pandaset": + from py123d.conversion.datasets.pandaset.pandaset_sensor_loading import ( + load_pandaset_lidars_pc_from_path, + ) ego_state_se3 = get_ego_vehicle_state_from_arrow_table( arrow_table, index, log_metadata.vehicle_parameters @@ -196,9 +199,19 @@ def get_lidar_from_arrow_table( raise NotImplementedError(f"Loading LiDAR data for dataset {log_metadata.dataset} is not implemented.") elif isinstance(lidar_data, bytes): - from py123d.conversion.log_writer.utils.lidar_compression import decompress_lidar_from_laz - lidar = decompress_lidar_from_laz(lidar_data, lidar_metadata) + if lidar_data.startswith(b"DRACO"): + from py123d.conversion.log_writer.utils.draco_lidar_compression import decompress_lidar_from_draco + + # NOTE: DRACO only allows XYZ compression, so we need to override the lidar index here. 
+ lidar_metadata.lidar_index = DefaultLidarIndex + + lidar = decompress_lidar_from_draco(lidar_data, lidar_metadata) + elif lidar_data.startswith(b"LASF"): + + from py123d.conversion.log_writer.utils.laz_lidar_compression import decompress_lidar_from_laz + + lidar = decompress_lidar_from_laz(lidar_data, lidar_metadata) elif lidar_data is None: lidar = None else: diff --git a/test_viser.py b/test_viser.py index 7446fb62..3ed0a1f6 100644 --- a/test_viser.py +++ b/test_viser.py @@ -8,9 +8,9 @@ # splits = ["nuplan-mini_test", "nuplan-mini_train", "nuplan-mini_val"] # splits = ["nuplan_private_test"] - splits = ["carla_test"] + # splits = ["carla_test"] # splits = ["wopd_val"] - # splits = ["av2-sensor_train"] + splits = ["av2-sensor_train"] # splits = ["pandaset_test", "pandaset_val", "pandaset_train"] log_names = None scene_uuids = None From 56840654d17675020e50831a4d53c0be5dfeef9f Mon Sep 17 00:00:00 2001 From: Daniel Dauner Date: Thu, 23 Oct 2025 23:35:19 +0200 Subject: [PATCH 119/145] Change `docs` theme (#57) --- docs/conf.py | 34 +++++------ docs/datasets/av2.rst | 1 - docs/datasets/index.rst | 2 +- docs/{ => development}/contributing.md | 0 docs/development/index.rst | 7 +++ docs/index.rst | 24 ++++++-- docs/requirements.in | 4 -- docs/requirements.txt | 81 -------------------------- docs/schema.md | 15 ----- pyproject.toml | 1 + 10 files changed, 46 insertions(+), 123 deletions(-) rename docs/{ => development}/contributing.md (100%) create mode 100644 docs/development/index.rst delete mode 100644 docs/requirements.in delete mode 100644 docs/requirements.txt delete mode 100644 docs/schema.md diff --git a/docs/conf.py b/docs/conf.py index 6916f3c6..0479a8ec 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -6,6 +6,7 @@ # -- Project information ----------------------------------------------------- # https://www.sphinx-doc.org/en/master/usage/configuration.html#project-information + project = "py123d" copyright = "2025, 123D Contributors" author = "123D Contributors" @@ -18,6 +19,7 @@ "sphinx.ext.duration", "sphinx.ext.doctest", "sphinx.ext.autodoc", + "sphinx.ext.intersphinx", "sphinx.ext.autosummary", "sphinx.ext.intersphinx", "sphinx.ext.napoleon", @@ -47,26 +49,15 @@ # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. # -html_theme = "sphinx_rtd_theme" +html_theme = "furo" +html_title = "" # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". 
html_static_path = ["_static"] -html_theme_options = { - "analytics_anonymize_ip": False, - "logo_only": True, - "prev_next_buttons_location": "bottom", - "style_external_links": False, - "vcs_pageview_mode": "", - # Toc options - "collapse_navigation": True, - "sticky_navigation": True, - "navigation_depth": 3, - "includehidden": True, - "titles_only": False, -} +html_theme_options = {} autodoc_typehints = "both" autodoc_class_signature = "separated" @@ -79,8 +70,7 @@ "imported-members": True, } -# Logo configuration -html_logo = "_static/logo_white.png" # or whatever your logo file is named + # Custom CSS for color theming html_css_files = [ "custom.css", @@ -89,6 +79,16 @@ # Additional theme options for color customization html_theme_options.update( { - "style_nav_header_background": "#565656", # Navigation header background + "light_logo": "logo_black.png", + "dark_logo": "logo_white.png", + "sidebar_hide_name": True, } ) + +# This CSS should go in /home/daniel/py123d_workspace/py123d/docs/_static/custom.css +# Your conf.py already references it in html_css_files = ["custom.css"] + +# If you want to add custom CSS via configuration, you can use: +html_css_files = [ + "custom.css", +] diff --git a/docs/datasets/av2.rst b/docs/datasets/av2.rst index 17311422..3c09f329 100644 --- a/docs/datasets/av2.rst +++ b/docs/datasets/av2.rst @@ -5,7 +5,6 @@ Argoverse 2 .. image:: https://www.argoverse.org/assets/images/reference_images/av2_vehicle.jpg :alt: Dataset sample image - :width: 290px | **Paper:** `Name of Paper `_ | **Download:** `Documentation `_ diff --git a/docs/datasets/index.rst b/docs/datasets/index.rst index ead97169..851fbf32 100644 --- a/docs/datasets/index.rst +++ b/docs/datasets/index.rst @@ -6,7 +6,7 @@ Brief overview of the datasets section... This section provides comprehensive documentation for various autonomous driving and computer vision datasets. Each dataset entry includes installation instructions, available data types, known issues, and proper citation formats. .. toctree:: - :maxdepth: 0 + :maxdepth: 1 av2 nuplan diff --git a/docs/contributing.md b/docs/development/contributing.md similarity index 100% rename from docs/contributing.md rename to docs/development/contributing.md diff --git a/docs/development/index.rst b/docs/development/index.rst new file mode 100644 index 00000000..579ccee4 --- /dev/null +++ b/docs/development/index.rst @@ -0,0 +1,7 @@ +Development +=========== + +.. toctree:: + :maxdepth: 0 + + contributing diff --git a/docs/index.rst b/docs/index.rst index c923847f..a9e1d197 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -13,11 +13,27 @@ documentation for details. .. toctree:: :maxdepth: 1 - :caption: Contents: + :caption: Overview: installation - geometry datasets/index - schema + + +.. toctree:: + :maxdepth: 2 + :caption: API Reference: + + geometry + + +.. toctree:: + :maxdepth: 1 + :caption: Visualization: + visualization - contributing + +.. 
toctree:: + :maxdepth: 1 + :caption: Development: + + development/index diff --git a/docs/requirements.in b/docs/requirements.in deleted file mode 100644 index d39cc178..00000000 --- a/docs/requirements.in +++ /dev/null @@ -1,4 +0,0 @@ -sphinx==8.0.2 -sphinx-copybutton==0.5.2 -sphinx-rtd-theme==3.0.2 -myst-parser==4.0.1 diff --git a/docs/requirements.txt b/docs/requirements.txt deleted file mode 100644 index ceb2e009..00000000 --- a/docs/requirements.txt +++ /dev/null @@ -1,81 +0,0 @@ -# -# This file is autogenerated by pip-compile with Python 3.10 -# by the following command: -# -# pip-compile docs/requirements.in -# ---extra-index-url https://pypi.ngc.nvidia.com ---trusted-host pypi.ngc.nvidia.com - -alabaster==1.0.0 - # via sphinx -babel==2.17.0 - # via sphinx -certifi==2025.10.5 - # via requests -charset-normalizer==3.4.4 - # via requests -docutils==0.21.2 - # via - # myst-parser - # sphinx - # sphinx-rtd-theme -idna==3.11 - # via requests -imagesize==1.4.1 - # via sphinx -jinja2==3.1.6 - # via - # myst-parser - # sphinx -markdown-it-py==3.0.0 - # via - # mdit-py-plugins - # myst-parser -markupsafe==3.0.3 - # via jinja2 -mdit-py-plugins==0.5.0 - # via myst-parser -mdurl==0.1.2 - # via markdown-it-py -myst-parser==4.0.1 - # via -r docs/requirements.in -packaging==25.0 - # via sphinx -pygments==2.19.2 - # via sphinx -pyyaml==6.0.3 - # via myst-parser -requests==2.32.5 - # via sphinx -snowballstemmer==3.0.1 - # via sphinx -sphinx==8.0.2 - # via - # -r docs/requirements.in - # myst-parser - # sphinx-copybutton - # sphinx-rtd-theme - # sphinxcontrib-jquery -sphinx-copybutton==0.5.2 - # via -r docs/requirements.in -sphinx-rtd-theme==3.0.2 - # via -r docs/requirements.in -sphinxcontrib-applehelp==2.0.0 - # via sphinx -sphinxcontrib-devhelp==2.0.0 - # via sphinx -sphinxcontrib-htmlhelp==2.1.0 - # via sphinx -sphinxcontrib-jquery==4.1 - # via sphinx-rtd-theme -sphinxcontrib-jsmath==1.0.1 - # via sphinx -sphinxcontrib-qthelp==2.0.0 - # via sphinx -sphinxcontrib-serializinghtml==2.0.0 - # via sphinx -tomli==2.3.0 - # via sphinx -urllib3==2.5.0 - # via requests diff --git a/docs/schema.md b/docs/schema.md deleted file mode 100644 index b3d44a6e..00000000 --- a/docs/schema.md +++ /dev/null @@ -1,15 +0,0 @@ - -# Schema - -TODO - diff --git a/pyproject.toml b/pyproject.toml index 2b7acd84..c3b6ab41 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -69,6 +69,7 @@ docs = [ "sphinx-autobuild", "myst-parser", "sphinx-copybutton", + "furo", ] nuplan = [ "nuplan-devkit @ git+https://github.com/motional/nuplan-devkit/@nuplan-devkit-v1.2", From 88bcb863cd8e3cd9cfd6b37b021c5339d202c924 Mon Sep 17 00:00:00 2001 From: Daniel Dauner Date: Fri, 24 Oct 2025 00:06:08 +0200 Subject: [PATCH 120/145] Add 123D logo to viser (#57) --- .../visualization/viser/viser_viewer.py | 54 +++++++++---------- 1 file changed, 25 insertions(+), 29 deletions(-) diff --git a/src/py123d/visualization/viser/viser_viewer.py b/src/py123d/visualization/viser/viser_viewer.py index dd85dfad..b27d6853 100644 --- a/src/py123d/visualization/viser/viser_viewer.py +++ b/src/py123d/visualization/viser/viser_viewer.py @@ -3,6 +3,7 @@ from typing import Dict, List, Optional import viser +from viser.theme import TitlebarButton, TitlebarConfig, TitlebarImage from py123d.datatypes.maps.map_datatypes import MapLayer from py123d.datatypes.scene.abstract_scene import AbstractScene @@ -18,9 +19,6 @@ ) from py123d.visualization.viser.viser_config import ViserConfig -# from viser.theme import TitlebarButton, TitlebarConfig, TitlebarImage - - logger = 
logging.getLogger(__name__)
@@ -53,32 +51,30 @@ def _build_viser_server(viser_config: ViserConfig) -> viser.ViserServer:
         verbose=viser_config.server_verbose,
     )
 
-    # TODO: Add logos, once we are public
-    # buttons = (
-    #     TitlebarButton(
-    #         text="Getting Started",
-    #         icon=None,
-    #         href="https://nerf.studio",
-    #     ),
-    #     TitlebarButton(
-    #         text="Github",
-    #         icon="GitHub",
-    #         href="https://github.com/nerfstudio-project/nerfstudio",
-    #     ),
-    #     TitlebarButton(
-    #         text="Documentation",
-    #         icon="Description",
-    #         href="https://docs.nerf.studio",
-    #     ),
-    # )
-    # image = TitlebarImage(
-    #     image_url_light="https://docs.nerf.studio/_static/imgs/logo.png",
-    #     image_url_dark="https://docs.nerf.studio/_static/imgs/logo-dark.png",
-    #     image_alt="NerfStudio Logo",
-    #     href="https://docs.nerf.studio/",
-    # )
-    # titlebar_theme = TitlebarConfig(buttons=buttons, image=image)
-    titlebar_theme = None
+    buttons = (
+        TitlebarButton(
+            text="Getting Started",
+            icon=None,
+            href="https://danieldauner.github.io/123d",
+        ),
+        TitlebarButton(
+            text="Github",
+            icon="GitHub",
+            href="https://github.com/DanielDauner/123d",
+        ),
+        TitlebarButton(
+            text="Documentation",
+            icon="Description",
+            href="https://danieldauner.github.io/123d",
+        ),
+    )
+    image = TitlebarImage(
+        image_url_light="https://danieldauner.github.io/123d/_static/logo_black.png",
+        image_url_dark="https://danieldauner.github.io/123d/_static/logo_white.png",
+        image_alt="123D",
+        href="https://danieldauner.github.io/123d/",
+    )
+    titlebar_theme = TitlebarConfig(buttons=buttons, image=image)
 
     server.gui.configure_theme(
         titlebar_content=titlebar_theme,

From 4ca260c3671c15d1f5e0e257d8ed3598bddf758d Mon Sep 17 00:00:00 2001
From: Daniel Dauner
Date: Tue, 28 Oct 2025 23:13:56 +0100
Subject: [PATCH 121/145] Refactoring and mostly unfinished changes.
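This patch routes all LiDAR extraction through a small LiDARData record (added
to abstract_log_writer.py below) instead of per-dataset dictionaries. As a
rough usage sketch, with made-up paths and frame index for illustration:

    from pathlib import Path

    from py123d.conversion.log_writer.abstract_log_writer import LiDARData
    from py123d.datatypes.sensors.lidar.lidar import LiDARType

    # Converters only describe where a point cloud lives; the log writer then
    # decides whether to persist the path ("path"/"path_merged") or to load
    # and compress the points ("binary").
    lidar = LiDARData(
        lidar_type=LiDARType.LIDAR_MERGED,
        iteration=42,  # hypothetical frame index inside a multi-frame source file
        dataset_root=Path("/data/wopd"),  # hypothetical dataset root
        relative_path="training/segment-000.tfrecord",  # hypothetical record path
    )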
--- .../logo/svg/123D_logo_transparent_black.svg | 18 +- .../logo/svg/123D_logo_transparent_white.svg | 18 +- docs/datasets/wopd.rst | 11 ++ docs/installation.md | 11 +- pyproject.toml | 6 +- scripts/conversion/wopd_conversion.sh | 6 + .../conversion/dataset_converter_config.py | 4 +- .../datasets/av2/av2_sensor_converter.py | 33 ++-- ...av2_sensor_loading.py => av2_sensor_io.py} | 2 +- .../datasets/nuplan/nuplan_converter.py | 38 ++-- .../datasets/nuplan/nuplan_load_sensor.py | 15 -- .../datasets/nuplan/nuplan_sensor_io.py | 29 +++ .../datasets/nuplan/utils/nuplan_constants.py | 9 + .../datasets/pandaset/pandaset_converter.py | 30 +-- ...ensor_loading.py => pandaset_sensor_io.py} | 31 +-- .../datasets/pandaset/pandaset_utlis.py | 4 - .../datasets/wopd/{ => utils}/wopd_utils.py | 0 .../datasets/wopd/waymo_sensor_io.py | 66 +++++++ .../datasets/wopd/wopd_converter.py | 56 +++--- .../log_writer/abstract_log_writer.py | 43 ++++- .../conversion/log_writer/arrow_log_writer.py | 100 +++++++--- src/py123d/conversion/registry/__init__.py | 0 .../registry/box_detection_type_registry.py | 6 + .../lidar_index_registry.py | 1 - src/py123d/conversion/sensor_io/__init__.py | 0 .../conversion/sensor_io/camera/__init__.py | 0 .../sensor_io/camera/jpeg_camera_io.py | 39 ++++ .../sensor_io/camera/mp4_camera_io.py | 180 ++++++++++++++++++ .../conversion/sensor_io/lidar/__init__.py | 0 .../lidar/draco_lidar_io.py} | 6 +- .../sensor_io/lidar/file_lidar_io.py | 62 ++++++ .../lidar/laz_lidar_io.py} | 4 +- .../detections/box_detection_types.py | 31 --- .../datatypes/scene/arrow/arrow_scene.py | 2 +- .../scene/arrow/utils/arrow_getters.py | 79 +++----- src/py123d/datatypes/sensors/lidar/lidar.py | 2 +- .../datasets/av2_sensor_dataset.yaml | 2 +- .../conversion/datasets/nuplan_dataset.yaml | 8 +- .../datasets/nuplan_mini_dataset.yaml | 8 +- .../conversion/datasets/pandaset_dataset.yaml | 2 +- .../conversion/datasets/wopd_dataset.yaml | 6 +- .../config/conversion/default_conversion.yaml | 2 +- src/py123d/script/run_conversion.py | 2 + .../viser/elements/render_elements.py | 25 +++ .../viser/elements/sensor_elements.py | 8 + .../visualization/viser/viser_config.py | 4 +- .../visualization/viser/viser_viewer.py | 33 +++- test_viser.py | 12 +- 48 files changed, 761 insertions(+), 293 deletions(-) create mode 100644 scripts/conversion/wopd_conversion.sh rename src/py123d/conversion/datasets/av2/{utils/av2_sensor_loading.py => av2_sensor_io.py} (89%) delete mode 100644 src/py123d/conversion/datasets/nuplan/nuplan_load_sensor.py create mode 100644 src/py123d/conversion/datasets/nuplan/nuplan_sensor_io.py rename src/py123d/conversion/datasets/pandaset/{pandaset_sensor_loading.py => pandaset_sensor_io.py} (67%) rename src/py123d/conversion/datasets/wopd/{ => utils}/wopd_utils.py (100%) create mode 100644 src/py123d/conversion/datasets/wopd/waymo_sensor_io.py create mode 100644 src/py123d/conversion/registry/__init__.py create mode 100644 src/py123d/conversion/registry/box_detection_type_registry.py rename src/py123d/conversion/{utils/sensor_utils => registry}/lidar_index_registry.py (99%) create mode 100644 src/py123d/conversion/sensor_io/__init__.py create mode 100644 src/py123d/conversion/sensor_io/camera/__init__.py create mode 100644 src/py123d/conversion/sensor_io/camera/jpeg_camera_io.py create mode 100644 src/py123d/conversion/sensor_io/camera/mp4_camera_io.py create mode 100644 src/py123d/conversion/sensor_io/lidar/__init__.py rename src/py123d/conversion/{log_writer/utils/draco_lidar_compression.py => 
sensor_io/lidar/draco_lidar_io.py} (86%) create mode 100644 src/py123d/conversion/sensor_io/lidar/file_lidar_io.py rename src/py123d/conversion/{log_writer/utils/laz_lidar_compression.py => sensor_io/lidar/laz_lidar_io.py} (91%) create mode 100644 src/py123d/visualization/viser/elements/render_elements.py diff --git a/assets/logo/svg/123D_logo_transparent_black.svg b/assets/logo/svg/123D_logo_transparent_black.svg index 2c07cc5c..b9214f93 100644 --- a/assets/logo/svg/123D_logo_transparent_black.svg +++ b/assets/logo/svg/123D_logo_transparent_black.svg @@ -34,7 +34,7 @@ inkscape:window-x="2560" inkscape:window-y="27" inkscape:window-maximized="1" - inkscape:current-layer="layer4" + inkscape:current-layer="layer3" showguides="false" />123D + inkscape:label="Text"> diff --git a/assets/logo/svg/123D_logo_transparent_white.svg b/assets/logo/svg/123D_logo_transparent_white.svg index 7ebd250a..f98386fa 100644 --- a/assets/logo/svg/123D_logo_transparent_white.svg +++ b/assets/logo/svg/123D_logo_transparent_white.svg @@ -35,7 +35,7 @@ inkscape:window-x="0" inkscape:window-y="0" inkscape:window-maximized="1" - inkscape:current-layer="layer4" + inkscape:current-layer="svg1" showguides="false" />123D diff --git a/pyproject.toml b/pyproject.toml index c3b6ab41..6df6dff0 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -90,9 +90,9 @@ nuplan = [ "retry", ] waymo = [ - "tensorflow==2.16.1", - "waymo-open-dataset-tf-2-11-0", - "intervaltree", + "protobuf==6.30.2", + "tensorflow==2.13.0", + "waymo-open-dataset-tf-2-12-0==1.6.6", ] [tool.setuptools.packages.find] diff --git a/scripts/conversion/wopd_conversion.sh b/scripts/conversion/wopd_conversion.sh new file mode 100644 index 00000000..e4f4b3d4 --- /dev/null +++ b/scripts/conversion/wopd_conversion.sh @@ -0,0 +1,6 @@ +py123d-conversion datasets=[wopd_dataset] + + +# pip install protobuf==6.30.2 +# pip install tensorflow==2.13.0 +# pip install waymo-open-dataset-tf-2-12-0==1.6.6 diff --git a/src/py123d/conversion/dataset_converter_config.py b/src/py123d/conversion/dataset_converter_config.py index b1c3045f..6539e3e4 100644 --- a/src/py123d/conversion/dataset_converter_config.py +++ b/src/py123d/conversion/dataset_converter_config.py @@ -18,6 +18,7 @@ class DatasetConverterConfig: # Box Detections include_box_detections: bool = False + include_box_lidar_points: bool = False # Traffic Lights include_traffic_lights: bool = False @@ -28,7 +29,7 @@ class DatasetConverterConfig: # LiDARs include_lidars: bool = False - lidar_store_option: Literal["path", "binary"] = "path" + lidar_store_option: Literal["path", "path_merged", "binary"] = "path" # Scenario tag / Route # NOTE: These are only supported for nuPlan. Consider removing or expanding support. @@ -44,5 +45,6 @@ def __post_init__(self): assert self.lidar_store_option in [ "path", + "path_merged", "binary", ], f"Invalid LiDAR store option, got {self.lidar_store_option}." 
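For reference, a minimal sketch of how the new storage option is selected in
code; only the field names come from the DatasetConverterConfig diff above,
the concrete flag values here are assumptions:

    from py123d.conversion.dataset_converter_config import DatasetConverterConfig

    config = DatasetConverterConfig(
        include_lidars=True,
        lidar_store_option="path_merged",  # one file path per frame, split per LiDAR on read
        include_box_detections=True,
        include_box_lidar_points=False,  # new flag introduced in this patch
    )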
diff --git a/src/py123d/conversion/datasets/av2/av2_sensor_converter.py b/src/py123d/conversion/datasets/av2/av2_sensor_converter.py index bf48f59d..172954a9 100644 --- a/src/py123d/conversion/datasets/av2/av2_sensor_converter.py +++ b/src/py123d/conversion/datasets/av2/av2_sensor_converter.py @@ -2,7 +2,6 @@ from typing import Dict, List, Optional, Tuple, Union import numpy as np -import numpy.typing as npt import pandas as pd from py123d.conversion.abstract_dataset_converter import AbstractDatasetConverter @@ -20,10 +19,9 @@ get_slice_with_timestamp_ns, ) from py123d.conversion.datasets.av2.utils.av2_map_conversion import convert_av2_map -from py123d.conversion.datasets.av2.utils.av2_sensor_loading import load_av2_sensor_lidar_pc_from_path -from py123d.conversion.log_writer.abstract_log_writer import AbstractLogWriter +from py123d.conversion.log_writer.abstract_log_writer import AbstractLogWriter, LiDARData from py123d.conversion.map_writer.abstract_map_writer import AbstractMapWriter -from py123d.conversion.utils.sensor_utils.lidar_index_registry import AVSensorLidarIndex +from py123d.conversion.registry.lidar_index_registry import AVSensorLidarIndex from py123d.datatypes.detections.box_detection_types import BoxDetectionType from py123d.datatypes.detections.box_detections import BoxDetectionMetadata, BoxDetectionSE3, BoxDetectionWrapper from py123d.datatypes.maps.map_metadata import MapMetadata @@ -373,8 +371,8 @@ def _extract_av2_sensor_camera( def _extract_av2_sensor_lidars( source_log_path: Path, lidar_timestamp_ns: int, dataset_converter_config: DatasetConverterConfig -) -> Optional[Dict[LiDARType, Union[str, npt.NDArray[np.float32]]]]: - lidar_dict: Dict[LiDARType, Union[str, npt.NDArray[np.float32]]] = {} +) -> List[LiDARData]: + lidars: List[LiDARData] = [] if dataset_converter_config.include_lidars: av2_sensor_data_root = source_log_path.parent.parent split_type = source_log_path.parent.name @@ -382,21 +380,16 @@ def _extract_av2_sensor_lidars( relative_feather_path = f"{split_type}/{log_name}/sensors/lidar/{lidar_timestamp_ns}.feather" lidar_feather_path = av2_sensor_data_root / relative_feather_path - # if lidar_feather_path.exists(): - assert lidar_feather_path.exists(), f"LiDAR feather file not found: {lidar_feather_path}" - if dataset_converter_config.lidar_store_option == "path": - # NOTE: It is somewhat inefficient to store the same path for both lidars, - # but this keeps the interface simple for now. - lidar_dict = { - LiDARType.LIDAR_TOP: str(relative_feather_path), - LiDARType.LIDAR_DOWN: str(relative_feather_path), - } - elif dataset_converter_config.lidar_store_option == "binary": - # NOTE: For binary storage, we pass the point cloud to the log writer. - # Compression is handled internally in the log writer. 
- lidar_dict: Dict[LiDARType, np.ndarray] = load_av2_sensor_lidar_pc_from_path(lidar_feather_path) - return lidar_dict + + lidar_data = LiDARData( + lidar_type=LiDARType.LIDAR_MERGED, + dataset_root=av2_sensor_data_root, + relative_path=relative_feather_path, + ) + lidars.append(lidar_data) + + return lidars def _row_dict_to_state_se3(row_dict: Dict[str, float]) -> StateSE3: diff --git a/src/py123d/conversion/datasets/av2/utils/av2_sensor_loading.py b/src/py123d/conversion/datasets/av2/av2_sensor_io.py similarity index 89% rename from src/py123d/conversion/datasets/av2/utils/av2_sensor_loading.py rename to src/py123d/conversion/datasets/av2/av2_sensor_io.py index 1e89b551..a17e4892 100644 --- a/src/py123d/conversion/datasets/av2/utils/av2_sensor_loading.py +++ b/src/py123d/conversion/datasets/av2/av2_sensor_io.py @@ -7,7 +7,7 @@ from py123d.datatypes.sensors.lidar.lidar import LiDARType -def load_av2_sensor_lidar_pc_from_path(feather_path: Union[Path, str]) -> Dict[LiDARType, np.ndarray]: +def load_av2_sensor_lidar_pcs_from_file(feather_path: Union[Path, str]) -> Dict[LiDARType, np.ndarray]: # NOTE: The AV2 dataset stores both top and down LiDAR data in the same feather file. # We need to separate them based on the laser_number field. # See here: https://github.com/argoverse/av2-api/issues/77#issuecomment-1178040867 diff --git a/src/py123d/conversion/datasets/nuplan/nuplan_converter.py b/src/py123d/conversion/datasets/nuplan/nuplan_converter.py index 120d66b8..c837e559 100644 --- a/src/py123d/conversion/datasets/nuplan/nuplan_converter.py +++ b/src/py123d/conversion/datasets/nuplan/nuplan_converter.py @@ -13,6 +13,7 @@ from py123d.conversion.datasets.nuplan.utils.nuplan_constants import ( NUPLAN_DATA_SPLITS, NUPLAN_DEFAULT_DT, + NUPLAN_LIDAR_DICT, NUPLAN_MAP_LOCATIONS, NUPLAN_ROLLING_SHUTTER_S, NUPLAN_TRAFFIC_STATUS_DICT, @@ -21,9 +22,9 @@ get_box_detections_for_lidarpc_token_from_db, get_nearest_ego_pose_for_timestamp_from_db, ) -from py123d.conversion.log_writer.abstract_log_writer import AbstractLogWriter +from py123d.conversion.log_writer.abstract_log_writer import AbstractLogWriter, LiDARData from py123d.conversion.map_writer.abstract_map_writer import AbstractMapWriter -from py123d.conversion.utils.sensor_utils.lidar_index_registry import NuPlanLidarIndex +from py123d.conversion.registry.lidar_index_registry import NuPlanLidarIndex from py123d.datatypes.detections.box_detections import BoxDetectionSE3, BoxDetectionWrapper from py123d.datatypes.detections.traffic_light_detections import TrafficLightDetection, TrafficLightDetectionWrapper from py123d.datatypes.maps.map_metadata import MapMetadata @@ -273,11 +274,12 @@ def _get_nuplan_lidar_metadata( # NOTE: We first need to check if the LiDAR folder exists, as not all logs have LiDAR data if log_lidar_folder.exists() and log_lidar_folder.is_dir() and dataset_converter_config.include_lidars: - metadata[LiDARType.LIDAR_MERGED] = LiDARMetadata( - lidar_type=LiDARType.LIDAR_MERGED, - lidar_index=NuPlanLidarIndex, - extrinsic=None, # NOTE: LiDAR extrinsic are unknown - ) + for lidar_type in NUPLAN_LIDAR_DICT.values(): + metadata[lidar_type] = LiDARMetadata( + lidar_type=lidar_type, + lidar_index=NuPlanLidarIndex, + extrinsic=None, # NOTE: LiDAR extrinsic are unknown + ) return metadata @@ -401,14 +403,22 @@ def _extract_nuplan_lidars( nuplan_lidar_pc: LidarPc, nuplan_sensor_root: Path, dataset_converter_config: DatasetConverterConfig, -) -> Dict[LiDARType, Optional[str]]: - - lidar: Optional[str] = None - lidar_full_path = 
nuplan_sensor_root / nuplan_lidar_pc.filename - if lidar_full_path.exists() and lidar_full_path.is_file(): - lidar = nuplan_lidar_pc.filename +) -> List[LiDARData]: + + lidars: List[LiDARData] = [] + if dataset_converter_config.include_lidars: + + lidar_full_path = nuplan_sensor_root / nuplan_lidar_pc.filename + if lidar_full_path.exists() and lidar_full_path.is_file(): + lidars.append( + LiDARData( + lidar_type=LiDARType.LIDAR_MERGED, + dataset_root=nuplan_sensor_root, + relative_path=nuplan_lidar_pc.filename, + ) + ) - return {LiDARType.LIDAR_MERGED: lidar} + return lidars def _extract_nuplan_scenario_tag(nuplan_log_db: NuPlanDB, lidar_pc_token: str) -> List[str]: diff --git a/src/py123d/conversion/datasets/nuplan/nuplan_load_sensor.py b/src/py123d/conversion/datasets/nuplan/nuplan_load_sensor.py deleted file mode 100644 index e964e03c..00000000 --- a/src/py123d/conversion/datasets/nuplan/nuplan_load_sensor.py +++ /dev/null @@ -1,15 +0,0 @@ -import io -from pathlib import Path - -from py123d.common.utils.dependencies import check_dependencies -from py123d.datatypes.sensors.lidar.lidar import LiDAR, LiDARMetadata - -check_dependencies(["nuplan"], "nuplan") -from nuplan.database.utils.pointclouds.lidar import LidarPointCloud - - -def load_nuplan_lidar_from_path(filepath: Path, lidar_metadata: LiDARMetadata) -> LiDAR: - assert filepath.exists(), f"LiDAR file not found: {filepath}" - with open(filepath, "rb") as fp: - buffer = io.BytesIO(fp.read()) - return LiDAR(metadata=lidar_metadata, point_cloud=LidarPointCloud.from_buffer(buffer, "pcd").points) diff --git a/src/py123d/conversion/datasets/nuplan/nuplan_sensor_io.py b/src/py123d/conversion/datasets/nuplan/nuplan_sensor_io.py new file mode 100644 index 00000000..fed2d508 --- /dev/null +++ b/src/py123d/conversion/datasets/nuplan/nuplan_sensor_io.py @@ -0,0 +1,29 @@ +import io +from pathlib import Path +from typing import Dict + +import numpy as np + +from py123d.common.utils.dependencies import check_dependencies +from py123d.conversion.datasets.nuplan.utils.nuplan_constants import NUPLAN_LIDAR_DICT +from py123d.datatypes.sensors.lidar.lidar import LiDARType +from py123d.datatypes.sensors.lidar.lidar_index import NuPlanLidarIndex + +check_dependencies(["nuplan"], "nuplan") +from nuplan.database.utils.pointclouds.lidar import LidarPointCloud + + +def load_nuplan_lidar_pcs_from_file(pcd_path: Path) -> Dict[LiDARType, np.ndarray]: + assert pcd_path.exists(), f"LiDAR file not found: {pcd_path}" + with open(pcd_path, "rb") as fp: + buffer = io.BytesIO(fp.read()) + + merged_lidar_pc = LidarPointCloud.from_buffer(buffer, "pcd").points + + lidar_pcs_dict: Dict[LiDARType, np.ndarray] = {} + for lidar_id, lidar_type in NUPLAN_LIDAR_DICT.items(): + mask = merged_lidar_pc[-1, :] == lidar_id + lidar_pc = merged_lidar_pc[: len(NuPlanLidarIndex), mask].T.astype(np.float32) + lidar_pcs_dict[lidar_type] = lidar_pc + + return lidar_pcs_dict diff --git a/src/py123d/conversion/datasets/nuplan/utils/nuplan_constants.py b/src/py123d/conversion/datasets/nuplan/utils/nuplan_constants.py index bf385f15..4b074d53 100644 --- a/src/py123d/conversion/datasets/nuplan/utils/nuplan_constants.py +++ b/src/py123d/conversion/datasets/nuplan/utils/nuplan_constants.py @@ -4,6 +4,7 @@ from py123d.datatypes.detections.box_detection_types import BoxDetectionType from py123d.datatypes.detections.traffic_light_detections import TrafficLightStatus from py123d.datatypes.maps.map_datatypes import RoadLineType +from py123d.datatypes.sensors.lidar.lidar import LiDARType from 
py123d.datatypes.time.time_point import TimePoint @@ -37,6 +38,14 @@ class NuPlanBoxDetectionType(IntEnum): "generic_object": BoxDetectionType.GENERIC_OBJECT, } +# https://github.com/motional/nuplan-devkit/blob/e9241677997dd86bfc0bcd44817ab04fe631405b/nuplan/database/nuplan_db_orm/utils.py#L1129-L1135 +NUPLAN_LIDAR_DICT = { + 0: LiDARType.LIDAR_TOP, + 1: LiDARType.LIDAR_SIDE_RIGHT, + 2: LiDARType.LIDAR_SIDE_LEFT, + 3: LiDARType.LIDAR_BACK, + 4: LiDARType.LIDAR_FRONT, +} NUPLAN_DATA_SPLITS: Set[str] = { "nuplan_train", diff --git a/src/py123d/conversion/datasets/pandaset/pandaset_converter.py b/src/py123d/conversion/datasets/pandaset/pandaset_converter.py index fa0c50c7..49a81d19 100644 --- a/src/py123d/conversion/datasets/pandaset/pandaset_converter.py +++ b/src/py123d/conversion/datasets/pandaset/pandaset_converter.py @@ -1,5 +1,5 @@ from pathlib import Path -from typing import Dict, List, Optional, Tuple, Union +from typing import Dict, List, Tuple, Union import numpy as np import pandas as pd @@ -16,7 +16,6 @@ PANDASET_LOG_NAMES, PANDASET_SPLITS, ) -from py123d.conversion.datasets.pandaset.pandaset_sensor_loading import load_pandaset_lidars_pc_from_path from py123d.conversion.datasets.pandaset.pandaset_utlis import ( main_lidar_to_rear_axle, pandaset_pose_dict_to_state_se3, @@ -24,9 +23,9 @@ read_pkl_gz, rotate_pandaset_pose_to_iso_coordinates, ) -from py123d.conversion.log_writer.abstract_log_writer import AbstractLogWriter +from py123d.conversion.log_writer.abstract_log_writer import AbstractLogWriter, LiDARData from py123d.conversion.map_writer.abstract_map_writer import AbstractMapWriter -from py123d.conversion.utils.sensor_utils.lidar_index_registry import PandasetLidarIndex +from py123d.conversion.registry.lidar_index_registry import PandasetLidarIndex from py123d.datatypes.detections.box_detections import BoxDetectionMetadata, BoxDetectionSE3, BoxDetectionWrapper from py123d.datatypes.scene.scene_metadata import LogMetadata from py123d.datatypes.sensors.camera.pinhole_camera import ( @@ -373,20 +372,21 @@ def _extract_pandaset_sensor_camera( def _extract_pandaset_lidar( source_log_path: Path, iteration: int, ego_state_se3: EgoStateSE3, dataset_converter_config: DatasetConverterConfig -) -> Dict[LiDARType, Optional[Union[str, np.ndarray]]]: +) -> List[LiDARData]: - lidar_dict: Dict[LiDARType, Optional[Union[str, np.ndarray]]] = {} + lidars: List[LiDARData] = [] if dataset_converter_config.include_lidars: iteration_str = f"{iteration:02d}" lidar_absolute_path = source_log_path / "lidar" / f"{iteration_str}.pkl.gz" assert lidar_absolute_path.exists(), f"LiDAR file {str(lidar_absolute_path)} does not exist." 
+        lidars.append(
+            LiDARData(
+                lidar_type=LiDARType.LIDAR_MERGED,
+                timestamp=None,
+                iteration=iteration,
+                dataset_root=source_log_path.parent,
+                relative_path=str(lidar_absolute_path.relative_to(source_log_path.parent)),
+            )
+        )
 
-        if dataset_converter_config.lidar_store_option == "path":
-            pandaset_data_root = source_log_path.parent
-            lidar_relative_path = str(lidar_absolute_path.relative_to(pandaset_data_root))
-            lidar_dict[LiDARType.LIDAR_FRONT] = lidar_relative_path
-            lidar_dict[LiDARType.LIDAR_TOP] = lidar_relative_path
-        elif dataset_converter_config.lidar_store_option == "binary":
-            lidar_dict = load_pandaset_lidars_pc_from_path(lidar_absolute_path, ego_state_se3)
-
-    return lidar_dict
+    return lidars
diff --git a/src/py123d/conversion/datasets/pandaset/pandaset_sensor_loading.py b/src/py123d/conversion/datasets/pandaset/pandaset_sensor_io.py
similarity index 67%
rename from src/py123d/conversion/datasets/pandaset/pandaset_sensor_loading.py
rename to src/py123d/conversion/datasets/pandaset/pandaset_sensor_io.py
index 503b3dff..30fff374 100644
--- a/src/py123d/conversion/datasets/pandaset/pandaset_sensor_loading.py
+++ b/src/py123d/conversion/datasets/pandaset/pandaset_sensor_io.py
@@ -1,13 +1,17 @@
 from pathlib import Path
-from typing import Dict, Union
+from typing import Dict, Optional, Union
 
 import numpy as np
 import pandas as pd
 
-from py123d.conversion.datasets.pandaset.pandaset_utlis import read_pkl_gz
-from py123d.conversion.utils.sensor_utils.lidar_index_registry import PandasetLidarIndex
+from py123d.conversion.datasets.pandaset.pandaset_utlis import (
+    main_lidar_to_rear_axle,
+    pandaset_pose_dict_to_state_se3,
+    read_json,
+    read_pkl_gz,
+)
+from py123d.conversion.registry.lidar_index_registry import PandasetLidarIndex
 from py123d.datatypes.sensors.lidar.lidar import LiDARType
-from py123d.datatypes.vehicle_state.ego_state import EgoStateSE3
 from py123d.geometry.transform.transform_se3 import convert_absolute_to_relative_points_3d_array
 
 
@@ -27,19 +31,24 @@ def load_pandaset_global_lidar_pc_from_path(pkl_gz_path: Union[Path, str]) -> Di
     return {LiDARType.LIDAR_TOP: top_lidar_df.to_numpy(), LiDARType.LIDAR_FRONT: front_lidar_df.to_numpy()}
 
 
-def load_pandaset_lidars_pc_from_path(
-    pkl_gz_path: Union[Path, str], ego_state_se3: EgoStateSE3
-) -> Dict[LiDARType, np.ndarray]:
+def load_pandaset_lidars_pcs_from_file(
+    pkl_gz_path: Union[Path, str],
+    iteration: Optional[int],
+) -> Dict[LiDARType, np.ndarray]:
+
+    pkl_gz_path = Path(pkl_gz_path)
+    assert pkl_gz_path.exists(), f"Pandaset LiDAR file not found: {pkl_gz_path}"
 
     lidar_pc_dict = load_pandaset_global_lidar_pc_from_path(pkl_gz_path)
 
+    ego_pose = main_lidar_to_rear_axle(
+        pandaset_pose_dict_to_state_se3(read_json(pkl_gz_path.parent / "poses.json")[iteration])
+    )
+
     for lidar_type in lidar_pc_dict.keys():
         lidar_pc_dict[lidar_type][..., PandasetLidarIndex.XYZ] = convert_absolute_to_relative_points_3d_array(
-            ego_state_se3.rear_axle_se3,
+            ego_pose,
             lidar_pc_dict[lidar_type][..., PandasetLidarIndex.XYZ],
         )
 
-    # relative_points = (points_3d_array - t_origin) @ R_origin
-
-    # Pass the loaded point clouds to the appropriate LiDAR types
     return lidar_pc_dict
diff --git a/src/py123d/conversion/datasets/pandaset/pandaset_utlis.py b/src/py123d/conversion/datasets/pandaset/pandaset_utlis.py
index caab4da1..e179a41c 100644
--- a/src/py123d/conversion/datasets/pandaset/pandaset_utlis.py
+++ b/src/py123d/conversion/datasets/pandaset/pandaset_utlis.py
@@ -89,8 +89,4 @@ def main_lidar_to_rear_axle(pose: StateSE3) -> StateSE3:
vector_3d=Vector3D(x=-0.840, y=0.0, z=0.0), ) - # transformation_matrix[0, 3] = pose.y - # transformation_matrix[1, 3] = -pose.x - # transformation_matrix[2, 3] = pose.z - return imu_pose diff --git a/src/py123d/conversion/datasets/wopd/wopd_utils.py b/src/py123d/conversion/datasets/wopd/utils/wopd_utils.py similarity index 100% rename from src/py123d/conversion/datasets/wopd/wopd_utils.py rename to src/py123d/conversion/datasets/wopd/utils/wopd_utils.py diff --git a/src/py123d/conversion/datasets/wopd/waymo_sensor_io.py b/src/py123d/conversion/datasets/wopd/waymo_sensor_io.py new file mode 100644 index 00000000..ca32c3d8 --- /dev/null +++ b/src/py123d/conversion/datasets/wopd/waymo_sensor_io.py @@ -0,0 +1,66 @@ +from pathlib import Path +from typing import Dict, Optional + +import numpy as np + +from py123d.common.utils.dependencies import check_dependencies +from py123d.conversion.datasets.wopd.utils.wopd_constants import WOPD_CAMERA_TYPES, WOPD_LIDAR_TYPES +from py123d.conversion.datasets.wopd.utils.wopd_utils import parse_range_image_and_camera_projection +from py123d.datatypes.sensors.camera.pinhole_camera import PinholeCameraType +from py123d.datatypes.sensors.lidar.lidar import LiDARType + +check_dependencies(modules=["tensorflow", "waymo_open_dataset"], optional_name="waymo") +import tensorflow as tf +from waymo_open_dataset import dataset_pb2 +from waymo_open_dataset.utils import frame_utils + + +def _get_frame_at_iteration(filepath: Path, iteration: int) -> Optional[dataset_pb2.Frame]: + """Helper function to load a Waymo Frame at a specific iteration from a TFRecord file.""" + dataset = tf.data.TFRecordDataset(str(filepath), compression_type="") + + frame: Optional[dataset_pb2.Frame] = None + for i, data in enumerate(dataset): + if i == iteration: + frame = dataset_pb2.Frame() + frame.ParseFromString(data.numpy()) + break + return frame + + +def load_jpeg_binary_from_file(tf_record_path: Path, iteration: int, pinhole_camera_type: PinholeCameraType) -> bytes: + frame = _get_frame_at_iteration(tf_record_path, iteration) + assert frame is not None, f"Frame at iteration {iteration} not found in Waymo file: {tf_record_path}" + + jpeg_binary: Optional[bytes] = None + for image_proto in frame.images: + camera_type = WOPD_CAMERA_TYPES[image_proto.name] + if camera_type == pinhole_camera_type: + jpeg_binary = image_proto.image + break + return jpeg_binary + + +def load_wopd_lidar_pcs_from_file( + tf_record_path: Path, index: int, keep_polar_features: bool = False +) -> Dict[LiDARType, np.ndarray]: + + frame = _get_frame_at_iteration(tf_record_path, index) + assert frame is not None, f"Frame at iteration {index} not found in Waymo file: {tf_record_path}" + + (range_images, camera_projections, _, range_image_top_pose) = parse_range_image_and_camera_projection(frame) + + points, cp_points = frame_utils.convert_range_image_to_point_cloud( + frame=frame, + range_images=range_images, + camera_projections=camera_projections, + range_image_top_pose=range_image_top_pose, + keep_polar_features=keep_polar_features, + ) + + lidar_pcs_dict: Dict[LiDARType, np.ndarray] = {} + for lidar_idx, frame_lidar in enumerate(frame.lasers): + lidar_type = WOPD_LIDAR_TYPES[frame_lidar.name] + lidar_pcs_dict[lidar_type] = np.array(points[lidar_idx], dtype=np.float32) + + return lidar_pcs_dict diff --git a/src/py123d/conversion/datasets/wopd/wopd_converter.py b/src/py123d/conversion/datasets/wopd/wopd_converter.py index 74cc3861..c2fe667f 100644 --- a/src/py123d/conversion/datasets/wopd/wopd_converter.py +++ 
b/src/py123d/conversion/datasets/wopd/wopd_converter.py @@ -1,5 +1,6 @@ import logging import os +import traceback from pathlib import Path from typing import Dict, List, Optional, Tuple, Union @@ -16,11 +17,10 @@ WOPD_LIDAR_TYPES, ) from py123d.conversion.datasets.wopd.waymo_map_utils.wopd_map_utils import convert_wopd_map -from py123d.conversion.datasets.wopd.wopd_utils import parse_range_image_and_camera_projection -from py123d.conversion.log_writer.abstract_log_writer import AbstractLogWriter +from py123d.conversion.log_writer.abstract_log_writer import AbstractLogWriter, LiDARData from py123d.conversion.map_writer.abstract_map_writer import AbstractMapWriter +from py123d.conversion.registry.lidar_index_registry import DefaultLidarIndex, WOPDLidarIndex from py123d.conversion.utils.sensor_utils.camera_conventions import CameraConvention, convert_camera_convention -from py123d.conversion.utils.sensor_utils.lidar_index_registry import DefaultLidarIndex, WOPDLidarIndex from py123d.datatypes.detections.box_detections import BoxDetectionMetadata, BoxDetectionSE3, BoxDetectionWrapper from py123d.datatypes.maps.map_metadata import MapMetadata from py123d.datatypes.scene.scene_metadata import LogMetadata @@ -54,10 +54,8 @@ check_dependencies(modules=["tensorflow", "waymo_open_dataset"], optional_name="waymo") import tensorflow as tf from waymo_open_dataset import dataset_pb2 -from waymo_open_dataset.utils import frame_utils os.environ["CUDA_VISIBLE_DEVICES"] = "-1" -os.environ["OMP_NUM_THREADS"] = "1" os.environ["TF_CPP_MIN_LOG_LEVEL"] = "2" os.environ["CUDA_VISIBLE_DEVICES"] = "-1" tf.config.set_visible_devices(tf.config.list_physical_devices("CPU")) @@ -181,10 +179,18 @@ def convert_log(self, log_index: int, log_writer: AbstractLogWriter) -> None: box_detections=_extract_wopd_box_detections(frame, map_pose_offset, self._zero_roll_pitch), traffic_lights=None, # TODO: Check if WOPD has traffic light information cameras=_extract_wopd_cameras(frame, self.dataset_converter_config), - lidars=_extract_wopd_lidars(frame, self._keep_polar_features, self.dataset_converter_config), + lidars=_extract_wopd_lidars( + frame, + self._keep_polar_features, + frame_idx, + self.dataset_converter_config, + source_tf_record_path, + self._wopd_data_root, + ), ) except Exception as e: logger.error(f"Error processing log {log_name}: {e}") + traceback.print_exc() log_writer.close() @@ -377,14 +383,9 @@ def _extract_wopd_cameras( if dataset_converter_config.include_cameras: - # TODO: Implement option to store images as paths - assert ( - dataset_converter_config.camera_store_option == "binary" - ), "Camera store option must be 'binary' for WOPD." - - # NOTE: The extrinsic matrix in frame.context.camera_calibration is fixed to model the ego to camera transformation. + # NOTE @DanielDauner: The extrinsic matrix in frame.context.camera_calibration is fixed to model the ego to camera transformation. # The poses in frame.images[idx] are the motion compensated ego poses when the camera triggers. - + # TODO: Verify if this is correct. 
        camera_extrinsic: Dict[str, StateSE3] = {}
        for calibration in frame.context.camera_calibrations:
            camera_type = WOPD_CAMERA_TYPES[calibration.name]
@@ -410,27 +411,24 @@ def _extract_wopd_cameras(
 def _extract_wopd_lidars(
     frame: dataset_pb2.Frame,
     keep_polar_features: bool,
+    frame_idx: int,
     dataset_converter_config: DatasetConverterConfig,
+    absolute_tf_record_path: Path,
+    wopd_data_root: Path,
-) -> Dict[LiDARType, npt.NDArray[np.float32]]:
+) -> List[LiDARData]:
 
-    lidar_data: Dict[LiDARType, npt.NDArray[np.float32]] = {}
+    lidars: List[LiDARData] = []
 
     if dataset_converter_config.include_lidars:
-        # TODO: Implement option to store point clouds as paths
-        assert dataset_converter_config.lidar_store_option == "binary", "Lidar store option must be 'binary' for WOPD."
-        (range_images, camera_projections, _, range_image_top_pose) = parse_range_image_and_camera_projection(frame)
-
-        points, cp_points = frame_utils.convert_range_image_to_point_cloud(
-            frame=frame,
-            range_images=range_images,
-            camera_projections=camera_projections,
-            range_image_top_pose=range_image_top_pose,
-            keep_polar_features=keep_polar_features,
+        relative_path = absolute_tf_record_path.relative_to(wopd_data_root)
+        lidars.append(
+            LiDARData(
+                lidar_type=LiDARType.LIDAR_MERGED,
+                iteration=frame_idx,
+                dataset_root=wopd_data_root,
+                relative_path=relative_path,
+            )
         )
 
-        for lidar_idx, frame_lidar in enumerate(frame.lasers):
-            lidar_type = WOPD_LIDAR_TYPES[frame_lidar.name]
-            lidar_data[lidar_type] = np.array(points[lidar_idx], dtype=np.float32).flatten()
-
-    return lidar_data
+    return lidars
diff --git a/src/py123d/conversion/log_writer/abstract_log_writer.py b/src/py123d/conversion/log_writer/abstract_log_writer.py
index 87c741fb..6e5185a2 100644
--- a/src/py123d/conversion/log_writer/abstract_log_writer.py
+++ b/src/py123d/conversion/log_writer/abstract_log_writer.py
@@ -1,5 +1,9 @@
+from __future__ import annotations
+
 import abc
-from typing import Any, Dict, List, Optional, Tuple
+from dataclasses import dataclass
+from pathlib import Path
+from typing import Any, Dict, List, Optional, Tuple, Union
 
 from py123d.conversion.dataset_converter_config import DatasetConverterConfig
 from py123d.datatypes.detections.box_detections import BoxDetectionWrapper
@@ -36,7 +40,7 @@ def write(
         box_detections: Optional[BoxDetectionWrapper] = None,
         traffic_lights: Optional[TrafficLightDetectionWrapper] = None,
         cameras: Optional[Dict[PinholeCameraType, Tuple[Any, ...]]] = None,
-        lidars: Optional[Dict[LiDARType, Any]] = None,
+        lidars: Optional[List[LiDARData]] = None,
         scenario_tags: Optional[List[str]] = None,
         route_lane_group_ids: Optional[List[int]] = None,
         **kwargs,
@@ -46,3 +50,38 @@
     def close(self) -> None:
         pass
+
+
+@dataclass
+class LiDARData:
+
+    lidar_type: LiDARType
+
+    timestamp: Optional[TimePoint] = None
+    iteration: Optional[int] = None
+    dataset_root: Optional[Union[str, Path]] = None
+    relative_path: Optional[Union[str, Path]] = None
+
+    def __post_init__(self):
+        has_file_path = self.dataset_root is not None and self.relative_path is not None
+
+        assert has_file_path, "Both dataset_root and relative_path must be provided for LiDARData."
+ + +@dataclass +class CameraData: + + camera_type: PinholeCameraType + + timestamp: Optional[TimePoint] = None + jpeg_binary: Optional[bytes] = None + dataset_root: Optional[Union[str, Path]] = None + relative_path: Optional[Union[str, Path]] = None + + def __post_init__(self): + has_file_path = self.dataset_root is not None and self.relative_path is not None + has_jpeg_binary = self.jpeg_binary is not None + + assert ( + has_file_path or has_jpeg_binary + ), "Either file path (dataset_root and relative_path) or jpeg_binary must be provided for CameraData." diff --git a/src/py123d/conversion/log_writer/arrow_log_writer.py b/src/py123d/conversion/log_writer/arrow_log_writer.py index 53d6b03f..f48a18e6 100644 --- a/src/py123d/conversion/log_writer/arrow_log_writer.py +++ b/src/py123d/conversion/log_writer/arrow_log_writer.py @@ -7,8 +7,10 @@ from py123d.common.utils.uuid_utils import create_deterministic_uuid from py123d.conversion.abstract_dataset_converter import AbstractLogWriter, DatasetConverterConfig -from py123d.conversion.log_writer.utils.draco_lidar_compression import compress_lidar_with_draco -from py123d.conversion.log_writer.utils.laz_lidar_compression import compress_lidar_with_laz +from py123d.conversion.log_writer.abstract_log_writer import LiDARData +from py123d.conversion.sensor_io.lidar.draco_lidar_io import encode_lidar_pc_as_draco_binary +from py123d.conversion.sensor_io.lidar.file_lidar_io import load_lidar_pcs_from_file +from py123d.conversion.sensor_io.lidar.laz_lidar_io import encode_lidar_pc_as_laz_binary from py123d.datatypes.detections.box_detections import BoxDetectionWrapper from py123d.datatypes.detections.traffic_light_detections import TrafficLightDetectionWrapper from py123d.datatypes.scene.arrow.utils.arrow_metadata_utils import add_log_metadata_to_arrow_schema @@ -27,7 +29,7 @@ def __init__( logs_root: Union[str, Path], ipc_compression: Optional[Literal["lz4", "zstd"]] = None, ipc_compression_level: Optional[int] = None, - lidar_compression: Optional[Literal["draco", "laz"]] = "laz", + lidar_compression: Optional[Literal["draco", "laz"]] = "draco", ) -> None: self._logs_root = Path(logs_root) @@ -82,7 +84,7 @@ def write( box_detections: Optional[BoxDetectionWrapper] = None, traffic_lights: Optional[TrafficLightDetectionWrapper] = None, cameras: Optional[Dict[PinholeCameraType, Tuple[Any, ...]]] = None, - lidars: Optional[Dict[LiDARType, Any]] = None, + lidars: Optional[List[LiDARData]] = None, scenario_tags: Optional[List[str]] = None, route_lane_group_ids: Optional[List[int]] = None, ) -> None: @@ -188,28 +190,30 @@ def write( # -------------------------------------------------------------------------------------------------------------- # LiDARs # -------------------------------------------------------------------------------------------------------------- - if self._dataset_converter_config.include_lidars: + if self._dataset_converter_config.include_lidars and len(self._log_metadata.lidar_metadata) > 0: assert lidars is not None, "LiDAR data is required but not provided." - provided_lidars = set(lidars.keys()) - expected_lidars = set(self._log_metadata.lidar_metadata.keys()) - for lidar_type in expected_lidars: - lidar_name = lidar_type.serialize() - # NOTE: Missing LiDARs are allowed, similar to cameras - # In this case, we write None/null to the arrow table. 
-            lidar_data: Optional[Any] = None
-            if lidar_type in provided_lidars:
+            if self._dataset_converter_config.lidar_store_option == "path_merged":
+                # NOTE @DanielDauner: The path_merged option is necessary for datasets that natively store multiple
+                # LiDAR point clouds in a single file. In this case, writing the file path several times is wasteful.
+                # Instead, we store the file path once, and divide the point clouds during reading.
+                assert len(lidars) == 1, "Exactly one LiDARData must be provided for merged LiDAR storage."
+                merged_lidar_data: Optional[str] = str(lidars[0].relative_path)
+
+                record_batch_data[f"{LiDARType.LIDAR_MERGED.serialize()}_data"] = [merged_lidar_data]
-                lidar_data = lidars[lidar_type]
+            else:
+                # NOTE @DanielDauner: For "path" and "binary" options, we write each LiDAR in a separate column.
+                # We currently assume that all lidars are provided at the same time step.
+                # Theoretically, we could extend this to store asynchronous LiDARs in the future by storing the
+                # lidar data list as a dictionary, list, or struct-like object in the columns.
+                expected_lidars = set(self._log_metadata.lidar_metadata.keys())
+                lidar_data_dict = self._prepare_lidar_data_dict(lidars)
-                # Possible compression step
-                if self._dataset_converter_config.lidar_store_option == "binary":
-                    lidar_metadata = self._log_metadata.lidar_metadata[lidar_type]
-                    if self._lidar_compression == "draco":
-                        lidar_data = compress_lidar_with_draco(lidar_data, lidar_metadata)
-                    elif self._lidar_compression == "laz":
-                        lidar_data = compress_lidar_with_laz(lidar_data, lidar_metadata)
+                for lidar_type in expected_lidars:
+                    lidar_name = lidar_type.serialize()
+                    lidar_data: Optional[Union[str, bytes]] = lidar_data_dict.get(lidar_type, None)
+                    record_batch_data[f"{lidar_name}_data"] = [lidar_data]
-                record_batch_data[f"{lidar_name}_data"] = [lidar_data]
 
        # --------------------------------------------------------------------------------------------------------------
        # Miscellaneous (Scenario Tags / Route)
@@ -300,16 +304,19 @@ def _build_schema(dataset_converter_config: DatasetConverterConfig, log_metadata
         # --------------------------------------------------------------------------------------------------------------
         # LiDARs
         # --------------------------------------------------------------------------------------------------------------
-        if dataset_converter_config.include_lidars:
-            for lidar_type in log_metadata.lidar_metadata.keys():
-                lidar_name = lidar_type.serialize()
+        if dataset_converter_config.include_lidars and len(log_metadata.lidar_metadata) > 0:
+            if dataset_converter_config.lidar_store_option == "path_merged":
+                schema_list.append((f"{LiDARType.LIDAR_MERGED.serialize()}_data", pa.string()))
+            else:
+                for lidar_type in log_metadata.lidar_metadata.keys():
+                    lidar_name = lidar_type.serialize()
 
-                # Depending on the storage option, define the schema for LiDAR data
-                if dataset_converter_config.lidar_store_option == "path":
-                    schema_list.append((f"{lidar_name}_data", pa.string()))
+                    # Depending on the storage option, define the schema for LiDAR data
+                    if dataset_converter_config.lidar_store_option == "path":
+                        schema_list.append((f"{lidar_name}_data", pa.string()))
 
-                elif dataset_converter_config.lidar_store_option == "binary":
-                    schema_list.append((f"{lidar_name}_data", pa.binary()))
+                    elif dataset_converter_config.lidar_store_option == "binary":
+                        schema_list.append((f"{lidar_name}_data", pa.binary()))
 
        # --------------------------------------------------------------------------------------------------------------
       # Miscellaneous (Scenario Tags / Route)
@@ -321,3 +328,36 @@
         schema_list.append(("route_lane_group_ids", pa.list_(pa.int64())))
 
     return add_log_metadata_to_arrow_schema(pa.schema(schema_list), log_metadata)
+
+    def _prepare_lidar_data_dict(self, lidars: List[LiDARData]) -> Dict[LiDARType, Union[str, bytes]]:
+        lidar_data_dict: Dict[LiDARType, Union[str, bytes]] = {}
+
+        if self._dataset_converter_config.lidar_store_option == "path":
+            for lidar_data in lidars:
+                lidar_data_dict[lidar_data.lidar_type] = str(lidar_data.relative_path)
+
+        elif self._dataset_converter_config.lidar_store_option == "binary":
+            lidar_pcs_dict: Dict[LiDARType, np.ndarray] = {}
+
+            # 1. Load point clouds from files
+            for lidar_data in lidars:
+                lidar_pcs_dict.update(
+                    load_lidar_pcs_from_file(
+                        lidar_data.relative_path,
+                        self._log_metadata,
+                        lidar_data.iteration,
+                        lidar_data.dataset_root,
+                    )
+                )
+
+            # 2. Compress the point clouds to bytes
+            for lidar_type, point_cloud in lidar_pcs_dict.items():
+                lidar_metadata = self._log_metadata.lidar_metadata[lidar_type]
+                binary: Optional[bytes] = None
+                if self._lidar_compression == "draco":
+                    binary = encode_lidar_pc_as_draco_binary(point_cloud, lidar_metadata)
+                elif self._lidar_compression == "laz":
+                    binary = encode_lidar_pc_as_laz_binary(point_cloud, lidar_metadata)
+                lidar_data_dict[lidar_type] = binary
+
+        return lidar_data_dict
diff --git a/src/py123d/conversion/registry/__init__.py b/src/py123d/conversion/registry/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/src/py123d/conversion/registry/box_detection_type_registry.py b/src/py123d/conversion/registry/box_detection_type_registry.py
new file mode 100644
index 00000000..f3110808
--- /dev/null
+++ b/src/py123d/conversion/registry/box_detection_type_registry.py
@@ -0,0 +1,6 @@
+BOX_DETECTION_TYPE_REGISTRY = {}
+
+
+def register_box_detection_type(enum_class):
+    BOX_DETECTION_TYPE_REGISTRY[enum_class.__name__] = enum_class
+    return enum_class
diff --git a/src/py123d/conversion/utils/sensor_utils/lidar_index_registry.py b/src/py123d/conversion/registry/lidar_index_registry.py
similarity index 99%
rename from src/py123d/conversion/utils/sensor_utils/lidar_index_registry.py
rename to src/py123d/conversion/registry/lidar_index_registry.py
index 7684b685..50f5f8a3 100644
--- a/src/py123d/conversion/utils/sensor_utils/lidar_index_registry.py
+++ b/src/py123d/conversion/registry/lidar_index_registry.py
@@ -41,7 +41,6 @@ class NuPlanLidarIndex(LiDARIndex):
     Z = 2
     INTENSITY = 3
     RING = 4
-    ID = 5
 
 
 @register_lidar_index
diff --git a/src/py123d/conversion/sensor_io/__init__.py b/src/py123d/conversion/sensor_io/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/src/py123d/conversion/sensor_io/camera/__init__.py b/src/py123d/conversion/sensor_io/camera/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/src/py123d/conversion/sensor_io/camera/jpeg_camera_io.py b/src/py123d/conversion/sensor_io/camera/jpeg_camera_io.py
new file mode 100644
index 00000000..327db77c
--- /dev/null
+++ b/src/py123d/conversion/sensor_io/camera/jpeg_camera_io.py
@@ -0,0 +1,38 @@
+from pathlib import Path
+from typing import Dict, Optional, Union
+
+from omegaconf import DictConfig
+
+from py123d.datatypes.sensors.camera.pinhole_camera import PinholeCamera, PinholeCameraMetadata
+from
py123d.script.utils.dataset_path_utils import get_dataset_paths + +DATASET_PATHS: DictConfig = get_dataset_paths() +DATASET_SENSOR_ROOT: Dict[str, Path] = { + "nuplan": DATASET_PATHS.nuplan_sensor_root, + "av2-sensor": DATASET_PATHS.av2_sensor_data_root, + "wopd": DATASET_PATHS.wopd_data_root, + "pandaset": DATASET_PATHS.pandaset_data_root, +} + + +def load_image_from_jpeg_file( + dataset_name: str, + dataset_root: Path, + relative_path: Union[str, Path], + camera_metadata: PinholeCameraMetadata, + iteration: Optional[int] = None, +) -> PinholeCamera: + assert relative_path is not None, "Relative path to camera JPEG file must be provided." + + +def load_image_from_jpeg_binary( + dataset_name: str, + relative_path: Union[str, Path], + pinhole_camera_metadata: PinholeCameraMetadata, + iteration: Optional[int] = None, +) -> PinholeCamera: + assert relative_path is not None, "Relative path to camera JPEG file must be provided." + absolute_path = Path(dataset_name) / relative_path + with open(absolute_path, "rb") as f: + jpeg_binary = f.read() + return PinholeCamera(metadata=pinhole_camera_metadata, jpeg_binary=jpeg_binary) diff --git a/src/py123d/conversion/sensor_io/camera/mp4_camera_io.py b/src/py123d/conversion/sensor_io/camera/mp4_camera_io.py new file mode 100644 index 00000000..626d7a63 --- /dev/null +++ b/src/py123d/conversion/sensor_io/camera/mp4_camera_io.py @@ -0,0 +1,180 @@ +# TODO: add method of handling camera mp4 io +def load_image_from_mp4_file() -> None: + raise NotImplementedError + + +from pathlib import Path +from typing import Optional + +import cv2 +import numpy as np + + +class MP4Writer: + """Write images sequentially to an MP4 video file.""" + + def __init__(self, output_path: str, fps: float = 30.0, codec: str = "mp4v"): + """ + Initialize MP4 writer. + + Args: + output_path: Path to output MP4 file + fps: Frames per second + codec: Video codec ('mp4v', 'avc1', 'h264') + """ + self.output_path = output_path + self.fps = fps + self.codec = codec + self.writer = None + self.frame_size = None + self.frame_count = 0 + + def write_frame(self, frame: np.ndarray): + """ + Write a single frame to the video. + + Args: + frame: Image as numpy array (BGR format) + """ + if self.writer is None: + # Initialize writer with first frame's dimensions + h, w = frame.shape[:2] + self.frame_size = (w, h) + fourcc = cv2.VideoWriter_fourcc(*self.codec) + self.writer = cv2.VideoWriter(self.output_path, fourcc, self.fps, self.frame_size) + + if frame.shape[:2][::-1] != self.frame_size: + raise ValueError(f"Frame size {frame.shape[:2][::-1]} doesn't match " f"video size {self.frame_size}") + + self.writer.write(frame) + self.frame_count += 1 + + def close(self): + """Release the video writer.""" + if self.writer is not None: + self.writer.release() + self.writer = None + + def __enter__(self): + return self + + def __exit__(self, exc_type, exc_val, exc_tb): + self.close() + + +class MP4Reader: + """Read MP4 video with random frame access.""" + + def __init__(self, video_path: str): + """ + Initialize MP4 reader. 
+ + Args: + video_path: Path to MP4 file + """ + self.video_path = video_path + if not Path(video_path).exists(): + raise FileNotFoundError(f"Video file not found: {video_path}") + + self.cap = cv2.VideoCapture(video_path) + if not self.cap.isOpened(): + raise ValueError(f"Cannot open video file: {video_path}") + + # Get video properties + self.frame_count = int(self.cap.get(cv2.CAP_PROP_FRAME_COUNT)) + self.fps = self.cap.get(cv2.CAP_PROP_FPS) + self.width = int(self.cap.get(cv2.CAP_PROP_FRAME_WIDTH)) + self.height = int(self.cap.get(cv2.CAP_PROP_FRAME_HEIGHT)) + + def get_frame(self, frame_index: int) -> Optional[np.ndarray]: + """ + Get a specific frame by index. + + Args: + frame_index: Zero-based frame index + + Returns: + Frame as numpy array (BGR format) or None if invalid index + """ + if frame_index < 0 or frame_index >= self.frame_count: + raise IndexError(f"Frame index {frame_index} out of range " f"[0, {self.frame_count})") + + # Set the frame position + self.cap.set(cv2.CAP_PROP_POS_FRAMES, frame_index) + ret, frame = self.cap.read() + + return frame if ret else None + + def read_sequential(self) -> Optional[np.ndarray]: + """ + Read next frame sequentially. + + Returns: + Frame as numpy array or None if end of video + """ + ret, frame = self.cap.read() + return frame if ret else None + + def reset(self): + """Reset to beginning of video.""" + self.cap.set(cv2.CAP_PROP_POS_FRAMES, 0) + + def close(self): + """Release the video capture.""" + if self.cap is not None: + self.cap.release() + self.cap = None + + def __enter__(self): + return self + + def __exit__(self, exc_type, exc_val, exc_tb): + self.close() + + def __len__(self): + return self.frame_count + + def __getitem__(self, index: int) -> np.ndarray: + """Allow indexing like reader[10]""" + return self.get_frame(index) + + +# Example usage +if __name__ == "__main__": + # Create sample video + print("Creating sample video...") + with MP4Writer("output.mp4", fps=30.0) as writer: + for i in range(100): + # Create colored frames + frame = np.zeros((480, 640, 3), dtype=np.uint8) + color = int(255 * i / 100) + frame[:, :] = (color, 255 - color, 128) + + # Add frame number text + cv2.putText(frame, f"Frame {i}", (50, 240), cv2.FONT_HERSHEY_SIMPLEX, 2, (255, 255, 255), 3) + + writer.write_frame(frame) + + print(f"Video created with {writer.frame_count} frames") + + # Read video with indexing + print("\nReading video with random access...") + with MP4Reader("output.mp4") as reader: + print(f"Video info: {len(reader)} frames, " f"{reader.width}x{reader.height}, {reader.fps} fps") + + # Read specific frames + frames_to_read = [0, 25, 50, 75, 99] + for idx in frames_to_read: + frame = reader[idx] + if frame is not None: + print(f"Successfully read frame {idx}") + else: + print(f"Failed to read frame {idx}") + + # Sequential reading example + print("\nReading first 5 frames sequentially...") + reader.reset() + for i in range(5): + frame = reader.read_sequential() + if frame is not None: + print(f"Read sequential frame {i}") diff --git a/src/py123d/conversion/sensor_io/lidar/__init__.py b/src/py123d/conversion/sensor_io/lidar/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/src/py123d/conversion/log_writer/utils/draco_lidar_compression.py b/src/py123d/conversion/sensor_io/lidar/draco_lidar_io.py similarity index 86% rename from src/py123d/conversion/log_writer/utils/draco_lidar_compression.py rename to src/py123d/conversion/sensor_io/lidar/draco_lidar_io.py index 2a934766..80948ef3 100644 --- 
a/src/py123d/conversion/log_writer/utils/draco_lidar_compression.py +++ b/src/py123d/conversion/sensor_io/lidar/draco_lidar_io.py @@ -13,7 +13,7 @@ DRACO_PRESERVE_ORDER: Final[bool] = False -def compress_lidar_with_draco(point_cloud: npt.NDArray[np.float32], lidar_metadata: LiDARMetadata) -> bytes: +def encode_lidar_pc_as_draco_binary(lidar_pc: npt.NDArray[np.float32], lidar_metadata: LiDARMetadata) -> bytes: """Compress LiDAR point cloud data using Draco format. :param point_cloud: The LiDAR point cloud data to compress, as numpy array. @@ -23,7 +23,7 @@ def compress_lidar_with_draco(point_cloud: npt.NDArray[np.float32], lidar_metada lidar_index = lidar_metadata.lidar_index binary = DracoPy.encode( - point_cloud[:, lidar_index.XYZ], + lidar_pc[:, lidar_index.XYZ], quantization_bits=DRACO_QUANTIZATION_BITS, compression_level=DRACO_COMPRESSION_LEVEL, quantization_range=DRACO_QUANTIZATION_RANGE, @@ -35,7 +35,7 @@ def compress_lidar_with_draco(point_cloud: npt.NDArray[np.float32], lidar_metada return binary -def decompress_lidar_from_draco(draco_binary: bytes, lidar_metadata: LiDARMetadata) -> LiDAR: +def load_lidar_from_draco_binary(draco_binary: bytes, lidar_metadata: LiDARMetadata) -> LiDAR: """Decompress LiDAR point cloud data from Draco format. :param draco_binary: The compressed Draco binary data. diff --git a/src/py123d/conversion/sensor_io/lidar/file_lidar_io.py b/src/py123d/conversion/sensor_io/lidar/file_lidar_io.py new file mode 100644 index 00000000..edc5c7d5 --- /dev/null +++ b/src/py123d/conversion/sensor_io/lidar/file_lidar_io.py @@ -0,0 +1,62 @@ +from pathlib import Path +from typing import Dict, Optional, Union + +import numpy as np +import numpy.typing as npt +from omegaconf import DictConfig + +from py123d.datatypes.scene.scene_metadata import LogMetadata +from py123d.datatypes.sensors.lidar.lidar import LiDARType +from py123d.script.utils.dataset_path_utils import get_dataset_paths + +DATASET_PATHS: DictConfig = get_dataset_paths() +DATASET_SENSOR_ROOT: Dict[str, Path] = { + "nuplan": DATASET_PATHS.nuplan_sensor_root, + "av2-sensor": DATASET_PATHS.av2_sensor_data_root, + "wopd": DATASET_PATHS.wopd_data_root, + "pandaset": DATASET_PATHS.pandaset_data_root, +} + + +def load_lidar_pcs_from_file( + relative_path: Union[str, Path], + log_metadata: LogMetadata, + index: Optional[int] = None, + sensor_root: Optional[Union[str, Path]] = None, +) -> Dict[LiDARType, npt.NDArray[np.float32]]: + assert relative_path is not None, "Relative path to LiDAR file must be provided." + + if sensor_root is None: + assert ( + log_metadata.dataset in DATASET_SENSOR_ROOT.keys() + ), f"Dataset path for sensor loading not found for dataset: {log_metadata.dataset}." 
+ sensor_root = DATASET_SENSOR_ROOT[log_metadata.dataset] + assert sensor_root is not None, f"Dataset path for sensor loading not found for dataset: {log_metadata.dataset}" + + full_lidar_path = Path(sensor_root) / relative_path + assert full_lidar_path.exists(), f"LiDAR file not found: {full_lidar_path}" + + # NOTE: We move data specific import into if-else block, to avoid data specific import errors + if log_metadata.dataset == "nuplan": + from py123d.conversion.datasets.nuplan.nuplan_sensor_io import load_nuplan_lidar_pcs_from_file + + lidar_pcs_dict = load_nuplan_lidar_pcs_from_file(full_lidar_path) + + elif log_metadata.dataset == "av2-sensor": + from py123d.conversion.datasets.av2.av2_sensor_io import load_av2_sensor_lidar_pcs_from_file + + lidar_pcs_dict = load_av2_sensor_lidar_pcs_from_file(full_lidar_path) + + elif log_metadata.dataset == "wopd": + from py123d.conversion.datasets.wopd.waymo_sensor_io import load_wopd_lidar_pcs_from_file + + lidar_pcs_dict = load_wopd_lidar_pcs_from_file(full_lidar_path, index, keep_polar_features=False) + + elif log_metadata.dataset == "pandaset": + from py123d.conversion.datasets.pandaset.pandaset_sensor_io import load_pandaset_lidars_pcs_from_file + + lidar_pcs_dict = load_pandaset_lidars_pcs_from_file(full_lidar_path, index) + else: + raise NotImplementedError(f"Loading LiDAR data for dataset {log_metadata.dataset} is not implemented.") + + return lidar_pcs_dict diff --git a/src/py123d/conversion/log_writer/utils/laz_lidar_compression.py b/src/py123d/conversion/sensor_io/lidar/laz_lidar_io.py similarity index 91% rename from src/py123d/conversion/log_writer/utils/laz_lidar_compression.py rename to src/py123d/conversion/sensor_io/lidar/laz_lidar_io.py index 2449d83f..cedfb2b6 100644 --- a/src/py123d/conversion/log_writer/utils/laz_lidar_compression.py +++ b/src/py123d/conversion/sensor_io/lidar/laz_lidar_io.py @@ -7,7 +7,7 @@ from py123d.datatypes.sensors.lidar.lidar import LiDAR, LiDARMetadata -def compress_lidar_with_laz(point_cloud: npt.NDArray[np.float32], lidar_metadata: LiDARMetadata) -> bytes: +def encode_lidar_pc_as_laz_binary(point_cloud: npt.NDArray[np.float32], lidar_metadata: LiDARMetadata) -> bytes: """Compress LiDAR point cloud data using LAZ format. :param point_cloud: The LiDAR point cloud data to compress, as numpy array. @@ -38,7 +38,7 @@ def compress_lidar_with_laz(point_cloud: npt.NDArray[np.float32], lidar_metadata return laz_binary -def decompress_lidar_from_laz(laz_binary: bytes, lidar_metadata: LiDARMetadata) -> npt.NDArray[np.float32]: +def load_lidar_from_laz_binary(laz_binary: bytes, lidar_metadata: LiDARMetadata) -> npt.NDArray[np.float32]: """Decompress LiDAR point cloud data from LAZ format. :param laz_binary: The compressed LAZ binary data. 
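A rough round-trip sketch for the renamed compression helpers, assuming a
float32 (N, 4) point cloud and a LiDARMetadata whose lidar_index exposes XYZ;
the signatures match the hunks above, the call site itself is hypothetical:

    import numpy as np

    from py123d.conversion.sensor_io.lidar.laz_lidar_io import (
        encode_lidar_pc_as_laz_binary,
        load_lidar_from_laz_binary,
    )

    def laz_roundtrip(points: np.ndarray, lidar_metadata) -> np.ndarray:
        # Encode to LAZ bytes; the arrow getters later dispatch on the
        # b"LASF" magic prefix to pick the matching decoder.
        laz_binary = encode_lidar_pc_as_laz_binary(points, lidar_metadata)
        assert laz_binary.startswith(b"LASF")
        return load_lidar_from_laz_binary(laz_binary, lidar_metadata)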
diff --git a/src/py123d/datatypes/detections/box_detection_types.py b/src/py123d/datatypes/detections/box_detection_types.py index cca6fa5e..9ac74034 100644 --- a/src/py123d/datatypes/detections/box_detection_types.py +++ b/src/py123d/datatypes/detections/box_detection_types.py @@ -2,13 +2,6 @@ from py123d.common.utils.enums import SerialIntEnum -BOX_DETECTION_TYPE_REGISTRY = {} - - -def register_box_detection_type(enum_class): - BOX_DETECTION_TYPE_REGISTRY[enum_class.__name__] = enum_class - return enum_class - class BoxDetectionType(SerialIntEnum): """ @@ -46,27 +39,3 @@ class BoxDetectionType(SerialIntEnum): BoxDetectionType.CZONE_SIGN, BoxDetectionType.GENERIC_OBJECT, } - - -# @register_box_detection_type -# class NuPlanBoxDetectionType(SerialIntEnum): - -# VEHICLE = 0 -# BICYCLE = 1 -# PEDESTRIAN = 2 -# TRAFFIC_CONE = 3 -# BARRIER = 4 -# CZONE_SIGN = 5 -# GENERIC_OBJECT = 6 - -# def to_default_type() -> BoxDetectionType: -# mapping = { -# NuPlanBoxDetectionType.VEHICLE: BoxDetectionType.VEHICLE, -# NuPlanBoxDetectionType.BICYCLE: BoxDetectionType.BICYCLE, -# NuPlanBoxDetectionType.PEDESTRIAN: BoxDetectionType.PEDESTRIAN, -# NuPlanBoxDetectionType.TRAFFIC_CONE: BoxDetectionType.GENERIC_OBJECT, -# NuPlanBoxDetectionType.BARRIER: BoxDetectionType.GENERIC_OBJECT, -# NuPlanBoxDetectionType.CZONE_SIGN: BoxDetectionType.GENERIC_OBJECT, -# NuPlanBoxDetectionType.GENERIC_OBJECT: BoxDetectionType.GENERIC_OBJECT, -# } -# return mapping[self] diff --git a/src/py123d/datatypes/scene/arrow/arrow_scene.py b/src/py123d/datatypes/scene/arrow/arrow_scene.py index fc0b7c15..236862d5 100644 --- a/src/py123d/datatypes/scene/arrow/arrow_scene.py +++ b/src/py123d/datatypes/scene/arrow/arrow_scene.py @@ -133,7 +133,7 @@ def get_camera_at_iteration(self, iteration: int, camera_type: PinholeCameraType def get_lidar_at_iteration(self, iteration: int, lidar_type: LiDARType) -> Optional[LiDAR]: lidar: Optional[LiDAR] = None - if lidar_type in self.available_lidar_types: + if lidar_type in self.available_lidar_types or lidar_type == LiDARType.LIDAR_MERGED: lidar = get_lidar_from_arrow_table( self._get_recording_table(), self._get_table_index(iteration), diff --git a/src/py123d/datatypes/scene/arrow/utils/arrow_getters.py b/src/py123d/datatypes/scene/arrow/utils/arrow_getters.py index 440d12fa..6be9d433 100644 --- a/src/py123d/datatypes/scene/arrow/utils/arrow_getters.py +++ b/src/py123d/datatypes/scene/arrow/utils/arrow_getters.py @@ -7,6 +7,9 @@ import pyarrow as pa from omegaconf import DictConfig +from py123d.conversion.sensor_io.lidar.draco_lidar_io import load_lidar_from_draco_binary +from py123d.conversion.sensor_io.lidar.file_lidar_io import load_lidar_pcs_from_file +from py123d.conversion.sensor_io.lidar.laz_lidar_io import load_lidar_from_laz_binary from py123d.datatypes.detections.box_detection_types import BoxDetectionType from py123d.datatypes.detections.box_detections import ( BoxDetection, @@ -21,7 +24,7 @@ ) from py123d.datatypes.scene.scene_metadata import LogMetadata from py123d.datatypes.sensors.camera.pinhole_camera import PinholeCamera, PinholeCameraType -from py123d.datatypes.sensors.lidar.lidar import LiDAR, LiDARType +from py123d.datatypes.sensors.lidar.lidar import LiDAR, LiDARMetadata, LiDARType from py123d.datatypes.sensors.lidar.lidar_index import DefaultLidarIndex from py123d.datatypes.time.time_point import TimePoint from py123d.datatypes.vehicle_state.ego_state import EgoStateSE3 @@ -147,71 +150,43 @@ def get_lidar_from_arrow_table( lidar: Optional[LiDAR] = None lidar_column_name = 
f"{lidar_type.serialize()}_data" + lidar_column_name = ( + f"{LiDARType.LIDAR_MERGED.serialize()}_data" + if lidar_column_name not in arrow_table.schema.names + else lidar_column_name + ) if lidar_column_name in arrow_table.schema.names: lidar_data = arrow_table[lidar_column_name][index].as_py() - lidar_metadata = log_metadata.lidar_metadata[lidar_type] if isinstance(lidar_data, str): - sensor_root = DATASET_SENSOR_ROOT[log_metadata.dataset] - assert ( - sensor_root is not None - ), f"Dataset path for sensor loading not found for dataset: {log_metadata.dataset}" - full_lidar_path = Path(sensor_root) / lidar_data - assert full_lidar_path.exists(), f"LiDAR file not found: {full_lidar_path}" - - # NOTE: We move data specific import into if-else block, to avoid data specific import errors - if log_metadata.dataset == "nuplan": - from py123d.conversion.datasets.nuplan.nuplan_load_sensor import load_nuplan_lidar_from_path - - lidar = load_nuplan_lidar_from_path(full_lidar_path, lidar_metadata) - - elif log_metadata.dataset == "carla": - raise NotImplementedError("Loading LiDAR data for Carla dataset is not implemented.") - elif log_metadata.dataset == "av2-sensor": - from py123d.conversion.datasets.av2.utils.av2_sensor_loading import load_av2_sensor_lidar_pc_from_path - - lidar_pc_dict = load_av2_sensor_lidar_pc_from_path(full_lidar_path) - - assert ( - lidar_type in lidar_pc_dict - ), f"LiDAR type {lidar_type} not found in AV2 sensor data at {full_lidar_path}." - lidar = LiDAR(metadata=lidar_metadata, point_cloud=lidar_pc_dict[lidar_type]) - - elif log_metadata.dataset == "wopd": - - raise NotImplementedError - elif log_metadata.dataset == "pandaset": - from py123d.conversion.datasets.pandaset.pandaset_sensor_loading import ( - load_pandaset_lidars_pc_from_path, + lidar_pc_dict = load_lidar_pcs_from_file(relative_path=lidar_data, log_metadata=log_metadata, index=index) + if lidar_type == LiDARType.LIDAR_MERGED: + # Merge all available LiDAR point clouds into one + merged_pc = np.vstack(list(lidar_pc_dict.values())) + lidar = LiDAR( + metadata=LiDARMetadata( + lidar_type=LiDARType.LIDAR_MERGED, + lidar_index=DefaultLidarIndex, + extrinsic=None, + ), + point_cloud=merged_pc, ) - - ego_state_se3 = get_ego_vehicle_state_from_arrow_table( - arrow_table, index, log_metadata.vehicle_parameters + elif lidar_type in lidar_pc_dict: + lidar = LiDAR( + metadata=log_metadata.lidar_metadata[lidar_type], + point_cloud=lidar_pc_dict[lidar_type], ) - - lidar_pc_dict = load_pandaset_lidars_pc_from_path(full_lidar_path, ego_state_se3) - assert ( - lidar_type in lidar_pc_dict - ), f"LiDAR type {lidar_type} not found in Pandaset data at {full_lidar_path}." - lidar = LiDAR(metadata=lidar_metadata, point_cloud=lidar_pc_dict[lidar_type]) - else: - raise NotImplementedError(f"Loading LiDAR data for dataset {log_metadata.dataset} is not implemented.") - elif isinstance(lidar_data, bytes): - + lidar_metadata = log_metadata.lidar_metadata[lidar_type] if lidar_data.startswith(b"DRACO"): - from py123d.conversion.log_writer.utils.draco_lidar_compression import decompress_lidar_from_draco - # NOTE: DRACO only allows XYZ compression, so we need to override the lidar index here. 
lidar_metadata.lidar_index = DefaultLidarIndex - lidar = decompress_lidar_from_draco(lidar_data, lidar_metadata) + lidar = load_lidar_from_draco_binary(lidar_data, lidar_metadata) elif lidar_data.startswith(b"LASF"): - from py123d.conversion.log_writer.utils.laz_lidar_compression import decompress_lidar_from_laz - - lidar = decompress_lidar_from_laz(lidar_data, lidar_metadata) + lidar = load_lidar_from_laz_binary(lidar_data, lidar_metadata) elif lidar_data is None: lidar = None else: diff --git a/src/py123d/datatypes/sensors/lidar/lidar.py b/src/py123d/datatypes/sensors/lidar/lidar.py index 3e72e6fb..0950c77c 100644 --- a/src/py123d/datatypes/sensors/lidar/lidar.py +++ b/src/py123d/datatypes/sensors/lidar/lidar.py @@ -7,7 +7,7 @@ import numpy.typing as npt from py123d.common.utils.enums import SerialIntEnum -from py123d.conversion.utils.sensor_utils.lidar_index_registry import LIDAR_INDEX_REGISTRY, LiDARIndex +from py123d.conversion.registry.lidar_index_registry import LIDAR_INDEX_REGISTRY, LiDARIndex from py123d.geometry import StateSE3 diff --git a/src/py123d/script/config/conversion/datasets/av2_sensor_dataset.yaml b/src/py123d/script/config/conversion/datasets/av2_sensor_dataset.yaml index 261dc386..1a121fd9 100644 --- a/src/py123d/script/config/conversion/datasets/av2_sensor_dataset.yaml +++ b/src/py123d/script/config/conversion/datasets/av2_sensor_dataset.yaml @@ -30,7 +30,7 @@ av2_sensor_dataset: # LiDARs include_lidars: true - lidar_store_option: "binary" # "path", "binary" + lidar_store_option: "binary" # "path", "path_merged", "binary" # Scenario tag / Route # NOTE: These are only supported for nuPlan. Consider removing or expanding support. diff --git a/src/py123d/script/config/conversion/datasets/nuplan_dataset.yaml b/src/py123d/script/config/conversion/datasets/nuplan_dataset.yaml index 22daf2c0..671b960c 100644 --- a/src/py123d/script/config/conversion/datasets/nuplan_dataset.yaml +++ b/src/py123d/script/config/conversion/datasets/nuplan_dataset.yaml @@ -3,9 +3,9 @@ nuplan_dataset: _convert_: 'all' splits: ["nuplan_train", "nuplan_val", "nuplan_test"] - nuplan_data_root: ${nuplan_data_root} - nuplan_maps_root: ${nuplan_maps_root} - nuplan_sensor_root: ${nuplan_sensor_root} + nuplan_data_root: ${dataset_paths.nuplan_data_root} + nuplan_maps_root: ${dataset_paths.nuplan_maps_root} + nuplan_sensor_root: ${dataset_paths.nuplan_sensor_root} dataset_converter_config: _target_: py123d.conversion.dataset_converter_config.DatasetConverterConfig @@ -32,7 +32,7 @@ nuplan_dataset: # LiDARs include_lidars: true - lidar_store_option: "path" # "path", "binary" + lidar_store_option: "path_merged" # "path", "path_merged", "binary" # Scenario tag / Route # NOTE: These are only supported for nuPlan. Consider removing or expanding support. 
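Taken together, the hunks above make merged point clouds transparent to consumers: a log stored with a single merged column still serves any LiDARType request via the column fallback, and a LIDAR_MERGED request on a multi-sensor log is served by stacking the per-sensor clouds. A hedged usage sketch; scene stands for an ArrowScene obtained elsewhere and is assumed, not shown:

from py123d.datatypes.sensors.lidar.lidar import LiDARType

# scene is an assumed ArrowScene; LIDAR_MERGED is now accepted even when it
# is not listed in scene.available_lidar_types (see the arrow_scene.py hunk above).
lidar = scene.get_lidar_at_iteration(0, LiDARType.LIDAR_MERGED)
if lidar is not None:
    # Under the merged path, point_cloud is the np.vstack of all per-sensor clouds.
    print(lidar.point_cloud.shape)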
diff --git a/src/py123d/script/config/conversion/datasets/nuplan_mini_dataset.yaml b/src/py123d/script/config/conversion/datasets/nuplan_mini_dataset.yaml index 265bcefb..a59e67a7 100644 --- a/src/py123d/script/config/conversion/datasets/nuplan_mini_dataset.yaml +++ b/src/py123d/script/config/conversion/datasets/nuplan_mini_dataset.yaml @@ -3,9 +3,9 @@ nuplan_mini_dataset: _convert_: 'all' splits: ["nuplan-mini_train", "nuplan-mini_val", "nuplan-mini_test"] - nuplan_data_root: ${nuplan_data_root} - nuplan_maps_root: ${nuplan_maps_root} - nuplan_sensor_root: ${nuplan_sensor_root} + nuplan_data_root: ${dataset_paths.nuplan_data_root} + nuplan_maps_root: ${dataset_paths.nuplan_maps_root} + nuplan_sensor_root: ${dataset_paths.nuplan_sensor_root} dataset_converter_config: _target_: py123d.conversion.dataset_converter_config.DatasetConverterConfig @@ -32,7 +32,7 @@ nuplan_mini_dataset: # LiDARs include_lidars: true - lidar_store_option: "path" # "path", "binary" + lidar_store_option: "binary" # "path", "path_merged", "binary" # Scenario tag / Route # NOTE: These are only supported for nuPlan. Consider removing or expanding support. diff --git a/src/py123d/script/config/conversion/datasets/pandaset_dataset.yaml b/src/py123d/script/config/conversion/datasets/pandaset_dataset.yaml index 9f2f8e3e..51d8e18c 100644 --- a/src/py123d/script/config/conversion/datasets/pandaset_dataset.yaml +++ b/src/py123d/script/config/conversion/datasets/pandaset_dataset.yaml @@ -30,7 +30,7 @@ pandaset_dataset: # LiDARs include_lidars: true - lidar_store_option: "binary" # "path", "binary" + lidar_store_option: "binary" # "path", "path_merged", "binary" # Scenario tag / Route # NOTE: These are only supported for nuPlan. Consider removing or expanding support. diff --git a/src/py123d/script/config/conversion/datasets/wopd_dataset.yaml b/src/py123d/script/config/conversion/datasets/wopd_dataset.yaml index 0a04e96f..441c4966 100644 --- a/src/py123d/script/config/conversion/datasets/wopd_dataset.yaml +++ b/src/py123d/script/config/conversion/datasets/wopd_dataset.yaml @@ -2,7 +2,7 @@ wopd_dataset: _target_: py123d.conversion.datasets.wopd.wopd_converter.WOPDConverter _convert_: 'all' - splits: ["wopd_train", "wopd_val", "wopd_test"] # Which splits to convert. Options: ["wopd_train", "wopd_val", "wopd_test"] + splits: ["wopd_val"] # Which splits to convert. Options: ["wopd_train", "wopd_val", "wopd_test"] wopd_data_root: "/media/nvme1/waymo_perception" # ${wopd_data_root} zero_roll_pitch: true # Whether to zero the roll and pitch of the box detections in global frame. keep_polar_features: false # Add lidar polar features (range, azimuth, elevation) in addition to XYZ. (slow if true) @@ -33,8 +33,8 @@ wopd_dataset: camera_store_option: "binary" # "path", "binary", "mp4" # LiDARs - include_lidars: false - lidar_store_option: "binary" # "path", "binary" + include_lidars: true + lidar_store_option: "binary" # "path", "path_merged", "binary" # Scenario tag / Route # NOTE: These are only supported for nuPlan. Consider removing or expanding support. 
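The same three-way comment now lands in every dataset config. Informally, per the getter changes earlier in this series, the options behave as below; this summary dict is illustrative documentation, not library code, and the path_merged description is inferred from the column fallback logic above:

# Not library code: an informal restatement of the lidar_store_option values.
LIDAR_STORE_OPTION_BEHAVIOR = {
    "path": "store a relative sensor-file path, decoded per dataset at load time",
    "path_merged": "store one path; its clouds load through the merged LiDAR column",
    "binary": "store compressed bytes; the DRACO/LASF magic prefix picks the decoder",
}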
diff --git a/src/py123d/script/config/conversion/default_conversion.yaml b/src/py123d/script/config/conversion/default_conversion.yaml index daa55f12..aecfa9b7 100644 --- a/src/py123d/script/config/conversion/default_conversion.yaml +++ b/src/py123d/script/config/conversion/default_conversion.yaml @@ -22,5 +22,5 @@ defaults: terminate_on_exception: True -force_map_conversion: True +force_map_conversion: False force_log_conversion: True diff --git a/src/py123d/script/run_conversion.py b/src/py123d/script/run_conversion.py index 406a7e3f..bb4fe510 100644 --- a/src/py123d/script/run_conversion.py +++ b/src/py123d/script/run_conversion.py @@ -57,6 +57,8 @@ def _convert_maps(args: List[Dict[str, int]], cfg: DictConfig, dataset_converter def _convert_logs(args: List[Dict[str, int]], cfg: DictConfig, dataset_converter: AbstractDatasetConverter) -> None: + setup_dataset_paths(cfg.dataset_paths) + def _internal_convert_log(args: Dict[str, int], dataset_converter_: AbstractDatasetConverter) -> int: # for i2 in tqdm(range(300), leave=False) log_writer = build_log_writer(cfg.log_writer) diff --git a/src/py123d/visualization/viser/elements/render_elements.py b/src/py123d/visualization/viser/elements/render_elements.py new file mode 100644 index 00000000..6df316b2 --- /dev/null +++ b/src/py123d/visualization/viser/elements/render_elements.py @@ -0,0 +1,25 @@ +from py123d.conversion.utils.sensor_utils.camera_conventions import convert_camera_convention +from py123d.datatypes.scene.abstract_scene import AbstractScene +from py123d.datatypes.vehicle_state.ego_state import EgoStateSE3 +from py123d.geometry.geometry_index import StateSE3Index +from py123d.geometry.se import StateSE3 +from py123d.geometry.transform.transform_se3 import translate_se3_along_body_frame +from py123d.geometry.vector import Vector3D + + +def get_ego_3rd_person_view_position( + scene: AbstractScene, + iteration: int, + initial_ego_state: EgoStateSE3, +) -> StateSE3: + scene_center_array = initial_ego_state.center.point_3d.array + ego_pose = scene.get_ego_state_at_iteration(iteration).rear_axle_se3.array + ego_pose[StateSE3Index.XYZ] -= scene_center_array + ego_pose_se3 = StateSE3.from_array(ego_pose) + ego_pose_se3 = translate_se3_along_body_frame(ego_pose_se3, Vector3D(-10.0, 0.0, 5.0)) + + return convert_camera_convention( + ego_pose_se3, + from_convention="pXpZmY", + to_convention="pZmYpX", + ) diff --git a/src/py123d/visualization/viser/elements/sensor_elements.py b/src/py123d/visualization/viser/elements/sensor_elements.py index 2876fa5e..410cccb9 100644 --- a/src/py123d/visualization/viser/elements/sensor_elements.py +++ b/src/py123d/visualization/viser/elements/sensor_elements.py @@ -15,6 +15,7 @@ convert_relative_to_absolute_points_3d_array, convert_relative_to_absolute_se3_array, ) +from py123d.visualization.color.color import TAB_10 from py123d.visualization.viser.viser_config import ViserConfig @@ -128,6 +129,13 @@ def _load_lidar_points(lidar_type: LiDARType) -> npt.NDArray[np.float32]: points_3d_local = ( np.concatenate(lidar_points_3d_list, axis=0) if lidar_points_3d_list else np.zeros((0, 3), dtype=np.float32) ) + + colors = [] + for idx, points in enumerate(lidar_points_3d_list): + color = np.array(TAB_10[idx % len(TAB_10)].rgb, dtype=np.uint8) + colors.append(np.tile(color, (points.shape[0], 1))) + colors = np.vstack(colors) if colors else np.zeros((0, 3), dtype=np.uint8) + points = convert_relative_to_absolute_points_3d_array(ego_pose, points_3d_local) colors = np.zeros_like(points) diff --git 
a/src/py123d/visualization/viser/viser_config.py b/src/py123d/visualization/viser/viser_config.py index 330e4504..384f8042 100644 --- a/src/py123d/visualization/viser/viser_config.py +++ b/src/py123d/visualization/viser/viser_config.py @@ -46,9 +46,9 @@ class ViserConfig: # Map map_visible: bool = True - map_radius: float = 500.0 # [m] + map_radius: float = 100.0 # [m] map_non_road_z_offset: float = 0.1 # small z-translation to place crosswalks, parking, etc. on top of the road - map_requery: bool = False # Re-query map when ego vehicle moves out of current map bounds + map_requery: bool = True # Re-query map when ego vehicle moves out of current map bounds # Bounding boxes bounding_box_visible: bool = True diff --git a/src/py123d/visualization/viser/viser_viewer.py b/src/py123d/visualization/viser/viser_viewer.py index b27d6853..f2ed1422 100644 --- a/src/py123d/visualization/viser/viser_viewer.py +++ b/src/py123d/visualization/viser/viser_viewer.py @@ -2,7 +2,9 @@ import time from typing import Dict, List, Optional +import imageio.v3 as iio import viser +from tqdm import tqdm from viser.theme import TitlebarButton, TitlebarConfig, TitlebarImage from py123d.datatypes.maps.map_datatypes import MapLayer @@ -17,6 +19,7 @@ add_lidar_pc_to_viser_server, add_map_to_viser_server, ) +from py123d.visualization.viser.elements.render_elements import get_ego_3rd_person_view_position from py123d.visualization.viser.viser_config import ViserConfig logger = logging.getLogger(__name__) @@ -114,9 +117,10 @@ def next(self) -> None: def set_scene(self, scene: AbstractScene) -> None: num_frames = scene.number_of_iterations initial_ego_state: EgoStateSE3 = scene.get_ego_state_at_iteration(0) + server_playing = True + server_rendering = False with self._viser_server.gui.add_folder("Playback"): - server_playing = True gui_timestep = self._viser_server.gui.add_slider( "Timestep", min=0, @@ -134,6 +138,8 @@ def set_scene(self, scene: AbstractScene) -> None: "FPS options", ("10", "25", "50", "75", "100") ) + button = self._viser_server.gui.add_button("Render Scene") + # Frame step buttons. @gui_next_frame.on_click def _(_) -> None: @@ -204,9 +210,29 @@ def _(_) -> None: ) rendering_time = time.perf_counter() - start sleep_time = 1.0 / gui_framerate.value - rendering_time - if sleep_time > 0: + if sleep_time > 0 and not server_rendering: time.sleep(max(sleep_time, 0.0)) + @button.on_click + def _(event: viser.GuiEvent) -> None: + client = event.client + assert client is not None + + client.scene.reset() + + server_rendering = True + images = [] + + for i in tqdm(range(scene.number_of_iterations)): + gui_timestep.value = i + ego_view = get_ego_3rd_person_view_position(scene, i, initial_ego_state) + client.camera.position = ego_view.point_3d.array + client.camera.wxyz = ego_view.quaternion.array + images.append(client.get_render(height=720, width=1280)) + + client.send_file_download("image.mp4", iio.imwrite("", images, extension=".mp4", fps=30)) + server_rendering = False + camera_frustum_handles: Dict[PinholeCameraType, viser.CameraFrustumHandle] = {} camera_gui_handles: Dict[PinholeCameraType, viser.GuiImageHandle] = {} lidar_pc_handle: Optional[viser.PointCloudHandle] = None @@ -253,7 +279,8 @@ def _(_) -> None: # Playback update loop. 
while server_playing: - if gui_playing.value: + + if gui_playing.value and not server_rendering: gui_timestep.value = (gui_timestep.value + 1) % num_frames self._viser_server.flush() diff --git a/test_viser.py b/test_viser.py index 3ed0a1f6..73896531 100644 --- a/test_viser.py +++ b/test_viser.py @@ -6,22 +6,24 @@ if __name__ == "__main__": - # splits = ["nuplan-mini_test", "nuplan-mini_train", "nuplan-mini_val"] + splits = ["nuplan-mini_test", "nuplan-mini_train", "nuplan-mini_val"] # splits = ["nuplan_private_test"] # splits = ["carla_test"] - # splits = ["wopd_val"] - splits = ["av2-sensor_train"] + splits = ["wopd_val"] + # splits = ["av2-sensor_train"] # splits = ["pandaset_test", "pandaset_val", "pandaset_train"] + # log_names = ["2021.08.24.13.12.55_veh-45_00386_00472"] log_names = None + scene_uuids = None scene_filter = SceneFilter( split_names=splits, log_names=log_names, scene_uuids=scene_uuids, - duration_s=None, + duration_s=10.0, history_s=0.0, - timestamp_threshold_s=None, + timestamp_threshold_s=10.0, shuffle=True, camera_types=[PinholeCameraType.CAM_F0], ) From 230c034bef4e9d905b47bad239a8c400d2817365 Mon Sep 17 00:00:00 2001 From: jbwang <1159270049@qq.com> Date: Wed, 29 Oct 2025 10:33:03 +0800 Subject: [PATCH 122/145] merge dev_v0.0.7 into kitti360 --- .../kitti_360/kitti_360_data_converter.py | 35 ++++++++++--------- .../datasets/kitti_360/kitti_360_helper.py | 12 ++++--- .../{labels.py => kitti_360_labels.py} | 28 +++++++-------- ...oad_sensor.py => kitti_360_load_sensor.py} | 6 ++-- .../kitti_360/preprocess_detection.py | 2 +- .../scene/arrow/utils/arrow_getters.py | 30 +++++++++++----- .../config/common/default_dataset_paths.yaml | 3 ++ .../scene_builder/default_scene_builder.yaml | 5 ++- .../conversion/datasets/kitti360_dataset.yaml | 3 +- .../config/conversion/default_conversion.yaml | 2 +- .../visualization/viser/viser_config.py | 2 ++ .../visualization/viser/viser_viewer.py | 2 ++ 12 files changed, 79 insertions(+), 51 deletions(-) rename src/py123d/conversion/datasets/kitti_360/{labels.py => kitti_360_labels.py} (95%) rename src/py123d/conversion/datasets/kitti_360/{kitti360_load_sensor.py => kitti_360_load_sensor.py} (92%) diff --git a/src/py123d/conversion/datasets/kitti_360/kitti_360_data_converter.py b/src/py123d/conversion/datasets/kitti_360/kitti_360_data_converter.py index 1443a37a..d8733e12 100644 --- a/src/py123d/conversion/datasets/kitti_360/kitti_360_data_converter.py +++ b/src/py123d/conversion/datasets/kitti_360/kitti_360_data_converter.py @@ -1,7 +1,6 @@ import os import re import yaml -from dataclasses import asdict from pathlib import Path from typing import Any, Dict, Final, List, Optional, Tuple, Union @@ -11,9 +10,6 @@ import datetime import xml.etree.ElementTree as ET import logging -from pyquaternion import Quaternion - -from py123d.common.multithreading.worker_utils import WorkerPool, worker_map from py123d.datatypes.detections.box_detections import ( BoxDetectionMetadata, @@ -32,25 +28,25 @@ FisheyeMEIDistortion, FisheyeMEIProjection, ) -from py123d.datatypes.sensors.lidar.lidar import LiDARMetadata, LiDARType +from py123d.datatypes.sensors.lidar.lidar import LiDAR, LiDARMetadata, LiDARType from py123d.conversion.utils.sensor_utils.lidar_index_registry import Kitti360LidarIndex from py123d.datatypes.time.time_point import TimePoint -from py123d.datatypes.vehicle_state.ego_state import DynamicStateSE3, EgoStateSE3, EgoStateSE3Index +from py123d.datatypes.vehicle_state.ego_state import DynamicStateSE3, EgoStateSE3 from 
py123d.datatypes.vehicle_state.vehicle_parameters import get_kitti360_station_wagon_parameters,rear_axle_se3_to_center_se3 from py123d.common.utils.uuid_utils import create_deterministic_uuid from py123d.conversion.abstract_dataset_converter import AbstractDatasetConverter from py123d.conversion.dataset_converter_config import DatasetConverterConfig from py123d.conversion.log_writer.abstract_log_writer import AbstractLogWriter -from py123d.conversion.log_writer.arrow_log_writer import ArrowLogWriter from py123d.conversion.map_writer.abstract_map_writer import AbstractMapWriter from py123d.datatypes.maps.map_metadata import MapMetadata from py123d.datatypes.scene.scene_metadata import LogMetadata from py123d.conversion.datasets.kitti_360.kitti_360_helper import KITTI360Bbox3D, KITTI3602NUPLAN_IMU_CALIBRATION, get_lidar_extrinsic -from py123d.conversion.datasets.kitti_360.labels import KITTI360_DETECTION_NAME_DICT, kittiId2label, BBOX_LABLES_TO_DETECTION_NAME_DICT +from py123d.conversion.datasets.kitti_360.kitti_360_labels import KITTI360_DETECTION_NAME_DICT, kittiId2label, BBOX_LABLES_TO_DETECTION_NAME_DICT from py123d.conversion.datasets.kitti_360.kitti_360_map_conversion import ( convert_kitti360_map_with_writer ) -from py123d.geometry import BoundingBoxSE3, BoundingBoxSE3Index, StateSE3, Vector3D, Vector3DIndex +from py123d.conversion.datasets.kitti_360.kitti_360_load_sensor import load_kitti360_lidar_from_path +from py123d.geometry import BoundingBoxSE3, StateSE3, Vector3D from py123d.geometry.rotation import EulerAngles KITTI360_DT: Final[float] = 0.1 @@ -88,8 +84,8 @@ DIR_3D_BBOX: PATH_3D_BBOX_ROOT / "train", } -D123_DEVKIT_ROOT = Path(os.environ["D123_DEVKIT_ROOT"]) -PREPOCESS_DETECTION_DIR = D123_DEVKIT_ROOT / "d123" / "conversion" / "datasets" / "kitti_360" / "detection_preprocess" +D123_DEVKIT_ROOT = Path(os.environ["PY123D_DEVKIT_ROOT"]) +PREPOCESS_DETECTION_DIR = D123_DEVKIT_ROOT / "src" / "py123d" / "conversion" / "datasets" / "kitti_360" / "detection_preprocess" def create_token(split: str, log_name: str, timestamp_us: int, misc: str = None) -> str: """Create a deterministic UUID-based token for KITTI-360 data. @@ -117,7 +113,7 @@ class Kitti360DataConverter(AbstractDatasetConverter): def __init__( self, splits: List[str], - log_path: Union[Path, str], + kitti360_data_root: Union[Path, str], dataset_converter_config: DatasetConverterConfig, ) -> None: super().__init__(dataset_converter_config) @@ -127,7 +123,7 @@ def __init__( ), f"Split {split} is not available. 
Available splits: {self.available_splits}" self._splits: List[str] = splits - self._log_path: Path = Path(log_path) + self._log_path: Path = Path(kitti360_data_root) self._log_paths_and_split: List[Tuple[Path, str]] = self._collect_log_paths() self._total_maps = len(self._log_paths_and_split) # Each log has its own map @@ -430,9 +426,8 @@ def _extract_ego_state_all(log_name: str) -> Tuple[List[EgoStateSE3], List[int]] [r10, r11, r12], [r20, r21, r22]], dtype=np.float64) R_mat_cali = R_mat @ KITTI3602NUPLAN_IMU_CALIBRATION[:3,:3] - yaw, pitch, roll = Quaternion(matrix=R_mat_cali[:3, :3]).yaw_pitch_roll - ego_quaternion = EulerAngles(roll=roll, pitch=pitch, yaw=yaw).quaternion + ego_quaternion = EulerAngles.from_rotation_matrix(R_mat_cali).quaternion rear_axle_pose = StateSE3( x=poses[pos, 4], y=poses[pos, 8], @@ -578,7 +573,7 @@ def _extract_detections( if state is None: break detection_metadata = BoxDetectionMetadata( - detection_type=detection_type, + box_detection_type=detection_type, timepoint=None, track_token=token, confidence=None, @@ -606,7 +601,13 @@ def _extract_lidar(log_name: str, idx: int, data_converter_config: DatasetConver if data_converter_config.lidar_store_option == "path": lidar = f"data_3d_raw/{log_name}/velodyne_points/data/{idx:010d}.bin" elif data_converter_config.lidar_store_option == "binary": - raise NotImplementedError("Binary lidar storage is not implemented.") + temp_metadata = LiDARMetadata( + lidar_type=LiDARType.LIDAR_TOP, + lidar_index=Kitti360LidarIndex, + extrinsic=StateSE3.from_transformation_matrix(get_lidar_extrinsic()), + ) + lidar_pc: LiDAR = load_kitti360_lidar_from_path(lidar_full_path, temp_metadata) + lidar = lidar_pc.point_cloud else: raise FileNotFoundError(f"LiDAR file not found: {lidar_full_path}") return {LiDARType.LIDAR_TOP: lidar} diff --git a/src/py123d/conversion/datasets/kitti_360/kitti_360_helper.py b/src/py123d/conversion/datasets/kitti_360/kitti_360_helper.py index 85bc7ecc..8486329c 100644 --- a/src/py123d/conversion/datasets/kitti_360/kitti_360_helper.py +++ b/src/py123d/conversion/datasets/kitti_360/kitti_360_helper.py @@ -1,15 +1,14 @@ import numpy as np from collections import defaultdict -from typing import Dict, Optional, Any, List, Tuple +from typing import Dict, Any, List, Tuple import copy from scipy.linalg import polar -from scipy.spatial.transform import Rotation as R from py123d.geometry import BoundingBoxSE3, StateSE3 from py123d.geometry.polyline import Polyline3D from py123d.geometry.rotation import EulerAngles -from py123d.conversion.datasets.kitti_360.labels import kittiId2label,BBOX_LABLES_TO_DETECTION_NAME_DICT +from py123d.conversion.datasets.kitti_360.kitti_360_labels import kittiId2label,BBOX_LABLES_TO_DETECTION_NAME_DICT import os from pathlib import Path @@ -124,8 +123,11 @@ def parse_scale_rotation(self): if np.linalg.det(Rm) < 0: Rm[0] = -Rm[0] scale = np.diag(Sm) - yaw, pitch, roll = R.from_matrix(Rm).as_euler('zyx', degrees=False) - obj_quaternion = EulerAngles(roll=roll, pitch=pitch, yaw=yaw).quaternion + # yaw, pitch, roll = R.from_matrix(Rm).as_euler('zyx', degrees=False) + euler_angles = EulerAngles.from_rotation_matrix(Rm) + yaw,pitch,roll = euler_angles.yaw, euler_angles.pitch, euler_angles.roll + obj_quaternion = euler_angles.quaternion + # obj_quaternion = EulerAngles(roll=roll, pitch=pitch, yaw=yaw).quaternion self.Rm = np.array(Rm) self.Sm = np.array(Sm) diff --git a/src/py123d/conversion/datasets/kitti_360/labels.py b/src/py123d/conversion/datasets/kitti_360/kitti_360_labels.py similarity index 
95% rename from src/py123d/conversion/datasets/kitti_360/labels.py rename to src/py123d/conversion/datasets/kitti_360/kitti_360_labels.py index a7428706..7a58b113 100644 --- a/src/py123d/conversion/datasets/kitti_360/labels.py +++ b/src/py123d/conversion/datasets/kitti_360/kitti_360_labels.py @@ -167,7 +167,7 @@ def assureSingleInstanceName( name ): # all good then return name -from py123d.datatypes.detections.detection_types import DetectionType +from py123d.datatypes.detections.box_detection_types import BoxDetectionType BBOX_LABLES_TO_DETECTION_NAME_DICT = { 'car': 'car', @@ -185,19 +185,19 @@ def assureSingleInstanceName( name ): } KITTI360_DETECTION_NAME_DICT = { - "traffic light": DetectionType.SIGN, - "traffic sign": DetectionType.SIGN, - "person": DetectionType.PEDESTRIAN, - "rider": DetectionType.BICYCLE, - "car": DetectionType.VEHICLE, - "truck": DetectionType.VEHICLE, - "bus": DetectionType.VEHICLE, - "caravan": DetectionType.VEHICLE, - "trailer": DetectionType.VEHICLE, - "train": DetectionType.VEHICLE, - "motorcycle": DetectionType.BICYCLE, - "bicycle": DetectionType.BICYCLE, - "stop": DetectionType.SIGN, + "traffic light": BoxDetectionType.SIGN, + "traffic sign": BoxDetectionType.SIGN, + "person": BoxDetectionType.PEDESTRIAN, + "rider": BoxDetectionType.BICYCLE, + "car": BoxDetectionType.VEHICLE, + "truck": BoxDetectionType.VEHICLE, + "bus": BoxDetectionType.VEHICLE, + "caravan": BoxDetectionType.VEHICLE, + "trailer": BoxDetectionType.VEHICLE, + "train": BoxDetectionType.VEHICLE, + "motorcycle": BoxDetectionType.BICYCLE, + "bicycle": BoxDetectionType.BICYCLE, + "stop": BoxDetectionType.SIGN, } # KIITI360_DETECTION_NAME_DICT = { diff --git a/src/py123d/conversion/datasets/kitti_360/kitti360_load_sensor.py b/src/py123d/conversion/datasets/kitti_360/kitti_360_load_sensor.py similarity index 92% rename from src/py123d/conversion/datasets/kitti_360/kitti360_load_sensor.py rename to src/py123d/conversion/datasets/kitti_360/kitti_360_load_sensor.py index f9a5b6fe..6d021df4 100644 --- a/src/py123d/conversion/datasets/kitti_360/kitti360_load_sensor.py +++ b/src/py123d/conversion/datasets/kitti_360/kitti_360_load_sensor.py @@ -9,7 +9,7 @@ def load_kitti360_lidar_from_path(filepath: Path, lidar_metadata: LiDARMetadata) -> LiDAR: if not filepath.exists(): logging.warning(f"LiDAR file does not exist: {filepath}. 
Returning empty point cloud.") - return LiDAR(metadata=lidar_metadata, point_cloud=np.zeros((4, 0), dtype=np.float32)) + return LiDAR(metadata=lidar_metadata, point_cloud=np.zeros((1, 4), dtype=np.float32)) pcd = np.fromfile(filepath, dtype=np.float32) pcd = np.reshape(pcd,[-1,4]) # [N,4] @@ -28,4 +28,6 @@ def load_kitti360_lidar_from_path(filepath: Path, lidar_metadata: LiDARMetadata) point_cloud_4xN = np.vstack([transformed_xyz, intensity_row]).astype(np.float32) # (4,N) - return LiDAR(metadata=lidar_metadata, point_cloud=point_cloud_4xN) + point_cloud_Nx4 = point_cloud_4xN.T # (N,4) + + return LiDAR(metadata=lidar_metadata, point_cloud=point_cloud_Nx4) diff --git a/src/py123d/conversion/datasets/kitti_360/preprocess_detection.py b/src/py123d/conversion/datasets/kitti_360/preprocess_detection.py index 06e47379..2f959b06 100644 --- a/src/py123d/conversion/datasets/kitti_360/preprocess_detection.py +++ b/src/py123d/conversion/datasets/kitti_360/preprocess_detection.py @@ -32,7 +32,7 @@ PATH_POSES_ROOT = KITTI360_DATA_ROOT / DIR_POSES from py123d.conversion.datasets.kitti_360.kitti_360_helper import KITTI360Bbox3D, KITTI3602NUPLAN_IMU_CALIBRATION, get_lidar_extrinsic -from py123d.conversion.datasets.kitti_360.labels import KITTI360_DETECTION_NAME_DICT, kittiId2label, BBOX_LABLES_TO_DETECTION_NAME_DICT +from py123d.conversion.datasets.kitti_360.kitti_360_labels import KITTI360_DETECTION_NAME_DICT, kittiId2label, BBOX_LABLES_TO_DETECTION_NAME_DICT def _bbox_xml_path(log_name: str) -> Path: if log_name == "2013_05_28_drive_0004_sync": diff --git a/src/py123d/datatypes/scene/arrow/utils/arrow_getters.py b/src/py123d/datatypes/scene/arrow/utils/arrow_getters.py index 440d12fa..215cb95f 100644 --- a/src/py123d/datatypes/scene/arrow/utils/arrow_getters.py +++ b/src/py123d/datatypes/scene/arrow/utils/arrow_getters.py @@ -1,5 +1,5 @@ from pathlib import Path -from typing import Dict, List, Optional +from typing import Dict, List, Optional, Union import cv2 import numpy as np @@ -21,6 +21,7 @@ ) from py123d.datatypes.scene.scene_metadata import LogMetadata from py123d.datatypes.sensors.camera.pinhole_camera import PinholeCamera, PinholeCameraType +from py123d.datatypes.sensors.camera.fisheye_mei_camera import FisheyeMEICamera, FisheyeMEICameraType from py123d.datatypes.sensors.lidar.lidar import LiDAR, LiDARType from py123d.datatypes.sensors.lidar.lidar_index import DefaultLidarIndex from py123d.datatypes.time.time_point import TimePoint @@ -35,6 +36,7 @@ "av2-sensor": DATASET_PATHS.av2_sensor_data_root, "wopd": DATASET_PATHS.wopd_data_root, "pandaset": DATASET_PATHS.pandaset_data_root, + "kitti360": DATASET_PATHS.kitti360_data_root, } @@ -102,9 +104,9 @@ def get_traffic_light_detections_from_arrow_table(arrow_table: pa.Table, index: def get_camera_from_arrow_table( arrow_table: pa.Table, index: int, - camera_type: PinholeCameraType, + camera_type: Union[PinholeCameraType, FisheyeMEICameraType], log_metadata: LogMetadata, -) -> PinholeCamera: +) -> Union[PinholeCamera, FisheyeMEICamera]: camera_name = camera_type.serialize() table_data = arrow_table[f"{camera_name}_data"][index].as_py() @@ -131,11 +133,19 @@ def get_camera_from_arrow_table( else: raise NotImplementedError("Only string file paths for camera data are supported.") - return PinholeCamera( - metadata=log_metadata.camera_metadata[camera_type], - image=image, - extrinsic=extrinsic, - ) + camera_metadata = log_metadata.camera_metadata[camera_type] + if hasattr(camera_metadata, 'mirror_parameter') and camera_metadata.mirror_parameter is not 
None: + return FisheyeMEICamera( + metadata=camera_metadata, + image=image, + extrinsic=extrinsic, + ) + else: + return PinholeCamera( + metadata=camera_metadata, + image=image, + extrinsic=extrinsic, + ) def get_lidar_from_arrow_table( @@ -195,6 +205,10 @@ def get_lidar_from_arrow_table( lidar_type in lidar_pc_dict ), f"LiDAR type {lidar_type} not found in Pandaset data at {full_lidar_path}." lidar = LiDAR(metadata=lidar_metadata, point_cloud=lidar_pc_dict[lidar_type]) + elif log_metadata.dataset == "kitti360": + from py123d.conversion.datasets.kitti_360.kitti_360_load_sensor import load_kitti360_lidar_from_path + + lidar = load_kitti360_lidar_from_path(full_lidar_path, lidar_metadata) else: raise NotImplementedError(f"Loading LiDAR data for dataset {log_metadata.dataset} is not implemented.") diff --git a/src/py123d/script/config/common/default_dataset_paths.yaml b/src/py123d/script/config/common/default_dataset_paths.yaml index ded971a6..6a925b06 100644 --- a/src/py123d/script/config/common/default_dataset_paths.yaml +++ b/src/py123d/script/config/common/default_dataset_paths.yaml @@ -21,3 +21,6 @@ dataset_paths: # Pandaset defaults pandaset_data_root: ${oc.env:PANDASET_DATA_ROOT,null} + + # KITTI360 defaults + kitti360_data_root: ${oc.env:KITTI360_DATA_ROOT,null} diff --git a/src/py123d/script/config/common/scene_builder/default_scene_builder.yaml b/src/py123d/script/config/common/scene_builder/default_scene_builder.yaml index 77445192..1fadc982 100644 --- a/src/py123d/script/config/common/scene_builder/default_scene_builder.yaml +++ b/src/py123d/script/config/common/scene_builder/default_scene_builder.yaml @@ -1,4 +1,7 @@ _target_: py123d.datatypes.scene.arrow.arrow_scene_builder.ArrowSceneBuilder _convert_: 'all' -dataset_path: ${dataset_paths.py123d_data_root} +# dataset_path: ${dataset_paths.py123d_data_root} +logs_root: ${dataset_paths.py123d_logs_root} +maps_root: ${dataset_paths.py123d_maps_root} + diff --git a/src/py123d/script/config/conversion/datasets/kitti360_dataset.yaml b/src/py123d/script/config/conversion/datasets/kitti360_dataset.yaml index 2096c991..77cea31c 100644 --- a/src/py123d/script/config/conversion/datasets/kitti360_dataset.yaml +++ b/src/py123d/script/config/conversion/datasets/kitti360_dataset.yaml @@ -3,13 +3,12 @@ kitti360_dataset: _convert_: 'all' splits: ["kitti360"] - log_path: ${oc.env:KITTI360_DATA_ROOT} + kitti360_data_root: ${dataset_paths.kitti360_data_root} dataset_converter_config: _target_: py123d.conversion.dataset_converter_config.DatasetConverterConfig _convert_: 'all' - output_path: ${d123_data_root} force_log_conversion: ${force_log_conversion} force_map_conversion: ${force_map_conversion} diff --git a/src/py123d/script/config/conversion/default_conversion.yaml b/src/py123d/script/config/conversion/default_conversion.yaml index daa55f12..4adf788b 100644 --- a/src/py123d/script/config/conversion/default_conversion.yaml +++ b/src/py123d/script/config/conversion/default_conversion.yaml @@ -16,7 +16,7 @@ defaults: - log_writer: arrow_log_writer - map_writer: gpkg_map_writer - datasets: - - ??? 
+ - kitti360_dataset - _self_ diff --git a/src/py123d/visualization/viser/viser_config.py b/src/py123d/visualization/viser/viser_config.py index 330e4504..d4233f5e 100644 --- a/src/py123d/visualization/viser/viser_config.py +++ b/src/py123d/visualization/viser/viser_config.py @@ -14,6 +14,8 @@ PinholeCameraType.CAM_R0, PinholeCameraType.CAM_R1, PinholeCameraType.CAM_R2, + PinholeCameraType.CAM_STEREO_L, + PinholeCameraType.CAM_STEREO_R, ] all_lidar_types: List[LiDARType] = [ diff --git a/src/py123d/visualization/viser/viser_viewer.py b/src/py123d/visualization/viser/viser_viewer.py index b27d6853..dcff0fc7 100644 --- a/src/py123d/visualization/viser/viser_viewer.py +++ b/src/py123d/visualization/viser/viser_viewer.py @@ -31,6 +31,8 @@ PinholeCameraType.CAM_R0, PinholeCameraType.CAM_R1, PinholeCameraType.CAM_R2, + PinholeCameraType.CAM_STEREO_L, + PinholeCameraType.CAM_STEREO_R, ] all_lidar_types: List[LiDARType] = [ From 7e12332a58801b6b3f7f926155740d981e936194 Mon Sep 17 00:00:00 2001 From: jbwang <1159270049@qq.com> Date: Wed, 29 Oct 2025 13:33:21 +0800 Subject: [PATCH 123/145] merge dev_v0.0.7 into kitti360 (lidar related) --- .../scene/arrow/utils/arrow_getters.py | 185 ------------------ .../kitti_360/kitti_360_data_converter.py | 47 ++--- ..._load_sensor.py => kitti_360_sensor_io.py} | 15 +- .../sensor_io/lidar/file_lidar_io.py | 7 + .../datatypes/sensors/lidar/lidar_index.py | 8 + 5 files changed, 47 insertions(+), 215 deletions(-) delete mode 100644 d123/datatypes/scene/arrow/utils/arrow_getters.py rename src/py123d/conversion/datasets/kitti_360/{kitti_360_load_sensor.py => kitti_360_sensor_io.py} (61%) diff --git a/d123/datatypes/scene/arrow/utils/arrow_getters.py b/d123/datatypes/scene/arrow/utils/arrow_getters.py deleted file mode 100644 index fb810af4..00000000 --- a/d123/datatypes/scene/arrow/utils/arrow_getters.py +++ /dev/null @@ -1,185 +0,0 @@ -# TODO: rename this file and potentially move somewhere more appropriate. 
- -import os -from pathlib import Path -from typing import Dict, List, Optional, Union - -import cv2 -import numpy as np -import numpy.typing as npt -import pyarrow as pa - -from d123.datatypes.detections.detection import ( - BoxDetection, - BoxDetectionMetadata, - BoxDetectionSE3, - BoxDetectionWrapper, - TrafficLightDetection, - TrafficLightDetectionWrapper, - TrafficLightStatus, -) -from d123.datatypes.detections.detection_types import DetectionType -from d123.datatypes.scene.scene_metadata import LogMetadata -from d123.datatypes.sensors.camera.pinhole_camera import PinholeCamera, PinholeCameraType -from d123.datatypes.sensors.camera.fisheye_mei_camera import FisheyeMEICamera, FisheyeMEICameraType -from d123.datatypes.sensors.lidar.lidar import LiDAR, LiDARType -from d123.datatypes.time.time_point import TimePoint -from d123.datatypes.vehicle_state.ego_state import EgoStateSE3 -from d123.datatypes.vehicle_state.vehicle_parameters import VehicleParameters -from d123.geometry import BoundingBoxSE3, StateSE3, Vector3D - -DATASET_SENSOR_ROOT: Dict[str, Path] = { - "nuplan": Path(os.environ["NUPLAN_DATA_ROOT"]) / "nuplan-v1.1" / "sensor_blobs", - "carla": Path(os.environ["CARLA_DATA_ROOT"]) / "sensor_blobs", - # "av2-sensor": Path(os.environ["AV2_SENSOR_DATA_ROOT"]) / "sensor", - "kitti360": Path(os.environ["KITTI360_DATA_ROOT"]), - # # "av2-sensor": Path(os.environ["AV2_SENSOR_DATA_ROOT"]) / "sensor_mini", -} - - -def get_timepoint_from_arrow_table(arrow_table: pa.Table, index: int) -> TimePoint: - return TimePoint.from_us(arrow_table["timestamp"][index].as_py()) - - -def get_ego_vehicle_state_from_arrow_table( - arrow_table: pa.Table, index: int, vehicle_parameters: VehicleParameters -) -> EgoStateSE3: - timepoint = get_timepoint_from_arrow_table(arrow_table, index) - return EgoStateSE3.from_array( - array=pa.array(arrow_table["ego_state"][index]).to_numpy(), - vehicle_parameters=vehicle_parameters, - timepoint=timepoint, - ) - - -def get_box_detections_from_arrow_table(arrow_table: pa.Table, index: int) -> BoxDetectionWrapper: - timepoint = get_timepoint_from_arrow_table(arrow_table, index) - box_detections: List[BoxDetection] = [] - - for detection_state, detection_velocity, detection_token, detection_type in zip( - arrow_table["box_detection_state"][index].as_py(), - arrow_table["box_detection_velocity"][index].as_py(), - arrow_table["box_detection_token"][index].as_py(), - arrow_table["box_detection_type"][index].as_py(), - ): - box_detection = BoxDetectionSE3( - metadata=BoxDetectionMetadata( - detection_type=DetectionType(detection_type), - timepoint=timepoint, - track_token=detection_token, - confidence=None, - ), - bounding_box_se3=BoundingBoxSE3.from_array(np.array(detection_state)), - velocity=Vector3D.from_array(np.array(detection_velocity)) if detection_velocity else None, - ) - box_detections.append(box_detection) - return BoxDetectionWrapper(box_detections=box_detections) - - -def get_traffic_light_detections_from_arrow_table(arrow_table: pa.Table, index: int) -> TrafficLightDetectionWrapper: - timepoint = get_timepoint_from_arrow_table(arrow_table, index) - traffic_light_detections: List[TrafficLightDetection] = [] - - for lane_id, status in zip( - arrow_table["traffic_light_ids"][index].as_py(), - arrow_table["traffic_light_types"][index].as_py(), - ): - traffic_light_detection = TrafficLightDetection( - timepoint=timepoint, - lane_id=lane_id, - status=TrafficLightStatus(status), - ) - traffic_light_detections.append(traffic_light_detection) - - return 
TrafficLightDetectionWrapper(traffic_light_detections=traffic_light_detections) - - -def get_camera_from_arrow_table( - arrow_table: pa.Table, - index: int, - camera_type: Union[PinholeCameraType, FisheyeMEICameraType], - log_metadata: LogMetadata, -) -> Union[PinholeCamera, FisheyeMEICamera]: - - camera_name = camera_type.serialize() - table_data = arrow_table[f"{camera_name}_data"][index].as_py() - extrinsic_values = arrow_table[f"{camera_name}_extrinsic"][index].as_py() - extrinsic = StateSE3.from_list(extrinsic_values) if extrinsic_values is not None else None - - if table_data is None or extrinsic is None: - return None - - image: Optional[npt.NDArray[np.uint8]] = None - - if isinstance(table_data, str): - sensor_root = DATASET_SENSOR_ROOT[log_metadata.dataset] - full_image_path = sensor_root / table_data - assert full_image_path.exists(), f"Camera file not found: {full_image_path}" - image = cv2.imread(str(full_image_path), cv2.IMREAD_COLOR) - image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB) - elif isinstance(table_data, bytes): - image = cv2.imdecode(np.frombuffer(table_data, np.uint8), cv2.IMREAD_UNCHANGED) - image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB) - else: - raise NotImplementedError("Only string file paths for camera data are supported.") - - camera_metadata = log_metadata.camera_metadata[camera_type] - if hasattr(camera_metadata, 'mirror_parameter') and camera_metadata.mirror_parameter is not None: - return FisheyeMEICamera( - metadata=camera_metadata, - image=image, - extrinsic=extrinsic, - ) - else: - return PinholeCamera( - metadata=camera_metadata, - image=image, - extrinsic=extrinsic, - ) - - -def get_lidar_from_arrow_table( - arrow_table: pa.Table, - index: int, - lidar_type: LiDARType, - log_metadata: LogMetadata, -) -> LiDAR: - assert ( - lidar_type.serialize() in arrow_table.schema.names - ), f'"{lidar_type.serialize()}" field not found in Arrow table schema.' - lidar_data = arrow_table[lidar_type.serialize()][index].as_py() - lidar_metadata = log_metadata.lidar_metadata[lidar_type] - - if isinstance(lidar_data, str): - sensor_root = DATASET_SENSOR_ROOT[log_metadata.dataset] - full_lidar_path = sensor_root / lidar_data - assert full_lidar_path.exists(), f"LiDAR file not found: {full_lidar_path}" - - # NOTE: We move data specific import into if-else block, to avoid data specific import errors - if log_metadata.dataset == "nuplan": - from d123.conversion.datasets.nuplan.nuplan_load_sensor import load_nuplan_lidar_from_path - - lidar = load_nuplan_lidar_from_path(full_lidar_path, lidar_metadata) - elif log_metadata.dataset == "carla": - from d123.conversion.datasets.carla.carla_load_sensor import load_carla_lidar_from_path - - lidar = load_carla_lidar_from_path(full_lidar_path, lidar_metadata) - elif log_metadata.dataset == "wopd": - raise NotImplementedError - elif log_metadata.dataset == "kitti360": - from d123.conversion.datasets.kitti_360.kitti360_load_sensor import load_kitti360_lidar_from_path - - lidar = load_kitti360_lidar_from_path(full_lidar_path, lidar_metadata) - else: - raise NotImplementedError(f"Loading LiDAR data for dataset {log_metadata.dataset} is not implemented.") - - else: - # FIXME: This is a temporary fix for WOPD dataset. The lidar data is stored as a flattened array of float32. - # Ideally the lidar index should handle the dimension. But for now we hardcode it here. 
- lidar_data = np.array(lidar_data, dtype=np.float32).reshape(-1, 3) - lidar_data = np.concatenate([np.zeros_like(lidar_data), lidar_data], axis=-1) - if log_metadata.dataset == "wopd": - lidar = LiDAR(metadata=lidar_metadata, point_cloud=lidar_data.T) - else: - raise NotImplementedError("Only string file paths for lidar data are supported.") - return lidar diff --git a/src/py123d/conversion/datasets/kitti_360/kitti_360_data_converter.py b/src/py123d/conversion/datasets/kitti_360/kitti_360_data_converter.py index d8733e12..c29c8a13 100644 --- a/src/py123d/conversion/datasets/kitti_360/kitti_360_data_converter.py +++ b/src/py123d/conversion/datasets/kitti_360/kitti_360_data_converter.py @@ -29,14 +29,14 @@ FisheyeMEIProjection, ) from py123d.datatypes.sensors.lidar.lidar import LiDAR, LiDARMetadata, LiDARType -from py123d.conversion.utils.sensor_utils.lidar_index_registry import Kitti360LidarIndex +from py123d.conversion.registry.lidar_index_registry import Kitti360LidarIndex from py123d.datatypes.time.time_point import TimePoint from py123d.datatypes.vehicle_state.ego_state import DynamicStateSE3, EgoStateSE3 from py123d.datatypes.vehicle_state.vehicle_parameters import get_kitti360_station_wagon_parameters,rear_axle_se3_to_center_se3 from py123d.common.utils.uuid_utils import create_deterministic_uuid from py123d.conversion.abstract_dataset_converter import AbstractDatasetConverter from py123d.conversion.dataset_converter_config import DatasetConverterConfig -from py123d.conversion.log_writer.abstract_log_writer import AbstractLogWriter +from py123d.conversion.log_writer.abstract_log_writer import AbstractLogWriter, LiDARData from py123d.conversion.map_writer.abstract_map_writer import AbstractMapWriter from py123d.datatypes.maps.map_metadata import MapMetadata from py123d.datatypes.scene.scene_metadata import LogMetadata @@ -45,7 +45,6 @@ from py123d.conversion.datasets.kitti_360.kitti_360_map_conversion import ( convert_kitti360_map_with_writer ) -from py123d.conversion.datasets.kitti_360.kitti_360_load_sensor import load_kitti360_lidar_from_path from py123d.geometry import BoundingBoxSE3, StateSE3, Vector3D from py123d.geometry.rotation import EulerAngles @@ -589,28 +588,30 @@ def _extract_detections( box_detection_wrapper_all.append(BoxDetectionWrapper(box_detections=box_detections)) return box_detection_wrapper_all -def _extract_lidar(log_name: str, idx: int, data_converter_config: DatasetConverterConfig) -> Dict[LiDARType, Optional[str]]: +def _extract_lidar(log_name: str, idx: int, data_converter_config: DatasetConverterConfig) -> List[LiDARData]: - #NOTE special case for sequence 2013_05_28_drive_0002_sync which has no lidar data before frame 4391 - if log_name == "2013_05_28_drive_0002_sync" and idx <= 4390: - return {LiDARType.LIDAR_TOP: None} - - lidar: Optional[str] = None - lidar_full_path = PATH_3D_RAW_ROOT / log_name / "velodyne_points" / "data" / f"{idx:010d}.bin" - if lidar_full_path.exists(): - if data_converter_config.lidar_store_option == "path": - lidar = f"data_3d_raw/{log_name}/velodyne_points/data/{idx:010d}.bin" - elif data_converter_config.lidar_store_option == "binary": - temp_metadata = LiDARMetadata( - lidar_type=LiDARType.LIDAR_TOP, - lidar_index=Kitti360LidarIndex, - extrinsic=StateSE3.from_transformation_matrix(get_lidar_extrinsic()), + lidars: List[LiDARData] = [] + if data_converter_config.include_lidars: + #NOTE special case for sequence 2013_05_28_drive_0002_sync which has no lidar data before frame 4391 + if log_name == "2013_05_28_drive_0002_sync" and 
idx <= 4390: + return lidars + + lidar_full_path = PATH_3D_RAW_ROOT / log_name / "velodyne_points" / "data" / f"{idx:010d}.bin" + if lidar_full_path.exists(): + relative_path = f"data_3d_raw/{log_name}/velodyne_points/data/{idx:010d}.bin" + lidars.append( + LiDARData( + lidar_type=LiDARType.LIDAR_TOP, + timestamp=None, + iteration=idx, + dataset_root=PATH_3D_RAW_ROOT, + relative_path=relative_path, + ) ) - lidar_pc: LiDAR = load_kitti360_lidar_from_path(lidar_full_path, temp_metadata) - lidar = lidar_pc.point_cloud - else: - raise FileNotFoundError(f"LiDAR file not found: {lidar_full_path}") - return {LiDARType.LIDAR_TOP: lidar} + else: + raise FileNotFoundError(f"LiDAR file not found: {lidar_full_path}") + + return lidars def _extract_cameras( log_name: str, idx: int, data_converter_config: DatasetConverterConfig diff --git a/src/py123d/conversion/datasets/kitti_360/kitti_360_load_sensor.py b/src/py123d/conversion/datasets/kitti_360/kitti_360_sensor_io.py similarity index 61% rename from src/py123d/conversion/datasets/kitti_360/kitti_360_load_sensor.py rename to src/py123d/conversion/datasets/kitti_360/kitti_360_sensor_io.py index 6d021df4..46318ea8 100644 --- a/src/py123d/conversion/datasets/kitti_360/kitti_360_load_sensor.py +++ b/src/py123d/conversion/datasets/kitti_360/kitti_360_sensor_io.py @@ -1,15 +1,15 @@ from pathlib import Path +from typing import Dict import numpy as np import logging +from py123d.datatypes.sensors.lidar.lidar import LiDAR, LiDARMetadata, LiDARType +from py123d.conversion.datasets.kitti_360.kitti_360_helper import get_lidar_extrinsic -from py123d.datatypes.sensors.lidar.lidar import LiDAR, LiDARMetadata - - -def load_kitti360_lidar_from_path(filepath: Path, lidar_metadata: LiDARMetadata) -> LiDAR: +def load_kitti360_lidar_pcs_from_file(filepath: Path) -> Dict[LiDARType, np.ndarray]: if not filepath.exists(): logging.warning(f"LiDAR file does not exist: {filepath}. 
Returning empty point cloud.") - return LiDAR(metadata=lidar_metadata, point_cloud=np.zeros((1, 4), dtype=np.float32)) + return {LiDARType.LIDAR_TOP: np.zeros((1, 4), dtype=np.float32)} pcd = np.fromfile(filepath, dtype=np.float32) pcd = np.reshape(pcd,[-1,4]) # [N,4] @@ -20,7 +20,8 @@ def load_kitti360_lidar_from_path(filepath: Path, lidar_metadata: LiDARMetadata) ones = np.ones((xyz.shape[0], 1), dtype=pcd.dtype) points_h = np.concatenate([xyz, ones], axis=1) #[N,4] - transformed_h = lidar_metadata.extrinsic.transformation_matrix @ points_h.T #[4,N] + transformed_h = get_lidar_extrinsic() @ points_h.T #[4,N] + # transformed_h = lidar_metadata.extrinsic.transformation_matrix @ points_h.T #[4,N] transformed_xyz = transformed_h[:3, :] # (3,N) @@ -30,4 +31,4 @@ def load_kitti360_lidar_from_path(filepath: Path, lidar_metadata: LiDARMetadata) point_cloud_Nx4 = point_cloud_4xN.T # (N,4) - return LiDAR(metadata=lidar_metadata, point_cloud=point_cloud_Nx4) + return {LiDARType.LIDAR_TOP: point_cloud_Nx4} diff --git a/src/py123d/conversion/sensor_io/lidar/file_lidar_io.py b/src/py123d/conversion/sensor_io/lidar/file_lidar_io.py index edc5c7d5..0753c292 100644 --- a/src/py123d/conversion/sensor_io/lidar/file_lidar_io.py +++ b/src/py123d/conversion/sensor_io/lidar/file_lidar_io.py @@ -15,6 +15,7 @@ "av2-sensor": DATASET_PATHS.av2_sensor_data_root, "wopd": DATASET_PATHS.wopd_data_root, "pandaset": DATASET_PATHS.pandaset_data_root, + "kitti360": DATASET_PATHS.kitti360_data_root, } @@ -56,6 +57,12 @@ def load_lidar_pcs_from_file( from py123d.conversion.datasets.pandaset.pandaset_sensor_io import load_pandaset_lidars_pcs_from_file lidar_pcs_dict = load_pandaset_lidars_pcs_from_file(full_lidar_path, index) + + elif log_metadata.dataset == "kitti360": + from py123d.conversion.datasets.kitti_360.kitti_360_sensor_io import load_kitti360_lidar_pcs_from_file + + lidar_pcs_dict = load_kitti360_lidar_pcs_from_file(full_lidar_path) + else: raise NotImplementedError(f"Loading LiDAR data for dataset {log_metadata.dataset} is not implemented.") diff --git a/src/py123d/datatypes/sensors/lidar/lidar_index.py b/src/py123d/datatypes/sensors/lidar/lidar_index.py index 7684b685..c6322a5f 100644 --- a/src/py123d/datatypes/sensors/lidar/lidar_index.py +++ b/src/py123d/datatypes/sensors/lidar/lidar_index.py @@ -62,6 +62,14 @@ class WOPDLidarIndex(LiDARIndex): Z = 5 +@register_lidar_index +class Kitti360LidarIndex(LiDARIndex): + X = 0 + Y = 1 + Z = 2 + INTENSITY = 3 + + @register_lidar_index class AVSensorLidarIndex(LiDARIndex): """Argoverse Sensor LiDAR Indexing Scheme. From 810fcb08d4f9c5a6a8d9721d0ea871155ea2fc53 Mon Sep 17 00:00:00 2001 From: Daniel Dauner Date: Wed, 29 Oct 2025 19:15:48 +0100 Subject: [PATCH 124/145] Update logo and svg. (#57) --- README.md | 7 +++---- assets/logo/123D_logo_transparent_black.png | Bin 2486726 -> 0 bytes .../{svg => }/123D_logo_transparent_black.svg | 0 assets/logo/123D_logo_transparent_white.png | Bin 2486726 -> 0 bytes .../{svg => }/123D_logo_transparent_white.svg | 0 5 files changed, 3 insertions(+), 4 deletions(-) delete mode 100644 assets/logo/123D_logo_transparent_black.png rename assets/logo/{svg => }/123D_logo_transparent_black.svg (100%) delete mode 100644 assets/logo/123D_logo_transparent_white.png rename assets/logo/{svg => }/123D_logo_transparent_white.svg (100%) diff --git a/README.md b/README.md index 7f1d75db..a964da5b 100644 --- a/README.md +++ b/README.md @@ -1,5 +1,4 @@ -

      - Logo +

      + Logo

      123D: One Library for 2D and 3D Driving Dataset

      -

      - +

diff --git a/assets/logo/123D_logo_transparent_black.png b/assets/logo/123D_logo_transparent_black.png
deleted file mode 100644
index 6717f5f81f94a4adb7c3042fe6f5332891afecfc..0000000000000000000000000000000000000000
Binary files a/assets/logo/123D_logo_transparent_black.png and /dev/null differ
diff --git a/assets/logo/svg/123D_logo_transparent_black.svg b/assets/logo/123D_logo_transparent_black.svg
similarity index 100%
rename from assets/logo/svg/123D_logo_transparent_black.svg
rename to assets/logo/123D_logo_transparent_black.svg
diff --git a/assets/logo/123D_logo_transparent_white.png b/assets/logo/123D_logo_transparent_white.png
deleted file mode 100644
index 16aec842680f35dc3210d983a6f0e3c745216058..0000000000000000000000000000000000000000
Binary files a/assets/logo/123D_logo_transparent_white.png and /dev/null differ
z>lk-AHl>i1iXO%ZFBhDXlM}0y3b=jym@#8EnL1uxbtjxMqt_|(>Obp~K#&AZYVtgy z`tp5~#R~yPIV0E4Ii-=8HdvPgNWfYGy3ACc2EwsT(beqT^_nJnszb+tnrNv=h7N&> zr4UwDkvJYxx&$g@T|E{OAc0^B$PdO*FcSh4t)TPrU~$;dI)f9zj^j8q?>2+tQPj0Q zgCdnOd;OejGALwI3d1ufmL(F2DZ&1i$*lE8?Y}%N%DDhVQx^OLN_%FemRFGg31mZ{ zS^hVweVvc9slsd16R5d$Yf8SnvycD@kigyq4%_=cM6drFQNQeVUjpyr(3*V@M{?xg zNP5kz)4G{m7U)&Xrbv%hmx*-M(o5tS5+DH|2}rQw64;YIwi#4YSEJz;K{akmy9`tW zH!~iAk4px{>ELs?OGc4blMIT(4$JH}o@`1nckbNCHG33r*N`DYHhOHs*y>o8 z?0RFxQ#c7|PN1;SRAx~4IYy2e^~0N|aXn-|nZ~EB=AWEvMD|bPMRJqBb(v|J(Rdp(YbSHm7ky4vT`4P{ITiT z6f4Wh$|kuZWqz$;Qx=|p|IEjm?+HC10sROx&ReQF*7-m`VP-3;Q(CQ*nG|M^A=Q(B z-vnfadeZMkU9|Tl>R6vO?t)8{RbG#>^WFMBXt(FOJ=Ni+vo4tex{ZzKsildilFD^V zDR~khxYnv)@526-Um4Wd;?&rnnr6EspalVm;{FsiUkf$qMf?J%j!S=0C4cb-%E=}^uxl_EW69a3FH9XH)cKSn%50zna2g@#XoEeomxAKI0f?DxQU4?Yb{ zf=L6$<6((U%bU+lc?6YANuxNoSLrOza{;iH&W&zB=~Aalmo9NisaRF5TD5z1VIQ`R zq6Zfqjg5Uz47*?UM*?vm&?s-2>Rd1<4z5PwV?pWGO~a$;n0=A}31~?`(kgT)vvm+b z4SNt+xW}vLUg#XC&|!CE1}f6OH+4y?*pgl%&yWBK1W4cuw0$N_=l_G=DKY-b<)#N^!^`hwOE<;+G{QB{pPG{9(|b zLF+2^(Eg)|lq$C0OL|BG(Gcj>_yuNAq(#5}>rE$ko(r(Ebw@duu9$vi?VShsgak++ z&IAtB#ChY>j5Xotr;P8=m=2lK`$2~eYm`;QR)?ALKU5@)$F>t_p$P|_Z<1<)h%!hZ zFak5s@@0Xw?oY$lqZu7C^!)M0$5t4q@DXNtQ{(XHmg`?o*OY7u*W{2vah>bR8dQu~ z2E`AXH*X%V0rp5FjC^Rpk?8p3NIGYyBw#s#26-#gAqDSQ?q%feHAv=eEs9(P1&{y< z=s-Y+;6w@_KGXpQHS9q^#zlur3dvMxyoa)&UQC*?ZL<5h06Hv>rGti`M#pQrI{d8@ z9~eyXI+CnU0^SJ7N+5sp)K}T$}bTxn1EYL(>15I@7E#r4M zuww6nA-?_ueLWIlf2MV%!{0Yy7Ug@yv@xSz5(tXG5HzgwCfYJ_+vda@2>xeg+z zfdpbsKxReshi%Z5LkH*@psBXVWN{%Z|Il|rQ)jzuZX4>|>awme)h0}saB?D%IMx`! zdkK{^3JFeRQf%0i0%;WEhYT4K+vy;fb11vJ%hW@bxD&{G>51QanF|niepUE?LgN=z zjw8RlSwbRk<=1ZyysyR4Px2;5NWj?*31~*3l_q)^sL*5}Dv+T|x{I!96@qLYOoA2l z$_)>Jm+#So4jYFA?x=?j@;FXQcnb4?i?Ig91T{ z4cJ8F4;eXY;YB0wUwAP7H=kjlYjlYOA}3IgyILJqPc}AV36Vh4{N&mT(chm#?2}!Q z014w^dKOUM|BBsEztuBRgeG) zfB^O*+Mc-}8ELu|nGJmwOfu$JK)~&db;8?|Ku5fkbRsuJvMFwJQ_-Y46DLmW4ZX*i zq|-hbchNn!{pO(-fVl36>kEM*B#fyY=fqthgiDS_h~y~GTP zOkw$Wouizq)WTEpd~A|UkpKypLqL~6#kb}li=9$kf)mwaN1hIQC2+jg27Ih<{p3?0 zS+S|$3FOyY(~olje&j6K3WH&~eAewf`bJOc-rE_kuNkQDO7O?}H{!u9OWBXY`VTbB z&Nq5DV%)e__3C9dsYmvs*t~hOKkuw)TG@>8x+j{hqwjUZlm!xa{8{u$sDK3QBTzec zt?FL?9s3+J!=o0fSEbH3Ln>90015a^K$k#;tnJUDCjnh<-Y}r9=lervRVQj9PS!D9 ztce!NAORBik-%a+T%{?DuFxe*2(o~H+Z*bHw~IhqJaxO_iDXdxc2`q&KASXYlB87W zG`k^d&{+ORm6le$Zn_N_6bMY*hy5tN%NkNx73%i82#bxjR{eSxx=mfmRBFm`B$}Q6 z?__aGmU&|#0TOUa;K!5-`7C6KfGph?IaX(F&$jMEW+UwDXOX zo(=CLKcm89Z9`AB;KV7KIPx`gY}G^yWsm>~>>@A`Pp9pAsnxS0t<*#)CjDKw2t^sz zbusiNU22X+8U^n3OR9;{MWPkoep~hLDcO|5@HC1o@bz~_50(eu~yFFNP6#YSoRmhcp+!Ff#VZZKQ!qtDs?% z20AFAVgkCPGcZt5@%Zp)#RPte!!;^C75dQ+Lhq3sk^l*0LO|Aj8UoWXka#w9HO|Bd z+pl4u!rKT-N`>2-nz7iFV#0(8g(%qmKd-iI*`oiTq*u7Dq@?7o^r!7UGyI_iZIq*aX}6d3js&71P$RiP9ns*;XgX)F zdk}DxI&fuvo=!jP_t3`|BtQaz5YQn|@ogZUu~EMW$g(*)6r+Dk=>RZB8rs3vJ{tKu zMcwS3F!$iP?4Su&*(|uO2~4%1a}Y~%?hGXwn+jc;2VJ-(2`6xmwhU9 zDhEJ}1YM`ZA&JTSdcoN zW$5HZEt5%gZalrG*CY@uff~t;>ZpdV2iu8#t#4bl@VQLq0(kU3)pm8Obpz`u^+aV2 z6{p5lcADo%fCLO9phH^4X2Y1pO|A|REn&{74RL$tx%0OLs_FD&PS%s*8h7r-lC?-6 zOak|#v*EBVVRo89U$UEatfLjN&nwB`O;XV%2jZaHwP7Ts!uq?yl170@MPWtocuY$I zj}9I@*!l=sI`YXx>KdG@mhL={lK=_0CvbFw|EWBtQMd;fP|cK*&3MM~dtg9(*(?cY zMnH#Y@(fOB_NLfNPV2p1NA)t;;KckX^*#$}E_^HsepN6_Zlf)mW?oeNkYHm|f9zxwX+z^&83@m04C`Gx$45|LB96I@vX39yO z?|;i|kpxJfdUBIGy5VaCp(O#QIMUemr_)Q?!?*?78x5-5vRtb-QOvKZn zY|udjHE2!XSg1Q+GldLJXf_~}Ljsi$cnRmGS8_gzQ?h>DfG9flv(Ia5T(H8Axd04Q zWIrlxQ03RzSKD4A7TFXNCQLX6-k#`^5vzqy;Bjn9fyqVjEXhQUWK&d+r}y-l1cD`S zbc5HF1S?oHCE%ornN^|b*iaS;_)Ea;1S>yPFr&gxn*O(?OP~TbIao*_EdlvkHUZ|O zy(0V*ThZmK;djd(Nq_`A5V+Mt-Jq%(AE@vGhQW#K>0O5^Zol9^h`QEiP$UwGaoJ;K 
zjfe=8#W{mw#KVi5;=AqOh}g4RBM2uRzlp z(?C)$3B;VhRQNwzGX_UPPYca7g(ioAitJFg!sT8vD6C&nXVRoe@*k>qc363Jgan>M z2F1q__8)22snU`wFb5|;QW5N&1S}NFeCBDi#g1hgRF zc7m1P;`hy>IPkAmVY9y0s$cKI{(kr7zP;iU{wl`39IO$5?={jwDS8n&19Gm`OVV~V zPid%$l1P9AswQwt)t7{Nh5SI%==)KqzAHv6vg3U{E_b^_(*vk$eM$vc7gNlRs;SR)_fCGFv18639(#RXrO{BM1Ws;9GFl)ie6`Ge9aOk^l*~BcMw( zlHqH2;}=G)W5ehqgFeb|&B_d}<0bJU&=xNL1*Xg2vmGNK>plHz#|u7^fF=a)T6V%x z{pJE_0w;}(f8+RvY0gD;cwkf=N8K+26`7AfOI&(h=9lkrrEE%JeM-f|i4%`XrBWyF z;W=OEOyKDuLx$+~$2c@ziDdE$bXF_04%rb2#E8I=_1{2B#kv@IY__W@Cv~IQ9#B0A zkU%8_w&2`Xm7EXjRBSgw3k$G^esl>`$i@^U9{gZcI|=L$w_b$R)zCe;xw%?HU6e!u zBv3Vh6fXH))t7~O#Zl-@{^rH5FgTIXAesDlV<(24*y6;v<74$w-~E;0A*ddg=cKmrC5coLd* z%H)%*u~;to#6U%6|9d2gb$(`-?sHXKGAOz`j?-r!?|DF10?!N^HcYpjfFVW&=ptdl3~oOn~a7mHW#&lr?0}=Y3lKO=;F*AO$UdvNFYN3 zn{nMk8D1OKWj_n6Bc1xuWmh$wm{g?YS{!tnO)&*^7gyA%O~(+VxEcQFwoar?h<){? ztgP%-UmG;2v7wU}6(p0+2!kY3DhU`vASY3xj%zHF-B=7HaP*29eG3~N`2v4v8#s8B zOafI9D8u=SVG^L|32Oukx@K^sj!a8`Tj`G?L>yrQp>`Nf&l&a$z zy|V8K9x;Xhg1CjN-|c%`y(3~4$!+BT>d6{9Eb%3909==w z|Ahu1kU)y0jBngvf2wsF6D5lv7ma&Yd+@?@HQBMMXu^UDdRpQt2i~ zX64kgz!kbc0@@MiR_`5EH+PkGx(pNa%9yJcr<)59x+^K=TpxO;?2rUVfCNY&3IaN1 zQ^>Lw3{*t%E{MKQnII~g2z~%FAxQB(bjtEDEF?ez_7Hd$uB_0=o!%O02}KG66_sPY z7N_$oza;J1`*G&Wv}Y|jQCeDB3|Aae-RaSzhicNKNm}{G9CM5+C@4sK-n0`~(&`q| zH2bCZibp@FnQ~H>_|+cUApsKDML;qrj&CGcvMd%5IC=G~Q)Kaa77`!<5+DJ43CQoa zUufjsyU-$=s03JY+6+2Io4Ei1`e|g-NvOU6_9^W5uv#z^icIL(V1iI;Bmol0m_P}x z{Ab2j2Y+ElbkRKcZuIGafr|8F(F~_9OnlF|_E z41_%bTMByvb|I`L%)}x=mN!ilPR%4h0vQpIb)TNj=*n;|Ix(DX^ysDPDdc+S3|qxj z=+H|B#n)jA4!gf(GC3BzXB(SAA-iU0l0hL{mn>NlHr^om8y7^QCbTIT6!|H2nYsBs zCQzovxDs%jlsc~A%W-wzYQM3A_L(bY^y%L4$QNf@?I@ii0TKwGz=BJse$Kf7;d8IJ zN6Dtp={Fsgj8I%evf{Dw`)Dp2l#Iy+upjoaHFzyq1KAb{JjfbOEn!_?(tZ!v0We$2 zmvH2ATb!XEBtQab3CJ2yL(`rO_e2YHw;xQh(5x>ERO~y%vZeBneGg}JM6xOF$mmLg zFT$o2ogx1WQ(cg3ijEySW+Z>prcG+mqD2{9tMNs%kxlVmjr9d8Ex)YvBK-W4Kk6(b zKmtA!=u+oB)uj5jKDWx|G$G(5oP;cWIY1LVltBU{KmsJ-oxuKRVvzS{d~M-BzP853 z(h)crCmx1XmSw&c=U2g2z$9QKuO-Vx0vUB+65No@CGyH2Mig!$djOLT}@v z_D(?7Jbnfy0fx$o1S7;+S9Vd%&iw)YGJQB^gHCs2OP~~<{||PSCIq@chy3lfz8HIh zIiQhgh<3eXP}~`iE^V5<_uhLCMquKn+6c;0h-6T7=+GgfS{pZRRP*M|%jjB-FPbxK z*s$j{)|a8QOY+{u$Ap);0Lh#>vi51K3_EmlS@HHTkeiyULjpDs=v3!@)g+H86*i1z zB|bQ*WZ5;9oaZSLAOR8}0dokngVggile-vt7!ePCP-OS#sv;G^t1z#JHz8Tp24^7w z5{M6hmo##-gTTNQv--8)@q&+b5%^z|GOX$4WiEhSJm?Jx z#E5|WrqgZx=U7Y^)!;oQ>o>DxM_?H)|2b@1cGY+_36Ox{1a$hr%Rq(UZzebLC8eUs zO-0n%46}Yy3Q4JepSt8eWj$0iYu1caN(ItiKK}mu?_>LgKPRW)JiJK`$oh@K3M4=R zRTAh>`$N?{|Ensm;{{d_NG8it*IMBToge`cAc6Q2=n2>5Myqb+5#0n?sz)}JV9A_- z+~1Lf{k^0MKV7qX8_osDoH?%~0UZe}gs3kx5hfc8HP=K-pfVDHwr*DAS3s_pYzpgl zGs(%x84E_nzGt$@rm$gCin6k@abw4hm0izbR|Y<%lBpC^b7ME~bT~8ug^gbftuJ;I zJ%N;RezknY$!()oLIos10wh2JE(l2G#6+0mqHd5?WSOVgL3Y5lNx%gG`ThJGm@M_g zLINZZPXf%O2zlI%z0vG5rv_+mn9DXL!fySh6q6=R>VzGbf2Ioh_y5Z9xd63l)ygWH z;*(E4arxHJrrIaSruc_8YC;y$Xx-<(LYKU$7BY8s7Y_pMYR^;q)%Y?V&eF>r1d>T7 zdDV^=d?W!9AOR9Eioj^7?V_RLhc(nhNhBcOcb+5$5+H$C6PTuv_pw;=iGhkA-*K1Y zp|;C6-z3ztK7#@?@^6Hn#=c)9g93YMWHiL@z4x9C`%&By^9%|uO_I^OofnZn_5^x0 z{%`h`SR*3>U(|o`1vBOX6sIJu^1L73?ZY+oG8$T5L;@s00@@JRA4=qJxNc<|1pQSv zQPdNA0=I*}?_y7Z07!rYW@>~$$JqwH<1zN`vyUVsrDBB3_9Ug^E|+zUsWxH4gpSzg z0J|)jS+Y%|m_L8MnU3yKy%1>>v7P?W=!XRZv2i$eeBJea;4>2NjlhAmK2@!1eC}I& zY{)1Axuq(>bPA(}jABWE1V}(l00Xz{8V8B*Sv3BAkR9x5`=SZeKQm)4z%I>v7BK-? 
zHshLzHL!aUAOSrH$dBJudPq?RX`&-g84Oe?btwolrJ|CzSt5}zKBWRaRN~>TQ#Pbj zAgFXRQYuPzc@>{$SaH`WQx3mW#%IXsX~+Z$8&3_nM|P(ffkA8DI<=K%nka_^NPq-L zpke|;aQL)}Pep!|Gu0y>0t(oTfJ{E+WQ^T_@F@wzoIncx&(?%N8|ct&bE#cuJ{;HO zJD=wQWOnyGuomo9zW<$K!rYQr8_ z*xzxqTzuDfwse354yd_6wXVr*ikQC>a#J}eSu>M`1W14cNI-7_2ST2%!GPDH?QOmB zQC&<3$j!)cuq$KYFWn>o63CputjsSB^vYUjy-A=wXwZ1vgA8!b5j~Yj1T`Qal^U%$I!s&{=kU&}j z>u}~Q*n?@W;0Y2Sfw&XUDOw?^l2(^EM=NAg3b!d06H(XtlnVGLn^NS!j4auvRFsyM z8tLZ_(b$yYw*v+Y*t+9&>>i!6Yh&j#9VUU02yj!1kU*Q$&9D`-`!+L2E|ro136MZ+ z2=sxcH)!nbZO|s`?68nPR0L!J(_>%^l0`KF>^Beszv_6_vj)ut2!ug^jV?jk>jP{* zo2Hpokqs?_u9cLEi(TrCMM}kl2@?*&+E3@WWE6QdNvUYpu3bhtEJ>;Odh6D$4`t-A zg%=HfV8N*>s)?N*K^st zWKewTvaT`JFe6`P-^3z=V)pFWs;sQcn6aryD8}o~i!Z)-OGW3*KGtsP6Q^f87r?p3 zY@ex~1oS0vV&kdG;p9hs2Ph(Ccu$O2Kj)N25p~aQNq_`MfCTm=P#rJ-4r{RIVXaY%YpTs9bs42sE< zCm)C#-@kOpDDrA`>eR_9gW}UqKaE_oM*&MWZrnJ&@*h!IZ4ls$cKI{+=6S10+BK8WH$2v~<@< z&)!mA!GRa|KFHT35Fvph(YLHSe;2G~ggV$Y36Oxk1at{hG}jlAKg!~m;wC`~x7idE zP}llw3S<{Bo5G*}UtzU!=a?~LHigxhZvCWEj%*CbLINZZ00G&Lf*BM6fVHH_luC_U z`*z>@mbgPVNPq-Lz*GYA`}RVO#mkZzS81$|(&9@%R*<+H_7Ut@KQXjlFzL6Z%>^h< zjr9`&+adug3Fs27us)SNZqW)!sc<_5P*N)Hw(`H*Zca%`1z!GYw?jrfEhs2ZZQHiZ zNb|aN>y!m46)O;3x<4a_Exc&NgP#{-BfjG-bd4^NK;#4(SN~3Rto=dcIw(ME0(DA~ z&M2)FQXC18014Pl;1sy0`)|}opmDa{!$9v0C(s)Dr@?N58M_8Hw6l-^3HVDum(3}v z!L0&+UDK(eN(TlRJEbKei-rpG3JZP;9ZZr!@6L?W}dxD6Xr zVBGE+K79CkJjSx%y(=j|EF?ez0T3u`{DN{EnF+~40+kXNy=LxNbt}DsCrN+=NPq-_ zAkYPk{tcE4qH&+vT#0tB@Tn~}Ljpk(kpD;W+wx7AOx|Q60TLjAMH(S!sF4=mrDS#& z*Y|ECX|NW0^|C3&?;}wiVyE}sdv7b;>@_xnqFS|T>c9gt`yL?~6rX+eS%`E4>1ri> zmR;RrSw`Id`7v-DSrjZJKmvXfXq2~1b*?kV?`GLv8UlsCABi8sR_qZ_P`YjN$TV*9 zFbR+V36MZA1e&7RXJNI2Y2LS1uS84BeQS*kkwEYSWYY@yLX0_^EF?ezF(>dn{MTjX zp4Ce}#VJrB*%WREGZxtt$Ucx+1CmB$WNFy2p~}hGHyNZNXJ*ZsrLas=MW;+UcF&+e zgVvcO*jKHIRI#t^u`v=Lft>_SUFe-85~*^CFlovrl)E#-}8ZF@ei)<$qvY)-q#cypRN}BcMwrpoYPT_wK$E zyJh&jE1*vY{(?`x|!qqR?bZIO+UKr{sUqT6?1?W5_Qy^;V4L`*=|ebS+D za|#_s!Gi`#qqx*%{SBxmxqVib$&)9ygoEdq?&Q|3Th+XI^D1>?)v8rCq*1KJCL({V z)KQC14tro>*OYSlS?nTRi;lpb-gxT^|K|ck=Z~kJ>sMQ;4y`-WQyZ)tTLKM}HL4GZ zt@m`C1W14cf+uhfy7)!#-FWWdZM1N^=T_JN3Ft#Wf)o<0I8Gl)R6+tI5E21h0u^p2 z--m?BTQ?!3Qg{>SLv2}ups~w#Vv#|Co8H%fkI@+vJI42&civIoeDlqYmwP;}U%y_x z_~MH;WKfJBHf-4HJ)Dcz3(3xM;^i{EC4q1V9N*}_%%BJdwXMBe@%7sS^KEsGevtqP zcp-56(N9g~TmUZwcdIME-A3*9TGyxRAxtL2$%5W2Bw!N(Sp(_?7$?u!G^+H41ga*W zOQ6E)%=OH+9Ikq>biJYmgkA1ZTr9FFCQO*HKX%VP-z6jMs+E+KsOO%0PIc(eLA7n$ zR@JUuTVWFi_2rjest-Q+KyBHwMLW&e2trV41AL5aHpR&M7ar^+)altU8;O7UcTQtmJP3-)W+QyJIdg(MI(f%ArRdWCZVf~G9(>$2o}UKq|s)rakYieUij zC7VK*Xjx$I5A2+MEgH8Xn_>r~va+)6OF&h%4AhS6O?t$5-FxxH7wfW%Pw41Mld9NB zsW@bUh0c~8l7M7W{te$|OJnBtQZ}K!U%2Fa*Uj z(0RWhTH_{jS=<~B&|kTC@&v37?12FLVbdf)0(ub8Wwx``!3mph@>R$CnEO#w@}F5U zC~&j;f=VvoDZL16gq*wek`$<#{a-i%_qJNAe!UC(@6l1wLkl}0NOATa2X%W<>ALh>Y-3G1H90=yTQhd4Hh z|1UP4hT~`w-AH6kqKPadKmv9XV6-BH@wGWxA!!uNT{65E^{h{$z*As?(BJ1Cm>EI{IQa9k7&407fxz6Jx5IBKe+tVnv1>O%khz9{=yj`Q_4YVpv z)2fmC%YAEFRyY^HDlh2_3D`v7r*P&kFcZE9%a@6Pu-N`)f$zGP$06{qx{EYSp9B!^ zgXuk+&M>HyO9C1a&?Qye>if7k8VuXkuauDrjucB!3GFme?^izyh{d6OFw$Vv7Z^qo@=`QRyAFy#F~A zrEIzH)|t8A`Fsw$d+(Ir`Oe$ry?f@&+;Yn;8wjK0qkv5xi4a&)RaNEu>#{bIr`++? 
zW<_4^$lC669Rd&tPM}%IJgG5!5P(3$1omwEq8v&=Hbul( z>CVgj&3mQK9bWI3D}ez42tWV=z6l&AHSZU*1NUeW-X~{ybN9pT7U*J0kL`R^@%qHvowrs_ACDHXtPY{3r1l%QX zxVUzom@O;VCy3UQ+?@_QTaCbM`ChG%B8ipWHey?^v>oK@u41+X;Y6zM(p+jjCDuc1 zitkobga8C=AV8oZfmw1fP@(@JXaqk?km+t zZ2y(E*{T~3d~GW}JS?_{*kT9#!379FU%zD7V zlVYZpeT#bDY57_JQ_;qzYiH^CII&~I^l!Wi-|lq2?)Jx| z@;km8P!R$U@J*ogw{yH{b36L3Pelm$Ah4vmqAIh8D*50;6$n580!rXmF6-p5a0SG`K9Rj+2#Aq>_f)ry!@4$3$F=~bg=z_`v z#kz?-XQ|%}M7IVZ^&Acg1R!8H0aKpG5vZ{AbrdF(LQ@TQu+(qgvIhfhkU8TRPBJPv!AOL~n36NHi0P~^*#z!5l0;19e@1s-Z94<n#3&F*nt<*FwUwAY z+p%pavHrgACq{ulJOq}0-$Isj+P@Yc9y)RKYRU&@rxyy?^ad%&i|kQp6m%K(VbZpH zkU?!~bMnb2zbSg3w@Gt-TICQ2?~p?d@pjv7H?K{bHeRDfjl32uT6o)Sx1D$7kw}joK^SnI~oE@@|DG` z7>tG`y+8m0?h!agT)J7zrqAFDMeo646U8VHNQ=NC8Cdt|-d*gAv_u3LR~NwQfiA5A zw516IAYcXoQyw7N7N{*~_H24%RFGpUD~HXb$iyKPl1UL$_HvLZ3*TnfU3c}`w{Kr# zM~xdd_V(LvKToqPY%MZhu4hsVd%EWt^{)kxtcrX-cd@PWh;9g&PN3q$W~NI-pUDLB z-YsRjK0eK4iD-iW1R#(S0nK8+T1+2Y+EVnP$dO`i+aeg9&J%b?Jlt9APK!L^`q;73Z1MlnH0a2_C1`M@fw_2nMv_%4Y#bj)UaVgZ_6#W ztW$T(mMy)lw$hAIYfDEtq~dqhst+l=%RfdvEM1UE5t1X#Ay5;6R$ogd#k@^wy2UjJ z)JZ^AhAhojmt0wAC2kr(p!m>px2S(DfB{0000CDCTp^APvCKJL{5L@CY0GpY*L?!> z#l_>qbeDmpVm1{0UE0nDp=g8v1lA#7N}yuSI?5cnkcB{nzOlL_#GL7h7w3zv(ibO{ zmDNSJaw;mS|7E??PCHqhRUwmfQI^lA5dXvcPFwvNM?3`LCa~-SGAZI_>|*~#UhY;h zC|sObe1ZT3LJ-i5i5tbv3(-efP0yEs??`KK#)W_n0(VQDzG5?d)J>+!4!d=JqW-l2 zy0ycwWCVp7r93}4Omo^oz+nQWL@VYx%-E`pF~iJ*rdDWvV34AlarO$*(55z$TJf%I zrel3-h4|nLd();(y{)(2+E-meD|M7ZDt>FIw%BELzHYRfrk4L8_JJ4x0vROG`s>+} zTA}Ib6bSeuApi3&mH&QqsW1fs5P$##RuIr9lDaAVAPa$9FPi^up>8B+kHBl<@~&bN zEpUBb(Wu*YP#^#Sy9k&PoFJ9LuGdYiYA>zzPW?Bs5=CAsonrpzJ>&rUPF}yheU7Lf zl4&*M;k{Q;q5G)TUo^F%q@<+&iiXyZRG9OJ3>jjt17WyoL@HMl=Jz|y=(L4^^8}WB z)ZBR=aWFapMLF-b(q2z}6J3w=1OW&@z-j^wMEyU-j1N1!5A{Lu~`X4UnD(MmD1H(8vnhjCc_ zhIH)Mv0hbGl`p_M@4U0upn)!Qva?7g#f4eUq{!vHFu&7wPCBX~kPv|-AGJtmB!;n> zK;FyW-fFier`YTR+93b|2t-7nsq`az(|d4j16<~V%m01<~ zM%CJ~V8H@!!h{LF^sl$xdfqnMY~!o0p_RI}Z{PkHL$$>&t5fTanpI)?8_?J}FaQMN zAW$}C7UxsM0oJWPD~hXgSGwgA-ar5X5C}v-qx+hf{)<4Z({1#!41c89vUFuIIs`%! 
z_)&V$^B=wn-BWxWXeLGf_`0WO2tXi00-Yo5D{NPqEoMU5pwHpA#<~_j_t;9bl;;O0 zI$mvqT~gZ(AJ;YI!1+>CRHQSNx7pG6-h0o}y`cP*9e3Q(>eLEJg~@Je#o)o!@|@U@fc4b009WpNT9V`eL-x`8txqUJYCZ( z1RxM0fld+j6{f3dX*@kl2d1~pVxU6jBupknn6{?B9)8j%haOm;NilBRxPme%8Z|QI zK%AeZ!S^>ZUl(QekP6MD$mRd;8x|EI0D*!CEas4kf_P=7a?Tq~21RCO8%H4!l)$_* zu75HsYXS6h%~!;B3<@j7b{eG6y{dFWe+mR#B=D3t^%sL38)&dLq(J}zD+p|62#D`R zM};9;Vv&*6Kt-6Wialf|`ouCD26cMMDW{ARIm*e}qFEK6eDaAe{TigOJgZ`}_19nj zC|}MxSgFf53m=dT%KV*3;-LSy1c5jSluag+B2K=R_FYk&ufE(;?Z}1z1RxLvfsLh; zSH!lCqO*ki7%$^#U}9Fn<1!osk|uDi45&d06N~;1>faFx1OgIhwByLGSy>AZ5K*+v zwu`pAz`aci+|rVUSq)U^^jsWbj>5DJF>K;ZhscOlpHDG%>{zd&!e6j)haGnCY|g5X zxf=Y?Lk|@v{wgtHzy}}HRNhNXkb(vXxJY2}$1PlR3!j1$xU*%?XD0=3PD=e7oX&ipX!T7b^qE|7Buyw41BygaWPs8Jx`3V|{Xsc>Z~ zqwu4mxTu&t_o5(49}s|mjRZ78F-A-y7Pgi5MX-LTooSnJ^gQE1avj#osskuZ6}?iyM3bVlHMQy zfrtp0w5)k@L_QhYO*S8B{yZIFQY&_qZubAVD>uKB)QXSf;rD-R_D)kPKKke*UoL2B zh3@@iaoI`^sjxn^A}IgP(-z(gNQOXE1kRI==0?@o8vR^6;Ro5Tqm$RKZ=WO9 zxKZ#AzbUu{4Ilu4BnX%ksBr#ySJnd+`hpIVRdI>5&n59&k>(iH)zuft_*Q3Cy#N0D z-m+!Od|}i*pc*%B?5nP!l`hJ1Rz)u7u?>l#FHB_FPhi2vrtHsYKT9h;$mQ;8+v~ZB zR%%By1Rwx`-~_r!(-*|r2Ja%ZmXb@gJbQv5MQT&Pz=ja`L39i?M9b^SR689Du$d009UXNI(xLd`Ya#KyCF&dP6GgBgQ!t^^xL!RtVf6j(%d8 zv+YDrFT*q;2Lf3qpud-!83LhFbU455F45W*ueLCm6dH+lK9i!Vs!GoQwmy?$>C&Yu zX1tz%9mu2@oaIc4d@j!ew0d6S+=f6<0t-HE6|^bsej>2ZRz>Hovlif|yT*T$_wH`f z^T}F*z~y;;!$5%m1R&rd0e#Z{tXK;RoqJU@>t4n2>~~`!5F|rD2vmsu#Xu5=8mI|L z5XdrtJ;Zra9{f%gE&le1v3SOaR;-x3C*(`e)QWD>R`c|nE}Bk}%jN!Uvy-~#lizfT zl9Cc{#~pPKT5HSJ?c2BiwYBO)3h(mGg1e*(_9Y3)k>(Jni9p#Gv%Mb{Zcx)LuDMH~ zsybK1etPcCFP=dF0_zeuKrZQL#HQ=sGXBc*qVPbm1;#6lSm5cqJiLj;YXKsrL3cg~ zJSlbb@D`KGFHI7I76?GV)09e6N-e9!OpzCj4gwWFa}1MOQ7+w>5@Gw^QzblJL>Q@yg4jNUO-1q==W z2&_&(vnWg{rW{Q1{4U?^m&jTG0u?{M^ulCS>?Ga%>gTT9{O;tFPyQsI&p%|dcbZk9 z!3sa6xVYHcVTT?3R5h}W%+)q>NX21Bic3J$>_rXlmSLRV#4iDq3;}_42$X#>ldOt$ zs5pC}s;Vew+6Q`PVdEGCAdotN{-QuX?=(tXN#O$YFhPCqzohV{w17aS21!rj++h5fY~kf)i`f9lGX^1^uLLo#$CpRHS}dpGmQJ@nT<- zyUjM+Se;2BTXabx#SrITmo-ghw~-@tuTi!qsR<6(ekRaw=8MU!1^9VL7r&eLX=@kV z!l$(eNV@2~ZMr{g%1WNK)bS$(AOL|#3H)AqyH~71q`fB7nd;Uci}QO|Cxeqw94Da9 zHm)(uN0YXfF$@iIAdo15ePuM0-UqywsP`!imdOX0b+Fcb34@+bv8}Xq{u}_!q!2$J ztb?KPw<;@v%!O!$ZLc8ox=KI~dNQTxUXGudA|_59L@U;%t7!v4OSv?3o@rH8mA*5t z3JS@Dc6yq&-_(k&w%W?-)C%D}G}EaS)udLK@H>YFHwn!9ltU`qoY@s# zVSIxC1XdC_MZS&@v+;A>aM3+L%<8mv(NBRu(gZXxVbUs0UH#fJX$XvGBY~3E=T5bB zEr5Nv4=_mbc+q0@b6CGDZG1E4*SDbc!lYJgBZD3pbYNTB4H`5^kC=bJ7QHp;tgNi` z#*f$E^ZJV(QnBThTh?FE+#1`qYu8Rs)5~O;wYcFOq*i2d>Tx;&0&TvW<^8yDg9Jum zh!hA^dBxS2q%bZ+LI45~NRq&LGMGN>xAAlF6{35f7(ohwK!HF20-AwjO3}YTyO<&d z9T3PUf$hcbO$_oq6Z<_WZyVo)CGk2L8e$O5FmRr$9C}bLep;PI{PD*hdy5un##H^K zLx&FaS2DPU94ex(OW9Z4-16dpmv4%DEr1?ak*m(>2RRA^AP_NuTy@@?JE1h)I1T<)X00i72aFMuT>w84i{#SRV0#6{23V}CdRFe+i(ubrJ2tXh` z0w!(sq!b8Lr1x4dVr0#v_+LOJTS>d$1Z-Y}2mzq{p9roGu@ z+_P85y%r$TfN|b>0{ZuEh=pFru6*7(Vi#Me9mxxu;qpw19GMhx@ulBv6IhW+(I(pvT=PEXJ=nVYvmd$U7XCm00uT@a`d;`tF&i@w zsztYM?{uvg1p*L=hJY?o4N;n1-s81c)&l4PSYt|gUN>l?8UfP+6|ZDz z0ti&pJ=dax`i-?z;W7ju z5Rkx%Op3N%o1K0L*aOWBAn-3TC=8edBtQTHQ4(k}QMBw*Tt^BLlYQ>&-zn-?-F>LcWNPfL}c z=Tmf&w#U{#gxxg;3>fgGc%UnjoG2~_DOOI994bAspA(Z6z=^7=rkYjpO@O8}fdB;R zA<*VC&Znpck;%7ZM?CKmlLev;0uX>efdtl*`j3d&7^Ijdx(^T|NFfj?5Qv$;2hzW0 zESXT&H$em%Adn#fjm6hL80N2Lb{Vy>_|+}0-!!SU04WD5%JYK_do|Q0M^h_;mJ6k! 
zF6VK&6crWe6OKx!opi`KiO38UcXAH7DVZ(vk}Kmi1Dl9D!O0w1yp zKxR(WT<+o4y`I)j>l6q;00J%$Xev%TA!g&gr?W-(FU0;UMu7kX;wPYy3RB(#aX3Z% z^UVklSe?L8@Ekyh$uASn(4JRyu`1 ztprZ3^-elBs%2PRXpyN>k|}S)IeV!z4|+(&`O@}IF;nsg!;z?^b!BDc5ILygw{qM0 ztdUv}San%4DKwKQbJ1Cyq)vlJFPVJy@mJ;Zx$CT&G{i!{0s_l3DL!lKmF>tO6&6g_ z3W<5|kygE)dEXkn2!#Lyj3&_P#@Ve5SqmUKmcQ5SWi+sw#eFN6`-**9^KGt1K%k}c zk;KYbJ*aZJ7&{I`Flh;OF)5*O>KlfPiuwp~9|F-4=q6qDjIO&#dU{uS`7V;4Eb4@e z3jdR(D{)@)Uww_5o5)WmtZ~J`9|jE?G({eMlT~5Q-0m`B#E8shRd_AEkz`fa^I}D* zT?9%$o8!$|(#9^)*;Sjb_Np(-uKRcn0SGumK>y=;NzBH;#AMOEml%Nwfe=I4L;8GA zY>t>7tQfa_Eo00SyH-pONuiieV6q_+lL(#};wmx%5a_tq>BWYu1qh(t>?VT@l=rxS znqrqkprSCunpF|BTp$hgbQY&eb@lS?B6QUV#SQ}HcF2rgrDjzWdAX~j@0lJU0D+?F zYH!wOECRFPm5}rDkGARctUkPqG;A2g+4r|L{C6h$m(h2 zJ4fGjojtaYyJv~rQ)ex1nnGZTAwrygW@HFwWI$kJL7+Rrm{tf*m^LE@>d~!nf@V@| zC~Z$P(5FZxX(okqs9Rf6>>#j*scppn28mZ7_ z_>_1ETrAzDnc8r;jBjJAPdu|>Zm*LKk=Do%9ms%y>jcgf2Tj|%ULUY#a!+1TDGL8} z3-clV|71AlL*qq7MVE-5q*E0B+DfH`?9(+Qm4OWyRMRQ)Uj7;bq#ywT&JoabiW#I+ z7(K5!&wH#*&nI=$Q;M4ea?J<5mG)WyH_76asRTNSurXp=m@2RiU1Q~5Z!tZ@fD#{p zUE=H7ch9@}u1UqT2+T-p;I+n0GpM$f6n=z&vjj91^CSbk`?rCb5|KopqHxS%QY$u) z;RX~wq+Klr3>Yvqm&@H@m+DNb-BnU6_RT7RqE_C`;(Idb6j_Cg-Ev-1xWGvR@#ZR~>TB~THRT=v>6GQi~$12iB30xlD{P@HRSpo5vspwPJ>P!aqU z877nBZ!)a&DW4LYxJ>+XGLxcZ%a-0Ad+gyIe)!?u5l0;1?Y;NjUTJA*@K=)=Er*&R zBY71Yj2yYh%jd36eq=_3fcpfNXHrZglR{?t(C)eMQLhCE{Q}B+xhLYxrYO%34y}<6 zAOHaf)J34X+ z{=%eIoF_v&|0bofvQim2vr?#a>(<^r`|RTt7ZN7^HuK>z{}h@U`j8A2C| zwTyqL$Va$cdOTLFGV-43&<_C<_SdrUb?b+%YjsT6`oe@@nrrilKM+s?yNexeC<YL^N2JAw!Dh35?YfZD$PCM;eIZ@l0tcvZo-@e8m#i|L> zV8t%G?6T@d8^7*8eE9IaZ4@4b=EfzJH%UKaRYU=kJ|M6bfi|Dc_U0_*kczdiB>rQ* zx+t@I{Ur{Y0U-c^s0i#Q9X%;#!b8MDeY!y!{86l`(1z5HjKJ*3dJ5iAtKdy(nIHiZ z{>BifNN~a#Dl-K1+4r@EdOJ&WY1U9qg`xNVRG z&8qnEiwz7?ltC%^{L^K-JodXCEwfPlRO4iV{(i8Zm%g)2n!pTzPOYIjMp34w~% zE&+Aa7v1Wqp#i?lGe83pAmAE-f#Oh4L!Huv-A+6^4a1i}MHqT3+CC>=f@V_aa?;bo zOE9~K0RsknCki#q+4>?u7hgW~=+VPhJrgU*>F0e$jvTp{iQ1CT)U>!lSG)a~#8`|0 zfvgc|`|&Kvq~P;;TJxSOF4Bbq6bL{70`Ux$ zuO=$bpe7Td6&q%dyKOF;kW!({X4nP}y$~>*fG(E{v*J8X_o}7yo8uh7mcEHrL^D%i zUg(-y;d~CLEY-hE{Ioti_`?tWwgb}ii;XwlIGR^i3cX0g;-VCWHDbu+sTH0_YK0NA zfe6zy<=p6_F2%nHrs_jiIJZ=;u2E6SWaa z{<;al3C+#t*-P?sU&x52Jd@G=Unme*o4}l|pZp=7wE%061s$leDE5}bh|Ehx&aP0 z5Dje&(1C>733RFbR-$(ZR3wUgvCXpNm3pO15FN;z&pI7+ z_~znejWsyIUZ4)oF|MUYpzS9!y}8SrkI$zEc&(9hUjCA`F7aaq2#gxO`Q?VM1<1fG z;9>{@x@D4PwfZ|5F2q0)H?5TZjuX2x;(qBa8UkNO(@p4JHV@qc9mGk%lt9IlIGv<# z2xNqSzK^&~Ot;-LwdetYx?Lni2oR`<_GJ(zt3m@6rP0bUueYkIDm_5X>a2?C)2DmW zrcLt&w{6?DUZ+l-eAP9vlAI@Ufr+}3(6l0xB6ocfV=)EVm$(+3JRm8Bx~*JAhBBsybSzcIvLf)j>JL@hE32~K=E zA}5O%J9+*3_Bo=K?jpH7HIiN;@1#6GIPyN}5CZWM&=k!>;vFJX?-QlAiM0T0<1Q&FaU!+i>#x6F+qD{hZqufX*ST}&8t>U~Q4Sc%Y-+`(*WWWw zPM+nE3L9RiXta*Niqwh@*6FrP@QbZ_JUPZP&B%oS1hPXwQz-Pr>GLggd7fzQC#DP4 zC}tB_BJxaFp1zaWm@=)dxp;3v+KWE8^!Jb<)6<3X5U7wG4~Nhh83z4&SUsd9xLQXvO(ME`0_rpNt9j2N+dGDDd+ zO7(gT{^8|vv&~b6A_&+|pv^~fB$J|*{f^Y_!Obr{J1UK}0Bfi!$`uV=;{rcG00Iy& zhk(9GzERA^&q4Yg@fTu2KmSQnO2Py_O?bSThU+XBoqq#Vlj9a#+f6i<7@+ej(V%>?TSQ4v|um-74b|`n5>G^MUoyx?{tyvCp5ijfz3|- z_~Vb>)TvW_xlmeK>UHVT#aCTJE1hqsuGnRDxqRf@eBQf0_JJ4x0vRMwR8{5u@Wn0|T{Ax6uzWd>^s5)0M&KD+nBYPMZS?UJGDC zph6#_kyT;l%Xm*iWPNOi4rD;UMgo5m(fth<{-x-<-EfVm$Rkh@5B3+N+c5#V*7T%5 z2W)FivjGDJ%#bq=Mpz@b7NLcnPhoLZML)@^=vfN}_b)dp8Aeux`_qhv=@4l1@oaB? zMay&scX(9!|7M88?%^5)AYd;6U8#JR*a;T8@U3X>E%v^J+Osa%gy4kEfhiBcv;KNm z+yA}RzL3(-1Se7g&#=}LI9NOgvtT?a0DqN%mL)Y<=y5jL=UzFVpp)0HZ=WMVcjmu? 
zBL7w6uUVcS9RDB*j}YcJMVA<@pRnGHkyt+rssE6D&kbXn1{kw+iToMANPoB+_6gHjWJe!^Oqg~S}q~35pbxp4G z?z`{$me-?44^Q^H@?GDgijw+rP^Qx<8aB9A=3|aYbApya2|V{vx9%yd1t?Tz#_PBJ zP*W>fWSl~D?pJ&H3#03eo*)1L2t-0a9|r0N^ur_RE!j>c%5eIC`O9R7WwZzhjF-MD zBkV6|SG!6>4cJl4A)vq8jSP`9)({=YfPgIoHWH!Fh?$q)C%}@ow3&rMKy(oBFD2Xr;_P@X|~Q zFSq=JM?EANn`)bI^sYf*k^E#rR)zj9?7fCEen^|ZZ_*a9a{QU{RfD1w2tdFe0S!(( zA!fq|$_GW~oBrI&W-S60@y`2;(*2l#L+DDQa|5=urrDrDgJ#M$LN{9@xE7&$NQGn^ z)N*&t%iX$lvo@CX(wAc>5YNuOiu?+uz=}ItvbOIIe z&(Y8jL!2yaHGRqHQdwDfl{jd1R>iDYv%Igq`pVZlX;#JNn{V!`u8Eao>#7S()Rly$ zPQylPkm9B!#$pTzWQ{;cRh2jWOB)u!7(Fv@l=Xb-C8HH04gwI!5`k9Y?6YDvt{T_X zj{Az~R<#s22$&M6urZs+jaR_BUhFTITCaOKl`E{UP6a!IT1Y?xj1P(F_IWmxOGKo< zhlMtH6O%@Q6ET5Zt=q7f6n|N*FDJj%ObW?BxY2g!^n8kpWKtX&O(w;!_rKzOyVn9V z$Q9{X6=4Hw<@v$3zxvS+ftU!CN@~Rd4yg!CH($h`m%#972muI0O`uFVd_l~XrwOb3 z|BC#upBSmMt5dQ6`_~L{p{wY~Y?`_ubFfOrQLDZ&;HxRGPXkaO0RpuWXe4+3Ep}k7 zHO#v4k;u8)EJ3y|>%bkF^E;*zF#b+gblJ&jWzfGn_3JCf@r@TuJXuI?&{-TiH1 zBQa2I1T>xEm$ltY`1(Q_kF<(}p);Iy1mwN0_n4S1scF@s^<=SSVw8Fa5U8jJP(-&i zwc^!)eQ7>ZCQ>WJ&l>_VGNzdxQXxSXUwSM*q~g$#BS&`k<*0*|8dOwSA6Rp6T5!n` z0;TVhNfBr&^VKA11p-GS2tXiW0vk$y`nhNOi2F^VyZ^`_zY<%V#9)k(IDx#3s6jIm zik{Zozyu)~)}Rj!n-%E)Z|)c%aivaPzrKBrD6pyT>iYL{sqflUgh2KP=&6aXAjVc-iBN%j+1o>ehv6Z;)W)`Pc7dp-MnvO_W& z1Rwx`bqMGmZ+*yS;~U)jME8+mJRlSZ4i{4b73&LQ4T@3HA<#&M{Zl$a*ElNg;cCRf zMF=b>pczk^;n>F_9^Z-9zgeU=b*Tg=lA8j3BcyNEgA`q1@z)>?eYIJ#%O9VMdo6%3 zAwO3-^UO0fNO9xOSM2=mz4zWLD668Rq{L1~<;}HZRqSA^_^`SgR95T$f>v)=7M20+ zArKdV((#&A(LAp1UF^5qMW^ry0uZo@fbO+FMr>27RId;_O1>N`R%xMjBo{`&q%9Ip zElfJKBP5{PZEX-?e_^}Qhp-d~Kww=0UF4ExBJ95IEn}}}_TC9%RxiR%nl6EgWQh+A zG2HReR(~;_E{hj0zQ%=2iqAj)+}AucZQ9i8ObXfB>LOo`I#@|FDdc~h;SRcnOAtt% zz=}+Yj;ZU2LcpYtmyS$uEx=o)J)eFy3f=MaQO>E}@qndQ2pC5|(mW_YOb)}T1 zuvdvu+$S(qTpVwRo4rLxW>`+|Na`_=~bxpqzfB*!15YRWWuZVT$FPP_YNa8e8ViV$rL?&n1@WURUmCn|&l3R(10F_3fko zNhMtfR3uG&#hdw80}iXdw9c=z7%*VKT=CQTtctN?$ND~`qCo>ai^SS;`0(Mwx3^Y( zNZ}Pl4RtjhSrs8U(i{Rc5h(p&9$6JNfmwAepO-!O@~i}2#e=U9fIyN2bQ^{j#mbT# zG=j18Z|1pT`XGdoO#<(W+mj4&T|eJnVu+S3$k1mgCS`x=rqTL(q(A@yD-Mj&1B5OV zv+-H=d7}FxCNdZS8z(%Nr)ISZlS!ecx@}>WJJG8tDk>T#y;`41F=4`ldf`-6ReAH~ z&8xSb!4+~cU)j#2$a^CVo*$&GAW;4Fm|Ig^3&1plC9piJVsfXj-8k7^vng8ldgf^- zox>>zK)`eYdx^m3#9EmybS-@vq!=W2^;*~YF?$5=HOOUMu-eNYJ+4aWB#xPM9))tg zOaGxj00LfLVK7dtx1}r|65Tghs@(|L1S-;*hL>g7FqstRrz1PLQCDVC{3E#$;~7cM zr>Lx~th4hkzWAcvI(2qnz)jim>4;lyxn(;8q@*OFSkI?el+xe~3jwDIl)gXLTUe>t z`II#YR1YSDVof6a1OW&nLO>rNXkc`;y-Kh8?7&}HwH81R$UZ@AxL7=;JTHHvDK-%> z=_zqez+ETC!BL7woPq)z?2#vkGo#P=kdF7kXpHjFBBVYW_Pl zXU-h&%{SjPNO52(IY|$3K8jWwozBkxoIZ~~f6@$IBe!8>rUWiIE9 zZQb+PryO(*mmmNEvk4p|@*Wq{hY2>73K9ETu{&%Mjn-HR=w3S}eF)P%)CR=rake}E zqqzS|!w~4+HeVa22|0!l*k#^&r(3laAUMB0q^a(Ic7~X(&#Cp<N*!ltp z;j%GJUlt{=1XQD&Sn7stPM4Lb6&kFtx_tim=iU=fJmLND!w*0CQBhIhee%gC-Xo7Z z;w@RSg0tpcK zwT$twm`SUU{kXkUD(Rj&5BRA}olFrhZ3X+^g?@)j%|J5F>fYJIk{PO?QEo4&HkBdZ z4gpQe)Zau@A;b&(*fN+Zcsz3(-j7e*?OxDSidPFvA)W~btu%V~f71h-`h0c}xPdVk3qwDr+_pi6<=>A%Oug*Gt zm~0q-X5B2f3jqiOA@D>NQ~QFfuPyDnlCQFa<*UoT7Fy<_a(Z`ZA*;vBz~sb?Gk-P-P0vCG9a zFiu_Jg3gsD9ClSWw;Ed%)p(VQNfCHW26`pnU^;i_2W)Fiv(ryMeV&ND)*8X}2wl2# zsW*TAd{=`M>V5waBSvgn4+dv%7d7y%5a$*<>mZInAVmUtNX4Q`+jba<9$3EGD>1n&R1SwQ61p;9S$RaCGpM~Bj=0cF-R`G)%g+Pd^5U7ZX zWbl5%Y}?ddhTT5+&~~(x%!7Z3(jVrMOWGGCpN|=(P&MDfPT(2 zEn`6!KJAqX4Yvk8N!&RrN|*LNywk%GuLaOdFMY^qbt%sewpu^plO@neh8rpNKe64! 
zTqyeCY@iE{nB)V|iu7kg!-=m1l#!EmVWJ%YhfTKGz<~o7$UbG)B|9pkg&=U098%%@ zDetxMDatp{B|D^6)CP#_5C}n__51U^#Z}o&t;l;tLrAFz@dBbL1Rzid0e#9hLd>?$ zi@L(8kJwvc6bRHuV4_rbzP?Hc-)|t}-7nT5;c;CWPSdD{8|u(#(KXXhZODQ^R0KLo zN4JaV-&kGwVB4w(kz8ZZ{ris+tE}-Z7XuTp?agLjM-6WnCctpG__jkJ|Gv8Q%XSg3 z1qh_0z(x|NxL%r&T2Uakl&TlYe4U@t*r^ZOaoEW3JTIq95GfFVK*R(ZEUECmnUdAi zirhzKJ$pSCF;cpN00baV69G+;)%{m>QIt*PTM@gr*m#>nqcwE`Chpszd!jrg)-rWE z_6gWYBtIx-;y!iDM3<(`P$1wcfo;UGTg5&XJ3-9E-$RiVsC*=?eksPH)Ij*^ZbYD> zZazuf)l7<)0}iGi8U_b!YfUrFq!6*r98zHo_<%wsIC1pw;lsBG*xdSNMP7;Q4bFdB z)=wl37)d}gDV9_5b!B3y{xga{b`y)yLy|2tWV=F%TFkeO?wrzj1dt zMF!A4b3ToGAo_QX!1Lmfi5WP$&$H||9)?bI}fyNLp zkAOa()Pr@N7W+`_B(a8KE){)`ae&xtmz*-m7XlUL%tn~3ieHHvJ9wkkiHoZZ5 z{vn>;=`~^k#kpKA2E_mgD6XmN0CcaEd0OKY~VAucb-aM|g0P4uaiF)IIuzjTJBVq(A>Swm2R2d_- zVf|H8sZk-L>cTb(1Y99dCXSpT_JP=QV)`IogBGq84N~+Go93EhCiz34!kpQ7U8ID` zs<^-$SK?7}?6Jo#l$H8qRm6igj$V(+c2-5hO9d*afN{LC=ri*KT8+0Ot0JIVljZnSh=zp?{NfOErC1f1TL2Vy>1+;#hAn{hE=o&Elt9$GH}u zevAeOD$4a!c_vGkWlV<(v7W9|Yu2pUjp8Gz6?ROtrdH_Y#+fY352?uKMrYD_oQ6Qm z1R5@tLn@|riFp)by8pPe*RHyZk^%t;Kp+kR`l0X^vD4z{Jmx;fN&mgYzKeMry3YoI z|1;Rl{YBeLVr>o9n3gnsa;nR?Tcsty$GCH)&JZ8Pa|8KWmgA@aeNLn&h9OV<#{G68+~ zaHp7VByC%HO9byDHp4dI=rxwW^P=n-W7Vx#l&G)!?F`nKq_mt2_7}0o#hN5JaDcJ& zz#slr1pr5r)ClPB)PZ8UkUT3aB5&ip#%?a)YEg?`N0o?VBJ zT1!S`bK$fCm0{6gyAs1+iITPm29f zENhuuABxYt#l8`vBt(EfMM9tp7)G-yG(s7)lm~5SOFPY~kZpv>s<36kJnz_%BS&s# zkKAaL=8JQ>YGzrqz0w;5AfNOKcE|M#BhPTUNGy zGHU_W6VLY$)8MRbF*{vMw^O`AY+o^b&ydBUL7F|qzRjZZY5Gi{B2D_Wj~!+Ur~Rc{ z{bh2xELybaCUMZ&KNy`mclP@9>Ej)A&_Ui_d+p_IxZ#F2&u6h@75vp^2f}Dyo>k#_ zWL1QLNLvWhLZHFoWpX}+t63HKPfL63st*z=5P$##q9o8*I(t;imWO<+`>v~|e<`+j z^^1HDfrttGpY%5);y&Z(?kMS011pEb(Yb+r>ff$Q#oiLrY!VyF!y@tt8-$|KX#yIE z=q`4mm}d06DW+$o{8#Ksv0sUqd;-15OQh)bm%0!_*-#QDkdudo2@lC|J_)=i_4e^u zckOB#?fb>r7hU^VR_32AuitB2@~dIPhTi`B@9%Z!&|w|7KKtx5@0C|x@httFtq|Q? zo_gx3d)?aqomFcAzFNo03|#o?%s*_B%N6V1h7D_Uw>&?%#_L>!00jIIsA$~KYktW0 zUZY~SR;=an-qEdlJ$=7F3e0=gXgF|j_i+&B5M zE+hZFSf$A#(dG;ReXZ$}D_hDxMd?$rhaWL=EA-)FN+_& z#nLXVMZlc2MHoDObJ3>TS!}xU>$dWBhn3%#$X8131cu73axtEm*Ul)`J4LbfPUm3= zgh{99D+BKxW^fzYHfq#J52{u~^6iS1V1cAF{ke8AeD55cbEB$;XHcYHdM17jjjlS7? 
zNbD)GE+z;nK!a{8@J#{rsS1Jm2pKdP##y!-CE&s(ryL7nvsy;)IGQnJNKC!I9OP<^q=syCe?*3s!4 z0&Wvn+N7b^>X7MP!=j~5IGXdkV_NroT2~BHAYd8+{V+@xg=w==zn)U5qJaq;SG?#x zucwRg&_Ezi5+$Ikk;jQOO?1@ogXsfG-FiT?4s6Xf+fuaaBDbT(Qh%`e>u;yM5n?TX z9;l-alU?~6rq7vri=|y$$*5=~AT|4;g;=YV-|Nr%|F#Ai)pS3mRp0B^4q}=WNr{fY zH`3LSV!EdsC1nDs|Its`f76H8tcsw43SA-9TkI7vXG`P8jYlk5vgFU=p#HJ8y7)PO zqPV!&+j-}my)k3P*ywA6%F4>WiSPk7iVv$?H#%EjzK)^8qZN;DG#dfgJFWn$ovvNmRs2$1m6Idiw zUKe{rtg8O%iPxAYBc3B>+v`sscn%TM&t1BjdbrsC#B3<~cj~WV{l$uG5L|=CnR4v} zF;@Z;HMp4CrC~C%u6j46Xs~4MqJhnZVtOD%V=?_dOoIxt4rzHHV%2^5k&+?-4Y>VA z?1vOJF!X8!$f8&chN0hv$)wOXdwbOE*Z+Q1Ej_!oFMu3aafTd%a9zzl+jDiNop$ni z_3Blx;Ogq?d$4?^vw$e0uYFhfGb%Qk|p@N);*s3 zXM||z3Ib6Q&|v$mQFa)xFV3a#2ToUB>W6XNo0n2EQO^C?Ekpwki`IN||k%$VV;$D*PluUofn zzUmrSsex=+r7L4HSvp^R+Z6ePb5ACn$7u+}Okhwd(!rGE^)7C^Eo7B1G+ zkIt6z{9tE&!!Zb$O5l1Cwymk+&;@~&1a_0J`t-=eqEEI?6#L#paRq47D1fF{d?ThQ z78i)6nQFCxjQ?9PjRdJCU3ut2q(b+r`L~j0PvYKwu35EygeOmR0K>uXdJPE6K-CU%3^$70`#>1wV+#OiwuNEIu}JEHO&E0iJ<0@)+*s<^zp z*u&X#-42flRM;_3VNxq}Rf@jRaJnp8w(J&hP*-4BTXZj{apT7M(r%G7 zD{XS^?O)}+yuKNyKmY;}6Ih;F@lDr=drhW0NxAx>^};3hCo>wOKmY<6AkbcXbmgI6 z2Bs_XE;bedx|f@&MfbeXJwjY78;V1}61zm~S+QASUx~dWc86HG*a>3$i)miJKHTdf zrdbdg$XI0?$@g1}?J1@KQca^DE%v6E9@d~)DEd(FG%>B`T2XHO#g@9}82&)Oasv80 zd9K(#VpA;lC1&{-RKF^J{nK3wu);+Ss)%{2BkcZp>8o$R{@#^#x>`chRh=!eN?+gj z=*kRhiv}Q$KKf{{dGqG=`XCScy$2q6z?(B?PQ7&vtWY8QKy5Z?(4b8IXYs|Kj_X=n zTr@%ErNqFwK@tS4Ca|-p4OR>Mbp76_aqPTp#OqxwKo z9}H1yC$O2^`n>kr+<|~`1UiT!U0rW#ku?S0Lt=-UDiB?%5U7w*`-z3w1tS&V1q`Zz zy50rUrz!+YAn?A3I9BXK6U3mQZURMh-{!7C1O|uD(p}mf5Td;mO)DxY^uzKuR*0=d zWFeUpvc;9R^Ul_8+1a4DxcIzUFgSmC)9df}Qk?v!^Nt$e;B#9vIN6M~00zLwpoB(? zmU>gCb+O8GSw-+=*`mewS|u8>5P$##AOL}E5zs|{$BXGUNfZdIA}~n4aXCn_%1^#R zz!n0!HQ(i8y~GGoSTbt_DlC~Ok@<$$3nAp{ev`WL!`X8B>8CH1hxdQC-N6YHCV2Db z&-dlVW}9v1wQj9hwpNytZo28Fj#es z6w$s0=nVo8h@HUiuD$7M)&ls$T%HfJlO|9tGO{dn%(y*kYL^le|WLc3~k-IOG&f_!$VkTfyI)$vulr)MZq*=xM znxK0KKmY;|fIvh9rb$2gxq4ni{n3q`1RfOGnf)wl=agBZ`l@N)Y|mPNFq5Twg8or# z53vuzbVS>92oR`9hkMXbH6r+Y(8khkh?vu<6-OR<GI4pk4y9=F7WNY)dg+ zw5&f1(&WNgnN?d)#U3|qoG($998z)GN$<3% zZ!JJ0IbbBS2Uc{w_O>r1ba=n7Nv9G7AW#s2#tW8uQ@-7_pf)K~o>JPp1=$oSfM-Yu zKmY;|fIvtBFG>4fi|Il+N_Gh7;?cv!nD^|=Ii8wH;4P80m)LP)KZ;Q-CP1LVzR7!2 zB!|hQ&<*jOp7#L*22_ZTdQy(n<@3)!&qOAL1X>1+7%`%Q)sq!Me7-7ACPfH-G=)H2 z1e%RoYI-Kc#opEr^5M3wN#vdn0*Bvo+1B-~1@O@gRUiNX2tXhVfv2T$e=+?mN^zfn zJ{Z+)zZSahDjq_>SOTAkG7T2)Cia@KDiKx?0jD2W7evCY%3-o9wi8!>WtY2!tCg&Z zQIeG~sqmKOw9u>yzvok^>h9gUnO1=gwZccI|xCHLy~qQ!A?T`Er?)Dg);QNf5A}K+~~Hy-FhwsmNbKN`>_= zFC2gX1Rwwb2q*!4PQQ7 z^aEej8B@4ALnTu=6a z`m>%)+$UIl9qnPev&+ni>aJ3MquyI)APSF zlj5=vjErdd<(FT2GiT29rKMxXj@D*UNFeEy8*jXELtlwmb_4!VX*5J;Us zlktm_$)qTmH1Ga3)&kV;y6pLODcKbDbD{J~al}w5| zMWxkm4l{H}#d;+rC7I2n=s4`wPvy1xkomt=D1?B01e(lWYS1ASxv8z2H@m|=iYSHv z1Rwwb2&6}#;XNZSFK{hDdSZVb`4joJm)Mw}FY%pa1awQ@GsN_BDvQc2n>^%NOW=J` z{(G^_#fFP55u;>;0D%g-Cpv6a#Sn47#BKz2t6!N_acSMP%)eVmR>dZpY~rVEr2zF zit>EGBN-B&-L!@)4>7QYD31!!-ioGWWo4RLG0_UKb%=cT-FNG_yXGyM52=vUinG`B zCObcEa{VY>diAgDc#NkINRGhr)Qag{lN+$+k-lzSn!BUsTU>UrW~)h;1VFTd~*0C>bVTO@t!Db5x(}nojX- zeU<9Cuc05O(9>xVPc)dbo-O< z#V8PP`xNC2&2M=KUpJGsdK#3|rA?bQcS*3~bK9L9J9eyZ7rJ-X1t3Jg4mddYNxAyb_1qA{SfPl>eF1o$pcdP}lc?Qv*Hh~XC zKo7Co#PVs2Vf-}+%$A>y6dNE$=F1vr_yGc;3FzOuH^c^sZ7B9jvHyta0WcH@{6v6^ zil0ze{momVK1^1{MbbXKWu*zA*Q^Q|=CTBaigk#|lPCMms_4+6gSX*^`uE?;a_Wc? 
zBidQ1JOa^Gx$1K1hOCMRSkeUq)+W$od^(vFxtw=785C>J20udp0uX=z1TsTlfq42y zu>-}XWyWi>9eq{g>4x}hw`=x1Wk}sSqZU5o+*$x#FswVd=pt#&RJvJA4}qaTphf}& zDr&@H<;C(4s&%98V?(sJqN!w6+#^CivqEegA`3a6qG!*Zb@XKJE!h&Ksi-^0T!rx} z+w|HyKF+OJT@dd;^bUax5@<4au{ZVm#Iq{CTUgnAbOv$bG6Wz10SG_<0&5a@Mt*y`-FgoxNiY=oFTVWmI-0>KC@k!DYcoh-JIm@bvQ zRE)oA0)bK(0iqR!@iM)3c?f|WrS0!Sw6~&ZX=&*{MX2@Z6z{yFQEGpsV@I7@`--Mh zh@EYp@@N!y{l{$|<@4U7(ey?y5P-ny1T>vuN^0^3xlkH9pIRYH^e+ht$dq>9eDjSr zefo4?avC*iEm8z?&PrKoU8`94UNeNC-EGQ=%g|F z+$Y^mh@tC5yY!n?F>l_yM0G_um}XU!l$7Xyewi$thK(LCj{AQ@ok>bK4S`?;n#^64 zL{`Q0#Z}F23#O1(5P$##AOHafWQc&CD*IU%~PwZR904cUw!peT~&;~C&7x-Go4jYR5U~;DQNJ~ z`1wU31kxlBdsan0e>vF{Y0fZX$3TGpk79rtQLEO(+;Q&v`Xzx4WP^gSorQk3Ire!F@b0^DbD)p&o9)v7GV0)l7{-`pHeG<^8Dah z?{ULq0wz55(I-)JO@?HMHeI4k#KMrtKt{a;bT!$mdTV8?!envxG_lTNmx=u#MyZ{^ z9JzJ5nDQDV#%B2fA);c!|NRlUlDTele>r^+_AfDA1=(M08?k0$JBbYtyIo8VozTC- z6bPh3ASWC3r!p{u#!lcV8KGamQ6%?bd2XcB%Qi-i9C@_7{_hRQ$e3m=TekG}+;dNF zqm48rZ$+6uf4=wDTW@(^e);99OSXSqP*haZdBA`HGi-Mxi2f<(oV1f1)bXCQ%>`*s z8wfz4b^?o9Hu2i^o9q?mB7Q%f&z)7;>nYuAngRir2s9UGbeV6f6rIu!*hi)~zD+~F zBx3v6Bp9ukCGdoJen?H;7ajSY{LXjs`t|K|L{0ZIdQG=JP#*dae%FjLsLAb#a&frW zZDM*T1tod{M@yI1KSPNgi!nXwGrUP+lh^vHzZ<&mxu#H1AYe2B0u@G2PbhKSr1d*u zK_6`XN!s2JN`c{xhYT4~wEp_*Yo6P7hD-G&uW8e!UW*njyroN*db4NG_FM}>%HB_x zoP6@h7y5$XV5LcCpYXV>g*@0n*Ki2}sS}vBPYbW}`kzIpBlrE%;*u^KbbD-3gq_kA z1WX}txCqho)RGhgO_4G86#F`bv27f33laUQ*!nh#M{{Nf=)NNQd#w-GDEEaWfr=ngX-}K5SrxjWqtg#rB&*^md7!T!wkhTlm?!%{ zbw2UL6K9z(HF||p&N^Y|YR`LDde%3J6bL{dLIMj*n|kf{`^qcI$!}RS<($%u+WSYmxGd3;@`_+`oLU+6q)=?D1N6^^tJX|Fn!7)CQ3WT+a zZml$bwQHKau=-})n(d&$2femnrOlDA+D3oYuWEWtgA)2Tk^+HD5Fj|=_B3}B_udf; z`X9$x(pEnNI$K=Jq;PhkRyrnuq@kytdg@@;{jANuNoO7Zg#0=mRNH;7uOx8ZjgPc7 zZY=<_V-tZ{eI%=*eI!{GGpZIX>D;#W%ld(X0s#m>00Izz00fdCpbwIJiv3bdv#_@j z3;Ikzni^4b(c~*)kBez=g0ndU0tEsPfPiZR2voQ>qwD$;CX+(b#=43*ok=lb#EAal zqprrFm``Ay1S>j8up*NyD8D@8xNdSjN0{KF`7?|{2&6Cp>KozT;KUlDso zY>b#LL#IFh0uX?J!vr+kPH~&Sh2mb&%^KTF+f&`f<9Vl?a>_&EqrM5Em`|XE98z(H z`7+~C*!3TGyeGfR`bM1s0SH7)py{lI-qas7Kp(2is9LymWT+l!3;_s000Izz00hDl z(9P?0MV@}1-%6}h>=$DHC#Go$Ux;ZiAk8va2A8yPZ;04_Vy(q?7CT$)IWYng0)dhR z0+$`wav#%iRn*VOg&I$a69)!#WQMfsBBq<cAq3VUu%LBQZ-f1VA5tMp6VGei^XaSB>XsiN009U<00IzzfQbb3 zaG}k_wp{sjBl)`VO50Gr%9g63K67ZGK@T38yqZmt?C6+K z_Hy|H43dNt2slGv_P))$PVFWJ@n>drMZ?Z*c7N3AzOZw&X4SHWH@V|Io}4-{Cuw7p7Y+DdyhJ6)mni3wPaJ&i;M~ofB*y_009U<00Izz00bZafprKFs8|Py z^B2NoQnVC0XSh!2)KgD=RD68Lbw3^Pue7A3#QB3$YMZOB{}}a#m&?6a+kLJ>00O}Y zw3xBLn=-TBLn>zFD;nJpd=y$j00Izz00baVD}gKiK4Pa;YXNGl#0>~Q00Izz00dki zK%l~vne^{R*sO{(WvEjB!&zBVvnnpJQh5eN>-iL;MvZEoLD!eNTwRsFD678XHw5A) zP&_VIXI4et%aK75_ax9i1Rwwb2tWV=5P$##AOHafL`qr0SGut;J;m~K4L9^v(t-X5P$##AOHafK)?tB1S*V}hEzm^&8pDTwA-bkJi$Tb zkcy!R4#!X-2uKFZxmnJtkS+9wguq8r2tc3?0>$Ht*UY5I=g6R_gBQ0T009U<00Izz z00bZa0bc~l9&b8=wE(_QQ7H=q2vlTYmTUJhOjbox>Gtf}yLRJ_oKNwj1S|gMhBwZ8 zp;;BDIPYaG4t5$g`W3nLYAv_93;_s)AkbX0D!!i8byW{@iz|xArdUOTuMmI$1Rwwb z2tWV=5P$##AOL}Y1PD$91m{RI&7^oDgfFK_Tjw(=#79=A2YHe7UvJ6G6xRYsR>gTY z-+Z%fP3BH1&uc)@f2sC7(t!Nqq9z0iB2Y9=52=td>pkzfmU}!gyP(EYh5!U0009U< z00Izz00bZa0SG`~9Rfrv)eJp|6Z^Ul{<3s4WFAh*l&gM&1q4Fn(n0SG_<0uX=z1Rwwb2-r-3XobzQ zhjs`+00Izz00bZa0SG_<0uX?JO9VdYfBTNC1#oFb@dW}9fB*y_;EMo(3SXG01OW&@ z00Izz00bZa0SG_<0uX=z1Rwwb2tWV=5U`WLxpUvDGI=ckfeJfcSEz;n1Rwwb2tWV= z5P$##AOHafKmY;|fB*y_009VCOMu{nwNr<12tWV=5crwEOOKrL2b0$V{H&DkAOHaf zKmY;|fB*y_009U<00Izz00h<~K%inx9{dCW2tWV=5P$##AOHafKmY=(2yA@((Fd~@ zU=@w1zn13*N8Kg;K>z{}fB*y_00D~$5UsFy@{kSz2tWV=5P$##AOHafKmY;|fB*y_ z009VCMWDF<=G|BeVAYf%HVFa*Dw3eg7!ZH}1Rwwb2tWV=5P$##AOHafKmY;|fB*y_ z009X6M1bJLPbl~X0uYFVz_C|)@30mi5|nfT0SG_<0uX=z1Rwwb2tWV=5P$##AOL|Z 
z5Fk*Ig;~Z&2tWV=5P$##AOHdD3B2^L%?7X*!1`�SG_<0uX=z1Rwwb2tc3)0t6~* zfWjpRKmY;|fB*y_009U<00Izz00bZaflLs1?z9s(Vl6e+5`wxq&)+S4*>{300Izz00bZa0SG_<0uX?J z5ZLduCmv)i051gyKmY;|fIt=r5U9xFtm88TAOHafKmY;|fB*y_009U<00Izz00bZa z0SIJ+!1L#9wwvK=0h|a_l;;O$;{}D65P$##AOHafKmY;|fB*y_009U<00Izz00bZa z0apk(5utEpGVlWe5P$##3MKHTgT_dcPF}yheU2!!E%hM)0SG_<0uX=z1Rwwb2tWV= z5P$##Odvp@!h~5s0|X!d0SG_<0uX=z1RwwblL*}N{Wj;a7Qmz#K??*R009U<00Izj zngD?cr>7U^AOHafKmY;|fB*y_009U<00Izz00bZa0aFMx={lqhYXMA|4Rn}9fIx*w zvw;=}KmY;|fB*y_009U<00Izz00bZa0SG_<0uX>e1_%(G$iO7yA_S5nuztJKTC)}) zA>QTr!3mATFc5$M1Rwwb2tWV=5P$##AOHafKmY;|h?@Y>ins~WKLj8E0SG_<0uX=z z1VRwlvwZl+tOW=GkfsoT00bZa0SG_<0uX?J@dOA|7(Yb_ga8B}009U<00Izz00bZa z0SG_<0uYFpz*ZML`YCGxB8EzL5P$##f)XH55tJP5AOHafKmY;|fB*y_009U<00Izz z00bZa0SG_<0uTs9fZ#+RY!)>7SMxC=SPNjmL?ICZ5P$##AOHafKmY;|fB*y_009U< z00IzzK(+}GsL1yG<2?i*009U<00IRPIP8&@V_6GOAhb}`%kzUnwWl!zAOHafKmY;| zfB*y_009VCL4at571M-B2tWV=5P$##AOHafKmY;|fPkw6#?P9zBWnR%on8Eb00bZa z0SNdZK%l}8CF(!`0uX=z1Rwwb2tWV=5P$##AOHafKmY;|fB*z+A~1CP(I;857Jxv7 zO)o68LI45~fB*y_009U<00Izz00bZa0SG_<0uX=z1RNkhaKeE}#03aI00J2$@ZFMa zkFjJeKt^W>w;=!l2tWV=5P$##AOHafKmY;|fPfDI1S)($q6!2c009U<00Izz00bZa zf$R|2@c4^IuofWk`7X~7PJB!Tga8B}009U<00JfvAX;J4Y@h`K5P$##AOHafKmY;| zfB*y_009U<00Izjj==Os58slt0M1P-4n;_SKt+Tw=?Vf6fB*y_009U<00Izz00bZa z0SG_<0uX=z1Rwx``Uw!6sGkuvAOL}S30%><;eS~RP%kDbKmY;|fB*y_009U<00Izz z00bZa0SG_<0_zYUP_YgYEih7XlD~00fL7u+=43jkISifHBw*0s#m>00Izz z00bZa0SG_<0uX=z1k5Brpu)^~K@|ib009U<00Izz00bZafrJS(UiRb{tOZCII>SK# z0uX=z1Rwwb2nYcJ6}$`}009U<00Izz00bZa0SG_<0uX=z1Rwwb2*gF;{t+h+^St7Y m{rdJfBCav$7Xp?Oc>nYMyH Date: Wed, 29 Oct 2025 19:22:18 +0100 Subject: [PATCH 125/145] Fix logo to change between darkmode and lightmode on GitHub page (#57) --- README.md | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index a964da5b..25913fa2 100644 --- a/README.md +++ b/README.md @@ -1,4 +1,8 @@

 <div align="center">
-    <img src="…" alt="Logo">
-    <h1>123D: One Library for 2D and 3D Driving Dataset</h1>
+    <picture>
+      <source media="(prefers-color-scheme: dark)" srcset="…">
+      <source media="(prefers-color-scheme: light)" srcset="…">
+      <img alt="YOUR-ALT-TEXT" src="…">
+    </picture>
+    <h1>123D: One Library for 2D and 3D Driving Dataset</h1>
 

      From d4bff4bcabaf1ac11649c695d0dc78fb7bd36499 Mon Sep 17 00:00:00 2001 From: sephyli Date: Thu, 30 Oct 2025 10:49:20 +0800 Subject: [PATCH 126/145] add nuscenes dataset --- .../conversion/datasets/nuscenes/.gitkeep | 0 .../nuscenes/nuscenes_data_converter.py | 671 +++++++++++++++++ .../nuscenes/nuscenes_map_conversion.py | 708 ++++++++++++++++++ .../datatypes/detections/box_detections.py | 4 +- .../datatypes/maps/abstract_map_objects.py | 4 +- .../datatypes/maps/gpkg/gpkg_map_objects.py | 37 +- src/py123d/datatypes/maps/gpkg/gpkg_utils.py | 23 +- .../datatypes/scene/arrow/arrow_scene.py | 9 +- .../scene/arrow/utils/arrow_getters.py | 1 + .../datatypes/sensors/lidar/lidar_index.py | 9 + .../vehicle_state/vehicle_parameters.py | 10 + .../config/common/default_dataset_paths.yaml | 4 +- .../conversion/datasets/nuscenes_dataset.yaml | 35 + 13 files changed, 1497 insertions(+), 18 deletions(-) delete mode 100644 src/py123d/conversion/datasets/nuscenes/.gitkeep create mode 100644 src/py123d/conversion/datasets/nuscenes/nuscenes_data_converter.py create mode 100644 src/py123d/conversion/datasets/nuscenes/nuscenes_map_conversion.py create mode 100644 src/py123d/script/config/conversion/datasets/nuscenes_dataset.yaml diff --git a/src/py123d/conversion/datasets/nuscenes/.gitkeep b/src/py123d/conversion/datasets/nuscenes/.gitkeep deleted file mode 100644 index e69de29b..00000000 diff --git a/src/py123d/conversion/datasets/nuscenes/nuscenes_data_converter.py b/src/py123d/conversion/datasets/nuscenes/nuscenes_data_converter.py new file mode 100644 index 00000000..eb743fb0 --- /dev/null +++ b/src/py123d/conversion/datasets/nuscenes/nuscenes_data_converter.py @@ -0,0 +1,671 @@ +import gc +import json +import os +import numpy as np +import pyarrow as pa + +from dataclasses import asdict +from pathlib import Path +from typing import Any, Dict, Final, List, Optional, Tuple, Union +from nuscenes import NuScenes +from nuscenes.can_bus.can_bus_api import NuScenesCanBus +from nuscenes.utils.data_classes import Box +from nuscenes.utils.splits import create_splits_scenes +from pyquaternion import Quaternion + +from py123d.conversion.log_writer.abstract_log_writer import AbstractLogWriter +from py123d.conversion.map_writer.abstract_map_writer import AbstractMapWriter +from py123d.script.builders.worker_pool_builder import WorkerPool +from py123d.datatypes.detections.detection import ( + BoxDetectionSE3, + BoxDetectionWrapper, + BoxDetectionMetadata, + TrafficLightDetection, + TrafficLightDetectionWrapper, +) +from py123d.datatypes.detections.detection_types import DetectionType +from py123d.datatypes.maps.map_metadata import MapMetadata +from py123d.datatypes.scene.scene_metadata import LogMetadata +from py123d.datatypes.sensors.camera.pinhole_camera import ( + PinholeCameraMetadata, + PinholeCameraType, + PinholeDistortion, + PinholeIntrinsics, +) +from py123d.datatypes.sensors.lidar.lidar import LiDARMetadata, LiDARType +from py123d.conversion.utils.sensor_utils.lidar_index_registry import NuscenesLidarIndex + +from py123d.datatypes.vehicle_state.ego_state import DynamicStateSE3, EgoStateSE3, EgoStateSE3Index +from py123d.datatypes.vehicle_state.vehicle_parameters import get_nuscenes_renauly_zoe_parameters +from py123d.geometry import StateSE3, BoundingBoxSE3, BoundingBoxSE3Index +from py123d.geometry.vector import Vector3D, Vector3DIndex +from py123d.common.utils.arrow_helper import open_arrow_table, write_arrow_table +from py123d.conversion.datasets.nuscenes.nuscenes_map_conversion import 
write_nuscenes_map, NUSCENES_MAPS +from py123d.conversion.abstract_dataset_converter import AbstractDatasetConverter +from py123d.conversion.dataset_converter_config import DatasetConverterConfig +from py123d.datatypes.time.time_point import TimePoint + +TARGET_DT: Final[float] = 0.1 +NUSCENES_DT: Final[float] = 0.5 +SORT_BY_TIMESTAMP: Final[bool] = True +NUSCENES_DETECTION_NAME_DICT = { + # Vehicles (4+ wheels) + "vehicle.car": DetectionType.VEHICLE, + "vehicle.truck": DetectionType.VEHICLE, + "vehicle.bus.bendy": DetectionType.VEHICLE, + "vehicle.bus.rigid": DetectionType.VEHICLE, + "vehicle.construction": DetectionType.VEHICLE, + "vehicle.emergency.ambulance": DetectionType.VEHICLE, + "vehicle.emergency.police": DetectionType.VEHICLE, + "vehicle.trailer": DetectionType.VEHICLE, + + # Bicycles / Motorcycles + "vehicle.bicycle": DetectionType.BICYCLE, + "vehicle.motorcycle": DetectionType.BICYCLE, + + # Pedestrians (all subtypes) + "human.pedestrian.adult": DetectionType.PEDESTRIAN, + "human.pedestrian.child": DetectionType.PEDESTRIAN, + "human.pedestrian.construction_worker": DetectionType.PEDESTRIAN, + "human.pedestrian.personal_mobility": DetectionType.PEDESTRIAN, + "human.pedestrian.police_officer": DetectionType.PEDESTRIAN, + "human.pedestrian.stroller": DetectionType.PEDESTRIAN, + "human.pedestrian.wheelchair": DetectionType.PEDESTRIAN, + + # Traffic cone / barrier + "movable_object.trafficcone": DetectionType.TRAFFIC_CONE, + "movable_object.barrier": DetectionType.BARRIER, + + # Generic objects + "movable_object.pushable_pullable": DetectionType.GENERIC_OBJECT, + "movable_object.debris": DetectionType.GENERIC_OBJECT, + "static_object.bicycle_rack": DetectionType.GENERIC_OBJECT, + "animal": DetectionType.GENERIC_OBJECT, +} + +NUSCENES_CAMERA_TYPES = { + PinholeCameraType.CAM_F0: "CAM_FRONT", + PinholeCameraType.CAM_B0: "CAM_BACK", + PinholeCameraType.CAM_L0: "CAM_FRONT_LEFT", + PinholeCameraType.CAM_L1: "CAM_BACK_LEFT", + PinholeCameraType.CAM_R0: "CAM_FRONT_RIGHT", + PinholeCameraType.CAM_R1: "CAM_BACK_RIGHT", +} +NUSCENES_DATA_ROOT = Path(os.environ["NUSCENES_DATA_ROOT"]) + + +class NuScenesDataConverter(AbstractDatasetConverter): + def __init__( + self, + splits: List[str], + nuscenes_data_root: Union[Path, str], + nuscenes_map_root: Union[Path, str], + nuscenes_lanelet2_root: Union[Path, str], + use_lanelet2: bool, + dataset_converter_config: DatasetConverterConfig, + version: str = "v1.0-trainval", + ) -> None: + super().__init__(dataset_converter_config) + self._splits: List[str] = splits + self._nuscenes_data_root: Path = Path(nuscenes_data_root) + self._nuscenes_map_root: Path = Path(nuscenes_map_root) + self._nuscenes_lanelet2_root: Path = Path(nuscenes_lanelet2_root) + self._use_lanelet2 = use_lanelet2 + self._version = version + self._scene_tokens_per_split: Dict[str, List[str]] = self._collect_scene_tokens() + self._target_dt: float = TARGET_DT + + def _collect_scene_tokens(self) -> Dict[str, List[str]]: + scene_tokens_per_split: Dict[str, List[str]] = {} + nusc = NuScenes(version=self._version, dataroot=str(self._nuscenes_data_root), verbose=False) + + scene_splits = create_splits_scenes() + available_scenes = [scene for scene in nusc.scene] + + for split in self._splits: + # Map the split name to the division of nuScenes + nusc_split = split.replace("nuscenes_", "") + if nusc_split == "trainval": + scene_names = scene_splits['train'] + scene_splits['val'] + else: + scene_names = scene_splits.get(nusc_split, []) + + # get token + scene_tokens = [ + scene['token'] for 
scene in available_scenes + if scene['name'] in scene_names + ] + scene_tokens_per_split[split] = scene_tokens + + return scene_tokens_per_split + + def get_available_splits(self) -> List[str]: + return [ + "nuscenes_train", + "nuscenes_val", + "nuscenes_test", + "nuscenes_mini_train", + "nuscenes_mini_val", + ] + + def get_number_of_maps(self) -> int: + """Inherited, see superclass.""" + return len(NUSCENES_MAPS) + + def get_number_of_logs(self) -> int: + """Inherited, see superclass.""" + return sum(len(scene_tokens) for scene_tokens in self._scene_tokens_per_split.values()) + + def convert_map(self, map_index: int, map_writer: AbstractMapWriter) -> None: + """Inherited, see superclass.""" + map_name = NUSCENES_MAPS[map_index] + + map_metadata = _get_nuscenes_map_metadata(map_name) + map_needs_writing = map_writer.reset(self.dataset_converter_config, map_metadata) + + if map_needs_writing: + write_nuscenes_map( + nuscenes_maps_root=self._nuscenes_map_root, + location=map_name, + map_writer=map_writer, + use_lanelet2=self._use_lanelet2, + lanelet2_root=Path(self._nuscenes_lanelet2_root), + ) + + map_writer.close() + + def convert_log(self, log_index: int, log_writer: AbstractLogWriter) -> None: + """Inherited, see superclass.""" + # Find the scene token for the given log index + all_scene_tokens = [] + for split, scene_tokens in self._scene_tokens_per_split.items(): + all_scene_tokens.extend([(split, token) for token in scene_tokens]) + + if log_index >= len(all_scene_tokens): + raise ValueError(f"Log index {log_index} is out of range. Total logs: {len(all_scene_tokens)}") + + split, scene_token = all_scene_tokens[log_index] + + nusc = NuScenes(version=self._version, dataroot=str(self._nuscenes_data_root), verbose=False) + scene = nusc.get("scene", scene_token) + log_record = nusc.get("log", scene["log_token"]) + + # 1. Initialize log metadata + log_metadata = LogMetadata( + dataset="nuscenes", + split=split, + log_name=scene["name"], + location=log_record["location"], + timestep_seconds=TARGET_DT, + vehicle_parameters=get_nuscenes_renauly_zoe_parameters(), + camera_metadata=_get_nuscenes_camera_metadata(nusc, scene, self.dataset_converter_config), + lidar_metadata=_get_nuscenes_lidar_metadata(nusc, scene, self.dataset_converter_config), + map_metadata=_get_nuscenes_map_metadata(log_record["location"]), + ) + + # 2. 
Prepare log writer + log_needs_writing = log_writer.reset(self.dataset_converter_config, log_metadata) + + if log_needs_writing: + can_bus = NuScenesCanBus(dataroot=str(self._nuscenes_data_root)) + + step_interval = max(1, int(TARGET_DT / NUSCENES_DT)) + sample_count = 0 + + # Traverse all samples in the scene + sample_token = scene["first_sample_token"] + while sample_token: + if sample_count % step_interval == 0: + sample = nusc.get("sample", sample_token) + + log_writer.write( + timestamp=TimePoint.from_us(sample["timestamp"]), + ego_state=_extract_nuscenes_ego_state(nusc, sample, can_bus), + box_detections=_extract_nuscenes_box_detections(nusc, sample), + traffic_lights=_extract_nuscenes_traffic_lights(), # nuScenes doesn't have traffic lights + cameras=_extract_nuscenes_cameras( + nusc=nusc, + sample=sample, + dataset_converter_config=self.dataset_converter_config, + ), + lidars=_extract_nuscenes_lidars( + nusc=nusc, + sample=sample, + dataset_converter_config=self.dataset_converter_config, + ), + scenario_tags=_extract_nuscenes_scenario_tag(), # nuScenes doesn't have scenario tags + route_lane_group_ids=_extract_nuscenes_route_lane_group_ids(), + # nuScenes doesn't have route info + ) + + sample_token = sample["next"] + sample_count += 1 + + log_writer.close() + del nusc + gc.collect() + + def convert_logs(self, worker: WorkerPool) -> None: + """ + NuScenes logs conversion is handled externally through convert_log method. + This method is kept for interface compatibility. + """ + pass + + +def _get_nuscenes_camera_metadata( + nusc: NuScenes, + scene: Dict[str, Any], + dataset_converter_config: DatasetConverterConfig, +) -> Dict[PinholeCameraType, PinholeCameraMetadata]: + camera_metadata: Dict[PinholeCameraType, PinholeCameraMetadata] = {} + + if dataset_converter_config.include_cameras: + first_sample_token = scene["first_sample_token"] + first_sample = nusc.get("sample", first_sample_token) + + for camera_type, camera_channel in NUSCENES_CAMERA_TYPES.items(): + cam_token = first_sample["data"][camera_channel] + cam_data = nusc.get("sample_data", cam_token) + calib = nusc.get("calibrated_sensor", cam_data["calibrated_sensor_token"]) + + intrinsic_matrix = np.array(calib["camera_intrinsic"]) + intrinsic = PinholeIntrinsics.from_camera_matrix(intrinsic_matrix) + distortion = PinholeDistortion.from_array(np.zeros(5), copy=False) + + camera_metadata[camera_type] = PinholeCameraMetadata( + camera_type=camera_type, + width=cam_data["width"], + height=cam_data["height"], + intrinsics=intrinsic, + distortion=distortion, + ) + + return camera_metadata + + +def _get_nuscenes_lidar_metadata( + nusc: NuScenes, + scene: Dict[str, Any], + dataset_converter_config: DatasetConverterConfig, +) -> Dict[LiDARType, LiDARMetadata]: + metadata: Dict[LiDARType, LiDARMetadata] = {} + + if dataset_converter_config.include_lidars: + first_sample_token = scene["first_sample_token"] + first_sample = nusc.get("sample", first_sample_token) + lidar_token = first_sample["data"]["LIDAR_TOP"] + lidar_data = nusc.get("sample_data", lidar_token) + calib = nusc.get("calibrated_sensor", lidar_data["calibrated_sensor_token"]) + + translation = np.array(calib["translation"]) + rotation = Quaternion(calib["rotation"]).rotation_matrix + extrinsic = np.eye(4) + extrinsic[:3, :3] = rotation + extrinsic[:3, 3] = translation + extrinsic = StateSE3.from_transformation_matrix(extrinsic) + + metadata[LiDARType.LIDAR_MERGED] = LiDARMetadata( + lidar_type=LiDARType.LIDAR_MERGED, + lidar_index=NuscenesLidarIndex, + 
extrinsic=extrinsic,
+        )
+
+    return metadata
+
+
+def _get_nuscenes_map_metadata(location: str) -> MapMetadata:
+    return MapMetadata(
+        dataset="nuscenes",
+        split=None,
+        log_name=None,
+        location=location,
+        map_has_z=False,
+        map_is_local=False,
+    )
+
+
+def _extract_nuscenes_ego_state(nusc, sample, can_bus) -> EgoStateSE3:
+    lidar_data = nusc.get("sample_data", sample["data"]["LIDAR_TOP"])
+    ego_pose = nusc.get("ego_pose", lidar_data["ego_pose_token"])
+
+    quat = Quaternion(ego_pose["rotation"])
+
+    vehicle_parameters = get_nuscenes_renauly_zoe_parameters()
+    pose = StateSE3(
+        x=ego_pose["translation"][0],
+        y=ego_pose["translation"][1],
+        z=ego_pose["translation"][2],
+        qw=quat.w,
+        qx=quat.x,
+        qy=quat.y,
+        qz=quat.z,
+    )
+
+    scene_name = nusc.get("scene", sample["scene_token"])["name"]
+
+    try:
+        pose_msgs = can_bus.get_messages(scene_name, "pose")
+    except Exception:
+        # Some scenes have no CAN bus coverage; fall back to zero dynamics below.
+        pose_msgs = []
+
+    if pose_msgs:
+        # Pick the CAN pose message closest in time to the sample.
+        closest_msg = None
+        min_time_diff = float('inf')
+        for msg in pose_msgs:
+            time_diff = abs(msg["utime"] - sample["timestamp"])
+            if time_diff < min_time_diff:
+                min_time_diff = time_diff
+                closest_msg = msg
+
+        # Only accept the match if it is within 0.5 s (timestamps are in microseconds).
+        if closest_msg and min_time_diff < 500000:
+            velocity = [*closest_msg["vel"]]
+            acceleration = [*closest_msg["accel"]]
+            angular_velocity = [*closest_msg["rotation_rate"]]
+        else:
+            velocity = acceleration = angular_velocity = [0.0, 0.0, 0.0]
+    else:
+        velocity = acceleration = angular_velocity = [0.0, 0.0, 0.0]
+
+    dynamic_state = DynamicStateSE3(
+        velocity=Vector3D(*velocity),
+        acceleration=Vector3D(*acceleration),
+        angular_velocity=Vector3D(*angular_velocity),
+    )
+
+    return EgoStateSE3(
+        center_se3=pose,
+        dynamic_state_se3=dynamic_state,
+        vehicle_parameters=vehicle_parameters,
+        timepoint=TimePoint.from_us(sample["timestamp"]),
+    )
+
+
+def _extract_nuscenes_box_detections(
+    nusc: NuScenes,
+    sample: Dict[str, Any]
+) -> BoxDetectionWrapper:
+    box_detections: List[BoxDetectionSE3] = []
+
+    for ann_token in sample["anns"]:
+        ann = nusc.get("sample_annotation", ann_token)
+        box = Box(ann["translation"], ann["size"], Quaternion(ann["rotation"]))
+
+        # Create StateSE3 for box center and orientation
+        center_quat = box.orientation
+        center = StateSE3(
+            box.center[0],
+            box.center[1],
+            box.center[2],
+            center_quat.w,
+            center_quat.x,
+            center_quat.y,
+            center_quat.z,
+        )
+        # Box.wlh is (width, length, height); reorder to (length, width, height).
+        bounding_box = BoundingBoxSE3(center, box.wlh[1], box.wlh[0], box.wlh[2])
+        # Get detection type
+        category = ann["category_name"]
+        det_type = None
+        for key, value in NUSCENES_DETECTION_NAME_DICT.items():
+            if category.startswith(key):
+                det_type = value
+                break
+
+        if det_type is None:
+            print(f"Warning: Unmapped nuScenes category: {category}, skipping")
+            continue
+
+        # Get velocity if available
+        velocity = nusc.box_velocity(ann_token)
+        velocity_3d = Vector3D(x=velocity[0], y=velocity[1], z=velocity[2] if len(velocity) > 2 else 0.0)
+
+        metadata = BoxDetectionMetadata(
+            detection_type=det_type,
+            track_token=ann["instance_token"],
+            timepoint=TimePoint.from_us(sample["timestamp"]),
+            confidence=1.0,  # nuScenes annotations are ground truth
+            num_lidar_points=ann.get("num_lidar_pts", 0),
+        )
+
+        box_detection = BoxDetectionSE3(
+            metadata=metadata,
+            bounding_box_se3=bounding_box,
+            velocity=velocity_3d,
+        )
+        box_detections.append(box_detection)
+
+    return BoxDetectionWrapper(box_detections=box_detections)
+
+
+def _extract_nuscenes_traffic_lights() -> TrafficLightDetectionWrapper:
+    """nuScenes doesn't have traffic 
light information.""" + return TrafficLightDetectionWrapper(traffic_light_detections=[]) + + +def _extract_nuscenes_cameras( + nusc: NuScenes, + sample: Dict[str, Any], + dataset_converter_config: DatasetConverterConfig, +) -> Dict[PinholeCameraType, Tuple[Union[str, bytes], StateSE3]]: + camera_dict: Dict[PinholeCameraType, Tuple[Union[str, bytes], StateSE3]] = {} + + if dataset_converter_config.include_cameras: + for camera_type, camera_channel in NUSCENES_CAMERA_TYPES.items(): + cam_token = sample["data"][camera_channel] + cam_data = nusc.get("sample_data", cam_token) + + # Check timestamp synchronization (within 100ms) + if abs(cam_data["timestamp"] - sample["timestamp"]) > 100000: + continue + + calib = nusc.get("calibrated_sensor", cam_data["calibrated_sensor_token"]) + + translation = np.array(calib["translation"]) + rotation = Quaternion(calib["rotation"]).rotation_matrix + extrinsic_matrix = np.eye(4) + extrinsic_matrix[:3, :3] = rotation + extrinsic_matrix[:3, 3] = translation + extrinsic = StateSE3.from_transformation_matrix(extrinsic_matrix) + + cam_path = NUSCENES_DATA_ROOT / cam_data["filename"] + + if cam_path.exists() and cam_path.is_file(): + if dataset_converter_config.camera_store_option == "path": + camera_data = str(cam_path) + elif dataset_converter_config.camera_store_option == "binary": + with open(cam_path, "rb") as f: + camera_data = f.read() + else: + continue + + camera_dict[camera_type] = (camera_data, extrinsic) + + return camera_dict + + +def _extract_nuscenes_lidars( + nusc: NuScenes, + sample: Dict[str, Any], + dataset_converter_config: DatasetConverterConfig, +) -> Dict[LiDARType, Optional[str]]: + lidar_dict: Dict[LiDARType, Optional[str]] = {} + + if dataset_converter_config.include_lidars: + lidar_token = sample["data"]["LIDAR_TOP"] + lidar_data = nusc.get("sample_data", lidar_token) + lidar_path = NUSCENES_DATA_ROOT / lidar_data["filename"] + + if lidar_path.exists() and lidar_path.is_file(): + lidar_dict[LiDARType.LIDAR_MERGED] = str(lidar_path) + else: + lidar_dict[LiDARType.LIDAR_MERGED] = None + + return lidar_dict + + +def _extract_nuscenes_scenario_tag() -> List[str]: + """nuScenes doesn't have scenario tags.""" + return ["unknown"] + + +def _extract_nuscenes_route_lane_group_ids() -> List[int]: + """nuScenes doesn't have route lane group information.""" + return [] + + +# Updated arrow conversion function using the new extraction functions +def convert_nuscenes_log_to_arrow( + args: List[Dict[str, Union[str, List[str]]]], + dataset_converter_config: DatasetConverterConfig, + version: str +) -> List[Any]: + for log_info in args: + scene_token: str = log_info["scene_token"] + split: str = log_info["split"] + + nusc = NuScenes(version=version, dataroot=str(NUSCENES_DATA_ROOT), verbose=False) + scene = nusc.get("scene", scene_token) + + log_file_path = dataset_converter_config.output_path / split / f"{scene_token}.arrow" + + if dataset_converter_config.force_log_conversion or not log_file_path.exists(): + if not log_file_path.parent.exists(): + log_file_path.parent.mkdir(parents=True, exist_ok=True) + + # Define schema + schema_column_list = [ + ("token", pa.string()), + ("timestamp", pa.int64()), + ("ego_state", pa.list_(pa.float64(), len(EgoStateSE3Index))), + ("detections_state", pa.list_(pa.list_(pa.float64(), len(BoundingBoxSE3Index)))), + ("detections_velocity", pa.list_(pa.list_(pa.float64(), len(Vector3DIndex)))), + ("detections_token", pa.list_(pa.string())), + ("detections_type", pa.list_(pa.int16())), + ("traffic_light_ids", 
pa.list_(pa.int64())),
+                ("traffic_light_types", pa.list_(pa.int16())),
+                ("scenario_tag", pa.list_(pa.string())),
+                ("route_lane_group_ids", pa.list_(pa.int64())),
+            ]
+
+            if dataset_converter_config.lidar_store_option == "path":
+                schema_column_list.append(("lidar", pa.string()))
+
+            if dataset_converter_config.camera_store_option == "path":
+                for camera_type in NUSCENES_CAMERA_TYPES.keys():
+                    schema_column_list.append((camera_type.serialize(), pa.string()))
+                    schema_column_list.append((f"{camera_type.serialize()}_extrinsic", pa.list_(pa.float64(), 4 * 4)))
+
+            recording_schema = pa.schema(schema_column_list)
+
+            log_record = nusc.get("log", scene["log_token"])
+            location = log_record["location"]
+
+            # Create metadata using the same functions as the new interface
+            metadata = LogMetadata(
+                dataset="nuscenes",
+                split=split,
+                log_name=scene["name"],
+                location=location,
+                timestep_seconds=TARGET_DT,
+                vehicle_parameters=get_nuscenes_renauly_zoe_parameters(),
+                camera_metadata=_get_nuscenes_camera_metadata(nusc, scene, dataset_converter_config),
+                lidar_metadata=_get_nuscenes_lidar_metadata(nusc, scene, dataset_converter_config),
+                map_metadata=_get_nuscenes_map_metadata(location),
+            )
+
+            recording_schema = recording_schema.with_metadata(
+                {
+                    "log_metadata": json.dumps(asdict(metadata)),
+                }
+            )
+
+            _write_arrow_table_with_new_interface(
+                nusc, scene, recording_schema, log_file_path, dataset_converter_config
+            )
+
+        del nusc
+        gc.collect()
+
+    return []
+
+
+def _write_arrow_table_with_new_interface(
+    nusc: NuScenes,
+    scene: Dict[str, Any],
+    recording_schema: pa.schema,
+    log_file_path: Path,
+    dataset_converter_config: DatasetConverterConfig,
+) -> None:
+    can_bus = NuScenesCanBus(dataroot=str(NUSCENES_DATA_ROOT))
+
+    with pa.OSFile(str(log_file_path), "wb") as sink:
+        with pa.ipc.new_file(sink, recording_schema) as writer:
+            step_interval = max(1, int(TARGET_DT / NUSCENES_DT))
+            sample_count = 0
+
+            sample_token = scene["first_sample_token"]
+            while sample_token:
+                # Fetch the sample up front so the token chain is advanced correctly
+                # even on iterations that are skipped by the subsampling below.
+                sample = nusc.get("sample", sample_token)
+
+                if sample_count % step_interval == 0:
+                    # Use the new extraction functions for consistency
+                    ego_state = _extract_nuscenes_ego_state(nusc, sample, can_bus)
+                    box_detections = _extract_nuscenes_box_detections(nusc, sample)
+                    cameras = _extract_nuscenes_cameras(nusc, sample, dataset_converter_config)
+                    lidars = _extract_nuscenes_lidars(nusc, sample, dataset_converter_config)
+
+                    # Flatten matrix-shaped bounding-box arrays into flat float lists for Arrow.
+                    detections_state_list = []
+                    for det in box_detections.box_detections:
+                        bbox_array = det.bounding_box_se3.array
+                        if bbox_array.ndim > 1:
+                            detections_state_list.append(bbox_array.flatten().tolist())
+                        else:
+                            detections_state_list.append(bbox_array.tolist())
+
+                    # Prepare row data
+                    row_data = {
+                        "token": [sample_token],
+                        "timestamp": [sample["timestamp"]],
+                        "ego_state": ego_state.array.flatten().tolist(),
+                        "detections_state": detections_state_list,
+                        "detections_velocity": [det.velocity.array.tolist() for det in box_detections.box_detections],
+                        "detections_token": [det.metadata.track_token for det in box_detections.box_detections],
+                        "detections_type": [det.metadata.detection_type.value for det in box_detections.box_detections],
+                        "traffic_light_ids": [],
+                        "traffic_light_types": [],
+                        "scenario_tag": ["unknown"],
+                        "route_lane_group_ids": [],
+                    }
+
+                    # Add lidar data if configured
+                    if dataset_converter_config.lidar_store_option == "path":
+                        row_data["lidar"] = [lidars.get(LiDARType.LIDAR_MERGED, None)]
+
+                    # Add camera data if configured
+                    if dataset_converter_config.camera_store_option == "path":
+                        for camera_type in NUSCENES_CAMERA_TYPES.keys():
+                            if camera_type in cameras:
+                                camera_path, extrinsic = cameras[camera_type]
+                                row_data[camera_type.serialize()] = [camera_path]
+                                row_data[f"{camera_type.serialize()}_extrinsic"] = [
+                                    extrinsic.to_transformation_matrix().flatten().tolist()]
+                            else:
+                                row_data[camera_type.serialize()] = [None]
+                                row_data[f"{camera_type.serialize()}_extrinsic"] = [None]
+
+                    batch = pa.record_batch(row_data, schema=recording_schema)
+                    writer.write_batch(batch)
+
+                sample_token = sample["next"]
+                sample_count += 1
+
+    # Sort by timestamp if required
+    if SORT_BY_TIMESTAMP:
+        recording_table = open_arrow_table(log_file_path)
+        recording_table = recording_table.sort_by([("timestamp", "ascending")])
+        write_arrow_table(recording_table, log_file_path)
\ No newline at end of file
diff --git a/src/py123d/conversion/datasets/nuscenes/nuscenes_map_conversion.py b/src/py123d/conversion/datasets/nuscenes/nuscenes_map_conversion.py
new file mode 100644
index 00000000..66f7a658
--- /dev/null
+++ b/src/py123d/conversion/datasets/nuscenes/nuscenes_map_conversion.py
@@ -0,0 +1,708 @@
+import lanelet2
+import numpy as np
+
+from pathlib import Path
+from typing import List, Optional
+from lanelet2.io import load
+from lanelet2.projection import MercatorProjector
+from shapely.geometry import Polygon, MultiPolygon, LineString
+from shapely.validation import make_valid
+from nuscenes.map_expansion.map_api import NuScenesMap
+from nuscenes.map_expansion.arcline_path_utils import discretize_lane
+
+from py123d.conversion.map_writer.abstract_map_writer import AbstractMapWriter
+from py123d.datatypes.maps.cache.cache_map_objects import (
+    CacheCarpark,
+    CacheCrosswalk,
+    CacheGenericDrivable,
+    CacheIntersection,
+    CacheLane,
+    CacheLaneGroup,
+    CacheRoadLine,
+    CacheWalkway,
+)
+from py123d.datatypes.maps.map_datatypes import RoadLineType
+from py123d.geometry import Polyline3D
+
+NUSCENES_MAPS: List[str] = [
+    "boston-seaport",
+    "singapore-hollandvillage",
+    "singapore-onenorth",
+    "singapore-queenstown"
+]
+
+
+def write_nuscenes_map(nuscenes_maps_root: Path, location: str, map_writer: AbstractMapWriter, use_lanelet2: bool, lanelet2_root: Optional[str] = None) -> None:
+    """
+    Convert a nuScenes map to the unified format and write it using map_writer.
+    """
+    assert location in NUSCENES_MAPS, f"Map name {location} is not supported."
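# A usage sketch of the function above (hedged: the paths are hypothetical,
# and any AbstractMapWriter implementation can be passed in):
#
#   from pathlib import Path
#   write_nuscenes_map(
#       nuscenes_maps_root=Path("/data/nuscenes"),
#       location="boston-seaport",
#       map_writer=my_map_writer,
#       use_lanelet2=False,
#   )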
+ nusc_map = NuScenesMap(dataroot=str(nuscenes_maps_root), map_name=location) + + # Write all layers + if use_lanelet2: + _write_nuscenes_lanes_lanelet2(nusc_map, map_writer, lanelet2_root) + _write_nuscenes_lane_groups_lanelet2(nusc_map, map_writer, lanelet2_root) + else: + _write_nuscenes_lanes(nusc_map, map_writer) + _write_nuscenes_lane_groups(nusc_map, map_writer) + _write_nuscenes_intersections(nusc_map, map_writer) + _write_nuscenes_crosswalks(nusc_map, map_writer) + _write_nuscenes_walkways(nusc_map, map_writer) + _write_nuscenes_carparks(nusc_map, map_writer) + _write_nuscenes_generic_drivables(nusc_map, map_writer) + _write_nuscenes_stop_lines(nusc_map, map_writer) + _write_nuscenes_road_lines(nusc_map, map_writer) + + +def _write_nuscenes_lanes_lanelet2(nusc_map: NuScenesMap, map_writer: AbstractMapWriter, lanelet2_root: str) -> None: + map_name = nusc_map.map_name + osm_map_file = str(Path(lanelet2_root) / f"{map_name}.osm") + + if "boston" in map_name.lower(): + origin_lat, origin_lon = 42.3365, -71.0577 + elif "singapore" in map_name.lower(): + origin_lat, origin_lon = 1.3, 103.8 + else: + origin_lat, origin_lon = 49.0, 8.4 + + origin = lanelet2.io.Origin(origin_lat, origin_lon) + + try: + lanelet_map = lanelet2.io.load(osm_map_file, origin) + except Exception: + try: + projector = lanelet2.projection.MercatorProjector(origin) + lanelet_map = lanelet2.io.load(osm_map_file, projector) + except Exception: + return + + for lanelet in lanelet_map.laneletLayer: + token = lanelet.id + + try: + left_bound = [(p.x, p.y) for p in lanelet.leftBound] + right_bound = [(p.x, p.y) for p in lanelet.rightBound] + polygon_points = left_bound + right_bound[::-1] + polygon = Polygon(polygon_points) + + predecessor_ids = [int(pred.id) for pred in lanelet.previousLanelets] + successor_ids = [int(succ.id) for succ in lanelet.followingLanelets] + + left_lane_id = None + right_lane_id = None + + left_boundary = [(p.x, p.y) for p in lanelet.leftBound] + right_boundary = [(p.x, p.y) for p in lanelet.rightBound] + centerline = [] + for left_pt, right_pt in zip(lanelet.leftBound, lanelet.rightBound): + center_x = (left_pt.x + right_pt.x) / 2 + center_y = (left_pt.y + right_pt.y) / 2 + centerline.append((center_x, center_y)) + + speed_limit_mps = 0.0 + if "speed_limit" in lanelet.attributes: + try: + speed_limit_str = lanelet.attributes["speed_limit"] + if "km/h" in speed_limit_str: + speed_kmh = float(speed_limit_str.replace("km/h", "").strip()) + speed_limit_mps = speed_kmh / 3.6 + except (ValueError, TypeError): + pass + + map_writer.write_lane( + CacheLane( + object_id=token, + lane_group_id=None, + left_boundary=left_boundary, + right_boundary=right_boundary, + centerline=centerline, + left_lane_id=left_lane_id, + right_lane_id=right_lane_id, + predecessor_ids=predecessor_ids, + successor_ids=successor_ids, + speed_limit_mps=speed_limit_mps, + outline=None, + geometry=polygon, + ) + ) + except Exception: + continue + +def _write_nuscenes_lane_groups_lanelet2(nusc_map: NuScenesMap, map_writer: AbstractMapWriter, lanelet2_root: str) -> None: + map_name = nusc_map.map_name + osm_map_file = str(Path(lanelet2_root) / f"{map_name}.osm") + + if "boston" in map_name.lower(): + origin_lat, origin_lon = 42.3365, -71.0577 + else: + origin_lat, origin_lon = 1.3, 103.8 + + origin = lanelet2.io.Origin(origin_lat, origin_lon) + + try: + projector = MercatorProjector(origin) + lanelet_map = load(osm_map_file, projector) + except Exception: + return + + for lanelet in lanelet_map.laneletLayer: + token = lanelet.id + 
lane_ids = [lanelet.id] + try: + predecessor_ids = [int(lanelet.id) for lanelet in lanelet.previous] + successor_ids = [int(lanelet.id) for lanelet in lanelet.following] + except AttributeError: + predecessor_ids = [] + successor_ids = [] + try: + if hasattr(lanelet, 'left'): + for left_lane in lanelet.left: + predecessor_ids.append(int(left_lane.id)) + if hasattr(lanelet, 'right'): + for right_lane in lanelet.right: + successor_ids.append(int(right_lane.id)) + except Exception: + pass + + try: + left_bound = [(p.x, p.y) for p in lanelet.leftBound] + right_bound = [(p.x, p.y) for p in lanelet.rightBound] + polygon_points = left_bound + right_bound[::-1] + polygon = Polygon(polygon_points) + except Exception: + continue + + try: + map_writer.write_lane_group( + CacheLaneGroup( + object_id=token, + lane_ids=lane_ids, + left_boundary=None, + right_boundary=None, + intersection_id=None, + predecessor_ids=predecessor_ids, + successor_ids=successor_ids, + outline=None, + geometry=polygon, + ) + ) + except Exception: + continue + +def _get_lanelet_connections(lanelet): + """ + Helper function to extract incoming and outgoing lanelets. + """ + incoming = lanelet.incomings + outgoing = lanelet.outgoings + return incoming, outgoing + + +def _write_nuscenes_lanes(nusc_map: NuScenesMap, map_writer: AbstractMapWriter) -> None: + """ + Write lane data to map_writer, including topology and boundaries. + """ + lane_records = nusc_map.lane + for lane_record in lane_records: + token = lane_record["token"] + + # Extract geometry from lane record + try: + if "polygon_token" in lane_record: + polygon = nusc_map.extract_polygon(lane_record["polygon_token"]) + else: + continue + if not polygon.is_valid: + continue + except Exception: + continue + + # Get topology + incoming = nusc_map.get_incoming_lane_ids(token) + outgoing = nusc_map.get_outgoing_lane_ids(token) + + # Get lane connectors + lane_connectors = [] + for connector in nusc_map.lane_connector: + if connector.get("incoming_lane") == token or connector.get("outgoing_lane") == token: + lane_connectors.append(connector["token"]) + + # Extract boundaries + left_boundary = _get_lane_boundary(token, "left", nusc_map) + right_boundary = _get_lane_boundary(token, "right", nusc_map) + + # Skip lanes without valid boundaries + if left_boundary is None or right_boundary is None: + continue + if left_boundary.is_empty or right_boundary.is_empty: + continue + + # Extract baseline path + baseline_path = None + if token in nusc_map.arcline_path_3: + arc_path = nusc_map.arcline_path_3[token] + try: + points = discretize_lane(arc_path, resolution_meters=0.1) + xy_points = [(p[0], p[1]) for p in points] + baseline_path = LineString(xy_points) + except Exception: + baseline_path = None + + # Align boundaries with baseline path direction + if baseline_path and left_boundary: + left_boundary = align_boundary_direction(baseline_path, left_boundary) + if baseline_path and right_boundary: + right_boundary = align_boundary_direction(baseline_path, right_boundary) + + # Write lane object safely + try: + map_writer.write_lane( + CacheLane( + object_id=token, + lane_group_id=lane_record.get("road_segment_token", None), + left_boundary=left_boundary, + right_boundary=right_boundary, + centerline=baseline_path, + left_lane_id=None, # Not directly available in nuscenes + right_lane_id=None, # Not directly available in nuscenes + predecessor_ids=incoming, + successor_ids=outgoing, + speed_limit_mps=0.0, # Default value + outline=None, + geometry=polygon, + ) + ) + except Exception: + 
continue + + +def _write_nuscenes_lane_groups(nusc_map: NuScenesMap, map_writer: AbstractMapWriter) -> None: + """ + Write lane group data to map_writer. + """ + road_segments = nusc_map.road_segment + for segment in road_segments: + token = segment["token"] + + # Extract geometry + try: + if "polygon_token" in segment: + polygon = nusc_map.extract_polygon(segment["polygon_token"]) + else: + continue + if not polygon.is_valid: + continue + except Exception: + continue + + # Find lanes in this segment + lane_ids = [] + for lane in nusc_map.lane: + if lane.get("road_segment_token") == token: + lane_ids.append(lane["token"]) + + # Get connected segments + incoming, outgoing = _get_connected_segments(token, nusc_map) + + # Extract boundaries + left_boundary = _get_lane_group_boundary(token, "left", nusc_map) + right_boundary = _get_lane_group_boundary(token, "right", nusc_map) + + # Skip invalid boundaries + if left_boundary is None or right_boundary is None: + continue + if left_boundary.is_empty or right_boundary.is_empty: + continue + + # Use first lane's baseline path for direction alignment + baseline_path = None + if lane_ids: + first_lane_token = lane_ids[0] + if first_lane_token in nusc_map.arcline_path_3: + arc_path = nusc_map.arcline_path_3[first_lane_token] + try: + points = discretize_lane(arc_path, resolution_meters=0.1) + xy_points = [(p[0], p[1]) for p in points] + baseline_path = LineString(xy_points) + except Exception: + baseline_path = None + + if baseline_path and left_boundary: + left_boundary = align_boundary_direction(baseline_path, left_boundary) + if baseline_path and right_boundary: + right_boundary = align_boundary_direction(baseline_path, right_boundary) + + try: + map_writer.write_lane_group( + CacheLaneGroup( + object_id=token, + lane_ids=lane_ids, + left_boundary=left_boundary, + right_boundary=right_boundary, + intersection_id=None, # Handled in intersections + predecessor_ids=incoming, + successor_ids=outgoing, + outline=None, + geometry=polygon, + ) + ) + except Exception: + continue + + +def _write_nuscenes_intersections(nusc_map: NuScenesMap, map_writer: AbstractMapWriter) -> None: + """ + Write intersection data to map_writer. + """ + road_blocks = nusc_map.road_block + for block in road_blocks: + token = block["token"] + try: + if "polygon_token" in block: + polygon = nusc_map.extract_polygon(block["polygon_token"]) + else: + continue + if not polygon.is_valid: + continue + except Exception: + continue + + # Lane group IDs are not directly available; use empty list + lane_group_ids = [] + + map_writer.write_intersection( + CacheIntersection( + object_id=token, + lane_group_ids=lane_group_ids, + geometry=polygon, + ) + ) + + +def _write_nuscenes_crosswalks(nusc_map: NuScenesMap, map_writer: AbstractMapWriter) -> None: + """ + Write crosswalk data to map_writer. + """ + ped_crossings = nusc_map.ped_crossing + for crossing in ped_crossings: + token = crossing["token"] + try: + if "polygon_token" in crossing: + polygon = nusc_map.extract_polygon(crossing["polygon_token"]) + else: + continue + if not polygon.is_valid: + continue + except Exception: + continue + + map_writer.write_crosswalk( + CacheCrosswalk( + object_id=token, + geometry=polygon, + ) + ) + + +def _write_nuscenes_walkways(nusc_map: NuScenesMap, map_writer: AbstractMapWriter) -> None: + """ + Write walkway data to map_writer. 
+ """ + walkways = nusc_map.walkway + for walkway in walkways: + token = walkway["token"] + try: + if "polygon_token" in walkway: + polygon = nusc_map.extract_polygon(walkway["polygon_token"]) + else: + continue + if not polygon.is_valid: + continue + except Exception: + continue + + map_writer.write_walkway( + CacheWalkway( + object_id=token, + geometry=polygon, + ) + ) + + +def _write_nuscenes_carparks(nusc_map: NuScenesMap, map_writer: AbstractMapWriter) -> None: + """ + Write carpark data to map_writer. + """ + carpark_areas = nusc_map.carpark_area + for carpark in carpark_areas: + token = carpark["token"] + try: + if "polygon_token" in carpark: + polygon = nusc_map.extract_polygon(carpark["polygon_token"]) + else: + continue + if not polygon.is_valid: + continue + except Exception: + continue + + map_writer.write_carpark( + CacheCarpark( + object_id=token, + geometry=polygon, + ) + ) + + +def _write_nuscenes_generic_drivables(nusc_map: NuScenesMap, map_writer: AbstractMapWriter) -> None: + """ + Write generic drivable areas to map_writer. + """ + # Combine road segments, lanes, and drivable areas + all_drivables = [] + + # Add road segments + for segment in nusc_map.road_segment: + try: + if "polygon_token" in segment: + polygon = nusc_map.extract_polygon(segment["polygon_token"]) + if polygon.is_valid: + all_drivables.append((f"road_segment_{segment['token']}", polygon)) + except Exception: + continue + + # Add lanes + for lane in nusc_map.lane: + try: + if "polygon_token" in lane: + polygon = nusc_map.extract_polygon(lane["polygon_token"]) + if polygon.is_valid: + all_drivables.append((f"lane_{lane['token']}", polygon)) + except Exception: + continue + + # Add drivable areas + for road in nusc_map.drivable_area: + try: + if "polygon_token" in road: + polygon = nusc_map.extract_polygon(road["polygon_token"]) + if polygon.is_valid: + all_drivables.append((f"road_{road['token']}", polygon)) + except Exception: + continue + + for obj_id, geometry in all_drivables: + map_writer.write_generic_drivable( + CacheGenericDrivable( + object_id=obj_id, + geometry=geometry, + ) + ) + + +def _write_nuscenes_stop_lines(nusc_map: NuScenesMap, map_writer: AbstractMapWriter) -> None: + """ + Write stop line data to map_writer. + """ + stop_lines = nusc_map.stop_line + for stop_line in stop_lines: + token = stop_line["token"] + try: + if "polygon_token" in stop_line: + polygon = nusc_map.extract_polygon(stop_line["polygon_token"]) + else: + continue + if not polygon.is_valid: + continue + except Exception: + continue + + # Note: Stop lines are written as generic drivable for compatibility + map_writer.write_generic_drivable( + CacheGenericDrivable( + object_id=token, + geometry=polygon, + ) + ) + + +def _write_nuscenes_road_lines(nusc_map: NuScenesMap, map_writer: AbstractMapWriter) -> None: + """ + Write road line data (dividers) to map_writer. 
+ """ + # Process road dividers + road_dividers = nusc_map.road_divider + for divider in road_dividers: + token = divider["token"] + try: + line = nusc_map.extract_line(divider["line_token"]) + if not line.is_valid: + continue + except Exception: + continue + + # Determine line type + line_type = _get_road_line_type(divider["line_token"], nusc_map) + + map_writer.write_road_line( + CacheRoadLine( + object_id=token, + road_line_type=line_type, + polyline=Polyline3D(LineString(line.coords)), + ) + ) + + # Process lane dividers + lane_dividers = nusc_map.lane_divider + for divider in lane_dividers: + token = divider["token"] + try: + line = nusc_map.extract_line(divider["line_token"]) + if not line.is_valid: + continue + except Exception: + continue + + line_type = _get_road_line_type(divider["line_token"], nusc_map) + + map_writer.write_road_line( + CacheRoadLine( + object_id=token, + road_line_type=line_type, + polyline=Polyline3D(LineString(line.coords)), + ) + ) + + +def _get_lane_boundary(lane_token: str, side: str, nusc_map: NuScenesMap) -> Optional[LineString]: + """ + Extract lane boundary geometry for a given side. + """ + lane_record = next((lr for lr in nusc_map.lane if lr["token"] == lane_token), None) + if not lane_record: + return None + + divider_segment_nodes_key = f"{side}_lane_divider_segment_nodes" + if divider_segment_nodes_key in lane_record and lane_record[divider_segment_nodes_key]: + nodes = lane_record[divider_segment_nodes_key] + boundary = LineString([(node['x'], node['y']) for node in nodes]) + return boundary + + return None + + +def _get_lane_group_boundary(segment_token: str, side: str, nusc_map: NuScenesMap) -> Optional[LineString]: + """ + Extract lane group boundary geometry (simplified). + """ + # This is a simplified implementation; in practice, may need more robust geometry extraction + boundary_type = "road_divider" if side == "left" else "lane_divider" + + # Find the segment geometry + segment = next((rs for rs in nusc_map.road_segment if rs["token"] == segment_token), None) + if not segment: + return None + + try: + segment_geom = nusc_map.extract_polygon(segment["polygon_token"]) + except Exception: + return None + + # Find nearest boundary of the specified type within a threshold + nearest = None + min_dist = float('inf') + + if boundary_type == "road_divider": + records = nusc_map.road_divider + else: + records = nusc_map.lane_divider + + for record in records: + try: + line = nusc_map.extract_line(record["line_token"]) + dist = segment_geom.distance(line) + if dist < 10.0 and dist < min_dist: + min_dist = dist + nearest = line + except Exception: + continue + + return nearest + + +def _get_connected_segments(segment_token: str, nusc_map: NuScenesMap): + """ + Get incoming and outgoing segment connections. + """ + incoming, outgoing = [], [] + + for connector in nusc_map.lane_connector: + if connector.get("outgoing_lane") == segment_token: + incoming.append(connector.get("incoming_lane")) + elif connector.get("incoming_lane") == segment_token: + outgoing.append(connector.get("outgoing_lane")) + + incoming = [id for id in incoming if id is not None] + outgoing = [id for id in outgoing if id is not None] + + return incoming, outgoing + + +def _get_road_line_type(line_token: str, nusc_map: NuScenesMap) -> RoadLineType: + """ + Map nuscenes line type to RoadLineType. 
+ """ + nuscenes_to_road_line_type = { + "SINGLE_SOLID_WHITE": RoadLineType.SOLID_WHITE, + "DOUBLE_DASHED_WHITE": RoadLineType.DOUBLE_DASH_WHITE, + "SINGLE_SOLID_YELLOW": RoadLineType.SOLID_YELLOW, + } + + line_token_to_type = {} + for lane_record in nusc_map.lane: + for seg in lane_record.get("left_lane_divider_segments", []): + token = seg.get("line_token") + seg_type = seg.get("segment_type") + if token and seg_type: + line_token_to_type[token] = seg_type + + for seg in lane_record.get("right_lane_divider_segments", []): + token = seg.get("line_token") + seg_type = seg.get("segment_type") + if token and seg_type: + line_token_to_type[token] = seg_type + + nuscenes_type = line_token_to_type.get(line_token, "UNKNOWN") + return nuscenes_to_road_line_type.get(nuscenes_type, RoadLineType.UNKNOWN) + + +def flip_linestring(linestring: LineString) -> LineString: + """ + Flip the direction of a LineString. + """ + return LineString(linestring.coords[::-1]) + + +def lines_same_direction(centerline: LineString, boundary: LineString) -> bool: + """ + Check if centerline and boundary have the same direction. + """ + center_start = np.array(centerline.coords[0]) + center_end = np.array(centerline.coords[-1]) + boundary_start = np.array(boundary.coords[0]) + boundary_end = np.array(boundary.coords[-1]) + + same_dir_dist = np.linalg.norm(center_start - boundary_start) + np.linalg.norm(center_end - boundary_end) + opposite_dir_dist = np.linalg.norm(center_start - boundary_end) + np.linalg.norm(center_end - boundary_start) + + return same_dir_dist <= opposite_dir_dist + + +def align_boundary_direction(centerline: LineString, boundary: LineString) -> LineString: + """ + Align boundary direction with centerline. + """ + if not lines_same_direction(centerline, boundary): + return flip_linestring(boundary) + return boundary \ No newline at end of file diff --git a/src/py123d/datatypes/detections/box_detections.py b/src/py123d/datatypes/detections/box_detections.py index 2cff36a8..dccaa73b 100644 --- a/src/py123d/datatypes/detections/box_detections.py +++ b/src/py123d/datatypes/detections/box_detections.py @@ -97,8 +97,8 @@ def get_box_detections_by_types(self, detection_types: Iterable[BoxDetectionType detection for detection in self.box_detections if detection.metadata.box_detection_type in detection_types ] - def get_detection_by_track_token(self, track_token: str) -> BoxDetection | None: - box_detection: BoxDetection | None = None + def get_detection_by_track_token(self, track_token: str) -> Optional[BoxDetection]: + box_detection: Optional[BoxDetection] = None for detection in self.box_detections: if detection.metadata.track_token == track_token: box_detection = detection diff --git a/src/py123d/datatypes/maps/abstract_map_objects.py b/src/py123d/datatypes/maps/abstract_map_objects.py index 83004a94..de43bc81 100644 --- a/src/py123d/datatypes/maps/abstract_map_objects.py +++ b/src/py123d/datatypes/maps/abstract_map_objects.py @@ -1,7 +1,9 @@ from __future__ import annotations import abc -from typing import List, Optional, Tuple, TypeAlias, Union +from typing import List, Optional, Tuple, Union +from typing_extensions import TypeAlias + import shapely.geometry as geom import trimesh diff --git a/src/py123d/datatypes/maps/gpkg/gpkg_map_objects.py b/src/py123d/datatypes/maps/gpkg/gpkg_map_objects.py index 97b11d73..a7e0dc36 100644 --- a/src/py123d/datatypes/maps/gpkg/gpkg_map_objects.py +++ b/src/py123d/datatypes/maps/gpkg/gpkg_map_objects.py @@ -1,6 +1,7 @@ from __future__ import annotations import ast 
+import trimesh
 from functools import cached_property
 from typing import List, Optional
@@ -193,9 +194,23 @@ def centerline(self) -> Polyline3D:
 
     @property
     def outline_3d(self) -> Polyline3D:
-        """Inherited, see superclass."""
-        outline_array = np.vstack((self.left_boundary.array, self.right_boundary.array[::-1]))
-        outline_array = np.vstack((outline_array, outline_array[0]))
+        left_array = self.left_boundary.array if getattr(self, "left_boundary", None) is not None else np.zeros((0, 3))
+        right_array = self.right_boundary.array[::-1] if getattr(self, "right_boundary", None) is not None else np.zeros((0, 3))
+
+        outline_array = np.vstack((left_array, right_array)) if left_array.size + right_array.size > 0 else np.zeros((0, 3))
+
+        if outline_array.shape[0] == 0:
+            # fallback: build the Polyline3D from the shapely polygon
+            poly = getattr(self, "shapely_polygon", None)
+            if poly is not None:
+                outline_array = np.array(poly.exterior.coords)
+            else:
+                return Polyline3D(np.zeros((0, 3)))
+
+        # close the outline ring
+        if outline_array.shape[0] > 0:
+            outline_array = np.vstack((outline_array, outline_array[0]))
+
         return Polyline3D.from_linestring(geom.LineString(outline_array))
 
     @property
@@ -264,8 +279,20 @@ def right_boundary(self) -> Polyline3D:
 
     @property
     def outline_3d(self) -> Polyline3D:
-        """Inherited, see superclass."""
-        outline_array = np.vstack((self.left_boundary.array, self.right_boundary.array[::-1]))
+        # gather the boundary arrays; either boundary may be missing
+        left_array = self.left_boundary.array if getattr(self, "left_boundary", None) is not None else np.zeros((0, 3))
+        right_array = self.right_boundary.array[::-1] if getattr(self, "right_boundary", None) is not None else np.zeros((0, 3))
+
+        if left_array.size + right_array.size == 0:
+            # fallback: build the outline from the shapely polygon
+            poly = getattr(self, "shapely_polygon", None)
+            if poly is not None:
+                outline_array = np.array(poly.exterior.coords)
+            else:
+                return Polyline3D(np.zeros((0, 3)))
+        else:
+            outline_array = np.vstack((left_array, right_array))
+
         return Polyline3D.from_linestring(geom.LineString(outline_array))
 
     @property
diff --git a/src/py123d/datatypes/maps/gpkg/gpkg_utils.py b/src/py123d/datatypes/maps/gpkg/gpkg_utils.py
index 54dd93e6..ba587077 100644
--- a/src/py123d/datatypes/maps/gpkg/gpkg_utils.py
+++ b/src/py123d/datatypes/maps/gpkg/gpkg_utils.py
@@ -21,16 +21,23 @@ def load_gdf_with_geometry_columns(gdf: gpd.GeoDataFrame, geometry_column_names:
 
 
 def get_all_rows_with_value(
-    elements: gpd.geodataframe.GeoDataFrame, column_label: str, desired_value: str
-) -> gpd.geodataframe.GeoDataFrame:
+    elements: gpd.GeoDataFrame, column_label: str, desired_value
+) -> gpd.GeoDataFrame:
     """
-    Extract all matching elements. Note, if no matching desired_key is found and empty list is returned.
-    :param elements: data frame from MapsDb.
-    :param column_label: key to extract from a column.
-    :param desired_value: key which is compared with the values of column_label entry.
-    :return: a subset of the original GeoDataFrame containing the matching key.
+    Extract all matching elements by value.
+    Automatically handles both integer IDs and UUID strings.
     """
-    return elements.iloc[np.where(elements[column_label].to_numpy().astype(int) == int(desired_value))]
+    # If the column is of integer type, attempt to convert the desired_value to an integer before comparison.
+ col_dtype = elements[column_label].dtype + if np.issubdtype(col_dtype, np.integer): + try: + desired_value_int = int(desired_value) + return elements[elements[column_label] == desired_value_int] + except ValueError: + raise ValueError(f"Expected an integer value for column '{column_label}', got '{desired_value}'") + else: + # Otherwise, directly compare it as a string. + return elements[elements[column_label].astype(str) == str(desired_value)] def get_row_with_value(elements: gpd.geodataframe.GeoDataFrame, column_label: str, desired_value: str) -> gpd.GeoSeries: diff --git a/src/py123d/datatypes/scene/arrow/arrow_scene.py b/src/py123d/datatypes/scene/arrow/arrow_scene.py index 236862d5..7d89b786 100644 --- a/src/py123d/datatypes/scene/arrow/arrow_scene.py +++ b/src/py123d/datatypes/scene/arrow/arrow_scene.py @@ -36,10 +36,17 @@ def __init__( self._arrow_file_path: Path = Path(arrow_file_path) self._log_metadata: LogMetadata = get_log_metadata_from_arrow(arrow_file_path) + with pa.memory_map(str(self._arrow_file_path), "r") as source: + reader = pa.ipc.open_file(source) + table = reader.read_all() + num_rows = table.num_rows + initial_uuid = table['uuid'][0].as_py() + if scene_extraction_metadata is None: scene_extraction_metadata = SceneExtractionMetadata( + initial_uuid=initial_uuid, initial_idx=0, - duration_s=self._log_metadata.timestep_seconds * len(self._arrow_file_path), + duration_s=self._log_metadata.timestep_seconds * num_rows, history_s=0.0, iteration_duration_s=self._log_metadata.timestep_seconds, ) diff --git a/src/py123d/datatypes/scene/arrow/utils/arrow_getters.py b/src/py123d/datatypes/scene/arrow/utils/arrow_getters.py index 6be9d433..031c726a 100644 --- a/src/py123d/datatypes/scene/arrow/utils/arrow_getters.py +++ b/src/py123d/datatypes/scene/arrow/utils/arrow_getters.py @@ -38,6 +38,7 @@ "av2-sensor": DATASET_PATHS.av2_sensor_data_root, "wopd": DATASET_PATHS.wopd_data_root, "pandaset": DATASET_PATHS.pandaset_data_root, + "nuscenes": DATASET_PATHS.nuscenes_data_root, } diff --git a/src/py123d/datatypes/sensors/lidar/lidar_index.py b/src/py123d/datatypes/sensors/lidar/lidar_index.py index 7684b685..c101ba8e 100644 --- a/src/py123d/datatypes/sensors/lidar/lidar_index.py +++ b/src/py123d/datatypes/sensors/lidar/lidar_index.py @@ -83,3 +83,12 @@ class PandasetLidarIndex(LiDARIndex): Y = 1 Z = 2 INTENSITY = 3 + + +@register_lidar_index +class NuscenesLidarIndex(LiDARIndex): + X = 0 + Y = 1 + Z = 2 + INTENSITY = 3 + RING = 4 diff --git a/src/py123d/datatypes/vehicle_state/vehicle_parameters.py b/src/py123d/datatypes/vehicle_state/vehicle_parameters.py index 92ce83b0..19b050c7 100644 --- a/src/py123d/datatypes/vehicle_state/vehicle_parameters.py +++ b/src/py123d/datatypes/vehicle_state/vehicle_parameters.py @@ -52,6 +52,16 @@ def get_nuplan_chrysler_pacifica_parameters() -> VehicleParameters: rear_axle_to_center_longitudinal=1.461, ) +def get_nuscenes_renauly_zoe_parameters() -> VehicleParameters: + return VehicleParameters( + vehicle_name="nuscenes_renauly_zoe", + width=1.730, + length=4.084, + height=1.562, + wheel_base=2.588, + rear_axle_to_center_vertical=1.562 / 2, # NOTE: missing in nuscenes, TODO: find more accurate value + rear_axle_to_center_longitudinal=1.385, + ) def get_carla_lincoln_mkz_2020_parameters() -> VehicleParameters: # NOTE: values are extracted from CARLA diff --git a/src/py123d/script/config/common/default_dataset_paths.yaml b/src/py123d/script/config/common/default_dataset_paths.yaml index ded971a6..c6a57f75 100644 --- 
a/src/py123d/script/config/common/default_dataset_paths.yaml +++ b/src/py123d/script/config/common/default_dataset_paths.yaml @@ -18,6 +18,8 @@ dataset_paths: # WOPD defaults wopd_data_root: ${oc.env:WOPD_DATA_ROOT,null} - # Pandaset defaults pandaset_data_root: ${oc.env:PANDASET_DATA_ROOT,null} + + # nuScenes defaults + nuscenes_data_root: ${oc.env:NUSCENES_DATA_ROOT,null} diff --git a/src/py123d/script/config/conversion/datasets/nuscenes_dataset.yaml b/src/py123d/script/config/conversion/datasets/nuscenes_dataset.yaml new file mode 100644 index 00000000..3e3a8b92 --- /dev/null +++ b/src/py123d/script/config/conversion/datasets/nuscenes_dataset.yaml @@ -0,0 +1,35 @@ +nuscenes_dataset: + _target_: py123d.conversion.datasets.nuscenes.nuscenes_data_converter.NuScenesDataConverter + _convert_: 'all' + + splits: ["nuscenes_train", "nuscenes_val", "nuscenes_test"] + nuscenes_data_root: ${dataset_paths.nuscenes_data_root} + nuscenes_map_root: ${dataset_paths.nuscenes_data_root} + nuscenes_lanelet2_root: ${dataset_paths.nuscenes_data_root}/lanelet2 + use_lanelet2: False + + dataset_converter_config: + _target_: py123d.conversion.dataset_converter_config.DatasetConverterConfig + _convert_: 'all' + + force_log_conversion: ${force_log_conversion} + force_map_conversion: ${force_map_conversion} + # Map + include_map: true + + # Ego + include_ego: true + + # Box Detections + include_box_detections: true + + # Traffic Lights + include_traffic_lights: false + + #cameras + include_cameras: true + camera_store_option: "path" + + #lidar + include_lidars: true + lidar_store_option: "path" \ No newline at end of file From 373a32e144c2dc0fd6e09dceda00acfb923f24fa Mon Sep 17 00:00:00 2001 From: Daniel Dauner Date: Thu, 30 Oct 2025 09:55:08 +0100 Subject: [PATCH 127/145] Push unfinished changes to separate branch for later merge. (#61) --- README.md | 8 +- scripts/download/download_kitti_360.sh | 86 +++++++++++++++ .../datasets/av2/utils/av2_constants.py | 37 +------ .../datasets/nuplan/utils/nuplan_constants.py | 29 ++--- .../datasets/pandaset/pandaset_constants.py | 33 +----- .../datasets/wopd/utils/wopd_constants.py | 14 +-- .../registry/box_detection_type_registry.py | 101 ++++++++++++++++++ .../detections/box_detection_types.py | 15 ++- .../datatypes/detections/box_detections.py | 4 + 9 files changed, 226 insertions(+), 101 deletions(-) create mode 100644 scripts/download/download_kitti_360.sh diff --git a/README.md b/README.md index 25913fa2..0e7e22c4 100644 --- a/README.md +++ b/README.md @@ -1,8 +1,8 @@

 [README header diff: image markup lost in extraction — the removed lines carried an image with alt text "YOUR-ALT-TEXT", the added lines an image with alt text "Logo"; the heading below is unchanged.]

      123D: One Library for 2D and 3D Driving Dataset
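This patch also centralizes the per-dataset detection enums into a decorator-based registry (see box_detection_type_registry.py further below). A minimal, self-contained sketch of that registration pattern, using a plain IntEnum as a stand-in for py123d's SerialIntEnum (names below are illustrative, not from the patch):

from enum import IntEnum

DEMO_REGISTRY = {}


def register_demo_type(enum_class):
    # Key the registry by class name so a serialized name can be
    # resolved back to its enum class; return the class unchanged.
    DEMO_REGISTRY[enum_class.__name__] = enum_class
    return enum_class


@register_demo_type
class DemoBoxDetectionType(IntEnum):
    VEHICLE = 0
    PEDESTRIAN = 1


assert DEMO_REGISTRY["DemoBoxDetectionType"] is DemoBoxDetectionType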

diff --git a/scripts/download/download_kitti_360.sh b/scripts/download/download_kitti_360.sh
new file mode 100644
index 00000000..1cb3e540
--- /dev/null
+++ b/scripts/download/download_kitti_360.sh
@@ -0,0 +1,86 @@
+# 2D data & labels
+# ----------------------------------------------------------------------------------------------------------------------
+
+# Fisheye Images (355G)
+wget https://s3.eu-central-1.amazonaws.com/avg-projects/KITTI-360/a1d81d9f7fc7195c937f9ad12e2a2c66441ecb4e/download_2d_fisheye.zip
+
+# Fisheye Calibration Images (11G)
+wget https://s3.eu-central-1.amazonaws.com/avg-projects/KITTI-360/data_2d_raw/data_fisheye_calibration.zip
+
+
+# Perspective Images for Train & Val (128G)
+wget https://s3.eu-central-1.amazonaws.com/avg-projects/KITTI-360/a1d81d9f7fc7195c937f9ad12e2a2c66441ecb4e/download_2d_perspective.zip
+
+# Test Semantic (1.5G)
+wget https://s3.eu-central-1.amazonaws.com/avg-projects/KITTI-360/data_2d_raw/data_2d_test.zip
+
+# Test NVS 50% Drop (0.3G)
+wget https://s3.eu-central-1.amazonaws.com/avg-projects/KITTI-360/71f967e900f4e7c2e036a542f150effa31909b53/data_2d_nvs_drop50.zip
+
+# Test NVS 90% Drop (0.2G)
+wget https://s3.eu-central-1.amazonaws.com/avg-projects/KITTI-360/71f967e900f4e7c2e036a542f150effa31909b53/data_2d_nvs_drop90.zip
+
+# Test SLAM (14G)
+wget https://s3.eu-central-1.amazonaws.com/avg-projects/KITTI-360/data_2d_raw/data_2d_test_slam.zip
+
+
+# Semantics of Left Perspective Camera (1.8G)
+wget https://s3.eu-central-1.amazonaws.com/avg-projects/KITTI-360/ed180d24c0a144f2f1ac71c2c655a3e986517ed8/data_2d_semantics.zip
+
+# Semantics of Right Perspective Camera (1.8G)
+wget https://s3.eu-central-1.amazonaws.com/avg-projects/KITTI-360/ed180d24c0a144f2f1ac71c2c655a3e986517ed8/data_2d_semantics_image_01.zip
+
+
+# Confidence of Left Perspective Camera (44G)
+wget https://s3.eu-central-1.amazonaws.com/avg-projects/KITTI-360/ed180d24c0a144f2f1ac71c2c655a3e986517ed8/data_2d_confidence.zip
+
+# Confidence of Right Perspective Camera (44G)
+wget https://s3.eu-central-1.amazonaws.com/avg-projects/KITTI-360/ed180d24c0a144f2f1ac71c2c655a3e986517ed8/data_2d_confidence_image_01.zip
+
+
+
+# 3D data & labels
+# ----------------------------------------------------------------------------------------------------------------------
+
+# Raw Velodyne Scans (119G)
+wget https://s3.eu-central-1.amazonaws.com/avg-projects/KITTI-360/a1d81d9f7fc7195c937f9ad12e2a2c66441ecb4e/download_3d_velodyne.zip
+
+# Test SLAM (12G)
+wget https://s3.eu-central-1.amazonaws.com/avg-projects/KITTI-360/data_3d_raw/data_3d_test_slam.zip
+
+# Test Completion (35M)
+wget https://s3.eu-central-1.amazonaws.com/avg-projects/KITTI-360/6489aabd632d115c4280b978b2dcf72cb0142ad9/data_3d_ssc_test.zip
+
+
+# Raw SICK Scans (0.4G)
+wget https://s3.eu-central-1.amazonaws.com/avg-projects/KITTI-360/a1d81d9f7fc7195c937f9ad12e2a2c66441ecb4e/download_3d_sick.zip
+
+
+# Accumulated Point Clouds for Train & Val (12G)
+wget https://s3.eu-central-1.amazonaws.com/avg-projects/KITTI-360/6489aabd632d115c4280b978b2dcf72cb0142ad9/data_3d_semantics.zip
+
+# Test Semantic (1.2G)
+wget https://s3.eu-central-1.amazonaws.com/avg-projects/KITTI-360/6489aabd632d115c4280b978b2dcf72cb0142ad9/data_3d_semantics_test.zip
+
+
+# 3D Bounding Boxes (30M)
+wget https://s3.eu-central-1.amazonaws.com/avg-projects/KITTI-360/ffa164387078f48a20f0188aa31b0384bb19ce60/data_3d_bboxes.zip
+
+
+
+# Calibrations & Poses
+# ----------------------------------------------------------------------------------------------------------------------
+
+# Calibrations (3K)
+wget https://s3.eu-central-1.amazonaws.com/avg-projects/KITTI-360/384509ed5413ccc81328cf8c55cc6af078b8c444/calibration.zip
+
+
+# Vehicle Poses (8.9M)
+wget https://s3.eu-central-1.amazonaws.com/avg-projects/KITTI-360/89a6bae3c8a6f789e12de4807fc1e8fdcf182cf4/data_poses.zip
+
+
+# OXTS Sync Measurements (37.3M)
+wget https://s3.eu-central-1.amazonaws.com/avg-projects/KITTI-360/89a6bae3c8a6f789e12de4807fc1e8fdcf182cf4/data_poses_oxts.zip
+
+# OXTS Raw Measurements (0.4G)
+wget https://s3.eu-central-1.amazonaws.com/avg-projects/KITTI-360/89a6bae3c8a6f789e12de4807fc1e8fdcf182cf4/data_poses_oxts_extract.zip
diff --git a/src/py123d/conversion/datasets/av2/utils/av2_constants.py b/src/py123d/conversion/datasets/av2/utils/av2_constants.py
index 7f81f48c..31c0f86d 100644
--- a/src/py123d/conversion/datasets/av2/utils/av2_constants.py
+++ b/src/py123d/conversion/datasets/av2/utils/av2_constants.py
@@ -1,6 +1,6 @@
 from typing import Dict, Final, Set
 
-from py123d.common.utils.enums import SerialIntEnum
+from py123d.conversion.registry.box_detection_type_registry import AV2SensorBoxDetectionType
 from py123d.datatypes.detections.box_detection_types import BoxDetectionType
 from py123d.datatypes.maps.map_datatypes import RoadLineType
 from py123d.datatypes.sensors.camera.pinhole_camera import PinholeCameraType
@@ -8,41 +8,6 @@
 
 AV2_SENSOR_SPLITS: Set[str] = {"av2-sensor_train", "av2-sensor_val", "av2-sensor_test"}
 
-class AV2SensorBoxDetectionType(SerialIntEnum):
-    """Sensor dataset annotation categories."""
-
-    ANIMAL = 1
-    ARTICULATED_BUS = 2
-    BICYCLE = 3
-    BICYCLIST = 4
-    BOLLARD = 5
-    BOX_TRUCK = 6
-    BUS = 7
-    CONSTRUCTION_BARREL = 8
-    CONSTRUCTION_CONE = 9
-    DOG = 10
-    LARGE_VEHICLE = 11
-    MESSAGE_BOARD_TRAILER = 12
-    MOBILE_PEDESTRIAN_CROSSING_SIGN = 13
-    MOTORCYCLE = 14
-    MOTORCYCLIST = 15
-    OFFICIAL_SIGNALER = 16
-    PEDESTRIAN = 17
-    RAILED_VEHICLE = 18
-    REGULAR_VEHICLE = 19
-    SCHOOL_BUS = 20
-    SIGN = 21
-    STOP_SIGN = 22
-    STROLLER = 23
-    TRAFFIC_LIGHT_TRAILER = 24
-    TRUCK = 25
-    TRUCK_CAB = 26
-    VEHICULAR_TRAILER = 27
-    WHEELCHAIR = 28
-    WHEELED_DEVICE = 29
-    WHEELED_RIDER = 30
-
-
 # Mapping from AV2SensorBoxDetectionType to general DetectionType
 # TODO: Change the detection types. Multiple mistakes, e.g. animals/dogs are not generic objects.
AV2_TO_DETECTION_TYPE = { diff --git a/src/py123d/conversion/datasets/nuplan/utils/nuplan_constants.py b/src/py123d/conversion/datasets/nuplan/utils/nuplan_constants.py index 4b074d53..d7eaba2d 100644 --- a/src/py123d/conversion/datasets/nuplan/utils/nuplan_constants.py +++ b/src/py123d/conversion/datasets/nuplan/utils/nuplan_constants.py @@ -1,24 +1,11 @@ -from enum import IntEnum from typing import Dict, Final, List, Set -from py123d.datatypes.detections.box_detection_types import BoxDetectionType +from py123d.datatypes.detections.box_detection_types import AbstractBoxDetectionType from py123d.datatypes.detections.traffic_light_detections import TrafficLightStatus from py123d.datatypes.maps.map_datatypes import RoadLineType from py123d.datatypes.sensors.lidar.lidar import LiDARType from py123d.datatypes.time.time_point import TimePoint - -class NuPlanBoxDetectionType(IntEnum): - - VEHICLE = 0 - BICYCLE = 1 - PEDESTRIAN = 2 - TRAFFIC_CONE = 3 - BARRIER = 4 - CZONE_SIGN = 5 - GENERIC_OBJECT = 6 - - NUPLAN_DEFAULT_DT: Final[float] = 0.05 NUPLAN_TRAFFIC_STATUS_DICT: Final[Dict[str, TrafficLightStatus]] = { @@ -29,13 +16,13 @@ class NuPlanBoxDetectionType(IntEnum): NUPLAN_DETECTION_NAME_DICT = { - "vehicle": BoxDetectionType.VEHICLE, - "bicycle": BoxDetectionType.BICYCLE, - "pedestrian": BoxDetectionType.PEDESTRIAN, - "traffic_cone": BoxDetectionType.TRAFFIC_CONE, - "barrier": BoxDetectionType.BARRIER, - "czone_sign": BoxDetectionType.CZONE_SIGN, - "generic_object": BoxDetectionType.GENERIC_OBJECT, + "vehicle": AbstractBoxDetectionType.VEHICLE, + "bicycle": AbstractBoxDetectionType.BICYCLE, + "pedestrian": AbstractBoxDetectionType.PEDESTRIAN, + "traffic_cone": AbstractBoxDetectionType.TRAFFIC_CONE, + "barrier": AbstractBoxDetectionType.BARRIER, + "czone_sign": AbstractBoxDetectionType.CZONE_SIGN, + "generic_object": AbstractBoxDetectionType.GENERIC_OBJECT, } # https://github.com/motional/nuplan-devkit/blob/e9241677997dd86bfc0bcd44817ab04fe631405b/nuplan/database/nuplan_db_orm/utils.py#L1129-L1135 diff --git a/src/py123d/conversion/datasets/pandaset/pandaset_constants.py b/src/py123d/conversion/datasets/pandaset/pandaset_constants.py index 93ef4bc8..303f5014 100644 --- a/src/py123d/conversion/datasets/pandaset/pandaset_constants.py +++ b/src/py123d/conversion/datasets/pandaset/pandaset_constants.py @@ -1,6 +1,6 @@ from typing import Dict, List -from py123d.common.utils.enums import SerialIntEnum +from py123d.conversion.registry.box_detection_type_registry import PandasetBoxDetectionType from py123d.datatypes.detections.box_detection_types import BoxDetectionType from py123d.datatypes.sensors.camera.pinhole_camera import PinholeCameraType, PinholeDistortion, PinholeIntrinsics from py123d.datatypes.sensors.lidar.lidar import LiDARType @@ -20,37 +20,6 @@ PANDASET_LIDAR_MAPPING: Dict[str, LiDARType] = {"main_pandar64": LiDARType.LIDAR_TOP, "front_gt": LiDARType.LIDAR_FRONT} -class PandasetBoxDetectionType(SerialIntEnum): - - ANIMALS_BIRD = 0 - ANIMALS_OTHER = 1 - BICYCLE = 2 - BUS = 3 - CAR = 4 - CONES = 5 - CONSTRUCTION_SIGNS = 6 - EMERGENCY_VEHICLE = 7 - MEDIUM_SIZED_TRUCK = 8 - MOTORCYCLE = 9 - MOTORIZED_SCOOTER = 10 - OTHER_VEHICLE_CONSTRUCTION_VEHICLE = 11 - OTHER_VEHICLE_PEDICAB = 12 - OTHER_VEHICLE_UNCOMMON = 13 - PEDESTRIAN = 14 - PEDESTRIAN_WITH_OBJECT = 15 - PERSONAL_MOBILITY_DEVICE = 16 - PICKUP_TRUCK = 17 - PYLONS = 18 - ROAD_BARRIERS = 19 - ROLLING_CONTAINERS = 20 - SEMI_TRUCK = 21 - SIGNS = 22 - TEMPORARY_CONSTRUCTION_BARRIERS = 23 - TOWED_OBJECT = 24 - TRAIN = 25 - TRAM_SUBWAY = 26 
- - PANDASET_BOX_DETECTION_FROM_STR: Dict[str, PandasetBoxDetectionType] = { "Animals - Bird": PandasetBoxDetectionType.ANIMALS_BIRD, "Animals - Other": PandasetBoxDetectionType.ANIMALS_OTHER, diff --git a/src/py123d/conversion/datasets/wopd/utils/wopd_constants.py b/src/py123d/conversion/datasets/wopd/utils/wopd_constants.py index 963a056d..b6b83d56 100644 --- a/src/py123d/conversion/datasets/wopd/utils/wopd_constants.py +++ b/src/py123d/conversion/datasets/wopd/utils/wopd_constants.py @@ -1,6 +1,6 @@ from typing import Dict, List -from py123d.datatypes.detections.box_detection_types import BoxDetectionType +from py123d.conversion.registry.box_detection_type_registry import WOPDBoxDetectionType from py123d.datatypes.maps.map_datatypes import LaneType, RoadEdgeType, RoadLineType from py123d.datatypes.sensors.camera.pinhole_camera import PinholeCameraType from py123d.datatypes.sensors.lidar.lidar import LiDARType @@ -12,12 +12,12 @@ ] # https://github.com/waymo-research/waymo-open-dataset/blob/master/src/waymo_open_dataset/label.proto#L63 -WOPD_DETECTION_NAME_DICT: Dict[int, BoxDetectionType] = { - 0: BoxDetectionType.GENERIC_OBJECT, # TYPE_UNKNOWN - 1: BoxDetectionType.VEHICLE, # TYPE_VEHICLE - 2: BoxDetectionType.PEDESTRIAN, # TYPE_PEDESTRIAN - 3: BoxDetectionType.SIGN, # TYPE_SIGN - 4: BoxDetectionType.BICYCLE, # TYPE_CYCLIST +WOPD_DETECTION_NAME_DICT: Dict[int, WOPDBoxDetectionType] = { + 0: WOPDBoxDetectionType.TYPE_UNKNOWN, + 1: WOPDBoxDetectionType.TYPE_VEHICLE, + 2: WOPDBoxDetectionType.TYPE_PEDESTRIAN, + 3: WOPDBoxDetectionType.TYPE_SIGN, + 4: WOPDBoxDetectionType.TYPE_CYCLIST, } # https://github.com/waymo-research/waymo-open-dataset/blob/master/src/waymo_open_dataset/dataset.proto#L50 diff --git a/src/py123d/conversion/registry/box_detection_type_registry.py b/src/py123d/conversion/registry/box_detection_type_registry.py index f3110808..e1f38091 100644 --- a/src/py123d/conversion/registry/box_detection_type_registry.py +++ b/src/py123d/conversion/registry/box_detection_type_registry.py @@ -1,6 +1,107 @@ +from py123d.datatypes.detections.box_detection_types import AbstractBoxDetectionType + BOX_DETECTION_TYPE_REGISTRY = {} def register_box_detection_type(enum_class): BOX_DETECTION_TYPE_REGISTRY[enum_class.__name__] = enum_class return enum_class + + +@register_box_detection_type +class AV2SensorBoxDetectionType(AbstractBoxDetectionType): + """Sensor dataset annotation categories.""" + + ANIMAL = 1 + ARTICULATED_BUS = 2 + BICYCLE = 3 + BICYCLIST = 4 + BOLLARD = 5 + BOX_TRUCK = 6 + BUS = 7 + CONSTRUCTION_BARREL = 8 + CONSTRUCTION_CONE = 9 + DOG = 10 + LARGE_VEHICLE = 11 + MESSAGE_BOARD_TRAILER = 12 + MOBILE_PEDESTRIAN_CROSSING_SIGN = 13 + MOTORCYCLE = 14 + MOTORCYCLIST = 15 + OFFICIAL_SIGNALER = 16 + PEDESTRIAN = 17 + RAILED_VEHICLE = 18 + REGULAR_VEHICLE = 19 + SCHOOL_BUS = 20 + SIGN = 21 + STOP_SIGN = 22 + STROLLER = 23 + TRAFFIC_LIGHT_TRAILER = 24 + TRUCK = 25 + TRUCK_CAB = 26 + VEHICULAR_TRAILER = 27 + WHEELCHAIR = 28 + WHEELED_DEVICE = 29 + WHEELED_RIDER = 30 + + +@register_box_detection_type +class KITTI360BoxDetectionType(AbstractBoxDetectionType): + pass + + +@register_box_detection_type +class NuPlanBoxDetectionType(AbstractBoxDetectionType): + + VEHICLE = 0 + BICYCLE = 1 + PEDESTRIAN = 2 + TRAFFIC_CONE = 3 + BARRIER = 4 + CZONE_SIGN = 5 + GENERIC_OBJECT = 6 + + +@register_box_detection_type +class NuScenesBoxDetectionType(AbstractBoxDetectionType): + pass + + +class PandasetBoxDetectionType(AbstractBoxDetectionType): + + ANIMALS_BIRD = 0 + ANIMALS_OTHER = 1 + BICYCLE = 2 + 
BUS = 3
+    CAR = 4
+    CONES = 5
+    CONSTRUCTION_SIGNS = 6
+    EMERGENCY_VEHICLE = 7
+    MEDIUM_SIZED_TRUCK = 8
+    MOTORCYCLE = 9
+    MOTORIZED_SCOOTER = 10
+    OTHER_VEHICLE_CONSTRUCTION_VEHICLE = 11
+    OTHER_VEHICLE_PEDICAB = 12
+    OTHER_VEHICLE_UNCOMMON = 13
+    PEDESTRIAN = 14
+    PEDESTRIAN_WITH_OBJECT = 15
+    PERSONAL_MOBILITY_DEVICE = 16
+    PICKUP_TRUCK = 17
+    PYLONS = 18
+    ROAD_BARRIERS = 19
+    ROLLING_CONTAINERS = 20
+    SEMI_TRUCK = 21
+    SIGNS = 22
+    TEMPORARY_CONSTRUCTION_BARRIERS = 23
+    TOWED_OBJECT = 24
+    TRAIN = 25
+    TRAM_SUBWAY = 26
+
+
+class WOPDBoxDetectionType(AbstractBoxDetectionType):
+    # https://github.com/waymo-research/waymo-open-dataset/blob/master/src/waymo_open_dataset/label.proto#L63-L69
+
+    TYPE_UNKNOWN = 0
+    TYPE_VEHICLE = 1
+    TYPE_PEDESTRIAN = 2
+    TYPE_SIGN = 3
+    TYPE_CYCLIST = 4
diff --git a/src/py123d/datatypes/detections/box_detection_types.py b/src/py123d/datatypes/detections/box_detection_types.py
index 9ac74034..0ffd47ce 100644
--- a/src/py123d/datatypes/detections/box_detection_types.py
+++ b/src/py123d/datatypes/detections/box_detection_types.py
@@ -1,9 +1,18 @@
 from __future__ import annotations
 
+import abc
+
 from py123d.common.utils.enums import SerialIntEnum
 
 
-class BoxDetectionType(SerialIntEnum):
+class AbstractBoxDetectionType(SerialIntEnum):
+
+    @abc.abstractmethod
+    def to_default_type(self):
+        raise NotImplementedError("Subclasses must implement this method.")
+
+
+class BoxDetectionType(AbstractBoxDetectionType):
     """
     Enum for agents in py123d.
     """
@@ -26,6 +35,10 @@ class BoxDetectionType(SerialIntEnum):
     EGO = 7
     SIGN = 8  # TODO: Remove or extent
 
+    def to_default_type(self):
+        """Inherited, see superclass."""
+        return self
+
 
 DYNAMIC_DETECTION_TYPES: set[BoxDetectionType] = {
     BoxDetectionType.VEHICLE,
diff --git a/src/py123d/datatypes/detections/box_detections.py b/src/py123d/datatypes/detections/box_detections.py
index 2cff36a8..fcb8e964 100644
--- a/src/py123d/datatypes/detections/box_detections.py
+++ b/src/py123d/datatypes/detections/box_detections.py
@@ -18,6 +18,10 @@ class BoxDetectionMetadata:
     num_lidar_points: Optional[int] = None  # Number of LiDAR points within the bounding box
     timepoint: Optional[TimePoint] = None  # TimePoint when the detection was made, if available
 
+    @property
+    def default_box_detection_type(self) -> BoxDetectionType:
+        return self.box_detection_type.to_default_type()
+
 
 @dataclass
 class BoxDetectionSE2:

From 8e1fbf6b7aa6d141ef9fe401732b9783992d4b23 Mon Sep 17 00:00:00 2001
From: Changhui Jing <1903025786@qq.com>
Date: Thu, 30 Oct 2025 15:09:27 +0000
Subject: [PATCH 128/145] add nuscenes sensor io.
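For context: the loader added below treats each nuScenes LIDAR_TOP sweep as a flat float32 buffer with five channels per point (x, y, z, intensity, ring index). A standalone sketch of decoding one sweep — the file path is hypothetical:

import numpy as np

# One nuScenes .pcd.bin sweep: packed float32 records, 5 channels per point.
points = np.fromfile("samples/LIDAR_TOP/example.pcd.bin", dtype=np.float32).reshape(-1, 5)

xyz = points[:, :3]        # positions in the sensor frame (meters)
intensity = points[:, 3]   # per-return reflectance
ring = points[:, 4]        # beam (ring) index

# Equivalently, via the index enum registered in this commit:
#   from py123d.conversion.registry.lidar_index_registry import NuscenesLidarIndex
#   heights = points[:, NuscenesLidarIndex.Z]
print(points.shape, intensity.min(), intensity.max())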
--- pyproject.toml | 12 ++ .../nuscenes/nuscenes_data_converter.py | 114 ++++++++++-------- .../datasets/nuscenes/nuscenes_sensor_io.py | 13 ++ .../registry/lidar_index_registry.py | 8 ++ .../sensor_io/lidar/file_lidar_io.py | 5 + .../config/common/default_dataset_paths.yaml | 2 + .../conversion/datasets/nuscenes_dataset.yaml | 1 - .../config/conversion/default_conversion.yaml | 2 +- 8 files changed, 102 insertions(+), 55 deletions(-) create mode 100644 src/py123d/conversion/datasets/nuscenes/nuscenes_sensor_io.py diff --git a/pyproject.toml b/pyproject.toml index 6df6dff0..5f305b9e 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -95,6 +95,18 @@ waymo = [ "waymo-open-dataset-tf-2-12-0==1.6.6", ] +nuscenes = [ + "nuscenes-devkit==1.2.0", + "pycocotools==2.0.10", + "laspy==2.6.1", + "embreex==2.17.7.post6", + "lanelet2==1.2.2", + "protobuf==4.25.3", + "pycollada==0.9.2", + "vhacdx==0.0.8.post2", + "yourdfpy==0.0.58", +] + [tool.setuptools.packages.find] where = ["src"] diff --git a/src/py123d/conversion/datasets/nuscenes/nuscenes_data_converter.py b/src/py123d/conversion/datasets/nuscenes/nuscenes_data_converter.py index eb743fb0..62ddc0d4 100644 --- a/src/py123d/conversion/datasets/nuscenes/nuscenes_data_converter.py +++ b/src/py123d/conversion/datasets/nuscenes/nuscenes_data_converter.py @@ -13,17 +13,17 @@ from nuscenes.utils.splits import create_splits_scenes from pyquaternion import Quaternion -from py123d.conversion.log_writer.abstract_log_writer import AbstractLogWriter +from py123d.conversion.log_writer.abstract_log_writer import AbstractLogWriter, LiDARData from py123d.conversion.map_writer.abstract_map_writer import AbstractMapWriter from py123d.script.builders.worker_pool_builder import WorkerPool -from py123d.datatypes.detections.detection import ( +from py123d.datatypes.detections.box_detections import ( BoxDetectionSE3, BoxDetectionWrapper, - BoxDetectionMetadata, - TrafficLightDetection, + BoxDetectionMetadata,) +from py123d.datatypes.detections.traffic_light_detections import (TrafficLightDetection, TrafficLightDetectionWrapper, ) -from py123d.datatypes.detections.detection_types import DetectionType +from py123d.datatypes.detections.box_detection_types import BoxDetectionType from py123d.datatypes.maps.map_metadata import MapMetadata from py123d.datatypes.scene.scene_metadata import LogMetadata from py123d.datatypes.sensors.camera.pinhole_camera import ( @@ -33,7 +33,7 @@ PinholeIntrinsics, ) from py123d.datatypes.sensors.lidar.lidar import LiDARMetadata, LiDARType -from py123d.conversion.utils.sensor_utils.lidar_index_registry import NuscenesLidarIndex +from py123d.datatypes.sensors.lidar.lidar_index import NuscenesLidarIndex from py123d.datatypes.vehicle_state.ego_state import DynamicStateSE3, EgoStateSE3, EgoStateSE3Index from py123d.datatypes.vehicle_state.vehicle_parameters import get_nuscenes_renauly_zoe_parameters @@ -50,37 +50,37 @@ SORT_BY_TIMESTAMP: Final[bool] = True NUSCENES_DETECTION_NAME_DICT = { # Vehicles (4+ wheels) - "vehicle.car": DetectionType.VEHICLE, - "vehicle.truck": DetectionType.VEHICLE, - "vehicle.bus.bendy": DetectionType.VEHICLE, - "vehicle.bus.rigid": DetectionType.VEHICLE, - "vehicle.construction": DetectionType.VEHICLE, - "vehicle.emergency.ambulance": DetectionType.VEHICLE, - "vehicle.emergency.police": DetectionType.VEHICLE, - "vehicle.trailer": DetectionType.VEHICLE, + "vehicle.car": BoxDetectionType.VEHICLE, + "vehicle.truck": BoxDetectionType.VEHICLE, + "vehicle.bus.bendy": BoxDetectionType.VEHICLE, + "vehicle.bus.rigid": 
BoxDetectionType.VEHICLE, + "vehicle.construction": BoxDetectionType.VEHICLE, + "vehicle.emergency.ambulance": BoxDetectionType.VEHICLE, + "vehicle.emergency.police": BoxDetectionType.VEHICLE, + "vehicle.trailer": BoxDetectionType.VEHICLE, # Bicycles / Motorcycles - "vehicle.bicycle": DetectionType.BICYCLE, - "vehicle.motorcycle": DetectionType.BICYCLE, + "vehicle.bicycle": BoxDetectionType.BICYCLE, + "vehicle.motorcycle": BoxDetectionType.BICYCLE, # Pedestrians (all subtypes) - "human.pedestrian.adult": DetectionType.PEDESTRIAN, - "human.pedestrian.child": DetectionType.PEDESTRIAN, - "human.pedestrian.construction_worker": DetectionType.PEDESTRIAN, - "human.pedestrian.personal_mobility": DetectionType.PEDESTRIAN, - "human.pedestrian.police_officer": DetectionType.PEDESTRIAN, - "human.pedestrian.stroller": DetectionType.PEDESTRIAN, - "human.pedestrian.wheelchair": DetectionType.PEDESTRIAN, + "human.pedestrian.adult": BoxDetectionType.PEDESTRIAN, + "human.pedestrian.child": BoxDetectionType.PEDESTRIAN, + "human.pedestrian.construction_worker": BoxDetectionType.PEDESTRIAN, + "human.pedestrian.personal_mobility": BoxDetectionType.PEDESTRIAN, + "human.pedestrian.police_officer": BoxDetectionType.PEDESTRIAN, + "human.pedestrian.stroller": BoxDetectionType.PEDESTRIAN, + "human.pedestrian.wheelchair": BoxDetectionType.PEDESTRIAN, # Traffic cone / barrier - "movable_object.trafficcone": DetectionType.TRAFFIC_CONE, - "movable_object.barrier": DetectionType.BARRIER, + "movable_object.trafficcone": BoxDetectionType.TRAFFIC_CONE, + "movable_object.barrier": BoxDetectionType.BARRIER, # Generic objects - "movable_object.pushable_pullable": DetectionType.GENERIC_OBJECT, - "movable_object.debris": DetectionType.GENERIC_OBJECT, - "static_object.bicycle_rack": DetectionType.GENERIC_OBJECT, - "animal": DetectionType.GENERIC_OBJECT, + "movable_object.pushable_pullable": BoxDetectionType.GENERIC_OBJECT, + "movable_object.debris": BoxDetectionType.GENERIC_OBJECT, + "static_object.bicycle_rack": BoxDetectionType.GENERIC_OBJECT, + "animal": BoxDetectionType.GENERIC_OBJECT, } NUSCENES_CAMERA_TYPES = { @@ -99,7 +99,6 @@ def __init__( self, splits: List[str], nuscenes_data_root: Union[Path, str], - nuscenes_map_root: Union[Path, str], nuscenes_lanelet2_root: Union[Path, str], use_lanelet2: bool, dataset_converter_config: DatasetConverterConfig, @@ -108,7 +107,6 @@ def __init__( super().__init__(dataset_converter_config) self._splits: List[str] = splits self._nuscenes_data_root: Path = Path(nuscenes_data_root) - self._nuscenes_map_root: Path = Path(nuscenes_map_root) self._nuscenes_lanelet2_root: Path = Path(nuscenes_lanelet2_root) self._use_lanelet2 = use_lanelet2 self._version = version @@ -165,7 +163,7 @@ def convert_map(self, map_index: int, map_writer: AbstractMapWriter) -> None: if map_needs_writing: write_nuscenes_map( - nuscenes_maps_root=self._nuscenes_map_root, + nuscenes_maps_root=self._nuscenes_data_root, location=map_name, map_writer=map_writer, use_lanelet2=self._use_lanelet2, @@ -423,7 +421,7 @@ def _extract_nuscenes_box_detections( velocity_3d = Vector3D(x=velocity[0], y=velocity[1], z=velocity[2] if len(velocity) > 2 else 0.0) metadata = BoxDetectionMetadata( - detection_type=det_type, + box_detection_type=det_type, track_token=ann["instance_token"], timepoint=TimePoint.from_us(sample["timestamp"]), confidence=1.0, # nuScenes annotations are ground truth @@ -446,9 +444,9 @@ def _extract_nuscenes_traffic_lights() -> TrafficLightDetectionWrapper: def _extract_nuscenes_cameras( - nusc: NuScenes, - 
sample: Dict[str, Any], - dataset_converter_config: DatasetConverterConfig, + nusc: NuScenes, + sample: Dict[str, Any], + dataset_converter_config: DatasetConverterConfig, ) -> Dict[PinholeCameraType, Tuple[Union[str, bytes], StateSE3]]: camera_dict: Dict[PinholeCameraType, Tuple[Union[str, bytes], StateSE3]] = {} @@ -474,7 +472,8 @@ def _extract_nuscenes_cameras( if cam_path.exists() and cam_path.is_file(): if dataset_converter_config.camera_store_option == "path": - camera_data = str(cam_path) + # TODO: should be relative path + camera_data = cam_data["filename"] elif dataset_converter_config.camera_store_option == "binary": with open(cam_path, "rb") as f: camera_data = f.read() @@ -490,8 +489,8 @@ def _extract_nuscenes_lidars( nusc: NuScenes, sample: Dict[str, Any], dataset_converter_config: DatasetConverterConfig, -) -> Dict[LiDARType, Optional[str]]: - lidar_dict: Dict[LiDARType, Optional[str]] = {} +) -> List[LiDARData]: + lidars: List[LiDARData] = [] if dataset_converter_config.include_lidars: lidar_token = sample["data"]["LIDAR_TOP"] @@ -499,12 +498,21 @@ def _extract_nuscenes_lidars( lidar_path = NUSCENES_DATA_ROOT / lidar_data["filename"] if lidar_path.exists() and lidar_path.is_file(): - lidar_dict[LiDARType.LIDAR_MERGED] = str(lidar_path) + lidar = LiDARData( + lidar_type=LiDARType.LIDAR_TOP, + relative_path=str(lidar_path), + dataset_root=NUSCENES_DATA_ROOT, + iteration=lidar_data.get("iteration"), + ) + lidars.append(lidar) else: - lidar_dict[LiDARType.LIDAR_MERGED] = None - - return lidar_dict + lidars.append(LiDARData( + lidar_type=LiDARType.LIDAR_TOP, + relative_path=None, + dataset_root=NUSCENES_DATA_ROOT, + )) + return lidars def _extract_nuscenes_scenario_tag() -> List[str]: """nuScenes doesn't have scenario tags.""" @@ -518,9 +526,9 @@ def _extract_nuscenes_route_lane_group_ids() -> List[int]: # Updated arrow conversion function using the new extraction functions def convert_nuscenes_log_to_arrow( - args: List[Dict[str, Union[str, List[str]]]], - dataset_converter_config: DatasetConverterConfig, - version: str + args: List[Dict[str, Union[str, List[str]]]], + dataset_converter_config: DatasetConverterConfig, + version: str ) -> List[Any]: for log_info in args: scene_token: str = log_info["scene_token"] @@ -593,11 +601,11 @@ def convert_nuscenes_log_to_arrow( def _write_arrow_table_with_new_interface( - nusc: NuScenes, - scene: Dict[str, Any], - recording_schema: pa.schema, - log_file_path: Path, - dataset_converter_config: DatasetConverterConfig, + nusc: NuScenes, + scene: Dict[str, Any], + recording_schema: pa.schema, + log_file_path: Path, + dataset_converter_config: DatasetConverterConfig, ) -> None: can_bus = NuScenesCanBus(dataroot=str(NUSCENES_DATA_ROOT)) @@ -635,7 +643,7 @@ def _write_arrow_table_with_new_interface( "detections_state": detections_state_list, "detections_velocity": [det.velocity.array.tolist() for det in box_detections.box_detections], "detections_token": [det.metadata.track_token for det in box_detections.box_detections], - "detections_type": [det.metadata.detection_type.value for det in box_detections.box_detections], + "detections_type": [det.metadata.box_detection_type.value for det in box_detections.box_detections], "traffic_light_ids": [], "traffic_light_types": [], "scenario_tag": ["unknown"], @@ -668,4 +676,4 @@ def _write_arrow_table_with_new_interface( if SORT_BY_TIMESTAMP: recording_table = open_arrow_table(log_file_path) recording_table = recording_table.sort_by([("timestamp", "ascending")]) - write_arrow_table(recording_table, 
log_file_path) \ No newline at end of file + write_arrow_table(recording_table, log_file_path) diff --git a/src/py123d/conversion/datasets/nuscenes/nuscenes_sensor_io.py b/src/py123d/conversion/datasets/nuscenes/nuscenes_sensor_io.py new file mode 100644 index 00000000..82f1ac72 --- /dev/null +++ b/src/py123d/conversion/datasets/nuscenes/nuscenes_sensor_io.py @@ -0,0 +1,13 @@ +import numpy as np +from pathlib import Path +from typing import Dict +from py123d.datatypes.sensors.lidar.lidar import LiDARType + +def load_nuscenes_lidar_pcs_from_file(pcd_path: Path) -> Dict[LiDARType, np.ndarray]: + points = np.fromfile(pcd_path, dtype=np.float32).reshape(-1, 5) + + lidar_pcs_dict: Dict[LiDARType, np.ndarray] = { + LiDARType.LIDAR_TOP: points + } + + return lidar_pcs_dict diff --git a/src/py123d/conversion/registry/lidar_index_registry.py b/src/py123d/conversion/registry/lidar_index_registry.py index 50f5f8a3..c5213d2a 100644 --- a/src/py123d/conversion/registry/lidar_index_registry.py +++ b/src/py123d/conversion/registry/lidar_index_registry.py @@ -82,3 +82,11 @@ class PandasetLidarIndex(LiDARIndex): Y = 1 Z = 2 INTENSITY = 3 + +@register_lidar_index +class NuscenesLidarIndex(LiDARIndex): + X = 0 + Y = 1 + Z = 2 + INTENSITY = 3 + RING = 4 diff --git a/src/py123d/conversion/sensor_io/lidar/file_lidar_io.py b/src/py123d/conversion/sensor_io/lidar/file_lidar_io.py index edc5c7d5..83d4da21 100644 --- a/src/py123d/conversion/sensor_io/lidar/file_lidar_io.py +++ b/src/py123d/conversion/sensor_io/lidar/file_lidar_io.py @@ -15,6 +15,7 @@ "av2-sensor": DATASET_PATHS.av2_sensor_data_root, "wopd": DATASET_PATHS.wopd_data_root, "pandaset": DATASET_PATHS.pandaset_data_root, + "nuscenes": DATASET_PATHS.nuscenes_sensor_root, } @@ -56,6 +57,10 @@ def load_lidar_pcs_from_file( from py123d.conversion.datasets.pandaset.pandaset_sensor_io import load_pandaset_lidars_pcs_from_file lidar_pcs_dict = load_pandaset_lidars_pcs_from_file(full_lidar_path, index) + + elif log_metadata.dataset == "nuscenes": + from py123d.conversion.datasets.nuscenes.nuscenes_sensor_io import load_nuscenes_lidar_pcs_from_file + lidar_pcs_dict = load_nuscenes_lidar_pcs_from_file(full_lidar_path) else: raise NotImplementedError(f"Loading LiDAR data for dataset {log_metadata.dataset} is not implemented.") diff --git a/src/py123d/script/config/common/default_dataset_paths.yaml b/src/py123d/script/config/common/default_dataset_paths.yaml index c6a57f75..64e0a8c0 100644 --- a/src/py123d/script/config/common/default_dataset_paths.yaml +++ b/src/py123d/script/config/common/default_dataset_paths.yaml @@ -23,3 +23,5 @@ dataset_paths: # nuScenes defaults nuscenes_data_root: ${oc.env:NUSCENES_DATA_ROOT,null} + nuscenes_map_root: ${dataset_paths.nuscenes_data_root} + nuscenes_sensor_root: ${dataset_paths.nuscenes_data_root}/samples/LIDAR_TOP \ No newline at end of file diff --git a/src/py123d/script/config/conversion/datasets/nuscenes_dataset.yaml b/src/py123d/script/config/conversion/datasets/nuscenes_dataset.yaml index 3e3a8b92..410b910e 100644 --- a/src/py123d/script/config/conversion/datasets/nuscenes_dataset.yaml +++ b/src/py123d/script/config/conversion/datasets/nuscenes_dataset.yaml @@ -4,7 +4,6 @@ nuscenes_dataset: splits: ["nuscenes_train", "nuscenes_val", "nuscenes_test"] nuscenes_data_root: ${dataset_paths.nuscenes_data_root} - nuscenes_map_root: ${dataset_paths.nuscenes_data_root} nuscenes_lanelet2_root: ${dataset_paths.nuscenes_data_root}/lanelet2 use_lanelet2: False diff --git a/src/py123d/script/config/conversion/default_conversion.yaml 
b/src/py123d/script/config/conversion/default_conversion.yaml index aecfa9b7..daa55f12 100644 --- a/src/py123d/script/config/conversion/default_conversion.yaml +++ b/src/py123d/script/config/conversion/default_conversion.yaml @@ -22,5 +22,5 @@ defaults: terminate_on_exception: True -force_map_conversion: False +force_map_conversion: True force_log_conversion: True From c42b7388f3754bd9c9fa28729a0eb341e0dce852 Mon Sep 17 00:00:00 2001 From: Daniel Dauner Date: Thu, 30 Oct 2025 20:31:16 +0100 Subject: [PATCH 129/145] Add some minor changes and refactorings. --- notebooks/bev_matplotlib.ipynb | 14 +- pyproject.toml | 15 +- .../conversion/nuscenes_mini_conversion.sh | 3 + .../datasets/nuscenes/nuscenes_constants.py | 60 +++ ...ata_converter.py => nuscenes_converter.py} | 416 ++++-------------- .../nuscenes/nuscenes_map_conversion.py | 155 ++++--- .../datasets/nuscenes/nuscenes_sensor_io.py | 27 +- .../registry/lidar_index_registry.py | 15 +- .../sensor_io/lidar/file_lidar_io.py | 7 +- .../scene/arrow/utils/arrow_getters.py | 4 +- .../datatypes/sensors/lidar/lidar_index.py | 13 +- .../vehicle_state/vehicle_parameters.py | 9 +- .../config/common/default_dataset_paths.yaml | 2 +- .../conversion/datasets/nuscenes_dataset.yaml | 10 +- .../datasets/nuscenes_mini_dataset.yaml | 36 ++ test_viser.py | 13 +- 16 files changed, 351 insertions(+), 448 deletions(-) create mode 100644 scripts/conversion/nuscenes_mini_conversion.sh create mode 100644 src/py123d/conversion/datasets/nuscenes/nuscenes_constants.py rename src/py123d/conversion/datasets/nuscenes/{nuscenes_data_converter.py => nuscenes_converter.py} (50%) create mode 100644 src/py123d/script/config/conversion/datasets/nuscenes_mini_dataset.yaml diff --git a/notebooks/bev_matplotlib.ipynb b/notebooks/bev_matplotlib.ipynb index bddd36d6..bd2dd882 100644 --- a/notebooks/bev_matplotlib.ipynb +++ b/notebooks/bev_matplotlib.ipynb @@ -24,7 +24,7 @@ "\n", "# splits = [\"wopd_val\"]\n", "# splits = [\"carla_test\"]\n", - "splits = [\"nuplan-mini_test\"]\n", + "splits = [\"nuscenes-mini_val\", \"nuscenes-mini_train\"]\n", "# splits = [\"av2-sensor-mini_train\"]\n", "# splits = [\"pandaset_train\"]\n", "# log_names = None\n", @@ -38,7 +38,7 @@ " split_names=splits,\n", " log_names=log_names,\n", " scene_uuids=scene_uuids,\n", - " duration_s=6.0,\n", + " duration_s=None,\n", " history_s=0.0,\n", " timestamp_threshold_s=20,\n", " shuffle=True,\n", @@ -144,8 +144,8 @@ " route_lane_group_ids: Optional[List[int]] = None,\n", ") -> None:\n", " layers: List[MapLayer] = [\n", - " # MapLayer.LANE,\n", - " # MapLayer.LANE_GROUP,\n", + " MapLayer.LANE,\n", + " MapLayer.LANE_GROUP,\n", " # MapLayer.GENERIC_DRIVABLE,\n", " # MapLayer.CARPARK,\n", " # MapLayer.CROSSWALK,\n", @@ -214,11 +214,11 @@ " point_2d = ego_vehicle_state.bounding_box.center.state_se2.point_2d\n", " if map_api is not None:\n", " add_debug_map_on_ax(ax, scene.get_map_api(), point_2d, radius=radius, route_lane_group_ids=None)\n", - " add_default_map_on_ax(ax, map_api, point_2d, radius=radius, route_lane_group_ids=None)\n", + " # add_default_map_on_ax(ax, map_api, point_2d, radius=radius, route_lane_group_ids=None)\n", " # add_traffic_lights_to_ax(ax, traffic_light_detections, scene.get_map_api())\n", "\n", - " # add_box_detections_to_ax(ax, box_detections)\n", - " # add_ego_vehicle_to_ax(ax, ego_vehicle_state)\n", + " add_box_detections_to_ax(ax, box_detections)\n", + " add_ego_vehicle_to_ax(ax, ego_vehicle_state)\n", "\n", " zoom = 1.0\n", " ax.set_xlim(point_2d.x - radius * zoom, point_2d.x + radius * 
zoom)\n", diff --git a/pyproject.toml b/pyproject.toml index 5f305b9e..9284be71 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -89,13 +89,11 @@ nuplan = [ "guppy3", "retry", ] -waymo = [ - "protobuf==6.30.2", - "tensorflow==2.13.0", - "waymo-open-dataset-tf-2-12-0==1.6.6", -] - nuscenes = [ + "lanelet2", + "nuscenes-devkit==1.2.0", +] +nuscenes_expanded = [ "nuscenes-devkit==1.2.0", "pycocotools==2.0.10", "laspy==2.6.1", @@ -106,6 +104,11 @@ nuscenes = [ "vhacdx==0.0.8.post2", "yourdfpy==0.0.58", ] +waymo = [ + "protobuf==6.30.2", + "tensorflow==2.13.0", + "waymo-open-dataset-tf-2-12-0==1.6.6", +] [tool.setuptools.packages.find] where = ["src"] diff --git a/scripts/conversion/nuscenes_mini_conversion.sh b/scripts/conversion/nuscenes_mini_conversion.sh new file mode 100644 index 00000000..b6d35aa4 --- /dev/null +++ b/scripts/conversion/nuscenes_mini_conversion.sh @@ -0,0 +1,3 @@ +export NUSCENES_DATA_ROOT="/home/daniel/nuscenes_mini/" + +py123d-conversion datasets=["nuscenes_mini_dataset"] diff --git a/src/py123d/conversion/datasets/nuscenes/nuscenes_constants.py b/src/py123d/conversion/datasets/nuscenes/nuscenes_constants.py new file mode 100644 index 00000000..dd04d91a --- /dev/null +++ b/src/py123d/conversion/datasets/nuscenes/nuscenes_constants.py @@ -0,0 +1,60 @@ +import os +from pathlib import Path +from typing import Final, List + +from py123d.datatypes.detections.box_detection_types import BoxDetectionType +from py123d.datatypes.sensors.camera.pinhole_camera import PinholeCameraType + +NUSCENES_MAPS: List[str] = ["boston-seaport", "singapore-hollandvillage", "singapore-onenorth", "singapore-queenstown"] + +NUSCENES_DATA_SPLITS: Final[List[str]] = [ + "nuscenes_train", + "nuscenes_val", + "nuscenes_test", + "nuscenes-mini_train", + "nuscenes-mini_val", +] + +TARGET_DT: Final[float] = 0.1 +NUSCENES_DT: Final[float] = 0.5 +SORT_BY_TIMESTAMP: Final[bool] = True +NUSCENES_DETECTION_NAME_DICT = { + # Vehicles (4+ wheels) + "vehicle.car": BoxDetectionType.VEHICLE, + "vehicle.truck": BoxDetectionType.VEHICLE, + "vehicle.bus.bendy": BoxDetectionType.VEHICLE, + "vehicle.bus.rigid": BoxDetectionType.VEHICLE, + "vehicle.construction": BoxDetectionType.VEHICLE, + "vehicle.emergency.ambulance": BoxDetectionType.VEHICLE, + "vehicle.emergency.police": BoxDetectionType.VEHICLE, + "vehicle.trailer": BoxDetectionType.VEHICLE, + # Bicycles / Motorcycles + "vehicle.bicycle": BoxDetectionType.BICYCLE, + "vehicle.motorcycle": BoxDetectionType.BICYCLE, + # Pedestrians (all subtypes) + "human.pedestrian.adult": BoxDetectionType.PEDESTRIAN, + "human.pedestrian.child": BoxDetectionType.PEDESTRIAN, + "human.pedestrian.construction_worker": BoxDetectionType.PEDESTRIAN, + "human.pedestrian.personal_mobility": BoxDetectionType.PEDESTRIAN, + "human.pedestrian.police_officer": BoxDetectionType.PEDESTRIAN, + "human.pedestrian.stroller": BoxDetectionType.PEDESTRIAN, + "human.pedestrian.wheelchair": BoxDetectionType.PEDESTRIAN, + # Traffic cone / barrier + "movable_object.trafficcone": BoxDetectionType.TRAFFIC_CONE, + "movable_object.barrier": BoxDetectionType.BARRIER, + # Generic objects + "movable_object.pushable_pullable": BoxDetectionType.GENERIC_OBJECT, + "movable_object.debris": BoxDetectionType.GENERIC_OBJECT, + "static_object.bicycle_rack": BoxDetectionType.GENERIC_OBJECT, + "animal": BoxDetectionType.GENERIC_OBJECT, +} + +NUSCENES_CAMERA_TYPES = { + PinholeCameraType.CAM_F0: "CAM_FRONT", + PinholeCameraType.CAM_B0: "CAM_BACK", + PinholeCameraType.CAM_L0: "CAM_FRONT_LEFT", + PinholeCameraType.CAM_L1: 
"CAM_BACK_LEFT", + PinholeCameraType.CAM_R0: "CAM_FRONT_RIGHT", + PinholeCameraType.CAM_R1: "CAM_BACK_RIGHT", +} +NUSCENES_DATA_ROOT = Path(os.environ["NUSCENES_DATA_ROOT"]) diff --git a/src/py123d/conversion/datasets/nuscenes/nuscenes_data_converter.py b/src/py123d/conversion/datasets/nuscenes/nuscenes_converter.py similarity index 50% rename from src/py123d/conversion/datasets/nuscenes/nuscenes_data_converter.py rename to src/py123d/conversion/datasets/nuscenes/nuscenes_converter.py index 62ddc0d4..483a6bc4 100644 --- a/src/py123d/conversion/datasets/nuscenes/nuscenes_data_converter.py +++ b/src/py123d/conversion/datasets/nuscenes/nuscenes_converter.py @@ -1,29 +1,25 @@ import gc -import json -import os -import numpy as np -import pyarrow as pa - -from dataclasses import asdict from pathlib import Path -from typing import Any, Dict, Final, List, Optional, Tuple, Union -from nuscenes import NuScenes -from nuscenes.can_bus.can_bus_api import NuScenesCanBus -from nuscenes.utils.data_classes import Box -from nuscenes.utils.splits import create_splits_scenes +from typing import Any, Dict, List, Tuple, Union + +import numpy as np from pyquaternion import Quaternion +from py123d.common.utils.dependencies import check_dependencies +from py123d.conversion.abstract_dataset_converter import AbstractDatasetConverter +from py123d.conversion.dataset_converter_config import DatasetConverterConfig +from py123d.conversion.datasets.nuplan.nuplan_converter import TARGET_DT +from py123d.conversion.datasets.nuscenes.nuscenes_constants import ( + NUSCENES_CAMERA_TYPES, + NUSCENES_DATA_ROOT, + NUSCENES_DATA_SPLITS, + NUSCENES_DETECTION_NAME_DICT, + NUSCENES_DT, +) +from py123d.conversion.datasets.nuscenes.nuscenes_map_conversion import NUSCENES_MAPS, write_nuscenes_map from py123d.conversion.log_writer.abstract_log_writer import AbstractLogWriter, LiDARData from py123d.conversion.map_writer.abstract_map_writer import AbstractMapWriter -from py123d.script.builders.worker_pool_builder import WorkerPool -from py123d.datatypes.detections.box_detections import ( - BoxDetectionSE3, - BoxDetectionWrapper, - BoxDetectionMetadata,) -from py123d.datatypes.detections.traffic_light_detections import (TrafficLightDetection, - TrafficLightDetectionWrapper, -) -from py123d.datatypes.detections.box_detection_types import BoxDetectionType +from py123d.datatypes.detections.box_detections import BoxDetectionMetadata, BoxDetectionSE3, BoxDetectionWrapper from py123d.datatypes.maps.map_metadata import MapMetadata from py123d.datatypes.scene.scene_metadata import LogMetadata from py123d.datatypes.sensors.camera.pinhole_camera import ( @@ -33,119 +29,75 @@ PinholeIntrinsics, ) from py123d.datatypes.sensors.lidar.lidar import LiDARMetadata, LiDARType -from py123d.datatypes.sensors.lidar.lidar_index import NuscenesLidarIndex - -from py123d.datatypes.vehicle_state.ego_state import DynamicStateSE3, EgoStateSE3, EgoStateSE3Index -from py123d.datatypes.vehicle_state.vehicle_parameters import get_nuscenes_renauly_zoe_parameters -from py123d.geometry import StateSE3, BoundingBoxSE3, BoundingBoxSE3Index -from py123d.geometry.vector import Vector3D, Vector3DIndex -from py123d.common.utils.arrow_helper import open_arrow_table, write_arrow_table -from py123d.conversion.datasets.nuscenes.nuscenes_map_conversion import write_nuscenes_map, NUSCENES_MAPS -from py123d.conversion.abstract_dataset_converter import AbstractDatasetConverter -from py123d.conversion.dataset_converter_config import DatasetConverterConfig +from 
py123d.datatypes.sensors.lidar.lidar_index import NuScenesLidarIndex from py123d.datatypes.time.time_point import TimePoint +from py123d.datatypes.vehicle_state.ego_state import DynamicStateSE3, EgoStateSE3 +from py123d.datatypes.vehicle_state.vehicle_parameters import get_nuscenes_renault_zoe_parameters +from py123d.geometry import BoundingBoxSE3, StateSE3 +from py123d.geometry.vector import Vector3D + +check_dependencies(["nuscenes"], "nuscenes") +from nuscenes import NuScenes +from nuscenes.can_bus.can_bus_api import NuScenesCanBus +from nuscenes.utils.data_classes import Box +from nuscenes.utils.splits import create_splits_scenes -TARGET_DT: Final[float] = 0.1 -NUSCENES_DT: Final[float] = 0.5 -SORT_BY_TIMESTAMP: Final[bool] = True -NUSCENES_DETECTION_NAME_DICT = { - # Vehicles (4+ wheels) - "vehicle.car": BoxDetectionType.VEHICLE, - "vehicle.truck": BoxDetectionType.VEHICLE, - "vehicle.bus.bendy": BoxDetectionType.VEHICLE, - "vehicle.bus.rigid": BoxDetectionType.VEHICLE, - "vehicle.construction": BoxDetectionType.VEHICLE, - "vehicle.emergency.ambulance": BoxDetectionType.VEHICLE, - "vehicle.emergency.police": BoxDetectionType.VEHICLE, - "vehicle.trailer": BoxDetectionType.VEHICLE, - - # Bicycles / Motorcycles - "vehicle.bicycle": BoxDetectionType.BICYCLE, - "vehicle.motorcycle": BoxDetectionType.BICYCLE, - - # Pedestrians (all subtypes) - "human.pedestrian.adult": BoxDetectionType.PEDESTRIAN, - "human.pedestrian.child": BoxDetectionType.PEDESTRIAN, - "human.pedestrian.construction_worker": BoxDetectionType.PEDESTRIAN, - "human.pedestrian.personal_mobility": BoxDetectionType.PEDESTRIAN, - "human.pedestrian.police_officer": BoxDetectionType.PEDESTRIAN, - "human.pedestrian.stroller": BoxDetectionType.PEDESTRIAN, - "human.pedestrian.wheelchair": BoxDetectionType.PEDESTRIAN, - - # Traffic cone / barrier - "movable_object.trafficcone": BoxDetectionType.TRAFFIC_CONE, - "movable_object.barrier": BoxDetectionType.BARRIER, - - # Generic objects - "movable_object.pushable_pullable": BoxDetectionType.GENERIC_OBJECT, - "movable_object.debris": BoxDetectionType.GENERIC_OBJECT, - "static_object.bicycle_rack": BoxDetectionType.GENERIC_OBJECT, - "animal": BoxDetectionType.GENERIC_OBJECT, -} - -NUSCENES_CAMERA_TYPES = { - PinholeCameraType.CAM_F0: "CAM_FRONT", - PinholeCameraType.CAM_B0: "CAM_BACK", - PinholeCameraType.CAM_L0: "CAM_FRONT_LEFT", - PinholeCameraType.CAM_L1: "CAM_BACK_LEFT", - PinholeCameraType.CAM_R0: "CAM_FRONT_RIGHT", - PinholeCameraType.CAM_R1: "CAM_BACK_RIGHT", -} -NUSCENES_DATA_ROOT = Path(os.environ["NUSCENES_DATA_ROOT"]) - - -class NuScenesDataConverter(AbstractDatasetConverter): + +class NuScenesConverter(AbstractDatasetConverter): def __init__( - self, - splits: List[str], - nuscenes_data_root: Union[Path, str], - nuscenes_lanelet2_root: Union[Path, str], - use_lanelet2: bool, - dataset_converter_config: DatasetConverterConfig, - version: str = "v1.0-trainval", + self, + splits: List[str], + nuscenes_data_root: Union[Path, str], + nuscenes_map_root: Union[Path, str], + nuscenes_lanelet2_root: Union[Path, str], + use_lanelet2: bool, + dataset_converter_config: DatasetConverterConfig, + version: str = "v1.0-mini", ) -> None: super().__init__(dataset_converter_config) + + for split in splits: + assert ( + split in NUSCENES_DATA_SPLITS + ), f"Split {split} is not available. 
Available splits: {NUSCENES_DATA_SPLITS}" + self._splits: List[str] = splits + self._nuscenes_data_root: Path = Path(nuscenes_data_root) + self._nuscenes_map_root: Path = Path(nuscenes_map_root) self._nuscenes_lanelet2_root: Path = Path(nuscenes_lanelet2_root) + self._use_lanelet2 = use_lanelet2 self._version = version self._scene_tokens_per_split: Dict[str, List[str]] = self._collect_scene_tokens() self._target_dt: float = TARGET_DT def _collect_scene_tokens(self) -> Dict[str, List[str]]: + scene_tokens_per_split: Dict[str, List[str]] = {} nusc = NuScenes(version=self._version, dataroot=str(self._nuscenes_data_root), verbose=False) + nuscenes_split_name_mapping = { + "nuscenes_train": "train", + "nuscenes_val": "val", + "nuscenes_test": "test", + "nuscenes-mini_train": "mini_train", + "nuscenes-mini_val": "mini_val", + } + scene_splits = create_splits_scenes() available_scenes = [scene for scene in nusc.scene] for split in self._splits: - # Map the split name to the division of nuScenes - nusc_split = split.replace("nuscenes_", "") - if nusc_split == "trainval": - scene_names = scene_splits['train'] + scene_splits['val'] - else: - scene_names = scene_splits.get(nusc_split, []) + nuscenes_split = nuscenes_split_name_mapping[split] + scene_names = scene_splits.get(nuscenes_split, []) # get token - scene_tokens = [ - scene['token'] for scene in available_scenes - if scene['name'] in scene_names - ] + scene_tokens = [scene["token"] for scene in available_scenes if scene["name"] in scene_names] scene_tokens_per_split[split] = scene_tokens return scene_tokens_per_split - def get_available_splits(self) -> List[str]: - return [ - "nuscenes_train", - "nuscenes_val", - "nuscenes_test", - "nuscenes_mini_train", - "nuscenes_mini_val", - ] - def get_number_of_maps(self) -> int: """Inherited, see superclass.""" return len(NUSCENES_MAPS) @@ -163,7 +115,7 @@ def convert_map(self, map_index: int, map_writer: AbstractMapWriter) -> None: if map_needs_writing: write_nuscenes_map( - nuscenes_maps_root=self._nuscenes_data_root, + nuscenes_maps_root=self._nuscenes_map_root, location=map_name, map_writer=map_writer, use_lanelet2=self._use_lanelet2, @@ -195,7 +147,7 @@ def convert_log(self, log_index: int, log_writer: AbstractLogWriter) -> None: log_name=scene["name"], location=log_record["location"], timestep_seconds=TARGET_DT, - vehicle_parameters=get_nuscenes_renauly_zoe_parameters(), + vehicle_parameters=get_nuscenes_renault_zoe_parameters(), camera_metadata=_get_nuscenes_camera_metadata(nusc, scene, self.dataset_converter_config), lidar_metadata=_get_nuscenes_lidar_metadata(nusc, scene, self.dataset_converter_config), map_metadata=_get_nuscenes_map_metadata(log_record["location"]), @@ -220,7 +172,6 @@ def convert_log(self, log_index: int, log_writer: AbstractLogWriter) -> None: timestamp=TimePoint.from_us(sample["timestamp"]), ego_state=_extract_nuscenes_ego_state(nusc, sample, can_bus), box_detections=_extract_nuscenes_box_detections(nusc, sample), - traffic_lights=_extract_nuscenes_traffic_lights(), # nuScenes doesn't have traffic lights cameras=_extract_nuscenes_cameras( nusc=nusc, sample=sample, @@ -231,9 +182,6 @@ def convert_log(self, log_index: int, log_writer: AbstractLogWriter) -> None: sample=sample, dataset_converter_config=self.dataset_converter_config, ), - scenario_tags=_extract_nuscenes_scenario_tag(), # nuScenes doesn't have scenario tags - route_lane_group_ids=_extract_nuscenes_route_lane_group_ids(), - # nuScenes doesn't have route info ) sample_token = sample["next"] @@ -243,18 +191,11 @@ 
def convert_log(self, log_index: int, log_writer: AbstractLogWriter) -> None: del nusc gc.collect() - def convert_logs(self, worker: WorkerPool) -> None: - """ - NuScenes logs conversion is handled externally through convert_log method. - This method is kept for interface compatibility. - """ - pass - def _get_nuscenes_camera_metadata( - nusc: NuScenes, - scene: Dict[str, Any], - dataset_converter_config: DatasetConverterConfig, + nusc: NuScenes, + scene: Dict[str, Any], + dataset_converter_config: DatasetConverterConfig, ) -> Dict[PinholeCameraType, PinholeCameraMetadata]: camera_metadata: Dict[PinholeCameraType, PinholeCameraMetadata] = {} @@ -283,9 +224,9 @@ def _get_nuscenes_camera_metadata( def _get_nuscenes_lidar_metadata( - nusc: NuScenes, - scene: Dict[str, Any], - dataset_converter_config: DatasetConverterConfig, + nusc: NuScenes, + scene: Dict[str, Any], + dataset_converter_config: DatasetConverterConfig, ) -> Dict[LiDARType, LiDARMetadata]: metadata: Dict[LiDARType, LiDARMetadata] = {} @@ -303,9 +244,9 @@ def _get_nuscenes_lidar_metadata( extrinsic[:3, 3] = translation extrinsic = StateSE3.from_transformation_matrix(extrinsic) - metadata[LiDARType.LIDAR_MERGED] = LiDARMetadata( - lidar_type=LiDARType.LIDAR_MERGED, - lidar_index=NuscenesLidarIndex, + metadata[LiDARType.LIDAR_TOP] = LiDARMetadata( + lidar_type=LiDARType.LIDAR_TOP, + lidar_index=NuScenesLidarIndex, extrinsic=extrinsic, ) @@ -329,8 +270,8 @@ def _extract_nuscenes_ego_state(nusc, sample, can_bus) -> EgoStateSE3: quat = Quaternion(ego_pose["rotation"]) - vehicle_parameters = get_nuscenes_renauly_zoe_parameters() - pose = StateSE3( + vehicle_parameters = get_nuscenes_renault_zoe_parameters() + imu_pose = StateSE3( x=ego_pose["translation"][0], y=ego_pose["translation"][1], z=ego_pose["translation"][2], @@ -344,12 +285,12 @@ def _extract_nuscenes_ego_state(nusc, sample, can_bus) -> EgoStateSE3: try: pose_msgs = can_bus.get_messages(scene_name, "pose") - except Exception as e: + except Exception: pose_msgs = [] if pose_msgs: closest_msg = None - min_time_diff = float('inf') + min_time_diff = float("inf") for msg in pose_msgs: time_diff = abs(msg["utime"] - sample["timestamp"]) if time_diff < min_time_diff: @@ -371,18 +312,21 @@ def _extract_nuscenes_ego_state(nusc, sample, can_bus) -> EgoStateSE3: angular_velocity=Vector3D(*angular_velocity), ) - return EgoStateSE3( - center_se3=pose, + # return EgoStateSE3( + # center_se3=pose, + # dynamic_state_se3=dynamic_state, + # vehicle_parameters=vehicle_parameters, + # timepoint=TimePoint.from_us(sample["timestamp"]), + # ) + return EgoStateSE3.from_rear_axle( + rear_axle_se3=imu_pose, dynamic_state_se3=dynamic_state, vehicle_parameters=vehicle_parameters, - timepoint=TimePoint.from_us(sample["timestamp"]), + time_point=TimePoint.from_us(sample["timestamp"]), ) -def _extract_nuscenes_box_detections( - nusc: NuScenes, - sample: Dict[str, Any] -) -> BoxDetectionWrapper: +def _extract_nuscenes_box_detections(nusc: NuScenes, sample: Dict[str, Any]) -> BoxDetectionWrapper: box_detections: List[BoxDetectionSE3] = [] for ann_token in sample["anns"]: @@ -438,11 +382,6 @@ def _extract_nuscenes_box_detections( return BoxDetectionWrapper(box_detections=box_detections) -def _extract_nuscenes_traffic_lights() -> TrafficLightDetectionWrapper: - """nuScenes doesn't have traffic light information.""" - return TrafficLightDetectionWrapper(traffic_light_detections=[]) - - def _extract_nuscenes_cameras( nusc: NuScenes, sample: Dict[str, Any], @@ -472,8 +411,7 @@ def _extract_nuscenes_cameras( if 
cam_path.exists() and cam_path.is_file(): if dataset_converter_config.camera_store_option == "path": - # TODO: should be relative path - camera_data = cam_data["filename"] + camera_data = str(cam_path) elif dataset_converter_config.camera_store_option == "binary": with open(cam_path, "rb") as f: camera_data = f.read() @@ -486,194 +424,24 @@ def _extract_nuscenes_cameras( def _extract_nuscenes_lidars( - nusc: NuScenes, - sample: Dict[str, Any], - dataset_converter_config: DatasetConverterConfig, + nusc: NuScenes, + sample: Dict[str, Any], + dataset_converter_config: DatasetConverterConfig, ) -> List[LiDARData]: lidars: List[LiDARData] = [] if dataset_converter_config.include_lidars: lidar_token = sample["data"]["LIDAR_TOP"] lidar_data = nusc.get("sample_data", lidar_token) - lidar_path = NUSCENES_DATA_ROOT / lidar_data["filename"] + absolute_lidar_path = NUSCENES_DATA_ROOT / lidar_data["filename"] - if lidar_path.exists() and lidar_path.is_file(): + if absolute_lidar_path.exists() and absolute_lidar_path.is_file(): lidar = LiDARData( - lidar_type=LiDARType.LIDAR_TOP, - relative_path=str(lidar_path), - dataset_root=NUSCENES_DATA_ROOT, + lidar_type=LiDARType.LIDAR_MERGED, + relative_path=absolute_lidar_path.relative_to(NUSCENES_DATA_ROOT), + dataset_root=NUSCENES_DATA_ROOT, iteration=lidar_data.get("iteration"), ) lidars.append(lidar) - else: - lidars.append(LiDARData( - lidar_type=LiDARType.LIDAR_TOP, - relative_path=None, - dataset_root=NUSCENES_DATA_ROOT, - )) return lidars - -def _extract_nuscenes_scenario_tag() -> List[str]: - """nuScenes doesn't have scenario tags.""" - return ["unknown"] - - -def _extract_nuscenes_route_lane_group_ids() -> List[int]: - """nuScenes doesn't have route lane group information.""" - return [] - - -# Updated arrow conversion function using the new extraction functions -def convert_nuscenes_log_to_arrow( - args: List[Dict[str, Union[str, List[str]]]], - dataset_converter_config: DatasetConverterConfig, - version: str -) -> List[Any]: - for log_info in args: - scene_token: str = log_info["scene_token"] - split: str = log_info["split"] - - nusc = NuScenes(version=version, dataroot=str(NUSCENES_DATA_ROOT), verbose=False) - scene = nusc.get("scene", scene_token) - - log_file_path = dataset_converter_config.output_path / split / f"{scene_token}.arrow" - - if dataset_converter_config.force_log_conversion or not log_file_path.exists(): - if not log_file_path.parent.exists(): - log_file_path.parent.mkdir(parents=True, exist_ok=True) - - # Define schema - schema_column_list = [ - ("token", pa.string()), - ("timestamp", pa.int64()), - ("ego_state", pa.list_(pa.float64(), len(EgoStateSE3Index))), - ("detections_state", pa.list_(pa.list_(pa.float64(), len(BoundingBoxSE3Index)))), - ("detections_velocity", pa.list_(pa.list_(pa.float64(), len(Vector3DIndex)))), - ("detections_token", pa.list_(pa.string())), - ("detections_type", pa.list_(pa.int16())), - ("traffic_light_ids", pa.list_(pa.int64())), - ("traffic_light_types", pa.list_(pa.int16())), - ("scenario_tag", pa.list_(pa.string())), - ("route_lane_group_ids", pa.list_(pa.int64())), - ] - - if dataset_converter_config.lidar_store_option == "path": - schema_column_list.append(("lidar", pa.string())) - - if dataset_converter_config.camera_store_option == "path": - for camera_type in NUSCENES_CAMERA_TYPES.keys(): - schema_column_list.append((camera_type.serialize(), pa.string())) - schema_column_list.append((f"{camera_type.serialize()}_extrinsic", pa.list_(pa.float64(), 4 * 4))) - - recording_schema = 
pa.schema(schema_column_list) - - log_record = nusc.get("log", scene["log_token"]) - location = log_record["location"] - - # Create metadata using the same functions as the new interface - metadata = LogMetadata( - dataset="nuscenes", - split=split, - log_name=scene["name"], - location=location, - timestep_seconds=TARGET_DT, - vehicle_parameters=get_nuscenes_renauly_zoe_parameters(), - camera_metadata=_get_nuscenes_camera_metadata(nusc, scene, dataset_converter_config), - lidar_metadata=_get_nuscenes_lidar_metadata(nusc, scene, dataset_converter_config), - map_metadata=_get_nuscenes_map_metadata(location), - ) - - recording_schema = recording_schema.with_metadata( - { - "log_metadata": json.dumps(asdict(metadata)), - } - ) - - _write_arrow_table_with_new_interface( - nusc, scene, recording_schema, log_file_path, dataset_converter_config - ) - - del nusc - gc.collect() - - return [] - - -def _write_arrow_table_with_new_interface( - nusc: NuScenes, - scene: Dict[str, Any], - recording_schema: pa.schema, - log_file_path: Path, - dataset_converter_config: DatasetConverterConfig, -) -> None: - can_bus = NuScenesCanBus(dataroot=str(NUSCENES_DATA_ROOT)) - - with pa.OSFile(str(log_file_path), "wb") as sink: - with pa.ipc.new_file(sink, recording_schema) as writer: - step_interval = max(1, int(TARGET_DT / NUSCENES_DT)) - sample_count = 0 - - sample_token = scene["first_sample_token"] - while sample_token: - if sample_count % step_interval == 0: - sample = nusc.get("sample", sample_token) - - # Use the new extraction functions for consistency - ego_state = _extract_nuscenes_ego_state(nusc, sample, can_bus) - box_detections = _extract_nuscenes_box_detections(nusc, sample) - cameras = _extract_nuscenes_cameras(nusc, sample, dataset_converter_config) - lidars = _extract_nuscenes_lidars(nusc, sample, dataset_converter_config) - - detections_state_list = [] - for det in box_detections.box_detections: - bbox_array = det.bounding_box_se3.array - - print(f"bbox_array shape: {bbox_array.shape}, ndim: {bbox_array.ndim}") - if bbox_array.ndim > 1: - detections_state_list.append(bbox_array.flatten().tolist()) - else: - detections_state_list.append(bbox_array.tolist()) - - # Prepare row data - row_data = { - "token": [sample_token], - "timestamp": [sample["timestamp"]], - "ego_state": ego_state.array.flatten().tolist(), - "detections_state": detections_state_list, - "detections_velocity": [det.velocity.array.tolist() for det in box_detections.box_detections], - "detections_token": [det.metadata.track_token for det in box_detections.box_detections], - "detections_type": [det.metadata.box_detection_type.value for det in box_detections.box_detections], - "traffic_light_ids": [], - "traffic_light_types": [], - "scenario_tag": ["unknown"], - "route_lane_group_ids": [], - } - - # Add lidar data if configured - if dataset_converter_config.lidar_store_option == "path": - row_data["lidar"] = [lidars.get(LiDARType.LIDAR_MERGED, None)] - - # Add camera data if configured - if dataset_converter_config.camera_store_option == "path": - for camera_type in NUSCENES_CAMERA_TYPES.keys(): - if camera_type in cameras: - camera_path, extrinsic = cameras[camera_type] - row_data[camera_type.serialize()] = [camera_path] - row_data[f"{camera_type.serialize()}_extrinsic"] = [ - extrinsic.to_transformation_matrix().flatten().tolist()] - else: - row_data[camera_type.serialize()] = [None] - row_data[f"{camera_type.serialize()}_extrinsic"] = [None] - - batch = pa.record_batch(row_data, schema=recording_schema) - writer.write_batch(batch) - - 
sample_token = sample["next"] - sample_count += 1 - - # Sort by timestamp if required - if SORT_BY_TIMESTAMP: - recording_table = open_arrow_table(log_file_path) - recording_table = recording_table.sort_by([("timestamp", "ascending")]) - write_arrow_table(recording_table, log_file_path) diff --git a/src/py123d/conversion/datasets/nuscenes/nuscenes_map_conversion.py b/src/py123d/conversion/datasets/nuscenes/nuscenes_map_conversion.py index 66f7a658..3fbb9b62 100644 --- a/src/py123d/conversion/datasets/nuscenes/nuscenes_map_conversion.py +++ b/src/py123d/conversion/datasets/nuscenes/nuscenes_map_conversion.py @@ -1,21 +1,16 @@ -import lanelet2 -import numpy as np - from pathlib import Path -from typing import List, Optional -from lanelet2.io import load -from lanelet2.projection import MercatorProjector -from shapely.geometry import Polygon, MultiPolygon, LineString -from shapely.validation import make_valid -from nuscenes.map_expansion.map_api import NuScenesMap -from nuscenes.map_expansion.arcline_path_utils import discretize_lane +from typing import Optional + +import numpy as np +from shapely.geometry import LineString, Polygon +from py123d.common.utils.dependencies import check_dependencies +from py123d.conversion.datasets.nuscenes.nuscenes_constants import NUSCENES_MAPS from py123d.conversion.map_writer.abstract_map_writer import AbstractMapWriter -from py123d.datatypes.maps.cache.cache_map_objects import ( +from py123d.datatypes.maps.cache.cache_map_objects import ( # CacheIntersection, CacheCarpark, CacheCrosswalk, CacheGenericDrivable, - CacheIntersection, CacheLane, CacheLaneGroup, CacheRoadLine, @@ -24,21 +19,29 @@ from py123d.datatypes.maps.map_datatypes import RoadLineType from py123d.geometry import Polyline3D -NUSCENES_MAPS: List[str] = [ - "boston-seaport", - "singapore-hollandvillage", - "singapore-onenorth", - "singapore-queenstown" -] +check_dependencies(["lanelet2", "nuscenes"], optional_name="nuscenes") +import traceback + +import lanelet2 +from lanelet2.io import load +from lanelet2.projection import MercatorProjector +from nuscenes.map_expansion.arcline_path_utils import discretize_lane +from nuscenes.map_expansion.map_api import NuScenesMap -def write_nuscenes_map(nuscenes_maps_root: Path, location: str, map_writer: AbstractMapWriter, use_lanelet2: bool, lanelet2_root: Optional[str] = None) -> None: +def write_nuscenes_map( + nuscenes_maps_root: Path, + location: str, + map_writer: AbstractMapWriter, + use_lanelet2: bool, + lanelet2_root: Optional[str] = None, +) -> None: """ Main function to convert nuscenes map to unified format and write using map_writer. """ assert location in NUSCENES_MAPS, f"Map name {location} is not supported."
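# A minimal sketch of the devkit calls that the non-lanelet2 branch below relies on;
# resolution_meters=0.5 is an assumed example value, the rest is standard
# nuscenes-devkit map-expansion API:
#
#   nusc_map = NuScenesMap(dataroot=str(nuscenes_maps_root), map_name=location)
#   lane_record = nusc_map.lane[0]                                    # raw lane record
#   outline = nusc_map.extract_polygon(lane_record["polygon_token"])  # shapely Polygon
#   arcline = nusc_map.get_arcline_path(lane_record["token"])
#   centerline = discretize_lane(arcline, resolution_meters=0.5)      # [(x, y, yaw), ...]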
nusc_map = NuScenesMap(dataroot=str(nuscenes_maps_root), map_name=location) - + # Write all layers if use_lanelet2: _write_nuscenes_lanes_lanelet2(nusc_map, map_writer, lanelet2_root) @@ -46,6 +49,7 @@ def write_nuscenes_map(nuscenes_maps_root: Path, location: str, map_writer: Abst else: _write_nuscenes_lanes(nusc_map, map_writer) _write_nuscenes_lane_groups(nusc_map, map_writer) + _write_nuscenes_intersections(nusc_map, map_writer) _write_nuscenes_crosswalks(nusc_map, map_writer) _write_nuscenes_walkways(nusc_map, map_writer) @@ -58,16 +62,16 @@ def write_nuscenes_map(nuscenes_maps_root: Path, location: str, map_writer: Abst def _write_nuscenes_lanes_lanelet2(nusc_map: NuScenesMap, map_writer: AbstractMapWriter, lanelet2_root: str) -> None: map_name = nusc_map.map_name osm_map_file = str(Path(lanelet2_root) / f"{map_name}.osm") - + if "boston" in map_name.lower(): origin_lat, origin_lon = 42.3365, -71.0577 elif "singapore" in map_name.lower(): origin_lat, origin_lon = 1.3, 103.8 else: origin_lat, origin_lon = 49.0, 8.4 - + origin = lanelet2.io.Origin(origin_lat, origin_lon) - + try: lanelet_map = lanelet2.io.load(osm_map_file, origin) except Exception: @@ -79,19 +83,19 @@ def _write_nuscenes_lanes_lanelet2(nusc_map: NuScenesMap, map_writer: AbstractMa for lanelet in lanelet_map.laneletLayer: token = lanelet.id - + try: left_bound = [(p.x, p.y) for p in lanelet.leftBound] right_bound = [(p.x, p.y) for p in lanelet.rightBound] polygon_points = left_bound + right_bound[::-1] polygon = Polygon(polygon_points) - + predecessor_ids = [int(pred.id) for pred in lanelet.previousLanelets] successor_ids = [int(succ.id) for succ in lanelet.followingLanelets] - + left_lane_id = None right_lane_id = None - + left_boundary = [(p.x, p.y) for p in lanelet.leftBound] right_boundary = [(p.x, p.y) for p in lanelet.rightBound] centerline = [] @@ -99,7 +103,7 @@ def _write_nuscenes_lanes_lanelet2(nusc_map: NuScenesMap, map_writer: AbstractMa center_x = (left_pt.x + right_pt.x) / 2 center_y = (left_pt.y + right_pt.y) / 2 centerline.append((center_x, center_y)) - + speed_limit_mps = 0.0 if "speed_limit" in lanelet.attributes: try: @@ -109,7 +113,7 @@ def _write_nuscenes_lanes_lanelet2(nusc_map: NuScenesMap, map_writer: AbstractMa speed_limit_mps = speed_kmh / 3.6 except (ValueError, TypeError): pass - + map_writer.write_lane( CacheLane( object_id=token, @@ -127,9 +131,13 @@ def _write_nuscenes_lanes_lanelet2(nusc_map: NuScenesMap, map_writer: AbstractMa ) ) except Exception: + traceback.print_exc() continue -def _write_nuscenes_lane_groups_lanelet2(nusc_map: NuScenesMap, map_writer: AbstractMapWriter, lanelet2_root: str) -> None: + +def _write_nuscenes_lane_groups_lanelet2( + nusc_map: NuScenesMap, map_writer: AbstractMapWriter, lanelet2_root: str +) -> None: map_name = nusc_map.map_name osm_map_file = str(Path(lanelet2_root) / f"{map_name}.osm") @@ -137,9 +145,9 @@ def _write_nuscenes_lane_groups_lanelet2(nusc_map: NuScenesMap, map_writer: Abst origin_lat, origin_lon = 42.3365, -71.0577 else: origin_lat, origin_lon = 1.3, 103.8 - + origin = lanelet2.io.Origin(origin_lat, origin_lon) - + try: projector = MercatorProjector(origin) lanelet_map = load(osm_map_file, projector) @@ -156,10 +164,10 @@ def _write_nuscenes_lane_groups_lanelet2(nusc_map: NuScenesMap, map_writer: Abst predecessor_ids = [] successor_ids = [] try: - if hasattr(lanelet, 'left'): + if hasattr(lanelet, "left"): for left_lane in lanelet.left: predecessor_ids.append(int(left_lane.id)) - if hasattr(lanelet, 'right'): + if hasattr(lanelet, "right"): 
for right_lane in lanelet.right: successor_ids.append(int(right_lane.id)) except Exception: @@ -171,6 +179,7 @@ def _write_nuscenes_lane_groups_lanelet2(nusc_map: NuScenesMap, map_writer: Abst polygon_points = left_bound + right_bound[::-1] polygon = Polygon(polygon_points) except Exception: + traceback.print_exc() continue try: @@ -188,16 +197,9 @@ def _write_nuscenes_lane_groups_lanelet2(nusc_map: NuScenesMap, map_writer: Abst ) ) except Exception: + traceback.print_exc() continue -def _get_lanelet_connections(lanelet): - """ - Helper function to extract incoming and outgoing lanelets. - """ - incoming = lanelet.incomings - outgoing = lanelet.outgoings - return incoming, outgoing - def _write_nuscenes_lanes(nusc_map: NuScenesMap, map_writer: AbstractMapWriter) -> None: """ @@ -216,6 +218,7 @@ def _write_nuscenes_lanes(nusc_map: NuScenesMap, map_writer: AbstractMapWriter) if not polygon.is_valid: continue except Exception: + traceback.print_exc() continue # Get topology @@ -261,19 +264,20 @@ def _write_nuscenes_lanes(nusc_map: NuScenesMap, map_writer: AbstractMapWriter) CacheLane( object_id=token, lane_group_id=lane_record.get("road_segment_token", None), - left_boundary=left_boundary, - right_boundary=right_boundary, - centerline=baseline_path, + left_boundary=Polyline3D.from_linestring(left_boundary), + right_boundary=Polyline3D.from_linestring(right_boundary), + centerline=Polyline3D.from_linestring(baseline_path), left_lane_id=None, # Not directly available in nuscenes right_lane_id=None, # Not directly available in nuscenes predecessor_ids=incoming, successor_ids=outgoing, - speed_limit_mps=0.0, # Default value + speed_limit_mps=None, # Default value outline=None, geometry=polygon, ) ) except Exception: + traceback.print_exc() continue @@ -348,6 +352,7 @@ def _write_nuscenes_lane_groups(nusc_map: NuScenesMap, map_writer: AbstractMapWr ) ) except Exception: + traceback.print_exc() continue @@ -355,29 +360,29 @@ def _write_nuscenes_intersections(nusc_map: NuScenesMap, map_writer: AbstractMap """ Write intersection data to map_writer. 
""" - road_blocks = nusc_map.road_block - for block in road_blocks: - token = block["token"] - try: - if "polygon_token" in block: - polygon = nusc_map.extract_polygon(block["polygon_token"]) - else: - continue - if not polygon.is_valid: - continue - except Exception: - continue - - # Lane group IDs are not directly available; use empty list - lane_group_ids = [] - - map_writer.write_intersection( - CacheIntersection( - object_id=token, - lane_group_ids=lane_group_ids, - geometry=polygon, - ) - ) + # road_blocks = nusc_map.road_block + # for block in road_blocks: + # token = block["token"] + # try: + # if "polygon_token" in block: + # polygon = nusc_map.extract_polygon(block["polygon_token"]) + # else: + # continue + # if not polygon.is_valid: + # continue + # except Exception: + # continue + + # # Lane group IDs are not directly available; use empty list + # lane_group_ids = [] + + # map_writer.write_intersection( + # CacheIntersection( + # object_id=token, + # lane_group_ids=lane_group_ids, + # geometry=polygon, + # ) + # ) def _write_nuscenes_crosswalks(nusc_map: NuScenesMap, map_writer: AbstractMapWriter) -> None: @@ -395,6 +400,7 @@ def _write_nuscenes_crosswalks(nusc_map: NuScenesMap, map_writer: AbstractMapWri if not polygon.is_valid: continue except Exception: + traceback.print_exc() continue map_writer.write_crosswalk( @@ -420,6 +426,7 @@ def _write_nuscenes_walkways(nusc_map: NuScenesMap, map_writer: AbstractMapWrite if not polygon.is_valid: continue except Exception: + traceback.print_exc() continue map_writer.write_walkway( @@ -470,6 +477,7 @@ def _write_nuscenes_generic_drivables(nusc_map: NuScenesMap, map_writer: Abstrac if polygon.is_valid: all_drivables.append((f"road_segment_{segment['token']}", polygon)) except Exception: + traceback.print_exc() continue # Add lanes @@ -480,6 +488,7 @@ def _write_nuscenes_generic_drivables(nusc_map: NuScenesMap, map_writer: Abstrac if polygon.is_valid: all_drivables.append((f"lane_{lane['token']}", polygon)) except Exception: + traceback.print_exc() continue # Add drivable areas @@ -490,6 +499,7 @@ def _write_nuscenes_generic_drivables(nusc_map: NuScenesMap, map_writer: Abstrac if polygon.is_valid: all_drivables.append((f"road_{road['token']}", polygon)) except Exception: + traceback.print_exc() continue for obj_id, geometry in all_drivables: @@ -516,6 +526,7 @@ def _write_nuscenes_stop_lines(nusc_map: NuScenesMap, map_writer: AbstractMapWri if not polygon.is_valid: continue except Exception: + traceback.print_exc() continue # Note: Stop lines are written as generic drivable for compatibility @@ -586,7 +597,7 @@ def _get_lane_boundary(lane_token: str, side: str, nusc_map: NuScenesMap) -> Opt divider_segment_nodes_key = f"{side}_lane_divider_segment_nodes" if divider_segment_nodes_key in lane_record and lane_record[divider_segment_nodes_key]: nodes = lane_record[divider_segment_nodes_key] - boundary = LineString([(node['x'], node['y']) for node in nodes]) + boundary = LineString([(node["x"], node["y"]) for node in nodes]) return boundary return None @@ -611,7 +622,7 @@ def _get_lane_group_boundary(segment_token: str, side: str, nusc_map: NuScenesMa # Find nearest boundary of the specified type within a threshold nearest = None - min_dist = float('inf') + min_dist = float("inf") if boundary_type == "road_divider": records = nusc_map.road_divider @@ -705,4 +716,4 @@ def align_boundary_direction(centerline: LineString, boundary: LineString) -> Li """ if not lines_same_direction(centerline, boundary): return flip_linestring(boundary) - return 
boundary \ No newline at end of file + return boundary diff --git a/src/py123d/conversion/datasets/nuscenes/nuscenes_sensor_io.py b/src/py123d/conversion/datasets/nuscenes/nuscenes_sensor_io.py index 82f1ac72..7592182c 100644 --- a/src/py123d/conversion/datasets/nuscenes/nuscenes_sensor_io.py +++ b/src/py123d/conversion/datasets/nuscenes/nuscenes_sensor_io.py @@ -1,13 +1,26 @@ -import numpy as np from pathlib import Path from typing import Dict + +import numpy as np + +from py123d.datatypes.scene.scene_metadata import LogMetadata from py123d.datatypes.sensors.lidar.lidar import LiDARType +from py123d.datatypes.sensors.lidar.lidar_index import NuScenesLidarIndex +from py123d.geometry.se import StateSE3 +from py123d.geometry.transform.transform_se3 import convert_points_3d_array_between_origins -def load_nuscenes_lidar_pcs_from_file(pcd_path: Path) -> Dict[LiDARType, np.ndarray]: + +def load_nuscenes_lidar_pcs_from_file(pcd_path: Path, log_metadata: LogMetadata) -> Dict[LiDARType, np.ndarray]: points = np.fromfile(pcd_path, dtype=np.float32).reshape(-1, 5) - - lidar_pcs_dict: Dict[LiDARType, np.ndarray] = { - LiDARType.LIDAR_TOP: points - } - + + # convert lidar to ego frame + lidar_extrinsic = log_metadata.lidar_metadata[LiDARType.LIDAR_TOP].extrinsic + + points[..., NuScenesLidarIndex.XYZ] = convert_points_3d_array_between_origins( + from_origin=lidar_extrinsic, + to_origin=StateSE3(0, 0, 0, 1.0, 0, 0, 0), + points_3d_array=points[..., NuScenesLidarIndex.XYZ], + ) + lidar_pcs_dict: Dict[LiDARType, np.ndarray] = {LiDARType.LIDAR_TOP: points} + return lidar_pcs_dict diff --git a/src/py123d/conversion/registry/lidar_index_registry.py b/src/py123d/conversion/registry/lidar_index_registry.py index c5213d2a..b58e4fce 100644 --- a/src/py123d/conversion/registry/lidar_index_registry.py +++ b/src/py123d/conversion/registry/lidar_index_registry.py @@ -82,11 +82,12 @@ class PandasetLidarIndex(LiDARIndex): Y = 1 Z = 2 INTENSITY = 3 - + + @register_lidar_index -class NuscenesLidarIndex(LiDARIndex): - X = 0 - Y = 1 - Z = 2 - INTENSITY = 3 - RING = 4 +class NuScenesLidarIndex(LiDARIndex): + X = 0 + Y = 1 + Z = 2 + INTENSITY = 3 + RING = 4 diff --git a/src/py123d/conversion/sensor_io/lidar/file_lidar_io.py b/src/py123d/conversion/sensor_io/lidar/file_lidar_io.py index 83d4da21..82c08efc 100644 --- a/src/py123d/conversion/sensor_io/lidar/file_lidar_io.py +++ b/src/py123d/conversion/sensor_io/lidar/file_lidar_io.py @@ -15,7 +15,7 @@ "av2-sensor": DATASET_PATHS.av2_sensor_data_root, "wopd": DATASET_PATHS.wopd_data_root, "pandaset": DATASET_PATHS.pandaset_data_root, - "nuscenes": DATASET_PATHS.nuscenes_sensor_root, + "nuscenes": DATASET_PATHS.nuscenes_sensor_root, } @@ -57,10 +57,11 @@ def load_lidar_pcs_from_file( from py123d.conversion.datasets.pandaset.pandaset_sensor_io import load_pandaset_lidars_pcs_from_file lidar_pcs_dict = load_pandaset_lidars_pcs_from_file(full_lidar_path, index) - + elif log_metadata.dataset == "nuscenes": from py123d.conversion.datasets.nuscenes.nuscenes_sensor_io import load_nuscenes_lidar_pcs_from_file - lidar_pcs_dict = load_nuscenes_lidar_pcs_from_file(full_lidar_path) + + lidar_pcs_dict = load_nuscenes_lidar_pcs_from_file(full_lidar_path, log_metadata) else: raise NotImplementedError(f"Loading LiDAR data for dataset {log_metadata.dataset} is not implemented.") diff --git a/src/py123d/datatypes/scene/arrow/utils/arrow_getters.py b/src/py123d/datatypes/scene/arrow/utils/arrow_getters.py index 031c726a..6345df02 100644 --- 
a/src/py123d/datatypes/scene/arrow/utils/arrow_getters.py +++ b/src/py123d/datatypes/scene/arrow/utils/arrow_getters.py @@ -34,11 +34,11 @@ DATASET_PATHS: DictConfig = get_dataset_paths() DATASET_SENSOR_ROOT: Dict[str, Path] = { - "nuplan": DATASET_PATHS.nuplan_sensor_root, "av2-sensor": DATASET_PATHS.av2_sensor_data_root, + "nuplan": DATASET_PATHS.nuplan_sensor_root, + "nuscenes": DATASET_PATHS.nuscenes_data_root, "wopd": DATASET_PATHS.wopd_data_root, "pandaset": DATASET_PATHS.pandaset_data_root, - "nuscenes": DATASET_PATHS.nuscenes_data_root, } diff --git a/src/py123d/datatypes/sensors/lidar/lidar_index.py b/src/py123d/datatypes/sensors/lidar/lidar_index.py index c101ba8e..b4864cac 100644 --- a/src/py123d/datatypes/sensors/lidar/lidar_index.py +++ b/src/py123d/datatypes/sensors/lidar/lidar_index.py @@ -86,9 +86,10 @@ class PandasetLidarIndex(LiDARIndex): @register_lidar_index -class NuscenesLidarIndex(LiDARIndex): - X = 0 - Y = 1 - Z = 2 - INTENSITY = 3 - RING = 4 +class NuScenesLidarIndex(LiDARIndex): + + X = 0 + Y = 1 + Z = 2 + INTENSITY = 3 + RING = 4 diff --git a/src/py123d/datatypes/vehicle_state/vehicle_parameters.py b/src/py123d/datatypes/vehicle_state/vehicle_parameters.py index 19b050c7..5e15c3b7 100644 --- a/src/py123d/datatypes/vehicle_state/vehicle_parameters.py +++ b/src/py123d/datatypes/vehicle_state/vehicle_parameters.py @@ -52,17 +52,20 @@ def get_nuplan_chrysler_pacifica_parameters() -> VehicleParameters: rear_axle_to_center_longitudinal=1.461, ) -def get_nuscenes_renauly_zoe_parameters() -> VehicleParameters: + +def get_nuscenes_renault_zoe_parameters() -> VehicleParameters: + # https://en.wikipedia.org/wiki/Renault_Zoe return VehicleParameters( - vehicle_name="nuscenes_renauly_zoe", + vehicle_name="nuscenes_renault_zoe", width=1.730, length=4.084, height=1.562, wheel_base=2.588, - rear_axle_to_center_vertical=1.562 / 2, # NOTE: missing in nuscenes, TODO: find more accurate value + rear_axle_to_center_vertical=1.562 / 2, # NOTE: missing in nuscenes, TODO: find more accurate value rear_axle_to_center_longitudinal=1.385, ) + def get_carla_lincoln_mkz_2020_parameters() -> VehicleParameters: # NOTE: values are extracted from CARLA return VehicleParameters( diff --git a/src/py123d/script/config/common/default_dataset_paths.yaml b/src/py123d/script/config/common/default_dataset_paths.yaml index 64e0a8c0..b4941707 100644 --- a/src/py123d/script/config/common/default_dataset_paths.yaml +++ b/src/py123d/script/config/common/default_dataset_paths.yaml @@ -24,4 +24,4 @@ dataset_paths: # nuScenes defaults nuscenes_data_root: ${oc.env:NUSCENES_DATA_ROOT,null} nuscenes_map_root: ${dataset_paths.nuscenes_data_root} - nuscenes_sensor_root: ${dataset_paths.nuscenes_data_root}/samples/LIDAR_TOP \ No newline at end of file + nuscenes_sensor_root: ${dataset_paths.nuscenes_data_root} diff --git a/src/py123d/script/config/conversion/datasets/nuscenes_dataset.yaml b/src/py123d/script/config/conversion/datasets/nuscenes_dataset.yaml index 410b910e..0f3ab95e 100644 --- a/src/py123d/script/config/conversion/datasets/nuscenes_dataset.yaml +++ b/src/py123d/script/config/conversion/datasets/nuscenes_dataset.yaml @@ -1,9 +1,10 @@ nuscenes_dataset: - _target_: py123d.conversion.datasets.nuscenes.nuscenes_data_converter.NuScenesDataConverter + _target_: py123d.conversion.datasets.nuscenes.nuscenes_converter.NuScenesConverter _convert_: 'all' splits: ["nuscenes_train", "nuscenes_val", "nuscenes_test"] nuscenes_data_root: ${dataset_paths.nuscenes_data_root} + nuscenes_map_root: 
${dataset_paths.nuscenes_data_root} nuscenes_lanelet2_root: ${dataset_paths.nuscenes_data_root}/lanelet2 use_lanelet2: False @@ -13,6 +14,7 @@ nuscenes_dataset: force_log_conversion: ${force_log_conversion} force_map_conversion: ${force_map_conversion} + # Map include_map: true @@ -25,10 +27,10 @@ nuscenes_dataset: # Traffic Lights include_traffic_lights: false - #cameras + # Cameras include_cameras: true camera_store_option: "path" - + #lidar include_lidars: true - lidar_store_option: "path" \ No newline at end of file + lidar_store_option: "path" diff --git a/src/py123d/script/config/conversion/datasets/nuscenes_mini_dataset.yaml b/src/py123d/script/config/conversion/datasets/nuscenes_mini_dataset.yaml new file mode 100644 index 00000000..95a3c927 --- /dev/null +++ b/src/py123d/script/config/conversion/datasets/nuscenes_mini_dataset.yaml @@ -0,0 +1,36 @@ +nuscenes_dataset: + _target_: py123d.conversion.datasets.nuscenes.nuscenes_converter.NuScenesConverter + _convert_: 'all' + + splits: ["nuscenes-mini_train", "nuscenes-mini_val"] + nuscenes_data_root: ${dataset_paths.nuscenes_data_root} + nuscenes_map_root: ${dataset_paths.nuscenes_data_root} + nuscenes_lanelet2_root: ${dataset_paths.nuscenes_data_root}/lanelet2 + use_lanelet2: False + + dataset_converter_config: + _target_: py123d.conversion.dataset_converter_config.DatasetConverterConfig + _convert_: 'all' + + force_log_conversion: ${force_log_conversion} + force_map_conversion: ${force_map_conversion} + + # Map + include_map: true + + # Ego + include_ego: true + + # Box Detections + include_box_detections: true + + # Traffic Lights + include_traffic_lights: false + + # Cameras + include_cameras: true + camera_store_option: "binary" + + #lidar + include_lidars: true + lidar_store_option: "binary" diff --git a/test_viser.py b/test_viser.py index 73896531..d5375bd7 100644 --- a/test_viser.py +++ b/test_viser.py @@ -1,15 +1,16 @@ from py123d.common.multithreading.worker_sequential import Sequential from py123d.datatypes.scene.arrow.arrow_scene_builder import ArrowSceneBuilder from py123d.datatypes.scene.scene_filter import SceneFilter -from py123d.datatypes.sensors.camera.pinhole_camera import PinholeCameraType from py123d.visualization.viser.viser_viewer import ViserViewer +# from py123d.datatypes.sensors.camera.pinhole_camera import PinholeCameraType + if __name__ == "__main__": - - splits = ["nuplan-mini_test", "nuplan-mini_train", "nuplan-mini_val"] + splits = ["nuscenes-mini_val", "nuscenes-mini_train"] + # splits = ["nuplan-mini_test", "nuplan-mini_train", "nuplan-mini_val"] # splits = ["nuplan_private_test"] # splits = ["carla_test"] - splits = ["wopd_val"] + # splits = ["wopd_val"] # splits = ["av2-sensor_train"] # splits = ["pandaset_test", "pandaset_val", "pandaset_train"] # log_names = ["2021.08.24.13.12.55_veh-45_00386_00472"] @@ -21,11 +22,11 @@ split_names=splits, log_names=log_names, scene_uuids=scene_uuids, - duration_s=10.0, + duration_s=None, history_s=0.0, timestamp_threshold_s=10.0, shuffle=True, - camera_types=[PinholeCameraType.CAM_F0], + # camera_types=[PinholeCameraType.CAM_F0], ) scene_builder = ArrowSceneBuilder() worker = Sequential() From b733b8223221f2994077f53b65b8e800e405b080 Mon Sep 17 00:00:00 2001 From: Daniel Dauner Date: Fri, 31 Oct 2025 23:26:06 +0100 Subject: [PATCH 130/145] Refactor nuScenes map conversion (lanelet2 support currently missing).
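A quick way to sanity-check the refactored layers is the query path exercised in
the notebook below. A minimal sketch, assuming a scene built via ArrowSceneBuilder
from the nuscenes-mini splits and abbreviating the MapLayer import location:

    import shapely.geometry as geom

    map_api = scene.get_map_api()
    # center the query on the ego vehicle (accessor for the ego state elided)
    center = ego_vehicle_state.bounding_box.center.state_se2.point_2d
    patch = geom.box(center.x - 50.0, center.y - 50.0, center.x + 50.0, center.y + 50.0)
    layers = [MapLayer.LANE_GROUP, MapLayer.ROAD_EDGE, MapLayer.ROAD_LINE]
    map_objects_dict = map_api.query(geometry=patch, layers=layers, predicate="intersects")
    for road_edge in map_objects_dict[MapLayer.ROAD_EDGE]:
        print(road_edge.object_id, road_edge.polyline_3d.linestring.length)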
--- notebooks/bev_matplotlib.ipynb | 61 +- .../conversion/nuscenes_mini_conversion.sh | 2 +- .../datasets/nuscenes/nuscenes_converter.py | 4 +- .../nuscenes/nuscenes_map_conversion.py | 963 +++++++----------- .../datasets/nuscenes/nuscenes_sensor_io.py | 11 +- .../{ => utils}/nuscenes_constants.py | 0 .../nuscenes/utils/nuscenes_map_utils.py | 197 ++++ .../map_utils/road_edge/road_edge_2d_utils.py | 48 +- .../datatypes/maps/gpkg/gpkg_map_objects.py | 37 +- src/py123d/datatypes/maps/gpkg/gpkg_utils.py | 23 +- src/py123d/geometry/utils/polyline_utils.py | 22 + .../datasets/nuscenes_mini_dataset.yaml | 2 +- .../map_writer/gpkg_map_writer.yaml | 1 + 13 files changed, 693 insertions(+), 678 deletions(-) rename src/py123d/conversion/datasets/nuscenes/{ => utils}/nuscenes_constants.py (100%) create mode 100644 src/py123d/conversion/datasets/nuscenes/utils/nuscenes_map_utils.py diff --git a/notebooks/bev_matplotlib.ipynb b/notebooks/bev_matplotlib.ipynb index bd2dd882..910bf63a 100644 --- a/notebooks/bev_matplotlib.ipynb +++ b/notebooks/bev_matplotlib.ipynb @@ -10,6 +10,7 @@ "from py123d.datatypes.scene.arrow.arrow_scene_builder import ArrowSceneBuilder\n", "from py123d.datatypes.scene.scene_filter import SceneFilter\n", "\n", + "\n", "from py123d.common.multithreading.worker_sequential import Sequential\n", "# from py123d.datatypes.sensors.camera.pinhole_camera import PinholeCameraType " ] @@ -144,22 +145,22 @@ " route_lane_group_ids: Optional[List[int]] = None,\n", ") -> None:\n", " layers: List[MapLayer] = [\n", - " MapLayer.LANE,\n", + " # MapLayer.LANE,\n", " MapLayer.LANE_GROUP,\n", - " # MapLayer.GENERIC_DRIVABLE,\n", + " MapLayer.GENERIC_DRIVABLE,\n", " # MapLayer.CARPARK,\n", " # MapLayer.CROSSWALK,\n", - " MapLayer.INTERSECTION,\n", + " # MapLayer.INTERSECTION,\n", " # MapLayer.WALKWAY,\n", " MapLayer.ROAD_EDGE,\n", - " # MapLayer.ROAD_LINE,\n", + " MapLayer.ROAD_LINE,\n", " ]\n", " x_min, x_max = point_2d.x - radius, point_2d.x + radius\n", " y_min, y_max = point_2d.y - radius, point_2d.y + radius\n", " patch = geom.box(x_min, y_min, x_max, y_max)\n", " map_objects_dict = map_api.query(geometry=patch, layers=layers, predicate=\"intersects\")\n", + " # print(map_objects_dict[MapLayer.ROAD_EDGE])\n", "\n", - " done = False\n", " for layer, map_objects in map_objects_dict.items():\n", " for map_object in map_objects:\n", " try:\n", @@ -167,39 +168,45 @@ " MapLayer.GENERIC_DRIVABLE,\n", " MapLayer.CARPARK,\n", " MapLayer.CROSSWALK,\n", - " MapLayer.INTERSECTION,\n", + " # MapLayer.INTERSECTION,\n", " MapLayer.WALKWAY,\n", " ]:\n", " add_shapely_polygon_to_ax(ax, map_object.shapely_polygon, MAP_SURFACE_CONFIG[layer])\n", - " print(f\"Added {layer.name} with id {map_object.object_id}\")\n", "\n", " if layer in [MapLayer.LANE_GROUP]:\n", " map_object: AbstractLaneGroup\n", " add_shapely_polygon_to_ax(ax, map_object.shapely_polygon, MAP_SURFACE_CONFIG[layer])\n", "\n", - " if layer in [MapLayer.LANE]:\n", - " add_shapely_linestring_to_ax(ax, map_object.centerline.linestring, CENTERLINE_CONFIG)\n", + " if map_object.intersection is not None:\n", + " add_shapely_polygon_to_ax(ax, map_object.intersection.shapely_polygon, ROUTE_CONFIG)\n", + "\n", + " for lane in map_object.lanes:\n", + " add_shapely_linestring_to_ax(ax, lane.centerline.linestring, CENTERLINE_CONFIG)\n", + "\n", + " # if layer in [MapLayer.LANE]:\n", + " # add_shapely_linestring_to_ax(ax, map_object.centerline.linestring, CENTERLINE_CONFIG)\n", + " # add_shapely_polygon_to_ax(ax, map_object.shapely_polygon, 
MAP_SURFACE_CONFIG[layer])\n", "\n", " if layer in [MapLayer.ROAD_EDGE]:\n", " add_shapely_linestring_to_ax(ax, map_object.polyline_3d.linestring, ROAD_EDGE_CONFIG)\n", "\n", - " # if layer in [MapLayer.ROAD_LINE]:\n", - " # # line_type = int(map_object.road_line_type)\n", - " # # plt_config = PlotConfig(\n", - " # # fill_color=NEW_TAB_10[line_type % (len(NEW_TAB_10) - 1)],\n", - " # # fill_color_alpha=1.0,\n", - " # # line_color=NEW_TAB_10[line_type % (len(NEW_TAB_10) - 1)],\n", - " # # line_color_alpha=1.0,\n", - " # # line_width=1.5,\n", - " # # line_style=\"-\",\n", - " # # zorder=10,\n", - " # # )\n", - " # add_shapely_linestring_to_ax(ax, map_object.polyline_3d.linestring, ROAD_LINE_CONFIG)\n", + " if layer in [MapLayer.ROAD_LINE]:\n", + " # line_type = int(map_object.road_line_type)\n", + " # plt_config = PlotConfig(\n", + " # fill_color=NEW_TAB_10[line_type % (len(NEW_TAB_10) - 1)],\n", + " # fill_color_alpha=1.0,\n", + " # line_color=NEW_TAB_10[line_type % (len(NEW_TAB_10) - 1)],\n", + " # line_color_alpha=1.0,\n", + " # line_width=1.5,\n", + " # line_style=\"-\",\n", + " # zorder=10,\n", + " # )\n", + " add_shapely_linestring_to_ax(ax, map_object.polyline_3d.linestring, ROAD_LINE_CONFIG)\n", "\n", " except Exception:\n", " import traceback\n", "\n", - " print(f\"Error adding map object of type {layer.name} and id {map_object.id}\")\n", + " print(f\"Error adding map object of type {layer.name} and id {map_object.object_id}\")\n", " traceback.print_exc()\n", "\n", " # ax.set_title(f\"Map: {map_api.map_name}\")\n", @@ -213,8 +220,10 @@ "\n", " point_2d = ego_vehicle_state.bounding_box.center.state_se2.point_2d\n", " if map_api is not None:\n", - " add_debug_map_on_ax(ax, scene.get_map_api(), point_2d, radius=radius, route_lane_group_ids=None)\n", - " # add_default_map_on_ax(ax, map_api, point_2d, radius=radius, route_lane_group_ids=None)\n", + " # add_debug_map_on_ax(ax, scene.get_map_api(), point_2d, radius=radius, route_lane_group_ids=None)\n", + "\n", + "\n", + " add_default_map_on_ax(ax, map_api, point_2d, radius=radius, route_lane_group_ids=None)\n", " # add_traffic_lights_to_ax(ax, traffic_light_detections, scene.get_map_api())\n", "\n", " add_box_detections_to_ax(ax, box_detections)\n", @@ -239,7 +248,7 @@ " return fig, ax\n", "\n", "\n", - "# scene_index = \n", + "# scene_index =\n", "iteration = 1\n", "\n", "scale = 10\n", @@ -249,7 +258,7 @@ "# _plot_scene_on_ax(ax[1], scene, iteration, radius=50)\n", "# _plot_scene_on_ax(ax[2], scene, iteration, radius=100)\n", "\n", - "plt.show()\n" + "plt.show()" ] }, { diff --git a/scripts/conversion/nuscenes_mini_conversion.sh b/scripts/conversion/nuscenes_mini_conversion.sh index b6d35aa4..b9a9a7d1 100644 --- a/scripts/conversion/nuscenes_mini_conversion.sh +++ b/scripts/conversion/nuscenes_mini_conversion.sh @@ -1,3 +1,3 @@ export NUSCENES_DATA_ROOT="/home/daniel/nuscenes_mini/" -py123d-conversion datasets=["nuscenes_mini_dataset"] +py123d-conversion datasets=["nuscenes_mini_dataset"] map_writer.remap_ids=true diff --git a/src/py123d/conversion/datasets/nuscenes/nuscenes_converter.py b/src/py123d/conversion/datasets/nuscenes/nuscenes_converter.py index 483a6bc4..c4e1627e 100644 --- a/src/py123d/conversion/datasets/nuscenes/nuscenes_converter.py +++ b/src/py123d/conversion/datasets/nuscenes/nuscenes_converter.py @@ -9,14 +9,14 @@ from py123d.conversion.abstract_dataset_converter import AbstractDatasetConverter from py123d.conversion.dataset_converter_config import DatasetConverterConfig from 
py123d.conversion.datasets.nuplan.nuplan_converter import TARGET_DT -from py123d.conversion.datasets.nuscenes.nuscenes_constants import ( +from py123d.conversion.datasets.nuscenes.nuscenes_map_conversion import NUSCENES_MAPS, write_nuscenes_map +from py123d.conversion.datasets.nuscenes.utils.nuscenes_constants import ( NUSCENES_CAMERA_TYPES, NUSCENES_DATA_ROOT, NUSCENES_DATA_SPLITS, NUSCENES_DETECTION_NAME_DICT, NUSCENES_DT, ) -from py123d.conversion.datasets.nuscenes.nuscenes_map_conversion import NUSCENES_MAPS, write_nuscenes_map from py123d.conversion.log_writer.abstract_log_writer import AbstractLogWriter, LiDARData from py123d.conversion.map_writer.abstract_map_writer import AbstractMapWriter from py123d.datatypes.detections.box_detections import BoxDetectionMetadata, BoxDetectionSE3, BoxDetectionWrapper diff --git a/src/py123d/conversion/datasets/nuscenes/nuscenes_map_conversion.py b/src/py123d/conversion/datasets/nuscenes/nuscenes_map_conversion.py index 3fbb9b62..39ae313a 100644 --- a/src/py123d/conversion/datasets/nuscenes/nuscenes_map_conversion.py +++ b/src/py123d/conversion/datasets/nuscenes/nuscenes_map_conversion.py @@ -1,33 +1,45 @@ +from collections import defaultdict from pathlib import Path -from typing import Optional +from typing import Dict, Final, List, Optional import numpy as np from shapely.geometry import LineString, Polygon from py123d.common.utils.dependencies import check_dependencies -from py123d.conversion.datasets.nuscenes.nuscenes_constants import NUSCENES_MAPS +from py123d.conversion.datasets.nuscenes.utils.nuscenes_constants import NUSCENES_MAPS +from py123d.conversion.datasets.nuscenes.utils.nuscenes_map_utils import ( + extract_lane_and_boundaries, + extract_nuscenes_centerline, + order_lanes_left_to_right, +) from py123d.conversion.map_writer.abstract_map_writer import AbstractMapWriter -from py123d.datatypes.maps.cache.cache_map_objects import ( # CacheIntersection, +from py123d.conversion.utils.map_utils.road_edge.road_edge_2d_utils import ( + get_road_edge_linear_rings, + split_line_geometry_by_max_length, + split_polygon_by_grid, +) +from py123d.datatypes.maps.cache.cache_map_objects import ( CacheCarpark, CacheCrosswalk, CacheGenericDrivable, + CacheIntersection, CacheLane, CacheLaneGroup, + CacheRoadEdge, CacheRoadLine, CacheWalkway, ) -from py123d.datatypes.maps.map_datatypes import RoadLineType -from py123d.geometry import Polyline3D - -check_dependencies(["lanelet2", "nuscenes"], optional_name="nuscenes") -import traceback +from py123d.datatypes.maps.map_datatypes import RoadEdgeType, RoadLineType +from py123d.geometry import OccupancyMap2D, Polyline2D, Polyline3D +from py123d.geometry.utils.polyline_utils import offset_points_perpendicular -import lanelet2 -from lanelet2.io import load -from lanelet2.projection import MercatorProjector -from nuscenes.map_expansion.arcline_path_utils import discretize_lane +check_dependencies(["nuscenes"], optional_name="nuscenes") from nuscenes.map_expansion.map_api import NuScenesMap +MAX_ROAD_EDGE_LENGTH: Final[float] = 100.0 # [m] +MAX_LANE_WIDTH: Final[float] = 4.0 # [m] +MIN_LANE_WIDTH: Final[float] = 1.0 # [m] + def write_nuscenes_map( nuscenes_maps_root: Path, @@ -36,634 +48,432 @@ def write_nuscenes_map( use_lanelet2: bool, lanelet2_root: Optional[str] = None, ) -> None: + """Converts the nuScenes map types to the 123D format, and sends elements to the map writer. + FIXME @DanielDauner: Currently, Lanelet2 format is not supported for nuScenes. 
+ + :param nuscenes_maps_root: Path to the nuScenes maps root directory + :param location: Name of the specific map location to convert + :param map_writer: Map writer instance to write the converted elements + :param use_lanelet2: Flag indicating whether to use Lanelet2 format + :param lanelet2_root: Path to the Lanelet2 root directory, defaults to None """ - Main function to convert nuscenes map to unified format and write using map_writer. - """ + assert location in NUSCENES_MAPS, f"Map name {location} is not supported." - nusc_map = NuScenesMap(dataroot=str(nuscenes_maps_root), map_name=location) - - # Write all layers - if use_lanelet2: - _write_nuscenes_lanes_lanelet2(nusc_map, map_writer, lanelet2_root) - _write_nuscenes_lane_groups_lanelet2(nusc_map, map_writer, lanelet2_root) - else: - _write_nuscenes_lanes(nusc_map, map_writer) - _write_nuscenes_lane_groups(nusc_map, map_writer) - - _write_nuscenes_intersections(nusc_map, map_writer) - _write_nuscenes_crosswalks(nusc_map, map_writer) - _write_nuscenes_walkways(nusc_map, map_writer) - _write_nuscenes_carparks(nusc_map, map_writer) - _write_nuscenes_generic_drivables(nusc_map, map_writer) - _write_nuscenes_stop_lines(nusc_map, map_writer) - _write_nuscenes_road_lines(nusc_map, map_writer) - - -def _write_nuscenes_lanes_lanelet2(nusc_map: NuScenesMap, map_writer: AbstractMapWriter, lanelet2_root: str) -> None: - map_name = nusc_map.map_name - osm_map_file = str(Path(lanelet2_root) / f"{map_name}.osm") - - if "boston" in map_name.lower(): - origin_lat, origin_lon = 42.3365, -71.0577 - elif "singapore" in map_name.lower(): - origin_lat, origin_lon = 1.3, 103.8 - else: - origin_lat, origin_lon = 49.0, 8.4 - - origin = lanelet2.io.Origin(origin_lat, origin_lon) - - try: - lanelet_map = lanelet2.io.load(osm_map_file, origin) - except Exception: - try: - projector = lanelet2.projection.MercatorProjector(origin) - lanelet_map = lanelet2.io.load(osm_map_file, projector) - except Exception: - return - - for lanelet in lanelet_map.laneletLayer: - token = lanelet.id - - try: - left_bound = [(p.x, p.y) for p in lanelet.leftBound] - right_bound = [(p.x, p.y) for p in lanelet.rightBound] - polygon_points = left_bound + right_bound[::-1] - polygon = Polygon(polygon_points) - - predecessor_ids = [int(pred.id) for pred in lanelet.previousLanelets] - successor_ids = [int(succ.id) for succ in lanelet.followingLanelets] - - left_lane_id = None - right_lane_id = None - - left_boundary = [(p.x, p.y) for p in lanelet.leftBound] - right_boundary = [(p.x, p.y) for p in lanelet.rightBound] - centerline = [] - for left_pt, right_pt in zip(lanelet.leftBound, lanelet.rightBound): - center_x = (left_pt.x + right_pt.x) / 2 - center_y = (left_pt.y + right_pt.y) / 2 - centerline.append((center_x, center_y)) - - speed_limit_mps = 0.0 - if "speed_limit" in lanelet.attributes: - try: - speed_limit_str = lanelet.attributes["speed_limit"] - if "km/h" in speed_limit_str: - speed_kmh = float(speed_limit_str.replace("km/h", "").strip()) - speed_limit_mps = speed_kmh / 3.6 - except (ValueError, TypeError): - pass - - map_writer.write_lane( - CacheLane( - object_id=token, - lane_group_id=None, - left_boundary=left_boundary, - right_boundary=right_boundary, - centerline=centerline, - left_lane_id=left_lane_id, - right_lane_id=right_lane_id, - predecessor_ids=predecessor_ids, - successor_ids=successor_ids, - speed_limit_mps=speed_limit_mps, - outline=None, - geometry=polygon, - ) - ) - except Exception: - traceback.print_exc() - continue + nuscenes_map = 
NuScenesMap(dataroot=str(nuscenes_maps_root), map_name=location) + # 1. extract road edges (used later to determine lane connector widths) + road_edges = _extract_nuscenes_road_edges(nuscenes_map) -def _write_nuscenes_lane_groups_lanelet2( - nusc_map: NuScenesMap, map_writer: AbstractMapWriter, lanelet2_root: str -) -> None: - map_name = nusc_map.map_name - osm_map_file = str(Path(lanelet2_root) / f"{map_name}.osm") - - if "boston" in map_name.lower(): - origin_lat, origin_lon = 42.3365, -71.0577 - else: - origin_lat, origin_lon = 1.3, 103.8 - - origin = lanelet2.io.Origin(origin_lat, origin_lon) - - try: - projector = MercatorProjector(origin) - lanelet_map = load(osm_map_file, projector) - except Exception: - return - - for lanelet in lanelet_map.laneletLayer: - token = lanelet.id - lane_ids = [lanelet.id] - try: - predecessor_ids = [int(lanelet.id) for lanelet in lanelet.previous] - successor_ids = [int(lanelet.id) for lanelet in lanelet.following] - except AttributeError: - predecessor_ids = [] - successor_ids = [] - try: - if hasattr(lanelet, "left"): - for left_lane in lanelet.left: - predecessor_ids.append(int(left_lane.id)) - if hasattr(lanelet, "right"): - for right_lane in lanelet.right: - successor_ids.append(int(right_lane.id)) - except Exception: - pass - - try: - left_bound = [(p.x, p.y) for p in lanelet.leftBound] - right_bound = [(p.x, p.y) for p in lanelet.rightBound] - polygon_points = left_bound + right_bound[::-1] - polygon = Polygon(polygon_points) - except Exception: - traceback.print_exc() - continue - - try: - map_writer.write_lane_group( - CacheLaneGroup( - object_id=token, - lane_ids=lane_ids, - left_boundary=None, - right_boundary=None, - intersection_id=None, - predecessor_ids=predecessor_ids, - successor_ids=successor_ids, - outline=None, - geometry=polygon, - ) - ) - except Exception: - traceback.print_exc() - continue + # 2. extract lanes + lanes = _extract_nuscenes_lanes(nuscenes_map) + # 3. extract lane connectors (i.e. lanes on intersections) + lane_connectors = _extract_nuscenes_lane_connectors(nuscenes_map, road_edges) -def _write_nuscenes_lanes(nusc_map: NuScenesMap, map_writer: AbstractMapWriter) -> None: - """ - Write lane data to map_writer, including topology and boundaries. - """ - lane_records = nusc_map.lane - for lane_record in lane_records: - token = lane_record["token"] + # 4. extract intersections (and store lane-connector to intersection assignment for lane groups) + intersection_assignment = _write_nuscenes_intersections(nuscenes_map, lane_connectors, map_writer) - # Extract geometry from lane record - try: - if "polygon_token" in lane_record: - polygon = nusc_map.extract_polygon(lane_record["polygon_token"]) - else: - continue - if not polygon.is_valid: - continue - except Exception: - traceback.print_exc() - continue + # 5. 
extract lane groups
+    lane_groups = _extract_nuscenes_lane_groups(nuscenes_map, lanes, lane_connectors, intersection_assignment)

-        # Get topology
-        incoming = nusc_map.get_incoming_lane_ids(token)
-        outgoing = nusc_map.get_outgoing_lane_ids(token)
+    # Write remaining map elements
+    _write_nuscenes_crosswalks(nuscenes_map, map_writer)
+    _write_nuscenes_walkways(nuscenes_map, map_writer)
+    _write_nuscenes_carparks(nuscenes_map, map_writer)
+    _write_nuscenes_generic_drivables(nuscenes_map, map_writer)
+    _write_nuscenes_stop_lines(nuscenes_map, map_writer)
+    _write_nuscenes_road_lines(nuscenes_map, map_writer)

-        # Get lane connectors
-        lane_connectors = []
-        for connector in nusc_map.lane_connector:
-            if connector.get("incoming_lane") == token or connector.get("outgoing_lane") == token:
-                lane_connectors.append(connector["token"])
+    for lane in lanes + lane_connectors:
+        map_writer.write_lane(lane)

-        # Extract boundaries
-        left_boundary = _get_lane_boundary(token, "left", nusc_map)
-        right_boundary = _get_lane_boundary(token, "right", nusc_map)
+    for road_edge in road_edges:
+        map_writer.write_road_edge(road_edge)

-        # Skip lanes without valid boundaries
-        if left_boundary is None or right_boundary is None:
-            continue
-        if left_boundary.is_empty or right_boundary.is_empty:
-            continue
-
-        # Extract baseline path
-        baseline_path = None
-        if token in nusc_map.arcline_path_3:
-            arc_path = nusc_map.arcline_path_3[token]
-            try:
-                points = discretize_lane(arc_path, resolution_meters=0.1)
-                xy_points = [(p[0], p[1]) for p in points]
-                baseline_path = LineString(xy_points)
-            except Exception:
-                baseline_path = None
-
-        # Align boundaries with baseline path direction
-        if baseline_path and left_boundary:
-            left_boundary = align_boundary_direction(baseline_path, left_boundary)
-        if baseline_path and right_boundary:
-            right_boundary = align_boundary_direction(baseline_path, right_boundary)
-
-        # Write lane object safely
-        try:
-            map_writer.write_lane(
-                CacheLane(
-                    object_id=token,
-                    lane_group_id=lane_record.get("road_segment_token", None),
-                    left_boundary=Polyline3D.from_linestring(left_boundary),
-                    right_boundary=Polyline3D.from_linestring(right_boundary),
-                    centerline=Polyline3D.from_linestring(baseline_path),
-                    left_lane_id=None,  # Not directly available in nuscenes
-                    right_lane_id=None,  # Not directly available in nuscenes
-                    predecessor_ids=incoming,
-                    successor_ids=outgoing,
-                    speed_limit_mps=None,  # Default value
-                    outline=None,
-                    geometry=polygon,
-                )
-            )
-        except Exception:
-            traceback.print_exc()
-            continue
+    for lane_group in lane_groups:
+        map_writer.write_lane_group(lane_group)

-def _write_nuscenes_lane_groups(nusc_map: NuScenesMap, map_writer: AbstractMapWriter) -> None:
-    """
-    Write lane group data to map_writer.
-    """
-    road_segments = nusc_map.road_segment
-    for segment in road_segments:
-        token = segment["token"]
-
-        # Extract geometry
-        try:
-            if "polygon_token" in segment:
-                polygon = nusc_map.extract_polygon(segment["polygon_token"])
-            else:
-                continue
-            if not polygon.is_valid:
-                continue
-        except Exception:
-            continue
+def _extract_nuscenes_lanes(nuscenes_map: NuScenesMap) -> List[CacheLane]:
+    """Helper function to extract lanes from a nuScenes map."""

-        # Find lanes in this segment
-        lane_ids = []
-        for lane in nusc_map.lane:
-            if lane.get("road_segment_token") == token:
-                lane_ids.append(lane["token"])
+    # NOTE: nuScenes does not explicitly provide lane groups and does not assign lanes to roadblocks.
+ # Therefore, we query the roadblocks given the middle-point of the centerline to assign lanes to a road block. + # Unlike road segments, road blocks outline a lane group going in the same direction. + # In case a roadblock cannot be assigned, e.g. because the lane is not located within any roadblock, or the + # roadblock data is invalid [1], we assign a new lane group with only this lane. + # [1] https://github.com/nutonomy/nuscenes-devkit/issues/862 - # Get connected segments - incoming, outgoing = _get_connected_segments(token, nusc_map) + road_blocks_invalid = nuscenes_map.map_name in ["singapore-queenstown", "singapore-hollandvillage"] - # Extract boundaries - left_boundary = _get_lane_group_boundary(token, "left", nusc_map) - right_boundary = _get_lane_group_boundary(token, "right", nusc_map) + road_block_dict: Dict[str, Polygon] = {} + if not road_blocks_invalid: + road_block_dict: Dict[str, Polygon] = { + road_block["token"]: nuscenes_map.extract_polygon(road_block["polygon_token"]) + for road_block in nuscenes_map.road_block + } + + road_block_map = OccupancyMap2D.from_dict(road_block_dict) + lanes: List[CacheLane] = [] + for lane_record in nuscenes_map.lane: + token = lane_record["token"] + + # 1. Extract centerline and boundaries + centerline, left_boundary, right_boundary = extract_lane_and_boundaries(nuscenes_map, lane_record) - # Skip invalid boundaries if left_boundary is None or right_boundary is None: - continue - if left_boundary.is_empty or right_boundary.is_empty: - continue - - # Use first lane's baseline path for direction alignment - baseline_path = None - if lane_ids: - first_lane_token = lane_ids[0] - if first_lane_token in nusc_map.arcline_path_3: - arc_path = nusc_map.arcline_path_3[first_lane_token] - try: - points = discretize_lane(arc_path, resolution_meters=0.1) - xy_points = [(p[0], p[1]) for p in points] - baseline_path = LineString(xy_points) - except Exception: - baseline_path = None - - if baseline_path and left_boundary: - left_boundary = align_boundary_direction(baseline_path, left_boundary) - if baseline_path and right_boundary: - right_boundary = align_boundary_direction(baseline_path, right_boundary) - - try: - map_writer.write_lane_group( - CacheLaneGroup( - object_id=token, - lane_ids=lane_ids, - left_boundary=left_boundary, - right_boundary=right_boundary, - intersection_id=None, # Handled in intersections - predecessor_ids=incoming, - successor_ids=outgoing, - outline=None, - geometry=polygon, - ) + continue # skip lanes without valid boundaries + + # 2. Query road block for lane group assignment + lane_group_id: str = token # default to self, override if road block found + if not road_blocks_invalid: + query_point = centerline.interpolate(0.5, normalized=True).shapely_point + intersecting_roadblock = road_block_map.query_nearest(query_point, max_distance=0.1, all_matches=False) + + # NOTE: if a lane cannot be assigned to a road block, we assume a new lane group with only this lane. + # The lane group id is set to be the same as the lane id in this case. 
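+            # Example (hypothetical tokens, for illustration only): if the centerline midpoint
+            # of lane "lane_abc" lies inside the polygon of roadblock "rb_123", the lane group id
+            # becomes "rb_123"; otherwise it stays "lane_abc" (a single-lane group).
+            # A standalone sketch of the same point-in-polygon assignment with plain shapely
+            # (the converter uses OccupancyMap2D.query_nearest instead):
+            #   from shapely.geometry import Point, Polygon
+            #   road_block = Polygon([(0, 0), (20, 0), (20, 7), (0, 7)])
+            #   lane_group_id = "rb_123" if road_block.contains(Point(10.0, 3.5)) else "lane_abc"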
+ if len(intersecting_roadblock) > 0: + lane_group_id = road_block_map.ids[intersecting_roadblock[0]] + + # Get topology + incoming = nuscenes_map.get_incoming_lane_ids(token) + outgoing = nuscenes_map.get_outgoing_lane_ids(token) + + lanes.append( + CacheLane( + object_id=token, + lane_group_id=lane_group_id, + left_boundary=left_boundary, + right_boundary=right_boundary, + centerline=centerline, + left_lane_id=None, + right_lane_id=None, + predecessor_ids=incoming, + successor_ids=outgoing, + speed_limit_mps=None, + outline=None, + geometry=None, ) - except Exception: - traceback.print_exc() - continue + ) + return lanes -def _write_nuscenes_intersections(nusc_map: NuScenesMap, map_writer: AbstractMapWriter) -> None: - """ - Write intersection data to map_writer. - """ - # road_blocks = nusc_map.road_block - # for block in road_blocks: - # token = block["token"] - # try: - # if "polygon_token" in block: - # polygon = nusc_map.extract_polygon(block["polygon_token"]) - # else: - # continue - # if not polygon.is_valid: - # continue - # except Exception: - # continue - # # Lane group IDs are not directly available; use empty list - # lane_group_ids = [] +def _extract_nuscenes_lane_connectors(nuscenes_map: NuScenesMap, road_edges: List[CacheRoadEdge]) -> List[CacheLane]: + """Helper function to extract lane connectors from a nuScenes map.""" - # map_writer.write_intersection( - # CacheIntersection( - # object_id=token, - # lane_group_ids=lane_group_ids, - # geometry=polygon, - # ) - # ) + # TODO @DanielDauner: consider using connected lanes to estimate the lane width + road_edge_map = OccupancyMap2D(geometries=[road_edge.shapely_linestring for road_edge in road_edges]) -def _write_nuscenes_crosswalks(nusc_map: NuScenesMap, map_writer: AbstractMapWriter) -> None: - """ - Write crosswalk data to map_writer. 
- """ - ped_crossings = nusc_map.ped_crossing - for crossing in ped_crossings: - token = crossing["token"] - try: - if "polygon_token" in crossing: - polygon = nusc_map.extract_polygon(crossing["polygon_token"]) - else: - continue - if not polygon.is_valid: + lane_connectors: List[CacheLane] = [] + for lane_record in nuscenes_map.lane_connector: + lane_connector_token: str = lane_record["token"] + + centerline = extract_nuscenes_centerline(nuscenes_map, lane_record) + + _, nearest_edge_distances = road_edge_map.query_nearest( + centerline.linestring, return_distance=True, all_matches=False + ) + road_edge_distance = nearest_edge_distances[0] if nearest_edge_distances else float("inf") + + lane_half_width = np.clip(road_edge_distance, MIN_LANE_WIDTH / 2.0, MAX_LANE_WIDTH / 2.0) + + left_pts = offset_points_perpendicular(centerline.array, offset=lane_half_width) + right_pts = offset_points_perpendicular(centerline.array, offset=-lane_half_width) + + predecessor_ids = nuscenes_map.get_incoming_lane_ids(lane_connector_token) + successor_ids = nuscenes_map.get_outgoing_lane_ids(lane_connector_token) + + lane_group_id = lane_connector_token + + lane_connectors.append( + CacheLane( + object_id=lane_connector_token, + lane_group_id=lane_group_id, + left_boundary=Polyline2D.from_array(left_pts), + right_boundary=Polyline2D.from_array(right_pts), + centerline=centerline, + left_lane_id=None, # Not directly available in nuscenes + right_lane_id=None, # Not directly available in nuscenes + predecessor_ids=predecessor_ids, + successor_ids=successor_ids, + speed_limit_mps=None, # Default value + outline=None, + geometry=None, + ) + ) + + return lane_connectors + + +def _extract_nuscenes_lane_groups( + nuscenes_map: NuScenesMap, + lanes: List[CacheLane], + lane_connectors: List[CacheLane], + intersection_assignment: Dict[str, int], +) -> List[CacheLaneGroup]: + """Helper function to extract lane groups from a nuScenes map.""" + + lane_groups = [] + lanes_dict = {lane.object_id: lane for lane in lanes + lane_connectors} + + # 1. Gather all lane group ids that were previously assigned in the lanes (either roadblocks of lane themselves) + lane_group_lane_dict: Dict[str, List[str]] = defaultdict(list) + for lane in lanes + lane_connectors: + lane_group_lane_dict[lane.lane_group_id].append(lane.object_id) + + for lane_group_id, lane_ids in lane_group_lane_dict.items(): + + if len(lane_ids) > 1: + lane_centerlines: List[Polyline2D] = [lanes_dict[lane_id].centerline for lane_id in lane_ids] + ordered_lane_indices = order_lanes_left_to_right(lane_centerlines) + left_boundary = lanes_dict[lane_ids[ordered_lane_indices[0]]].left_boundary + right_boundary = lanes_dict[lane_ids[ordered_lane_indices[-1]]].right_boundary + + else: + lane_id = lane_ids[0] + lane = lanes_dict[lane_id] + left_boundary = lane.left_boundary + right_boundary = lane.right_boundary + + # 2. 
For each lane group, gather predecessor and successor lane groups
+        predecessor_ids = set()
+        successor_ids = set()
+        for lane_id in lane_ids:
+            lane = lanes_dict[lane_id]
+            if lane is None:
                 continue
-        except Exception:
-            traceback.print_exc()
-            continue
+            for pred_id in lane.predecessor_ids:
+                pred_lane = lanes_dict.get(pred_id)
+                if pred_lane is not None:
+                    predecessor_ids.add(pred_lane.lane_group_id)
+            for succ_id in lane.successor_ids:
+                succ_lane = lanes_dict.get(succ_id)
+                if succ_lane is not None:
+                    successor_ids.add(succ_lane.lane_group_id)
+
+        intersection_ids = set(
+            [int(intersection_assignment[lane_id]) for lane_id in lane_ids if lane_id in intersection_assignment]
+        )
+        assert len(intersection_ids) <= 1, "A lane group cannot belong to multiple intersections."
+        intersection_id = None if len(intersection_ids) == 0 else intersection_ids.pop()
+
+        lane_groups.append(
+            CacheLaneGroup(
+                object_id=lane_group_id,
+                lane_ids=lane_ids,
+                left_boundary=left_boundary,
+                right_boundary=right_boundary,
+                intersection_id=intersection_id,
+                predecessor_ids=list(predecessor_ids),
+                successor_ids=list(successor_ids),
+                outline=None,
+                geometry=None,
+            )
+        )
+
+    return lane_groups
+
+
+def _write_nuscenes_intersections(
+    nuscenes_map: NuScenesMap, lane_connectors: List[CacheLane], map_writer: AbstractMapWriter
+) -> Dict[str, int]:
+    """Write intersection data to map_writer and return lane-connector to intersection assignment."""
+
+    intersection_assignment = {}
+
+    # 1. Extract intersections and corresponding polygons
+    intersection_polygons = []
+    for road_segment in nuscenes_map.road_segment:
+        if road_segment["is_intersection"]:
+            if "polygon_token" in road_segment:
+                polygon = nuscenes_map.extract_polygon(road_segment["polygon_token"])
+                intersection_polygons.append(polygon)
+
+    # 2. Find lane connectors within each intersection polygon
+    lane_connector_center_point_dict = {
+        lane_connector.object_id: lane_connector.centerline.interpolate(0.5, normalized=True).shapely_point
+        for lane_connector in lane_connectors
+    }
+    centerpoint_map = OccupancyMap2D.from_dict(lane_connector_center_point_dict)
+    for idx, intersection_polygon in enumerate(intersection_polygons):
+        intersecting_lane_connector_ids = centerpoint_map.intersects(intersection_polygon)
+        for lane_connector_id in intersecting_lane_connector_ids:
+            intersection_assignment[lane_connector_id] = idx
+
+        map_writer.write_intersection(
+            CacheIntersection(
+                object_id=idx,
+                lane_group_ids=intersecting_lane_connector_ids,
+                outline=None,
+                geometry=intersection_polygon,
+            )
+        )
+
+    return intersection_assignment
+
+
+def _write_nuscenes_crosswalks(nuscenes_map: NuScenesMap, map_writer: AbstractMapWriter) -> None:
+    """Write crosswalk data to map_writer."""
+
+    crosswalk_polygons = []
+    for crossing in nuscenes_map.ped_crossing:
+        if "polygon_token" in crossing:
+            polygon = nuscenes_map.extract_polygon(crossing["polygon_token"])
+            crosswalk_polygons.append(polygon)
+
+    for idx, polygon in enumerate(crosswalk_polygons):
         map_writer.write_crosswalk(
             CacheCrosswalk(
-                object_id=token,
+                object_id=idx,
                 geometry=polygon,
             )
         )


-def _write_nuscenes_walkways(nusc_map: NuScenesMap, map_writer: AbstractMapWriter) -> None:
-    """
-    Write walkway data to map_writer.
- """ - walkways = nusc_map.walkway - for walkway in walkways: - token = walkway["token"] - try: - if "polygon_token" in walkway: - polygon = nusc_map.extract_polygon(walkway["polygon_token"]) - else: - continue - if not polygon.is_valid: - continue - except Exception: - traceback.print_exc() - continue +def _write_nuscenes_walkways(nuscenes_map: NuScenesMap, map_writer: AbstractMapWriter) -> None: + """Write walkway data to map_writer.""" + walkway_polygons = [] + for walkway_record in nuscenes_map.walkway: + if "polygon_token" in walkway_record: + polygon = nuscenes_map.extract_polygon(walkway_record["polygon_token"]) + walkway_polygons.append(polygon) + for idx, polygon in enumerate(walkway_polygons): map_writer.write_walkway( CacheWalkway( - object_id=token, + object_id=idx, geometry=polygon, ) ) -def _write_nuscenes_carparks(nusc_map: NuScenesMap, map_writer: AbstractMapWriter) -> None: - """ - Write carpark data to map_writer. - """ - carpark_areas = nusc_map.carpark_area - for carpark in carpark_areas: - token = carpark["token"] - try: - if "polygon_token" in carpark: - polygon = nusc_map.extract_polygon(carpark["polygon_token"]) - else: - continue - if not polygon.is_valid: - continue - except Exception: - continue +def _write_nuscenes_carparks(nuscenes_map: NuScenesMap, map_writer: AbstractMapWriter) -> None: + """Write carpark data to map_writer.""" + carpark_polygons = [] + for carpark_record in nuscenes_map.carpark_area: + if "polygon_token" in carpark_record: + polygon = nuscenes_map.extract_polygon(carpark_record["polygon_token"]) + carpark_polygons.append(polygon) + for idx, polygon in enumerate(carpark_polygons): map_writer.write_carpark( CacheCarpark( - object_id=token, + object_id=idx, geometry=polygon, ) ) -def _write_nuscenes_generic_drivables(nusc_map: NuScenesMap, map_writer: AbstractMapWriter) -> None: - """ - Write generic drivable areas to map_writer. 
- """ - # Combine road segments, lanes, and drivable areas - all_drivables = [] - - # Add road segments - for segment in nusc_map.road_segment: - try: - if "polygon_token" in segment: - polygon = nusc_map.extract_polygon(segment["polygon_token"]) - if polygon.is_valid: - all_drivables.append((f"road_segment_{segment['token']}", polygon)) - except Exception: - traceback.print_exc() - continue - - # Add lanes - for lane in nusc_map.lane: - try: - if "polygon_token" in lane: - polygon = nusc_map.extract_polygon(lane["polygon_token"]) - if polygon.is_valid: - all_drivables.append((f"lane_{lane['token']}", polygon)) - except Exception: - traceback.print_exc() - continue - - # Add drivable areas - for road in nusc_map.drivable_area: - try: - if "polygon_token" in road: - polygon = nusc_map.extract_polygon(road["polygon_token"]) - if polygon.is_valid: - all_drivables.append((f"road_{road['token']}", polygon)) - except Exception: - traceback.print_exc() - continue - - for obj_id, geometry in all_drivables: - map_writer.write_generic_drivable( - CacheGenericDrivable( - object_id=obj_id, - geometry=geometry, - ) - ) +def _write_nuscenes_generic_drivables(nuscenes_map: NuScenesMap, map_writer: AbstractMapWriter) -> None: + """Write generic drivable area data to map_writer.""" + cell_size = 10.0 + drivable_polygons = [] + for drivable_area_record in nuscenes_map.drivable_area: + drivable_area = nuscenes_map.get("drivable_area", drivable_area_record["token"]) + for polygon_token in drivable_area["polygon_tokens"]: + polygon = nuscenes_map.extract_polygon(polygon_token) + split_polygons = split_polygon_by_grid(polygon, cell_size=cell_size) + drivable_polygons.extend(split_polygons) + # drivable_polygons.append(polygon) -def _write_nuscenes_stop_lines(nusc_map: NuScenesMap, map_writer: AbstractMapWriter) -> None: - """ - Write stop line data to map_writer. - """ - stop_lines = nusc_map.stop_line - for stop_line in stop_lines: - token = stop_line["token"] - try: - if "polygon_token" in stop_line: - polygon = nusc_map.extract_polygon(stop_line["polygon_token"]) - else: - continue - if not polygon.is_valid: - continue - except Exception: - traceback.print_exc() - continue + for idx, geometry in enumerate(drivable_polygons): + map_writer.write_generic_drivable(CacheGenericDrivable(object_id=idx, geometry=geometry)) - # Note: Stop lines are written as generic drivable for compatibility - map_writer.write_generic_drivable( - CacheGenericDrivable( - object_id=token, - geometry=polygon, - ) - ) +def _write_nuscenes_stop_lines(nuscenes_map: NuScenesMap, map_writer: AbstractMapWriter) -> None: + """Write stop line data to map_writer.""" + # FIXME: Add stop lines. + # stop_lines = nuscenes_map.stop_line + # for stop_line in stop_lines: + # token = stop_line["token"] + # try: + # if "polygon_token" in stop_line: + # polygon = nuscenes_map.extract_polygon(stop_line["polygon_token"]) + # else: + # continue + # if not polygon.is_valid: + # continue + # except Exception: + # traceback.print_exc() + # continue + + # # Note: Stop lines are written as generic drivable for compatibility + # map_writer.write_generic_drivable( + # CacheGenericDrivable( + # object_id=token, + # geometry=polygon, + # ) + # ) -def _write_nuscenes_road_lines(nusc_map: NuScenesMap, map_writer: AbstractMapWriter) -> None: - """ - Write road line data (dividers) to map_writer. 
- """ + +def _write_nuscenes_road_lines(nuscenes_map: NuScenesMap, map_writer: AbstractMapWriter) -> None: + """Write road line data (dividers) to map_writer.""" # Process road dividers - road_dividers = nusc_map.road_divider + road_dividers = nuscenes_map.road_divider + running_idx = 0 for divider in road_dividers: - token = divider["token"] - try: - line = nusc_map.extract_line(divider["line_token"]) - if not line.is_valid: - continue - except Exception: - continue + line = nuscenes_map.extract_line(divider["line_token"]) # Determine line type - line_type = _get_road_line_type(divider["line_token"], nusc_map) + line_type = _get_road_line_type(divider["line_token"], nuscenes_map) map_writer.write_road_line( CacheRoadLine( - object_id=token, + object_id=running_idx, road_line_type=line_type, polyline=Polyline3D(LineString(line.coords)), ) ) + running_idx += 1 # Process lane dividers - lane_dividers = nusc_map.lane_divider + lane_dividers = nuscenes_map.lane_divider for divider in lane_dividers: - token = divider["token"] - try: - line = nusc_map.extract_line(divider["line_token"]) - if not line.is_valid: - continue - except Exception: - continue - - line_type = _get_road_line_type(divider["line_token"], nusc_map) + line = nuscenes_map.extract_line(divider["line_token"]) + line_type = _get_road_line_type(divider["line_token"], nuscenes_map) map_writer.write_road_line( CacheRoadLine( - object_id=token, + object_id=running_idx, road_line_type=line_type, polyline=Polyline3D(LineString(line.coords)), ) ) + running_idx += 1 + + +def _extract_nuscenes_road_edges(nuscenes_map: NuScenesMap) -> List[CacheRoadEdge]: + """Helper function to extract road edges from a nuScenes map.""" + drivable_polygons = [] + for drivable_area_record in nuscenes_map.drivable_area: + drivable_area = nuscenes_map.get("drivable_area", drivable_area_record["token"]) + for polygon_token in drivable_area["polygon_tokens"]: + polygon = nuscenes_map.extract_polygon(polygon_token) + drivable_polygons.append(polygon) + + road_edge_linear_rings = get_road_edge_linear_rings(drivable_polygons) + road_edges_linestrings = split_line_geometry_by_max_length(road_edge_linear_rings, MAX_ROAD_EDGE_LENGTH) + + road_edges_cache: List[CacheRoadEdge] = [] + for idx in range(len(road_edges_linestrings)): + road_edges_cache.append( + CacheRoadEdge( + object_id=idx, + road_edge_type=RoadEdgeType.ROAD_EDGE_BOUNDARY, + polyline=Polyline2D.from_linestring(road_edges_linestrings[idx]), + ) + ) + return road_edges_cache -def _get_lane_boundary(lane_token: str, side: str, nusc_map: NuScenesMap) -> Optional[LineString]: - """ - Extract lane boundary geometry for a given side. - """ - lane_record = next((lr for lr in nusc_map.lane if lr["token"] == lane_token), None) - if not lane_record: - return None - - divider_segment_nodes_key = f"{side}_lane_divider_segment_nodes" - if divider_segment_nodes_key in lane_record and lane_record[divider_segment_nodes_key]: - nodes = lane_record[divider_segment_nodes_key] - boundary = LineString([(node["x"], node["y"]) for node in nodes]) - return boundary - - return None - - -def _get_lane_group_boundary(segment_token: str, side: str, nusc_map: NuScenesMap) -> Optional[LineString]: - """ - Extract lane group boundary geometry (simplified). 
- """ - # This is a simplified implementation; in practice, may need more robust geometry extraction - boundary_type = "road_divider" if side == "left" else "lane_divider" - - # Find the segment geometry - segment = next((rs for rs in nusc_map.road_segment if rs["token"] == segment_token), None) - if not segment: - return None - - try: - segment_geom = nusc_map.extract_polygon(segment["polygon_token"]) - except Exception: - return None - - # Find nearest boundary of the specified type within a threshold - nearest = None - min_dist = float("inf") - - if boundary_type == "road_divider": - records = nusc_map.road_divider - else: - records = nusc_map.lane_divider - - for record in records: - try: - line = nusc_map.extract_line(record["line_token"]) - dist = segment_geom.distance(line) - if dist < 10.0 and dist < min_dist: - min_dist = dist - nearest = line - except Exception: - continue - - return nearest - - -def _get_connected_segments(segment_token: str, nusc_map: NuScenesMap): - """ - Get incoming and outgoing segment connections. - """ - incoming, outgoing = [], [] - - for connector in nusc_map.lane_connector: - if connector.get("outgoing_lane") == segment_token: - incoming.append(connector.get("incoming_lane")) - elif connector.get("incoming_lane") == segment_token: - outgoing.append(connector.get("outgoing_lane")) - - incoming = [id for id in incoming if id is not None] - outgoing = [id for id in outgoing if id is not None] - - return incoming, outgoing +def _get_road_line_type(line_token: str, nuscenes_map: NuScenesMap) -> RoadLineType: + """Map nuscenes line type to RoadLineType.""" -def _get_road_line_type(line_token: str, nusc_map: NuScenesMap) -> RoadLineType: - """ - Map nuscenes line type to RoadLineType. - """ + # FIXME @DanielDauner: Store token to type mapping. Creating mapping for every call is not ideal. nuscenes_to_road_line_type = { "SINGLE_SOLID_WHITE": RoadLineType.SOLID_WHITE, "DOUBLE_DASHED_WHITE": RoadLineType.DOUBLE_DASH_WHITE, @@ -671,7 +481,7 @@ def _get_road_line_type(line_token: str, nusc_map: NuScenesMap) -> RoadLineType: } line_token_to_type = {} - for lane_record in nusc_map.lane: + for lane_record in nuscenes_map.lane: for seg in lane_record.get("left_lane_divider_segments", []): token = seg.get("line_token") seg_type = seg.get("segment_type") @@ -686,34 +496,3 @@ def _get_road_line_type(line_token: str, nusc_map: NuScenesMap) -> RoadLineType: nuscenes_type = line_token_to_type.get(line_token, "UNKNOWN") return nuscenes_to_road_line_type.get(nuscenes_type, RoadLineType.UNKNOWN) - - -def flip_linestring(linestring: LineString) -> LineString: - """ - Flip the direction of a LineString. - """ - return LineString(linestring.coords[::-1]) - - -def lines_same_direction(centerline: LineString, boundary: LineString) -> bool: - """ - Check if centerline and boundary have the same direction. - """ - center_start = np.array(centerline.coords[0]) - center_end = np.array(centerline.coords[-1]) - boundary_start = np.array(boundary.coords[0]) - boundary_end = np.array(boundary.coords[-1]) - - same_dir_dist = np.linalg.norm(center_start - boundary_start) + np.linalg.norm(center_end - boundary_end) - opposite_dir_dist = np.linalg.norm(center_start - boundary_end) + np.linalg.norm(center_end - boundary_start) - - return same_dir_dist <= opposite_dir_dist - - -def align_boundary_direction(centerline: LineString, boundary: LineString) -> LineString: - """ - Align boundary direction with centerline. 
-    """
-    if not lines_same_direction(centerline, boundary):
-        return flip_linestring(boundary)
-    return boundary
diff --git a/src/py123d/conversion/datasets/nuscenes/nuscenes_sensor_io.py b/src/py123d/conversion/datasets/nuscenes/nuscenes_sensor_io.py
index 7592182c..eccf0124 100644
--- a/src/py123d/conversion/datasets/nuscenes/nuscenes_sensor_io.py
+++ b/src/py123d/conversion/datasets/nuscenes/nuscenes_sensor_io.py
@@ -11,16 +11,13 @@ def load_nuscenes_lidar_pcs_from_file(pcd_path: Path, log_metadata: LogMetadata) -> Dict[LiDARType, np.ndarray]:
-    points = np.fromfile(pcd_path, dtype=np.float32).reshape(-1, 5)
+    lidar_pc = np.fromfile(pcd_path, dtype=np.float32).reshape(-1, len(NuScenesLidarIndex))

     # convert lidar to ego frame
     lidar_extrinsic = log_metadata.lidar_metadata[LiDARType.LIDAR_TOP].extrinsic
-
-    points[..., NuScenesLidarIndex.XYZ] = convert_points_3d_array_between_origins(
+    lidar_pc[..., NuScenesLidarIndex.XYZ] = convert_points_3d_array_between_origins(
         from_origin=lidar_extrinsic,
         to_origin=StateSE3(0, 0, 0, 1.0, 0, 0, 0),
-        points_3d_array=points[..., NuScenesLidarIndex.XYZ],
+        points_3d_array=lidar_pc[..., NuScenesLidarIndex.XYZ],
     )
-    lidar_pcs_dict: Dict[LiDARType, np.ndarray] = {LiDARType.LIDAR_TOP: points}
-
-    return lidar_pcs_dict
+    return {LiDARType.LIDAR_TOP: lidar_pc}
diff --git a/src/py123d/conversion/datasets/nuscenes/nuscenes_constants.py b/src/py123d/conversion/datasets/nuscenes/utils/nuscenes_constants.py
similarity index 100%
rename from src/py123d/conversion/datasets/nuscenes/nuscenes_constants.py
rename to src/py123d/conversion/datasets/nuscenes/utils/nuscenes_constants.py
diff --git a/src/py123d/conversion/datasets/nuscenes/utils/nuscenes_map_utils.py b/src/py123d/conversion/datasets/nuscenes/utils/nuscenes_map_utils.py
new file mode 100644
index 00000000..a512dc18
--- /dev/null
+++ b/src/py123d/conversion/datasets/nuscenes/utils/nuscenes_map_utils.py
@@ -0,0 +1,197 @@
+from typing import Dict, List, Optional, Tuple
+
+import numpy as np
+from scipy.spatial.distance import cdist
+
+from py123d.common.utils.dependencies import check_dependencies
+from py123d.geometry.polyline import Polyline2D
+
+check_dependencies(["nuscenes"], optional_name="nuscenes")
+from nuscenes.map_expansion import arcline_path_utils
+from nuscenes.map_expansion.map_api import NuScenesMap
+
+
+def extract_lane_and_boundaries(
+    nuscenes_map: NuScenesMap, lane_record: Dict
+) -> Tuple[Polyline2D, Optional[Polyline2D], Optional[Polyline2D]]:
+    """Extracts the centerline and left / right boundary from a nuScenes lane.
+
+    :param nuscenes_map: nuScenes map API instance
+    :param lane_record: lane record dictionary
+    :return: centerline, left boundary (optional), right boundary (optional)
+    """
+
+    # NOTE @DanielDauner: Code adapted from trajdata, Apache 2.0 License. Thank you!
+    # https://github.com/NVlabs/trajdata/blob/main/src/trajdata/dataset_specific/nusc/nusc_utils.py#L281
+
+    # Getting the bounding polygon vertices.
+    lane_polygon_obj = nuscenes_map.get("polygon", lane_record["polygon_token"])
+    polygon_nodes = [nuscenes_map.get("node", node_token) for node_token in lane_polygon_obj["exterior_node_tokens"]]
+    polygon_outline: np.ndarray = np.array([(node["x"], node["y"]) for node in polygon_nodes])
+
+    # Getting the lane center's points.
+    centerline = extract_nuscenes_centerline(nuscenes_map, lane_record)
+    centerline_array = centerline.array
+
+    # Computing the closest lane center point to each bounding polygon vertex.
+    closest_midlane_pt: np.ndarray = np.argmin(cdist(polygon_outline, centerline_array), axis=1)
+    # Computing the local direction of the lane at each lane center point.
+    direction_vectors: np.ndarray = np.diff(
+        centerline_array,
+        axis=0,
+        prepend=centerline_array[[0]] - (centerline_array[[1]] - centerline_array[[0]]),
+    )
+
+    # Selecting the direction vectors at the closest lane center point per polygon vertex.
+    local_dir_vecs: np.ndarray = direction_vectors[closest_midlane_pt]
+    # Calculating the vectors from the closest lane center point per polygon vertex to the polygon vertex.
+    origin_to_polygon_vecs: np.ndarray = polygon_outline - centerline_array[closest_midlane_pt]
+
+    # Computing the perpendicular dot product.
+    # See https://www.xarg.org/book/linear-algebra/2d-perp-product/
+    # If perp_dot_product < 0, then the associated polygon vertex is
+    # on the right edge of the lane.
+    perp_dot_product: np.ndarray = (
+        local_dir_vecs[:, 0] * origin_to_polygon_vecs[:, 1] - local_dir_vecs[:, 1] * origin_to_polygon_vecs[:, 0]
+    )
+
+    # Determining which indices are on the right of the lane center.
+    on_right: np.ndarray = perp_dot_product < 0
+    # Determining the boundary between the left/right polygon vertices
+    # (they will be together in blocks due to the ordering of the polygon vertices).
+    idx_changes: int = np.where(np.roll(on_right, 1) < on_right)[0].item()
+
+    if idx_changes > 0:
+        # If the block of left/right points spreads across the bounds of the array,
+        # roll it until the boundary between left/right points is at index 0.
+        # This is important so that the following index selection orders points
+        # without jumps.
+        polygon_outline = np.roll(polygon_outline, shift=-idx_changes, axis=0)
+        on_right = np.roll(on_right, shift=-idx_changes)
+
+    left_polyline_array: np.ndarray = polygon_outline[~on_right]
+    right_polyline_array: np.ndarray = polygon_outline[on_right]
+
+    # Final ordering check, ensuring that left_pts and right_pts can be combined
+    # into a polygon without the endpoints intersecting.
+    # Reversing the one lane edge that does not match the ordering of the midline.
+    if endpoints_intersect(left_polyline_array, right_polyline_array):
+        if not order_matches(left_polyline_array, centerline_array):
+            left_polyline_array = left_polyline_array[::-1]
+        else:
+            right_polyline_array = right_polyline_array[::-1]
+
+    left_boundary = Polyline2D.from_array(left_polyline_array) if len(left_polyline_array) > 1 else None
+    right_boundary = Polyline2D.from_array(right_polyline_array) if len(right_polyline_array) > 1 else None
+    return centerline, left_boundary, right_boundary
+
+
+def extract_nuscenes_centerline(nuscenes_map: NuScenesMap, lane_record: Dict) -> Polyline2D:
+    """Extract the centerline of a nuScenes lane.
+
+    :param nuscenes_map: nuScenes map API instance
+    :param lane_record: lane record dictionary
+    :return: centerline 2D polyline
+    """
+
+    # NOTE @DanielDauner: Code adapted from trajdata, Apache 2.0 License. Thank you!
+    # https://github.com/NVlabs/trajdata/blob/main/src/trajdata/dataset_specific/nusc/nusc_utils.py#L262
+
+    # Getting the lane center's points.
+    curr_lane = nuscenes_map.arcline_path_3.get(lane_record["token"], [])
+    centerline_array: np.ndarray = np.array(arcline_path_utils.discretize_lane(curr_lane, resolution_meters=0.25))[
+        :, :2
+    ]
+
+    # For some reason, nuScenes duplicates a few entries
+    # (likely how they're building their arcline representation).
+    # We delete those duplicate entries here.
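+    # Example with made-up coordinates: for centerline_array = [[5., 3.], [5., 3.], [6., 3.]],
+    # np.diff(..., axis=0, prepend=0) gives [[5., 3.], [0., 0.], [1., 0.]] with row norms
+    # [~5.83, 0.0, 1.0], so only index 1 (the exact duplicate) is flagged and removed below,
+    # leaving [[5., 3.], [6., 3.]].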
+ duplicate_check: np.ndarray = np.where( + np.linalg.norm(np.diff(centerline_array, axis=0, prepend=0), axis=1) < 1e-10 + )[0] + if duplicate_check.size > 0: + centerline_array = np.delete(centerline_array, duplicate_check, axis=0) + + return Polyline2D.from_array(centerline_array) + + +def endpoints_intersect(left_edge: np.ndarray, right_edge: np.ndarray) -> bool: + """Check if the line segment connecting the endpoints of left_edge intersects + with the line segment connecting the endpoints of right_edge. + + Forms two segments: (left_edge[-1], left_edge[0]) and (right_edge[-1], right_edge[0]), + then tests if they intersect using the counter-clockwise (CCW) orientation test. + """ + + # NOTE @DanielDauner: Code adapted from trajdata, Apache 2.0 License. Thank you! + # https://github.com/NVlabs/trajdata/blob/main/src/trajdata/utils/map_utils.py#L177 + + def ccw(A, B, C): + return (C[1] - A[1]) * (B[0] - A[0]) > (B[1] - A[1]) * (C[0] - A[0]) + + A, B = left_edge[-1], right_edge[-1] + C, D = right_edge[0], left_edge[0] + return ccw(A, C, D) != ccw(B, C, D) and ccw(A, B, C) != ccw(A, B, D) + + +def order_matches(pts: np.ndarray, ref: np.ndarray) -> bool: + """Check if two polylines have the same ordering direction, by comparing + the distance of their start and end points to the start point of the reference polyline. + """ + + # NOTE @DanielDauner: Code adapted from trajdata, Apache 2.0 License. Thank you! + # https://github.com/NVlabs/trajdata/blob/main/src/trajdata/utils/map_utils.py#L162 + + return np.linalg.norm(pts[0] - ref[0]) <= np.linalg.norm(pts[-1] - ref[0]) + + +def order_lanes_left_to_right(polylines: List[Polyline2D]) -> List[int]: + """ + Order lanes from left to right based on their position. + + :param polylines: List of polylines representing lanes + :return: List of indices representing the order of lanes from left to right + """ + if len(polylines) == 0: + return [] + + # Step 1: Compute the average direction vector across all lanes + all_directions = [] + for polyline in polylines: + + polyline_array = polyline.array + if len(polyline_array) < 2: + continue + start = np.array(polyline_array[0]) + end = np.array(polyline_array[-1]) + direction = end - start + all_directions.append(direction) + + avg_direction = np.mean(all_directions, axis=0) + avg_direction = avg_direction / np.linalg.norm(avg_direction) + + # Step 2: Compute perpendicular vector (left direction) + # Rotate 90 degrees counter-clockwise: (x, y) -> (-y, x) + left_vector = np.array([-avg_direction[1], avg_direction[0]]) + + # Step 3: For each lane, use midpoint of start and end, project onto left vector + lane_positions = [] + for i, polyline in enumerate(polylines): + if len(polyline) == 0: + lane_positions.append((i, 0)) + continue + + start = np.array(polyline[0]) + end = np.array(polyline[-1]) + # Use midpoint of start and end + midpoint = (start + end) / 2 + + # Project midpoint onto the left vector + position = np.dot(midpoint, left_vector) + lane_positions.append((i, position)) + + # Step 4: Sort by position (higher values are more to the left) + lane_positions.sort(key=lambda x: x[1], reverse=True) + + # Return ordered indices + return [idx for idx, _ in lane_positions] diff --git a/src/py123d/conversion/utils/map_utils/road_edge/road_edge_2d_utils.py b/src/py123d/conversion/utils/map_utils/road_edge/road_edge_2d_utils.py index f4cbb094..f8059b12 100644 --- a/src/py123d/conversion/utils/map_utils/road_edge/road_edge_2d_utils.py +++ 
b/src/py123d/conversion/utils/map_utils/road_edge/road_edge_2d_utils.py @@ -2,7 +2,8 @@ import numpy as np import shapely -from shapely import LinearRing, LineString, Polygon, union_all +from shapely import LinearRing, LineString, Polygon, box, union_all +from shapely.strtree import STRtree def get_road_edge_linear_rings( @@ -10,6 +11,10 @@ def get_road_edge_linear_rings( buffer_distance: float = 0.05, add_interiors: bool = True, ) -> List[LinearRing]: + """ + Helper function to extract road edges (i.e. linear rings) from drivable area polygons. + TODO: Move and rename for general use. + """ def _polygon_to_linear_rings(polygon: Polygon) -> List[LinearRing]: assert polygon.geom_type == "Polygon" @@ -40,7 +45,10 @@ def split_line_geometry_by_max_length( geometries: Union[LineString, LinearRing, List[Union[LineString, LinearRing]]], max_length_meters: float, ) -> List[LineString]: - # TODO: move somewhere more appropriate or implement in Polyline2D, PolylineSE2, etc. + """ + Splits LineString or LinearRing geometries into smaller segments based on a maximum length. + TODO: Move and rename for general use. + """ if not isinstance(geometries, list): geometries = [geometries] @@ -61,3 +69,39 @@ def split_line_geometry_by_max_length( all_segments.append(segment) return all_segments + + +def split_polygon_by_grid(polygon: Polygon, cell_size: float) -> List[Polygon]: + """ + Split a polygon by grid-like cells of given size. + TODO: Move and rename for general use. + """ + + minx, miny, maxx, maxy = polygon.bounds + + # Generate all grid cells + x_coords = np.arange(minx, maxx, cell_size) + y_coords = np.arange(miny, maxy, cell_size) + + grid_cells = [box(x, y, x + cell_size, y + cell_size) for x in x_coords for y in y_coords] + + # Build spatial index for fast queries + tree = STRtree(grid_cells) + + # Query cells that potentially intersect + candidate_indices = tree.query(polygon, predicate="intersects") + + cells = [] + for idx in candidate_indices: + cell = grid_cells[idx] + intersection = polygon.intersection(cell) + + if intersection.is_empty: + continue + + if intersection.geom_type == "Polygon": + cells.append(intersection) + elif intersection.geom_type == "MultiPolygon": + cells.extend(intersection.geoms) + + return cells diff --git a/src/py123d/datatypes/maps/gpkg/gpkg_map_objects.py b/src/py123d/datatypes/maps/gpkg/gpkg_map_objects.py index a7e0dc36..97b11d73 100644 --- a/src/py123d/datatypes/maps/gpkg/gpkg_map_objects.py +++ b/src/py123d/datatypes/maps/gpkg/gpkg_map_objects.py @@ -1,7 +1,6 @@ from __future__ import annotations import ast -import trimesh from functools import cached_property from typing import List, Optional, Union @@ -194,23 +193,9 @@ def centerline(self) -> Polyline3D: @property def outline_3d(self) -> Polyline3D: - left_array = self.left_boundary.array if getattr(self, "left_boundary", None) is not None else np.zeros((0, 3)) - right_array = self.right_boundary.array[::-1] if getattr(self, "right_boundary", None) is not None else np.zeros((0, 3)) - - outline_array = np.vstack((left_array, right_array)) if left_array.size + right_array.size > 0 else np.zeros((0, 3)) - - if outline_array.shape[0] == 0: - # fallback: use shapely polygon generate Polyline3D - poly = getattr(self, "shapely_polygon", None) - if poly is not None: - outline_array = np.array(poly.exterior.coords) - else: - return Polyline3D(np.zeros((0, 3))) - - # close - if outline_array.shape[0] > 0: - outline_array = np.vstack((outline_array, outline_array[0])) - + """Inherited, see superclass.""" + 
outline_array = np.vstack((self.left_boundary.array, self.right_boundary.array[::-1])) + outline_array = np.vstack((outline_array, outline_array[0])) return Polyline3D.from_linestring(geom.LineString(outline_array)) @property @@ -279,20 +264,8 @@ def right_boundary(self) -> Polyline3D: @property def outline_3d(self) -> Polyline3D: - # get left_array and right_array - left_array = self.left_boundary.array if getattr(self, "left_boundary", None) is not None else np.zeros((0, 3)) - right_array = self.right_boundary.array[::-1] if getattr(self, "right_boundary", None) is not None else np.zeros((0, 3)) - - if left_array.size + right_array.size == 0: - # fallback: use geometry polygon generate - poly = getattr(self, "shapely_polygon", None) - if poly is not None: - outline_array = np.array(poly.exterior.coords) - else: - return Polyline3D(np.zeros((0, 3))) - else: - outline_array = np.vstack((left_array, right_array)) - + """Inherited, see superclass.""" + outline_array = np.vstack((self.left_boundary.array, self.right_boundary.array[::-1])) return Polyline3D.from_linestring(geom.LineString(outline_array)) @property diff --git a/src/py123d/datatypes/maps/gpkg/gpkg_utils.py b/src/py123d/datatypes/maps/gpkg/gpkg_utils.py index ba587077..54dd93e6 100644 --- a/src/py123d/datatypes/maps/gpkg/gpkg_utils.py +++ b/src/py123d/datatypes/maps/gpkg/gpkg_utils.py @@ -21,23 +21,16 @@ def load_gdf_with_geometry_columns(gdf: gpd.GeoDataFrame, geometry_column_names: def get_all_rows_with_value( - elements: gpd.GeoDataFrame, column_label: str, desired_value -) -> gpd.GeoDataFrame: + elements: gpd.geodataframe.GeoDataFrame, column_label: str, desired_value: str +) -> gpd.geodataframe.GeoDataFrame: """ - Extract all matching elements by value. - Automatically handles both integer IDs and UUID strings. + Extract all matching elements. Note: if no matching desired_value is found, an empty GeoDataFrame is returned. + :param elements: data frame from MapsDb. + :param column_label: column to search in. + :param desired_value: value which is compared with the entries of the column_label column. + :return: a subset of the original GeoDataFrame containing the matching rows. """ - # If the column is of integer type, attempt to convert the desired_value to an integer before comparison. - col_dtype = elements[column_label].dtype - if np.issubdtype(col_dtype, np.integer): - try: - desired_value_int = int(desired_value) - return elements[elements[column_label] == desired_value_int] - except ValueError: - raise ValueError(f"Expected an integer value for column '{column_label}', got '{desired_value}'") - else: - # Otherwise, directly compare it as a string.
- return elements[elements[column_label].astype(str) == str(desired_value)] + return elements.iloc[np.where(elements[column_label].to_numpy().astype(int) == int(desired_value))] def get_row_with_value(elements: gpd.geodataframe.GeoDataFrame, column_label: str, desired_value: str) -> gpd.GeoSeries: diff --git a/src/py123d/geometry/utils/polyline_utils.py b/src/py123d/geometry/utils/polyline_utils.py index 20bf299f..d66628e8 100644 --- a/src/py123d/geometry/utils/polyline_utils.py +++ b/src/py123d/geometry/utils/polyline_utils.py @@ -3,6 +3,7 @@ from shapely.geometry import LineString from py123d.geometry.geometry_index import Point2DIndex, StateSE2Index +from py123d.geometry.transform.transform_se2 import translate_2d_along_body_frame def get_linestring_yaws(linestring: LineString) -> npt.NDArray[np.float64]: @@ -41,3 +42,24 @@ def get_path_progress(points_array: npt.NDArray[np.float64]) -> list[float]: points_diff: npt.NDArray[np.float64] = np.concatenate(([x_diff], [y_diff]), axis=0, dtype=np.float64) progress_diff = np.append(0.0, np.linalg.norm(points_diff, axis=0)) return np.cumsum(progress_diff, dtype=np.float64) # type: ignore + + +def offset_points_perpendicular(points_array: npt.NDArray[np.float64], offset: float) -> npt.NDArray[np.float64]: + if points_array.shape[-1] == len(Point2DIndex): + xy = points_array[..., Point2DIndex.XY] + yaws = get_points_2d_yaws(points_array[..., Point2DIndex.XY]) + elif points_array.shape[-1] == len(StateSE2Index): + xy = points_array[..., StateSE2Index.XY] + yaws = points_array[..., StateSE2Index.YAW] + else: + raise ValueError( + f"Invalid points_array shape: {points_array.shape}. Expected last dimension to be {len(Point2DIndex)} or " + f"{len(StateSE2Index)}." + ) + + return translate_2d_along_body_frame( + points_2d=xy, + yaws=yaws, + y_translate=offset, + x_translate=0.0, + ) diff --git a/src/py123d/script/config/conversion/datasets/nuscenes_mini_dataset.yaml b/src/py123d/script/config/conversion/datasets/nuscenes_mini_dataset.yaml index 95a3c927..4c9ba050 100644 --- a/src/py123d/script/config/conversion/datasets/nuscenes_mini_dataset.yaml +++ b/src/py123d/script/config/conversion/datasets/nuscenes_mini_dataset.yaml @@ -6,7 +6,7 @@ nuscenes_dataset: nuscenes_data_root: ${dataset_paths.nuscenes_data_root} nuscenes_map_root: ${dataset_paths.nuscenes_data_root} nuscenes_lanelet2_root: ${dataset_paths.nuscenes_data_root}/lanelet2 - use_lanelet2: False + use_lanelet2: false dataset_converter_config: _target_: py123d.conversion.dataset_converter_config.DatasetConverterConfig diff --git a/src/py123d/script/config/conversion/map_writer/gpkg_map_writer.yaml b/src/py123d/script/config/conversion/map_writer/gpkg_map_writer.yaml index 6bfb4877..86bf8e0b 100644 --- a/src/py123d/script/config/conversion/map_writer/gpkg_map_writer.yaml +++ b/src/py123d/script/config/conversion/map_writer/gpkg_map_writer.yaml @@ -2,3 +2,4 @@ _target_: py123d.conversion.map_writer.gpkg_map_writer.GPKGMapWriter _convert_: 'all' maps_root: ${dataset_paths.py123d_maps_root} +remap_ids: true From 867bd3ebbdfec692dc2ab23fc7687adeb3b118d8 Mon Sep 17 00:00:00 2001 From: Daniel Dauner Date: Sat, 1 Nov 2025 21:39:06 +0100 Subject: [PATCH 131/145] Reformatting, testing, modifying vehicle parameters, and changing the ego frame from imu to rear axle. 
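As part of the frame change named in this subject, every sensor extrinsic (camera and LiDAR) is re-expressed relative to the rear axle instead of the IMU. Below is a minimal sketch of that re-expression using plain homogeneous transforms; it is not the py123d API, and the IMU offset of (-0.05, 0.32, 0.0) in the rear-axle frame is taken from the _extrinsic_from_imu_to_rear_axle helper in this patch.

import numpy as np

def make_transform(rotation: np.ndarray, translation: np.ndarray) -> np.ndarray:
    # Assemble a 4x4 homogeneous transformation matrix.
    transform = np.eye(4)
    transform[:3, :3] = rotation
    transform[:3, 3] = translation
    return transform

# Pose of the IMU origin expressed in the rear-axle frame (pure translation,
# mirroring the Vector3D(0.05, -0.32, 0.0) IMU-to-rear-axle shift in the patch).
T_REAR_AXLE_FROM_IMU = make_transform(np.eye(3), np.array([-0.05, 0.32, 0.0]))

def extrinsic_imu_to_rear_axle(T_imu_sensor: np.ndarray) -> np.ndarray:
    # Left-composing with the fixed frame offset moves the reference origin
    # of the sensor pose from the IMU to the rear axle.
    return T_REAR_AXLE_FROM_IMU @ T_imu_sensor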
--- notebooks/bev_matplotlib.ipynb | 27 +- scripts/conversion/kitti360_conversion.sh | 3 + scripts/download/download_kitti_360.sh | 86 +++++ .../conversion/datasets/kitti_360/.gitkeep | 0 .../kitti_360/kitti_360_data_converter.py | 354 ++++++++++-------- .../datasets/kitti_360/kitti_360_helper.py | 175 +++++---- .../datasets/kitti_360/kitti_360_labels.py | 208 +++++----- .../kitti_360/kitti_360_map_conversion.py | 56 ++- .../datasets/kitti_360/kitti_360_sensor_io.py | 45 +-- .../kitti_360/preprocess_detection.py | 74 ++-- .../conversion/log_writer/arrow_log_writer.py | 2 +- .../conversion/map_writer/gpkg_map_writer.py | 85 +++-- .../registry/lidar_index_registry.py | 2 + .../sensor_io/lidar/file_lidar_io.py | 12 +- .../datatypes/maps/abstract_map_objects.py | 3 +- src/py123d/datatypes/scene/abstract_scene.py | 6 +- .../datatypes/scene/arrow/arrow_scene.py | 14 +- .../scene/arrow/utils/arrow_getters.py | 4 +- src/py123d/datatypes/scene/scene_filter.py | 4 +- src/py123d/datatypes/scene/scene_metadata.py | 8 +- .../sensors/camera/fisheye_mei_camera.py | 38 +- src/py123d/datatypes/sensors/camera/utils.py | 26 +- .../vehicle_state/vehicle_parameters.py | 25 +- .../geometry/transform/transform_se3.py | 7 +- .../scene_builder/default_scene_builder.yaml | 1 - .../conversion/datasets/kitti360_dataset.yaml | 2 +- src/py123d/script/run_conversion.py | 7 + src/py123d/script/run_viser.py | 2 +- src/py123d/visualization/matplotlib/utils.py | 9 +- test_viser.py | 10 +- 30 files changed, 748 insertions(+), 547 deletions(-) create mode 100644 scripts/conversion/kitti360_conversion.sh create mode 100644 scripts/download/download_kitti_360.sh delete mode 100644 src/py123d/conversion/datasets/kitti_360/.gitkeep diff --git a/notebooks/bev_matplotlib.ipynb b/notebooks/bev_matplotlib.ipynb index 910bf63a..53fdcd15 100644 --- a/notebooks/bev_matplotlib.ipynb +++ b/notebooks/bev_matplotlib.ipynb @@ -25,23 +25,27 @@ "\n", "# splits = [\"wopd_val\"]\n", "# splits = [\"carla_test\"]\n", - "splits = [\"nuscenes-mini_val\", \"nuscenes-mini_train\"]\n", + "# splits = [\"nuscenes-mini_val\", \"nuscenes-mini_train\"]\n", "# splits = [\"av2-sensor-mini_train\"]\n", "# splits = [\"pandaset_train\"]\n", + "\n", "# log_names = None\n", "\n", + "from py123d.common.multithreading.worker_ray import RayDistributed\n", + "\n", "\n", + "splits = [\"kitti360\"]\n", "\n", - "log_names = None\n", + "log_names = [\"2013_05_28_drive_0000_sync\"]\n", "scene_uuids = None\n", "\n", "scene_filter = SceneFilter(\n", " split_names=splits,\n", " log_names=log_names,\n", " scene_uuids=scene_uuids,\n", - " duration_s=None,\n", + " duration_s=10.0,\n", " history_s=0.0,\n", - " timestamp_threshold_s=20,\n", + " timestamp_threshold_s=30,\n", " shuffle=True,\n", " # camera_types=[CameraType.CAM_F0],\n", ")\n", @@ -117,9 +121,9 @@ ")\n", "\n", "ROAD_EDGE_CONFIG: PlotConfig = PlotConfig(\n", - " fill_color=DARKER_GREY.set_brightness(0.0),\n", + " fill_color=DARKER_GREY,\n", " fill_color_alpha=1.0,\n", - " line_color=DARKER_GREY.set_brightness(0.0),\n", + " line_color=DARKER_GREY,\n", " line_color_alpha=1.0,\n", " line_width=1.0,\n", " line_style=\"-\",\n", @@ -148,10 +152,10 @@ " # MapLayer.LANE,\n", " MapLayer.LANE_GROUP,\n", " MapLayer.GENERIC_DRIVABLE,\n", - " # MapLayer.CARPARK,\n", + " MapLayer.CARPARK,\n", " # MapLayer.CROSSWALK,\n", " # MapLayer.INTERSECTION,\n", - " # MapLayer.WALKWAY,\n", + " MapLayer.WALKWAY,\n", " MapLayer.ROAD_EDGE,\n", " MapLayer.ROAD_LINE,\n", " ]\n", @@ -220,10 +224,10 @@ "\n", " point_2d = 
ego_vehicle_state.bounding_box.center.state_se2.point_2d\n", " if map_api is not None:\n", - " # add_debug_map_on_ax(ax, scene.get_map_api(), point_2d, radius=radius, route_lane_group_ids=None)\n", + " add_debug_map_on_ax(ax, scene.get_map_api(), point_2d, radius=radius, route_lane_group_ids=None)\n", "\n", "\n", - " add_default_map_on_ax(ax, map_api, point_2d, radius=radius, route_lane_group_ids=None)\n", + " # add_default_map_on_ax(ax, map_api, point_2d, radius=radius, route_lane_group_ids=None)\n", " # add_traffic_lights_to_ax(ax, traffic_light_detections, scene.get_map_api())\n", "\n", " add_box_detections_to_ax(ax, box_detections)\n", @@ -256,7 +260,8 @@ "scene = np.random.choice(scenes)\n", "_plot_scene_on_ax(ax, scene, iteration, radius=80)\n", "# _plot_scene_on_ax(ax[1], scene, iteration, radius=50)\n", - "# _plot_scene_on_ax(ax[2], scene, iteration, radius=100)\n", + "# _plot_scene_on_ax(ax[2], scene, iteration,\n", + "# radius=100)\n", "\n", "plt.show()" ] diff --git a/scripts/conversion/kitti360_conversion.sh b/scripts/conversion/kitti360_conversion.sh new file mode 100644 index 00000000..1e939ad5 --- /dev/null +++ b/scripts/conversion/kitti360_conversion.sh @@ -0,0 +1,3 @@ +export KITTI360_DATA_ROOT="/home/daniel/kitti_360/KITTI-360" + +py123d-conversion datasets=["kitti360_dataset"] map_writer.remap_ids=true diff --git a/scripts/download/download_kitti_360.sh b/scripts/download/download_kitti_360.sh new file mode 100644 index 00000000..1cb3e540 --- /dev/null +++ b/scripts/download/download_kitti_360.sh @@ -0,0 +1,86 @@ +# 2D data & labels +# ---------------------------------------------------------------------------------------------------------------------- + +# Fisheye Images (355G) +wget https://s3.eu-central-1.amazonaws.com/avg-projects/KITTI-360/a1d81d9f7fc7195c937f9ad12e2a2c66441ecb4e/download_2d_fisheye.zip + +# Fisheye Calibration Images (11G) +wget https://s3.eu-central-1.amazonaws.com/avg-projects/KITTI-360/data_2d_raw/data_fisheye_calibration.zip + + +# Perspective Images for Train & Val (128G) +wget https://s3.eu-central-1.amazonaws.com/avg-projects/KITTI-360/a1d81d9f7fc7195c937f9ad12e2a2c66441ecb4e/download_2d_perspective.zip + +# Test Semantic (1.5G) +wget https://s3.eu-central-1.amazonaws.com/avg-projects/KITTI-360/data_2d_raw/data_2d_test.zip + +# Test NVS 50% Drop (0.3G) +wget https://s3.eu-central-1.amazonaws.com/avg-projects/KITTI-360/71f967e900f4e7c2e036a542f150effa31909b53/data_2d_nvs_drop50.zip + +# Test NVS 90% Drop (0.2G) +wget https://s3.eu-central-1.amazonaws.com/avg-projects/KITTI-360/71f967e900f4e7c2e036a542f150effa31909b53/data_2d_nvs_drop90.zip + +# Test SLAM (14G) +wget https://s3.eu-central-1.amazonaws.com/avg-projects/KITTI-360/data_2d_raw/data_2d_test_slam.zip + + +# Semantics of Left Perspective Camera (1.8G) +wget https://s3.eu-central-1.amazonaws.com/avg-projects/KITTI-360/ed180d24c0a144f2f1ac71c2c655a3e986517ed8/data_2d_semantics.zip + +# Semantics of Right Perspective Camera (1.8G) +wget https://s3.eu-central-1.amazonaws.com/avg-projects/KITTI-360/ed180d24c0a144f2f1ac71c2c655a3e986517ed8/data_2d_semantics_image_01.zip + + +# Confidence of Left Perspective Camera (44G) +wget https://s3.eu-central-1.amazonaws.com/avg-projects/KITTI-360/ed180d24c0a144f2f1ac71c2c655a3e986517ed8/data_2d_confidence.zip + +# Confidence of Right Perspective Camera (44G) +wget https://s3.eu-central-1.amazonaws.com/avg-projects/KITTI-360/ed180d24c0a144f2f1ac71c2c655a3e986517ed8/data_2d_confidence_image_01.zip + + + +# 3D data & labels +#
---------------------------------------------------------------------------------------------------------------------- + +# Raw Velodyne Scans (119G) +wget https://s3.eu-central-1.amazonaws.com/avg-projects/KITTI-360/a1d81d9f7fc7195c937f9ad12e2a2c66441ecb4e/download_3d_velodyne.zip + +# Test SLAM (12G) +wget https://s3.eu-central-1.amazonaws.com/avg-projects/KITTI-360/data_3d_raw/data_3d_test_slam.zip + +# Test Completion (35M) +wget https://s3.eu-central-1.amazonaws.com/avg-projects/KITTI-360/6489aabd632d115c4280b978b2dcf72cb0142ad9/data_3d_ssc_test.zip + + +# Raw SICK Scans (0.4G) +wget https://s3.eu-central-1.amazonaws.com/avg-projects/KITTI-360/a1d81d9f7fc7195c937f9ad12e2a2c66441ecb4e/download_3d_sick.zip + + +# Accumulated Point Clouds for Train & Val (12G) +wget https://s3.eu-central-1.amazonaws.com/avg-projects/KITTI-360/6489aabd632d115c4280b978b2dcf72cb0142ad9/data_3d_semantics.zip + +# Test Semantic (1.2G) +wget https://s3.eu-central-1.amazonaws.com/avg-projects/KITTI-360/6489aabd632d115c4280b978b2dcf72cb0142ad9/data_3d_semantics_test.zip + + +# 3D Bounding Boxes (30M) +wget https://s3.eu-central-1.amazonaws.com/avg-projects/KITTI-360/ffa164387078f48a20f0188aa31b0384bb19ce60/data_3d_bboxes.zip + + + +# Calibrations & Poses +# ---------------------------------------------------------------------------------------------------------------------- + +# Calibrations (3K) +wget https://s3.eu-central-1.amazonaws.com/avg-projects/KITTI-360/384509ed5413ccc81328cf8c55cc6af078b8c444/calibration.zip + + +# Vehicle Poses (8.9M) +wget https://s3.eu-central-1.amazonaws.com/avg-projects/KITTI-360/89a6bae3c8a6f789e12de4807fc1e8fdcf182cf4/data_poses.zip + + +# OXTS Sync Measurements (37.3M) +wget https://s3.eu-central-1.amazonaws.com/avg-projects/KITTI-360/89a6bae3c8a6f789e12de4807fc1e8fdcf182cf4/data_poses_oxts.zip + +# OXTS Raw Measurements (0.4G) +wget https://s3.eu-central-1.amazonaws.com/avg-projects/KITTI-360/89a6bae3c8a6f789e12de4807fc1e8fdcf182cf4/data_poses_oxts_extract.zip diff --git a/src/py123d/conversion/datasets/kitti_360/.gitkeep b/src/py123d/conversion/datasets/kitti_360/.gitkeep deleted file mode 100644 index e69de29b..00000000 diff --git a/src/py123d/conversion/datasets/kitti_360/kitti_360_data_converter.py b/src/py123d/conversion/datasets/kitti_360/kitti_360_data_converter.py index c29c8a13..d4d17d99 100644 --- a/src/py123d/conversion/datasets/kitti_360/kitti_360_data_converter.py +++ b/src/py123d/conversion/datasets/kitti_360/kitti_360_data_converter.py @@ -1,62 +1,71 @@ +import datetime +import logging import os +import pickle import re -import yaml +import xml.etree.ElementTree as ET +from collections import defaultdict from pathlib import Path from typing import Any, Dict, Final, List, Optional, Tuple, Union import numpy as np -import pickle -from collections import defaultdict -import datetime -import xml.etree.ElementTree as ET -import logging +import yaml +from py123d.conversion.abstract_dataset_converter import AbstractDatasetConverter +from py123d.conversion.dataset_converter_config import DatasetConverterConfig +from py123d.conversion.datasets.kitti_360.kitti_360_helper import ( + KITTI3602NUPLAN_IMU_CALIBRATION, + KITTI360Bbox3D, + get_lidar_extrinsic, +) +from py123d.conversion.datasets.kitti_360.kitti_360_labels import ( + BBOX_LABLES_TO_DETECTION_NAME_DICT, + KITTI360_DETECTION_NAME_DICT, + kittiId2label, +) +from py123d.conversion.datasets.kitti_360.kitti_360_map_conversion import convert_kitti360_map_with_writer +from
py123d.conversion.datasets.kitti_360.preprocess_detection import process_detection +from py123d.conversion.log_writer.abstract_log_writer import AbstractLogWriter, LiDARData +from py123d.conversion.map_writer.abstract_map_writer import AbstractMapWriter +from py123d.conversion.registry.lidar_index_registry import Kitti360LidarIndex from py123d.datatypes.detections.box_detections import ( BoxDetectionMetadata, BoxDetectionSE3, BoxDetectionWrapper, ) -from py123d.datatypes.sensors.camera.pinhole_camera import ( - PinholeCameraMetadata, - PinholeCameraType, - PinholeDistortion, - PinholeIntrinsics, -) +from py123d.datatypes.maps.map_metadata import MapMetadata +from py123d.datatypes.scene.scene_metadata import LogMetadata from py123d.datatypes.sensors.camera.fisheye_mei_camera import ( FisheyeMEICameraMetadata, FisheyeMEICameraType, FisheyeMEIDistortion, FisheyeMEIProjection, ) -from py123d.datatypes.sensors.lidar.lidar import LiDAR, LiDARMetadata, LiDARType -from py123d.conversion.registry.lidar_index_registry import Kitti360LidarIndex +from py123d.datatypes.sensors.camera.pinhole_camera import ( + PinholeCameraMetadata, + PinholeCameraType, + PinholeDistortion, + PinholeIntrinsics, +) +from py123d.datatypes.sensors.lidar.lidar import LiDARMetadata, LiDARType from py123d.datatypes.time.time_point import TimePoint from py123d.datatypes.vehicle_state.ego_state import DynamicStateSE3, EgoStateSE3 -from py123d.datatypes.vehicle_state.vehicle_parameters import get_kitti360_station_wagon_parameters,rear_axle_se3_to_center_se3 -from py123d.common.utils.uuid_utils import create_deterministic_uuid -from py123d.conversion.abstract_dataset_converter import AbstractDatasetConverter -from py123d.conversion.dataset_converter_config import DatasetConverterConfig -from py123d.conversion.log_writer.abstract_log_writer import AbstractLogWriter, LiDARData -from py123d.conversion.map_writer.abstract_map_writer import AbstractMapWriter -from py123d.datatypes.maps.map_metadata import MapMetadata -from py123d.datatypes.scene.scene_metadata import LogMetadata -from py123d.conversion.datasets.kitti_360.kitti_360_helper import KITTI360Bbox3D, KITTI3602NUPLAN_IMU_CALIBRATION, get_lidar_extrinsic -from py123d.conversion.datasets.kitti_360.kitti_360_labels import KITTI360_DETECTION_NAME_DICT, kittiId2label, BBOX_LABLES_TO_DETECTION_NAME_DICT -from py123d.conversion.datasets.kitti_360.kitti_360_map_conversion import ( - convert_kitti360_map_with_writer +from py123d.datatypes.vehicle_state.vehicle_parameters import ( + get_kitti360_vw_passat_parameters, + rear_axle_se3_to_center_se3, ) -from py123d.geometry import BoundingBoxSE3, StateSE3, Vector3D -from py123d.geometry.rotation import EulerAngles +from py123d.geometry import BoundingBoxSE3, Quaternion, StateSE3, Vector3D +from py123d.geometry.transform.transform_se3 import convert_se3_array_between_origins, translate_se3_along_body_frame KITTI360_DT: Final[float] = 0.1 KITTI360_DATA_ROOT = Path(os.environ["KITTI360_DATA_ROOT"]) KITTI360_CAMERA_TYPES = { - PinholeCameraType.CAM_STEREO_L: "image_00", - PinholeCameraType.CAM_STEREO_R: "image_01", - FisheyeMEICameraType.CAM_L: "image_02", - FisheyeMEICameraType.CAM_R: "image_03", + PinholeCameraType.CAM_STEREO_L: "image_00", + PinholeCameraType.CAM_STEREO_R: "image_01", + FisheyeMEICameraType.CAM_L: "image_02", + FisheyeMEICameraType.CAM_R: "image_03", } DIR_2D_RAW = "data_2d_raw" @@ -67,8 +76,7 @@ DIR_POSES = "data_poses" DIR_CALIB = "calibration" -# PATH_2D_RAW_ROOT: Path = KITTI360_DATA_ROOT / DIR_2D_RAW 
-PATH_2D_RAW_ROOT: Path = KITTI360_DATA_ROOT +PATH_2D_RAW_ROOT: Path = KITTI360_DATA_ROOT / DIR_2D_RAW PATH_2D_SMT_ROOT: Path = KITTI360_DATA_ROOT / DIR_2D_SMT PATH_3D_RAW_ROOT: Path = KITTI360_DATA_ROOT / DIR_3D_RAW PATH_3D_SMT_ROOT: Path = KITTI360_DATA_ROOT / DIR_3D_SMT @@ -83,20 +91,25 @@ DIR_3D_BBOX: PATH_3D_BBOX_ROOT / "train", } -D123_DEVKIT_ROOT = Path(os.environ["PY123D_DEVKIT_ROOT"]) -PREPOCESS_DETECTION_DIR = D123_DEVKIT_ROOT / "src" / "py123d" / "conversion" / "datasets" / "kitti_360" / "detection_preprocess" - -def create_token(split: str, log_name: str, timestamp_us: int, misc: str = None) -> str: - """Create a deterministic UUID-based token for KITTI-360 data. - - :param split: The data split (e.g., "kitti360") - :param log_name: The name of the log without file extension - :param timestamp_us: The timestamp in microseconds - :param misc: Any additional information to include in the UUID, defaults to None - :return: The generated deterministic UUID as hex string - """ - uuid_obj = create_deterministic_uuid(split=split, log_name=log_name, timestamp_us=timestamp_us, misc=misc) - return uuid_obj.hex +KITTI360_ALL_SEQUENCES: Final[List[str]] = [ + "2013_05_28_drive_0000_sync", + "2013_05_28_drive_0002_sync", + "2013_05_28_drive_0003_sync", + # "2013_05_28_drive_0004_sync", + # "2013_05_28_drive_0005_sync", + # "2013_05_28_drive_0006_sync", + # "2013_05_28_drive_0007_sync", + # "2013_05_28_drive_0008_sync", + # "2013_05_28_drive_0009_sync", + # "2013_05_28_drive_0010_sync", + # "2013_05_28_drive_0018_sync", +] + +# Create a temporary directory for detection preprocessing +# PREPROCESS_DETECTION_DIR = Path(tempfile.mkdtemp(prefix="kitti360_detection_")) + +PREPROCESS_DETECTION_DIR = Path("/home/daniel/kitti360_detection_temp") + def get_kitti360_map_metadata(split: str, log_name: str) -> MapMetadata: return MapMetadata( @@ -108,12 +121,14 @@ def get_kitti360_map_metadata(split: str, log_name: str) -> MapMetadata: map_is_local=True, ) + class Kitti360DataConverter(AbstractDatasetConverter): def __init__( self, splits: List[str], kitti360_data_root: Union[Path, str], dataset_converter_config: DatasetConverterConfig, + kitti36_sequences: List[str] = KITTI360_ALL_SEQUENCES, ) -> None: super().__init__(dataset_converter_config) for split in splits: @@ -123,8 +138,9 @@ def __init__( self._splits: List[str] = splits self._log_path: Path = Path(kitti360_data_root) + self._kitti36_sequences: List[str] = kitti36_sequences self._log_paths_and_split: List[Tuple[Path, str]] = self._collect_log_paths() - + self._total_maps = len(self._log_paths_and_split) # Each log has its own map self._total_logs = len(self._log_paths_and_split) @@ -138,9 +154,13 @@ def _collect_log_paths(self) -> List[Tuple[Path, str]]: missing_roots = [str(p) for p in KITTI360_REQUIRED_MODALITY_ROOTS.values() if not p.exists()] if missing_roots: raise FileNotFoundError(f"KITTI-360 required roots missing: {missing_roots}") - + # Enumerate candidate sequences from data_2d_raw - candidates = sorted(p for p in PATH_2D_RAW_ROOT.iterdir() if p.is_dir() and p.name.endswith("_sync")) + candidates = sorted( + p + for p in PATH_2D_RAW_ROOT.iterdir() + if p.is_dir() and p.name.endswith("_sync") and p.name in self._kitti36_sequences + ) def _has_modality(seq_name: str, modality_name: str, root: Path) -> bool: if modality_name == DIR_3D_BBOX: @@ -165,22 +185,22 @@ def _has_modality(seq_name: str, modality_name: str, root: Path) -> bool: f"Sequence '{seq_name}' skipped: missing modalities {missing_modalities}. 
" f"Root: {KITTI360_DATA_ROOT}" ) - + logging.info(f"Valid sequences found: {len(log_paths_and_split)}") return log_paths_and_split - + def get_available_splits(self) -> List[str]: """Returns a list of available raw data types.""" return ["kitti360"] - + def get_number_of_maps(self) -> int: """Returns the number of available raw data maps for conversion.""" return self._total_maps - + def get_number_of_logs(self) -> int: """Returns the number of available raw data logs for conversion.""" return self._total_logs - + def convert_map(self, map_index: int, map_writer: AbstractMapWriter) -> None: """ Convert a single map in raw data format to the uniform 123D format. @@ -189,15 +209,15 @@ def convert_map(self, map_index: int, map_writer: AbstractMapWriter) -> None: """ source_log_path, split = self._log_paths_and_split[map_index] log_name = source_log_path.stem - + map_metadata = get_kitti360_map_metadata(split, log_name) - + map_needs_writing = map_writer.reset(self.dataset_converter_config, map_metadata) if map_needs_writing: convert_kitti360_map_with_writer(log_name, map_writer) - + map_writer.close() - + def convert_log(self, log_index: int, log_writer: AbstractLogWriter) -> None: """ Convert a single log in raw data format to the uniform 123D format. @@ -206,7 +226,7 @@ def convert_log(self, log_index: int, log_writer: AbstractLogWriter) -> None: """ source_log_path, split = self._log_paths_and_split[log_index] log_name = source_log_path.stem - + # Create log metadata log_metadata = LogMetadata( dataset="kitti360", @@ -214,20 +234,23 @@ def convert_log(self, log_index: int, log_writer: AbstractLogWriter) -> None: log_name=log_name, location=log_name, timestep_seconds=KITTI360_DT, - vehicle_parameters=get_kitti360_station_wagon_parameters(), + vehicle_parameters=get_kitti360_vw_passat_parameters(), camera_metadata=get_kitti360_camera_metadata(), lidar_metadata=get_kitti360_lidar_metadata(), - map_metadata=get_kitti360_map_metadata(split, log_name) + map_metadata=get_kitti360_map_metadata(split, log_name), ) - + log_needs_writing = log_writer.reset(self.dataset_converter_config, log_metadata) if log_needs_writing: _write_recording_table(log_name, log_writer, self.dataset_converter_config) - + log_writer.close() -def get_kitti360_camera_metadata() -> Dict[Union[PinholeCameraType, FisheyeMEICameraType], Union[PinholeCameraMetadata, FisheyeMEICameraMetadata]]: - + +def get_kitti360_camera_metadata() -> ( + Dict[Union[PinholeCameraType, FisheyeMEICameraType], Union[PinholeCameraMetadata, FisheyeMEICameraMetadata]] +): + persp = PATH_CALIB_ROOT / "perspective.txt" assert persp.exists() @@ -244,15 +267,17 @@ def get_kitti360_camera_metadata() -> Dict[Union[PinholeCameraType, FisheyeMEICa persp_result[f"image_{cam_id}"]["wh"] = [int(round(float(x))) for x in value.split()] elif key.startswith("D_"): persp_result[f"image_{cam_id}"]["distortion"] = [float(x) for x in value.split()] - + fisheye_camera02_path = PATH_CALIB_ROOT / "image_02.yaml" fisheye_camera03_path = PATH_CALIB_ROOT / "image_03.yaml" assert fisheye_camera02_path.exists() and fisheye_camera03_path.exists() fisheye02 = _readYAMLFile(fisheye_camera02_path) fisheye03 = _readYAMLFile(fisheye_camera03_path) fisheye_result = {"image_02": fisheye02, "image_03": fisheye03} - - log_cam_infos: Dict[Union[PinholeCameraType, FisheyeMEICameraType], Union[PinholeCameraMetadata, FisheyeMEICameraMetadata]] = {} + + log_cam_infos: Dict[ + Union[PinholeCameraType, FisheyeMEICameraType], Union[PinholeCameraMetadata, FisheyeMEICameraMetadata] + ] = {} for 
cam_type, cam_name in KITTI360_CAMERA_TYPES.items(): if cam_name in ["image_00", "image_01"]: log_cam_infos[cam_type] = PinholeCameraMetadata( @@ -262,23 +287,23 @@ def get_kitti360_camera_metadata() -> Dict[Union[PinholeCameraType, FisheyeMEICa intrinsics=PinholeIntrinsics.from_camera_matrix(np.array(persp_result[cam_name]["intrinsic"])), distortion=PinholeDistortion.from_array(np.array(persp_result[cam_name]["distortion"])), ) - elif cam_name in ["image_02","image_03"]: + elif cam_name in ["image_02", "image_03"]: distortion_params = fisheye_result[cam_name]["distortion_parameters"] distortion = FisheyeMEIDistortion( - k1=distortion_params['k1'], - k2=distortion_params['k2'], - p1=distortion_params['p1'], - p2=distortion_params['p2'], + k1=distortion_params["k1"], + k2=distortion_params["k2"], + p1=distortion_params["p1"], + p2=distortion_params["p2"], ) - + projection_params = fisheye_result[cam_name]["projection_parameters"] projection = FisheyeMEIProjection( - gamma1=projection_params['gamma1'], - gamma2=projection_params['gamma2'], - u0=projection_params['u0'], - v0=projection_params['v0'], + gamma1=projection_params["gamma1"], + gamma2=projection_params["gamma2"], + u0=projection_params["u0"], + v0=projection_params["v0"], ) - + log_cam_infos[cam_type] = FisheyeMEICameraMetadata( camera_type=cam_type, width=fisheye_result[cam_name]["image_width"], @@ -290,6 +315,7 @@ def get_kitti360_camera_metadata() -> Dict[Union[PinholeCameraType, FisheyeMEICa return log_cam_infos + def _read_projection_matrix(p_line: str) -> np.ndarray: parts = p_line.split(" ", 1) if len(parts) != 2: @@ -299,44 +325,47 @@ def _read_projection_matrix(p_line: str) -> np.ndarray: K = P[:, :3] return K -def _readYAMLFile(fileName:Path) -> Dict[str, Any]: - '''make OpenCV YAML file compatible with python''' + +def _readYAMLFile(fileName: Path) -> Dict[str, Any]: + """make OpenCV YAML file compatible with python""" ret = {} - skip_lines=1 # Skip the first line which says "%YAML:1.0". Or replace it with "%YAML 1.0" + skip_lines = 1 # Skip the first line which says "%YAML:1.0". Or replace it with "%YAML 1.0" with open(fileName) as fin: for i in range(skip_lines): fin.readline() yamlFileOut = fin.read() - myRe = re.compile(r":([^ ])") # Add space after ":", if it doesn't exist. Python yaml requirement - yamlFileOut = myRe.sub(r': \1', yamlFileOut) + myRe = re.compile(r":([^ ])") # Add space after ":", if it doesn't exist. 
Python yaml requirement + yamlFileOut = myRe.sub(r": \1", yamlFileOut) ret = yaml.safe_load(yamlFileOut) return ret + def get_kitti360_lidar_metadata() -> Dict[LiDARType, LiDARMetadata]: metadata: Dict[LiDARType, LiDARMetadata] = {} extrinsic = get_lidar_extrinsic() extrinsic_state_se3 = StateSE3.from_transformation_matrix(extrinsic) + extrinsic_state_se3 = _extrinsic_from_imu_to_rear_axle(extrinsic_state_se3) metadata[LiDARType.LIDAR_TOP] = LiDARMetadata( lidar_type=LiDARType.LIDAR_TOP, lidar_index=Kitti360LidarIndex, - extrinsic=extrinsic_state_se3, + extrinsic=extrinsic_state_se3, ) return metadata + def _write_recording_table( - log_name: str, - log_writer: AbstractLogWriter, - data_converter_config: DatasetConverterConfig + log_name: str, log_writer: AbstractLogWriter, data_converter_config: DatasetConverterConfig ) -> None: - + ts_list: List[TimePoint] = _read_timestamps(log_name) ego_state_all, valid_timestamp = _extract_ego_state_all(log_name) - ego_states_xyz = np.array([ego_state.center.array[:3] for ego_state in ego_state_all],dtype=np.float64) - box_detection_wrapper_all = _extract_detections(log_name,len(ts_list),ego_states_xyz,valid_timestamp) + ego_states_xyz = np.array([ego_state.center.array[:3] for ego_state in ego_state_all], dtype=np.float64) + box_detection_wrapper_all = _extract_detections(log_name, len(ts_list), ego_states_xyz, valid_timestamp) logging.info(f"Number of valid timestamps with ego states: {len(valid_timestamp)}") + for idx in range(len(valid_timestamp)): valid_idx = valid_timestamp[idx] - + cameras = _extract_cameras(log_name, valid_idx, data_converter_config) lidars = _extract_lidar(log_name, valid_idx, data_converter_config) @@ -351,10 +380,6 @@ def _write_recording_table( route_lane_group_ids=None, ) - # if SORT_BY_TIMESTAMP: - # recording_table = open_arrow_table(log_file_path) - # recording_table = recording_table.sort_by([("timestamp", "ascending")]) - # write_arrow_table(recording_table, log_file_path) def _read_timestamps(log_name: str) -> Optional[List[TimePoint]]: """ @@ -365,7 +390,7 @@ def _read_timestamps(log_name: str) -> Optional[List[TimePoint]]: PATH_2D_RAW_ROOT / log_name / "image_00" / "timestamps.txt", PATH_2D_RAW_ROOT / log_name / "image_01" / "timestamps.txt", ] - + if log_name == "2013_05_28_drive_0002_sync": ts_files = ts_files[1:] @@ -377,22 +402,23 @@ def _read_timestamps(log_name: str) -> Optional[List[TimePoint]]: s = line.strip() if not s: continue - dt_str, ns_str = s.split('.') + dt_str, ns_str = s.split(".") dt_obj = datetime.datetime.strptime(dt_str, "%Y-%m-%d %H:%M:%S") dt_obj = dt_obj.replace(tzinfo=datetime.timezone.utc) unix_epoch = datetime.datetime(1970, 1, 1, tzinfo=datetime.timezone.utc) - + total_seconds = (dt_obj - unix_epoch).total_seconds() - + ns_value = int(ns_str) us_from_ns = ns_value // 1000 total_us = int(total_seconds * 1_000_000) + us_from_ns - + tps.append(TimePoint.from_us(total_us)) return tps return None + def _extract_ego_state_all(log_name: str) -> Tuple[List[EgoStateSE3], List[int]]: ego_state_all: List[List[float]] = [] @@ -403,31 +429,29 @@ def _extract_ego_state_all(log_name: str) -> Tuple[List[EgoStateSE3], List[int]] poses = np.loadtxt(pose_file) poses_time = poses[:, 0].astype(np.int32) valid_timestamp: List[int] = list(poses_time) - - oxts_path = PATH_POSES_ROOT / log_name / "oxts" / "data" - + + oxts_path = PATH_POSES_ROOT / log_name / "oxts" / "data" + for idx in range(len(valid_timestamp)): oxts_path_file = oxts_path / f"{int(valid_timestamp[idx]):010d}.txt" oxts_data = 
np.loadtxt(oxts_path_file) - vehicle_parameters = get_kitti360_station_wagon_parameters() + vehicle_parameters = get_kitti360_vw_passat_parameters() - pos = idx - if log_name=="2013_05_28_drive_0004_sync" and pos == 0: + pos = idx + if log_name == "2013_05_28_drive_0004_sync" and pos == 0: pos = 1 - + # NOTE you can use oxts_data[3:6] as roll, pitch, yaw for simplicity - #roll, pitch, yaw = oxts_data[3:6] + # roll, pitch, yaw = oxts_data[3:6] r00, r01, r02 = poses[pos, 1:4] r10, r11, r12 = poses[pos, 5:8] r20, r21, r22 = poses[pos, 9:12] - R_mat = np.array([[r00, r01, r02], - [r10, r11, r12], - [r20, r21, r22]], dtype=np.float64) - R_mat_cali = R_mat @ KITTI3602NUPLAN_IMU_CALIBRATION[:3,:3] + R_mat = np.array([[r00, r01, r02], [r10, r11, r12], [r20, r21, r22]], dtype=np.float64) + R_mat_cali = R_mat @ KITTI3602NUPLAN_IMU_CALIBRATION[:3, :3] - ego_quaternion = EulerAngles.from_rotation_matrix(R_mat_cali).quaternion - rear_axle_pose = StateSE3( + ego_quaternion = Quaternion.from_rotation_matrix(R_mat_cali) + imu_pose = StateSE3( x=poses[pos, 4], y=poses[pos, 8], z=poses[pos, 12], @@ -437,6 +461,11 @@ def _extract_ego_state_all(log_name: str) -> Tuple[List[EgoStateSE3], List[int]] qz=ego_quaternion.qz, ) + rear_axle_pose = translate_se3_along_body_frame( + imu_pose, + Vector3D(0.05, -0.32, 0.0), + ) + center = rear_axle_se3_to_center_se3(rear_axle_se3=rear_axle_pose, vehicle_parameters=vehicle_parameters) dynamic_state = DynamicStateSE3( velocity=Vector3D( @@ -449,14 +478,14 @@ def _extract_ego_state_all(log_name: str) -> Tuple[List[EgoStateSE3], List[int]] y=oxts_data[15], z=oxts_data[16], ), - angular_velocity=Vector3D( + angular_velocity=Vector3D( x=oxts_data[20], y=oxts_data[21], z=oxts_data[22], ), ) ego_state_all.append( - EgoStateSE3( + EgoStateSE3( center_se3=center, dynamic_state_se3=dynamic_state, vehicle_parameters=vehicle_parameters, @@ -465,13 +494,14 @@ def _extract_ego_state_all(log_name: str) -> Tuple[List[EgoStateSE3], List[int]] ) return ego_state_all, valid_timestamp + def _extract_detections( log_name: str, ts_len: int, ego_states_xyz: np.ndarray, valid_timestamp: List[int], ) -> List[BoxDetectionWrapper]: - + detections_states: List[List[List[float]]] = [[] for _ in range(ts_len)] detections_velocity: List[List[List[float]]] = [[] for _ in range(ts_len)] detections_tokens: List[List[str]] = [[] for _ in range(ts_len)] @@ -483,37 +513,38 @@ def _extract_detections( bbox_3d_path = PATH_3D_BBOX_ROOT / "train" / f"{log_name}.xml" if not bbox_3d_path.exists(): raise FileNotFoundError(f"BBox 3D file not found: {bbox_3d_path}") - + tree = ET.parse(bbox_3d_path) root = tree.getroot() - detection_preprocess_path = PREPOCESS_DETECTION_DIR / f"{log_name}_detection_preprocessed.pkl" - if detection_preprocess_path.exists(): - with open(detection_preprocess_path, "rb") as f: - detection_preprocess_result = pickle.load(f) - static_records_dict = {record_item["global_id"]: record_item for record_item in detection_preprocess_result["static"]} - logging.info(f"Loaded detection preprocess data from {detection_preprocess_path}") - else: - detection_preprocess_result = None + detection_preprocess_path = PREPROCESS_DETECTION_DIR / f"{log_name}_detection_preprocessed.pkl" + if not detection_preprocess_path.exists(): + process_detection(log_name=log_name, radius_m=60.0, output_dir=PREPROCESS_DETECTION_DIR) + with open(detection_preprocess_path, "rb") as f: + detection_preprocess_result = pickle.load(f) + static_records_dict = { + record_item["global_id"]: record_item for record_item in 
detection_preprocess_result["static"] + } + logging.info(f"Loaded detection preprocess data from {detection_preprocess_path}") dynamic_objs: Dict[int, List[KITTI360Bbox3D]] = defaultdict(list) for child in root: - if child.find('semanticId') is not None: - semanticIdKITTI = int(child.find('semanticId').text) + if child.find("semanticId") is not None: + semanticIdKITTI = int(child.find("semanticId").text) name = kittiId2label[semanticIdKITTI].name else: - lable = child.find('label').text - name = BBOX_LABLES_TO_DETECTION_NAME_DICT.get(lable, 'unknown') - if child.find('transform') is None or name not in KITTI360_DETECTION_NAME_DICT.keys(): + lable = child.find("label").text + name = BBOX_LABLES_TO_DETECTION_NAME_DICT.get(lable, "unknown") + if child.find("transform") is None or name not in KITTI360_DETECTION_NAME_DICT.keys(): continue obj = KITTI360Bbox3D() obj.parseBbox(child) - - #static object + + # static object if obj.timestamp == -1: if detection_preprocess_result is None: - obj.filter_by_radius(ego_states_xyz,valid_timestamp,radius=50.0) + obj.filter_by_radius(ego_states_xyz, valid_timestamp, radius=50.0) else: obj.load_detection_preprocess(static_records_dict) for record in obj.valid_frames["records"]: @@ -521,7 +552,7 @@ def _extract_detections( detections_states[frame].append(obj.get_state_array()) detections_velocity[frame].append(np.array([0.0, 0.0, 0.0])) detections_tokens[frame].append(str(obj.globalID)) - detections_types[frame].append(KITTI360_DETECTION_NAME_DICT[obj.name]) + detections_types[frame].append(KITTI360_DETECTION_NAME_DICT[obj.name]) else: global_ID = obj.globalID dynamic_objs[global_ID].append(obj) @@ -530,22 +561,22 @@ def _extract_detections( for global_id, obj_list in dynamic_objs.items(): obj_list.sort(key=lambda obj: obj.timestamp) num_frames = len(obj_list) - + positions = [obj.get_state_array()[:3] for obj in obj_list] timestamps = [int(obj.timestamp) for obj in obj_list] velocities = [] for i in range(1, num_frames - 1): - dt_frames = timestamps[i+1] - timestamps[i-1] + dt_frames = timestamps[i + 1] - timestamps[i - 1] if dt_frames > 0: dt = dt_frames * KITTI360_DT - vel = (positions[i+1] - positions[i-1]) / dt - vel = KITTI3602NUPLAN_IMU_CALIBRATION[:3,:3] @ obj_list[i].Rm.T @ vel + vel = (positions[i + 1] - positions[i - 1]) / dt + vel = KITTI3602NUPLAN_IMU_CALIBRATION[:3, :3] @ obj_list[i].Rm.T @ vel else: vel = np.zeros(3) velocities.append(vel) - + if num_frames > 1: # first and last frame velocities.insert(0, velocities[0]) @@ -588,35 +619,38 @@ def _extract_detections( box_detection_wrapper_all.append(BoxDetectionWrapper(box_detections=box_detections)) return box_detection_wrapper_all + def _extract_lidar(log_name: str, idx: int, data_converter_config: DatasetConverterConfig) -> List[LiDARData]: - + lidars: List[LiDARData] = [] if data_converter_config.include_lidars: - #NOTE special case for sequence 2013_05_28_drive_0002_sync which has no lidar data before frame 4391 + # NOTE special case for sequence 2013_05_28_drive_0002_sync which has no lidar data before frame 4391 if log_name == "2013_05_28_drive_0002_sync" and idx <= 4390: return lidars - + lidar_full_path = PATH_3D_RAW_ROOT / log_name / "velodyne_points" / "data" / f"{idx:010d}.bin" + if lidar_full_path.exists(): - relative_path = f"data_3d_raw/{log_name}/velodyne_points/data/{idx:010d}.bin" + lidars.append( LiDARData( lidar_type=LiDARType.LIDAR_TOP, timestamp=None, iteration=idx, - dataset_root=PATH_3D_RAW_ROOT, - relative_path=relative_path, + dataset_root=KITTI360_DATA_ROOT, + 
relative_path=lidar_full_path.relative_to(KITTI360_DATA_ROOT), ) ) else: raise FileNotFoundError(f"LiDAR file not found: {lidar_full_path}") - + return lidars + def _extract_cameras( log_name: str, idx: int, data_converter_config: DatasetConverterConfig ) -> Dict[Union[PinholeCameraType, FisheyeMEICameraType], Optional[Tuple[Union[str, bytes], StateSE3]]]: - + camera_dict: Dict[Union[PinholeCameraType, FisheyeMEICameraType], Optional[Tuple[Union[str, bytes], StateSE3]]] = {} for camera_type, cam_dir_name in KITTI360_CAMERA_TYPES.items(): if cam_dir_name in ["image_00", "image_01"]: @@ -627,9 +661,9 @@ def _extract_cameras( cam2pose_txt = PATH_CALIB_ROOT / "calib_cam_to_pose.txt" if not cam2pose_txt.exists(): raise FileNotFoundError(f"calib_cam_to_pose.txt file not found: {cam2pose_txt}") - - lastrow = np.array([0,0,0,1]).reshape(1,4) - with open(cam2pose_txt, 'r') as f: + + lastrow = np.array([0, 0, 0, 1]).reshape(1, 4) + with open(cam2pose_txt, "r") as f: for line in f: parts = line.strip().split() key = parts[0][:-1] @@ -639,6 +673,9 @@ def _extract_cameras( cam2pose = np.concatenate((matrix, lastrow)) cam2pose = KITTI3602NUPLAN_IMU_CALIBRATION @ cam2pose + camera_extrinsic = StateSE3.from_transformation_matrix(cam2pose) + camera_extrinsic = _extrinsic_from_imu_to_rear_axle(camera_extrinsic) + if img_path_png.exists(): if data_converter_config.camera_store_option == "path": camera_data = str(img_path_png) @@ -647,7 +684,12 @@ def _extract_cameras( camera_data = f.read() else: camera_data = None - - camera_extrinsic = StateSE3.from_transformation_matrix(cam2pose) + camera_dict[camera_type] = camera_data, camera_extrinsic return camera_dict + + +def _extrinsic_from_imu_to_rear_axle(extrinsic: StateSE3) -> StateSE3: + imu_se3 = StateSE3(x=-0.05, y=0.32, z=0.0, qw=1.0, qx=0.0, qy=0.0, qz=0.0) + rear_axle_se3 = StateSE3(x=0.0, y=0.0, z=0.0, qw=1.0, qx=0.0, qy=0.0, qz=0.0) + return StateSE3.from_array(convert_se3_array_between_origins(imu_se3, rear_axle_se3, extrinsic.array)) diff --git a/src/py123d/conversion/datasets/kitti_360/kitti_360_helper.py b/src/py123d/conversion/datasets/kitti_360/kitti_360_helper.py index 8486329c..09d7d1e4 100644 --- a/src/py123d/conversion/datasets/kitti_360/kitti_360_helper.py +++ b/src/py123d/conversion/datasets/kitti_360/kitti_360_helper.py @@ -1,17 +1,15 @@ -import numpy as np - -from collections import defaultdict -from typing import Dict, Any, List, Tuple import copy +import os +from pathlib import Path +from typing import Any, Dict, List, Tuple + +import numpy as np from scipy.linalg import polar +from py123d.conversion.datasets.kitti_360.kitti_360_labels import BBOX_LABLES_TO_DETECTION_NAME_DICT, kittiId2label from py123d.geometry import BoundingBoxSE3, StateSE3 from py123d.geometry.polyline import Polyline3D from py123d.geometry.rotation import EulerAngles -from py123d.conversion.datasets.kitti_360.kitti_360_labels import kittiId2label,BBOX_LABLES_TO_DETECTION_NAME_DICT - -import os -from pathlib import Path KITTI360_DATA_ROOT = Path(os.environ["KITTI360_DATA_ROOT"]) DIR_CALIB = "calibration" @@ -20,23 +18,29 @@ DEFAULT_ROLL = 0.0 DEFAULT_PITCH = 0.0 -kitti3602nuplan_imu_calibration_ideal = np.array([ +kitti3602nuplan_imu_calibration_ideal = np.array( + [ [1, 0, 0, 0], [0, -1, 0, 0], [0, 0, -1, 0], [0, 0, 0, 1], - ], dtype=np.float64) + ], + dtype=np.float64, +) KITTI3602NUPLAN_IMU_CALIBRATION = kitti3602nuplan_imu_calibration_ideal MAX_N = 1000 -def local2global(semanticId: int, instanceId: int) -> int: - globalId = semanticId*MAX_N + instanceId + + +def 
local2global(semanticId: int, instanceId: int) -> int: + globalId = semanticId * MAX_N + instanceId if isinstance(globalId, np.ndarray): return globalId.astype(np.int32) else: return int(globalId) - + + def global2local(globalId: int) -> Tuple[int, int]: semanticId = globalId // MAX_N instanceId = globalId % MAX_N @@ -45,7 +49,8 @@ def global2local(globalId: int) -> Tuple[int, int]: else: return int(semanticId), int(instanceId) -class KITTI360Bbox3D(): + +class KITTI360Bbox3D: # global id(only used for sequence 0004) dynamic_global_id = 2000000 @@ -68,34 +73,34 @@ def __init__(self): self.timestamp = -1 # name - self.name = '' + self.name = "" + + # label + self.label = "" - #label - self.label = '' - def parseBbox(self, child): - self.timestamp = int(child.find('timestamp').text) + self.timestamp = int(child.find("timestamp").text) - self.annotationId = int(child.find('index').text) + 1 + self.annotationId = int(child.find("index").text) + 1 - self.label = child.find('label').text + self.label = child.find("label").text - if child.find('semanticId') is None: - self.name = BBOX_LABLES_TO_DETECTION_NAME_DICT.get(self.label, 'unknown') - self.is_dynamic = int(child.find('dynamic').text) + if child.find("semanticId") is None: + self.name = BBOX_LABLES_TO_DETECTION_NAME_DICT.get(self.label, "unknown") + self.is_dynamic = int(child.find("dynamic").text) if self.is_dynamic != 0: - dynamicSeq = int(child.find('dynamicSeq').text) + dynamicSeq = int(child.find("dynamicSeq").text) self.globalID = KITTI360Bbox3D.dynamic_global_id + dynamicSeq else: self.globalID = KITTI360Bbox3D.static_global_id KITTI360Bbox3D.static_global_id += 1 else: - self.start_frame = int(child.find('start_frame').text) - self.end_frame = int(child.find('end_frame').text) - - semanticIdKITTI = int(child.find('semanticId').text) + self.start_frame = int(child.find("start_frame").text) + self.end_frame = int(child.find("end_frame").text) + + semanticIdKITTI = int(child.find("semanticId").text) self.semanticId = kittiId2label[semanticIdKITTI].id - self.instanceId = int(child.find('instanceId').text) + self.instanceId = int(child.find("instanceId").text) self.name = kittiId2label[semanticIdKITTI].name self.globalID = local2global(self.semanticId, self.instanceId) @@ -106,26 +111,26 @@ def parseBbox(self, child): self.parse_scale_rotation() def parseVertices(self, child): - transform = parseOpencvMatrix(child.find('transform')) - R = transform[:3,:3] - T = transform[:3,3] - vertices = parseOpencvMatrix(child.find('vertices')) + transform = parseOpencvMatrix(child.find("transform")) + R = transform[:3, :3] + T = transform[:3, 3] + vertices = parseOpencvMatrix(child.find("vertices")) self.vertices_template = copy.deepcopy(vertices) - + vertices = np.matmul(R, vertices.transpose()).transpose() + T self.vertices = vertices - + self.R = R self.T = T - + def parse_scale_rotation(self): - Rm, Sm = polar(self.R) + Rm, Sm = polar(self.R) if np.linalg.det(Rm) < 0: Rm[0] = -Rm[0] scale = np.diag(Sm) # yaw, pitch, roll = R.from_matrix(Rm).as_euler('zyx', degrees=False) euler_angles = EulerAngles.from_rotation_matrix(Rm) - yaw,pitch,roll = euler_angles.yaw, euler_angles.pitch, euler_angles.roll + yaw, pitch, roll = euler_angles.yaw, euler_angles.pitch, euler_angles.roll obj_quaternion = euler_angles.quaternion # obj_quaternion = EulerAngles(roll=roll, pitch=pitch, yaw=yaw).quaternion @@ -133,13 +138,13 @@ def parse_scale_rotation(self): self.Sm = np.array(Sm) self.scale = scale self.yaw = yaw - self.pitch = pitch + self.pitch = pitch self.roll 
= roll self.qw = obj_quaternion.qw self.qx = obj_quaternion.qx self.qy = obj_quaternion.qy self.qz = obj_quaternion.qz - + def get_state_array(self) -> np.ndarray: center = StateSE3( x=self.T[0], @@ -156,100 +161,110 @@ def get_state_array(self) -> np.ndarray: return bounding_box_se3.array def filter_by_radius(self, ego_state_xyz: np.ndarray, valid_timestamp: List[int], radius: float = 50.0) -> None: - ''' first stage of detection, used to filter out detections by radius ''' + """first stage of detection, used to filter out detections by radius""" d = np.linalg.norm(ego_state_xyz - self.T[None, :], axis=1) idxs = np.where(d <= radius)[0] for idx in idxs: - self.valid_frames["records"].append({ - "timestamp": valid_timestamp[idx], - "points_in_box": None, - }) + self.valid_frames["records"].append( + { + "timestamp": valid_timestamp[idx], + "points_in_box": None, + } + ) def box_visible_in_point_cloud(self, points: np.ndarray) -> Tuple[bool, int]: - ''' points: (N,3) , box: (8,3) ''' + """points: (N,3) , box: (8,3)""" box = self.vertices.copy() # avoid calculating ground point cloud z_offset = 0.1 - box[:,2] += z_offset + box[:, 2] += z_offset O, A, B, C = box[0], box[1], box[2], box[5] OA = A - O OB = B - O OC = C - O POA, POB, POC = (points @ OA[..., None])[:, 0], (points @ OB[..., None])[:, 0], (points @ OC[..., None])[:, 0] - mask = (np.dot(O, OA) < POA) & (POA < np.dot(A, OA)) & \ - (np.dot(O, OB) < POB) & (POB < np.dot(B, OB)) & \ - (np.dot(O, OC) < POC) & (POC < np.dot(C, OC)) - + mask = ( + (np.dot(O, OA) < POA) + & (POA < np.dot(A, OA)) + & (np.dot(O, OB) < POB) + & (POB < np.dot(B, OB)) + & (np.dot(O, OC) < POC) + & (POC < np.dot(C, OC)) + ) + points_in_box = np.sum(mask) visible = True if points_in_box > 40 else False return visible, points_in_box - + def load_detection_preprocess(self, records_dict: Dict[int, Any]): if self.globalID in records_dict: self.valid_frames["records"] = records_dict[self.globalID]["records"] -class KITTI360_MAP_Bbox3D(): + +class KITTI360_MAP_Bbox3D: def __init__(self): self.id = -1 - self.label = ' ' + self.label = " " self.vertices: Polyline3D = None self.R = None self.T = None - + def parseVertices_plane(self, child): - transform = parseOpencvMatrix(child.find('transform')) - R = transform[:3,:3] - T = transform[:3,3] - if child.find("transform_plane").find('rows').text == '0': - vertices = parseOpencvMatrix(child.find('vertices')) + transform = parseOpencvMatrix(child.find("transform")) + R = transform[:3, :3] + T = transform[:3, 3] + if child.find("transform_plane").find("rows").text == "0": + vertices = parseOpencvMatrix(child.find("vertices")) else: - vertices = parseOpencvMatrix(child.find('vertices_plane')) - + vertices = parseOpencvMatrix(child.find("vertices_plane")) + vertices = np.matmul(R, vertices.transpose()).transpose() + T self.vertices = Polyline3D.from_array(vertices) - + self.R = R self.T = T def parseBbox(self, child): - self.id = int(child.find('index').text) - self.label = child.find('label').text + self.id = int(child.find("index").text) + self.label = child.find("label").text self.parseVertices_plane(child) - + def parseOpencvMatrix(node): - rows = int(node.find('rows').text) - cols = int(node.find('cols').text) - data = node.find('data').text.split(' ') + rows = int(node.find("rows").text) + cols = int(node.find("cols").text) + data = node.find("data").text.split(" ") mat = [] for d in data: - d = d.replace('\n', '') - if len(d)<1: + d = d.replace("\n", "") + if len(d) < 1: continue mat.append(float(d)) mat = np.reshape(mat, 
[rows, cols]) return mat + def get_lidar_extrinsic() -> np.ndarray: cam2pose_txt = PATH_CALIB_ROOT / "calib_cam_to_pose.txt" if not cam2pose_txt.exists(): raise FileNotFoundError(f"calib_cam_to_pose.txt file not found: {cam2pose_txt}") - + cam2velo_txt = PATH_CALIB_ROOT / "calib_cam_to_velo.txt" if not cam2velo_txt.exists(): raise FileNotFoundError(f"calib_cam_to_velo.txt file not found: {cam2velo_txt}") - - lastrow = np.array([0,0,0,1]).reshape(1,4) - with open(cam2pose_txt, 'r') as f: + lastrow = np.array([0, 0, 0, 1]).reshape(1, 4) + + with open(cam2pose_txt, "r") as f: image_00 = next(f) values = list(map(float, image_00.strip().split()[1:])) matrix = np.array(values).reshape(3, 4) cam2pose = np.concatenate((matrix, lastrow)) cam2pose = KITTI3602NUPLAN_IMU_CALIBRATION @ cam2pose - - cam2velo = np.concatenate((np.loadtxt(cam2velo_txt).reshape(3,4), lastrow)) - extrinsic = cam2pose @ np.linalg.inv(cam2velo) - return extrinsic \ No newline at end of file + + cam2velo = np.concatenate((np.loadtxt(cam2velo_txt).reshape(3, 4), lastrow)) + extrinsic = cam2pose @ np.linalg.inv(cam2velo) + + return extrinsic diff --git a/src/py123d/conversion/datasets/kitti_360/kitti_360_labels.py b/src/py123d/conversion/datasets/kitti_360/kitti_360_labels.py index 7a58b113..6feafc1d 100644 --- a/src/py123d/conversion/datasets/kitti_360/kitti_360_labels.py +++ b/src/py123d/conversion/datasets/kitti_360/kitti_360_labels.py @@ -5,58 +5,50 @@ from collections import namedtuple - -#-------------------------------------------------------------------------------- +# -------------------------------------------------------------------------------- # Definitions -#-------------------------------------------------------------------------------- +# -------------------------------------------------------------------------------- # a label and all meta information -Label = namedtuple( 'Label' , [ - - 'name' , # The identifier of this label, e.g. 'car', 'person', ... . - # We use them to uniquely name a class - - 'id' , # An integer ID that is associated with this label. - # The IDs are used to represent the label in ground truth images - # An ID of -1 means that this label does not have an ID and thus - # is ignored when creating ground truth images (e.g. license plate). - # Do not modify these IDs, since exactly these IDs are expected by the - # evaluation server. - - 'kittiId' , # An integer ID that is associated with this label for KITTI-360 - # NOT FOR RELEASING - - 'trainId' , # Feel free to modify these IDs as suitable for your method. Then create - # ground truth images with train IDs, using the tools provided in the - # 'preparation' folder. However, make sure to validate or submit results - # to our evaluation server using the regular IDs above! - # For trainIds, multiple labels might have the same ID. Then, these labels - # are mapped to the same class in the ground truth images. For the inverse - # mapping, we use the label that is defined first in the list below. - # For example, mapping all void-type classes to the same ID in training, - # might make sense for some approaches. - # Max value is 255! - - 'category' , # The name of the category that this label belongs to - - 'categoryId' , # The ID of this category. Used to create ground truth images - # on category level. 
- - 'hasInstances', # Whether this label distinguishes between single instances or not - - 'ignoreInEval', # Whether pixels having this class as ground truth label are ignored - # during evaluations or not - - 'ignoreInInst', # Whether pixels having this class as ground truth label are ignored - # during evaluations of instance segmentation or not - - 'color' , # The color of this label - ] ) - - -#-------------------------------------------------------------------------------- +Label = namedtuple( + "Label", + [ + "name", # The identifier of this label, e.g. 'car', 'person', ... . + # We use them to uniquely name a class + "id", # An integer ID that is associated with this label. + # The IDs are used to represent the label in ground truth images + # An ID of -1 means that this label does not have an ID and thus + # is ignored when creating ground truth images (e.g. license plate). + # Do not modify these IDs, since exactly these IDs are expected by the + # evaluation server. + "kittiId", # An integer ID that is associated with this label for KITTI-360 + # NOT FOR RELEASING + "trainId", # Feel free to modify these IDs as suitable for your method. Then create + # ground truth images with train IDs, using the tools provided in the + # 'preparation' folder. However, make sure to validate or submit results + # to our evaluation server using the regular IDs above! + # For trainIds, multiple labels might have the same ID. Then, these labels + # are mapped to the same class in the ground truth images. For the inverse + # mapping, we use the label that is defined first in the list below. + # For example, mapping all void-type classes to the same ID in training, + # might make sense for some approaches. + # Max value is 255! + "category", # The name of the category that this label belongs to + "categoryId", # The ID of this category. Used to create ground truth images + # on category level. + "hasInstances", # Whether this label distinguishes between single instances or not + "ignoreInEval", # Whether pixels having this class as ground truth label are ignored + # during evaluations or not + "ignoreInInst", # Whether pixels having this class as ground truth label are ignored + # during evaluations of instance segmentation or not + "color", # The color of this label + ], +) + + +# -------------------------------------------------------------------------------- # A list of all labels -#-------------------------------------------------------------------------------- +# -------------------------------------------------------------------------------- # Please adapt the train IDs as appropriate for your approach. # Note that you might want to ignore labels with ID 255 during training. 
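For reference, the converter code above indexes this table through kittiId2label (e.g. kittiId2label[semanticIdKITTI].name and .id). A minimal sketch of how such a lookup can be built from the labels list in the next hunk, assuming the module does not construct it differently:

# Map the KITTI-360-specific kittiId to its Label entry; where several labels
# share a kittiId (e.g. the void classes with kittiId -1), the first
# occurrence in `labels` wins.
kittiId2label = {}
for label in labels:
    if label.kittiId not in kittiId2label:
        kittiId2label[label.kittiId] = label

# Example from the table below: kittiId 13 is 'car', with evaluation id 26.
assert kittiId2label[13].name == "car" and kittiId2label[13].id == 26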
@@ -66,68 +58,68 @@ labels = [ # name id kittiId, trainId category catId hasInstances ignoreInEval ignoreInInst color - Label( 'unlabeled' , 0 , -1 , 255 , 'void' , 0 , False , True , True , ( 0, 0, 0) ), - Label( 'ego vehicle' , 1 , -1 , 255 , 'void' , 0 , False , True , True , ( 0, 0, 0) ), - Label( 'rectification border' , 2 , -1 , 255 , 'void' , 0 , False , True , True , ( 0, 0, 0) ), - Label( 'out of roi' , 3 , -1 , 255 , 'void' , 0 , False , True , True , ( 0, 0, 0) ), - Label( 'static' , 4 , -1 , 255 , 'void' , 0 , False , True , True , ( 0, 0, 0) ), - Label( 'dynamic' , 5 , -1 , 255 , 'void' , 0 , False , True , True , (111, 74, 0) ), - Label( 'ground' , 6 , -1 , 255 , 'void' , 0 , False , True , True , ( 81, 0, 81) ), - Label( 'road' , 7 , 1 , 0 , 'flat' , 1 , False , False , False , (128, 64,128) ), - Label( 'sidewalk' , 8 , 3 , 1 , 'flat' , 1 , False , False , False , (244, 35,232) ), - Label( 'parking' , 9 , 2 , 255 , 'flat' , 1 , False , True , True , (250,170,160) ), - Label( 'rail track' , 10 , 10, 255 , 'flat' , 1 , False , True , True , (230,150,140) ), - Label( 'building' , 11 , 11, 2 , 'construction' , 2 , True , False , False , ( 70, 70, 70) ), - Label( 'wall' , 12 , 7 , 3 , 'construction' , 2 , False , False , False , (102,102,156) ), - Label( 'fence' , 13 , 8 , 4 , 'construction' , 2 , False , False , False , (190,153,153) ), - Label( 'guard rail' , 14 , 30, 255 , 'construction' , 2 , False , True , True , (180,165,180) ), - Label( 'bridge' , 15 , 31, 255 , 'construction' , 2 , False , True , True , (150,100,100) ), - Label( 'tunnel' , 16 , 32, 255 , 'construction' , 2 , False , True , True , (150,120, 90) ), - Label( 'pole' , 17 , 21, 5 , 'object' , 3 , True , False , True , (153,153,153) ), - Label( 'polegroup' , 18 , -1 , 255 , 'object' , 3 , False , True , True , (153,153,153) ), - Label( 'traffic light' , 19 , 23, 6 , 'object' , 3 , True , False , True , (250,170, 30) ), - Label( 'traffic sign' , 20 , 24, 7 , 'object' , 3 , True , False , True , (220,220, 0) ), - Label( 'vegetation' , 21 , 5 , 8 , 'nature' , 4 , False , False , False , (107,142, 35) ), - Label( 'terrain' , 22 , 4 , 9 , 'nature' , 4 , False , False , False , (152,251,152) ), - Label( 'sky' , 23 , 9 , 10 , 'sky' , 5 , False , False , False , ( 70,130,180) ), - Label( 'person' , 24 , 19, 11 , 'human' , 6 , True , False , False , (220, 20, 60) ), - Label( 'rider' , 25 , 20, 12 , 'human' , 6 , True , False , False , (255, 0, 0) ), - Label( 'car' , 26 , 13, 13 , 'vehicle' , 7 , True , False , False , ( 0, 0,142) ), - Label( 'truck' , 27 , 14, 14 , 'vehicle' , 7 , True , False , False , ( 0, 0, 70) ), - Label( 'bus' , 28 , 34, 15 , 'vehicle' , 7 , True , False , False , ( 0, 60,100) ), - Label( 'caravan' , 29 , 16, 255 , 'vehicle' , 7 , True , True , True , ( 0, 0, 90) ), - Label( 'trailer' , 30 , 15, 255 , 'vehicle' , 7 , True , True , True , ( 0, 0,110) ), - Label( 'train' , 31 , 33, 16 , 'vehicle' , 7 , True , False , False , ( 0, 80,100) ), - Label( 'motorcycle' , 32 , 17, 17 , 'vehicle' , 7 , True , False , False , ( 0, 0,230) ), - Label( 'bicycle' , 33 , 18, 18 , 'vehicle' , 7 , True , False , False , (119, 11, 32) ), - Label( 'garage' , 34 , 12, 2 , 'construction' , 2 , True , True , True , ( 64,128,128) ), - Label( 'gate' , 35 , 6 , 4 , 'construction' , 2 , False , True , True , (190,153,153) ), - Label( 'stop' , 36 , 29, 255 , 'construction' , 2 , True , True , True , (150,120, 90) ), - Label( 'smallpole' , 37 , 22, 5 , 'object' , 3 , True , True , True , (153,153,153) ), - Label( 'lamp' , 38 , 
25, 255 , 'object' , 3 , True , True , True , (0, 64, 64) ), - Label( 'trash bin' , 39 , 26, 255 , 'object' , 3 , True , True , True , (0, 128,192) ), - Label( 'vending machine' , 40 , 27, 255 , 'object' , 3 , True , True , True , (128, 64, 0) ), - Label( 'box' , 41 , 28, 255 , 'object' , 3 , True , True , True , (64, 64,128) ), - Label( 'unknown construction' , 42 , 35, 255 , 'void' , 0 , False , True , True , (102, 0, 0) ), - Label( 'unknown vehicle' , 43 , 36, 255 , 'void' , 0 , False , True , True , ( 51, 0, 51) ), - Label( 'unknown object' , 44 , 37, 255 , 'void' , 0 , False , True , True , ( 32, 32, 32) ), - Label( 'license plate' , -1 , -1, -1 , 'vehicle' , 7 , False , True , True , ( 0, 0,142) ), + Label("unlabeled", 0, -1, 255, "void", 0, False, True, True, (0, 0, 0)), + Label("ego vehicle", 1, -1, 255, "void", 0, False, True, True, (0, 0, 0)), + Label("rectification border", 2, -1, 255, "void", 0, False, True, True, (0, 0, 0)), + Label("out of roi", 3, -1, 255, "void", 0, False, True, True, (0, 0, 0)), + Label("static", 4, -1, 255, "void", 0, False, True, True, (0, 0, 0)), + Label("dynamic", 5, -1, 255, "void", 0, False, True, True, (111, 74, 0)), + Label("ground", 6, -1, 255, "void", 0, False, True, True, (81, 0, 81)), + Label("road", 7, 1, 0, "flat", 1, False, False, False, (128, 64, 128)), + Label("sidewalk", 8, 3, 1, "flat", 1, False, False, False, (244, 35, 232)), + Label("parking", 9, 2, 255, "flat", 1, False, True, True, (250, 170, 160)), + Label("rail track", 10, 10, 255, "flat", 1, False, True, True, (230, 150, 140)), + Label("building", 11, 11, 2, "construction", 2, True, False, False, (70, 70, 70)), + Label("wall", 12, 7, 3, "construction", 2, False, False, False, (102, 102, 156)), + Label("fence", 13, 8, 4, "construction", 2, False, False, False, (190, 153, 153)), + Label("guard rail", 14, 30, 255, "construction", 2, False, True, True, (180, 165, 180)), + Label("bridge", 15, 31, 255, "construction", 2, False, True, True, (150, 100, 100)), + Label("tunnel", 16, 32, 255, "construction", 2, False, True, True, (150, 120, 90)), + Label("pole", 17, 21, 5, "object", 3, True, False, True, (153, 153, 153)), + Label("polegroup", 18, -1, 255, "object", 3, False, True, True, (153, 153, 153)), + Label("traffic light", 19, 23, 6, "object", 3, True, False, True, (250, 170, 30)), + Label("traffic sign", 20, 24, 7, "object", 3, True, False, True, (220, 220, 0)), + Label("vegetation", 21, 5, 8, "nature", 4, False, False, False, (107, 142, 35)), + Label("terrain", 22, 4, 9, "nature", 4, False, False, False, (152, 251, 152)), + Label("sky", 23, 9, 10, "sky", 5, False, False, False, (70, 130, 180)), + Label("person", 24, 19, 11, "human", 6, True, False, False, (220, 20, 60)), + Label("rider", 25, 20, 12, "human", 6, True, False, False, (255, 0, 0)), + Label("car", 26, 13, 13, "vehicle", 7, True, False, False, (0, 0, 142)), + Label("truck", 27, 14, 14, "vehicle", 7, True, False, False, (0, 0, 70)), + Label("bus", 28, 34, 15, "vehicle", 7, True, False, False, (0, 60, 100)), + Label("caravan", 29, 16, 255, "vehicle", 7, True, True, True, (0, 0, 90)), + Label("trailer", 30, 15, 255, "vehicle", 7, True, True, True, (0, 0, 110)), + Label("train", 31, 33, 16, "vehicle", 7, True, False, False, (0, 80, 100)), + Label("motorcycle", 32, 17, 17, "vehicle", 7, True, False, False, (0, 0, 230)), + Label("bicycle", 33, 18, 18, "vehicle", 7, True, False, False, (119, 11, 32)), + Label("garage", 34, 12, 2, "construction", 2, True, True, True, (64, 128, 128)), + Label("gate", 35, 6, 4, "construction", 2, 
False, True, True, (190, 153, 153)), + Label("stop", 36, 29, 255, "construction", 2, True, True, True, (150, 120, 90)), + Label("smallpole", 37, 22, 5, "object", 3, True, True, True, (153, 153, 153)), + Label("lamp", 38, 25, 255, "object", 3, True, True, True, (0, 64, 64)), + Label("trash bin", 39, 26, 255, "object", 3, True, True, True, (0, 128, 192)), + Label("vending machine", 40, 27, 255, "object", 3, True, True, True, (128, 64, 0)), + Label("box", 41, 28, 255, "object", 3, True, True, True, (64, 64, 128)), + Label("unknown construction", 42, 35, 255, "void", 0, False, True, True, (102, 0, 0)), + Label("unknown vehicle", 43, 36, 255, "void", 0, False, True, True, (51, 0, 51)), + Label("unknown object", 44, 37, 255, "void", 0, False, True, True, (32, 32, 32)), + Label("license plate", -1, -1, -1, "vehicle", 7, False, True, True, (0, 0, 142)), ] -#-------------------------------------------------------------------------------- +# -------------------------------------------------------------------------------- # Create dictionaries for a fast lookup -#-------------------------------------------------------------------------------- +# -------------------------------------------------------------------------------- # Please refer to the main method below for example usages! # name to label object -name2label = { label.name : label for label in labels } +name2label = {label.name: label for label in labels} # id to label object -id2label = { label.id : label for label in labels } +id2label = {label.id: label for label in labels} # trainId to label object -trainId2label = { label.trainId : label for label in reversed(labels) } +trainId2label = {label.trainId: label for label in reversed(labels)} # KITTI-360 ID to cityscapes ID -kittiId2label = { label.kittiId : label for label in labels } +kittiId2label = {label.kittiId: label for label in labels} # category to list of label objects category2labels = {} for label in labels: @@ -137,9 +129,10 @@ else: category2labels[category] = [label] -#-------------------------------------------------------------------------------- +# -------------------------------------------------------------------------------- # Assure single instance name -#-------------------------------------------------------------------------------- +# -------------------------------------------------------------------------------- + # returns the label name that describes a single instance (if possible) # e.g. 
input | output @@ -149,7 +142,7 @@ # foo | None # foogroup | None # skygroup | None -def assureSingleInstanceName( name ): +def assureSingleInstanceName(name): # if the name is known, it is not a group if name in name2label: return name @@ -157,9 +150,9 @@ def assureSingleInstanceName( name ): if not name.endswith("group"): return None # remove group - name = name[:-len("group")] + name = name[: -len("group")] # test if the new name exists - if not name in name2label: + if name not in name2label: return None # test if the new name denotes a label that actually has instances if not name2label[name].hasInstances: @@ -167,11 +160,12 @@ def assureSingleInstanceName( name ): # all good then return name + from py123d.datatypes.detections.box_detection_types import BoxDetectionType BBOX_LABLES_TO_DETECTION_NAME_DICT = { - 'car': 'car', - 'truck': 'truck', + "car": "car", + "truck": "truck", "bicycle": "bicycle", "trafficLight": "traffic light", "trailer": "trailer", diff --git a/src/py123d/conversion/datasets/kitti_360/kitti_360_map_conversion.py b/src/py123d/conversion/datasets/kitti_360/kitti_360_map_conversion.py index c7653cc2..09975ca5 100644 --- a/src/py123d/conversion/datasets/kitti_360/kitti_360_map_conversion.py +++ b/src/py123d/conversion/datasets/kitti_360/kitti_360_map_conversion.py @@ -1,26 +1,25 @@ import os +import xml.etree.ElementTree as ET from pathlib import Path from typing import List import geopandas as gpd -import numpy as np import pandas as pd -import xml.etree.ElementTree as ET import shapely.geometry as geom +from py123d.conversion.datasets.kitti_360.kitti_360_helper import KITTI360_MAP_Bbox3D +from py123d.conversion.map_writer.abstract_map_writer import AbstractMapWriter from py123d.conversion.utils.map_utils.road_edge.road_edge_2d_utils import ( get_road_edge_linear_rings, split_line_geometry_by_max_length, ) -from py123d.datatypes.maps.map_datatypes import RoadEdgeType -from py123d.geometry.polyline import Polyline3D -from py123d.conversion.datasets.kitti_360.kitti_360_helper import KITTI360_MAP_Bbox3D -from py123d.conversion.map_writer.abstract_map_writer import AbstractMapWriter from py123d.datatypes.maps.cache.cache_map_objects import ( CacheGenericDrivable, - CacheWalkway, CacheRoadEdge, + CacheWalkway, ) +from py123d.datatypes.maps.map_datatypes import RoadEdgeType +from py123d.geometry.polyline import Polyline3D MAX_ROAD_EDGE_LENGTH = 100.0 # meters, used to filter out very long road edges @@ -38,6 +37,7 @@ # "driveway", ] + def _get_none_data() -> gpd.GeoDataFrame: ids = [] geometries = [] @@ -45,6 +45,7 @@ def _get_none_data() -> gpd.GeoDataFrame: gdf = gpd.GeoDataFrame(data, geometry=geometries) return gdf + def _extract_generic_drivable_df(objs: list[KITTI360_MAP_Bbox3D]) -> gpd.GeoDataFrame: ids: List[int] = [] outlines: List[geom.LineString] = [] @@ -59,6 +60,7 @@ def _extract_generic_drivable_df(objs: list[KITTI360_MAP_Bbox3D]) -> gpd.GeoData gdf = gpd.GeoDataFrame(data, geometry=geometries) return gdf + def _extract_walkway_df(objs: list[KITTI360_MAP_Bbox3D]) -> gpd.GeoDataFrame: ids: List[int] = [] outlines: List[geom.LineString] = [] @@ -74,6 +76,7 @@ def _extract_walkway_df(objs: list[KITTI360_MAP_Bbox3D]) -> gpd.GeoDataFrame: gdf = gpd.GeoDataFrame(data, geometry=geometries) return gdf + def _extract_road_edge_df(objs: list[KITTI360_MAP_Bbox3D]) -> gpd.GeoDataFrame: geometries: List[geom.Polygon] = [] for obj in objs: @@ -97,63 +100,52 @@ def convert_kitti360_map_with_writer(log_name: str, map_writer: AbstractMapWrite """ Convert KITTI-360 map data 
using the provided map writer. This function extracts map data from KITTI-360 XML files and writes them using the map writer interface. - + :param log_name: The name of the log to convert :param map_writer: The map writer to use for writing the converted map """ xml_path = PATH_3D_BBOX_ROOT / "train_full" / f"{log_name}.xml" if not xml_path.exists(): xml_path = PATH_3D_BBOX_ROOT / "train" / f"{log_name}.xml" - + if not xml_path.exists(): raise FileNotFoundError(f"BBox 3D file not found: {xml_path}") - + tree = ET.parse(xml_path) root = tree.getroot() objs: List[KITTI360_MAP_Bbox3D] = [] - + for child in root: - label = child.find('label').text + label = child.find("label").text if child.find("transform") is None or label not in KITTI360_MAP_BBOX: continue obj = KITTI360_MAP_Bbox3D() obj.parseBbox(child) objs.append(obj) - generic_drivable_gdf = _extract_generic_drivable_df(objs) walkway_gdf = _extract_walkway_df(objs) road_edge_gdf = _extract_road_edge_df(objs) - + for idx, row in generic_drivable_gdf.iterrows(): if not row.geometry.is_empty: - map_writer.write_generic_drivable( - CacheGenericDrivable( - object_id=idx, - geometry=row.geometry - ) - ) - + map_writer.write_generic_drivable(CacheGenericDrivable(object_id=idx, geometry=row.geometry)) + for idx, row in walkway_gdf.iterrows(): if not row.geometry.is_empty: - map_writer.write_walkway( - CacheWalkway( - object_id=idx, - geometry=row.geometry - ) - ) - + map_writer.write_walkway(CacheWalkway(object_id=idx, geometry=row.geometry)) + for idx, row in road_edge_gdf.iterrows(): if not row.geometry.is_empty: - if hasattr(row.geometry, 'exterior'): + if hasattr(row.geometry, "exterior"): road_edge_line = row.geometry.exterior else: road_edge_line = row.geometry - + map_writer.write_road_edge( CacheRoadEdge( object_id=idx, road_edge_type=RoadEdgeType.ROAD_EDGE_BOUNDARY, - polyline=Polyline3D.from_linestring(road_edge_line) + polyline=Polyline3D.from_linestring(road_edge_line), ) - ) \ No newline at end of file + ) diff --git a/src/py123d/conversion/datasets/kitti_360/kitti_360_sensor_io.py b/src/py123d/conversion/datasets/kitti_360/kitti_360_sensor_io.py index 46318ea8..5a0cf7e1 100644 --- a/src/py123d/conversion/datasets/kitti_360/kitti_360_sensor_io.py +++ b/src/py123d/conversion/datasets/kitti_360/kitti_360_sensor_io.py @@ -1,34 +1,29 @@ +import logging from pathlib import Path - from typing import Dict -import numpy as np -import logging -from py123d.datatypes.sensors.lidar.lidar import LiDAR, LiDARMetadata, LiDARType -from py123d.conversion.datasets.kitti_360.kitti_360_helper import get_lidar_extrinsic -def load_kitti360_lidar_pcs_from_file(filepath: Path) -> Dict[LiDARType, np.ndarray]: - if not filepath.exists(): - logging.warning(f"LiDAR file does not exist: {filepath}. 
Returning empty point cloud.") - return {LiDARType.LIDAR_TOP: np.zeros((1, 4), dtype=np.float32)} - - pcd = np.fromfile(filepath, dtype=np.float32) - pcd = np.reshape(pcd,[-1,4]) # [N,4] - - xyz = pcd[:, :3] - intensity = pcd[:, 3] - - ones = np.ones((xyz.shape[0], 1), dtype=pcd.dtype) - points_h = np.concatenate([xyz, ones], axis=1) #[N,4] +import numpy as np - transformed_h = get_lidar_extrinsic() @ points_h.T #[4,N] - # transformed_h = lidar_metadata.extrinsic.transformation_matrix @ points_h.T #[4,N] +from py123d.datatypes.scene.scene_metadata import LogMetadata +from py123d.datatypes.sensors.lidar.lidar import LiDARType +from py123d.datatypes.sensors.lidar.lidar_index import Kitti360LidarIndex +from py123d.geometry.se import StateSE3 +from py123d.geometry.transform.transform_se3 import convert_points_3d_array_between_origins - transformed_xyz = transformed_h[:3, :] # (3,N) - intensity_row = intensity[np.newaxis, :] # (1,N) +def load_kitti360_lidar_pcs_from_file(filepath: Path, log_metadata: LogMetadata) -> Dict[LiDARType, np.ndarray]: + if not filepath.exists(): + logging.warning(f"LiDAR file does not exist: {filepath}. Returning empty point cloud.") + return {LiDARType.LIDAR_TOP: np.zeros((1, len(Kitti360LidarIndex)), dtype=np.float32)} - point_cloud_4xN = np.vstack([transformed_xyz, intensity_row]).astype(np.float32) # (4,N) + lidar_extrinsic = log_metadata.lidar_metadata[LiDARType.LIDAR_TOP].extrinsic + lidar_pc = np.fromfile(filepath, dtype=np.float32) + lidar_pc = np.reshape(lidar_pc, [-1, len(Kitti360LidarIndex)]) - point_cloud_Nx4 = point_cloud_4xN.T # (N,4) + lidar_pc[..., Kitti360LidarIndex.XYZ] = convert_points_3d_array_between_origins( + from_origin=lidar_extrinsic, + to_origin=StateSE3(0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0), + points_3d_array=lidar_pc[..., Kitti360LidarIndex.XYZ], + ) - return {LiDARType.LIDAR_TOP: point_cloud_Nx4} + return {LiDARType.LIDAR_TOP: lidar_pc} diff --git a/src/py123d/conversion/datasets/kitti_360/preprocess_detection.py b/src/py123d/conversion/datasets/kitti_360/preprocess_detection.py index 2f959b06..e99d6af5 100644 --- a/src/py123d/conversion/datasets/kitti_360/preprocess_detection.py +++ b/src/py123d/conversion/datasets/kitti_360/preprocess_detection.py @@ -9,18 +9,17 @@ """ from __future__ import annotations + +import concurrent.futures +import logging import os import pickle -import logging -import copy +import xml.etree.ElementTree as ET from pathlib import Path -from typing import Dict, List, Tuple, Optional, Any -from collections import defaultdict -import concurrent.futures +from typing import Any, Dict, List, Optional, Tuple import numpy as np import numpy.typing as npt -import xml.etree.ElementTree as ET KITTI360_DATA_ROOT = Path(os.environ["KITTI360_DATA_ROOT"]) DIR_3D_RAW = "data_3d_raw" @@ -31,22 +30,34 @@ PATH_3D_BBOX_ROOT = KITTI360_DATA_ROOT / DIR_3D_BBOX PATH_POSES_ROOT = KITTI360_DATA_ROOT / DIR_POSES -from py123d.conversion.datasets.kitti_360.kitti_360_helper import KITTI360Bbox3D, KITTI3602NUPLAN_IMU_CALIBRATION, get_lidar_extrinsic -from py123d.conversion.datasets.kitti_360.kitti_360_labels import KITTI360_DETECTION_NAME_DICT, kittiId2label, BBOX_LABLES_TO_DETECTION_NAME_DICT +from py123d.conversion.datasets.kitti_360.kitti_360_helper import ( + KITTI3602NUPLAN_IMU_CALIBRATION, + KITTI360Bbox3D, + get_lidar_extrinsic, +) +from py123d.conversion.datasets.kitti_360.kitti_360_labels import ( + BBOX_LABLES_TO_DETECTION_NAME_DICT, + KITTI360_DETECTION_NAME_DICT, + kittiId2label, +) + def _bbox_xml_path(log_name: str) -> Path: if 
log_name == "2013_05_28_drive_0004_sync": return PATH_3D_BBOX_ROOT / "train_full" / f"{log_name}.xml" return PATH_3D_BBOX_ROOT / "train" / f"{log_name}.xml" + def _lidar_frame_path(log_name: str, frame_idx: int) -> Path: return PATH_3D_RAW_ROOT / log_name / "velodyne_points" / "data" / f"{frame_idx:010d}.bin" + def _load_lidar_xyz(filepath: Path) -> np.ndarray: """Load one LiDAR frame and return Nx3 xyz.""" arr = np.fromfile(filepath, dtype=np.float32) return arr.reshape(-1, 4)[:, :3] + def _collect_static_objects(log_name: str) -> List[KITTI360Bbox3D]: """Parse XML and collect static objects with valid class names.""" xml_path = _bbox_xml_path(log_name) @@ -58,13 +69,13 @@ def _collect_static_objects(log_name: str) -> List[KITTI360Bbox3D]: static_objs: List[KITTI360Bbox3D] = [] for child in root: - if child.find('semanticId') is not None: - semanticIdKITTI = int(child.find('semanticId').text) + if child.find("semanticId") is not None: + semanticIdKITTI = int(child.find("semanticId").text) name = kittiId2label[semanticIdKITTI].name else: - lable = child.find('label').text - name = BBOX_LABLES_TO_DETECTION_NAME_DICT.get(lable, 'unknown') - timestamp = int(child.find('timestamp').text) # -1 for static objects + lable = child.find("label").text + name = BBOX_LABLES_TO_DETECTION_NAME_DICT.get(lable, "unknown") + timestamp = int(child.find("timestamp").text) # -1 for static objects if child.find("transform") is None or name not in KITTI360_DETECTION_NAME_DICT or timestamp != -1: continue obj = KITTI360Bbox3D() @@ -72,17 +83,18 @@ def _collect_static_objects(log_name: str) -> List[KITTI360Bbox3D]: static_objs.append(obj) return static_objs + def _collect_ego_states(log_name: str) -> Tuple[npt.NDArray[np.float64], list[int]]: """Load ego states from poses.txt.""" pose_file = PATH_POSES_ROOT / log_name / "poses.txt" if not pose_file.exists(): raise FileNotFoundError(f"Pose file not found: {pose_file}") - + poses = np.loadtxt(pose_file) poses_time = poses[:, 0].astype(np.int32) valid_timestamp: List[int] = list(poses_time) - + ego_states = [] for time_idx in range(len(valid_timestamp)): pos = time_idx @@ -90,15 +102,15 @@ def _collect_ego_states(log_name: str) -> Tuple[npt.NDArray[np.float64], list[in r00, r01, r02 = poses[pos, 1:4] r10, r11, r12 = poses[pos, 5:8] r20, r21, r22 = poses[pos, 9:12] - R_mat = np.array([[r00, r01, r02], - [r10, r11, r12], - [r20, r21, r22]], dtype=np.float64) - R_mat_cali = R_mat @ KITTI3602NUPLAN_IMU_CALIBRATION[:3,:3] - ego_state_xyz = np.array([ - poses[pos, 4], - poses[pos, 8], - poses[pos, 12], - ]) + R_mat = np.array([[r00, r01, r02], [r10, r11, r12], [r20, r21, r22]], dtype=np.float64) + R_mat_cali = R_mat @ KITTI3602NUPLAN_IMU_CALIBRATION[:3, :3] + ego_state_xyz = np.array( + [ + poses[pos, 4], + poses[pos, 8], + poses[pos, 12], + ] + ) state_item[:3, :3] = R_mat_cali state_item[:3, 3] = ego_state_xyz @@ -147,23 +159,25 @@ def process_one_frame(time_idx: int) -> None: if not lidar_path.exists(): logging.warning(f"[preprocess] {log_name}: LiDAR frame not found: {lidar_path}") return - + lidar_xyz = _load_lidar_xyz(lidar_path) # lidar to pose lidar_h = np.concatenate((lidar_xyz, np.ones((lidar_xyz.shape[0], 1), dtype=lidar_xyz.dtype)), axis=1) lidar_in_imu = lidar_h @ lidar_extrinsic.T - lidar_in_imu = lidar_in_imu[:,:3] + lidar_in_imu = lidar_in_imu[:, :3] # pose to world - lidar_in_world = lidar_in_imu @ ego_states[time_idx][:3,:3].T + ego_states[time_idx][:3,3] + lidar_in_world = lidar_in_imu @ ego_states[time_idx][:3, :3].T + ego_states[time_idx][:3, 3] for 
obj in static_objs: if not any(record["timestamp"] == valid_time_idx for record in obj.valid_frames["records"]): continue visible, points_in_box = obj.box_visible_in_point_cloud(lidar_in_world) if not visible: - obj.valid_frames["records"] = [record for record in obj.valid_frames["records"] if record["timestamp"] != valid_time_idx] + obj.valid_frames["records"] = [ + record for record in obj.valid_frames["records"] if record["timestamp"] != valid_time_idx + ] else: for record in obj.valid_frames["records"]: if record["timestamp"] == valid_time_idx: @@ -172,7 +186,7 @@ def process_one_frame(time_idx: int) -> None: max_workers = os.cpu_count() * 2 with concurrent.futures.ThreadPoolExecutor(max_workers=max_workers) as executor: - results = list(executor.map(process_one_frame, range(len(valid_timestamp)))) + list(executor.map(process_one_frame, range(len(valid_timestamp)))) # 4) Save pickle static_records: List[Dict[str, Any]] = [] @@ -192,8 +206,10 @@ def process_one_frame(time_idx: int) -> None: pickle.dump(payload, f) logging.info(f"[preprocess] saved: {out_path}") + if __name__ == "__main__": import argparse + logging.basicConfig(level=logging.INFO) parser = argparse.ArgumentParser(description="Precompute KITTI-360 detections filters") parser.add_argument("--log_name", default="2013_05_28_drive_0000_sync") diff --git a/src/py123d/conversion/log_writer/arrow_log_writer.py b/src/py123d/conversion/log_writer/arrow_log_writer.py index 46d39b75..532b7dda 100644 --- a/src/py123d/conversion/log_writer/arrow_log_writer.py +++ b/src/py123d/conversion/log_writer/arrow_log_writer.py @@ -15,8 +15,8 @@ from py123d.datatypes.detections.traffic_light_detections import TrafficLightDetectionWrapper from py123d.datatypes.scene.arrow.utils.arrow_metadata_utils import add_log_metadata_to_arrow_schema from py123d.datatypes.scene.scene_metadata import LogMetadata -from py123d.datatypes.sensors.camera.pinhole_camera import PinholeCameraType from py123d.datatypes.sensors.camera.fisheye_mei_camera import FisheyeMEICameraType +from py123d.datatypes.sensors.camera.pinhole_camera import PinholeCameraType from py123d.datatypes.sensors.lidar.lidar import LiDARType from py123d.datatypes.time.time_point import TimePoint from py123d.datatypes.vehicle_state.ego_state import EgoStateSE3, EgoStateSE3Index diff --git a/src/py123d/conversion/map_writer/gpkg_map_writer.py b/src/py123d/conversion/map_writer/gpkg_map_writer.py index 5e68a411..d5acf041 100644 --- a/src/py123d/conversion/map_writer/gpkg_map_writer.py +++ b/src/py123d/conversion/map_writer/gpkg_map_writer.py @@ -188,45 +188,66 @@ def _write_line_layer(self, layer: MapLayer, line_object: AbstractLineMapObject) self._map_data[layer]["geometry"].append(line_object.shapely_linestring) -def _map_ids_to_integer( - map_dfs: Dict[MapLayer, gpd.GeoDataFrame], -) -> None: +def _map_ids_to_integer(map_dfs: Dict[MapLayer, gpd.GeoDataFrame]) -> None: + """Helper function to remap string IDs to integers in the map dataframes.""" # initialize id mappings lane_id_mapping = IntIDMapping.from_series(map_dfs[MapLayer.LANE]["id"]) + lane_group_id_mapping = IntIDMapping.from_series(map_dfs[MapLayer.LANE_GROUP]["id"]) + intersection_id_mapping = IntIDMapping.from_series(map_dfs[MapLayer.INTERSECTION]["id"]) + walkway_id_mapping = IntIDMapping.from_series(map_dfs[MapLayer.WALKWAY]["id"]) carpark_id_mapping = IntIDMapping.from_series(map_dfs[MapLayer.CARPARK]["id"]) generic_drivable_id_mapping = IntIDMapping.from_series(map_dfs[MapLayer.GENERIC_DRIVABLE]["id"]) - lane_group_id_mapping = 
IntIDMapping.from_series(map_dfs[MapLayer.LANE_GROUP]["id"]) - - # Adjust cross reference in map_dfs[MapLayer.LANE] and map_dfs[MapLayer.LANE_GROUP] - map_dfs[MapLayer.LANE]["lane_group_id"] = map_dfs[MapLayer.LANE]["lane_group_id"].map( - lane_group_id_mapping.str_to_int - ) - map_dfs[MapLayer.LANE_GROUP]["lane_ids"] = map_dfs[MapLayer.LANE_GROUP]["lane_ids"].apply( - lambda x: lane_id_mapping.map_list(x) - ) - - # Adjust predecessor/successor in map_dfs[MapLayer.LANE] and map_dfs[MapLayer.LANE_GROUP] - for column in ["predecessor_ids", "successor_ids"]: - map_dfs[MapLayer.LANE][column] = map_dfs[MapLayer.LANE][column].apply(lambda x: lane_id_mapping.map_list(x)) - map_dfs[MapLayer.LANE_GROUP][column] = map_dfs[MapLayer.LANE_GROUP][column].apply( + road_line_id_mapping = IntIDMapping.from_series(map_dfs[MapLayer.ROAD_LINE]["id"]) + road_edge_id_mapping = IntIDMapping.from_series(map_dfs[MapLayer.ROAD_EDGE]["id"]) + + # 1. Remap lane ids in LANE layer + if len(map_dfs[MapLayer.LANE]) > 0: + map_dfs[MapLayer.LANE]["id"] = map_dfs[MapLayer.LANE]["id"].map(lane_id_mapping.str_to_int) + map_dfs[MapLayer.LANE]["lane_group_id"] = map_dfs[MapLayer.LANE]["lane_group_id"].map( + lane_group_id_mapping.str_to_int + ) + for column in ["predecessor_ids", "successor_ids"]: + map_dfs[MapLayer.LANE][column] = map_dfs[MapLayer.LANE][column].apply(lambda x: lane_id_mapping.map_list(x)) + for column in ["left_lane_id", "right_lane_id"]: + map_dfs[MapLayer.LANE][column] = map_dfs[MapLayer.LANE][column].apply( + lambda x: str(lane_id_mapping.str_to_int[x]) if pd.notna(x) and x is not None else x + ) + + # 2. Remap lane group ids in LANE_GROUP + if len(map_dfs[MapLayer.LANE_GROUP]) > 0: + map_dfs[MapLayer.LANE_GROUP]["id"] = map_dfs[MapLayer.LANE_GROUP]["id"].map(lane_group_id_mapping.str_to_int) + map_dfs[MapLayer.LANE_GROUP]["lane_ids"] = map_dfs[MapLayer.LANE_GROUP]["lane_ids"].apply( + lambda x: lane_id_mapping.map_list(x) + ) + map_dfs[MapLayer.LANE_GROUP]["intersection_id"] = map_dfs[MapLayer.LANE_GROUP]["intersection_id"].map( + intersection_id_mapping.str_to_int + ) + for column in ["predecessor_ids", "successor_ids"]: + map_dfs[MapLayer.LANE_GROUP][column] = map_dfs[MapLayer.LANE_GROUP][column].apply( + lambda x: lane_group_id_mapping.map_list(x) + ) + + # 3. Remap lane group ids in INTERSECTION + if len(map_dfs[MapLayer.INTERSECTION]) > 0: + map_dfs[MapLayer.INTERSECTION]["id"] = map_dfs[MapLayer.INTERSECTION]["id"].map( + intersection_id_mapping.str_to_int + ) + map_dfs[MapLayer.INTERSECTION]["lane_group_ids"] = map_dfs[MapLayer.INTERSECTION]["lane_group_ids"].apply( lambda x: lane_group_id_mapping.map_list(x) ) - for column in ["left_lane_id", "right_lane_id"]: - map_dfs[MapLayer.LANE][column] = map_dfs[MapLayer.LANE][column].apply( - lambda x: str(lane_id_mapping.str_to_int[x]) if pd.notna(x) and x is not None else x + # 4. 
Remap ids in other layers + if len(map_dfs[MapLayer.WALKWAY]) > 0: + map_dfs[MapLayer.WALKWAY]["id"] = map_dfs[MapLayer.WALKWAY]["id"].map(walkway_id_mapping.str_to_int) + if len(map_dfs[MapLayer.CARPARK]) > 0: + map_dfs[MapLayer.CARPARK]["id"] = map_dfs[MapLayer.CARPARK]["id"].map(carpark_id_mapping.str_to_int) + if len(map_dfs[MapLayer.GENERIC_DRIVABLE]) > 0: + map_dfs[MapLayer.GENERIC_DRIVABLE]["id"] = map_dfs[MapLayer.GENERIC_DRIVABLE]["id"].map( + generic_drivable_id_mapping.str_to_int ) - - map_dfs[MapLayer.LANE]["id"] = map_dfs[MapLayer.LANE]["id"].map(lane_id_mapping.str_to_int) - map_dfs[MapLayer.WALKWAY]["id"] = map_dfs[MapLayer.WALKWAY]["id"].map(walkway_id_mapping.str_to_int) - map_dfs[MapLayer.CARPARK]["id"] = map_dfs[MapLayer.CARPARK]["id"].map(carpark_id_mapping.str_to_int) - map_dfs[MapLayer.GENERIC_DRIVABLE]["id"] = map_dfs[MapLayer.GENERIC_DRIVABLE]["id"].map( - generic_drivable_id_mapping.str_to_int - ) - map_dfs[MapLayer.LANE_GROUP]["id"] = map_dfs[MapLayer.LANE_GROUP]["id"].map(lane_group_id_mapping.str_to_int) - - map_dfs[MapLayer.INTERSECTION]["lane_group_ids"] = map_dfs[MapLayer.INTERSECTION]["lane_group_ids"].apply( - lambda x: lane_group_id_mapping.map_list(x) - ) + if len(map_dfs[MapLayer.ROAD_LINE]) > 0: + map_dfs[MapLayer.ROAD_LINE]["id"] = map_dfs[MapLayer.ROAD_LINE]["id"].map(road_line_id_mapping.str_to_int) + if len(map_dfs[MapLayer.ROAD_EDGE]) > 0: + map_dfs[MapLayer.ROAD_EDGE]["id"] = map_dfs[MapLayer.ROAD_EDGE]["id"].map(road_edge_id_mapping.str_to_int) diff --git a/src/py123d/conversion/registry/lidar_index_registry.py b/src/py123d/conversion/registry/lidar_index_registry.py index bbc97ab4..7a7891f8 100644 --- a/src/py123d/conversion/registry/lidar_index_registry.py +++ b/src/py123d/conversion/registry/lidar_index_registry.py @@ -60,6 +60,7 @@ class WOPDLidarIndex(LiDARIndex): Y = 4 Z = 5 + @register_lidar_index class Kitti360LidarIndex(LiDARIndex): X = 0 @@ -67,6 +68,7 @@ class Kitti360LidarIndex(LiDARIndex): Z = 2 INTENSITY = 3 + @register_lidar_index class AVSensorLidarIndex(LiDARIndex): """Argoverse Sensor LiDAR Indexing Scheme. 
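The Kitti360LidarIndex entry registered above is what lets load_kitti360_lidar_pcs_from_file reshape KITTI-360's flat float32 .bin stream without hard-coding the column count. A sketch with a plain IntEnum stand-in (the real class derives from LiDARIndex and also exposes an XYZ slice):

from enum import IntEnum

import numpy as np

class Kitti360LidarIndexSketch(IntEnum):  # mirrors the registered index
    X = 0
    Y = 1
    Z = 2
    INTENSITY = 3

raw = np.arange(12, dtype=np.float32)  # stand-in for np.fromfile(path, dtype=np.float32)
pc = raw.reshape(-1, len(Kitti360LidarIndexSketch))  # recovers the (N, 4) layout

xyz = pc[:, : Kitti360LidarIndexSketch.INTENSITY]  # columns X..Z
intensity = pc[:, Kitti360LidarIndexSketch.INTENSITY]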
diff --git a/src/py123d/conversion/sensor_io/lidar/file_lidar_io.py b/src/py123d/conversion/sensor_io/lidar/file_lidar_io.py index 2cd2a0e5..cd918c05 100644 --- a/src/py123d/conversion/sensor_io/lidar/file_lidar_io.py +++ b/src/py123d/conversion/sensor_io/lidar/file_lidar_io.py @@ -36,7 +36,7 @@ def load_lidar_pcs_from_file( assert sensor_root is not None, f"Dataset path for sensor loading not found for dataset: {log_metadata.dataset}" full_lidar_path = Path(sensor_root) / relative_path - assert full_lidar_path.exists(), f"LiDAR file not found: {full_lidar_path}" + assert full_lidar_path.exists(), f"LiDAR file not found: {sensor_root} / {relative_path}" # NOTE: We move data specific import into if-else block, to avoid data specific import errors if log_metadata.dataset == "nuplan": @@ -58,17 +58,17 @@ def load_lidar_pcs_from_file( from py123d.conversion.datasets.pandaset.pandaset_sensor_io import load_pandaset_lidars_pcs_from_file lidar_pcs_dict = load_pandaset_lidars_pcs_from_file(full_lidar_path, index) - + elif log_metadata.dataset == "kitti360": from py123d.conversion.datasets.kitti_360.kitti_360_sensor_io import load_kitti360_lidar_pcs_from_file - - lidar_pcs_dict = load_kitti360_lidar_pcs_from_file(full_lidar_path) - + + lidar_pcs_dict = load_kitti360_lidar_pcs_from_file(full_lidar_path, log_metadata) + elif log_metadata.dataset == "nuscenes": from py123d.conversion.datasets.nuscenes.nuscenes_sensor_io import load_nuscenes_lidar_pcs_from_file lidar_pcs_dict = load_nuscenes_lidar_pcs_from_file(full_lidar_path, log_metadata) - + else: raise NotImplementedError(f"Loading LiDAR data for dataset {log_metadata.dataset} is not implemented.") diff --git a/src/py123d/datatypes/maps/abstract_map_objects.py b/src/py123d/datatypes/maps/abstract_map_objects.py index de43bc81..baea8d87 100644 --- a/src/py123d/datatypes/maps/abstract_map_objects.py +++ b/src/py123d/datatypes/maps/abstract_map_objects.py @@ -2,11 +2,10 @@ import abc from typing import List, Optional, Tuple, Union -from typing_extensions import TypeAlias - import shapely.geometry as geom import trimesh +from typing_extensions import TypeAlias from py123d.datatypes.maps.map_datatypes import MapLayer, RoadEdgeType, RoadLineType from py123d.geometry import Polyline2D, Polyline3D, PolylineSE2 diff --git a/src/py123d/datatypes/scene/abstract_scene.py b/src/py123d/datatypes/scene/abstract_scene.py index 8bb2d381..cdad4033 100644 --- a/src/py123d/datatypes/scene/abstract_scene.py +++ b/src/py123d/datatypes/scene/abstract_scene.py @@ -7,8 +7,8 @@ from py123d.datatypes.detections.traffic_light_detections import TrafficLightDetectionWrapper from py123d.datatypes.maps.abstract_map import AbstractMap from py123d.datatypes.scene.scene_metadata import LogMetadata, SceneExtractionMetadata -from py123d.datatypes.sensors.camera.pinhole_camera import PinholeCamera, PinholeCameraType from py123d.datatypes.sensors.camera.fisheye_mei_camera import FisheyeMEICamera, FisheyeMEICameraType +from py123d.datatypes.sensors.camera.pinhole_camera import PinholeCamera, PinholeCameraType from py123d.datatypes.sensors.lidar.lidar import LiDAR, LiDARType from py123d.datatypes.time.time_point import TimePoint from py123d.datatypes.vehicle_state.ego_state import EgoStateSE3 @@ -54,7 +54,9 @@ def get_route_lane_group_ids(self, iteration: int) -> Optional[List[int]]: raise NotImplementedError @abc.abstractmethod - def get_camera_at_iteration(self, iteration: int, camera_type: Union[PinholeCameraType, FisheyeMEICameraType]) -> Optional[Union[PinholeCamera, 
FisheyeMEICamera]]: + def get_camera_at_iteration( + self, iteration: int, camera_type: Union[PinholeCameraType, FisheyeMEICameraType] + ) -> Optional[Union[PinholeCamera, FisheyeMEICamera]]: raise NotImplementedError @abc.abstractmethod diff --git a/src/py123d/datatypes/scene/arrow/arrow_scene.py b/src/py123d/datatypes/scene/arrow/arrow_scene.py index 87aa038f..a3c4db55 100644 --- a/src/py123d/datatypes/scene/arrow/arrow_scene.py +++ b/src/py123d/datatypes/scene/arrow/arrow_scene.py @@ -19,8 +19,8 @@ ) from py123d.datatypes.scene.arrow.utils.arrow_metadata_utils import get_log_metadata_from_arrow from py123d.datatypes.scene.scene_metadata import LogMetadata, SceneExtractionMetadata -from py123d.datatypes.sensors.camera.pinhole_camera import PinholeCamera, PinholeCameraType from py123d.datatypes.sensors.camera.fisheye_mei_camera import FisheyeMEICamera, FisheyeMEICameraType +from py123d.datatypes.sensors.camera.pinhole_camera import PinholeCamera, PinholeCameraType from py123d.datatypes.sensors.lidar.lidar import LiDAR, LiDARType from py123d.datatypes.time.time_point import TimePoint from py123d.datatypes.vehicle_state.ego_state import EgoStateSE3 @@ -38,11 +38,11 @@ def __init__( self._log_metadata: LogMetadata = get_log_metadata_from_arrow(arrow_file_path) with pa.memory_map(str(self._arrow_file_path), "r") as source: - reader = pa.ipc.open_file(source) - table = reader.read_all() + reader = pa.ipc.open_file(source) + table = reader.read_all() num_rows = table.num_rows - initial_uuid = table['uuid'][0].as_py() - + initial_uuid = table["uuid"][0].as_py() + if scene_extraction_metadata is None: scene_extraction_metadata = SceneExtractionMetadata( initial_uuid=initial_uuid, @@ -128,7 +128,9 @@ def get_route_lane_group_ids(self, iteration: int) -> Optional[List[int]]: route_lane_group_ids = table["route_lane_group_ids"][self._get_table_index(iteration)].as_py() return route_lane_group_ids - def get_camera_at_iteration(self, iteration: int, camera_type: Union[PinholeCameraType, FisheyeMEICameraType]) -> Optional[Union[PinholeCamera, FisheyeMEICamera]]: + def get_camera_at_iteration( + self, iteration: int, camera_type: Union[PinholeCameraType, FisheyeMEICameraType] + ) -> Optional[Union[PinholeCamera, FisheyeMEICamera]]: camera: Optional[Union[PinholeCamera, FisheyeMEICamera]] = None if camera_type in self.available_camera_types: camera = get_camera_from_arrow_table( diff --git a/src/py123d/datatypes/scene/arrow/utils/arrow_getters.py b/src/py123d/datatypes/scene/arrow/utils/arrow_getters.py index 2d3eb1aa..1631b9e3 100644 --- a/src/py123d/datatypes/scene/arrow/utils/arrow_getters.py +++ b/src/py123d/datatypes/scene/arrow/utils/arrow_getters.py @@ -23,8 +23,8 @@ TrafficLightStatus, ) from py123d.datatypes.scene.scene_metadata import LogMetadata -from py123d.datatypes.sensors.camera.pinhole_camera import PinholeCamera, PinholeCameraType from py123d.datatypes.sensors.camera.fisheye_mei_camera import FisheyeMEICamera, FisheyeMEICameraType +from py123d.datatypes.sensors.camera.pinhole_camera import PinholeCamera, PinholeCameraType from py123d.datatypes.sensors.lidar.lidar import LiDAR, LiDARMetadata, LiDARType from py123d.datatypes.sensors.lidar.lidar_index import DefaultLidarIndex from py123d.datatypes.time.time_point import TimePoint @@ -138,7 +138,7 @@ def get_camera_from_arrow_table( raise NotImplementedError("Only string file paths for camera data are supported.") camera_metadata = log_metadata.camera_metadata[camera_type] - if hasattr(camera_metadata, 'mirror_parameter') and 
camera_metadata.mirror_parameter is not None: + if hasattr(camera_metadata, "mirror_parameter") and camera_metadata.mirror_parameter is not None: return FisheyeMEICamera( metadata=camera_metadata, image=image, diff --git a/src/py123d/datatypes/scene/scene_filter.py b/src/py123d/datatypes/scene/scene_filter.py index f3d516a2..d4bada57 100644 --- a/src/py123d/datatypes/scene/scene_filter.py +++ b/src/py123d/datatypes/scene/scene_filter.py @@ -1,9 +1,9 @@ from dataclasses import dataclass from typing import List, Optional, Union -from py123d.datatypes.sensors.camera.pinhole_camera import PinholeCameraType from py123d.datatypes.sensors.camera.fisheye_mei_camera import FisheyeMEICameraType -from py123d.datatypes.sensors.camera.utils import get_camera_type_by_value, deserialize_camera_type +from py123d.datatypes.sensors.camera.pinhole_camera import PinholeCameraType +from py123d.datatypes.sensors.camera.utils import deserialize_camera_type, get_camera_type_by_value # TODO: Add more filter options (e.g. scene tags, ego movement, or whatever appropriate) diff --git a/src/py123d/datatypes/scene/scene_metadata.py b/src/py123d/datatypes/scene/scene_metadata.py index ee91c70a..c7f4ae76 100644 --- a/src/py123d/datatypes/scene/scene_metadata.py +++ b/src/py123d/datatypes/scene/scene_metadata.py @@ -1,12 +1,12 @@ from __future__ import annotations from dataclasses import asdict, dataclass, field -from typing import Dict, Union, Optional +from typing import Dict, Optional, Union import py123d from py123d.datatypes.maps.map_metadata import MapMetadata -from py123d.datatypes.sensors.camera.pinhole_camera import PinholeCameraMetadata, PinholeCameraType from py123d.datatypes.sensors.camera.fisheye_mei_camera import FisheyeMEICameraMetadata, FisheyeMEICameraType +from py123d.datatypes.sensors.camera.pinhole_camera import PinholeCameraMetadata, PinholeCameraType from py123d.datatypes.sensors.lidar.lidar import LiDARMetadata, LiDARType from py123d.datatypes.vehicle_state.vehicle_parameters import VehicleParameters @@ -21,7 +21,9 @@ class LogMetadata: timestep_seconds: float vehicle_parameters: Optional[VehicleParameters] = None - camera_metadata: Union[Dict[PinholeCameraType, PinholeCameraMetadata], Dict[FisheyeMEICameraType, FisheyeMEICameraMetadata]] = field(default_factory=dict) + camera_metadata: Union[ + Dict[PinholeCameraType, PinholeCameraMetadata], Dict[FisheyeMEICameraType, FisheyeMEICameraMetadata] + ] = field(default_factory=dict) lidar_metadata: Dict[LiDARType, LiDARMetadata] = field(default_factory=dict) map_metadata: Optional[MapMetadata] = None diff --git a/src/py123d/datatypes/sensors/camera/fisheye_mei_camera.py b/src/py123d/datatypes/sensors/camera/fisheye_mei_camera.py index 038ec2a4..afb27960 100644 --- a/src/py123d/datatypes/sensors/camera/fisheye_mei_camera.py +++ b/src/py123d/datatypes/sensors/camera/fisheye_mei_camera.py @@ -11,12 +11,14 @@ from py123d.common.utils.mixin import ArrayMixin from py123d.geometry.se import StateSE3 + class FisheyeMEICameraType(SerialIntEnum): """ Enum for fisheye cameras in d123. 
""" - #NOTE Use higher values to avoid conflicts with PinholeCameraType - CAM_L = 10 + + # NOTE Use higher values to avoid conflicts with PinholeCameraType + CAM_L = 10 CAM_R = 11 @@ -138,10 +140,14 @@ class FisheyeMEICameraMetadata: def from_dict(cls, data_dict: Dict[str, Any]) -> FisheyeMEICameraMetadata: data_dict["camera_type"] = FisheyeMEICameraType(data_dict["camera_type"]) data_dict["distortion"] = ( - FisheyeMEIDistortion.from_array(np.array(data_dict["distortion"])) if data_dict["distortion"] is not None else None + FisheyeMEIDistortion.from_array(np.array(data_dict["distortion"])) + if data_dict["distortion"] is not None + else None ) data_dict["projection"] = ( - FisheyeMEIProjection.from_array(np.array(data_dict["projection"])) if data_dict["projection"] is not None else None + FisheyeMEIProjection.from_array(np.array(data_dict["projection"])) + if data_dict["projection"] is not None + else None ) return FisheyeMEICameraMetadata(**data_dict) @@ -153,15 +159,15 @@ def to_dict(self) -> Dict[str, Any]: return data_dict def cam2image(self, points_3d: npt.NDArray[np.float64]) -> npt.NDArray[np.float64]: - ''' camera coordinate to image plane ''' + """camera coordinate to image plane""" norm = np.linalg.norm(points_3d, axis=1) - x = points_3d[:,0] / norm - y = points_3d[:,1] / norm - z = points_3d[:,2] / norm + x = points_3d[:, 0] / norm + y = points_3d[:, 1] / norm + z = points_3d[:, 2] / norm - x /= z+self.mirror_parameter - y /= z+self.mirror_parameter + x /= z + self.mirror_parameter + y /= z + self.mirror_parameter if self.distortion is not None: k1 = self.distortion.k1 @@ -178,11 +184,11 @@ def cam2image(self, points_3d: npt.NDArray[np.float64]) -> npt.NDArray[np.float6 gamma1 = gamma2 = 1.0 u0 = v0 = 0.0 - ro2 = x*x + y*y - x *= 1 + k1*ro2 + k2*ro2*ro2 - y *= 1 + k1*ro2 + k2*ro2*ro2 + ro2 = x * x + y * y + x *= 1 + k1 * ro2 + k2 * ro2 * ro2 + y *= 1 + k1 * ro2 + k2 * ro2 * ro2 - x = gamma1*x + u0 - y = gamma2*y + v0 + x = gamma1 * x + u0 + y = gamma2 * y + v0 - return x, y, norm * points_3d[:,2] / np.abs(points_3d[:,2]) + return x, y, norm * points_3d[:, 2] / np.abs(points_3d[:, 2]) diff --git a/src/py123d/datatypes/sensors/camera/utils.py b/src/py123d/datatypes/sensors/camera/utils.py index 504d1e46..9ed591b0 100644 --- a/src/py123d/datatypes/sensors/camera/utils.py +++ b/src/py123d/datatypes/sensors/camera/utils.py @@ -1,21 +1,25 @@ from typing import Union -from py123d.datatypes.sensors.camera.pinhole_camera import PinholeCameraType from py123d.datatypes.sensors.camera.fisheye_mei_camera import FisheyeMEICameraType +from py123d.datatypes.sensors.camera.pinhole_camera import PinholeCameraType + def get_camera_type_by_value(value: int) -> Union[PinholeCameraType, FisheyeMEICameraType]: """Dynamically determine camera type based on value range.""" pinhole_values = [member.value for member in PinholeCameraType] fisheye_values = [member.value for member in FisheyeMEICameraType] - + if value in pinhole_values: return PinholeCameraType(value) elif value in fisheye_values: return FisheyeMEICameraType(value) else: - raise ValueError(f"Invalid camera type value: {value}. " - f"Valid PinholeCameraType values: {pinhole_values}, " - f"Valid FisheyeMEICameraType values: {fisheye_values}") + raise ValueError( + f"Invalid camera type value: {value}. 
" + f"Valid PinholeCameraType values: {pinhole_values}, " + f"Valid FisheyeMEICameraType values: {fisheye_values}" + ) + def deserialize_camera_type(camera_str: str) -> Union[PinholeCameraType, FisheyeMEICameraType]: """Deserialize camera type string to appropriate enum.""" @@ -23,14 +27,16 @@ def deserialize_camera_type(camera_str: str) -> Union[PinholeCameraType, Fisheye return PinholeCameraType.deserialize(camera_str) except (ValueError, KeyError): pass - + try: return FisheyeMEICameraType.deserialize(camera_str) except (ValueError, KeyError): pass - + pinhole_names = [member.name.lower() for member in PinholeCameraType] fisheye_names = [member.name.lower() for member in FisheyeMEICameraType] - raise ValueError(f"Unknown camera type: '{camera_str}'. " - f"Valid PinholeCameraType names: {pinhole_names}, " - f"Valid FisheyeMEICameraType names: {fisheye_names}") \ No newline at end of file + raise ValueError( + f"Unknown camera type: '{camera_str}'. " + f"Valid PinholeCameraType names: {pinhole_names}, " + f"Valid FisheyeMEICameraType names: {fisheye_names}" + ) diff --git a/src/py123d/datatypes/vehicle_state/vehicle_parameters.py b/src/py123d/datatypes/vehicle_state/vehicle_parameters.py index b68d1125..ca2a1944 100644 --- a/src/py123d/datatypes/vehicle_state/vehicle_parameters.py +++ b/src/py123d/datatypes/vehicle_state/vehicle_parameters.py @@ -92,19 +92,24 @@ def get_wopd_chrysler_pacifica_parameters() -> VehicleParameters: rear_axle_to_center_longitudinal=1.461, ) -def get_kitti360_station_wagon_parameters() -> VehicleParameters: - #NOTE: Parameters are estimated from the vehicle model. - #https://www.cvlibs.net/datasets/kitti-360/documentation.php + +def get_kitti360_vw_passat_parameters() -> VehicleParameters: + # The KITTI-360 dataset uses a 2006 VW Passat Variant B6. + # https://en.wikipedia.org/wiki/Volkswagen_Passat_(B6) + # [1] https://scispace.com/pdf/team-annieway-s-autonomous-system-18ql8b7kki.pdf + # NOTE: Parameters are estimated from the vehicle model. + # https://www.cvlibs.net/datasets/kitti-360/documentation.php return VehicleParameters( - vehicle_name="kitti360_station_wagon", - width=1.800, - length=3.500, - height=1.400, - wheel_base=2.710, - rear_axle_to_center_vertical=0.45, - rear_axle_to_center_longitudinal=2.71/2 + 0.05, + vehicle_name="kitti360_vw_passat", + width=1.820, + length=4.775, + height=1.516, + wheel_base=2.709, + rear_axle_to_center_vertical=1.516 / 2 - 0.9, + rear_axle_to_center_longitudinal=1.3369, ) + def get_av2_ford_fusion_hybrid_parameters() -> VehicleParameters: # NOTE: Parameters are estimated from the vehicle model. 
# https://en.wikipedia.org/wiki/Ford_Fusion_Hybrid#Second_generation diff --git a/src/py123d/geometry/transform/transform_se3.py b/src/py123d/geometry/transform/transform_se3.py index 8bf907ba..8f394772 100644 --- a/src/py123d/geometry/transform/transform_se3.py +++ b/src/py123d/geometry/transform/transform_se3.py @@ -206,10 +206,11 @@ def convert_points_3d_array_between_origins( assert points_3d_array.ndim >= 1 assert points_3d_array.shape[-1] == len(Point3DIndex) - abs_points = points_3d_array @ R_from.T + t_from - new_rel_points = (abs_points - t_to) @ R_to + R_rel = R_to.T @ R_from # Relative rotation matrix + t_rel = R_to.T @ (t_from - t_to) # Relative translation - return new_rel_points + conv_points_3d_array = (R_rel @ points_3d_array.T).T + t_rel + return conv_points_3d_array def translate_se3_along_z(state_se3: StateSE3, distance: float) -> StateSE3: diff --git a/src/py123d/script/config/common/scene_builder/default_scene_builder.yaml b/src/py123d/script/config/common/scene_builder/default_scene_builder.yaml index 1fadc982..ae1b1033 100644 --- a/src/py123d/script/config/common/scene_builder/default_scene_builder.yaml +++ b/src/py123d/script/config/common/scene_builder/default_scene_builder.yaml @@ -4,4 +4,3 @@ _convert_: 'all' # dataset_path: ${dataset_paths.py123d_data_root} logs_root: ${dataset_paths.py123d_logs_root} maps_root: ${dataset_paths.py123d_maps_root} - diff --git a/src/py123d/script/config/conversion/datasets/kitti360_dataset.yaml b/src/py123d/script/config/conversion/datasets/kitti360_dataset.yaml index 77cea31c..e85cfcab 100644 --- a/src/py123d/script/config/conversion/datasets/kitti360_dataset.yaml +++ b/src/py123d/script/config/conversion/datasets/kitti360_dataset.yaml @@ -14,7 +14,7 @@ kitti360_dataset: # Map include_map: true - + # Ego include_ego: true diff --git a/src/py123d/script/run_conversion.py b/src/py123d/script/run_conversion.py index bb4fe510..c2510b9b 100644 --- a/src/py123d/script/run_conversion.py +++ b/src/py123d/script/run_conversion.py @@ -37,10 +37,17 @@ def main(cfg: DictConfig) -> None: logger.info(f"Processing dataset: {dataset_converter.__class__.__name__}") map_args = [{"map_index": i} for i in range(dataset_converter.get_number_of_maps())] + logger.info( + f"Found maps: {dataset_converter.get_number_of_maps()} for dataset: {dataset_converter.__class__.__name__}" + ) + worker_map(worker, partial(_convert_maps, cfg=cfg, dataset_converter=dataset_converter), map_args) logger.info(f"Finished maps: {dataset_converter.__class__.__name__}") log_args = [{"log_index": i} for i in range(dataset_converter.get_number_of_logs())] + logger.info( + f"Found logs: {dataset_converter.get_number_of_logs()} for dataset: {dataset_converter.__class__.__name__}" + ) worker_map(worker, partial(_convert_logs, cfg=cfg, dataset_converter=dataset_converter), log_args) logger.info(f"Finished logs: {dataset_converter.__class__.__name__}") diff --git a/src/py123d/script/run_viser.py b/src/py123d/script/run_viser.py index 302d3ce7..cc89e024 100644 --- a/src/py123d/script/run_viser.py +++ b/src/py123d/script/run_viser.py @@ -36,4 +36,4 @@ def main(cfg: DictConfig) -> None: if __name__ == "__main__": - main() \ No newline at end of file + main() diff --git a/src/py123d/visualization/matplotlib/utils.py b/src/py123d/visualization/matplotlib/utils.py index 3c552eac..81c60260 100644 --- a/src/py123d/visualization/matplotlib/utils.py +++ b/src/py123d/visualization/matplotlib/utils.py @@ -34,19 +34,20 @@ def _add_element_helper(element: geom.Polygon): # Create path with 
exterior and interior rings def create_polygon_path(polygon): # Get exterior coordinates - exterior_coords = list(polygon.exterior.coords) + # NOTE: Only take first two dimensions in case of 3D coords + exterior_coords = np.array(polygon.exterior.coords)[:, :2].tolist() # Start with exterior ring - vertices = exterior_coords + vertices_2d = exterior_coords codes = [Path.MOVETO] + [Path.LINETO] * (len(exterior_coords) - 2) + [Path.CLOSEPOLY] # Add interior rings (holes) for interior in polygon.interiors: interior_coords = list(interior.coords) - vertices.extend(interior_coords) + vertices_2d.extend(interior_coords) codes.extend([Path.MOVETO] + [Path.LINETO] * (len(interior_coords) - 2) + [Path.CLOSEPOLY]) - return Path(vertices, codes) + return Path(vertices_2d, codes) path = create_polygon_path(element) diff --git a/test_viser.py b/test_viser.py index d5375bd7..6db46ac4 100644 --- a/test_viser.py +++ b/test_viser.py @@ -6,7 +6,8 @@ # from py123d.datatypes.sensors.camera.pinhole_camera import PinholeCameraType if __name__ == "__main__": - splits = ["nuscenes-mini_val", "nuscenes-mini_train"] + splits = ["kitti360"] + # splits = ["nuscenes-mini_val", "nuscenes-mini_train"] # splits = ["nuplan-mini_test", "nuplan-mini_train", "nuplan-mini_val"] # splits = ["nuplan_private_test"] # splits = ["carla_test"] @@ -14,17 +15,16 @@ # splits = ["av2-sensor_train"] # splits = ["pandaset_test", "pandaset_val", "pandaset_train"] # log_names = ["2021.08.24.13.12.55_veh-45_00386_00472"] - log_names = None - + log_names = ["2013_05_28_drive_0000_sync"] scene_uuids = None scene_filter = SceneFilter( split_names=splits, log_names=log_names, scene_uuids=scene_uuids, - duration_s=None, + duration_s=10.0, history_s=0.0, - timestamp_threshold_s=10.0, + timestamp_threshold_s=30.0, shuffle=True, # camera_types=[PinholeCameraType.CAM_F0], ) From a80de1d5e4e818ef55fd1ba55483e6c0aaddb329 Mon Sep 17 00:00:00 2001 From: Daniel Dauner Date: Sat, 1 Nov 2025 21:52:08 +0100 Subject: [PATCH 132/145] Remove the underscore for consistent `kitti360` naming in the package. 
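The create_polygon_path fix in the previous patch trims polygon coordinates to x/y before handing them to matplotlib, since shapely rings may carry a z component. A self-contained version of that idea; the helper name and polygon values here are illustrative, not from the repo:

import numpy as np
import shapely.geometry as geom
from matplotlib.path import Path

def polygon_to_path(polygon: geom.Polygon) -> Path:
    # Exterior ring first; keep only the first two dimensions.
    vertices = np.array(polygon.exterior.coords)[:, :2].tolist()
    codes = [Path.MOVETO] + [Path.LINETO] * (len(vertices) - 2) + [Path.CLOSEPOLY]
    # Interior rings become holes via additional MOVETO/CLOSEPOLY runs.
    for interior in polygon.interiors:
        ring = np.array(interior.coords)[:, :2].tolist()
        codes.extend([Path.MOVETO] + [Path.LINETO] * (len(ring) - 2) + [Path.CLOSEPOLY])
        vertices.extend(ring)
    return Path(vertices, codes)

square_with_hole = geom.Polygon(
    [(0, 0, 5.0), (4, 0, 5.0), (4, 4, 5.0), (0, 4, 5.0)],
    holes=[[(1, 1, 5.0), (1, 3, 5.0), (3, 3, 5.0), (3, 1, 5.0)]],
)
path = polygon_to_path(square_with_hole)  # usable with matplotlib.patches.PathPatch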
--- .../datasets/{kitti_360 => kitti360}/__init__.py | 0 .../kitti360_converter.py} | 10 +++++----- .../kitti360_map_conversion.py} | 2 +- .../kitti360_sensor_io.py} | 0 .../conversion/datasets/kitti360/utils/__init__.py | 0 .../utils/kitti360_helper.py} | 2 +- .../utils/kitti360_labels.py} | 0 .../utils}/preprocess_detection.py | 4 ++-- src/py123d/conversion/sensor_io/lidar/file_lidar_io.py | 2 +- .../config/conversion/datasets/kitti360_dataset.yaml | 2 +- 10 files changed, 11 insertions(+), 11 deletions(-) rename src/py123d/conversion/datasets/{kitti_360 => kitti360}/__init__.py (100%) rename src/py123d/conversion/datasets/{kitti_360/kitti_360_data_converter.py => kitti360/kitti360_converter.py} (98%) rename src/py123d/conversion/datasets/{kitti_360/kitti_360_map_conversion.py => kitti360/kitti360_map_conversion.py} (98%) rename src/py123d/conversion/datasets/{kitti_360/kitti_360_sensor_io.py => kitti360/kitti360_sensor_io.py} (100%) create mode 100644 src/py123d/conversion/datasets/kitti360/utils/__init__.py rename src/py123d/conversion/datasets/{kitti_360/kitti_360_helper.py => kitti360/utils/kitti360_helper.py} (98%) rename src/py123d/conversion/datasets/{kitti_360/kitti_360_labels.py => kitti360/utils/kitti360_labels.py} (100%) rename src/py123d/conversion/datasets/{kitti_360 => kitti360/utils}/preprocess_detection.py (98%) diff --git a/src/py123d/conversion/datasets/kitti_360/__init__.py b/src/py123d/conversion/datasets/kitti360/__init__.py similarity index 100% rename from src/py123d/conversion/datasets/kitti_360/__init__.py rename to src/py123d/conversion/datasets/kitti360/__init__.py diff --git a/src/py123d/conversion/datasets/kitti_360/kitti_360_data_converter.py b/src/py123d/conversion/datasets/kitti360/kitti360_converter.py similarity index 98% rename from src/py123d/conversion/datasets/kitti_360/kitti_360_data_converter.py rename to src/py123d/conversion/datasets/kitti360/kitti360_converter.py index d4d17d99..2bda2124 100644 --- a/src/py123d/conversion/datasets/kitti_360/kitti_360_data_converter.py +++ b/src/py123d/conversion/datasets/kitti360/kitti360_converter.py @@ -13,18 +13,18 @@ from py123d.conversion.abstract_dataset_converter import AbstractDatasetConverter from py123d.conversion.dataset_converter_config import DatasetConverterConfig -from py123d.conversion.datasets.kitti_360.kitti_360_helper import ( +from py123d.conversion.datasets.kitti360.kitti360_map_conversion import convert_kitti360_map_with_writer +from py123d.conversion.datasets.kitti360.utils.kitti360_helper import ( KITTI3602NUPLAN_IMU_CALIBRATION, KITTI360Bbox3D, get_lidar_extrinsic, ) -from py123d.conversion.datasets.kitti_360.kitti_360_labels import ( +from py123d.conversion.datasets.kitti360.utils.kitti360_labels import ( BBOX_LABLES_TO_DETECTION_NAME_DICT, KITTI360_DETECTION_NAME_DICT, kittiId2label, ) -from py123d.conversion.datasets.kitti_360.kitti_360_map_conversion import convert_kitti360_map_with_writer -from py123d.conversion.datasets.kitti_360.preprocess_detection import process_detection +from py123d.conversion.datasets.kitti360.utils.preprocess_detection import process_detection from py123d.conversion.log_writer.abstract_log_writer import AbstractLogWriter, LiDARData from py123d.conversion.map_writer.abstract_map_writer import AbstractMapWriter from py123d.conversion.registry.lidar_index_registry import Kitti360LidarIndex @@ -122,7 +122,7 @@ def get_kitti360_map_metadata(split: str, log_name: str) -> MapMetadata: ) -class Kitti360DataConverter(AbstractDatasetConverter): +class 
Kitti360Converter(AbstractDatasetConverter): def __init__( self, splits: List[str], diff --git a/src/py123d/conversion/datasets/kitti_360/kitti_360_map_conversion.py b/src/py123d/conversion/datasets/kitti360/kitti360_map_conversion.py similarity index 98% rename from src/py123d/conversion/datasets/kitti_360/kitti_360_map_conversion.py rename to src/py123d/conversion/datasets/kitti360/kitti360_map_conversion.py index 09975ca5..08562314 100644 --- a/src/py123d/conversion/datasets/kitti_360/kitti_360_map_conversion.py +++ b/src/py123d/conversion/datasets/kitti360/kitti360_map_conversion.py @@ -7,7 +7,7 @@ import pandas as pd import shapely.geometry as geom -from py123d.conversion.datasets.kitti_360.kitti_360_helper import KITTI360_MAP_Bbox3D +from py123d.conversion.datasets.kitti360.utils.kitti360_helper import KITTI360_MAP_Bbox3D from py123d.conversion.map_writer.abstract_map_writer import AbstractMapWriter from py123d.conversion.utils.map_utils.road_edge.road_edge_2d_utils import ( get_road_edge_linear_rings, diff --git a/src/py123d/conversion/datasets/kitti_360/kitti_360_sensor_io.py b/src/py123d/conversion/datasets/kitti360/kitti360_sensor_io.py similarity index 100% rename from src/py123d/conversion/datasets/kitti_360/kitti_360_sensor_io.py rename to src/py123d/conversion/datasets/kitti360/kitti360_sensor_io.py diff --git a/src/py123d/conversion/datasets/kitti360/utils/__init__.py b/src/py123d/conversion/datasets/kitti360/utils/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/src/py123d/conversion/datasets/kitti_360/kitti_360_helper.py b/src/py123d/conversion/datasets/kitti360/utils/kitti360_helper.py similarity index 98% rename from src/py123d/conversion/datasets/kitti_360/kitti_360_helper.py rename to src/py123d/conversion/datasets/kitti360/utils/kitti360_helper.py index 09d7d1e4..75eba581 100644 --- a/src/py123d/conversion/datasets/kitti_360/kitti_360_helper.py +++ b/src/py123d/conversion/datasets/kitti360/utils/kitti360_helper.py @@ -6,7 +6,7 @@ import numpy as np from scipy.linalg import polar -from py123d.conversion.datasets.kitti_360.kitti_360_labels import BBOX_LABLES_TO_DETECTION_NAME_DICT, kittiId2label +from py123d.conversion.datasets.kitti360.utils.kitti360_labels import BBOX_LABLES_TO_DETECTION_NAME_DICT, kittiId2label from py123d.geometry import BoundingBoxSE3, StateSE3 from py123d.geometry.polyline import Polyline3D from py123d.geometry.rotation import EulerAngles diff --git a/src/py123d/conversion/datasets/kitti_360/kitti_360_labels.py b/src/py123d/conversion/datasets/kitti360/utils/kitti360_labels.py similarity index 100% rename from src/py123d/conversion/datasets/kitti_360/kitti_360_labels.py rename to src/py123d/conversion/datasets/kitti360/utils/kitti360_labels.py diff --git a/src/py123d/conversion/datasets/kitti_360/preprocess_detection.py b/src/py123d/conversion/datasets/kitti360/utils/preprocess_detection.py similarity index 98% rename from src/py123d/conversion/datasets/kitti_360/preprocess_detection.py rename to src/py123d/conversion/datasets/kitti360/utils/preprocess_detection.py index e99d6af5..324cb337 100644 --- a/src/py123d/conversion/datasets/kitti_360/preprocess_detection.py +++ b/src/py123d/conversion/datasets/kitti360/utils/preprocess_detection.py @@ -30,12 +30,12 @@ PATH_3D_BBOX_ROOT = KITTI360_DATA_ROOT / DIR_3D_BBOX PATH_POSES_ROOT = KITTI360_DATA_ROOT / DIR_POSES -from py123d.conversion.datasets.kitti_360.kitti_360_helper import ( +from py123d.conversion.datasets.kitti360.utils.kitti360_helper import ( 
KITTI3602NUPLAN_IMU_CALIBRATION, KITTI360Bbox3D, get_lidar_extrinsic, ) -from py123d.conversion.datasets.kitti_360.kitti_360_labels import ( +from py123d.conversion.datasets.kitti360.utils.kitti360_labels import ( BBOX_LABLES_TO_DETECTION_NAME_DICT, KITTI360_DETECTION_NAME_DICT, kittiId2label, diff --git a/src/py123d/conversion/sensor_io/lidar/file_lidar_io.py b/src/py123d/conversion/sensor_io/lidar/file_lidar_io.py index cd918c05..ab94e578 100644 --- a/src/py123d/conversion/sensor_io/lidar/file_lidar_io.py +++ b/src/py123d/conversion/sensor_io/lidar/file_lidar_io.py @@ -60,7 +60,7 @@ def load_lidar_pcs_from_file( lidar_pcs_dict = load_pandaset_lidars_pcs_from_file(full_lidar_path, index) elif log_metadata.dataset == "kitti360": - from py123d.conversion.datasets.kitti_360.kitti_360_sensor_io import load_kitti360_lidar_pcs_from_file + from py123d.conversion.datasets.kitti360.kitti360_sensor_io import load_kitti360_lidar_pcs_from_file lidar_pcs_dict = load_kitti360_lidar_pcs_from_file(full_lidar_path, log_metadata) diff --git a/src/py123d/script/config/conversion/datasets/kitti360_dataset.yaml b/src/py123d/script/config/conversion/datasets/kitti360_dataset.yaml index e85cfcab..729b9587 100644 --- a/src/py123d/script/config/conversion/datasets/kitti360_dataset.yaml +++ b/src/py123d/script/config/conversion/datasets/kitti360_dataset.yaml @@ -1,5 +1,5 @@ kitti360_dataset: - _target_: py123d.conversion.datasets.kitti_360.kitti_360_data_converter.Kitti360DataConverter + _target_: py123d.conversion.datasets.kitti360.kitti360_data_converter.Kitti360Converter _convert_: 'all' splits: ["kitti360"] From 0455f90ea19c162f0f483696ca7cb08b0d2c00b4 Mon Sep 17 00:00:00 2001 From: Daniel Dauner Date: Sun, 2 Nov 2025 18:37:48 +0100 Subject: [PATCH 133/145] Extract 3D road edges for kitti, and a few minor general changes. 
--- notebooks/bev_matplotlib.ipynb | 140 ++------------- notebooks/bev_render.ipynb | 35 ++-- notebooks/camera_render.ipynb | 165 ++++++++++++++++++ pyproject.toml | 3 + .../av2/{utils => }/av2_map_conversion.py | 0 .../datasets/av2/av2_sensor_converter.py | 2 +- .../kitti360/kitti360_map_conversion.py | 135 ++++++-------- .../datasets/pandaset/pandaset_converter.py | 4 +- .../datasets/pandaset/pandaset_sensor_io.py | 2 +- .../{ => utils}/pandaset_constants.py | 0 .../pandaset/{ => utils}/pandaset_utlis.py | 0 .../scene_builder/default_scene_builder.yaml | 1 - .../conversion/datasets/kitti360_dataset.yaml | 2 +- src/py123d/visualization/matplotlib/camera.py | 17 +- src/py123d/visualization/matplotlib/plots.py | 3 +- .../viser/elements/render_elements.py | 51 +++++- .../visualization/viser/viser_viewer.py | 64 +++---- test_viser.py | 11 +- 18 files changed, 349 insertions(+), 286 deletions(-) create mode 100644 notebooks/camera_render.ipynb rename src/py123d/conversion/datasets/av2/{utils => }/av2_map_conversion.py (100%) rename src/py123d/conversion/datasets/pandaset/{ => utils}/pandaset_constants.py (100%) rename src/py123d/conversion/datasets/pandaset/{ => utils}/pandaset_utlis.py (100%) diff --git a/notebooks/bev_matplotlib.ipynb b/notebooks/bev_matplotlib.ipynb index 53fdcd15..44acc015 100644 --- a/notebooks/bev_matplotlib.ipynb +++ b/notebooks/bev_matplotlib.ipynb @@ -22,39 +22,31 @@ "metadata": {}, "outputs": [], "source": [ - "\n", - "# splits = [\"wopd_val\"]\n", - "# splits = [\"carla_test\"]\n", - "# splits = [\"nuscenes-mini_val\", \"nuscenes-mini_train\"]\n", - "# splits = [\"av2-sensor-mini_train\"]\n", - "# splits = [\"pandaset_train\"]\n", - "\n", - "# log_names = None\n", - "\n", - "from py123d.common.multithreading.worker_ray import RayDistributed\n", - "\n", - "\n", "splits = [\"kitti360\"]\n", - "\n", - "log_names = [\"2013_05_28_drive_0000_sync\"]\n", + "# splits = [\"nuscenes-mini_val\", \"nuscenes-mini_train\"]\n", + "# splits = [\"nuplan-mini_test\", \"nuplan-mini_train\", \"nuplan-mini_val\"]\n", + "# splits = [\"nuplan_private_test\"]\n", + "# splits = [\"carla_test\"]\n", + "# splits = [\"wopd_val\"]\n", + "# splits = [\"av2-sensor_train\"]\n", + "# splits = [\"pandaset_test\", \"pandaset_val\", \"pandaset_train\"]\n", + "log_names = None\n", "scene_uuids = None\n", "\n", "scene_filter = SceneFilter(\n", " split_names=splits,\n", " log_names=log_names,\n", " scene_uuids=scene_uuids,\n", - " duration_s=10.0,\n", + " duration_s=30.0,\n", " history_s=0.0,\n", - " timestamp_threshold_s=30,\n", + " timestamp_threshold_s=30.0,\n", " shuffle=True,\n", - " # camera_types=[CameraType.CAM_F0],\n", + " # camera_types=[PinholeCameraType.CAM_F0],\n", ")\n", "scene_builder = ArrowSceneBuilder()\n", "worker = Sequential()\n", - "# worker = RayDistributed()\n", "scenes = scene_builder.get_scenes(scene_filter, worker)\n", - "\n", - "print(f\"Found {len(scenes)} scenes\")" + "print(f\"Found {len(scenes)} scenes\")\n" ] }, { @@ -291,113 +283,7 @@ "metadata": {}, "outputs": [], "source": [ - "import shapely\n", - "from py123d.conversion.utils.map_utils.road_edge.road_edge_2d_utils import get_road_edge_linear_rings\n", - "\n", - "# from py123d.conversion.utils.map_utils.road_edge.road_edge_3d_utils import lift_road_edges_to_3d\n", - "from py123d.conversion.utils.map_utils.road_edge.road_edge_3d_utils import (\n", - " _interpolate_z_on_segment,\n", - " _split_continuous_segments,\n", - ")\n", - "from py123d.geometry.geometry_index import Point3DIndex\n", - "from 
py123d.geometry.occupancy_map import OccupancyMap2D\n", - "from py123d.geometry.polyline import Polyline3D\n", - "\n", - "\n", - "fix, ax = plt.subplots()\n", - "\n", - "\n", - "def lift_outlines_to_3d(\n", - " outlines_2d: List[shapely.LinearRing],\n", - " boundaries: List[Polyline3D],\n", - " max_distance: float = 10.0,\n", - ") -> List[Polyline3D]:\n", - " \"\"\"Lift 2D road edges to 3D by querying elevation from boundary segments.\n", - "\n", - " :param road_edges_2d: List of 2D road edge geometries.\n", - " :param boundaries: List of 3D boundary geometries.\n", - " :param max_distance: Maximum 2D distance for edge-boundary association.\n", - " :return: List of lifted 3D road edge geometries.\n", - " \"\"\"\n", - "\n", - " outlines_3d: List[Polyline3D] = []\n", - "\n", - " if len(outlines_2d) >= 1 and len(boundaries) >= 1:\n", - "\n", - " # 1. Build comprehensive spatial index with all boundary segments\n", - " # NOTE @DanielDauner: We split each boundary polyline into small segments.\n", - " # The spatial indexing uses axis-aligned bounding boxes, where small geometries lead to better performance.\n", - " boundary_segments = []\n", - " for boundary in boundaries:\n", - " coords = boundary.array.reshape(-1, 1, 3)\n", - " segment_coords_boundary = np.concatenate([coords[:-1], coords[1:]], axis=1)\n", - " boundary_segments.append(segment_coords_boundary)\n", - "\n", - " boundary_segments = np.concatenate(boundary_segments, axis=0)\n", - " boundary_segment_linestrings = shapely.creation.linestrings(boundary_segments)\n", - " occupancy_map = OccupancyMap2D(boundary_segment_linestrings)\n", - "\n", - " for linear_ring in outlines_2d:\n", - " points_2d = np.array(linear_ring.coords, dtype=np.float64)\n", - " points_3d = np.zeros((len(points_2d), len(Point3DIndex)), dtype=np.float64)\n", - " points_3d[..., Point3DIndex.XY] = points_2d\n", - "\n", - " # 3. Batch query for all points\n", - " query_points = shapely.creation.points(points_2d)\n", - " results = occupancy_map.query_nearest(query_points, max_distance=max_distance, exclusive=True)\n", - "\n", - " for query_idx, geometry_idx in zip(*results):\n", - " query_point = query_points[query_idx]\n", - " segment_coords = boundary_segments[geometry_idx]\n", - " best_z = _interpolate_z_on_segment(query_point, segment_coords)\n", - " points_3d[query_idx, 2] = best_z\n", - "\n", - " outlines_3d.append(Polyline3D.from_array(points_3d))\n", - "\n", - " return outlines_3d\n", - "\n", - "\n", - "def _extract_intersection_outline(lane_groups: List[AbstractLaneGroup], junction_id: str = 0) -> Polyline3D:\n", - " \"\"\"Helper method to extract intersection outline in 3D from lane group helpers.\"\"\"\n", - "\n", - " # 1. Extract the intersection outlines in 2D\n", - " intersection_polygons: List[shapely.Polygon] = [\n", - " lane_group_helper.shapely_polygon for lane_group_helper in lane_groups\n", - " ]\n", - " # for intersection_polygon in intersection_polygons:\n", - " # ax.plot(*intersection_polygon.exterior.xy)\n", - "\n", - " # for lane_group_helper in lane_groups:\n", - " # ax.plot(*lane_group_helper.outline.linestring.xy, color=\"blue\")\n", - " intersection_edges = get_road_edge_linear_rings(intersection_polygons, add_interiors=False)\n", - "\n", - " # for linear_ring in intersection_edges:\n", - " # ax.plot(*linear_ring.xy, color=\"blue\")\n", - "\n", - " # 2. 
Lift the 2D outlines to 3D\n", - " lane_group_outlines: List[Polyline3D] = [lane_group_helper.outline_3d for lane_group_helper in lane_groups]\n", - " intersection_outlines = lift_outlines_to_3d(intersection_edges, lane_group_outlines)\n", - "\n", - " print(len(intersection_outlines))\n", - "\n", - " # NOTE: When the intersection has multiple non-overlapping outlines, we cannot return a single outline in 3D.\n", - " # For now, we return the longest outline.\n", - "\n", - " valid_outlines = [outline for outline in intersection_outlines if outline.array.shape[0] > 2]\n", - " assert len(valid_outlines) > 0, f\"No valid intersection outlines found for Junction {junction_id}!\"\n", - "\n", - " longest_outline = max(valid_outlines, key=lambda outline: outline.length)\n", - "\n", - " # for linear_ring in intersection_outlines:\n", - " # ax.plot(*linear_ring.linestring.xy, color=\"red\")\n", - "\n", - " # ax.plot(*longest_outline.linestring.xy, color=\"red\")\n", - " # longest_outline.line\n", - " print(longest_outline.array[:, 2])\n", - " return longest_outline\n", - "\n", - "\n", - "_extract_intersection_outline(lane_groups)" + "asd" ] }, { diff --git a/notebooks/bev_render.ipynb b/notebooks/bev_render.ipynb index 6e84c122..c6eb260f 100644 --- a/notebooks/bev_render.ipynb +++ b/notebooks/bev_render.ipynb @@ -21,35 +21,36 @@ "metadata": {}, "outputs": [], "source": [ - "\n", - "# splits = [\"wopd_val\"]\n", - "splits = [\"carla_test\"]\n", - "# splits = [\"nuplan-mini_test\"]\n", - "# splits = [\"av2-sensor-mini_train\"]\n", - "# splits = [\"pandaset_train\"]\n", - "# log_names = None\n", - "\n", - "\n", - "\n", + "# splits = [\"kitti360\"]\n", + "# splits = [\"nuscenes-mini_val\", \"nuscenes-mini_train\"]\n", + "# splits = [\"nuplan-mini_test\", \"nuplan-mini_train\", \"nuplan-mini_val\"]\n", + "# splits = [\"nuplan_private_test\"]\n", + "# splits = [\"carla_test\"]\n", + "splits = [\"wopd_val\"]\n", + "# splits = [\"av2-sensor_train\"]\n", + "# splits = [\"pandaset_test\", \"pandaset_val\", \"pandaset_train\"]\n", + "# log_names = [\"2021.08.24.13.12.55_veh-45_00386_00472\"]\n", + "# log_names = [\"2013_05_28_drive_0000_sync\"]\n", + "# log_names = [\"2013_05_28_drive_0000_sync\"]\n", "log_names = None\n", - "scene_uuids = None\n", + "scene_uuids = [\"9727e2b3-46b0-51bd-84a9-c516c0993045\"]\n", "\n", "scene_filter = SceneFilter(\n", " split_names=splits,\n", " log_names=log_names,\n", " scene_uuids=scene_uuids,\n", - " duration_s=20.0,\n", + " duration_s=None,\n", " history_s=0.0,\n", - " timestamp_threshold_s=20,\n", + " timestamp_threshold_s=None,\n", " shuffle=True,\n", - " # camera_types=[CameraType.CAM_F0],\n", + " # camera_types=[PinholeCameraType.CAM_F0],\n", ")\n", "scene_builder = ArrowSceneBuilder()\n", "worker = Sequential()\n", - "# worker = RayDistributed()\n", "scenes = scene_builder.get_scenes(scene_filter, worker)\n", "\n", - "print(f\"Found {len(scenes)} scenes\")" + "scenes = [scene for scene in scenes if scene.uuid in scene_uuids]\n", + "print(f\"Found {len(scenes)} scenes\")\n" ] }, { @@ -61,7 +62,7 @@ "source": [ "from py123d.visualization.matplotlib.plots import render_scene_animation\n", "\n", - "for i in [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]:\n", + "for i in [0]:\n", " render_scene_animation(scenes[i], output_path=\"test\", format=\"mp4\", fps=20, step=1, radius=50)" ] }, diff --git a/notebooks/camera_render.ipynb b/notebooks/camera_render.ipynb new file mode 100644 index 00000000..4cb5fd50 --- /dev/null +++ b/notebooks/camera_render.ipynb @@ -0,0 +1,165 @@ +{ + "cells": [ + { 
+ "cell_type": "code", + "execution_count": null, + "id": "0", + "metadata": {}, + "outputs": [], + "source": [ + "from py123d.datatypes.scene.arrow.arrow_scene_builder import ArrowSceneBuilder\n", + "from py123d.datatypes.scene.scene_filter import SceneFilter\n", + "\n", + "from py123d.common.multithreading.worker_sequential import Sequential\n", + "from py123d.datatypes.sensors.camera.pinhole_camera import PinholeCameraType\n", + "\n", + "KITTI360_DATA_ROOT = \"/home/daniel/kitti_360/KITTI-360\"" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "1", + "metadata": {}, + "outputs": [], + "source": [ + "# splits = [\"kitti360\"]\n", + "# splits = [\"nuscenes-mini_val\", \"nuscenes-mini_train\"]\n", + "# splits = [\"nuplan-mini_test\", \"nuplan-mini_train\", \"nuplan-mini_val\"]\n", + "# splits = [\"nuplan_private_test\"]\n", + "# splits = [\"carla_test\"]\n", + "splits = [\"wopd_val\"]\n", + "# splits = [\"av2-sensor_train\"]\n", + "# splits = [\"pandaset_test\", \"pandaset_val\", \"pandaset_train\"]\n", + "# log_names = [\"2021.08.24.13.12.55_veh-45_00386_00472\"]\n", + "# log_names = [\"2013_05_28_drive_0000_sync\"]\n", + "# log_names = [\"2013_05_28_drive_0000_sync\"]\n", + "log_names = None\n", + "scene_uuids = [\"9727e2b3-46b0-51bd-84a9-c516c0993045\"]\n", + "\n", + "scene_filter = SceneFilter(\n", + " split_names=splits,\n", + " log_names=log_names,\n", + " scene_uuids=scene_uuids,\n", + " duration_s=None,\n", + " history_s=0.0,\n", + " timestamp_threshold_s=None,\n", + " shuffle=True,\n", + " # camera_types=[PinholeCameraType.CAM_F0],\n", + ")\n", + "scene_builder = ArrowSceneBuilder()\n", + "worker = Sequential()\n", + "scenes = scene_builder.get_scenes(scene_filter, worker)\n", + "\n", + "scenes = [scene for scene in scenes if scene.uuid in scene_uuids]\n", + "print(f\"Found {len(scenes)} scenes\")\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "2", + "metadata": {}, + "outputs": [], + "source": [ + "from matplotlib import pyplot as plt\n", + "from py123d.datatypes.scene.abstract_scene import AbstractScene\n", + "from py123d.visualization.matplotlib.camera import add_box_detections_to_camera_ax, add_camera_ax\n", + "import imageio\n", + "import numpy as np\n", + "\n", + "iteration = 0\n", + "scene = scenes[0]\n", + "\n", + "scene: AbstractScene\n", + "fps = 15 # frames per second\n", + "output_file = f\"camera_{scene.log_metadata.split}_{scene.uuid}.mp4\"\n", + "\n", + "writer = imageio.get_writer(output_file, fps=fps)\n", + "\n", + "scale = 3.0\n", + "fig, ax = plt.subplots(2, 3, figsize=(scale * 6, scale * 2.5))\n", + "\n", + "\n", + "camera_type = PinholeCameraType.CAM_F0\n", + "\n", + "for i in range(scene.number_of_iterations):\n", + " camera = scene.get_camera_at_iteration(i, camera_type)\n", + " box_detections = scene.get_box_detections_at_iteration(i)\n", + " ego_state = scene.get_ego_state_at_iteration(i)\n", + "\n", + " _, image = add_box_detections_to_camera_ax(\n", + " None,\n", + " camera,\n", + " box_detections,\n", + " ego_state,\n", + " return_image=True,\n", + " )\n", + " writer.append_data(image)\n", + "\n", + "writer.close()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "3", + "metadata": {}, + "outputs": [], + "source": [] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "4", + "metadata": {}, + "outputs": [], + "source": [] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "5", + "metadata": {}, + "outputs": [], + "source": [] + }, + { + "cell_type": 
"code", + "execution_count": null, + "id": "6", + "metadata": {}, + "outputs": [], + "source": [] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "7", + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "py123d", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.12.12" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/pyproject.toml b/pyproject.toml index 9284be71..267ec19d 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -109,6 +109,9 @@ waymo = [ "tensorflow==2.13.0", "waymo-open-dataset-tf-2-12-0==1.6.6", ] +ffmpeg = [ + "imageio[ffmpeg]", +] [tool.setuptools.packages.find] where = ["src"] diff --git a/src/py123d/conversion/datasets/av2/utils/av2_map_conversion.py b/src/py123d/conversion/datasets/av2/av2_map_conversion.py similarity index 100% rename from src/py123d/conversion/datasets/av2/utils/av2_map_conversion.py rename to src/py123d/conversion/datasets/av2/av2_map_conversion.py diff --git a/src/py123d/conversion/datasets/av2/av2_sensor_converter.py b/src/py123d/conversion/datasets/av2/av2_sensor_converter.py index 172954a9..8fd3fd9c 100644 --- a/src/py123d/conversion/datasets/av2/av2_sensor_converter.py +++ b/src/py123d/conversion/datasets/av2/av2_sensor_converter.py @@ -6,6 +6,7 @@ from py123d.conversion.abstract_dataset_converter import AbstractDatasetConverter from py123d.conversion.dataset_converter_config import DatasetConverterConfig +from py123d.conversion.datasets.av2.av2_map_conversion import convert_av2_map from py123d.conversion.datasets.av2.utils.av2_constants import ( AV2_CAMERA_TYPE_MAPPING, AV2_SENSOR_SPLITS, @@ -18,7 +19,6 @@ find_closest_target_fpath, get_slice_with_timestamp_ns, ) -from py123d.conversion.datasets.av2.utils.av2_map_conversion import convert_av2_map from py123d.conversion.log_writer.abstract_log_writer import AbstractLogWriter, LiDARData from py123d.conversion.map_writer.abstract_map_writer import AbstractMapWriter from py123d.conversion.registry.lidar_index_registry import AVSensorLidarIndex diff --git a/src/py123d/conversion/datasets/kitti360/kitti360_map_conversion.py b/src/py123d/conversion/datasets/kitti360/kitti360_map_conversion.py index 08562314..847250eb 100644 --- a/src/py123d/conversion/datasets/kitti360/kitti360_map_conversion.py +++ b/src/py123d/conversion/datasets/kitti360/kitti360_map_conversion.py @@ -3,8 +3,7 @@ from pathlib import Path from typing import List -import geopandas as gpd -import pandas as pd +import numpy as np import shapely.geometry as geom from py123d.conversion.datasets.kitti360.utils.kitti360_helper import KITTI360_MAP_Bbox3D @@ -13,7 +12,9 @@ get_road_edge_linear_rings, split_line_geometry_by_max_length, ) +from py123d.conversion.utils.map_utils.road_edge.road_edge_3d_utils import lift_road_edges_to_3d from py123d.datatypes.maps.cache.cache_map_objects import ( + CacheCarpark, CacheGenericDrivable, CacheRoadEdge, CacheWalkway, @@ -34,68 +35,10 @@ "sidewalk", # "railtrack", # "ground", - # "driveway", + "driveway", ] -def _get_none_data() -> gpd.GeoDataFrame: - ids = [] - geometries = [] - data = pd.DataFrame({"id": ids}) - gdf = gpd.GeoDataFrame(data, geometry=geometries) - return gdf - - -def _extract_generic_drivable_df(objs: list[KITTI360_MAP_Bbox3D]) -> gpd.GeoDataFrame: - 
ids: List[int] = [] - outlines: List[geom.LineString] = [] - geometries: List[geom.Polygon] = [] - for obj in objs: - if obj.label != "road": - continue - ids.append(obj.id) - outlines.append(obj.vertices.linestring) - geometries.append(geom.Polygon(obj.vertices.array[:, :3])) - data = pd.DataFrame({"id": ids, "outline": outlines}) - gdf = gpd.GeoDataFrame(data, geometry=geometries) - return gdf - - -def _extract_walkway_df(objs: list[KITTI360_MAP_Bbox3D]) -> gpd.GeoDataFrame: - ids: List[int] = [] - outlines: List[geom.LineString] = [] - geometries: List[geom.Polygon] = [] - for obj in objs: - if obj.label != "sidewalk": - continue - ids.append(obj.id) - outlines.append(obj.vertices.linestring) - geometries.append(geom.Polygon(obj.vertices.array[:, :3])) - - data = pd.DataFrame({"id": ids, "outline": outlines}) - gdf = gpd.GeoDataFrame(data, geometry=geometries) - return gdf - - -def _extract_road_edge_df(objs: list[KITTI360_MAP_Bbox3D]) -> gpd.GeoDataFrame: - geometries: List[geom.Polygon] = [] - for obj in objs: - if obj.label != "road": - continue - geometries.append(geom.Polygon(obj.vertices.array[:, :3])) - road_edge_linear_rings = get_road_edge_linear_rings(geometries) - road_edges = split_line_geometry_by_max_length(road_edge_linear_rings, MAX_ROAD_EDGE_LENGTH) - - ids = [] - road_edge_types = [] - for idx in range(len(road_edges)): - ids.append(idx) - road_edge_types.append(int(RoadEdgeType.ROAD_EDGE_BOUNDARY)) - - data = pd.DataFrame({"id": ids, "road_edge_type": road_edge_types}) - return gpd.GeoDataFrame(data, geometry=road_edges) - - def convert_kitti360_map_with_writer(log_name: str, map_writer: AbstractMapWriter) -> None: """ Convert KITTI-360 map data using the provided map writer. @@ -123,29 +66,51 @@ def convert_kitti360_map_with_writer(log_name: str, map_writer: AbstractMapWrite obj.parseBbox(child) objs.append(obj) - generic_drivable_gdf = _extract_generic_drivable_df(objs) - walkway_gdf = _extract_walkway_df(objs) - road_edge_gdf = _extract_road_edge_df(objs) - - for idx, row in generic_drivable_gdf.iterrows(): - if not row.geometry.is_empty: - map_writer.write_generic_drivable(CacheGenericDrivable(object_id=idx, geometry=row.geometry)) - - for idx, row in walkway_gdf.iterrows(): - if not row.geometry.is_empty: - map_writer.write_walkway(CacheWalkway(object_id=idx, geometry=row.geometry)) - - for idx, row in road_edge_gdf.iterrows(): - if not row.geometry.is_empty: - if hasattr(row.geometry, "exterior"): - road_edge_line = row.geometry.exterior - else: - road_edge_line = row.geometry - - map_writer.write_road_edge( - CacheRoadEdge( - object_id=idx, - road_edge_type=RoadEdgeType.ROAD_EDGE_BOUNDARY, - polyline=Polyline3D.from_linestring(road_edge_line), + # 1. 
Write roads, sidewalks, driveways, and collect road geometries + road_outlines_3d: List[Polyline3D] = [] + for obj in objs: + if obj.label == "road": + map_writer.write_generic_drivable( + CacheGenericDrivable( + object_id=obj.id, + outline=obj.vertices, + geometry=geom.Polygon(obj.vertices.array[:, :3]), ) ) + road_outline_array = np.concatenate([obj.vertices.array[:, :3], obj.vertices.array[0:, :3]]) + road_outlines_3d.append(Polyline3D.from_array(road_outline_array)) + elif obj.label == "sidewalk": + map_writer.write_walkway( + CacheWalkway( + object_id=obj.id, + outline=obj.vertices, + geometry=geom.Polygon(obj.vertices.array[:, :3]), + ) + ) + elif obj.label == "driveway": + map_writer.write_carpark( + CacheCarpark( + object_id=obj.id, + outline=obj.vertices, + geometry=geom.Polygon(obj.vertices.array[:, :3]), + ) + ) + + # 2. Use road geometries to create road edges + + # NOTE @DanielDauner: We merge all drivable areas in 2D and lift the outlines to 3D. + # Currently the method assumes that the drivable areas do not overlap and all road surfaces are included. + road_polygons_2d = [geom.Polygon(road_outline.array[:, :2]) for road_outline in road_outlines_3d] + road_edges_2d = get_road_edge_linear_rings(road_polygons_2d) + road_edges_3d = lift_road_edges_to_3d(road_edges_2d, road_outlines_3d) + road_edges_linestrings_3d = [polyline.linestring for polyline in road_edges_3d] + road_edges_linestrings_3d = split_line_geometry_by_max_length(road_edges_linestrings_3d, MAX_ROAD_EDGE_LENGTH) + + for idx in range(len(road_edges_linestrings_3d)): + map_writer.write_road_edge( + CacheRoadEdge( + object_id=idx, + road_edge_type=RoadEdgeType.ROAD_EDGE_BOUNDARY, + polyline=Polyline3D.from_linestring(road_edges_linestrings_3d[idx]), + ) + ) diff --git a/src/py123d/conversion/datasets/pandaset/pandaset_converter.py b/src/py123d/conversion/datasets/pandaset/pandaset_converter.py index 49a81d19..dcefb187 100644 --- a/src/py123d/conversion/datasets/pandaset/pandaset_converter.py +++ b/src/py123d/conversion/datasets/pandaset/pandaset_converter.py @@ -6,7 +6,7 @@ from py123d.conversion.abstract_dataset_converter import AbstractDatasetConverter from py123d.conversion.dataset_converter_config import DatasetConverterConfig -from py123d.conversion.datasets.pandaset.pandaset_constants import ( +from py123d.conversion.datasets.pandaset.utils.pandaset_constants import ( PANDASET_BOX_DETECTION_FROM_STR, PANDASET_BOX_DETECTION_TO_DEFAULT, PANDASET_CAMERA_DISTORTIONS, @@ -16,7 +16,7 @@ PANDASET_LOG_NAMES, PANDASET_SPLITS, ) -from py123d.conversion.datasets.pandaset.pandaset_utlis import ( +from py123d.conversion.datasets.pandaset.utils.pandaset_utlis import ( main_lidar_to_rear_axle, pandaset_pose_dict_to_state_se3, read_json, diff --git a/src/py123d/conversion/datasets/pandaset/pandaset_sensor_io.py b/src/py123d/conversion/datasets/pandaset/pandaset_sensor_io.py index 30fff374..e07ff916 100644 --- a/src/py123d/conversion/datasets/pandaset/pandaset_sensor_io.py +++ b/src/py123d/conversion/datasets/pandaset/pandaset_sensor_io.py @@ -4,7 +4,7 @@ import numpy as np import pandas as pd -from py123d.conversion.datasets.pandaset.pandaset_utlis import ( +from py123d.conversion.datasets.pandaset.utils.pandaset_utlis import ( main_lidar_to_rear_axle, pandaset_pose_dict_to_state_se3, read_json, diff --git a/src/py123d/conversion/datasets/pandaset/pandaset_constants.py b/src/py123d/conversion/datasets/pandaset/utils/pandaset_constants.py similarity index 100% rename from 
src/py123d/conversion/datasets/pandaset/pandaset_constants.py rename to src/py123d/conversion/datasets/pandaset/utils/pandaset_constants.py diff --git a/src/py123d/conversion/datasets/pandaset/pandaset_utlis.py b/src/py123d/conversion/datasets/pandaset/utils/pandaset_utlis.py similarity index 100% rename from src/py123d/conversion/datasets/pandaset/pandaset_utlis.py rename to src/py123d/conversion/datasets/pandaset/utils/pandaset_utlis.py diff --git a/src/py123d/script/config/common/scene_builder/default_scene_builder.yaml b/src/py123d/script/config/common/scene_builder/default_scene_builder.yaml index ae1b1033..cf2e553a 100644 --- a/src/py123d/script/config/common/scene_builder/default_scene_builder.yaml +++ b/src/py123d/script/config/common/scene_builder/default_scene_builder.yaml @@ -1,6 +1,5 @@ _target_: py123d.datatypes.scene.arrow.arrow_scene_builder.ArrowSceneBuilder _convert_: 'all' -# dataset_path: ${dataset_paths.py123d_data_root} logs_root: ${dataset_paths.py123d_logs_root} maps_root: ${dataset_paths.py123d_maps_root} diff --git a/src/py123d/script/config/conversion/datasets/kitti360_dataset.yaml b/src/py123d/script/config/conversion/datasets/kitti360_dataset.yaml index 729b9587..5b06890e 100644 --- a/src/py123d/script/config/conversion/datasets/kitti360_dataset.yaml +++ b/src/py123d/script/config/conversion/datasets/kitti360_dataset.yaml @@ -1,5 +1,5 @@ kitti360_dataset: - _target_: py123d.conversion.datasets.kitti360.kitti360_data_converter.Kitti360Converter + _target_: py123d.conversion.datasets.kitti360.kitti360_converter.Kitti360Converter _convert_: 'all' splits: ["kitti360"] diff --git a/src/py123d/visualization/matplotlib/camera.py b/src/py123d/visualization/matplotlib/camera.py index 9126655d..39bf98a3 100644 --- a/src/py123d/visualization/matplotlib/camera.py +++ b/src/py123d/visualization/matplotlib/camera.py @@ -73,20 +73,9 @@ def add_box_detections_to_camera_ax( camera: PinholeCamera, box_detections: BoxDetectionWrapper, ego_state_se3: EgoStateSE3, + return_image: bool = False, ) -> plt.Axes: - # box_labels = annotations.names - # boxes = _transform_annotations_to_camera( - # annotations.boxes, - # camera.sensor2lidar_rotation, - # camera.sensor2lidar_translation, - # ) - # box_positions, box_dimensions, box_heading = ( - # boxes[:, BoundingBoxIndex.POSITION], - # boxes[:, BoundingBoxIndex.DIMENSION], - # boxes[:, BoundingBoxIndex.HEADING], - # ) - box_detection_array = np.zeros((len(box_detections.box_detections), len(BoundingBoxSE3Index)), dtype=np.float64) detection_types = np.array( [detection.metadata.box_detection_type for detection in box_detections.box_detections], dtype=object @@ -123,6 +112,10 @@ def add_box_detections_to_camera_ax( box_corners, detection_types = box_corners[valid_corners], detection_types[valid_corners] image = _plot_rect_3d_on_img(camera.image.copy(), box_corners, detection_types) + if return_image: + # ax.imshow(image) + return ax, image + ax.imshow(image) return ax diff --git a/src/py123d/visualization/matplotlib/plots.py b/src/py123d/visualization/matplotlib/plots.py index cbbdca61..01100f01 100644 --- a/src/py123d/visualization/matplotlib/plots.py +++ b/src/py123d/visualization/matplotlib/plots.py @@ -25,7 +25,8 @@ def _plot_scene_on_ax(ax: plt.Axes, scene: AbstractScene, iteration: int = 0, ra point_2d = ego_vehicle_state.bounding_box.center.state_se2.point_2d if map_api is not None: add_default_map_on_ax(ax, map_api, point_2d, radius=radius, route_lane_group_ids=route_lane_group_ids) - add_traffic_lights_to_ax(ax, 
traffic_light_detections, map_api) + if traffic_light_detections is not None: + add_traffic_lights_to_ax(ax, traffic_light_detections, map_api) add_box_detections_to_ax(ax, box_detections) add_ego_vehicle_to_ax(ax, ego_vehicle_state) diff --git a/src/py123d/visualization/viser/elements/render_elements.py b/src/py123d/visualization/viser/elements/render_elements.py index 6df316b2..f807033e 100644 --- a/src/py123d/visualization/viser/elements/render_elements.py +++ b/src/py123d/visualization/viser/elements/render_elements.py @@ -1,7 +1,10 @@ +import numpy as np + from py123d.conversion.utils.sensor_utils.camera_conventions import convert_camera_convention from py123d.datatypes.scene.abstract_scene import AbstractScene from py123d.datatypes.vehicle_state.ego_state import EgoStateSE3 from py123d.geometry.geometry_index import StateSE3Index +from py123d.geometry.rotation import EulerAngles from py123d.geometry.se import StateSE3 from py123d.geometry.transform.transform_se3 import translate_se3_along_body_frame from py123d.geometry.vector import Vector3D @@ -16,10 +19,56 @@ def get_ego_3rd_person_view_position( ego_pose = scene.get_ego_state_at_iteration(iteration).rear_axle_se3.array ego_pose[StateSE3Index.XYZ] -= scene_center_array ego_pose_se3 = StateSE3.from_array(ego_pose) - ego_pose_se3 = translate_se3_along_body_frame(ego_pose_se3, Vector3D(-10.0, 0.0, 5.0)) + ego_pose_se3 = translate_se3_along_body_frame(ego_pose_se3, Vector3D(-15.0, 0.0, 15)) + + # adjust the pitch to -10 degrees. + # euler_angles_array = ego_pose_se3.euler_angles.array + # euler_angles_array[1] += np.deg2rad(30) + # new_quaternion = EulerAngles.from_array(euler_angles_array).quaternion + + ego_pose_se3 = _pitch_se3_by_degrees(ego_pose_se3, 30.0) return convert_camera_convention( ego_pose_se3, from_convention="pXpZmY", to_convention="pZmYpX", ) + + +def get_ego_bev_view_position( + scene: AbstractScene, + iteration: int, + initial_ego_state: EgoStateSE3, +) -> StateSE3: + scene_center_array = initial_ego_state.center.point_3d.array + ego_center = scene.get_ego_state_at_iteration(iteration).center.array + ego_center[StateSE3Index.XYZ] -= scene_center_array + ego_center_planar = StateSE3.from_array(ego_center) + + planar_euler_angles = EulerAngles(0.0, 0.0, ego_center_planar.euler_angles.yaw) + quaternion = planar_euler_angles.quaternion + ego_center_planar._array[StateSE3Index.QUATERNION] = quaternion.array + + ego_center_planar = translate_se3_along_body_frame(ego_center_planar, Vector3D(0.0, 0.0, 50)) + ego_center_planar = _pitch_se3_by_degrees(ego_center_planar, 90.0) + + return convert_camera_convention( + ego_center_planar, + from_convention="pXpZmY", + to_convention="pZmYpX", + ) + + +def _pitch_se3_by_degrees(state_se3: StateSE3, degrees: float) -> StateSE3: + + quaternion = EulerAngles(0.0, np.deg2rad(degrees), state_se3.yaw).quaternion + + return StateSE3( + x=state_se3.x, + y=state_se3.y, + z=state_se3.z, + qw=quaternion.qw, + qx=quaternion.qx, + qy=quaternion.qy, + qz=quaternion.qz, + ) diff --git a/src/py123d/visualization/viser/viser_viewer.py b/src/py123d/visualization/viser/viser_viewer.py index b2008def..89e6d108 100644 --- a/src/py123d/visualization/viser/viser_viewer.py +++ b/src/py123d/visualization/viser/viser_viewer.py @@ -1,3 +1,4 @@ +import io import logging import time from typing import Dict, List, Optional @@ -10,7 +11,6 @@ from py123d.datatypes.maps.map_datatypes import MapLayer from py123d.datatypes.scene.abstract_scene import AbstractScene from py123d.datatypes.sensors.camera.pinhole_camera 
import PinholeCameraType -from py123d.datatypes.sensors.lidar.lidar import LiDARType from py123d.datatypes.vehicle_state.ego_state import EgoStateSE3 from py123d.visualization.viser.elements import ( add_box_detections_to_viser_server, @@ -19,35 +19,15 @@ add_lidar_pc_to_viser_server, add_map_to_viser_server, ) -from py123d.visualization.viser.elements.render_elements import get_ego_3rd_person_view_position +from py123d.visualization.viser.elements.render_elements import ( + get_ego_3rd_person_view_position, + get_ego_bev_view_position, +) from py123d.visualization.viser.viser_config import ViserConfig logger = logging.getLogger(__name__) -all_camera_types: List[PinholeCameraType] = [ - PinholeCameraType.CAM_F0, - PinholeCameraType.CAM_B0, - PinholeCameraType.CAM_L0, - PinholeCameraType.CAM_L1, - PinholeCameraType.CAM_L2, - PinholeCameraType.CAM_R0, - PinholeCameraType.CAM_R1, - PinholeCameraType.CAM_R2, - PinholeCameraType.CAM_STEREO_L, - PinholeCameraType.CAM_STEREO_R, -] - -all_lidar_types: List[LiDARType] = [ - LiDARType.LIDAR_MERGED, - LiDARType.LIDAR_TOP, - LiDARType.LIDAR_FRONT, - LiDARType.LIDAR_SIDE_LEFT, - LiDARType.LIDAR_SIDE_RIGHT, - LiDARType.LIDAR_BACK, -] - - def _build_viser_server(viser_config: ViserConfig) -> viser.ViserServer: server = viser.ViserServer( host=viser_config.server_host, @@ -140,7 +120,12 @@ def set_scene(self, scene: AbstractScene) -> None: "FPS options", ("10", "25", "50", "75", "100") ) - button = self._viser_server.gui.add_button("Render Scene") + with self._viser_server.gui.add_folder("Render", expand_by_default=False): + render_format = self._viser_server.gui.add_dropdown("Format", ["gif", "mp4"], initial_value="mp4") + render_view = self._viser_server.gui.add_dropdown( + "View", ["3rd Person", "BEV", "Manual"], initial_value="3rd Person" + ) + button = self._viser_server.gui.add_button("Render Scene") # Frame step buttons. 
@gui_next_frame.on_click @@ -217,6 +202,7 @@ def _(_) -> None: @button.on_click def _(event: viser.GuiEvent) -> None: + nonlocal server_rendering client = event.client assert client is not None @@ -227,12 +213,24 @@ def _(event: viser.GuiEvent) -> None: for i in tqdm(range(scene.number_of_iterations)): gui_timestep.value = i - ego_view = get_ego_3rd_person_view_position(scene, i, initial_ego_state) - client.camera.position = ego_view.point_3d.array - client.camera.wxyz = ego_view.quaternion.array - images.append(client.get_render(height=720, width=1280)) - - client.send_file_download("image.mp4", iio.imwrite("", images, extension=".mp4", fps=30)) + if render_view.value == "BEV": + ego_view = get_ego_bev_view_position(scene, i, initial_ego_state) + client.camera.position = ego_view.point_3d.array + client.camera.wxyz = ego_view.quaternion.array + elif render_view.value == "3rd Person": + ego_view = get_ego_3rd_person_view_position(scene, i, initial_ego_state) + client.camera.position = ego_view.point_3d.array + client.camera.wxyz = ego_view.quaternion.array + images.append(client.get_render(height=1080, width=1920)) + format = render_format.value + buffer = io.BytesIO() + if format == "gif": + iio.imwrite(buffer, images, extension=".gif", loop=False) + elif format == "mp4": + iio.imwrite(buffer, images, extension=".mp4", fps=20) + content = buffer.getvalue() + scene_name = f"{scene.log_metadata.split}_{scene.uuid}" + client.send_file_download(f"{scene_name}.{format}", content, save_immediately=True) server_rendering = False camera_frustum_handles: Dict[PinholeCameraType, viser.CameraFrustumHandle] = {} @@ -284,6 +282,8 @@ def _(event: viser.GuiEvent) -> None: if gui_playing.value and not server_rendering: gui_timestep.value = (gui_timestep.value + 1) % num_frames + else: + time.sleep(0.1) self._viser_server.flush() self.next() diff --git a/test_viser.py b/test_viser.py index 6db46ac4..a2b83796 100644 --- a/test_viser.py +++ b/test_viser.py @@ -3,8 +3,6 @@ from py123d.datatypes.scene.scene_filter import SceneFilter from py123d.visualization.viser.viser_viewer import ViserViewer -# from py123d.datatypes.sensors.camera.pinhole_camera import PinholeCameraType - if __name__ == "__main__": splits = ["kitti360"] # splits = ["nuscenes-mini_val", "nuscenes-mini_train"] @@ -15,16 +13,19 @@ # splits = ["av2-sensor_train"] # splits = ["pandaset_test", "pandaset_val", "pandaset_train"] # log_names = ["2021.08.24.13.12.55_veh-45_00386_00472"] - log_names = ["2013_05_28_drive_0000_sync"] + # log_names = ["2013_05_28_drive_0000_sync"] + # log_names = ["2013_05_28_drive_0000_sync"] + log_names = None + # scene_uuids = ["60a37beb-6df4-5413-b753-9280125020cf"] scene_uuids = None scene_filter = SceneFilter( split_names=splits, log_names=log_names, scene_uuids=scene_uuids, - duration_s=10.0, + duration_s=None, history_s=0.0, - timestamp_threshold_s=30.0, + timestamp_threshold_s=None, shuffle=True, # camera_types=[PinholeCameraType.CAM_F0], ) From fbd431b3d1bb62ed4cb6e3e1e6be6975aa081dde Mon Sep 17 00:00:00 2001 From: Daniel Dauner Date: Mon, 3 Nov 2025 14:10:23 +0100 Subject: [PATCH 134/145] Implement fisheye cameras as a separate modality. General refactorings of sensors. Remove reliance on environment file paths for KITTI-360. 
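
Commit body (added for context): fisheye cameras now carry their own metadata
and converter options (FisheyeMEICameraMetadata, FisheyeMEICameraType,
include_fisheye_mei_cameras, fisheye_mei_camera_store_option) alongside the
pinhole camera path, and the pinhole enum values move from CAM_* to PCAM_* to
keep the two modalities apart. For reference, the unified (MEI) model behind
FisheyeMEIProjection and FisheyeMEIDistortion projects points roughly as
sketched below; the parameter names follow the common MEI formulation (mirror
parameter xi, generalized intrinsics gamma1/gamma2/u0/v0, distortion
k1/k2/p1/p2), and the exact mapping onto the new metadata fields is an
assumption:

    import numpy as np

    def project_mei(points_cam, xi, k1, k2, p1, p2, gamma1, gamma2, u0, v0):
        """Project Nx3 camera-frame points with the unified (MEI) fisheye model.

        Sketch of the standard MEI formulation; how these parameters map onto
        FisheyeMEIProjection/FisheyeMEIDistortion here is assumed.
        """
        x, y, z = points_cam[:, 0], points_cam[:, 1], points_cam[:, 2]
        # 1. Project onto the unit sphere, then onto the normalized plane
        #    shifted along the optical axis by the mirror parameter xi.
        d = np.sqrt(x * x + y * y + z * z)
        mx, my = x / (z + xi * d), y / (z + xi * d)
        # 2. Apply radial-tangential distortion in normalized coordinates.
        r2 = mx * mx + my * my
        radial = 1.0 + k1 * r2 + k2 * r2 * r2
        dx = 2.0 * p1 * mx * my + p2 * (r2 + 2.0 * mx * mx)
        dy = p1 * (r2 + 2.0 * my * my) + 2.0 * p2 * mx * my
        mx, my = mx * radial + dx, my * radial + dy
        # 3. Generalized camera projection onto the image plane.
        return np.stack([gamma1 * mx + u0, gamma2 * my + v0], axis=-1)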
--- notebooks/bev_matplotlib.ipynb | 14 +- notebooks/bev_render.ipynb | 2 +- notebooks/camera_matplotlib.ipynb | 20 +- notebooks/camera_render.ipynb | 2 +- src/py123d/common/utils/enums.py | 14 + .../conversion/dataset_converter_config.py | 18 +- .../datasets/av2/av2_sensor_converter.py | 42 +- .../conversion/datasets/av2/av2_sensor_io.py | 2 +- .../datasets/av2/utils/av2_constants.py | 20 +- .../datasets/kitti360/kitti360_converter.py | 595 ++++++++++-------- .../datasets/kitti360/kitti360_sensor_io.py | 12 +- .../kitti360/utils/kitti360_helper.py | 21 +- .../kitti360/utils/preprocess_detection.py | 53 +- .../datasets/nuplan/nuplan_converter.py | 36 +- .../datasets/nuplan/nuplan_sensor_io.py | 6 +- .../datasets/nuplan/utils/nuplan_constants.py | 2 +- .../datasets/nuscenes/nuscenes_converter.py | 22 +- .../datasets/nuscenes/nuscenes_sensor_io.py | 10 +- .../nuscenes/utils/nuscenes_constants.py | 14 +- .../datasets/pandaset/pandaset_converter.py | 20 +- .../datasets/pandaset/pandaset_sensor_io.py | 8 +- .../pandaset/utils/pandaset_constants.py | 16 +- .../datasets/pandaset/utils/pandaset_utlis.py | 10 +- .../datasets/wopd/utils/wopd_constants.py | 14 +- .../waymo_map_utils/wopd_map_utils copy.py | 390 ------------ .../datasets/wopd/waymo_sensor_io.py | 4 +- .../datasets/wopd/wopd_converter.py | 16 +- .../log_writer/abstract_log_writer.py | 8 +- .../conversion/log_writer/arrow_log_writer.py | 124 +++- .../conversion/log_writer/utils/__init__.py | 0 .../registry/lidar_index_registry.py | 16 +- .../sensor_io/camera/jpeg_camera_io.py | 2 +- .../sensor_io/lidar/draco_lidar_io.py | 2 +- .../sensor_io/lidar/file_lidar_io.py | 2 +- .../sensor_io/lidar/laz_lidar_io.py | 2 +- src/py123d/datatypes/scene/abstract_scene.py | 28 +- .../datatypes/scene/arrow/arrow_scene.py | 29 +- .../scene/arrow/arrow_scene_builder.py | 4 +- .../scene/arrow/utils/arrow_getters.py | 14 +- src/py123d/datatypes/scene/scene_filter.py | 35 +- src/py123d/datatypes/scene/scene_metadata.py | 48 +- src/py123d/datatypes/sensors/__init__.py | 15 +- .../datatypes/sensors/camera/__init__.py | 0 src/py123d/datatypes/sensors/camera/utils.py | 42 -- .../{camera => }/fisheye_mei_camera.py | 5 +- .../datatypes/sensors/{lidar => }/lidar.py | 0 .../datatypes/sensors/lidar/__init__.py | 0 .../datatypes/sensors/lidar/lidar_index.py | 103 --- .../sensors/{camera => }/pinhole_camera.py | 25 +- .../datasets/av2_sensor_dataset.yaml | 14 +- .../conversion/datasets/carla_dataset.yaml | 35 -- .../conversion/datasets/kitti360_dataset.yaml | 46 +- .../conversion/datasets/nuplan_dataset.yaml | 10 +- .../datasets/nuplan_mini_dataset.yaml | 12 +- .../conversion/datasets/nuscenes_dataset.yaml | 17 +- .../datasets/nuscenes_mini_dataset.yaml | 19 +- .../conversion/datasets/pandaset_dataset.yaml | 21 +- .../conversion/datasets/wopd_dataset.yaml | 9 +- src/py123d/visualization/matplotlib/camera.py | 2 +- .../viser/elements/sensor_elements.py | 8 +- .../visualization/viser/viser_config.py | 26 +- .../visualization/viser/viser_viewer.py | 2 +- test_viser.py | 4 +- 63 files changed, 885 insertions(+), 1227 deletions(-) delete mode 100644 src/py123d/conversion/datasets/wopd/waymo_map_utils/wopd_map_utils copy.py delete mode 100644 src/py123d/conversion/log_writer/utils/__init__.py delete mode 100644 src/py123d/datatypes/sensors/camera/__init__.py delete mode 100644 src/py123d/datatypes/sensors/camera/utils.py rename src/py123d/datatypes/sensors/{camera => }/fisheye_mei_camera.py (98%) rename src/py123d/datatypes/sensors/{lidar => }/lidar.py (100%) delete mode 
100644 src/py123d/datatypes/sensors/lidar/__init__.py delete mode 100644 src/py123d/datatypes/sensors/lidar/lidar_index.py rename src/py123d/datatypes/sensors/{camera => }/pinhole_camera.py (96%) delete mode 100644 src/py123d/script/config/conversion/datasets/carla_dataset.yaml diff --git a/notebooks/bev_matplotlib.ipynb b/notebooks/bev_matplotlib.ipynb index 44acc015..94234bd5 100644 --- a/notebooks/bev_matplotlib.ipynb +++ b/notebooks/bev_matplotlib.ipynb @@ -12,7 +12,7 @@ "\n", "\n", "from py123d.common.multithreading.worker_sequential import Sequential\n", - "# from py123d.datatypes.sensors.camera.pinhole_camera import PinholeCameraType " + "# from py123d.datatypes.sensors.pinhole_camera import PinholeCameraType " ] }, { @@ -282,9 +282,7 @@ "id": "4", "metadata": {}, "outputs": [], - "source": [ - "asd" - ] + "source": [] }, { "cell_type": "code", @@ -301,14 +299,6 @@ "metadata": {}, "outputs": [], "source": [] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "7", - "metadata": {}, - "outputs": [], - "source": [] } ], "metadata": { diff --git a/notebooks/bev_render.ipynb b/notebooks/bev_render.ipynb index c6eb260f..1bc41014 100644 --- a/notebooks/bev_render.ipynb +++ b/notebooks/bev_render.ipynb @@ -11,7 +11,7 @@ "from py123d.datatypes.scene.scene_filter import SceneFilter\n", "\n", "from py123d.common.multithreading.worker_sequential import Sequential\n", - "# from py123d.datatypes.sensors.camera.pinhole_camera import PinholeCameraType " + "# from py123d.datatypes.sensors.pinhole_camera import PinholeCameraType " ] }, { diff --git a/notebooks/camera_matplotlib.ipynb b/notebooks/camera_matplotlib.ipynb index f9a0433a..b33cfdd8 100644 --- a/notebooks/camera_matplotlib.ipynb +++ b/notebooks/camera_matplotlib.ipynb @@ -11,7 +11,7 @@ "from py123d.datatypes.scene.scene_filter import SceneFilter\n", "\n", "from py123d.common.multithreading.worker_sequential import Sequential\n", - "from py123d.datatypes.sensors.camera.pinhole_camera import PinholeCameraType" + "from py123d.datatypes.sensors.pinhole_camera import PinholeCameraType" ] }, { @@ -42,7 +42,7 @@ " history_s=0.0,\n", " timestamp_threshold_s=20,\n", " shuffle=True,\n", - " camera_types=[PinholeCameraType.CAM_F0],\n", + " pinhole_camera_types=[PinholeCameraType.PCAM_F0],\n", ")\n", "scene_builder = ArrowSceneBuilder()\n", "worker = Sequential()\n", @@ -67,24 +67,24 @@ "scene = scenes[0]\n", "\n", "scene: AbstractScene\n", - "print(scene.uuid, scene.available_camera_types)\n", + "print(scene.uuid, scene.available_pinhole_camera_types)\n", "\n", "scale = 3.0\n", "fig, ax = plt.subplots(2, 3, figsize=(scale * 6, scale * 2.5))\n", "\n", "\n", "camera_ax_mapping = {\n", - " PinholeCameraType.CAM_L0: ax[0, 0],\n", - " PinholeCameraType.CAM_F0: ax[0, 1],\n", - " PinholeCameraType.CAM_R0: ax[0, 2],\n", - " PinholeCameraType.CAM_L1: ax[1, 0],\n", - " PinholeCameraType.CAM_B0: ax[1, 1],\n", - " PinholeCameraType.CAM_R1: ax[1, 2],\n", + " PinholeCameraType.PCAM_L0: ax[0, 0],\n", + " PinholeCameraType.PCAM_F0: ax[0, 1],\n", + " PinholeCameraType.PCAM_R0: ax[0, 2],\n", + " PinholeCameraType.PCAM_L1: ax[1, 0],\n", + " PinholeCameraType.PCAM_B0: ax[1, 1],\n", + " PinholeCameraType.PCAM_R1: ax[1, 2],\n", "}\n", "\n", "\n", "for camera_type, ax_ in camera_ax_mapping.items():\n", - " camera = scene.get_camera_at_iteration(iteration, camera_type)\n", + " camera = scene.get_pinhole_camera_at_iteration(iteration, camera_type)\n", " box_detections = scene.get_box_detections_at_iteration(iteration)\n", " ego_state = 
scene.get_ego_state_at_iteration(iteration)\n", diff --git a/notebooks/camera_render.ipynb b/notebooks/camera_render.ipynb index 4cb5fd50..4365c424 100644 --- a/notebooks/camera_render.ipynb +++ b/notebooks/camera_render.ipynb @@ -11,7 +11,7 @@ "from py123d.datatypes.scene.scene_filter import SceneFilter\n", "\n", "from py123d.common.multithreading.worker_sequential import Sequential\n", - "from py123d.datatypes.sensors.camera.pinhole_camera import PinholeCameraType\n", + "from py123d.datatypes.sensors.pinhole_camera import PinholeCameraType\n", "\n", "KITTI360_DATA_ROOT = \"/home/daniel/kitti_360/KITTI-360\"" ] diff --git a/src/py123d/common/utils/enums.py b/src/py123d/common/utils/enums.py index 33300c00..9f7d233e 100644 --- a/src/py123d/common/utils/enums.py +++ b/src/py123d/common/utils/enums.py @@ -2,6 +2,8 @@ from enum import IntEnum +from typing import Union + class classproperty(object): def __init__(self, f): @@ -27,3 +29,15 @@ def deserialize(cls, key: str) -> SerialIntEnum: def from_int(cls, value: int) -> SerialIntEnum: """Get the enum from an int.""" return cls(value) + + @classmethod + def from_arbitrary(cls, value: Union[int, str, SerialIntEnum]) -> SerialIntEnum: + """Get the enum from an int, string, or enum instance.""" + if isinstance(value, cls): + return value + elif isinstance(value, int): + return cls.from_int(value) + elif isinstance(value, str): + return cls.deserialize(value) + else: + raise ValueError(f"Invalid value for {cls.__name__}: {value}") diff --git a/src/py123d/conversion/dataset_converter_config.py b/src/py123d/conversion/dataset_converter_config.py index 6539e3e4..d4924b01 100644 --- a/src/py123d/conversion/dataset_converter_config.py +++ b/src/py123d/conversion/dataset_converter_config.py @@ -23,9 +23,13 @@ class DatasetConverterConfig: # Traffic Lights include_traffic_lights: bool = False - # Cameras - include_cameras: bool = False - camera_store_option: Literal["path", "binary", "mp4"] = "path" + # Pinhole Cameras + include_pinhole_cameras: bool = False + pinhole_camera_store_option: Literal["path", "binary", "mp4"] = "path" + + # Fisheye MEI Cameras + include_fisheye_mei_cameras: bool = False + fisheye_mei_camera_store_option: Literal["path", "binary", "mp4"] = "path" # LiDARs include_lidars: bool = False @@ -37,11 +41,13 @@ include_route: bool = False def __post_init__(self): - assert self.camera_store_option != "mp4", "MP4 format is not yet supported, but planned for future releases." - assert self.camera_store_option in [ + assert ( + self.pinhole_camera_store_option != "mp4" + ), "MP4 format is not yet supported, but planned for future releases." + assert self.pinhole_camera_store_option in [ "path", "binary", - ], f"Invalid camera store option, got {self.camera_store_option}." + ], f"Invalid camera store option, got {self.pinhole_camera_store_option}." 
assert self.lidar_store_option in [ "path", diff --git a/src/py123d/conversion/datasets/av2/av2_sensor_converter.py b/src/py123d/conversion/datasets/av2/av2_sensor_converter.py index 8fd3fd9c..9891e10c 100644 --- a/src/py123d/conversion/datasets/av2/av2_sensor_converter.py +++ b/src/py123d/conversion/datasets/av2/av2_sensor_converter.py @@ -21,18 +21,18 @@ ) from py123d.conversion.log_writer.abstract_log_writer import AbstractLogWriter, LiDARData from py123d.conversion.map_writer.abstract_map_writer import AbstractMapWriter -from py123d.conversion.registry.lidar_index_registry import AVSensorLidarIndex +from py123d.conversion.registry.lidar_index_registry import AVSensorLiDARIndex from py123d.datatypes.detections.box_detection_types import BoxDetectionType from py123d.datatypes.detections.box_detections import BoxDetectionMetadata, BoxDetectionSE3, BoxDetectionWrapper from py123d.datatypes.maps.map_metadata import MapMetadata from py123d.datatypes.scene.scene_metadata import LogMetadata -from py123d.datatypes.sensors.camera.pinhole_camera import ( +from py123d.datatypes.sensors.lidar import LiDARMetadata, LiDARType +from py123d.datatypes.sensors.pinhole_camera import ( PinholeCameraMetadata, PinholeCameraType, PinholeDistortion, PinholeIntrinsics, ) -from py123d.datatypes.sensors.lidar.lidar import LiDARMetadata, LiDARType from py123d.datatypes.time.time_point import TimePoint from py123d.datatypes.vehicle_state.ego_state import DynamicStateSE3, EgoStateSE3 from py123d.datatypes.vehicle_state.vehicle_parameters import ( @@ -118,7 +118,7 @@ def convert_log(self, log_index: int, log_writer: AbstractLogWriter) -> None: location=map_metadata.location, timestep_seconds=0.1, vehicle_parameters=get_av2_ford_fusion_hybrid_parameters(), - camera_metadata=_get_av2_camera_metadata(source_log_path, self.dataset_converter_config), + pinhole_camera_metadata=_get_av2_pinhole_camera_metadata(source_log_path, self.dataset_converter_config), lidar_metadata=_get_av2_lidar_metadata(source_log_path, self.dataset_converter_config), map_metadata=map_metadata, ) @@ -151,7 +151,7 @@ def convert_log(self, log_index: int, log_writer: AbstractLogWriter) -> None: timestamp=TimePoint.from_ns(int(lidar_timestamp_ns)), ego_state=ego_state, box_detections=_extract_av2_sensor_box_detections(annotations_df, lidar_timestamp_ns, ego_state), - cameras=_extract_av2_sensor_camera( + pinhole_cameras=_extract_av2_sensor_pinhole_cameras( lidar_timestamp_ns, egovehicle_se3_sensor_df, synchronization_df, @@ -185,27 +185,25 @@ def _get_av2_sensor_map_metadata(split: str, source_log_path: Path) -> MapMetada ) -def _get_av2_camera_metadata( +def _get_av2_pinhole_camera_metadata( source_log_path: Path, dataset_converter_config: DatasetConverterConfig ) -> Dict[PinholeCameraType, PinholeCameraMetadata]: - camera_metadata: Dict[PinholeCameraType, PinholeCameraMetadata] = {} - - if dataset_converter_config.include_cameras: + pinhole_camera_metadata: Dict[PinholeCameraType, PinholeCameraMetadata] = {} + if dataset_converter_config.include_pinhole_cameras: intrinsics_file = source_log_path / "calibration" / "intrinsics.feather" intrinsics_df = pd.read_feather(intrinsics_file) for _, row in intrinsics_df.iterrows(): row = row.to_dict() camera_type = AV2_CAMERA_TYPE_MAPPING[row["sensor_name"]] - camera_metadata[camera_type] = PinholeCameraMetadata( + pinhole_camera_metadata[camera_type] = PinholeCameraMetadata( camera_type=camera_type, width=row["width_px"], height=row["height_px"], intrinsics=PinholeIntrinsics(fx=row["fx_px"], fy=row["fy_px"], 
cx=row["cx_px"], cy=row["cy_px"]), distortion=PinholeDistortion(k1=row["k1"], k2=row["k2"], p1=0.0, p2=0.0, k3=row["k3"]), ) - - return camera_metadata + return pinhole_camera_metadata def _get_av2_lidar_metadata( @@ -226,7 +224,7 @@ def _get_av2_lidar_metadata( # top lidar: metadata[LiDARType.LIDAR_TOP] = LiDARMetadata( lidar_type=LiDARType.LIDAR_TOP, - lidar_index=AVSensorLidarIndex, + lidar_index=AVSensorLiDARIndex, extrinsic=_row_dict_to_state_se3( calibration_df[calibration_df["sensor_name"] == "up_lidar"].iloc[0].to_dict() ), @@ -234,7 +232,7 @@ def _get_av2_lidar_metadata( # down lidar: metadata[LiDARType.LIDAR_DOWN] = LiDARMetadata( lidar_type=LiDARType.LIDAR_DOWN, - lidar_index=AVSensorLidarIndex, + lidar_index=AVSensorLiDARIndex, extrinsic=_row_dict_to_state_se3( calibration_df[calibration_df["sensor_name"] == "down_lidar"].iloc[0].to_dict() ), @@ -321,7 +319,7 @@ def _extract_av2_sensor_ego_state(city_se3_egovehicle_df: pd.DataFrame, lidar_ti ) -def _extract_av2_sensor_camera( +def _extract_av2_sensor_pinhole_cameras( lidar_timestamp_ns: int, egovehicle_se3_sensor_df: pd.DataFrame, synchronization_df: pd.DataFrame, @@ -333,7 +331,7 @@ def _extract_av2_sensor_camera( split = source_log_path.parent.name log_id = source_log_path.name - if dataset_converter_config.include_cameras: + if dataset_converter_config.include_pinhole_cameras: av2_sensor_data_root = source_log_path.parent.parent for _, row in egovehicle_se3_sensor_df.iterrows(): @@ -341,15 +339,15 @@ def _extract_av2_sensor_camera( if row["sensor_name"] not in AV2_CAMERA_TYPE_MAPPING: continue - camera_name = row["sensor_name"] - camera_type = AV2_CAMERA_TYPE_MAPPING[camera_name] + pinhole_camera_name = row["sensor_name"] + pinhole_camera_type = AV2_CAMERA_TYPE_MAPPING[pinhole_camera_name] relative_image_path = find_closest_target_fpath( split=split, log_id=log_id, src_sensor_name="lidar", src_timestamp_ns=lidar_timestamp_ns, - target_sensor_name=camera_name, + target_sensor_name=pinhole_camera_name, synchronization_df=synchronization_df, ) if relative_image_path is not None: @@ -359,12 +357,12 @@ def _extract_av2_sensor_camera( # TODO: Adjust for finer IMU timestamps to correct the camera extrinsic. 
camera_extrinsic = _row_dict_to_state_se3(row) camera_data = None - if dataset_converter_config.camera_store_option == "path": + if dataset_converter_config.pinhole_camera_store_option == "path": camera_data = str(relative_image_path) - elif dataset_converter_config.camera_store_option == "binary": + elif dataset_converter_config.pinhole_camera_store_option == "binary": with open(absolute_image_path, "rb") as f: camera_data = f.read() - camera_dict[camera_type] = camera_data, camera_extrinsic + camera_dict[pinhole_camera_type] = camera_data, camera_extrinsic return camera_dict diff --git a/src/py123d/conversion/datasets/av2/av2_sensor_io.py b/src/py123d/conversion/datasets/av2/av2_sensor_io.py index a17e4892..81a3de3a 100644 --- a/src/py123d/conversion/datasets/av2/av2_sensor_io.py +++ b/src/py123d/conversion/datasets/av2/av2_sensor_io.py @@ -4,7 +4,7 @@ import numpy as np import pandas as pd -from py123d.datatypes.sensors.lidar.lidar import LiDARType +from py123d.datatypes.sensors.lidar import LiDARType def load_av2_sensor_lidar_pcs_from_file(feather_path: Union[Path, str]) -> Dict[LiDARType, np.ndarray]: diff --git a/src/py123d/conversion/datasets/av2/utils/av2_constants.py b/src/py123d/conversion/datasets/av2/utils/av2_constants.py index 7f81f48c..5ac7af9d 100644 --- a/src/py123d/conversion/datasets/av2/utils/av2_constants.py +++ b/src/py123d/conversion/datasets/av2/utils/av2_constants.py @@ -3,7 +3,7 @@ from py123d.common.utils.enums import SerialIntEnum from py123d.datatypes.detections.box_detection_types import BoxDetectionType from py123d.datatypes.maps.map_datatypes import RoadLineType -from py123d.datatypes.sensors.camera.pinhole_camera import PinholeCameraType +from py123d.datatypes.sensors.pinhole_camera import PinholeCameraType AV2_SENSOR_SPLITS: Set[str] = {"av2-sensor_train", "av2-sensor_val", "av2-sensor_test"} @@ -80,15 +80,15 @@ class AV2SensorBoxDetectionType(SerialIntEnum): AV2_CAMERA_TYPE_MAPPING: Dict[str, PinholeCameraType] = { - "ring_front_center": PinholeCameraType.CAM_F0, - "ring_front_left": PinholeCameraType.CAM_L0, - "ring_front_right": PinholeCameraType.CAM_R0, - "ring_side_left": PinholeCameraType.CAM_L1, - "ring_side_right": PinholeCameraType.CAM_R1, - "ring_rear_left": PinholeCameraType.CAM_L2, - "ring_rear_right": PinholeCameraType.CAM_R2, - "stereo_front_left": PinholeCameraType.CAM_STEREO_L, - "stereo_front_right": PinholeCameraType.CAM_STEREO_R, + "ring_front_center": PinholeCameraType.PCAM_F0, + "ring_front_left": PinholeCameraType.PCAM_L0, + "ring_front_right": PinholeCameraType.PCAM_R0, + "ring_side_left": PinholeCameraType.PCAM_L1, + "ring_side_right": PinholeCameraType.PCAM_R1, + "ring_rear_left": PinholeCameraType.PCAM_L2, + "ring_rear_right": PinholeCameraType.PCAM_R2, + "stereo_front_left": PinholeCameraType.PCAM_STEREO_L, + "stereo_front_right": PinholeCameraType.PCAM_STEREO_R, } # AV2_LIDAR_TYPES: Dict[str, str] = { diff --git a/src/py123d/conversion/datasets/kitti360/kitti360_converter.py b/src/py123d/conversion/datasets/kitti360/kitti360_converter.py index 2bda2124..d525ab3a 100644 --- a/src/py123d/conversion/datasets/kitti360/kitti360_converter.py +++ b/src/py123d/conversion/datasets/kitti360/kitti360_converter.py @@ -1,6 +1,5 @@ import datetime import logging -import os import pickle import re import xml.etree.ElementTree as ET @@ -17,7 +16,7 @@ from py123d.conversion.datasets.kitti360.utils.kitti360_helper import ( KITTI3602NUPLAN_IMU_CALIBRATION, KITTI360Bbox3D, - get_lidar_extrinsic, + get_kitti360_lidar_extrinsic, ) from 
py123d.conversion.datasets.kitti360.utils.kitti360_labels import ( BBOX_LABLES_TO_DETECTION_NAME_DICT, @@ -27,7 +26,7 @@ from py123d.conversion.datasets.kitti360.utils.preprocess_detection import process_detection from py123d.conversion.log_writer.abstract_log_writer import AbstractLogWriter, LiDARData from py123d.conversion.map_writer.abstract_map_writer import AbstractMapWriter -from py123d.conversion.registry.lidar_index_registry import Kitti360LidarIndex +from py123d.conversion.registry.lidar_index_registry import Kitti360LiDARIndex from py123d.datatypes.detections.box_detections import ( BoxDetectionMetadata, BoxDetectionSE3, @@ -35,19 +34,19 @@ ) from py123d.datatypes.maps.map_metadata import MapMetadata from py123d.datatypes.scene.scene_metadata import LogMetadata -from py123d.datatypes.sensors.camera.fisheye_mei_camera import ( +from py123d.datatypes.sensors.fisheye_mei_camera import ( FisheyeMEICameraMetadata, FisheyeMEICameraType, FisheyeMEIDistortion, FisheyeMEIProjection, ) -from py123d.datatypes.sensors.camera.pinhole_camera import ( +from py123d.datatypes.sensors.lidar import LiDARMetadata, LiDARType +from py123d.datatypes.sensors.pinhole_camera import ( PinholeCameraMetadata, PinholeCameraType, PinholeDistortion, PinholeIntrinsics, ) -from py123d.datatypes.sensors.lidar.lidar import LiDARMetadata, LiDARType from py123d.datatypes.time.time_point import TimePoint from py123d.datatypes.vehicle_state.ego_state import DynamicStateSE3, EgoStateSE3 from py123d.datatypes.vehicle_state.vehicle_parameters import ( @@ -59,15 +58,32 @@ KITTI360_DT: Final[float] = 0.1 -KITTI360_DATA_ROOT = Path(os.environ["KITTI360_DATA_ROOT"]) +KITTI360_PINHOLE_CAMERA_TYPES = { + PinholeCameraType.PCAM_STEREO_L: "image_00", + PinholeCameraType.PCAM_STEREO_R: "image_01", +} -KITTI360_CAMERA_TYPES = { - PinholeCameraType.CAM_STEREO_L: "image_00", - PinholeCameraType.CAM_STEREO_R: "image_01", - FisheyeMEICameraType.CAM_L: "image_02", - FisheyeMEICameraType.CAM_R: "image_03", +KITTI360_FISHEYE_MEI_CAMERA_TYPES = { + FisheyeMEICameraType.FCAM_L: "image_02", + FisheyeMEICameraType.FCAM_R: "image_03", } +KITTI360_SPLITS: List[str] = ["kitti360_train", "kitti360_val", "kitti360_test"] +KITTI360_ALL_SEQUENCES: Final[List[str]] = [ + "2013_05_28_drive_0000_sync", + "2013_05_28_drive_0002_sync", + "2013_05_28_drive_0003_sync", + "2013_05_28_drive_0004_sync", + "2013_05_28_drive_0005_sync", + "2013_05_28_drive_0006_sync", + "2013_05_28_drive_0007_sync", + "2013_05_28_drive_0008_sync", + "2013_05_28_drive_0009_sync", + "2013_05_28_drive_0010_sync", + "2013_05_28_drive_0018_sync", +] + +DIR_ROOT = "root" DIR_2D_RAW = "data_2d_raw" DIR_2D_SMT = "data_2d_semantics" DIR_3D_RAW = "data_3d_raw" @@ -76,50 +92,27 @@ DIR_POSES = "data_poses" DIR_CALIB = "calibration" -PATH_2D_RAW_ROOT: Path = KITTI360_DATA_ROOT / DIR_2D_RAW -PATH_2D_SMT_ROOT: Path = KITTI360_DATA_ROOT / DIR_2D_SMT -PATH_3D_RAW_ROOT: Path = KITTI360_DATA_ROOT / DIR_3D_RAW -PATH_3D_SMT_ROOT: Path = KITTI360_DATA_ROOT / DIR_3D_SMT -PATH_3D_BBOX_ROOT: Path = KITTI360_DATA_ROOT / DIR_3D_BBOX -PATH_POSES_ROOT: Path = KITTI360_DATA_ROOT / DIR_POSES -PATH_CALIB_ROOT: Path = KITTI360_DATA_ROOT / DIR_CALIB - -KITTI360_REQUIRED_MODALITY_ROOTS: Dict[str, Path] = { - DIR_2D_RAW: PATH_2D_RAW_ROOT, - DIR_3D_RAW: PATH_3D_RAW_ROOT, - DIR_POSES: PATH_POSES_ROOT, - DIR_3D_BBOX: PATH_3D_BBOX_ROOT / "train", -} - -KITTI360_ALL_SEQUENCES: Final[List[str]] = [ - "2013_05_28_drive_0000_sync", - "2013_05_28_drive_0002_sync", - "2013_05_28_drive_0003_sync", - # 
"2013_05_28_drive_0004_sync", - # "2013_05_28_drive_0005_sync", - # "2013_05_28_drive_0006_sync", - # "2013_05_28_drive_0007_sync", - # "2013_05_28_drive_0008_sync", - # "2013_05_28_drive_0009_sync", - # "2013_05_28_drive_0010_sync", - # "2013_05_28_drive_0018_sync", -] - -# Create a temporary directory for detection preprocessing -# PREPROCESS_DETECTION_DIR = Path(tempfile.mkdtemp(prefix="kitti360_detection_")) -PREPROCESS_DETECTION_DIR = Path("/home/daniel/kitti360_detection_temp") +def _get_kitti360_paths_from_root(kitti_data_root: Path) -> Dict[str, Path]: + return { + DIR_ROOT: kitti_data_root, + DIR_2D_RAW: kitti_data_root / DIR_2D_RAW, + DIR_2D_SMT: kitti_data_root / DIR_2D_SMT, + DIR_3D_RAW: kitti_data_root / DIR_3D_RAW, + DIR_3D_SMT: kitti_data_root / DIR_3D_SMT, + DIR_3D_BBOX: kitti_data_root / DIR_3D_BBOX, + DIR_POSES: kitti_data_root / DIR_POSES, + DIR_CALIB: kitti_data_root / DIR_CALIB, + } -def get_kitti360_map_metadata(split: str, log_name: str) -> MapMetadata: - return MapMetadata( - dataset="kitti360", - split=split, - log_name=log_name, - location=log_name, - map_has_z=True, - map_is_local=True, - ) +def _get_kitti360_required_modality_roots(kitti360_folders: Dict[str, Path]) -> Dict[str, Path]: + return { + DIR_2D_RAW: kitti360_folders[DIR_2D_RAW], + DIR_3D_RAW: kitti360_folders[DIR_3D_RAW], + DIR_POSES: kitti360_folders[DIR_POSES], + DIR_3D_BBOX: kitti360_folders[DIR_3D_BBOX] / "train", + } class Kitti360Converter(AbstractDatasetConverter): @@ -127,40 +120,41 @@ def __init__( self, splits: List[str], kitti360_data_root: Union[Path, str], + detection_cache_root: Union[Path, str], + detection_radius: float, dataset_converter_config: DatasetConverterConfig, - kitti36_sequences: List[str] = KITTI360_ALL_SEQUENCES, + train_sequences: List[str], + val_sequences: List[str], + test_sequences: List[str], ) -> None: super().__init__(dataset_converter_config) for split in splits: - assert ( - split in self.get_available_splits() - ), f"Split {split} is not available. Available splits: {self.available_splits}" + assert split in KITTI360_SPLITS, f"Split {split} is not available. Available splits: {KITTI360_SPLITS}" self._splits: List[str] = splits - self._log_path: Path = Path(kitti360_data_root) - self._kitti36_sequences: List[str] = kitti36_sequences - self._log_paths_and_split: List[Tuple[Path, str]] = self._collect_log_paths() + self._kitti360_data_root: Path = Path(kitti360_data_root) + self._kitti360_folders: Dict[str, Path] = _get_kitti360_paths_from_root(self._kitti360_data_root) - self._total_maps = len(self._log_paths_and_split) # Each log has its own map - self._total_logs = len(self._log_paths_and_split) + # NOTE: We preprocess detections into cache directory to speed up repeated conversions + # The bounding boxes are preprocessed into a per-frame format based on the ego distance and + # visibility based on the lidar point cloud. + self._detection_cache_root: Path = Path(detection_cache_root) + self._detection_radius: float = detection_radius - def _collect_log_paths(self) -> List[Tuple[Path, str]]: - """ - Collect candidate sequence folders under data_2d_raw that end with '_sync', - and keep only those sequences that are present in ALL required modality roots - (e.g., data_2d_semantics, data_3d_raw, etc.). - Returns a list of (log_path, split) tuples. 
- """ - missing_roots = [str(p) for p in KITTI360_REQUIRED_MODALITY_ROOTS.values() if not p.exists()] - if missing_roots: - raise FileNotFoundError(f"KITTI-360 required roots missing: {missing_roots}") + self._train_sequences: List[str] = train_sequences + self._val_sequences: List[str] = val_sequences + self._test_sequences: List[str] = test_sequences - # Enumerate candidate sequences from data_2d_raw - candidates = sorted( - p - for p in PATH_2D_RAW_ROOT.iterdir() - if p.is_dir() and p.name.endswith("_sync") and p.name in self._kitti36_sequences - ) + self._log_names_and_split: List[Tuple[str, str]] = self._collect_valid_logs() + self._total_maps = len(self._log_names_and_split) # Each log has its own map + self._total_logs = len(self._log_names_and_split) + + def _collect_valid_logs(self) -> List[Tuple[str, str]]: + """Helper function to collect valid KITTI sequences ("logs") from the dataset root + + :raises FileNotFoundError: If required modality roots are missing + :return: A list of tuples containing the log name and split name + """ def _has_modality(seq_name: str, modality_name: str, root: Path) -> bool: if modality_name == DIR_3D_BBOX: @@ -170,29 +164,45 @@ def _has_modality(seq_name: str, modality_name: str, root: Path) -> bool: else: return (root / seq_name).exists() + required_modality_roots = _get_kitti360_required_modality_roots(self._kitti360_folders) + missing_roots = [str(p) for p in required_modality_roots.values() if not p.exists()] + if missing_roots: + raise FileNotFoundError(f"KITTI-360 required roots missing: {missing_roots}") + + # Find all sequences in the 2D raw data directory, and add to split + split_sequence_candidates: Dict[str, List[str]] = defaultdict(list) + for sequence_path in required_modality_roots[DIR_2D_RAW].iterdir(): + if sequence_path.is_dir() and sequence_path.name.endswith("_sync"): + seq_name = sequence_path.name + if seq_name in self._train_sequences: + split_sequence_candidates["kitti360_train"].append(seq_name) + elif seq_name in self._val_sequences: + split_sequence_candidates["kitti360_val"].append(seq_name) + elif seq_name in self._test_sequences: + split_sequence_candidates["kitti360_test"].append(seq_name) + + # Iterate all candidates, check that modalities available, and add to flat list log_paths_and_split: List[Tuple[Path, str]] = [] - for seq_dir in candidates: - seq_name = seq_dir.name - missing_modalities = [ - modality_name - for modality_name, root in KITTI360_REQUIRED_MODALITY_ROOTS.items() - if not _has_modality(seq_name, modality_name, root) - ] - if not missing_modalities: - log_paths_and_split.append((seq_dir, "kitti360")) - else: - logging.info( - f"Sequence '{seq_name}' skipped: missing modalities {missing_modalities}. " - f"Root: {KITTI360_DATA_ROOT}" - ) + for split, sequence_names in split_sequence_candidates.items(): + if split not in self._splits: + continue + for sequence_name in sequence_names: + missing_modalities = [ + modality_name + for modality_name, root in required_modality_roots.items() + if not _has_modality(sequence_name, modality_name, root) + ] + if len(missing_modalities) == 0: + log_paths_and_split.append((sequence_name, split)) + else: + logging.info( + f"Sequence '{sequence_name}' skipped: missing modalities {missing_modalities}. 
" + f"Root: {self._kitti360_data_root}" + ) logging.info(f"Valid sequences found: {len(log_paths_and_split)}") return log_paths_and_split - def get_available_splits(self) -> List[str]: - """Returns a list of available raw data types.""" - return ["kitti360"] - def get_number_of_maps(self) -> int: """Returns the number of available raw data maps for conversion.""" return self._total_maps @@ -207,15 +217,11 @@ def convert_map(self, map_index: int, map_writer: AbstractMapWriter) -> None: :param map_index: The index of the map to convert. :param map_writer: The map writer to use for writing the converted map. """ - source_log_path, split = self._log_paths_and_split[map_index] - log_name = source_log_path.stem - - map_metadata = get_kitti360_map_metadata(split, log_name) - + log_name, split = self._log_names_and_split[map_index] + map_metadata = _get_kitti360_map_metadata(split, log_name) map_needs_writing = map_writer.reset(self.dataset_converter_config, map_metadata) if map_needs_writing: convert_kitti360_map_with_writer(log_name, map_writer) - map_writer.close() def convert_log(self, log_index: int, log_writer: AbstractLogWriter) -> None: @@ -224,8 +230,7 @@ def convert_log(self, log_index: int, log_writer: AbstractLogWriter) -> None: :param log_index: The index of the log to convert. :param log_writer: The log writer to use for writing the converted log. """ - source_log_path, split = self._log_paths_and_split[log_index] - log_name = source_log_path.stem + log_name, split = self._log_names_and_split[log_index] # Create log metadata log_metadata = LogMetadata( @@ -235,60 +240,131 @@ def convert_log(self, log_index: int, log_writer: AbstractLogWriter) -> None: location=log_name, timestep_seconds=KITTI360_DT, vehicle_parameters=get_kitti360_vw_passat_parameters(), - camera_metadata=get_kitti360_camera_metadata(), - lidar_metadata=get_kitti360_lidar_metadata(), - map_metadata=get_kitti360_map_metadata(split, log_name), + pinhole_camera_metadata=_get_kitti360_pinhole_camera_metadata( + self._kitti360_folders, + self.dataset_converter_config, + ), + fisheye_mei_camera_metadata=_get_kitti360_fisheye_mei_camera_metadata( + self._kitti360_folders, + self.dataset_converter_config, + ), + lidar_metadata=_get_kitti360_lidar_metadata( + self._kitti360_folders, + self.dataset_converter_config, + ), + map_metadata=_get_kitti360_map_metadata(split, log_name), ) log_needs_writing = log_writer.reset(self.dataset_converter_config, log_metadata) + if log_needs_writing: - _write_recording_table(log_name, log_writer, self.dataset_converter_config) + ts_list: List[TimePoint] = _read_timestamps(log_name, self._kitti360_folders) + ego_state_all, valid_timestamp = _extract_ego_state_all(log_name, self._kitti360_folders) + ego_states_xyz = np.array([ego_state.center.array[:3] for ego_state in ego_state_all], dtype=np.float64) + box_detection_wrapper_all = _extract_kitti360_box_detections_all( + log_name, + len(ts_list), + ego_states_xyz, + valid_timestamp, + self._kitti360_folders, + self._detection_cache_root, + self._detection_radius, + ) + camera_calibration = _load_kitti_360_calibration(self._kitti360_data_root) + logging.info(f"Number of valid timestamps with ego states: {len(valid_timestamp)}") + + for idx in range(len(valid_timestamp)): + valid_idx = valid_timestamp[idx] + + pinhole_cameras = _extract_kitti360_pinhole_cameras( + log_name, + valid_idx, + camera_calibration, + self._kitti360_folders, + self.dataset_converter_config, + ) + fisheye_cameras = _extract_kitti360_fisheye_mei_cameras( + log_name, + 
valid_idx, + camera_calibration, + self._kitti360_folders, + self.dataset_converter_config, + ) + lidars = _extract_kitti360_lidar( + log_name, + valid_idx, + self._kitti360_folders, + self.dataset_converter_config, + ) + + log_writer.write( + timestamp=ts_list[valid_idx], + ego_state=ego_state_all[idx], + box_detections=box_detection_wrapper_all[valid_idx], + traffic_lights=None, + pinhole_cameras=pinhole_cameras, + fisheye_mei_cameras=fisheye_cameras, + lidars=lidars, + scenario_tags=None, + route_lane_group_ids=None, + ) log_writer.close() -def get_kitti360_camera_metadata() -> ( - Dict[Union[PinholeCameraType, FisheyeMEICameraType], Union[PinholeCameraMetadata, FisheyeMEICameraMetadata]] -): - - persp = PATH_CALIB_ROOT / "perspective.txt" - - assert persp.exists() - persp_result = {"image_00": {}, "image_01": {}} - - with open(persp, "r") as f: - lines = [ln.strip() for ln in f if ln.strip()] - for ln in lines: - key, value = ln.split(" ", 1) - cam_id = key.split("_")[-1][:2] - if key.startswith("P_rect_"): - persp_result[f"image_{cam_id}"]["intrinsic"] = _read_projection_matrix(ln) - elif key.startswith("S_rect_"): - persp_result[f"image_{cam_id}"]["wh"] = [int(round(float(x))) for x in value.split()] - elif key.startswith("D_"): - persp_result[f"image_{cam_id}"]["distortion"] = [float(x) for x in value.split()] - - fisheye_camera02_path = PATH_CALIB_ROOT / "image_02.yaml" - fisheye_camera03_path = PATH_CALIB_ROOT / "image_03.yaml" - assert fisheye_camera02_path.exists() and fisheye_camera03_path.exists() - fisheye02 = _readYAMLFile(fisheye_camera02_path) - fisheye03 = _readYAMLFile(fisheye_camera03_path) - fisheye_result = {"image_02": fisheye02, "image_03": fisheye03} - - log_cam_infos: Dict[ - Union[PinholeCameraType, FisheyeMEICameraType], Union[PinholeCameraMetadata, FisheyeMEICameraMetadata] - ] = {} - for cam_type, cam_name in KITTI360_CAMERA_TYPES.items(): - if cam_name in ["image_00", "image_01"]: - log_cam_infos[cam_type] = PinholeCameraMetadata( - camera_type=cam_type, - width=persp_result[cam_name]["wh"][0], - height=persp_result[cam_name]["wh"][1], - intrinsics=PinholeIntrinsics.from_camera_matrix(np.array(persp_result[cam_name]["intrinsic"])), - distortion=PinholeDistortion.from_array(np.array(persp_result[cam_name]["distortion"])), +def _get_kitti360_pinhole_camera_metadata( + kitti360_folders: Dict[str, Path], + dataset_converter_config: DatasetConverterConfig, +) -> Dict[PinholeCameraType, PinholeCameraMetadata]: + + pinhole_cam_metadatas: Dict[PinholeCameraType, PinholeCameraMetadata] = {} + if dataset_converter_config.include_pinhole_cameras: + persp = kitti360_folders[DIR_CALIB] / "perspective.txt" + assert persp.exists() + persp_result = {"image_00": {}, "image_01": {}} + + with open(persp, "r") as f: + lines = [ln.strip() for ln in f if ln.strip()] + for ln in lines: + key, value = ln.split(" ", 1) + cam_id = key.split("_")[-1][:2] + if key.startswith("P_rect_"): + persp_result[f"image_{cam_id}"]["intrinsic"] = _read_projection_matrix(ln) + elif key.startswith("S_rect_"): + persp_result[f"image_{cam_id}"]["wh"] = [int(round(float(x))) for x in value.split()] + elif key.startswith("D_"): + persp_result[f"image_{cam_id}"]["distortion"] = [float(x) for x in value.split()] + + for pcam_type, pcam_name in KITTI360_PINHOLE_CAMERA_TYPES.items(): + pinhole_cam_metadatas[pcam_type] = PinholeCameraMetadata( + camera_type=pcam_type, + width=persp_result[pcam_name]["wh"][0], + height=persp_result[pcam_name]["wh"][1], + 
intrinsics=PinholeIntrinsics.from_camera_matrix(np.array(persp_result[pcam_name]["intrinsic"])), + distortion=PinholeDistortion.from_array(np.array(persp_result[pcam_name]["distortion"])), ) - elif cam_name in ["image_02", "image_03"]: - distortion_params = fisheye_result[cam_name]["distortion_parameters"] + + return pinhole_cam_metadatas + + +def _get_kitti360_fisheye_mei_camera_metadata( + kitti360_folders: Dict[str, Path], + dataset_converter_config: DatasetConverterConfig, +) -> Dict[FisheyeMEICameraType, FisheyeMEICameraMetadata]: + fisheye_cam_metadatas: Dict[FisheyeMEICameraType, FisheyeMEICameraMetadata] = {} + if dataset_converter_config.include_fisheye_mei_cameras: + + fisheye_camera02_path = kitti360_folders[DIR_CALIB] / "image_02.yaml" + fisheye_camera03_path = kitti360_folders[DIR_CALIB] / "image_03.yaml" + + assert fisheye_camera02_path.exists() and fisheye_camera03_path.exists() + fisheye02 = _readYAMLFile(fisheye_camera02_path) + fisheye03 = _readYAMLFile(fisheye_camera03_path) + fisheye_result = {"image_02": fisheye02, "image_03": fisheye03} + + for fcam_type, fcam_name in KITTI360_FISHEYE_MEI_CAMERA_TYPES.items(): + + distortion_params = fisheye_result[fcam_name]["distortion_parameters"] distortion = FisheyeMEIDistortion( k1=distortion_params["k1"], k2=distortion_params["k2"], @@ -296,7 +372,7 @@ def get_kitti360_camera_metadata() -> ( p2=distortion_params["p2"], ) - projection_params = fisheye_result[cam_name]["projection_parameters"] + projection_params = fisheye_result[fcam_name]["projection_parameters"] projection = FisheyeMEIProjection( gamma1=projection_params["gamma1"], gamma2=projection_params["gamma2"], @@ -304,16 +380,27 @@ def get_kitti360_camera_metadata() -> ( v0=projection_params["v0"], ) - log_cam_infos[cam_type] = FisheyeMEICameraMetadata( - camera_type=cam_type, - width=fisheye_result[cam_name]["image_width"], - height=fisheye_result[cam_name]["image_height"], - mirror_parameter=fisheye_result[cam_name]["mirror_parameters"], + fisheye_cam_metadatas[fcam_type] = FisheyeMEICameraMetadata( + camera_type=fcam_type, + width=fisheye_result[fcam_name]["image_width"], + height=fisheye_result[fcam_name]["image_height"], + mirror_parameter=fisheye_result[fcam_name]["mirror_parameters"], distortion=distortion, projection=projection, ) - return log_cam_infos + return fisheye_cam_metadatas + + +def _get_kitti360_map_metadata(split: str, log_name: str) -> MapMetadata: + return MapMetadata( + dataset="kitti360", + split=split, + log_name=log_name, + location=log_name, + map_has_z=True, + map_is_local=True, + ) def _read_projection_matrix(p_line: str) -> np.ndarray: @@ -340,55 +427,31 @@ def _readYAMLFile(fileName: Path) -> Dict[str, Any]: return ret -def get_kitti360_lidar_metadata() -> Dict[LiDARType, LiDARMetadata]: +def _get_kitti360_lidar_metadata( + kitti360_folders: Dict[str, Path], + dataset_converter_config: DatasetConverterConfig, +) -> Dict[LiDARType, LiDARMetadata]: metadata: Dict[LiDARType, LiDARMetadata] = {} - extrinsic = get_lidar_extrinsic() - extrinsic_state_se3 = StateSE3.from_transformation_matrix(extrinsic) - extrinsic_state_se3 = _extrinsic_from_imu_to_rear_axle(extrinsic_state_se3) - metadata[LiDARType.LIDAR_TOP] = LiDARMetadata( - lidar_type=LiDARType.LIDAR_TOP, - lidar_index=Kitti360LidarIndex, - extrinsic=extrinsic_state_se3, - ) - return metadata - - -def _write_recording_table( - log_name: str, log_writer: AbstractLogWriter, data_converter_config: DatasetConverterConfig -) -> None: - - ts_list: List[TimePoint] = _read_timestamps(log_name) - 
ego_state_all, valid_timestamp = _extract_ego_state_all(log_name) - ego_states_xyz = np.array([ego_state.center.array[:3] for ego_state in ego_state_all], dtype=np.float64) - box_detection_wrapper_all = _extract_detections(log_name, len(ts_list), ego_states_xyz, valid_timestamp) - logging.info(f"Number of valid timestamps with ego states: {len(valid_timestamp)}") - - for idx in range(len(valid_timestamp)): - valid_idx = valid_timestamp[idx] - - cameras = _extract_cameras(log_name, valid_idx, data_converter_config) - lidars = _extract_lidar(log_name, valid_idx, data_converter_config) - - log_writer.write( - timestamp=ts_list[valid_idx], - ego_state=ego_state_all[idx], - box_detections=box_detection_wrapper_all[valid_idx], - traffic_lights=None, - cameras=cameras, - lidars=lidars, - scenario_tags=None, - route_lane_group_ids=None, + if dataset_converter_config.include_lidars: + extrinsic = get_kitti360_lidar_extrinsic(kitti360_folders[DIR_CALIB]) + extrinsic_state_se3 = StateSE3.from_transformation_matrix(extrinsic) + extrinsic_state_se3 = _extrinsic_from_imu_to_rear_axle(extrinsic_state_se3) + metadata[LiDARType.LIDAR_TOP] = LiDARMetadata( + lidar_type=LiDARType.LIDAR_TOP, + lidar_index=Kitti360LiDARIndex, + extrinsic=extrinsic_state_se3, ) + return metadata -def _read_timestamps(log_name: str) -> Optional[List[TimePoint]]: +def _read_timestamps(log_name: str, kitti360_folders: Dict[str, Path]) -> Optional[List[TimePoint]]: """ Read KITTI-360 timestamps for the given sequence and return Unix epoch timestamps. """ ts_files = [ - PATH_3D_RAW_ROOT / log_name / "velodyne_points" / "timestamps.txt", - PATH_2D_RAW_ROOT / log_name / "image_00" / "timestamps.txt", - PATH_2D_RAW_ROOT / log_name / "image_01" / "timestamps.txt", + kitti360_folders[DIR_3D_RAW] / log_name / "velodyne_points" / "timestamps.txt", + kitti360_folders[DIR_2D_RAW] / log_name / "image_00" / "timestamps.txt", + kitti360_folders[DIR_2D_RAW] / log_name / "image_01" / "timestamps.txt", ] if log_name == "2013_05_28_drive_0002_sync": @@ -406,31 +469,25 @@ def _read_timestamps(log_name: str) -> Optional[List[TimePoint]]: dt_obj = datetime.datetime.strptime(dt_str, "%Y-%m-%d %H:%M:%S") dt_obj = dt_obj.replace(tzinfo=datetime.timezone.utc) unix_epoch = datetime.datetime(1970, 1, 1, tzinfo=datetime.timezone.utc) - total_seconds = (dt_obj - unix_epoch).total_seconds() - ns_value = int(ns_str) us_from_ns = ns_value // 1000 - total_us = int(total_seconds * 1_000_000) + us_from_ns - tps.append(TimePoint.from_us(total_us)) return tps return None -def _extract_ego_state_all(log_name: str) -> Tuple[List[EgoStateSE3], List[int]]: +def _extract_ego_state_all(log_name: str, kitti360_folders: Dict[str, Path]) -> Tuple[List[EgoStateSE3], List[int]]: ego_state_all: List[List[float]] = [] - - pose_file = PATH_POSES_ROOT / log_name / "poses.txt" + pose_file = kitti360_folders[DIR_POSES] / log_name / "poses.txt" if not pose_file.exists(): raise FileNotFoundError(f"Pose file not found: {pose_file}") poses = np.loadtxt(pose_file) poses_time = poses[:, 0].astype(np.int32) valid_timestamp: List[int] = list(poses_time) - - oxts_path = PATH_POSES_ROOT / log_name / "oxts" / "data" + oxts_path = kitti360_folders[DIR_POSES] / log_name / "oxts" / "data" for idx in range(len(valid_timestamp)): oxts_path_file = oxts_path / f"{int(valid_timestamp[idx]):010d}.txt" @@ -495,11 +552,14 @@ def _extract_ego_state_all(log_name: str) -> Tuple[List[EgoStateSE3], List[int]] return ego_state_all, valid_timestamp -def _extract_detections( +def 
_extract_kitti360_box_detections_all( log_name: str, ts_len: int, ego_states_xyz: np.ndarray, valid_timestamp: List[int], + kitti360_folders: Dict[str, Path], + detection_cache_root: Path, + detection_radius: float, ) -> List[BoxDetectionWrapper]: detections_states: List[List[List[float]]] = [[] for _ in range(ts_len)] @@ -508,18 +568,23 @@ def _extract_detections( detections_types: List[List[int]] = [[] for _ in range(ts_len)] if log_name == "2013_05_28_drive_0004_sync": - bbox_3d_path = PATH_3D_BBOX_ROOT / "train_full" / f"{log_name}.xml" + bbox_3d_path = kitti360_folders[DIR_3D_BBOX] / "train_full" / f"{log_name}.xml" else: - bbox_3d_path = PATH_3D_BBOX_ROOT / "train" / f"{log_name}.xml" + bbox_3d_path = kitti360_folders[DIR_3D_BBOX] / "train" / f"{log_name}.xml" if not bbox_3d_path.exists(): raise FileNotFoundError(f"BBox 3D file not found: {bbox_3d_path}") tree = ET.parse(bbox_3d_path) root = tree.getroot() - detection_preprocess_path = PREPROCESS_DETECTION_DIR / f"{log_name}_detection_preprocessed.pkl" + detection_preprocess_path = detection_cache_root / f"{log_name}_detection_preprocessed.pkl" if not detection_preprocess_path.exists(): - process_detection(log_name=log_name, radius_m=60.0, output_dir=PREPROCESS_DETECTION_DIR) + process_detection( + kitti360_data_root=kitti360_folders[DIR_ROOT], + log_name=log_name, + radius_m=detection_radius, + output_dir=detection_cache_root, + ) with open(detection_preprocess_path, "rb") as f: detection_preprocess_result = pickle.load(f) static_records_dict = { @@ -620,7 +685,12 @@ def _extract_detections( return box_detection_wrapper_all -def _extract_lidar(log_name: str, idx: int, data_converter_config: DatasetConverterConfig) -> List[LiDARData]: +def _extract_kitti360_lidar( + log_name: str, + idx: int, + kitti360_folders: Dict[str, Path], + data_converter_config: DatasetConverterConfig, +) -> List[LiDARData]: lidars: List[LiDARData] = [] if data_converter_config.include_lidars: @@ -628,17 +698,15 @@ def _extract_lidar(log_name: str, idx: int, data_converter_config: DatasetConver if log_name == "2013_05_28_drive_0002_sync" and idx <= 4390: return lidars - lidar_full_path = PATH_3D_RAW_ROOT / log_name / "velodyne_points" / "data" / f"{idx:010d}.bin" - + lidar_full_path = kitti360_folders[DIR_3D_RAW] / log_name / "velodyne_points" / "data" / f"{idx:010d}.bin" if lidar_full_path.exists(): - lidars.append( LiDARData( lidar_type=LiDARType.LIDAR_TOP, timestamp=None, iteration=idx, - dataset_root=KITTI360_DATA_ROOT, - relative_path=lidar_full_path.relative_to(KITTI360_DATA_ROOT), + dataset_root=kitti360_folders[DIR_ROOT], + relative_path=lidar_full_path.relative_to(kitti360_folders[DIR_ROOT]), ) ) else: @@ -647,46 +715,77 @@ def _extract_lidar(log_name: str, idx: int, data_converter_config: DatasetConver return lidars -def _extract_cameras( - log_name: str, idx: int, data_converter_config: DatasetConverterConfig +def _extract_kitti360_pinhole_cameras( + log_name: str, + idx: int, + camera_calibration: Dict[str, StateSE3], + kitti360_folders: Dict[str, Path], + data_converter_config: DatasetConverterConfig, ) -> Dict[Union[PinholeCameraType, FisheyeMEICameraType], Optional[Tuple[Union[str, bytes], StateSE3]]]: - camera_dict: Dict[Union[PinholeCameraType, FisheyeMEICameraType], Optional[Tuple[Union[str, bytes], StateSE3]]] = {} - for camera_type, cam_dir_name in KITTI360_CAMERA_TYPES.items(): - if cam_dir_name in ["image_00", "image_01"]: - img_path_png = PATH_2D_RAW_ROOT / log_name / cam_dir_name / "data_rect" / f"{idx:010d}.png" - elif cam_dir_name in 
["image_02", "image_03"]: - img_path_png = PATH_2D_RAW_ROOT / log_name / cam_dir_name / "data_rgb" / f"{idx:010d}.png" - - cam2pose_txt = PATH_CALIB_ROOT / "calib_cam_to_pose.txt" - if not cam2pose_txt.exists(): - raise FileNotFoundError(f"calib_cam_to_pose.txt file not found: {cam2pose_txt}") - - lastrow = np.array([0, 0, 0, 1]).reshape(1, 4) - with open(cam2pose_txt, "r") as f: - for line in f: - parts = line.strip().split() - key = parts[0][:-1] - if key == cam_dir_name: - values = list(map(float, parts[1:])) - matrix = np.array(values).reshape(3, 4) - cam2pose = np.concatenate((matrix, lastrow)) - cam2pose = KITTI3602NUPLAN_IMU_CALIBRATION @ cam2pose - - camera_extrinsic = StateSE3.from_transformation_matrix(cam2pose) - camera_extrinsic = _extrinsic_from_imu_to_rear_axle(camera_extrinsic) + pinhole_camera_dict: Dict[PinholeCameraType, Optional[Tuple[Union[str, bytes], StateSE3]]] = {} + if data_converter_config.include_pinhole_cameras: + + for camera_type, cam_dir_name in KITTI360_PINHOLE_CAMERA_TYPES.items(): + img_path_png = kitti360_folders[DIR_2D_RAW] / log_name / cam_dir_name / "data_rect" / f"{idx:010d}.png" + camera_extrinsic = camera_calibration[cam_dir_name] + + if img_path_png.exists(): + if data_converter_config.pinhole_camera_store_option == "path": + camera_data = str(img_path_png) + elif data_converter_config.pinhole_camera_store_option == "binary": + with open(img_path_png, "rb") as f: + camera_data = f.read() + else: + camera_data = None + + pinhole_camera_dict[camera_type] = camera_data, camera_extrinsic + return pinhole_camera_dict + + +def _extract_kitti360_fisheye_mei_cameras( + log_name: str, + idx: int, + camera_calibration: Dict[str, StateSE3], + kitti360_folders: Dict[str, Path], + data_converter_config: DatasetConverterConfig, +) -> Dict[Union[PinholeCameraType, FisheyeMEICameraType], Optional[Tuple[Union[str, bytes], StateSE3]]]: + fisheye_camera_dict: Dict[FisheyeMEICameraType, Optional[Tuple[Union[str, bytes], StateSE3]]] = {} + for camera_type, cam_dir_name in KITTI360_FISHEYE_MEI_CAMERA_TYPES.items(): + img_path_png = kitti360_folders[DIR_2D_RAW] / log_name / cam_dir_name / "data_rgb" / f"{idx:010d}.png" + camera_extrinsic = camera_calibration[cam_dir_name] if img_path_png.exists(): - if data_converter_config.camera_store_option == "path": + if data_converter_config.pinhole_camera_store_option == "path": camera_data = str(img_path_png) - elif data_converter_config.camera_store_option == "binary": + elif data_converter_config.pinhole_camera_store_option == "binary": with open(img_path_png, "rb") as f: camera_data = f.read() else: camera_data = None - - camera_dict[camera_type] = camera_data, camera_extrinsic - return camera_dict + fisheye_camera_dict[camera_type] = camera_data, camera_extrinsic + return fisheye_camera_dict + + +def _load_kitti_360_calibration(kitti_360_data_root: Path) -> Dict[str, StateSE3]: + calib_file = kitti_360_data_root / DIR_CALIB / "calib_cam_to_pose.txt" + if not calib_file.exists(): + raise FileNotFoundError(f"Calibration file not found: {calib_file}") + + lastrow = np.array([0, 0, 0, 1]).reshape(1, 4) + calib_dict: Dict[str, StateSE3] = {} + with open(calib_file, "r") as f: + for line in f: + parts = line.strip().split() + key = parts[0][:-1] + values = list(map(float, parts[1:])) + matrix = np.array(values).reshape(3, 4) + cam2pose = np.concatenate((matrix, lastrow)) + cam2pose = KITTI3602NUPLAN_IMU_CALIBRATION @ cam2pose + camera_extrinsic = StateSE3.from_transformation_matrix(cam2pose) + camera_extrinsic = 
_extrinsic_from_imu_to_rear_axle(camera_extrinsic) + calib_dict[key] = camera_extrinsic + return calib_dict def _extrinsic_from_imu_to_rear_axle(extrinsic: StateSE3) -> StateSE3: diff --git a/src/py123d/conversion/datasets/kitti360/kitti360_sensor_io.py b/src/py123d/conversion/datasets/kitti360/kitti360_sensor_io.py index 5a0cf7e1..e58b165d 100644 --- a/src/py123d/conversion/datasets/kitti360/kitti360_sensor_io.py +++ b/src/py123d/conversion/datasets/kitti360/kitti360_sensor_io.py @@ -4,9 +4,9 @@ import numpy as np +from py123d.conversion.registry.lidar_index_registry import Kitti360LiDARIndex from py123d.datatypes.scene.scene_metadata import LogMetadata -from py123d.datatypes.sensors.lidar.lidar import LiDARType -from py123d.datatypes.sensors.lidar.lidar_index import Kitti360LidarIndex +from py123d.datatypes.sensors.lidar import LiDARType from py123d.geometry.se import StateSE3 from py123d.geometry.transform.transform_se3 import convert_points_3d_array_between_origins @@ -14,16 +14,16 @@ def load_kitti360_lidar_pcs_from_file(filepath: Path, log_metadata: LogMetadata) -> Dict[LiDARType, np.ndarray]: if not filepath.exists(): logging.warning(f"LiDAR file does not exist: {filepath}. Returning empty point cloud.") - return {LiDARType.LIDAR_TOP: np.zeros((1, len(Kitti360LidarIndex)), dtype=np.float32)} + return {LiDARType.LIDAR_TOP: np.zeros((1, len(Kitti360LiDARIndex)), dtype=np.float32)} lidar_extrinsic = log_metadata.lidar_metadata[LiDARType.LIDAR_TOP].extrinsic lidar_pc = np.fromfile(filepath, dtype=np.float32) - lidar_pc = np.reshape(lidar_pc, [-1, len(Kitti360LidarIndex)]) + lidar_pc = np.reshape(lidar_pc, [-1, len(Kitti360LiDARIndex)]) - lidar_pc[..., Kitti360LidarIndex.XYZ] = convert_points_3d_array_between_origins( + lidar_pc[..., Kitti360LiDARIndex.XYZ] = convert_points_3d_array_between_origins( from_origin=lidar_extrinsic, to_origin=StateSE3(0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0), - points_3d_array=lidar_pc[..., Kitti360LidarIndex.XYZ], + points_3d_array=lidar_pc[..., Kitti360LiDARIndex.XYZ], ) return {LiDARType.LIDAR_TOP: lidar_pc} diff --git a/src/py123d/conversion/datasets/kitti360/utils/kitti360_helper.py b/src/py123d/conversion/datasets/kitti360/utils/kitti360_helper.py index 75eba581..ef3511c4 100644 --- a/src/py123d/conversion/datasets/kitti360/utils/kitti360_helper.py +++ b/src/py123d/conversion/datasets/kitti360/utils/kitti360_helper.py @@ -1,5 +1,4 @@ import copy -import os from pathlib import Path from typing import Any, Dict, List, Tuple @@ -11,14 +10,11 @@ from py123d.geometry.polyline import Polyline3D from py123d.geometry.rotation import EulerAngles -KITTI360_DATA_ROOT = Path(os.environ["KITTI360_DATA_ROOT"]) -DIR_CALIB = "calibration" -PATH_CALIB_ROOT: Path = KITTI360_DATA_ROOT / DIR_CALIB +# KITTI360_DATA_ROOT = Path(os.environ["KITTI360_DATA_ROOT"]) +# DIR_CALIB = "calibration" +# PATH_CALIB_ROOT: Path = KITTI360_DATA_ROOT / DIR_CALIB -DEFAULT_ROLL = 0.0 -DEFAULT_PITCH = 0.0 - -kitti3602nuplan_imu_calibration_ideal = np.array( +KITTI3602NUPLAN_IMU_CALIBRATION = np.array( [ [1, 0, 0, 0], [0, -1, 0, 0], @@ -27,9 +23,6 @@ ], dtype=np.float64, ) - -KITTI3602NUPLAN_IMU_CALIBRATION = kitti3602nuplan_imu_calibration_ideal - MAX_N = 1000 @@ -246,12 +239,12 @@ def parseOpencvMatrix(node): return mat -def get_lidar_extrinsic() -> np.ndarray: - cam2pose_txt = PATH_CALIB_ROOT / "calib_cam_to_pose.txt" +def get_kitti360_lidar_extrinsic(kitti360_calibration_root: Path) -> np.ndarray: + cam2pose_txt = kitti360_calibration_root / "calib_cam_to_pose.txt" if not cam2pose_txt.exists(): 
raise FileNotFoundError(f"calib_cam_to_pose.txt file not found: {cam2pose_txt}") - cam2velo_txt = PATH_CALIB_ROOT / "calib_cam_to_velo.txt" + cam2velo_txt = kitti360_calibration_root / "calib_cam_to_velo.txt" if not cam2velo_txt.exists(): raise FileNotFoundError(f"calib_cam_to_velo.txt file not found: {cam2velo_txt}") diff --git a/src/py123d/conversion/datasets/kitti360/utils/preprocess_detection.py b/src/py123d/conversion/datasets/kitti360/utils/preprocess_detection.py index 324cb337..3f65b375 100644 --- a/src/py123d/conversion/datasets/kitti360/utils/preprocess_detection.py +++ b/src/py123d/conversion/datasets/kitti360/utils/preprocess_detection.py @@ -21,19 +21,10 @@ import numpy as np import numpy.typing as npt -KITTI360_DATA_ROOT = Path(os.environ["KITTI360_DATA_ROOT"]) -DIR_3D_RAW = "data_3d_raw" -DIR_3D_BBOX = "data_3d_bboxes" -DIR_POSES = "data_poses" - -PATH_3D_RAW_ROOT = KITTI360_DATA_ROOT / DIR_3D_RAW -PATH_3D_BBOX_ROOT = KITTI360_DATA_ROOT / DIR_3D_BBOX -PATH_POSES_ROOT = KITTI360_DATA_ROOT / DIR_POSES - from py123d.conversion.datasets.kitti360.utils.kitti360_helper import ( KITTI3602NUPLAN_IMU_CALIBRATION, KITTI360Bbox3D, - get_lidar_extrinsic, + get_kitti360_lidar_extrinsic, ) from py123d.conversion.datasets.kitti360.utils.kitti360_labels import ( BBOX_LABLES_TO_DETECTION_NAME_DICT, @@ -41,15 +32,24 @@ kittiId2label, ) +# KITTI360_DATA_ROOT = Path(os.environ["KITTI360_DATA_ROOT"]) +# DIR_3D_RAW = "data_3d_raw" +# DIR_3D_BBOX = "data_3d_bboxes" +# DIR_POSES = "data_poses" + +# PATH_3D_RAW_ROOT = KITTI360_DATA_ROOT / DIR_3D_RAW +# PATH_3D_BBOX_ROOT = KITTI360_DATA_ROOT / DIR_3D_BBOX +# PATH_POSES_ROOT = KITTI360_DATA_ROOT / DIR_POSES + -def _bbox_xml_path(log_name: str) -> Path: +def _bbox_xml_path(kitti360_dataset_root: Path, log_name: str) -> Path: if log_name == "2013_05_28_drive_0004_sync": - return PATH_3D_BBOX_ROOT / "train_full" / f"{log_name}.xml" - return PATH_3D_BBOX_ROOT / "train" / f"{log_name}.xml" + return kitti360_dataset_root / "data_3d_bboxes" / "train_full" / f"{log_name}.xml" + return kitti360_dataset_root / "data_3d_bboxes" / "train" / f"{log_name}.xml" -def _lidar_frame_path(log_name: str, frame_idx: int) -> Path: - return PATH_3D_RAW_ROOT / log_name / "velodyne_points" / "data" / f"{frame_idx:010d}.bin" +def _lidar_frame_path(kitti360_dataset_root: Path, log_name: str, frame_idx: int) -> Path: + return kitti360_dataset_root / "data_3d_raw" / log_name / "velodyne_points" / "data" / f"{frame_idx:010d}.bin" def _load_lidar_xyz(filepath: Path) -> np.ndarray: @@ -58,9 +58,9 @@ def _load_lidar_xyz(filepath: Path) -> np.ndarray: return arr.reshape(-1, 4)[:, :3] -def _collect_static_objects(log_name: str) -> List[KITTI360Bbox3D]: +def _collect_static_objects(kitti360_dataset_root: Path, log_name: str) -> List[KITTI360Bbox3D]: """Parse XML and collect static objects with valid class names.""" - xml_path = _bbox_xml_path(log_name) + xml_path = _bbox_xml_path(kitti360_dataset_root, log_name) if not xml_path.exists(): raise FileNotFoundError(f"BBox 3D file not found: {xml_path}") tree = ET.parse(xml_path) @@ -84,10 +84,10 @@ def _collect_static_objects(log_name: str) -> List[KITTI360Bbox3D]: return static_objs -def _collect_ego_states(log_name: str) -> Tuple[npt.NDArray[np.float64], list[int]]: +def _collect_ego_states(kitti360_data_root: Path, log_name: str) -> Tuple[npt.NDArray[np.float64], list[int]]: """Load ego states from poses.txt.""" - pose_file = PATH_POSES_ROOT / log_name / "poses.txt" + pose_file = kitti360_data_root / "data_poses" / log_name / "poses.txt" 
if not pose_file.exists(): raise FileNotFoundError(f"Pose file not found: {pose_file}") @@ -121,6 +121,7 @@ def _collect_ego_states(log_name: str) -> Tuple[npt.NDArray[np.float64], list[in def process_detection( + kitti360_data_root: Path, log_name: str, radius_m: float = 60.0, output_dir: Optional[Path] = None, @@ -133,29 +134,29 @@ def process_detection( Save per-frame detections to a pickle to avoid recomputation. """ - lidar_dir = PATH_3D_RAW_ROOT / log_name / "velodyne_points" / "data" + lidar_dir = kitti360_data_root / "data_3d_raw" / log_name / "velodyne_points" / "data" if not lidar_dir.exists(): raise FileNotFoundError(f"LiDAR data folder not found: {lidar_dir}") ts_len = len(list(lidar_dir.glob("*.bin"))) logging.info(f"[preprocess] {log_name}: found {ts_len} lidar frames") # 1) Parse objects from XML - static_objs: List[KITTI360Bbox3D] = _collect_static_objects(log_name) + static_objs: List[KITTI360Bbox3D] = _collect_static_objects(kitti360_data_root, log_name) logging.info(f"[preprocess] {log_name}: static objects = {len(static_objs)}") # 2) Filter static objs by ego-centered radius - ego_states, valid_timestamp = _collect_ego_states(log_name) + ego_states, valid_timestamp = _collect_ego_states(kitti360_data_root, log_name) logging.info(f"[preprocess] {log_name}: ego states = {len(ego_states)}") for obj in static_objs: obj.filter_by_radius(ego_states[:, :3, 3], valid_timestamp, radius_m) # 3) Filter static objs by LiDAR point cloud visibility - lidar_extrinsic = get_lidar_extrinsic() + lidar_extrinsic = get_kitti360_lidar_extrinsic(kitti360_data_root / "calibration") def process_one_frame(time_idx: int) -> None: valid_time_idx = valid_timestamp[time_idx] logging.info(f"[preprocess] {log_name}: t={valid_time_idx}") - lidar_path = _lidar_frame_path(log_name, valid_time_idx) + lidar_path = _lidar_frame_path(kitti360_data_root, log_name, valid_time_idx) if not lidar_path.exists(): logging.warning(f"[preprocess] {log_name}: LiDAR frame not found: {lidar_path}") return @@ -194,7 +195,7 @@ def process_one_frame(time_idx: int) -> None: static_records.append(obj.valid_frames) if output_dir is None: - output_dir = PATH_3D_BBOX_ROOT / "preprocess" + output_dir = kitti360_data_root / "data_3d_bboxes" / "preprocess" output_dir.mkdir(parents=True, exist_ok=True) out_path = output_dir / f"{log_name}_detection_preprocessed.pkl" @@ -212,12 +213,14 @@ def process_one_frame(time_idx: int) -> None: logging.basicConfig(level=logging.INFO) parser = argparse.ArgumentParser(description="Precompute KITTI-360 detections filters") + parser.add_argument("--kitti360_data_root", type=Path, default=".", help="KITTI-360 data root directory") parser.add_argument("--log_name", default="2013_05_28_drive_0000_sync") parser.add_argument("--radius", type=float, default=60.0) parser.add_argument("--out", type=Path, default="detection_preprocess", help="output directory for pkl") args = parser.parse_args() process_detection( + kitti360_data_root=args.kitti360_data_root, log_name=args.log_name, radius_m=args.radius, output_dir=args.out, diff --git a/src/py123d/conversion/datasets/nuplan/nuplan_converter.py b/src/py123d/conversion/datasets/nuplan/nuplan_converter.py index c837e559..8f2620ef 100644 --- a/src/py123d/conversion/datasets/nuplan/nuplan_converter.py +++ b/src/py123d/conversion/datasets/nuplan/nuplan_converter.py @@ -24,18 +24,18 @@ ) from py123d.conversion.log_writer.abstract_log_writer import AbstractLogWriter, LiDARData from py123d.conversion.map_writer.abstract_map_writer import AbstractMapWriter -from 
py123d.conversion.registry.lidar_index_registry import NuPlanLidarIndex +from py123d.conversion.registry.lidar_index_registry import NuPlanLiDARIndex from py123d.datatypes.detections.box_detections import BoxDetectionSE3, BoxDetectionWrapper from py123d.datatypes.detections.traffic_light_detections import TrafficLightDetection, TrafficLightDetectionWrapper from py123d.datatypes.maps.map_metadata import MapMetadata from py123d.datatypes.scene.scene_metadata import LogMetadata -from py123d.datatypes.sensors.camera.pinhole_camera import ( +from py123d.datatypes.sensors.lidar import LiDARMetadata, LiDARType +from py123d.datatypes.sensors.pinhole_camera import ( PinholeCameraMetadata, PinholeCameraType, PinholeDistortion, PinholeIntrinsics, ) -from py123d.datatypes.sensors.lidar.lidar import LiDARMetadata, LiDARType from py123d.datatypes.time.time_point import TimePoint from py123d.datatypes.vehicle_state.ego_state import DynamicStateSE3, EgoStateSE3 from py123d.datatypes.vehicle_state.vehicle_parameters import ( @@ -52,14 +52,14 @@ # NOTE: Leaving this constant here, to avoid having a nuplan dependency in nuplan_constants.py NUPLAN_CAMERA_MAPPING = { - PinholeCameraType.CAM_F0: CameraChannel.CAM_F0, - PinholeCameraType.CAM_B0: CameraChannel.CAM_B0, - PinholeCameraType.CAM_L0: CameraChannel.CAM_L0, - PinholeCameraType.CAM_L1: CameraChannel.CAM_L1, - PinholeCameraType.CAM_L2: CameraChannel.CAM_L2, - PinholeCameraType.CAM_R0: CameraChannel.CAM_R0, - PinholeCameraType.CAM_R1: CameraChannel.CAM_R1, - PinholeCameraType.CAM_R2: CameraChannel.CAM_R2, + PinholeCameraType.PCAM_F0: CameraChannel.CAM_F0, + PinholeCameraType.PCAM_B0: CameraChannel.CAM_B0, + PinholeCameraType.PCAM_L0: CameraChannel.CAM_L0, + PinholeCameraType.PCAM_L1: CameraChannel.CAM_L1, + PinholeCameraType.PCAM_L2: CameraChannel.CAM_L2, + PinholeCameraType.PCAM_R0: CameraChannel.CAM_R0, + PinholeCameraType.PCAM_R1: CameraChannel.CAM_R1, + PinholeCameraType.PCAM_R2: CameraChannel.CAM_R2, } TARGET_DT: Final[float] = 0.1 # TODO: make configurable @@ -176,7 +176,7 @@ def convert_log(self, log_index: int, log_writer: AbstractLogWriter) -> None: location=nuplan_log_db.log.map_version, timestep_seconds=TARGET_DT, vehicle_parameters=get_nuplan_chrysler_pacifica_parameters(), - camera_metadata=_get_nuplan_camera_metadata(source_log_path, self.dataset_converter_config), + pinhole_camera_metadata=_get_nuplan_camera_metadata(source_log_path, self.dataset_converter_config), lidar_metadata=_get_nuplan_lidar_metadata( self._nuplan_sensor_root, log_name, self.dataset_converter_config ), @@ -196,7 +196,7 @@ def convert_log(self, log_index: int, log_writer: AbstractLogWriter) -> None: ego_state=_extract_nuplan_ego_state(nuplan_lidar_pc), box_detections=_extract_nuplan_box_detections(nuplan_lidar_pc, source_log_path), traffic_lights=_extract_nuplan_traffic_lights(nuplan_log_db, lidar_pc_token), - cameras=_extract_nuplan_cameras( + pinhole_cameras=_extract_nuplan_cameras( nuplan_log_db=nuplan_log_db, nuplan_lidar_pc=nuplan_lidar_pc, source_log_path=source_log_path, @@ -256,7 +256,7 @@ def _get_camera_metadata(camera_type: PinholeCameraType) -> PinholeCameraMetadat ) camera_metadata: Dict[str, PinholeCameraMetadata] = {} - if dataset_converter_config.include_cameras: + if dataset_converter_config.include_pinhole_cameras: for camera_type in NUPLAN_CAMERA_MAPPING.keys(): camera_metadata[camera_type] = _get_camera_metadata(camera_type) @@ -277,7 +277,7 @@ def _get_nuplan_lidar_metadata( for lidar_type in NUPLAN_LIDAR_DICT.values(): metadata[lidar_type] = 
LiDARMetadata( lidar_type=lidar_type, - lidar_index=NuPlanLidarIndex, + lidar_index=NuPlanLiDARIndex, extrinsic=None, # NOTE: LiDAR extrinsic are unknown ) return metadata @@ -350,7 +350,7 @@ def _extract_nuplan_cameras( camera_dict: Dict[str, Union[str, bytes]] = {} - if dataset_converter_config.include_cameras: + if dataset_converter_config.include_pinhole_cameras: log_cam_infos = {camera.token: camera for camera in nuplan_log_db.log.cameras} for camera_type, camera_channel in NUPLAN_CAMERA_MAPPING.items(): camera_data: Optional[Union[str, bytes]] = None @@ -387,9 +387,9 @@ def _extract_nuplan_cameras( # Store camera data, either as path or binary camera_data: Optional[Union[str, bytes]] = None - if dataset_converter_config.camera_store_option == "path": + if dataset_converter_config.pinhole_camera_store_option == "path": camera_data = str(filename_jpg) - elif dataset_converter_config.camera_store_option == "binary": + elif dataset_converter_config.pinhole_camera_store_option == "binary": with open(filename_jpg, "rb") as f: camera_data = f.read() diff --git a/src/py123d/conversion/datasets/nuplan/nuplan_sensor_io.py b/src/py123d/conversion/datasets/nuplan/nuplan_sensor_io.py index fed2d508..8c2506f0 100644 --- a/src/py123d/conversion/datasets/nuplan/nuplan_sensor_io.py +++ b/src/py123d/conversion/datasets/nuplan/nuplan_sensor_io.py @@ -6,8 +6,8 @@ from py123d.common.utils.dependencies import check_dependencies from py123d.conversion.datasets.nuplan.utils.nuplan_constants import NUPLAN_LIDAR_DICT -from py123d.datatypes.sensors.lidar.lidar import LiDARType -from py123d.datatypes.sensors.lidar.lidar_index import NuPlanLidarIndex +from py123d.conversion.registry.lidar_index_registry import NuPlanLiDARIndex +from py123d.datatypes.sensors.lidar import LiDARType check_dependencies(["nuplan"], "nuplan") from nuplan.database.utils.pointclouds.lidar import LidarPointCloud @@ -23,7 +23,7 @@ def load_nuplan_lidar_pcs_from_file(pcd_path: Path) -> Dict[LiDARType, np.ndarra lidar_pcs_dict: Dict[LiDARType, np.ndarray] = {} for lidar_id, lidar_type in NUPLAN_LIDAR_DICT.items(): mask = merged_lidar_pc[-1, :] == lidar_id - lidar_pc = merged_lidar_pc[: len(NuPlanLidarIndex), mask].T.astype(np.float32) + lidar_pc = merged_lidar_pc[: len(NuPlanLiDARIndex), mask].T.astype(np.float32) lidar_pcs_dict[lidar_type] = lidar_pc return lidar_pcs_dict diff --git a/src/py123d/conversion/datasets/nuplan/utils/nuplan_constants.py b/src/py123d/conversion/datasets/nuplan/utils/nuplan_constants.py index 4b074d53..d1159dc4 100644 --- a/src/py123d/conversion/datasets/nuplan/utils/nuplan_constants.py +++ b/src/py123d/conversion/datasets/nuplan/utils/nuplan_constants.py @@ -4,7 +4,7 @@ from py123d.datatypes.detections.box_detection_types import BoxDetectionType from py123d.datatypes.detections.traffic_light_detections import TrafficLightStatus from py123d.datatypes.maps.map_datatypes import RoadLineType -from py123d.datatypes.sensors.lidar.lidar import LiDARType +from py123d.datatypes.sensors.lidar import LiDARType from py123d.datatypes.time.time_point import TimePoint diff --git a/src/py123d/conversion/datasets/nuscenes/nuscenes_converter.py b/src/py123d/conversion/datasets/nuscenes/nuscenes_converter.py index c4e1627e..e7cbf2e2 100644 --- a/src/py123d/conversion/datasets/nuscenes/nuscenes_converter.py +++ b/src/py123d/conversion/datasets/nuscenes/nuscenes_converter.py @@ -19,17 +19,17 @@ ) from py123d.conversion.log_writer.abstract_log_writer import AbstractLogWriter, LiDARData from 
py123d.conversion.map_writer.abstract_map_writer import AbstractMapWriter +from py123d.conversion.registry.lidar_index_registry import NuScenesLiDARIndex from py123d.datatypes.detections.box_detections import BoxDetectionMetadata, BoxDetectionSE3, BoxDetectionWrapper from py123d.datatypes.maps.map_metadata import MapMetadata from py123d.datatypes.scene.scene_metadata import LogMetadata -from py123d.datatypes.sensors.camera.pinhole_camera import ( +from py123d.datatypes.sensors.lidar import LiDARMetadata, LiDARType +from py123d.datatypes.sensors.pinhole_camera import ( PinholeCameraMetadata, PinholeCameraType, PinholeDistortion, PinholeIntrinsics, ) -from py123d.datatypes.sensors.lidar.lidar import LiDARMetadata, LiDARType -from py123d.datatypes.sensors.lidar.lidar_index import NuScenesLidarIndex from py123d.datatypes.time.time_point import TimePoint from py123d.datatypes.vehicle_state.ego_state import DynamicStateSE3, EgoStateSE3 from py123d.datatypes.vehicle_state.vehicle_parameters import get_nuscenes_renault_zoe_parameters @@ -148,7 +148,7 @@ def convert_log(self, log_index: int, log_writer: AbstractLogWriter) -> None: location=log_record["location"], timestep_seconds=TARGET_DT, vehicle_parameters=get_nuscenes_renault_zoe_parameters(), - camera_metadata=_get_nuscenes_camera_metadata(nusc, scene, self.dataset_converter_config), + pinhole_camera_metadata=_get_nuscenes_pinhole_camera_metadata(nusc, scene, self.dataset_converter_config), lidar_metadata=_get_nuscenes_lidar_metadata(nusc, scene, self.dataset_converter_config), map_metadata=_get_nuscenes_map_metadata(log_record["location"]), ) @@ -172,7 +172,7 @@ def convert_log(self, log_index: int, log_writer: AbstractLogWriter) -> None: timestamp=TimePoint.from_us(sample["timestamp"]), ego_state=_extract_nuscenes_ego_state(nusc, sample, can_bus), box_detections=_extract_nuscenes_box_detections(nusc, sample), - cameras=_extract_nuscenes_cameras( + pinhole_cameras=_extract_nuscenes_cameras( nusc=nusc, sample=sample, dataset_converter_config=self.dataset_converter_config, @@ -192,14 +192,14 @@ def convert_log(self, log_index: int, log_writer: AbstractLogWriter) -> None: gc.collect() -def _get_nuscenes_camera_metadata( +def _get_nuscenes_pinhole_camera_metadata( nusc: NuScenes, scene: Dict[str, Any], dataset_converter_config: DatasetConverterConfig, ) -> Dict[PinholeCameraType, PinholeCameraMetadata]: camera_metadata: Dict[PinholeCameraType, PinholeCameraMetadata] = {} - if dataset_converter_config.include_cameras: + if dataset_converter_config.include_pinhole_cameras: first_sample_token = scene["first_sample_token"] first_sample = nusc.get("sample", first_sample_token) @@ -246,7 +246,7 @@ def _get_nuscenes_lidar_metadata( metadata[LiDARType.LIDAR_TOP] = LiDARMetadata( lidar_type=LiDARType.LIDAR_TOP, - lidar_index=NuScenesLidarIndex, + lidar_index=NuScenesLiDARIndex, extrinsic=extrinsic, ) @@ -389,7 +389,7 @@ def _extract_nuscenes_cameras( ) -> Dict[PinholeCameraType, Tuple[Union[str, bytes], StateSE3]]: camera_dict: Dict[PinholeCameraType, Tuple[Union[str, bytes], StateSE3]] = {} - if dataset_converter_config.include_cameras: + if dataset_converter_config.include_pinhole_cameras: for camera_type, camera_channel in NUSCENES_CAMERA_TYPES.items(): cam_token = sample["data"][camera_channel] cam_data = nusc.get("sample_data", cam_token) @@ -410,9 +410,9 @@ def _extract_nuscenes_cameras( cam_path = NUSCENES_DATA_ROOT / cam_data["filename"] if cam_path.exists() and cam_path.is_file(): - if dataset_converter_config.camera_store_option == "path": + if 
dataset_converter_config.pinhole_camera_store_option == "path": camera_data = str(cam_path) - elif dataset_converter_config.camera_store_option == "binary": + elif dataset_converter_config.pinhole_camera_store_option == "binary": with open(cam_path, "rb") as f: camera_data = f.read() else: diff --git a/src/py123d/conversion/datasets/nuscenes/nuscenes_sensor_io.py b/src/py123d/conversion/datasets/nuscenes/nuscenes_sensor_io.py index eccf0124..e09caae6 100644 --- a/src/py123d/conversion/datasets/nuscenes/nuscenes_sensor_io.py +++ b/src/py123d/conversion/datasets/nuscenes/nuscenes_sensor_io.py @@ -3,21 +3,21 @@ import numpy as np +from py123d.conversion.registry.lidar_index_registry import NuScenesLiDARIndex from py123d.datatypes.scene.scene_metadata import LogMetadata -from py123d.datatypes.sensors.lidar.lidar import LiDARType -from py123d.datatypes.sensors.lidar.lidar_index import NuScenesLidarIndex +from py123d.datatypes.sensors.lidar import LiDARType from py123d.geometry.se import StateSE3 from py123d.geometry.transform.transform_se3 import convert_points_3d_array_between_origins def load_nuscenes_lidar_pcs_from_file(pcd_path: Path, log_metadata: LogMetadata) -> Dict[LiDARType, np.ndarray]: - lidar_pc = np.fromfile(pcd_path, dtype=np.float32).reshape(-1, len(NuScenesLidarIndex)) + lidar_pc = np.fromfile(pcd_path, dtype=np.float32).reshape(-1, len(NuScenesLiDARIndex)) # convert lidar to ego frame lidar_extrinsic = log_metadata.lidar_metadata[LiDARType.LIDAR_TOP].extrinsic - lidar_pc[..., NuScenesLidarIndex.XYZ] = convert_points_3d_array_between_origins( + lidar_pc[..., NuScenesLiDARIndex.XYZ] = convert_points_3d_array_between_origins( from_origin=lidar_extrinsic, to_origin=StateSE3(0, 0, 0, 1.0, 0, 0, 0), - points_3d_array=lidar_pc[..., NuScenesLidarIndex.XYZ], + points_3d_array=lidar_pc[..., NuScenesLiDARIndex.XYZ], ) return {LiDARType.LIDAR_TOP: lidar_pc} diff --git a/src/py123d/conversion/datasets/nuscenes/utils/nuscenes_constants.py b/src/py123d/conversion/datasets/nuscenes/utils/nuscenes_constants.py index dd04d91a..9ea29413 100644 --- a/src/py123d/conversion/datasets/nuscenes/utils/nuscenes_constants.py +++ b/src/py123d/conversion/datasets/nuscenes/utils/nuscenes_constants.py @@ -3,7 +3,7 @@ from typing import Final, List from py123d.datatypes.detections.box_detection_types import BoxDetectionType -from py123d.datatypes.sensors.camera.pinhole_camera import PinholeCameraType +from py123d.datatypes.sensors.pinhole_camera import PinholeCameraType NUSCENES_MAPS: List[str] = ["boston-seaport", "singapore-hollandvillage", "singapore-onenorth", "singapore-queenstown"] @@ -50,11 +50,11 @@ } NUSCENES_CAMERA_TYPES = { - PinholeCameraType.CAM_F0: "CAM_FRONT", - PinholeCameraType.CAM_B0: "CAM_BACK", - PinholeCameraType.CAM_L0: "CAM_FRONT_LEFT", - PinholeCameraType.CAM_L1: "CAM_BACK_LEFT", - PinholeCameraType.CAM_R0: "CAM_FRONT_RIGHT", - PinholeCameraType.CAM_R1: "CAM_BACK_RIGHT", + PinholeCameraType.PCAM_F0: "CAM_FRONT", + PinholeCameraType.PCAM_B0: "CAM_BACK", + PinholeCameraType.PCAM_L0: "CAM_FRONT_LEFT", + PinholeCameraType.PCAM_L1: "CAM_BACK_LEFT", + PinholeCameraType.PCAM_R0: "CAM_FRONT_RIGHT", + PinholeCameraType.PCAM_R1: "CAM_BACK_RIGHT", } NUSCENES_DATA_ROOT = Path(os.environ["NUSCENES_DATA_ROOT"]) diff --git a/src/py123d/conversion/datasets/pandaset/pandaset_converter.py b/src/py123d/conversion/datasets/pandaset/pandaset_converter.py index dcefb187..9656da00 100644 --- a/src/py123d/conversion/datasets/pandaset/pandaset_converter.py +++ 
b/src/py123d/conversion/datasets/pandaset/pandaset_converter.py @@ -25,15 +25,15 @@ ) from py123d.conversion.log_writer.abstract_log_writer import AbstractLogWriter, LiDARData from py123d.conversion.map_writer.abstract_map_writer import AbstractMapWriter -from py123d.conversion.registry.lidar_index_registry import PandasetLidarIndex +from py123d.conversion.registry.lidar_index_registry import PandasetLiDARIndex from py123d.datatypes.detections.box_detections import BoxDetectionMetadata, BoxDetectionSE3, BoxDetectionWrapper from py123d.datatypes.scene.scene_metadata import LogMetadata -from py123d.datatypes.sensors.camera.pinhole_camera import ( +from py123d.datatypes.sensors.lidar import LiDARMetadata, LiDARType +from py123d.datatypes.sensors.pinhole_camera import ( PinholeCameraMetadata, PinholeCameraType, PinholeIntrinsics, ) -from py123d.datatypes.sensors.lidar.lidar import LiDARMetadata, LiDARType from py123d.datatypes.time.time_point import TimePoint from py123d.datatypes.vehicle_state.ego_state import DynamicStateSE3, EgoStateSE3 from py123d.datatypes.vehicle_state.vehicle_parameters import ( @@ -114,7 +114,7 @@ def convert_log(self, log_index: int, log_writer: AbstractLogWriter) -> None: location=None, # TODO: Add location information. timestep_seconds=0.1, vehicle_parameters=get_pandaset_chrysler_pacifica_parameters(), - camera_metadata=_get_pandaset_camera_metadata(source_log_path, self.dataset_converter_config), + pinhole_camera_metadata=_get_pandaset_camera_metadata(source_log_path, self.dataset_converter_config), lidar_metadata=_get_pandaset_lidar_metadata(source_log_path, self.dataset_converter_config), map_metadata=None, # NOTE: Pandaset does not have maps. ) @@ -142,7 +142,7 @@ def convert_log(self, log_index: int, log_writer: AbstractLogWriter) -> None: timestamp=TimePoint.from_s(timestep_s), ego_state=ego_state, box_detections=_extract_pandaset_box_detections(source_log_path, iteration, ego_state), - cameras=_extract_pandaset_sensor_camera( + pinhole_cameras=_extract_pandaset_sensor_camera( source_log_path, iteration, ego_state, @@ -167,7 +167,7 @@ def _get_pandaset_camera_metadata( camera_metadata: Dict[PinholeCameraType, PinholeCameraMetadata] = {} - if dataset_config.include_cameras: + if dataset_config.include_pinhole_cameras: all_cameras_folder = source_log_path / "camera" for camera_folder in all_cameras_folder.iterdir(): camera_name = camera_folder.name @@ -204,7 +204,7 @@ def _get_pandaset_lidar_metadata( for lidar_name, lidar_type in PANDASET_LIDAR_MAPPING.items(): lidar_metadata[lidar_type] = LiDARMetadata( lidar_type=lidar_type, - lidar_index=PandasetLidarIndex, + lidar_index=PandasetLiDARIndex, extrinsic=PANDASET_LIDAR_EXTRINSICS[ lidar_name ], # TODO: These extrinsics are incorrect, and need to be transformed correctly. 
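# Aside (illustrative, not part of the patch): the *LiDARIndex registries used
# throughout these converters name the columns of an (N, len(Index)) float32
# point-cloud array, so code can slice by meaning instead of magic numbers.
# A toy stand-in for e.g. PandasetLiDARIndex, with made-up fields:
from enum import IntEnum

import numpy as np

class ToyLiDARIndex(IntEnum):
    X = 0
    Y = 1
    Z = 2
    INTENSITY = 3

points = np.zeros((100, len(ToyLiDARIndex)), dtype=np.float32)
xyz = points[:, [ToyLiDARIndex.X, ToyLiDARIndex.Y, ToyLiDARIndex.Z]]  # (100, 3)
intensity = points[:, ToyLiDARIndex.INTENSITY]  # (100,)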
@@ -343,7 +343,7 @@ def _extract_pandaset_sensor_camera( camera_dict: Dict[PinholeCameraType, Tuple[Union[str, bytes], StateSE3]] = {} iteration_str = f"{iteration:02d}" - if dataset_converter_config.include_cameras: + if dataset_converter_config.include_pinhole_cameras: for camera_name, camera_type in PANDASET_CAMERA_MAPPING.items(): @@ -359,10 +359,10 @@ def _extract_pandaset_sensor_camera( ) camera_data = None - if dataset_converter_config.camera_store_option == "path": + if dataset_converter_config.pinhole_camera_store_option == "path": pandaset_data_root = source_log_path.parent camera_data = str(image_abs_path.relative_to(pandaset_data_root)) - elif dataset_converter_config.camera_store_option == "binary": + elif dataset_converter_config.pinhole_camera_store_option == "binary": with open(image_abs_path, "rb") as f: camera_data = f.read() camera_dict[camera_type] = camera_data, camera_extrinsic diff --git a/src/py123d/conversion/datasets/pandaset/pandaset_sensor_io.py b/src/py123d/conversion/datasets/pandaset/pandaset_sensor_io.py index e07ff916..14f1f236 100644 --- a/src/py123d/conversion/datasets/pandaset/pandaset_sensor_io.py +++ b/src/py123d/conversion/datasets/pandaset/pandaset_sensor_io.py @@ -10,8 +10,8 @@ read_json, read_pkl_gz, ) -from py123d.conversion.registry.lidar_index_registry import PandasetLidarIndex -from py123d.datatypes.sensors.lidar.lidar import LiDARType +from py123d.conversion.registry.lidar_index_registry import PandasetLiDARIndex +from py123d.datatypes.sensors.lidar import LiDARType from py123d.geometry.transform.transform_se3 import convert_absolute_to_relative_points_3d_array @@ -46,9 +46,9 @@ def load_pandaset_lidars_pcs_from_file( ) for lidar_type in lidar_pc_dict.keys(): - lidar_pc_dict[lidar_type][..., PandasetLidarIndex.XYZ] = convert_absolute_to_relative_points_3d_array( + lidar_pc_dict[lidar_type][..., PandasetLiDARIndex.XYZ] = convert_absolute_to_relative_points_3d_array( ego_pose, - lidar_pc_dict[lidar_type][..., PandasetLidarIndex.XYZ], + lidar_pc_dict[lidar_type][..., PandasetLiDARIndex.XYZ], ) return lidar_pc_dict diff --git a/src/py123d/conversion/datasets/pandaset/utils/pandaset_constants.py b/src/py123d/conversion/datasets/pandaset/utils/pandaset_constants.py index 93ef4bc8..1e65b509 100644 --- a/src/py123d/conversion/datasets/pandaset/utils/pandaset_constants.py +++ b/src/py123d/conversion/datasets/pandaset/utils/pandaset_constants.py @@ -2,19 +2,19 @@ from py123d.common.utils.enums import SerialIntEnum from py123d.datatypes.detections.box_detection_types import BoxDetectionType -from py123d.datatypes.sensors.camera.pinhole_camera import PinholeCameraType, PinholeDistortion, PinholeIntrinsics -from py123d.datatypes.sensors.lidar.lidar import LiDARType +from py123d.datatypes.sensors.lidar import LiDARType +from py123d.datatypes.sensors.pinhole_camera import PinholeCameraType, PinholeDistortion, PinholeIntrinsics from py123d.geometry import StateSE3 PANDASET_SPLITS: List[str] = ["pandaset_train", "pandaset_val", "pandaset_test"] PANDASET_CAMERA_MAPPING: Dict[str, PinholeCameraType] = { - "front_camera": PinholeCameraType.CAM_F0, - "back_camera": PinholeCameraType.CAM_B0, - "front_left_camera": PinholeCameraType.CAM_L0, - "front_right_camera": PinholeCameraType.CAM_R0, - "left_camera": PinholeCameraType.CAM_L1, - "right_camera": PinholeCameraType.CAM_R1, + "front_camera": PinholeCameraType.PCAM_F0, + "back_camera": PinholeCameraType.PCAM_B0, + "front_left_camera": PinholeCameraType.PCAM_L0, + "front_right_camera": PinholeCameraType.PCAM_R0, + 
"left_camera": PinholeCameraType.PCAM_L1, + "right_camera": PinholeCameraType.PCAM_R1, } PANDASET_LIDAR_MAPPING: Dict[str, LiDARType] = {"main_pandar64": LiDARType.LIDAR_TOP, "front_gt": LiDARType.LIDAR_FRONT} diff --git a/src/py123d/conversion/datasets/pandaset/utils/pandaset_utlis.py b/src/py123d/conversion/datasets/pandaset/utils/pandaset_utlis.py index e179a41c..68575e7e 100644 --- a/src/py123d/conversion/datasets/pandaset/utils/pandaset_utlis.py +++ b/src/py123d/conversion/datasets/pandaset/utils/pandaset_utlis.py @@ -61,10 +61,6 @@ def rotate_pandaset_pose_to_iso_coordinates(pose: StateSE3) -> StateSE3: transformation_matrix = pose.transformation_matrix.copy() transformation_matrix[0:3, 0:3] = transformation_matrix[0:3, 0:3] @ F - # transformation_matrix[0, 3] = pose.y - # transformation_matrix[1, 3] = -pose.x - # transformation_matrix[2, 3] = pose.z - return StateSE3.from_transformation_matrix(transformation_matrix) @@ -78,15 +74,11 @@ def main_lidar_to_rear_axle(pose: StateSE3) -> StateSE3: ], dtype=np.float64, ).T - # F = np.eye(3, dtype=np.float64) transformation_matrix = pose.transformation_matrix.copy() transformation_matrix[0:3, 0:3] = transformation_matrix[0:3, 0:3] @ F rotated_pose = StateSE3.from_transformation_matrix(transformation_matrix) - imu_pose = translate_se3_along_body_frame( - rotated_pose, - vector_3d=Vector3D(x=-0.840, y=0.0, z=0.0), - ) + imu_pose = translate_se3_along_body_frame(rotated_pose, vector_3d=Vector3D(x=-0.840, y=0.0, z=0.0)) return imu_pose diff --git a/src/py123d/conversion/datasets/wopd/utils/wopd_constants.py b/src/py123d/conversion/datasets/wopd/utils/wopd_constants.py index 963a056d..82b0c891 100644 --- a/src/py123d/conversion/datasets/wopd/utils/wopd_constants.py +++ b/src/py123d/conversion/datasets/wopd/utils/wopd_constants.py @@ -2,8 +2,8 @@ from py123d.datatypes.detections.box_detection_types import BoxDetectionType from py123d.datatypes.maps.map_datatypes import LaneType, RoadEdgeType, RoadLineType -from py123d.datatypes.sensors.camera.pinhole_camera import PinholeCameraType -from py123d.datatypes.sensors.lidar.lidar import LiDARType +from py123d.datatypes.sensors.lidar import LiDARType +from py123d.datatypes.sensors.pinhole_camera import PinholeCameraType WOPD_AVAILABLE_SPLITS: List[str] = [ "wopd_train", @@ -22,11 +22,11 @@ # https://github.com/waymo-research/waymo-open-dataset/blob/master/src/waymo_open_dataset/dataset.proto#L50 WOPD_CAMERA_TYPES: Dict[int, PinholeCameraType] = { - 1: PinholeCameraType.CAM_F0, # front_camera - 2: PinholeCameraType.CAM_L0, # front_left_camera - 3: PinholeCameraType.CAM_R0, # front_right_camera - 4: PinholeCameraType.CAM_L1, # left_camera - 5: PinholeCameraType.CAM_R1, # right_camera + 1: PinholeCameraType.PCAM_F0, # front_camera + 2: PinholeCameraType.PCAM_L0, # front_left_camera + 3: PinholeCameraType.PCAM_R0, # front_right_camera + 4: PinholeCameraType.PCAM_L1, # left_camera + 5: PinholeCameraType.PCAM_R1, # right_camera } # https://github.com/waymo-research/waymo-open-dataset/blob/master/src/waymo_open_dataset/dataset.proto#L66 diff --git a/src/py123d/conversion/datasets/wopd/waymo_map_utils/wopd_map_utils copy.py b/src/py123d/conversion/datasets/wopd/waymo_map_utils/wopd_map_utils copy.py deleted file mode 100644 index 0cc69d25..00000000 --- a/src/py123d/conversion/datasets/wopd/waymo_map_utils/wopd_map_utils copy.py +++ /dev/null @@ -1,390 +0,0 @@ -# from collections import defaultdict -# from pathlib import Path -# from typing import Dict, List, Optional - -# import geopandas as gpd -# import numpy 
as np -# import numpy.typing as npt -# import pandas as pd -# import shapely.geometry as geom - -# from py123d.common.utils.dependencies import check_dependencies -# from py123d.conversion.datasets.wopd.waymo_map_utils.womp_boundary_utils import extract_lane_boundaries -# from py123d.datatypes.maps.map_datatypes import MapLayer, RoadEdgeType, RoadLineType -# from py123d.geometry import Point3DIndex, Polyline3D -# from py123d.geometry.utils.units import mph_to_mps - -# check_dependencies(modules=["waymo_open_dataset"], optional_name="waymo") -# from waymo_open_dataset import dataset_pb2 - -# # TODO: -# # - Implement stop signs -# # - Implement speed bumps -# # - Implement driveways with a different semantic type if needed -# # - Implement intersections and lane group logic - -# WAYMO_ROAD_LINE_CONVERSION = { -# 0: RoadLineType.UNKNOWN, # aka. UNKNOWN -# 1: RoadLineType.DASHED_WHITE, # aka. BROKEN_SINGLE_WHITE -# 2: RoadLineType.SOLID_WHITE, # aka. SOLID_SINGLE_WHITE -# 3: RoadLineType.DOUBLE_SOLID_WHITE, # aka. SOLID_DOUBLE_WHITE -# 4: RoadLineType.DASHED_YELLOW, # aka. BROKEN_SINGLE_YELLOW -# 5: RoadLineType.DOUBLE_DASH_YELLOW, # aka. BROKEN_DOUBLE_YELLOW -# 6: RoadLineType.SOLID_YELLOW, # aka. SOLID_SINGLE_YELLOW -# 7: RoadLineType.DOUBLE_SOLID_YELLOW, # aka. SOLID_DOUBLE_YELLOW -# 8: RoadLineType.DOUBLE_DASH_YELLOW, # aka. PASSING_DOUBLE_YELLOW -# } - -# WAYMO_ROAD_EDGE_CONVERSION = { -# 0: RoadEdgeType.UNKNOWN, -# 1: RoadEdgeType.ROAD_EDGE_BOUNDARY, -# 2: RoadEdgeType.ROAD_EDGE_MEDIAN, -# } - - -# def convert_wopd_map(frame: dataset_pb2.Frame, map_file_path: Path) -> None: - -# def _extract_polyline(data) -> npt.NDArray[np.float64]: -# polyline = np.array([[p.x, p.y, p.z] for p in data.polyline], dtype=np.float64) -# return polyline - -# def _extract_polygon(data) -> npt.NDArray[np.float64]: -# polygon = np.array([[p.x, p.y, p.z] for p in data.polygon], dtype=np.float64) -# assert polygon.shape[0] >= 3, "Polygon must have at least 3 points" -# assert polygon.shape[1] == 3, "Polygon must have 3 coordinates (x, y, z)" -# return polygon - -# def _extract_neighbors(data) -> List[Dict[str, int]]: -# neighbors = [] -# for neighbor in data: -# neighbors.append( -# { -# "lane_id": neighbor.feature_id, -# "self_start_index": neighbor.self_start_index, -# "self_end_index": neighbor.self_end_index, -# "neighbor_start_index": neighbor.neighbor_start_index, -# "neighbor_end_index": neighbor.neighbor_end_index, -# } -# ) -# return neighbors - -# lanes: Dict[int, npt.NDArray[np.float64]] = {} -# lanes_successors = defaultdict(list) -# lanes_predecessors = defaultdict(list) -# lanes_speed_limit_mps: Dict[int, float] = {} -# lanes_type: Dict[int, int] = {} -# lanes_left_neighbors: Dict[int, List[Dict[str, int]]] = {} -# lanes_right_neighbors: Dict[int, List[Dict[str, int]]] = {} - -# road_lines: Dict[int, npt.NDArray[np.float64]] = {} -# road_lines_type: Dict[int, RoadLineType] = {} - -# road_edges: Dict[int, npt.NDArray[np.float64]] = {} -# road_edges_type: Dict[int, int] = {} - -# crosswalks: Dict[int, npt.NDArray[np.float64]] = {} -# carparks: Dict[int, npt.NDArray[np.float64]] = {} - -# for map_feature in frame.map_features: -# if map_feature.HasField("lane"): -# polyline = _extract_polyline(map_feature.lane) -# # Ignore lanes with less than 2 points or not 2D -# if polyline.ndim != 2 or polyline.shape[0] < 2: -# continue -# lanes[map_feature.id] = polyline -# for lane_id_ in map_feature.lane.exit_lanes: -# lanes_successors[map_feature.id].append(lane_id_) -# for lane_id_ in 
map_feature.lane.exit_lanes: -# lanes_predecessors[map_feature.id].append(lane_id_) -# lanes_speed_limit_mps[map_feature.id] = mph_to_mps(map_feature.lane.speed_limit_mph) -# lanes_type[map_feature.id] = map_feature.lane.type -# lanes_left_neighbors[map_feature.id] = _extract_neighbors(map_feature.lane.left_neighbors) -# lanes_right_neighbors[map_feature.id] = _extract_neighbors(map_feature.lane.right_neighbors) -# elif map_feature.HasField("road_line"): -# polyline = _extract_polyline(map_feature.road_line) -# if polyline.ndim != 2 or polyline.shape[0] < 2: -# continue -# road_lines[map_feature.id] = polyline -# road_lines_type[map_feature.id] = WAYMO_ROAD_LINE_CONVERSION.get( -# map_feature.road_line.type, RoadLineType.UNKNOWN -# ) -# elif map_feature.HasField("road_edge"): -# polyline = _extract_polyline(map_feature.road_edge) -# if polyline.ndim != 2 or polyline.shape[0] < 2: -# continue -# road_edges[map_feature.id] = polyline -# road_edges_type[map_feature.id] = WAYMO_ROAD_EDGE_CONVERSION.get( -# map_feature.road_edge.type, RoadEdgeType.UNKNOWN -# ) -# elif map_feature.HasField("stop_sign"): -# # TODO: implement stop signs -# pass -# elif map_feature.HasField("crosswalk"): -# crosswalks[map_feature.id] = _extract_polygon(map_feature.crosswalk) -# elif map_feature.HasField("speed_bump"): -# # TODO: implement speed bumps -# pass -# elif map_feature.HasField("driveway"): -# # NOTE: Determine whether to use a different semantic type for driveways. -# carparks[map_feature.id] = _extract_polygon(map_feature.driveway) - -# lane_left_boundaries_3d, lane_right_boundaries_3d = extract_lane_boundaries( -# lanes, lanes_successors, lanes_predecessors, road_lines, road_edges -# ) - -# lane_df = get_lane_df( -# lanes, -# lanes_successors, -# lanes_predecessors, -# lanes_speed_limit_mps, -# lane_left_boundaries_3d, -# lane_right_boundaries_3d, -# lanes_type, -# lanes_left_neighbors, -# lanes_right_neighbors, -# ) -# lane_group_df = get_lane_group_df( -# lanes, -# lanes_successors, -# lanes_predecessors, -# lane_left_boundaries_3d, -# lane_right_boundaries_3d, -# ) -# intersection_df = get_intersections_df() -# crosswalk_df = get_crosswalk_df(crosswalks) -# walkway_df = get_walkway_df() -# carpark_df = get_carpark_df(carparks) -# generic_drivable_df = get_generic_drivable_df() -# road_edge_df = get_road_edge_df(road_edges, road_edges_type) -# road_line_df = get_road_line_df(road_lines, road_lines_type) - -# map_file_path.unlink(missing_ok=True) -# if not map_file_path.parent.exists(): -# map_file_path.parent.mkdir(parents=True, exist_ok=True) - -# lane_df.to_file(map_file_path, layer=MapLayer.LANE.serialize(), driver="GPKG") -# lane_group_df.to_file(map_file_path, layer=MapLayer.LANE_GROUP.serialize(), driver="GPKG", mode="a") -# intersection_df.to_file(map_file_path, layer=MapLayer.INTERSECTION.serialize(), driver="GPKG", mode="a") -# crosswalk_df.to_file(map_file_path, layer=MapLayer.CROSSWALK.serialize(), driver="GPKG", mode="a") -# walkway_df.to_file(map_file_path, layer=MapLayer.WALKWAY.serialize(), driver="GPKG", mode="a") -# carpark_df.to_file(map_file_path, layer=MapLayer.CARPARK.serialize(), driver="GPKG", mode="a") -# generic_drivable_df.to_file(map_file_path, layer=MapLayer.GENERIC_DRIVABLE.serialize(), driver="GPKG", mode="a") -# road_edge_df.to_file(map_file_path, layer=MapLayer.ROAD_EDGE.serialize(), driver="GPKG", mode="a") -# road_line_df.to_file(map_file_path, layer=MapLayer.ROAD_LINE.serialize(), driver="GPKG", mode="a") - - -# def get_lane_df( -# lanes: Dict[int, 
npt.NDArray[np.float64]], -# lanes_successors: Dict[int, List[int]], -# lanes_predecessors: Dict[int, List[int]], -# lanes_speed_limit_mps: Dict[int, float], -# lanes_left_boundaries_3d: Dict[int, Polyline3D], -# lanes_right_boundaries_3d: Dict[int, Polyline3D], -# lanes_type: Dict[int, int], -# lanes_left_neighbors: Dict[int, List[Dict[str, int]]], -# lanes_right_neighbors: Dict[int, List[Dict[str, int]]], -# ) -> gpd.GeoDataFrame: - -# ids = [] -# lane_types = [] -# lane_group_ids = [] -# speed_limits_mps = [] -# predecessor_ids = [] -# successor_ids = [] -# left_boundaries = [] -# right_boundaries = [] -# left_lane_ids = [] -# right_lane_ids = [] -# baseline_paths = [] -# geometries = [] - -# def _get_majority_neighbor(neighbors: List[Dict[str, int]]) -> Optional[int]: -# if len(neighbors) == 0: -# return None -# length = { -# neighbor["lane_id"]: neighbor["self_end_index"] - neighbor["self_start_index"] for neighbor in neighbors -# } -# return str(max(length, key=length.get)) - -# for lane_id, lane_centerline_array in lanes.items(): -# if lane_id not in lanes_left_boundaries_3d or lane_id not in lanes_right_boundaries_3d: -# continue -# lane_centerline = Polyline3D.from_array(lane_centerline_array) -# lane_speed_limit_mps = lanes_speed_limit_mps[lane_id] if lanes_speed_limit_mps[lane_id] > 0.0 else None - -# ids.append(lane_id) -# lane_types.append(lanes_type[lane_id]) -# lane_group_ids.append([lane_id]) -# speed_limits_mps.append(lane_speed_limit_mps) -# predecessor_ids.append(lanes_predecessors[lane_id]) -# successor_ids.append(lanes_successors[lane_id]) -# left_boundaries.append(lanes_left_boundaries_3d[lane_id].linestring) -# right_boundaries.append(lanes_right_boundaries_3d[lane_id].linestring) -# left_lane_ids.append(_get_majority_neighbor(lanes_left_neighbors[lane_id])) -# right_lane_ids.append(_get_majority_neighbor(lanes_right_neighbors[lane_id])) -# baseline_paths.append(lane_centerline.linestring) - -# geometry = geom.Polygon( -# np.vstack( -# [ -# lanes_left_boundaries_3d[lane_id].array[:, :2], -# lanes_right_boundaries_3d[lane_id].array[:, :2][::-1], -# ] -# ) -# ) -# geometries.append(geometry) - -# data = pd.DataFrame( -# { -# "id": ids, -# "lane_type": lane_types, -# "lane_group_id": lane_group_ids, -# "speed_limit_mps": speed_limits_mps, -# "predecessor_ids": predecessor_ids, -# "successor_ids": successor_ids, -# "left_boundary": left_boundaries, -# "right_boundary": right_boundaries, -# "left_lane_id": left_lane_ids, -# "right_lane_id": right_lane_ids, -# "baseline_path": baseline_paths, -# } -# ) - -# gdf = gpd.GeoDataFrame(data, geometry=geometries) -# return gdf - - -# def get_lane_group_df( -# lanes: Dict[int, npt.NDArray[np.float64]], -# lanes_successors: Dict[int, List[int]], -# lanes_predecessors: Dict[int, List[int]], -# lanes_left_boundaries_3d: Dict[int, Polyline3D], -# lanes_right_boundaries_3d: Dict[int, Polyline3D], -# ) -> gpd.GeoDataFrame: - -# ids = [] -# lane_ids = [] -# intersection_ids = [] -# predecessor_lane_group_ids = [] -# successor_lane_group_ids = [] -# left_boundaries = [] -# right_boundaries = [] -# geometries = [] - -# # NOTE: WOPD does not provide lane groups, so we create a lane group for each lane. 
-# for lane_id in lanes.keys(): -# if lane_id not in lanes_left_boundaries_3d or lane_id not in lanes_right_boundaries_3d: -# continue -# ids.append(lane_id) -# lane_ids.append([lane_id]) -# intersection_ids.append(None) # WOPD does not provide intersections -# predecessor_lane_group_ids.append(lanes_predecessors[lane_id]) -# successor_lane_group_ids.append(lanes_successors[lane_id]) -# left_boundaries.append(lanes_left_boundaries_3d[lane_id].linestring) -# right_boundaries.append(lanes_right_boundaries_3d[lane_id].linestring) -# geometry = geom.Polygon( -# np.vstack( -# [ -# lanes_left_boundaries_3d[lane_id].array[:, :2], -# lanes_right_boundaries_3d[lane_id].array[:, :2][::-1], -# ] -# ) -# ) -# geometries.append(geometry) - -# data = pd.DataFrame( -# { -# "id": ids, -# "lane_ids": lane_ids, -# "intersection_id": intersection_ids, -# "predecessor_lane_group_ids": predecessor_lane_group_ids, -# "successor_lane_group_ids": successor_lane_group_ids, -# "left_boundary": left_boundaries, -# "right_boundary": right_boundaries, -# } -# ) -# gdf = gpd.GeoDataFrame(data, geometry=geometries) -# return gdf - - -# def get_intersections_df() -> gpd.GeoDataFrame: -# ids = [] -# lane_group_ids = [] -# geometries = [] - -# # NOTE: WOPD does not provide intersections, so we create an empty DataFrame. -# data = pd.DataFrame({"id": ids, "lane_group_ids": lane_group_ids}) -# gdf = gpd.GeoDataFrame(data, geometry=geometries) -# return gdf - - -# def get_carpark_df(carparks) -> gpd.GeoDataFrame: -# ids = list(carparks.keys()) -# outlines = [geom.LineString(outline) for outline in carparks.values()] -# geometries = [geom.Polygon(outline[..., Point3DIndex.XY]) for outline in carparks.values()] - -# data = pd.DataFrame({"id": ids, "outline": outlines}) -# gdf = gpd.GeoDataFrame(data, geometry=geometries) -# return gdf - - -# def get_walkway_df() -> gpd.GeoDataFrame: -# ids = [] -# geometries = [] - -# # NOTE: WOPD does not provide walkways, so we create an empty DataFrame. -# data = pd.DataFrame({"id": ids}) -# gdf = gpd.GeoDataFrame(data, geometry=geometries) -# return gdf - - -# def get_crosswalk_df(crosswalks: Dict[int, npt.NDArray[np.float64]]) -> gpd.GeoDataFrame: -# ids = list(crosswalks.keys()) -# outlines = [geom.LineString(outline) for outline in crosswalks.values()] -# geometries = [geom.Polygon(outline[..., Point3DIndex.XY]) for outline in crosswalks.values()] - -# data = pd.DataFrame({"id": ids, "outline": outlines}) -# gdf = gpd.GeoDataFrame(data, geometry=geometries) -# return gdf - - -# def get_generic_drivable_df() -> gpd.GeoDataFrame: -# ids = [] -# geometries = [] - -# # NOTE: WOPD does not provide generic drivable areas, so we create an empty DataFrame. 
-# data = pd.DataFrame({"id": ids}) -# gdf = gpd.GeoDataFrame(data, geometry=geometries) -# return gdf - - -# def get_road_edge_df( -# road_edges: Dict[int, npt.NDArray[np.float64]], road_edges_type: Dict[int, RoadEdgeType] -# ) -> gpd.GeoDataFrame: -# ids = list(road_edges.keys()) -# geometries = [Polyline3D.from_array(road_edge).linestring for road_edge in road_edges.values()] - -# data = pd.DataFrame( -# { -# "id": ids, -# "road_edge_type": [int(road_edge_type) for road_edge_type in road_edges_type.values()], -# } -# ) -# gdf = gpd.GeoDataFrame(data, geometry=geometries) -# return gdf - - -# def get_road_line_df( -# road_lines: Dict[int, npt.NDArray[np.float64]], road_lines_type: Dict[int, RoadLineType] -# ) -> gpd.GeoDataFrame: -# ids = list(road_lines.keys()) -# geometries = [Polyline3D.from_array(road_edge).linestring for road_edge in road_lines.values()] - -# data = pd.DataFrame( -# { -# "id": ids, -# "road_line_type": [int(road_line_type) for road_line_type in road_lines_type.values()], -# } -# ) -# gdf = gpd.GeoDataFrame(data, geometry=geometries) -# return gdf diff --git a/src/py123d/conversion/datasets/wopd/waymo_sensor_io.py b/src/py123d/conversion/datasets/wopd/waymo_sensor_io.py index ca32c3d8..cf25274c 100644 --- a/src/py123d/conversion/datasets/wopd/waymo_sensor_io.py +++ b/src/py123d/conversion/datasets/wopd/waymo_sensor_io.py @@ -6,8 +6,8 @@ from py123d.common.utils.dependencies import check_dependencies from py123d.conversion.datasets.wopd.utils.wopd_constants import WOPD_CAMERA_TYPES, WOPD_LIDAR_TYPES from py123d.conversion.datasets.wopd.utils.wopd_utils import parse_range_image_and_camera_projection -from py123d.datatypes.sensors.camera.pinhole_camera import PinholeCameraType -from py123d.datatypes.sensors.lidar.lidar import LiDARType +from py123d.datatypes.sensors.lidar import LiDARType +from py123d.datatypes.sensors.pinhole_camera import PinholeCameraType check_dependencies(modules=["tensorflow", "waymo_open_dataset"], optional_name="waymo") import tensorflow as tf diff --git a/src/py123d/conversion/datasets/wopd/wopd_converter.py b/src/py123d/conversion/datasets/wopd/wopd_converter.py index c2fe667f..cc42ab4d 100644 --- a/src/py123d/conversion/datasets/wopd/wopd_converter.py +++ b/src/py123d/conversion/datasets/wopd/wopd_converter.py @@ -19,18 +19,18 @@ from py123d.conversion.datasets.wopd.waymo_map_utils.wopd_map_utils import convert_wopd_map from py123d.conversion.log_writer.abstract_log_writer import AbstractLogWriter, LiDARData from py123d.conversion.map_writer.abstract_map_writer import AbstractMapWriter -from py123d.conversion.registry.lidar_index_registry import DefaultLidarIndex, WOPDLidarIndex +from py123d.conversion.registry.lidar_index_registry import DefaultLiDARIndex, WOPDLiDARIndex from py123d.conversion.utils.sensor_utils.camera_conventions import CameraConvention, convert_camera_convention from py123d.datatypes.detections.box_detections import BoxDetectionMetadata, BoxDetectionSE3, BoxDetectionWrapper from py123d.datatypes.maps.map_metadata import MapMetadata from py123d.datatypes.scene.scene_metadata import LogMetadata -from py123d.datatypes.sensors.camera.pinhole_camera import ( +from py123d.datatypes.sensors.lidar import LiDARMetadata, LiDARType +from py123d.datatypes.sensors.pinhole_camera import ( PinholeCameraMetadata, PinholeCameraType, PinholeDistortion, PinholeIntrinsics, ) -from py123d.datatypes.sensors.lidar.lidar import LiDARMetadata, LiDARType from py123d.datatypes.time.time_point import TimePoint from 
py123d.datatypes.vehicle_state.ego_state import DynamicStateSE3, EgoStateSE3 from py123d.datatypes.vehicle_state.vehicle_parameters import get_wopd_chrysler_pacifica_parameters @@ -143,7 +143,7 @@ def convert_log(self, log_index: int, log_writer: AbstractLogWriter) -> None: location=str(initial_frame.context.stats.location), timestep_seconds=0.1, vehicle_parameters=get_wopd_chrysler_pacifica_parameters(), - camera_metadata=_get_wopd_camera_metadata( + pinhole_camera_metadata=_get_wopd_camera_metadata( initial_frame, self.dataset_converter_config, ), @@ -178,7 +178,7 @@ def convert_log(self, log_index: int, log_writer: AbstractLogWriter) -> None: ego_state=_extract_wopd_ego_state(frame, map_pose_offset), box_detections=_extract_wopd_box_detections(frame, map_pose_offset, self._zero_roll_pitch), traffic_lights=None, # TODO: Check if WOPD has traffic light information - cameras=_extract_wopd_cameras(frame, self.dataset_converter_config), + pinhole_cameras=_extract_wopd_cameras(frame, self.dataset_converter_config), lidars=_extract_wopd_lidars( frame, self._keep_polar_features, @@ -232,7 +232,7 @@ def _get_wopd_camera_metadata( camera_metadata_dict: Dict[PinholeCameraType, PinholeCameraMetadata] = {} - if dataset_converter_config.camera_store_option is not None: + if dataset_converter_config.pinhole_camera_store_option is not None: for calibration in initial_frame.context.camera_calibrations: camera_type = WOPD_CAMERA_TYPES[calibration.name] # https://github.com/waymo-research/waymo-open-dataset/blob/master/src/waymo_open_dataset/dataset.proto#L96 @@ -261,7 +261,7 @@ def _get_wopd_lidar_metadata( laser_metadatas: Dict[LiDARType, LiDARMetadata] = {} # NOTE: Using - lidar_index = WOPDLidarIndex if keep_polar_features else DefaultLidarIndex + lidar_index = WOPDLiDARIndex if keep_polar_features else DefaultLiDARIndex if dataset_converter_config.lidar_store_option is not None: for laser_calibration in initial_frame.context.laser_calibrations: @@ -381,7 +381,7 @@ def _extract_wopd_cameras( camera_dict: Dict[PinholeCameraType, Tuple[Union[str, bytes], StateSE3]] = {} - if dataset_converter_config.include_cameras: + if dataset_converter_config.include_pinhole_cameras: # NOTE @DanielDauner: The extrinsic matrix in frame.context.camera_calibration is fixed to model the ego to camera transformation. # The poses in frame.images[idx] are the motion compensated ego poses when the camera triggers. 
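A minimal usage sketch of the renamed AbstractLogWriter.write keyword arguments, assuming a log_writer, timestamp, and ego_state are already in scope (the byte payload, relative path, and identity pose below are placeholders, not values from any dataset):

    from py123d.datatypes.sensors.fisheye_mei_camera import FisheyeMEICameraType
    from py123d.datatypes.sensors.pinhole_camera import PinholeCameraType
    from py123d.geometry.se import StateSE3

    identity_pose = StateSE3(0, 0, 0, 1.0, 0, 0, 0)  # placeholder camera extrinsic
    log_writer.write(
        timestamp=timestamp,
        ego_state=ego_state,
        # One (data, extrinsic) tuple per camera; raw bytes for the "binary" store option.
        pinhole_cameras={PinholeCameraType.PCAM_F0: (b"<jpeg bytes>", identity_pose)},
        # A relative path string for the "path" store option, mirroring the Pandaset converter above.
        fisheye_mei_cameras={FisheyeMEICameraType.FCAM_L: ("relative/image.png", identity_pose)},
    )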
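The LiDAR index enums used above (e.g. WOPDLiDARIndex) are IntEnum column schemes with slice helpers (XY, XYZ). A short sketch of how a point cloud is sliced with them; the zero array is a stand-in for real sensor data:

    import numpy as np

    from py123d.conversion.registry.lidar_index_registry import WOPDLiDARIndex

    # WOPD points carry six channels: RANGE, INTENSITY, ELONGATION, X, Y, Z.
    lidar_pc = np.zeros((100, len(WOPDLiDARIndex)), dtype=np.float32)  # stand-in point cloud
    xyz = lidar_pc[..., WOPDLiDARIndex.XYZ]  # XYZ resolves to slice(3, 6) in this scheme
    assert xyz.shape == (100, 3)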
diff --git a/src/py123d/conversion/log_writer/abstract_log_writer.py b/src/py123d/conversion/log_writer/abstract_log_writer.py index 6e5185a2..b367ea69 100644 --- a/src/py123d/conversion/log_writer/abstract_log_writer.py +++ b/src/py123d/conversion/log_writer/abstract_log_writer.py @@ -9,8 +9,9 @@ from py123d.datatypes.detections.box_detections import BoxDetectionWrapper from py123d.datatypes.detections.traffic_light_detections import TrafficLightDetectionWrapper from py123d.datatypes.scene.scene_metadata import LogMetadata -from py123d.datatypes.sensors.camera.pinhole_camera import PinholeCameraType -from py123d.datatypes.sensors.lidar.lidar import LiDARType +from py123d.datatypes.sensors.fisheye_mei_camera import FisheyeMEICameraType +from py123d.datatypes.sensors.lidar import LiDARType +from py123d.datatypes.sensors.pinhole_camera import PinholeCameraType from py123d.datatypes.time.time_point import TimePoint from py123d.datatypes.vehicle_state.ego_state import EgoStateSE3 @@ -39,7 +40,8 @@ def write( ego_state: Optional[EgoStateSE3] = None, box_detections: Optional[BoxDetectionWrapper] = None, traffic_lights: Optional[TrafficLightDetectionWrapper] = None, - cameras: Optional[Dict[PinholeCameraType, Tuple[Any, ...]]] = None, + pinhole_cameras: Optional[Dict[PinholeCameraType, Tuple[Any, ...]]] = None, + fisheye_mei_cameras: Optional[Dict[FisheyeMEICameraType, Tuple[Any, ...]]] = None, lidars: Optional[List[LiDARData]] = None, scenario_tags: Optional[List[str]] = None, route_lane_group_ids: Optional[List[int]] = None, diff --git a/src/py123d/conversion/log_writer/arrow_log_writer.py b/src/py123d/conversion/log_writer/arrow_log_writer.py index 532b7dda..446b1126 100644 --- a/src/py123d/conversion/log_writer/arrow_log_writer.py +++ b/src/py123d/conversion/log_writer/arrow_log_writer.py @@ -15,9 +15,9 @@ from py123d.datatypes.detections.traffic_light_detections import TrafficLightDetectionWrapper from py123d.datatypes.scene.arrow.utils.arrow_metadata_utils import add_log_metadata_to_arrow_schema from py123d.datatypes.scene.scene_metadata import LogMetadata -from py123d.datatypes.sensors.camera.fisheye_mei_camera import FisheyeMEICameraType -from py123d.datatypes.sensors.camera.pinhole_camera import PinholeCameraType -from py123d.datatypes.sensors.lidar.lidar import LiDARType +from py123d.datatypes.sensors.fisheye_mei_camera import FisheyeMEICameraType +from py123d.datatypes.sensors.lidar import LiDARType +from py123d.datatypes.sensors.pinhole_camera import PinholeCameraType from py123d.datatypes.time.time_point import TimePoint from py123d.datatypes.vehicle_state.ego_state import EgoStateSE3, EgoStateSE3Index from py123d.geometry import BoundingBoxSE3Index, StateSE3, StateSE3Index, Vector3DIndex @@ -84,7 +84,8 @@ def write( ego_state: Optional[EgoStateSE3] = None, box_detections: Optional[BoxDetectionWrapper] = None, traffic_lights: Optional[TrafficLightDetectionWrapper] = None, - cameras: Optional[Dict[Union[PinholeCameraType, FisheyeMEICameraType], Tuple[Any, ...]]] = None, + pinhole_cameras: Optional[Dict[PinholeCameraType, Tuple[Any, ...]]] = None, + fisheye_mei_cameras: Optional[Dict[FisheyeMEICameraType, Tuple[Any, ...]]] = None, lidars: Optional[List[LiDARData]] = None, scenario_tags: Optional[List[str]] = None, route_lane_group_ids: Optional[List[int]] = None, @@ -159,34 +160,68 @@ def write( record_batch_data["traffic_light_types"] = [traffic_light_types] # -------------------------------------------------------------------------------------------------------------- - # Cameras + # 
Pinhole Cameras # -------------------------------------------------------------------------------------------------------------- - if self._dataset_converter_config.include_cameras: - assert cameras is not None, "Camera data is required but not provided." - provided_cameras = set(cameras.keys()) - expected_cameras = set(self._log_metadata.camera_metadata.keys()) - for camera_type in expected_cameras: - camera_name = camera_type.serialize() + if self._dataset_converter_config.include_pinhole_cameras: + assert pinhole_cameras is not None, "Pinhole camera data is required but not provided." + provided_pinhole_cameras = set(pinhole_cameras.keys()) + expected_pinhole_cameras = set(self._log_metadata.pinhole_camera_metadata.keys()) + for pinhole_camera_type in expected_pinhole_cameras: + pinhole_camera_name = pinhole_camera_type.serialize() # NOTE @DanielDauner: Missing cameras are allowed, e.g., for synchronization mismatches. # In this case, we write None/null to the arrow table. - camera_data: Optional[Any] = None - camera_pose: Optional[StateSE3] = None - if camera_type in provided_cameras: - camera_data, camera_pose = cameras[camera_type] + pinhole_camera_data: Optional[Any] = None + pinhole_camera_pose: Optional[StateSE3] = None + if pinhole_camera_type in provided_pinhole_cameras: + pinhole_camera_data, pinhole_camera_pose = pinhole_cameras[pinhole_camera_type] # TODO: Refactor how camera data handed to the writer. # This should be combined with configurations to write to log, sensor_root, or sensor_root as mp4. - if isinstance(camera_data, Path) or isinstance(camera_data, str): - camera_data = str(camera_data) - elif isinstance(camera_data, bytes): - camera_data = camera_data - elif isinstance(camera_data, np.ndarray): - _, encoded_img = cv2.imencode(".jpg", camera_data) - camera_data = encoded_img.tobytes() + if isinstance(pinhole_camera_data, Path) or isinstance(pinhole_camera_data, str): + pinhole_camera_data = str(pinhole_camera_data) + elif isinstance(pinhole_camera_data, bytes): + pinhole_camera_data = pinhole_camera_data + elif isinstance(pinhole_camera_data, np.ndarray): + _, encoded_img = cv2.imencode(".jpg", pinhole_camera_data) + pinhole_camera_data = encoded_img.tobytes() + + record_batch_data[f"{pinhole_camera_name}_data"] = [pinhole_camera_data] + record_batch_data[f"{pinhole_camera_name}_extrinsic"] = [ + pinhole_camera_pose.array if pinhole_camera_pose else None + ] + + # -------------------------------------------------------------------------------------------------------------- + # Fisheye MEI Cameras + # -------------------------------------------------------------------------------------------------------------- + if self._dataset_converter_config.include_fisheye_mei_cameras: + assert fisheye_mei_cameras is not None, "Fisheye MEI camera data is required but not provided." + provided_fisheye_mei_cameras = set(fisheye_mei_cameras.keys()) + expected_fisheye_mei_cameras = set(self._log_metadata.fisheye_mei_camera_metadata.keys()) + for fisheye_mei_camera_type in expected_fisheye_mei_cameras: + fisheye_mei_camera_name = fisheye_mei_camera_type.serialize() + + # NOTE @DanielDauner: Missing cameras are allowed, e.g., for synchronization mismatches. + # In this case, we write None/null to the arrow table. 
+ fisheye_mei_camera_data: Optional[Any] = None + fisheye_mei_camera_pose: Optional[StateSE3] = None + if fisheye_mei_camera_type in provided_fisheye_mei_cameras: + fisheye_mei_camera_data, fisheye_mei_camera_pose = fisheye_mei_cameras[fisheye_mei_camera_type] - record_batch_data[f"{camera_name}_data"] = [camera_data] - record_batch_data[f"{camera_name}_extrinsic"] = [camera_pose.array if camera_pose else None] + # TODO: Refactor how camera data is handed to the writer. + # This should be combined with configurations to write to log, sensor_root, or sensor_root as mp4. + if isinstance(fisheye_mei_camera_data, Path) or isinstance(fisheye_mei_camera_data, str): + fisheye_mei_camera_data = str(fisheye_mei_camera_data) + elif isinstance(fisheye_mei_camera_data, bytes): + fisheye_mei_camera_data = fisheye_mei_camera_data + elif isinstance(fisheye_mei_camera_data, np.ndarray): + _, encoded_img = cv2.imencode(".jpg", fisheye_mei_camera_data) + fisheye_mei_camera_data = encoded_img.tobytes() + + record_batch_data[f"{fisheye_mei_camera_name}_data"] = [fisheye_mei_camera_data] + record_batch_data[f"{fisheye_mei_camera_name}_extrinsic"] = [ + fisheye_mei_camera_pose.array if fisheye_mei_camera_pose else None + ] # -------------------------------------------------------------------------------------------------------------- # LiDARs @@ -286,21 +321,44 @@ def _build_schema(dataset_converter_config: DatasetConverterConfig, log_metadata ) # -------------------------------------------------------------------------------------------------------------- - # Cameras + # Pinhole Cameras # -------------------------------------------------------------------------------------------------------------- - if dataset_converter_config.include_cameras: - for camera_type in log_metadata.camera_metadata.keys(): - camera_name = camera_type.serialize() + if dataset_converter_config.include_pinhole_cameras: + for pinhole_camera_type in log_metadata.pinhole_camera_metadata.keys(): + pinhole_camera_name = pinhole_camera_type.serialize() # Depending on the storage option, define the schema for camera data - if dataset_converter_config.camera_store_option == "path": - schema_list.append((f"{camera_name}_data", pa.string())) + if dataset_converter_config.pinhole_camera_store_option == "path": + schema_list.append((f"{pinhole_camera_name}_data", pa.string())) + + elif dataset_converter_config.pinhole_camera_store_option == "binary": + schema_list.append((f"{pinhole_camera_name}_data", pa.binary())) + + elif dataset_converter_config.pinhole_camera_store_option == "mp4": + raise NotImplementedError("MP4 format is not yet supported, but planned for future releases.") + + # Add camera pose + schema_list.append((f"{pinhole_camera_name}_extrinsic", pa.list_(pa.float64(), len(StateSE3Index)))) + + # -------------------------------------------------------------------------------------------------------------- + # Fisheye MEI Cameras + # -------------------------------------------------------------------------------------------------------------- + if dataset_converter_config.include_fisheye_mei_cameras: + for fisheye_mei_camera_type in log_metadata.fisheye_mei_camera_metadata.keys(): + fisheye_mei_camera_name = fisheye_mei_camera_type.serialize() + + # Depending on the storage option, define the schema for camera data + if dataset_converter_config.fisheye_mei_camera_store_option == "path": + schema_list.append((f"{fisheye_mei_camera_name}_data", pa.string())) + + elif dataset_converter_config.fisheye_mei_camera_store_option == "binary": +
schema_list.append((f"{fisheye_mei_camera_name}_data", pa.binary())) - elif dataset_converter_config.camera_store_option == "binary": - schema_list.append((f"{camera_name}_data", pa.binary())) + elif dataset_converter_config.fisheye_mei_camera_store_option == "mp4": + raise NotImplementedError("MP4 format is not yet supported, but planned for future releases.") # Add camera pose - schema_list.append((f"{camera_name}_extrinsic", pa.list_(pa.float64(), len(StateSE3Index)))) + schema_list.append((f"{fisheye_mei_camera_name}_extrinsic", pa.list_(pa.float64(), len(StateSE3Index)))) # -------------------------------------------------------------------------------------------------------------- # LiDARs diff --git a/src/py123d/conversion/log_writer/utils/__init__.py b/src/py123d/conversion/log_writer/utils/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/src/py123d/conversion/registry/lidar_index_registry.py b/src/py123d/conversion/registry/lidar_index_registry.py index 7a7891f8..a65903b4 100644 --- a/src/py123d/conversion/registry/lidar_index_registry.py +++ b/src/py123d/conversion/registry/lidar_index_registry.py @@ -28,14 +28,14 @@ def XYZ(self) -> slice: @register_lidar_index -class DefaultLidarIndex(LiDARIndex): +class DefaultLiDARIndex(LiDARIndex): X = 0 Y = 1 Z = 2 @register_lidar_index -class NuPlanLidarIndex(LiDARIndex): +class NuPlanLiDARIndex(LiDARIndex): X = 0 Y = 1 Z = 2 @@ -44,7 +44,7 @@ class NuPlanLidarIndex(LiDARIndex): @register_lidar_index -class CARLALidarIndex(LiDARIndex): +class CARLALiDARIndex(LiDARIndex): X = 0 Y = 1 Z = 2 @@ -52,7 +52,7 @@ class CARLALidarIndex(LiDARIndex): @register_lidar_index -class WOPDLidarIndex(LiDARIndex): +class WOPDLiDARIndex(LiDARIndex): RANGE = 0 INTENSITY = 1 ELONGATION = 2 @@ -62,7 +62,7 @@ class WOPDLidarIndex(LiDARIndex): @register_lidar_index -class Kitti360LidarIndex(LiDARIndex): +class Kitti360LiDARIndex(LiDARIndex): X = 0 Y = 1 Z = 2 @@ -70,7 +70,7 @@ class Kitti360LidarIndex(LiDARIndex): @register_lidar_index -class AVSensorLidarIndex(LiDARIndex): +class AVSensorLiDARIndex(LiDARIndex): """Argoverse Sensor LiDAR Indexing Scheme. NOTE: The LiDAR files also include, 'offset_ns', which we do not currently include. 
@@ -83,7 +83,7 @@ class AVSensorLidarIndex(LiDARIndex): @register_lidar_index -class PandasetLidarIndex(LiDARIndex): +class PandasetLiDARIndex(LiDARIndex): """Pandaset LiDAR Indexing Scheme.""" X = 0 @@ -93,7 +93,7 @@ class PandasetLidarIndex(LiDARIndex): @register_lidar_index -class NuScenesLidarIndex(LiDARIndex): +class NuScenesLiDARIndex(LiDARIndex): X = 0 Y = 1 Z = 2 diff --git a/src/py123d/conversion/sensor_io/camera/jpeg_camera_io.py b/src/py123d/conversion/sensor_io/camera/jpeg_camera_io.py index 327db77c..4e9684e7 100644 --- a/src/py123d/conversion/sensor_io/camera/jpeg_camera_io.py +++ b/src/py123d/conversion/sensor_io/camera/jpeg_camera_io.py @@ -4,7 +4,7 @@ from omegaconf import DictConfig from pyparsing import Union -from py123d.datatypes.sensors.camera.pinhole_camera import PinholeCamera, PinholeCameraMetadata +from py123d.datatypes.sensors.pinhole_camera import PinholeCamera, PinholeCameraMetadata from py123d.script.utils.dataset_path_utils import get_dataset_paths DATASET_PATHS: DictConfig = get_dataset_paths() diff --git a/src/py123d/conversion/sensor_io/lidar/draco_lidar_io.py b/src/py123d/conversion/sensor_io/lidar/draco_lidar_io.py index 80948ef3..61473f08 100644 --- a/src/py123d/conversion/sensor_io/lidar/draco_lidar_io.py +++ b/src/py123d/conversion/sensor_io/lidar/draco_lidar_io.py @@ -4,7 +4,7 @@ import numpy as np import numpy.typing as npt -from py123d.datatypes.sensors.lidar.lidar import LiDAR, LiDARMetadata +from py123d.datatypes.sensors.lidar import LiDAR, LiDARMetadata # TODO: add to config DRACO_QUANTIZATION_BITS: Final[int] = 14 diff --git a/src/py123d/conversion/sensor_io/lidar/file_lidar_io.py b/src/py123d/conversion/sensor_io/lidar/file_lidar_io.py index ab94e578..1a9e2583 100644 --- a/src/py123d/conversion/sensor_io/lidar/file_lidar_io.py +++ b/src/py123d/conversion/sensor_io/lidar/file_lidar_io.py @@ -6,7 +6,7 @@ from omegaconf import DictConfig from py123d.datatypes.scene.scene_metadata import LogMetadata -from py123d.datatypes.sensors.lidar.lidar import LiDARType +from py123d.datatypes.sensors.lidar import LiDARType from py123d.script.utils.dataset_path_utils import get_dataset_paths DATASET_PATHS: DictConfig = get_dataset_paths() diff --git a/src/py123d/conversion/sensor_io/lidar/laz_lidar_io.py b/src/py123d/conversion/sensor_io/lidar/laz_lidar_io.py index cedfb2b6..b109c7ca 100644 --- a/src/py123d/conversion/sensor_io/lidar/laz_lidar_io.py +++ b/src/py123d/conversion/sensor_io/lidar/laz_lidar_io.py @@ -4,7 +4,7 @@ import numpy as np import numpy.typing as npt -from py123d.datatypes.sensors.lidar.lidar import LiDAR, LiDARMetadata +from py123d.datatypes.sensors.lidar import LiDAR, LiDARMetadata def encode_lidar_pc_as_laz_binary(point_cloud: npt.NDArray[np.float32], lidar_metadata: LiDARMetadata) -> bytes: diff --git a/src/py123d/datatypes/scene/abstract_scene.py b/src/py123d/datatypes/scene/abstract_scene.py index cdad4033..33611539 100644 --- a/src/py123d/datatypes/scene/abstract_scene.py +++ b/src/py123d/datatypes/scene/abstract_scene.py @@ -1,15 +1,15 @@ from __future__ import annotations import abc -from typing import List, Optional, Union +from typing import List, Optional from py123d.datatypes.detections.box_detections import BoxDetectionWrapper from py123d.datatypes.detections.traffic_light_detections import TrafficLightDetectionWrapper from py123d.datatypes.maps.abstract_map import AbstractMap from py123d.datatypes.scene.scene_metadata import LogMetadata, SceneExtractionMetadata -from py123d.datatypes.sensors.camera.fisheye_mei_camera import 
FisheyeMEICamera, FisheyeMEICameraType -from py123d.datatypes.sensors.camera.pinhole_camera import PinholeCamera, PinholeCameraType -from py123d.datatypes.sensors.lidar.lidar import LiDAR, LiDARType +from py123d.datatypes.sensors.fisheye_mei_camera import FisheyeMEICamera, FisheyeMEICameraType +from py123d.datatypes.sensors.lidar import LiDAR, LiDARType +from py123d.datatypes.sensors.pinhole_camera import PinholeCamera, PinholeCameraType from py123d.datatypes.time.time_point import TimePoint from py123d.datatypes.vehicle_state.ego_state import EgoStateSE3 from py123d.datatypes.vehicle_state.vehicle_parameters import VehicleParameters @@ -54,9 +54,15 @@ def get_route_lane_group_ids(self, iteration: int) -> Optional[List[int]]: raise NotImplementedError @abc.abstractmethod - def get_camera_at_iteration( - self, iteration: int, camera_type: Union[PinholeCameraType, FisheyeMEICameraType] - ) -> Optional[Union[PinholeCamera, FisheyeMEICamera]]: + def get_pinhole_camera_at_iteration( + self, iteration: int, camera_type: PinholeCameraType + ) -> Optional[PinholeCamera]: + raise NotImplementedError + + @abc.abstractmethod + def get_fisheye_mei_camera_at_iteration( + self, iteration: int, camera_type: FisheyeMEICameraType + ) -> Optional[FisheyeMEICamera]: raise NotImplementedError @abc.abstractmethod @@ -81,8 +87,12 @@ def vehicle_parameters(self) -> VehicleParameters: return self.log_metadata.vehicle_parameters @property - def available_camera_types(self) -> List[Union[PinholeCameraType, FisheyeMEICameraType]]: - return list(self.log_metadata.camera_metadata.keys()) + def available_pinhole_camera_types(self) -> List[PinholeCameraType]: + return list(self.log_metadata.pinhole_camera_metadata.keys()) + + @property + def available_fisheye_mei_camera_types(self) -> List[FisheyeMEICameraType]: + return list(self.log_metadata.fisheye_mei_camera_metadata.keys()) @property def available_lidar_types(self) -> List[LiDARType]: diff --git a/src/py123d/datatypes/scene/arrow/arrow_scene.py b/src/py123d/datatypes/scene/arrow/arrow_scene.py index a3c4db55..79fd4d87 100644 --- a/src/py123d/datatypes/scene/arrow/arrow_scene.py +++ b/src/py123d/datatypes/scene/arrow/arrow_scene.py @@ -19,9 +19,9 @@ ) from py123d.datatypes.scene.arrow.utils.arrow_metadata_utils import get_log_metadata_from_arrow from py123d.datatypes.scene.scene_metadata import LogMetadata, SceneExtractionMetadata -from py123d.datatypes.sensors.camera.fisheye_mei_camera import FisheyeMEICamera, FisheyeMEICameraType -from py123d.datatypes.sensors.camera.pinhole_camera import PinholeCamera, PinholeCameraType -from py123d.datatypes.sensors.lidar.lidar import LiDAR, LiDARType +from py123d.datatypes.sensors.fisheye_mei_camera import FisheyeMEICamera, FisheyeMEICameraType +from py123d.datatypes.sensors.lidar import LiDAR, LiDARType +from py123d.datatypes.sensors.pinhole_camera import PinholeCamera, PinholeCameraType from py123d.datatypes.time.time_point import TimePoint from py123d.datatypes.vehicle_state.ego_state import EgoStateSE3 @@ -128,18 +128,31 @@ def get_route_lane_group_ids(self, iteration: int) -> Optional[List[int]]: route_lane_group_ids = table["route_lane_group_ids"][self._get_table_index(iteration)].as_py() return route_lane_group_ids - def get_camera_at_iteration( + def get_pinhole_camera_at_iteration( self, iteration: int, camera_type: Union[PinholeCameraType, FisheyeMEICameraType] ) -> Optional[Union[PinholeCamera, FisheyeMEICamera]]: - camera: Optional[Union[PinholeCamera, FisheyeMEICamera]] = None - if camera_type in 
self.available_camera_types: - camera = get_camera_from_arrow_table( + pinhole_camera: Optional[PinholeCamera] = None + if camera_type in self.available_pinhole_camera_types: + pinhole_camera = get_camera_from_arrow_table( self._get_recording_table(), self._get_table_index(iteration), camera_type, self.log_metadata, ) - return camera + return pinhole_camera + + def get_fisheye_mei_camera_at_iteration( + self, iteration: int, camera_type: FisheyeMEICameraType + ) -> Optional[FisheyeMEICamera]: + fisheye_mei_camera: Optional[FisheyeMEICamera] = None + if camera_type in self.available_fisheye_mei_camera_types: + fisheye_mei_camera = get_camera_from_arrow_table( + self._get_recording_table(), + self._get_table_index(iteration), + camera_type, + self.log_metadata, + ) + return fisheye_mei_camera def get_lidar_at_iteration(self, iteration: int, lidar_type: LiDARType) -> Optional[LiDAR]: lidar: Optional[LiDAR] = None diff --git a/src/py123d/datatypes/scene/arrow/arrow_scene_builder.py b/src/py123d/datatypes/scene/arrow/arrow_scene_builder.py index e5840f76..f5f9d067 100644 --- a/src/py123d/datatypes/scene/arrow/arrow_scene_builder.py +++ b/src/py123d/datatypes/scene/arrow/arrow_scene_builder.py @@ -158,10 +158,10 @@ def _get_scene_extraction_metadatas(log_path: Union[str, Path], filter: SceneFil # Check if camera data is available for the scene, if specified in filter # NOTE: We only check camera availability at the initial index of the scene. - if filter.camera_types is not None: + if filter.pinhole_camera_types is not None: cameras_available = [ recording_table[f"{camera_type.serialize()}_data"][start_idx].as_py() is not None - for camera_type in filter.camera_types + for camera_type in filter.pinhole_camera_types ] if not all(cameras_available): continue diff --git a/src/py123d/datatypes/scene/arrow/utils/arrow_getters.py b/src/py123d/datatypes/scene/arrow/utils/arrow_getters.py index 1631b9e3..946ffe1f 100644 --- a/src/py123d/datatypes/scene/arrow/utils/arrow_getters.py +++ b/src/py123d/datatypes/scene/arrow/utils/arrow_getters.py @@ -7,6 +7,7 @@ import pyarrow as pa from omegaconf import DictConfig +from py123d.conversion.registry.lidar_index_registry import DefaultLiDARIndex from py123d.conversion.sensor_io.lidar.draco_lidar_io import load_lidar_from_draco_binary from py123d.conversion.sensor_io.lidar.file_lidar_io import load_lidar_pcs_from_file from py123d.conversion.sensor_io.lidar.laz_lidar_io import load_lidar_from_laz_binary @@ -23,10 +24,9 @@ TrafficLightStatus, ) from py123d.datatypes.scene.scene_metadata import LogMetadata -from py123d.datatypes.sensors.camera.fisheye_mei_camera import FisheyeMEICamera, FisheyeMEICameraType -from py123d.datatypes.sensors.camera.pinhole_camera import PinholeCamera, PinholeCameraType -from py123d.datatypes.sensors.lidar.lidar import LiDAR, LiDARMetadata, LiDARType -from py123d.datatypes.sensors.lidar.lidar_index import DefaultLidarIndex +from py123d.datatypes.sensors.fisheye_mei_camera import FisheyeMEICamera, FisheyeMEICameraType +from py123d.datatypes.sensors.lidar import LiDAR, LiDARMetadata, LiDARType +from py123d.datatypes.sensors.pinhole_camera import PinholeCamera, PinholeCameraType from py123d.datatypes.time.time_point import TimePoint from py123d.datatypes.vehicle_state.ego_state import EgoStateSE3 from py123d.datatypes.vehicle_state.vehicle_parameters import VehicleParameters @@ -137,7 +137,7 @@ def get_camera_from_arrow_table( else: raise NotImplementedError("Only string file paths for camera data are supported.") - camera_metadata = 
log_metadata.camera_metadata[camera_type] + camera_metadata = log_metadata.pinhole_camera_metadata[camera_type] if hasattr(camera_metadata, "mirror_parameter") and camera_metadata.mirror_parameter is not None: return FisheyeMEICamera( metadata=camera_metadata, @@ -178,7 +178,7 @@ def get_lidar_from_arrow_table( lidar = LiDAR( metadata=LiDARMetadata( lidar_type=LiDARType.LIDAR_MERGED, - lidar_index=DefaultLidarIndex, + lidar_index=DefaultLiDARIndex, extrinsic=None, ), point_cloud=merged_pc, @@ -192,7 +192,7 @@ def get_lidar_from_arrow_table( lidar_metadata = log_metadata.lidar_metadata[lidar_type] if lidar_data.startswith(b"DRACO"): # NOTE: DRACO only allows XYZ compression, so we need to override the lidar index here. - lidar_metadata.lidar_index = DefaultLidarIndex + lidar_metadata.lidar_index = DefaultLiDARIndex lidar = load_lidar_from_draco_binary(lidar_data, lidar_metadata) elif lidar_data.startswith(b"LASF"): diff --git a/src/py123d/datatypes/scene/scene_filter.py b/src/py123d/datatypes/scene/scene_filter.py index d4bada57..5aa4ae42 100644 --- a/src/py123d/datatypes/scene/scene_filter.py +++ b/src/py123d/datatypes/scene/scene_filter.py @@ -1,9 +1,9 @@ from dataclasses import dataclass from typing import List, Optional, Union -from py123d.datatypes.sensors.camera.fisheye_mei_camera import FisheyeMEICameraType -from py123d.datatypes.sensors.camera.pinhole_camera import PinholeCameraType -from py123d.datatypes.sensors.camera.utils import deserialize_camera_type, get_camera_type_by_value +from py123d.common.utils.enums import SerialIntEnum +from py123d.datatypes.sensors.fisheye_mei_camera import FisheyeMEICameraType +from py123d.datatypes.sensors.pinhole_camera import PinholeCameraType # TODO: Add more filter options (e.g. scene tags, ego movement, or whatever appropriate) @@ -13,7 +13,6 @@ class SceneFilter: split_types: Optional[List[str]] = None split_names: Optional[List[str]] = None - # scene_tags: List[str] = None log_names: Optional[List[str]] = None locations: Optional[List[str]] = None # TODO: @@ -25,24 +24,20 @@ class SceneFilter: duration_s: Optional[float] = 10.0 history_s: Optional[float] = 3.0 - camera_types: Optional[List[Union[PinholeCameraType, FisheyeMEICameraType]]] = None + pinhole_camera_types: Optional[List[PinholeCameraType]] = None + fisheye_mei_camera_types: Optional[List[FisheyeMEICameraType]] = None max_num_scenes: Optional[int] = None shuffle: bool = False def __post_init__(self): - if self.camera_types is not None: - assert isinstance(self.camera_types, list), "camera_types must be a list of CameraType" - camera_types = [] - for camera_type in self.camera_types: - if isinstance(camera_type, str): - camera_type = deserialize_camera_type(camera_type) - camera_types.append(camera_type) - elif isinstance(camera_type, int): - camera_type = get_camera_type_by_value(camera_type) - camera_types.append(camera_type) - elif isinstance(camera_type, (PinholeCameraType, FisheyeMEICameraType)): - camera_types.append(camera_type) - else: - raise ValueError(f"Invalid camera type: {camera_type}") - self.camera_types = camera_types + if self.pinhole_camera_types is not None: + assert isinstance(self.pinhole_camera_types, list), "camera_types must be a list of CameraType" + + def _resolve_enum_arguments( + serial_enum_cls: SerialIntEnum, input: List[Union[int, str, SerialIntEnum]] + ) -> List[SerialIntEnum]: + return [serial_enum_cls.from_arbitrary(value) for value in input] + + self.pinhole_camera_types = _resolve_enum_arguments(PinholeCameraType, self.pinhole_camera_types) + 
self.fisheye_mei_camera_types = _resolve_enum_arguments(FisheyeMEICameraType, self.fisheye_mei_camera_types) if self.fisheye_mei_camera_types is not None else None diff --git a/src/py123d/datatypes/scene/scene_metadata.py b/src/py123d/datatypes/scene/scene_metadata.py index c7f4ae76..751b9e04 100644 --- a/src/py123d/datatypes/scene/scene_metadata.py +++ b/src/py123d/datatypes/scene/scene_metadata.py @@ -1,13 +1,13 @@ from __future__ import annotations from dataclasses import asdict, dataclass, field -from typing import Dict, Optional, Union +from typing import Dict, Optional import py123d from py123d.datatypes.maps.map_metadata import MapMetadata -from py123d.datatypes.sensors.camera.fisheye_mei_camera import FisheyeMEICameraMetadata, FisheyeMEICameraType -from py123d.datatypes.sensors.camera.pinhole_camera import PinholeCameraMetadata, PinholeCameraType -from py123d.datatypes.sensors.lidar.lidar import LiDARMetadata, LiDARType +from py123d.datatypes.sensors.fisheye_mei_camera import FisheyeMEICameraMetadata, FisheyeMEICameraType +from py123d.datatypes.sensors.lidar import LiDARMetadata, LiDARType +from py123d.datatypes.sensors.pinhole_camera import PinholeCameraMetadata, PinholeCameraType from py123d.datatypes.vehicle_state.vehicle_parameters import VehicleParameters @@ -21,9 +21,8 @@ class LogMetadata: timestep_seconds: float vehicle_parameters: Optional[VehicleParameters] = None - camera_metadata: Union[ - Dict[PinholeCameraType, PinholeCameraMetadata], Dict[FisheyeMEICameraType, FisheyeMEICameraMetadata] - ] = field(default_factory=dict) + pinhole_camera_metadata: Dict[PinholeCameraType, PinholeCameraMetadata] = field(default_factory=dict) + fisheye_mei_camera_metadata: Dict[FisheyeMEICameraType, FisheyeMEICameraMetadata] = field(default_factory=dict) lidar_metadata: Dict[LiDARType, LiDARMetadata] = field(default_factory=dict) map_metadata: Optional[MapMetadata] = None @@ -32,23 +31,31 @@ class LogMetadata: @classmethod def from_dict(cls, data_dict: Dict) -> LogMetadata: + # Ego Vehicle Parameters if data_dict["vehicle_parameters"] is not None: data_dict["vehicle_parameters"] = VehicleParameters.from_dict(data_dict["vehicle_parameters"]) - camera_metadata = {} - for key, value in data_dict.get("camera_metadata", {}).items(): - if value.get("mirror_parameter") is not None: - camera_type = FisheyeMEICameraType.deserialize(key) - camera_metadata[camera_type] = FisheyeMEICameraMetadata.from_dict(value) - else: - camera_type = PinholeCameraType.deserialize(key) - camera_metadata[camera_type] = PinholeCameraMetadata.from_dict(value) - data_dict["camera_metadata"] = camera_metadata - + # Pinhole Camera Metadata + pinhole_camera_metadata = {} + for key, value in data_dict.get("pinhole_camera_metadata", {}).items(): + pinhole_camera_metadata[PinholeCameraType.deserialize(key)] = PinholeCameraMetadata.from_dict(value) + data_dict["pinhole_camera_metadata"] = pinhole_camera_metadata + + # Fisheye MEI Camera Metadata + fisheye_mei_camera_metadata = {} + for key, value in data_dict.get("fisheye_mei_camera_metadata", {}).items(): + fisheye_mei_camera_metadata[FisheyeMEICameraType.deserialize(key)] = FisheyeMEICameraMetadata.from_dict( + value + ) + data_dict["fisheye_mei_camera_metadata"] = fisheye_mei_camera_metadata + + # LiDAR Metadata data_dict["lidar_metadata"] = { LiDARType.deserialize(key): LiDARMetadata.from_dict(value) for key, value in data_dict.get("lidar_metadata", {}).items() } + + # Map Metadata if data_dict["map_metadata"] is not None: data_dict["map_metadata"] = MapMetadata.from_dict(data_dict["map_metadata"]) @@ -57,7 +64,12 @@ def 
from_dict(cls, data_dict: Dict) -> LogMetadata: def to_dict(self) -> Dict: data_dict = asdict(self) data_dict["vehicle_parameters"] = self.vehicle_parameters.to_dict() if self.vehicle_parameters else None - data_dict["camera_metadata"] = {key.serialize(): value.to_dict() for key, value in self.camera_metadata.items()} + data_dict["pinhole_camera_metadata"] = { + key.serialize(): value.to_dict() for key, value in self.pinhole_camera_metadata.items() + } + data_dict["fisheye_mei_camera_metadata"] = { + key.serialize(): value.to_dict() for key, value in self.fisheye_mei_camera_metadata.items() + } data_dict["lidar_metadata"] = {key.serialize(): value.to_dict() for key, value in self.lidar_metadata.items()} data_dict["map_metadata"] = self.map_metadata.to_dict() if self.map_metadata else None return data_dict diff --git a/src/py123d/datatypes/sensors/__init__.py b/src/py123d/datatypes/sensors/__init__.py index 89175f33..54cd70a1 100644 --- a/src/py123d/datatypes/sensors/__init__.py +++ b/src/py123d/datatypes/sensors/__init__.py @@ -1,4 +1,4 @@ -from py123d.datatypes.sensors.camera.pinhole_camera import ( +from py123d.datatypes.sensors.pinhole_camera import ( PinholeCameraType, PinholeCamera, PinholeIntrinsicsIndex, @@ -7,3 +7,16 @@ PinholeDistortion, PinholeCameraMetadata, ) +from py123d.datatypes.sensors.fisheye_mei_camera import ( + FisheyeMEICameraType, + FisheyeMEICamera, + FisheyeMEIDistortionIndex, + FisheyeMEIProjectionIndex, + FisheyeMEIProjection, + FisheyeMEICameraMetadata, +) +from py123d.datatypes.sensors.lidar import ( + LiDARType, + LiDARMetadata, + LiDAR, +) diff --git a/src/py123d/datatypes/sensors/camera/__init__.py b/src/py123d/datatypes/sensors/camera/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/src/py123d/datatypes/sensors/camera/utils.py b/src/py123d/datatypes/sensors/camera/utils.py deleted file mode 100644 index 9ed591b0..00000000 --- a/src/py123d/datatypes/sensors/camera/utils.py +++ /dev/null @@ -1,42 +0,0 @@ -from typing import Union - -from py123d.datatypes.sensors.camera.fisheye_mei_camera import FisheyeMEICameraType -from py123d.datatypes.sensors.camera.pinhole_camera import PinholeCameraType - - -def get_camera_type_by_value(value: int) -> Union[PinholeCameraType, FisheyeMEICameraType]: - """Dynamically determine camera type based on value range.""" - pinhole_values = [member.value for member in PinholeCameraType] - fisheye_values = [member.value for member in FisheyeMEICameraType] - - if value in pinhole_values: - return PinholeCameraType(value) - elif value in fisheye_values: - return FisheyeMEICameraType(value) - else: - raise ValueError( - f"Invalid camera type value: {value}. " - f"Valid PinholeCameraType values: {pinhole_values}, " - f"Valid FisheyeMEICameraType values: {fisheye_values}" - ) - - -def deserialize_camera_type(camera_str: str) -> Union[PinholeCameraType, FisheyeMEICameraType]: - """Deserialize camera type string to appropriate enum.""" - try: - return PinholeCameraType.deserialize(camera_str) - except (ValueError, KeyError): - pass - - try: - return FisheyeMEICameraType.deserialize(camera_str) - except (ValueError, KeyError): - pass - - pinhole_names = [member.name.lower() for member in PinholeCameraType] - fisheye_names = [member.name.lower() for member in FisheyeMEICameraType] - raise ValueError( - f"Unknown camera type: '{camera_str}'. 
" - f"Valid PinholeCameraType names: {pinhole_names}, " - f"Valid FisheyeMEICameraType names: {fisheye_names}" - ) diff --git a/src/py123d/datatypes/sensors/camera/fisheye_mei_camera.py b/src/py123d/datatypes/sensors/fisheye_mei_camera.py similarity index 98% rename from src/py123d/datatypes/sensors/camera/fisheye_mei_camera.py rename to src/py123d/datatypes/sensors/fisheye_mei_camera.py index afb27960..d8f53f14 100644 --- a/src/py123d/datatypes/sensors/camera/fisheye_mei_camera.py +++ b/src/py123d/datatypes/sensors/fisheye_mei_camera.py @@ -17,9 +17,8 @@ class FisheyeMEICameraType(SerialIntEnum): Enum for fisheye cameras in d123. """ - # NOTE Use higher values to avoid conflicts with PinholeCameraType - CAM_L = 10 - CAM_R = 11 + FCAM_L = 0 + FCAM_R = 1 @dataclass diff --git a/src/py123d/datatypes/sensors/lidar/lidar.py b/src/py123d/datatypes/sensors/lidar.py similarity index 100% rename from src/py123d/datatypes/sensors/lidar/lidar.py rename to src/py123d/datatypes/sensors/lidar.py diff --git a/src/py123d/datatypes/sensors/lidar/__init__.py b/src/py123d/datatypes/sensors/lidar/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/src/py123d/datatypes/sensors/lidar/lidar_index.py b/src/py123d/datatypes/sensors/lidar/lidar_index.py deleted file mode 100644 index 4a295cce..00000000 --- a/src/py123d/datatypes/sensors/lidar/lidar_index.py +++ /dev/null @@ -1,103 +0,0 @@ -from enum import IntEnum - -from py123d.common.utils.enums import classproperty - -LIDAR_INDEX_REGISTRY = {} - - -def register_lidar_index(enum_class): - LIDAR_INDEX_REGISTRY[enum_class.__name__] = enum_class - return enum_class - - -class LiDARIndex(IntEnum): - - @classproperty - def XY(self) -> slice: - """ - Returns a slice for the XY coordinates of the LiDAR point cloud. - """ - return slice(self.X, self.Y + 1) - - @classproperty - def XYZ(self) -> slice: - """ - Returns a slice for the XYZ coordinates of the LiDAR point cloud. - """ - return slice(self.X, self.Z + 1) - - -@register_lidar_index -class DefaultLidarIndex(LiDARIndex): - X = 0 - Y = 1 - Z = 2 - - -@register_lidar_index -class NuPlanLidarIndex(LiDARIndex): - X = 0 - Y = 1 - Z = 2 - INTENSITY = 3 - RING = 4 - ID = 5 - - -@register_lidar_index -class CARLALidarIndex(LiDARIndex): - X = 0 - Y = 1 - Z = 2 - INTENSITY = 3 - - -@register_lidar_index -class WOPDLidarIndex(LiDARIndex): - RANGE = 0 - INTENSITY = 1 - ELONGATION = 2 - X = 3 - Y = 4 - Z = 5 - - -@register_lidar_index -class Kitti360LidarIndex(LiDARIndex): - X = 0 - Y = 1 - Z = 2 - INTENSITY = 3 - - -@register_lidar_index -class AVSensorLidarIndex(LiDARIndex): - """Argoverse Sensor LiDAR Indexing Scheme. - - NOTE: The LiDAR files also include, 'offset_ns', which we do not currently include. 
- """ - - X = 0 - Y = 1 - Z = 2 - INTENSITY = 3 - - -@register_lidar_index -class PandasetLidarIndex(LiDARIndex): - """Pandaset LiDAR Indexing Scheme.""" - - X = 0 - Y = 1 - Z = 2 - INTENSITY = 3 - - -@register_lidar_index -class NuScenesLidarIndex(LiDARIndex): - - X = 0 - Y = 1 - Z = 2 - INTENSITY = 3 - RING = 4 diff --git a/src/py123d/datatypes/sensors/camera/pinhole_camera.py b/src/py123d/datatypes/sensors/pinhole_camera.py similarity index 96% rename from src/py123d/datatypes/sensors/camera/pinhole_camera.py rename to src/py123d/datatypes/sensors/pinhole_camera.py index 0bb99be6..beefa883 100644 --- a/src/py123d/datatypes/sensors/camera/pinhole_camera.py +++ b/src/py123d/datatypes/sensors/pinhole_camera.py @@ -13,20 +13,17 @@ class PinholeCameraType(SerialIntEnum): - """ - Enum for cameras in py123d. - """ - - CAM_F0 = 0 - CAM_B0 = 1 - CAM_L0 = 2 - CAM_L1 = 3 - CAM_L2 = 4 - CAM_R0 = 5 - CAM_R1 = 6 - CAM_R2 = 7 - CAM_STEREO_L = 8 - CAM_STEREO_R = 9 + + PCAM_F0 = 0 + PCAM_B0 = 1 + PCAM_L0 = 2 + PCAM_L1 = 3 + PCAM_L2 = 4 + PCAM_R0 = 5 + PCAM_R1 = 6 + PCAM_R2 = 7 + PCAM_STEREO_L = 8 + PCAM_STEREO_R = 9 @dataclass diff --git a/src/py123d/script/config/conversion/datasets/av2_sensor_dataset.yaml b/src/py123d/script/config/conversion/datasets/av2_sensor_dataset.yaml index 1a121fd9..ff8a2433 100644 --- a/src/py123d/script/config/conversion/datasets/av2_sensor_dataset.yaml +++ b/src/py123d/script/config/conversion/datasets/av2_sensor_dataset.yaml @@ -21,18 +21,16 @@ av2_sensor_dataset: # Box Detections include_box_detections: true - # Traffic Lights - include_traffic_lights: false - - # Cameras - include_cameras: true - camera_store_option: "binary" # "path", "binary", "mp4" + # Pinhole Cameras + include_pinhole_cameras: true + pinhole_camera_store_option: "binary" # "path", "binary", "mp4" # LiDARs include_lidars: true lidar_store_option: "binary" # "path", "path_merged", "binary" - # Scenario tag / Route - # NOTE: These are only supported for nuPlan. Consider removing or expanding support. + # Not available: + include_traffic_lights: false include_scenario_tags: false include_route: false + include_fisheye_mei_cameras: false diff --git a/src/py123d/script/config/conversion/datasets/carla_dataset.yaml b/src/py123d/script/config/conversion/datasets/carla_dataset.yaml deleted file mode 100644 index c28ccb76..00000000 --- a/src/py123d/script/config/conversion/datasets/carla_dataset.yaml +++ /dev/null @@ -1,35 +0,0 @@ -carla_dataset: - _target_: py123d.conversion.datasets.carla.carla_data_converter.CarlaDataConverter - _convert_: 'all' - - splits: ["carla"] - log_path: "${oc.env:HOME}/carla_workspace/data" - - dataset_converter_config: - _target_: py123d.conversion.dataset_converter_config.DatasetConverterConfig - _convert_: 'all' - - force_log_conversion: ${force_log_conversion} - force_map_conversion: ${force_map_conversion} - - # Ego - include_ego: true - - # Box Detections - include_box_detections: true - - # Traffic Lights - include_traffic_lights: true - - # Cameras - include_cameras: true - camera_store_option: "path" # "path", "binary", "mp4" - - # LiDARs - include_lidars: true - lidar_store_option: "path" # "path", "binary" - - # Scenario tag / Route - # NOTE: These are only supported for nuPlan. Consider removing or expanding support. 
-  include_scenario_tags: true
-  include_route: true
diff --git a/src/py123d/script/config/conversion/datasets/kitti360_dataset.yaml b/src/py123d/script/config/conversion/datasets/kitti360_dataset.yaml
index 5b06890e..4919ed79 100644
--- a/src/py123d/script/config/conversion/datasets/kitti360_dataset.yaml
+++ b/src/py123d/script/config/conversion/datasets/kitti360_dataset.yaml
@@ -2,9 +2,16 @@ kitti360_dataset:
   _target_: py123d.conversion.datasets.kitti360.kitti360_converter.Kitti360Converter
   _convert_: 'all'
 
-  splits: ["kitti360"]
+  splits: ["kitti360_train", "kitti360_val", "kitti360_test"]
+  kitti360_data_root: ${dataset_paths.kitti360_data_root}
 
+  # NOTE: We preprocess detections into a cache directory to speed up repeated conversions.
+  # The bounding boxes are preprocessed into a per-frame format, filtered by ego distance
+  # and by visibility estimated from the lidar point cloud.
+  detection_cache_root: ${dataset_paths.kitti360_data_root}/preprocessed_detections
+  detection_radius: 60.0
+
   dataset_converter_config:
     _target_: py123d.conversion.dataset_converter_config.DatasetConverterConfig
     _convert_: 'all'
 
@@ -21,17 +28,38 @@ kitti360_dataset:
     # Box Detections
     include_box_detections: true
 
-    # Traffic Lights
-    include_traffic_lights: false
+    # Pinhole Cameras
+    include_pinhole_cameras: true
+    pinhole_camera_store_option: "path"
 
-    # Cameras
-    include_cameras: true
-    camera_store_option: "path"
+    # Fisheye Cameras
+    include_fisheye_mei_cameras: false
+    fisheye_mei_camera_store_option: "path"
 
     # LiDARs
     include_lidars: true
     lidar_store_option: "path"
 
-    # Scenario tag / Route
-    include_scenario_tags: false
+    # Not available:
+    include_traffic_lights: false
     include_route: false
+    include_scenario_tags: false
+
+  # NOTE: KITTI-360 does not have official splits, so we create our own here
+  # over the drive sequences (3 train, 4 val, 4 test).
+  train_sequences:
+    - "2013_05_28_drive_0000_sync"
+    - "2013_05_28_drive_0002_sync"
+    - "2013_05_28_drive_0003_sync"
+
+  val_sequences:
+    - "2013_05_28_drive_0004_sync"
+    - "2013_05_28_drive_0005_sync"
+    - "2013_05_28_drive_0006_sync"
+    - "2013_05_28_drive_0007_sync"
+
+  test_sequences:
+    - "2013_05_28_drive_0008_sync"
+    - "2013_05_28_drive_0009_sync"
+    - "2013_05_28_drive_0010_sync"
+    - "2013_05_28_drive_0018_sync"
diff --git a/src/py123d/script/config/conversion/datasets/nuplan_dataset.yaml b/src/py123d/script/config/conversion/datasets/nuplan_dataset.yaml
index 671b960c..19b0d0f2 100644
--- a/src/py123d/script/config/conversion/datasets/nuplan_dataset.yaml
+++ b/src/py123d/script/config/conversion/datasets/nuplan_dataset.yaml
@@ -26,15 +26,17 @@ nuplan_dataset:
   # Traffic Lights
   include_traffic_lights: true
 
-  # Cameras
-  include_cameras: true
-  camera_store_option: "path" # "path", "binary", "mp4"
+  # Pinhole Cameras
+  include_pinhole_cameras: true
+  pinhole_camera_store_option: "path" # "path", "binary", "mp4"
 
   # LiDARs
   include_lidars: true
   lidar_store_option: "path_merged" # "path", "path_merged", "binary"
 
   # Scenario tag / Route
-  # NOTE: These are only supported for nuPlan. Consider removing or expanding support.
include_scenario_tags: true include_route: true + + # Not available: + include_fisheye_mei_cameras: false diff --git a/src/py123d/script/config/conversion/datasets/nuplan_mini_dataset.yaml b/src/py123d/script/config/conversion/datasets/nuplan_mini_dataset.yaml index a59e67a7..50aea778 100644 --- a/src/py123d/script/config/conversion/datasets/nuplan_mini_dataset.yaml +++ b/src/py123d/script/config/conversion/datasets/nuplan_mini_dataset.yaml @@ -26,15 +26,17 @@ nuplan_mini_dataset: # Traffic Lights include_traffic_lights: true - # Cameras - include_cameras: true - camera_store_option: "path" # "path", "binary", "mp4" + # Pinhole Cameras + include_pinhole_cameras: true + pinhole_camera_store_option: "path" # "path", "binary", "mp4" # LiDARs include_lidars: true - lidar_store_option: "binary" # "path", "path_merged", "binary" + lidar_store_option: "path_merged" # "path", "path_merged", "binary" # Scenario tag / Route - # NOTE: These are only supported for nuPlan. Consider removing or expanding support. include_scenario_tags: true include_route: true + + # Not available: + include_fisheye_mei_cameras: false diff --git a/src/py123d/script/config/conversion/datasets/nuscenes_dataset.yaml b/src/py123d/script/config/conversion/datasets/nuscenes_dataset.yaml index 0f3ab95e..7ad5834f 100644 --- a/src/py123d/script/config/conversion/datasets/nuscenes_dataset.yaml +++ b/src/py123d/script/config/conversion/datasets/nuscenes_dataset.yaml @@ -24,13 +24,16 @@ nuscenes_dataset: # Box Detections include_box_detections: true - # Traffic Lights - include_traffic_lights: false - - # Cameras - include_cameras: true - camera_store_option: "path" + # Pinhole Cameras + include_pinhole_cameras: true + pinhole_camera_store_option: "path" - #lidar + # LiDARs include_lidars: true lidar_store_option: "path" + + # Not available: + include_fisheye_mei_cameras: false + include_traffic_lights: false + include_scenario_tags: false + include_route: false diff --git a/src/py123d/script/config/conversion/datasets/nuscenes_mini_dataset.yaml b/src/py123d/script/config/conversion/datasets/nuscenes_mini_dataset.yaml index 4c9ba050..e7181c47 100644 --- a/src/py123d/script/config/conversion/datasets/nuscenes_mini_dataset.yaml +++ b/src/py123d/script/config/conversion/datasets/nuscenes_mini_dataset.yaml @@ -24,13 +24,16 @@ nuscenes_dataset: # Box Detections include_box_detections: true - # Traffic Lights - include_traffic_lights: false - - # Cameras - include_cameras: true - camera_store_option: "binary" + # Pinhole Cameras + include_pinhole_cameras: true + pinhole_camera_store_option: "path" - #lidar + # LiDARs include_lidars: true - lidar_store_option: "binary" + lidar_store_option: "path" + + # Not available: + include_fisheye_mei_cameras: false + include_traffic_lights: false + include_scenario_tags: false + include_route: false diff --git a/src/py123d/script/config/conversion/datasets/pandaset_dataset.yaml b/src/py123d/script/config/conversion/datasets/pandaset_dataset.yaml index 51d8e18c..e3e0b1ec 100644 --- a/src/py123d/script/config/conversion/datasets/pandaset_dataset.yaml +++ b/src/py123d/script/config/conversion/datasets/pandaset_dataset.yaml @@ -12,31 +12,28 @@ pandaset_dataset: force_log_conversion: ${force_log_conversion} force_map_conversion: ${force_map_conversion} - # Map - include_map: false - # Ego include_ego: true # Box Detections include_box_detections: true - # Traffic Lights - include_traffic_lights: false - - # Cameras - include_cameras: true - camera_store_option: "binary" # "path", "binary", "mp4" + # 
Pinhole Cameras + include_pinhole_cameras: true + pinhole_camera_store_option: "path" # LiDARs include_lidars: true - lidar_store_option: "binary" # "path", "path_merged", "binary" + lidar_store_option: "path" - # Scenario tag / Route - # NOTE: These are only supported for nuPlan. Consider removing or expanding support. + # Not available: + include_map: false + include_fisheye_mei_cameras: false + include_traffic_lights: false include_scenario_tags: false include_route: false + # NOTE: Pandaset does not have official splits, so we create our own here. # We use 80% of the logs for training, 10% for validation, and 10% for testing. train_log_names: diff --git a/src/py123d/script/config/conversion/datasets/wopd_dataset.yaml b/src/py123d/script/config/conversion/datasets/wopd_dataset.yaml index 441c4966..ed8a16b7 100644 --- a/src/py123d/script/config/conversion/datasets/wopd_dataset.yaml +++ b/src/py123d/script/config/conversion/datasets/wopd_dataset.yaml @@ -28,15 +28,14 @@ wopd_dataset: # Traffic Lights include_traffic_lights: false - # Cameras - include_cameras: true - camera_store_option: "binary" # "path", "binary", "mp4" + # Pinhole Cameras + include_pinhole_cameras: true + pinhole_camera_store_option: "binary" # "path", "binary", "mp4" # LiDARs include_lidars: true lidar_store_option: "binary" # "path", "path_merged", "binary" - # Scenario tag / Route - # NOTE: These are only supported for nuPlan. Consider removing or expanding support. + # Not available: include_scenario_tags: false include_route: false diff --git a/src/py123d/visualization/matplotlib/camera.py b/src/py123d/visualization/matplotlib/camera.py index 39bf98a3..aadd0baf 100644 --- a/src/py123d/visualization/matplotlib/camera.py +++ b/src/py123d/visualization/matplotlib/camera.py @@ -12,7 +12,7 @@ from py123d.datatypes.detections.box_detection_types import BoxDetectionType from py123d.datatypes.detections.box_detections import BoxDetectionSE3, BoxDetectionWrapper -from py123d.datatypes.sensors.camera.pinhole_camera import PinholeCamera, PinholeIntrinsics +from py123d.datatypes.sensors.pinhole_camera import PinholeCamera, PinholeIntrinsics from py123d.datatypes.vehicle_state.ego_state import EgoStateSE3 from py123d.geometry import BoundingBoxSE3Index, Corners3DIndex from py123d.geometry.transform.transform_se3 import convert_absolute_to_relative_se3_array diff --git a/src/py123d/visualization/viser/elements/sensor_elements.py b/src/py123d/visualization/viser/elements/sensor_elements.py index 410cccb9..2dd02c23 100644 --- a/src/py123d/visualization/viser/elements/sensor_elements.py +++ b/src/py123d/visualization/viser/elements/sensor_elements.py @@ -7,8 +7,8 @@ import viser from py123d.datatypes.scene.abstract_scene import AbstractScene -from py123d.datatypes.sensors.camera.pinhole_camera import PinholeCamera, PinholeCameraType -from py123d.datatypes.sensors.lidar.lidar import LiDARType +from py123d.datatypes.sensors.lidar import LiDARType +from py123d.datatypes.sensors.pinhole_camera import PinholeCamera, PinholeCameraType from py123d.datatypes.vehicle_state.ego_state import EgoStateSE3 from py123d.geometry import StateSE3Index from py123d.geometry.transform.transform_se3 import ( @@ -34,7 +34,7 @@ def add_camera_frustums_to_viser_server( ego_pose[StateSE3Index.XYZ] -= scene_center_array def _add_camera_frustums_to_viser_server(camera_type: PinholeCameraType) -> None: - camera = scene.get_camera_at_iteration(scene_interation, camera_type) + camera = scene.get_pinhole_camera_at_iteration(scene_interation, camera_type) if 
camera is not None: camera_position, camera_quaternion, camera_image = _get_camera_values( camera, @@ -83,7 +83,7 @@ def add_camera_gui_to_viser_server( ) -> None: if viser_config.camera_gui_visible: for camera_type in viser_config.camera_gui_types: - camera = scene.get_camera_at_iteration(scene_interation, camera_type) + camera = scene.get_pinhole_camera_at_iteration(scene_interation, camera_type) if camera is not None: if camera_type in camera_gui_handles: camera_gui_handles[camera_type].image = _rescale_image( diff --git a/src/py123d/visualization/viser/viser_config.py b/src/py123d/visualization/viser/viser_config.py index 77cefd2f..510151f2 100644 --- a/src/py123d/visualization/viser/viser_config.py +++ b/src/py123d/visualization/viser/viser_config.py @@ -1,21 +1,21 @@ from dataclasses import dataclass, field from typing import List, Literal, Optional, Tuple -from py123d.datatypes.sensors.camera.pinhole_camera import PinholeCameraType -from py123d.datatypes.sensors.lidar.lidar import LiDARType +from py123d.datatypes.sensors.lidar import LiDARType +from py123d.datatypes.sensors.pinhole_camera import PinholeCameraType from py123d.visualization.color.color import ELLIS_5 all_camera_types: List[PinholeCameraType] = [ - PinholeCameraType.CAM_F0, - PinholeCameraType.CAM_B0, - PinholeCameraType.CAM_L0, - PinholeCameraType.CAM_L1, - PinholeCameraType.CAM_L2, - PinholeCameraType.CAM_R0, - PinholeCameraType.CAM_R1, - PinholeCameraType.CAM_R2, - PinholeCameraType.CAM_STEREO_L, - PinholeCameraType.CAM_STEREO_R, + PinholeCameraType.PCAM_F0, + PinholeCameraType.PCAM_B0, + PinholeCameraType.PCAM_L0, + PinholeCameraType.PCAM_L1, + PinholeCameraType.PCAM_L2, + PinholeCameraType.PCAM_R0, + PinholeCameraType.PCAM_R1, + PinholeCameraType.PCAM_R2, + PinholeCameraType.PCAM_STEREO_L, + PinholeCameraType.PCAM_STEREO_R, ] all_lidar_types: List[LiDARType] = [ @@ -66,7 +66,7 @@ class ViserConfig: # -> GUI camera_gui_visible: bool = True - camera_gui_types: List[PinholeCameraType] = field(default_factory=lambda: [PinholeCameraType.CAM_F0].copy()) + camera_gui_types: List[PinholeCameraType] = field(default_factory=lambda: [PinholeCameraType.PCAM_F0].copy()) camera_gui_image_scale: float = 0.25 # Resize factor for the camera image shown in the GUI (<1.0 for speed) # LiDAR diff --git a/src/py123d/visualization/viser/viser_viewer.py b/src/py123d/visualization/viser/viser_viewer.py index 89e6d108..e6333f81 100644 --- a/src/py123d/visualization/viser/viser_viewer.py +++ b/src/py123d/visualization/viser/viser_viewer.py @@ -10,7 +10,7 @@ from py123d.datatypes.maps.map_datatypes import MapLayer from py123d.datatypes.scene.abstract_scene import AbstractScene -from py123d.datatypes.sensors.camera.pinhole_camera import PinholeCameraType +from py123d.datatypes.sensors.pinhole_camera import PinholeCameraType from py123d.datatypes.vehicle_state.ego_state import EgoStateSE3 from py123d.visualization.viser.elements import ( add_box_detections_to_viser_server, diff --git a/test_viser.py b/test_viser.py index a2b83796..1f467be0 100644 --- a/test_viser.py +++ b/test_viser.py @@ -4,7 +4,7 @@ from py123d.visualization.viser.viser_viewer import ViserViewer if __name__ == "__main__": - splits = ["kitti360"] + splits = ["kitti360_train"] # splits = ["nuscenes-mini_val", "nuscenes-mini_train"] # splits = ["nuplan-mini_test", "nuplan-mini_train", "nuplan-mini_val"] # splits = ["nuplan_private_test"] @@ -27,7 +27,7 @@ history_s=0.0, timestamp_threshold_s=None, shuffle=True, - # camera_types=[PinholeCameraType.CAM_F0], + # 
pinhole_camera_types=[PinholeCameraType.CAM_F0], ) scene_builder = ArrowSceneBuilder() worker = Sequential() From c40ade58152199e1fb00172ebf9e7759860c422f Mon Sep 17 00:00:00 2001 From: Daniel Dauner Date: Mon, 3 Nov 2025 20:04:53 +0100 Subject: [PATCH 135/145] Fixing a few issues not related to KITTI. Testing all dataset (working fine). --- pyproject.toml | 2 +- .../datasets/av2/av2_map_conversion.py | 8 +- .../datasets/av2/av2_sensor_converter.py | 1 + .../datasets/kitti360/kitti360_converter.py | 1 + .../datasets/nuplan/nuplan_converter.py | 16 ++- .../datasets/nuplan/nuplan_map_conversion.py | 4 +- .../datasets/nuscenes/nuscenes_converter.py | 24 +++- .../nuscenes/utils/nuscenes_constants.py | 3 - .../datasets/pandaset/pandaset_converter.py | 1 + .../conversion/map_writer/gpkg_map_writer.py | 38 +++--- .../conversion/map_writer/utils/gpkg_utils.py | 23 +++- .../scene/arrow/arrow_scene_builder.py | 122 +++++++++++------- .../scene/arrow/utils/arrow_getters.py | 1 - src/py123d/datatypes/scene/scene_filter.py | 17 +-- .../conversion/datasets/pandaset_dataset.yaml | 2 +- .../conversion/datasets/wopd_dataset.yaml | 4 +- test_viser.py | 7 +- 17 files changed, 171 insertions(+), 103 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index 267ec19d..655a2612 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -105,7 +105,7 @@ nuscenes_expanded = [ "yourdfpy==0.0.58", ] waymo = [ - "protobuf==6.30.2", + "protobuf==4.21.0", "tensorflow==2.13.0", "waymo-open-dataset-tf-2-12-0==1.6.6", ] diff --git a/src/py123d/conversion/datasets/av2/av2_map_conversion.py b/src/py123d/conversion/datasets/av2/av2_map_conversion.py index 41851c58..a55a9cf4 100644 --- a/src/py123d/conversion/datasets/av2/av2_map_conversion.py +++ b/src/py123d/conversion/datasets/av2/av2_map_conversion.py @@ -113,6 +113,10 @@ def _get_centerline_from_boundaries( right_boundary=lane_dict["right_lane_boundary"], ) + # NOTE @DanielDauner: Some neighbor lane IDs might not be present in the dataset. + left_lane_id = lane_dict["left_neighbor_id"] if lane_dict["left_neighbor_id"] in lanes else None + right_lane_id = lane_dict["right_neighbor_id"] if lane_dict["right_neighbor_id"] in lanes else None + map_writer.write_lane( CacheLane( object_id=lane_id, @@ -120,8 +124,8 @@ def _get_centerline_from_boundaries( left_boundary=lane_dict["left_lane_boundary"], right_boundary=lane_dict["right_lane_boundary"], centerline=lane_centerline, - left_lane_id=lane_dict["left_neighbor_id"], - right_lane_id=lane_dict["right_neighbor_id"], + left_lane_id=left_lane_id, + right_lane_id=right_lane_id, predecessor_ids=lane_dict["predecessors"], successor_ids=lane_dict["successors"], speed_limit_mps=None, diff --git a/src/py123d/conversion/datasets/av2/av2_sensor_converter.py b/src/py123d/conversion/datasets/av2/av2_sensor_converter.py index 9891e10c..aebebd19 100644 --- a/src/py123d/conversion/datasets/av2/av2_sensor_converter.py +++ b/src/py123d/conversion/datasets/av2/av2_sensor_converter.py @@ -52,6 +52,7 @@ def __init__( dataset_converter_config: DatasetConverterConfig, ) -> None: super().__init__(dataset_converter_config) + assert av2_data_root is not None, "The variable `av2_data_root` must be provided." 
for split in splits: assert ( split in AV2_SENSOR_SPLITS diff --git a/src/py123d/conversion/datasets/kitti360/kitti360_converter.py b/src/py123d/conversion/datasets/kitti360/kitti360_converter.py index d525ab3a..8bb9b497 100644 --- a/src/py123d/conversion/datasets/kitti360/kitti360_converter.py +++ b/src/py123d/conversion/datasets/kitti360/kitti360_converter.py @@ -127,6 +127,7 @@ def __init__( val_sequences: List[str], test_sequences: List[str], ) -> None: + assert kitti360_data_root is not None, "The variable `kitti360_data_root` must be provided." super().__init__(dataset_converter_config) for split in splits: assert split in KITTI360_SPLITS, f"Split {split} is not available. Available splits: {KITTI360_SPLITS}" diff --git a/src/py123d/conversion/datasets/nuplan/nuplan_converter.py b/src/py123d/conversion/datasets/nuplan/nuplan_converter.py index 8f2620ef..8c77169c 100644 --- a/src/py123d/conversion/datasets/nuplan/nuplan_converter.py +++ b/src/py123d/conversion/datasets/nuplan/nuplan_converter.py @@ -84,7 +84,9 @@ def __init__( dataset_converter_config: DatasetConverterConfig, ) -> None: super().__init__(dataset_converter_config) - + assert nuplan_data_root is not None, "The variable `nuplan_data_root` must be provided." + assert nuplan_maps_root is not None, "The variable `nuplan_maps_root` must be provided." + assert nuplan_sensor_root is not None, "The variable `nuplan_sensor_root` must be provided." for split in splits: assert ( split in NUPLAN_DATA_SPLITS @@ -176,7 +178,9 @@ def convert_log(self, log_index: int, log_writer: AbstractLogWriter) -> None: location=nuplan_log_db.log.map_version, timestep_seconds=TARGET_DT, vehicle_parameters=get_nuplan_chrysler_pacifica_parameters(), - pinhole_camera_metadata=_get_nuplan_camera_metadata(source_log_path, self.dataset_converter_config), + pinhole_camera_metadata=_get_nuplan_camera_metadata( + source_log_path, self._nuplan_sensor_root, self.dataset_converter_config + ), lidar_metadata=_get_nuplan_lidar_metadata( self._nuplan_sensor_root, log_name, self.dataset_converter_config ), @@ -235,6 +239,7 @@ def _get_nuplan_map_metadata(location: str) -> MapMetadata: def _get_nuplan_camera_metadata( source_log_path: Path, + nuplan_sensor_root: Path, dataset_converter_config: DatasetConverterConfig, ) -> Dict[PinholeCameraType, PinholeCameraMetadata]: @@ -257,8 +262,11 @@ def _get_camera_metadata(camera_type: PinholeCameraType) -> PinholeCameraMetadat camera_metadata: Dict[str, PinholeCameraMetadata] = {} if dataset_converter_config.include_pinhole_cameras: - for camera_type in NUPLAN_CAMERA_MAPPING.keys(): - camera_metadata[camera_type] = _get_camera_metadata(camera_type) + log_name = source_log_path.stem + for camera_type, nuplan_camera_type in NUPLAN_CAMERA_MAPPING.items(): + camera_folder = nuplan_sensor_root / log_name / f"{nuplan_camera_type.value}" + if camera_folder.exists() and camera_folder.is_dir(): + camera_metadata[camera_type] = _get_camera_metadata(camera_type) return camera_metadata diff --git a/src/py123d/conversion/datasets/nuplan/nuplan_map_conversion.py b/src/py123d/conversion/datasets/nuplan/nuplan_map_conversion.py index bff709be..b8b010cb 100644 --- a/src/py123d/conversion/datasets/nuplan/nuplan_map_conversion.py +++ b/src/py123d/conversion/datasets/nuplan/nuplan_map_conversion.py @@ -135,8 +135,8 @@ def _write_nuplan_lane_connectors(nuplan_gdf: Dict[str, gpd.GeoDataFrame], map_w # 1. 
predecessor_ids, successor_ids lane_connector_row = get_row_with_value(nuplan_gdf["lane_connectors"], "fid", str(lane_id)) - predecessor_ids = lane_connector_row["entry_lane_fid"] - successor_ids = lane_connector_row["exit_lane_fid"] + predecessor_ids = [lane_connector_row["entry_lane_fid"]] + successor_ids = [lane_connector_row["exit_lane_fid"]] # 2. left_boundaries, right_boundaries lane_connector_polygons_row = get_row_with_value( diff --git a/src/py123d/conversion/datasets/nuscenes/nuscenes_converter.py b/src/py123d/conversion/datasets/nuscenes/nuscenes_converter.py index e7cbf2e2..6c22d6ce 100644 --- a/src/py123d/conversion/datasets/nuscenes/nuscenes_converter.py +++ b/src/py123d/conversion/datasets/nuscenes/nuscenes_converter.py @@ -12,7 +12,6 @@ from py123d.conversion.datasets.nuscenes.nuscenes_map_conversion import NUSCENES_MAPS, write_nuscenes_map from py123d.conversion.datasets.nuscenes.utils.nuscenes_constants import ( NUSCENES_CAMERA_TYPES, - NUSCENES_DATA_ROOT, NUSCENES_DATA_SPLITS, NUSCENES_DETECTION_NAME_DICT, NUSCENES_DT, @@ -56,11 +55,19 @@ def __init__( ) -> None: super().__init__(dataset_converter_config) + assert nuscenes_data_root is not None, "The variable `nuscenes_data_root` must be provided." + assert nuscenes_map_root is not None, "The variable `nuscenes_map_root` must be provided." for split in splits: assert ( split in NUSCENES_DATA_SPLITS ), f"Split {split} is not available. Available splits: {NUSCENES_DATA_SPLITS}" + if dataset_converter_config.include_lidars: + assert dataset_converter_config.lidar_store_option in ["path", "binary"], ( + f"Invalid lidar_store_option: {dataset_converter_config.lidar_store_option}. " + f"Supported options are 'path' and 'binary'." + ) + self._splits: List[str] = splits self._nuscenes_data_root: Path = Path(nuscenes_data_root) @@ -175,11 +182,13 @@ def convert_log(self, log_index: int, log_writer: AbstractLogWriter) -> None: pinhole_cameras=_extract_nuscenes_cameras( nusc=nusc, sample=sample, + nuscenes_data_root=self._nuscenes_data_root, dataset_converter_config=self.dataset_converter_config, ), lidars=_extract_nuscenes_lidars( nusc=nusc, sample=sample, + nuscenes_data_root=self._nuscenes_data_root, dataset_converter_config=self.dataset_converter_config, ), ) @@ -385,6 +394,7 @@ def _extract_nuscenes_box_detections(nusc: NuScenes, sample: Dict[str, Any]) -> def _extract_nuscenes_cameras( nusc: NuScenes, sample: Dict[str, Any], + nuscenes_data_root: Path, dataset_converter_config: DatasetConverterConfig, ) -> Dict[PinholeCameraType, Tuple[Union[str, bytes], StateSE3]]: camera_dict: Dict[PinholeCameraType, Tuple[Union[str, bytes], StateSE3]] = {} @@ -407,7 +417,7 @@ def _extract_nuscenes_cameras( extrinsic_matrix[:3, 3] = translation extrinsic = StateSE3.from_transformation_matrix(extrinsic_matrix) - cam_path = NUSCENES_DATA_ROOT / cam_data["filename"] + cam_path = nuscenes_data_root / cam_data["filename"] if cam_path.exists() and cam_path.is_file(): if dataset_converter_config.pinhole_camera_store_option == "path": @@ -426,6 +436,7 @@ def _extract_nuscenes_cameras( def _extract_nuscenes_lidars( nusc: NuScenes, sample: Dict[str, Any], + nuscenes_data_root: Path, dataset_converter_config: DatasetConverterConfig, ) -> List[LiDARData]: lidars: List[LiDARData] = [] @@ -433,15 +444,14 @@ def _extract_nuscenes_lidars( if dataset_converter_config.include_lidars: lidar_token = sample["data"]["LIDAR_TOP"] lidar_data = nusc.get("sample_data", lidar_token) - absolute_lidar_path = NUSCENES_DATA_ROOT / lidar_data["filename"] + 
absolute_lidar_path = nuscenes_data_root / lidar_data["filename"] if absolute_lidar_path.exists() and absolute_lidar_path.is_file(): lidar = LiDARData( - lidar_type=LiDARType.LIDAR_MERGED, - relative_path=absolute_lidar_path.relative_to(NUSCENES_DATA_ROOT), - dataset_root=NUSCENES_DATA_ROOT, + lidar_type=LiDARType.LIDAR_TOP, + relative_path=absolute_lidar_path.relative_to(nuscenes_data_root), + dataset_root=nuscenes_data_root, iteration=lidar_data.get("iteration"), ) lidars.append(lidar) - return lidars diff --git a/src/py123d/conversion/datasets/nuscenes/utils/nuscenes_constants.py b/src/py123d/conversion/datasets/nuscenes/utils/nuscenes_constants.py index 9ea29413..8878401d 100644 --- a/src/py123d/conversion/datasets/nuscenes/utils/nuscenes_constants.py +++ b/src/py123d/conversion/datasets/nuscenes/utils/nuscenes_constants.py @@ -1,5 +1,3 @@ -import os -from pathlib import Path from typing import Final, List from py123d.datatypes.detections.box_detection_types import BoxDetectionType @@ -57,4 +55,3 @@ PinholeCameraType.PCAM_R0: "CAM_FRONT_RIGHT", PinholeCameraType.PCAM_R1: "CAM_BACK_RIGHT", } -NUSCENES_DATA_ROOT = Path(os.environ["NUSCENES_DATA_ROOT"]) diff --git a/src/py123d/conversion/datasets/pandaset/pandaset_converter.py b/src/py123d/conversion/datasets/pandaset/pandaset_converter.py index 9656da00..0f177af1 100644 --- a/src/py123d/conversion/datasets/pandaset/pandaset_converter.py +++ b/src/py123d/conversion/datasets/pandaset/pandaset_converter.py @@ -62,6 +62,7 @@ def __init__( super().__init__(dataset_converter_config) for split in splits: assert split in PANDASET_SPLITS, f"Split {split} is not available. Available splits: {PANDASET_SPLITS}" + assert pandaset_data_root is not None, "The variable `pandaset_data_root` must be provided." self._splits: List[str] = splits self._pandaset_data_root: Path = Path(pandaset_data_root) diff --git a/src/py123d/conversion/map_writer/gpkg_map_writer.py b/src/py123d/conversion/map_writer/gpkg_map_writer.py index d5acf041..289e1cc2 100644 --- a/src/py123d/conversion/map_writer/gpkg_map_writer.py +++ b/src/py123d/conversion/map_writer/gpkg_map_writer.py @@ -204,25 +204,25 @@ def _map_ids_to_integer(map_dfs: Dict[MapLayer, gpd.GeoDataFrame]) -> None: # 1. Remap lane ids in LANE layer if len(map_dfs[MapLayer.LANE]) > 0: - map_dfs[MapLayer.LANE]["id"] = map_dfs[MapLayer.LANE]["id"].map(lane_id_mapping.str_to_int) - map_dfs[MapLayer.LANE]["lane_group_id"] = map_dfs[MapLayer.LANE]["lane_group_id"].map( - lane_group_id_mapping.str_to_int + map_dfs[MapLayer.LANE]["id"] = map_dfs[MapLayer.LANE]["id"].apply(lambda x: lane_id_mapping.map(x)) + map_dfs[MapLayer.LANE]["lane_group_id"] = map_dfs[MapLayer.LANE]["lane_group_id"].apply( + lambda x: lane_group_id_mapping.map(x) ) for column in ["predecessor_ids", "successor_ids"]: map_dfs[MapLayer.LANE][column] = map_dfs[MapLayer.LANE][column].apply(lambda x: lane_id_mapping.map_list(x)) for column in ["left_lane_id", "right_lane_id"]: - map_dfs[MapLayer.LANE][column] = map_dfs[MapLayer.LANE][column].apply( - lambda x: str(lane_id_mapping.str_to_int[x]) if pd.notna(x) and x is not None else x - ) + map_dfs[MapLayer.LANE][column] = map_dfs[MapLayer.LANE][column].apply(lambda x: lane_id_mapping.map(x)) # 2. 
Remap lane group ids in LANE_GROUP if len(map_dfs[MapLayer.LANE_GROUP]) > 0: - map_dfs[MapLayer.LANE_GROUP]["id"] = map_dfs[MapLayer.LANE_GROUP]["id"].map(lane_group_id_mapping.str_to_int) + map_dfs[MapLayer.LANE_GROUP]["id"] = map_dfs[MapLayer.LANE_GROUP]["id"].apply( + lambda x: lane_group_id_mapping.map(x) + ) map_dfs[MapLayer.LANE_GROUP]["lane_ids"] = map_dfs[MapLayer.LANE_GROUP]["lane_ids"].apply( lambda x: lane_id_mapping.map_list(x) ) - map_dfs[MapLayer.LANE_GROUP]["intersection_id"] = map_dfs[MapLayer.LANE_GROUP]["intersection_id"].map( - intersection_id_mapping.str_to_int + map_dfs[MapLayer.LANE_GROUP]["intersection_id"] = map_dfs[MapLayer.LANE_GROUP]["intersection_id"].apply( + lambda x: intersection_id_mapping.map(x) ) for column in ["predecessor_ids", "successor_ids"]: map_dfs[MapLayer.LANE_GROUP][column] = map_dfs[MapLayer.LANE_GROUP][column].apply( @@ -231,8 +231,8 @@ def _map_ids_to_integer(map_dfs: Dict[MapLayer, gpd.GeoDataFrame]) -> None: # 3. Remap lane group ids in INTERSECTION if len(map_dfs[MapLayer.INTERSECTION]) > 0: - map_dfs[MapLayer.INTERSECTION]["id"] = map_dfs[MapLayer.INTERSECTION]["id"].map( - intersection_id_mapping.str_to_int + map_dfs[MapLayer.INTERSECTION]["id"] = map_dfs[MapLayer.INTERSECTION]["id"].apply( + lambda x: intersection_id_mapping.map(x) ) map_dfs[MapLayer.INTERSECTION]["lane_group_ids"] = map_dfs[MapLayer.INTERSECTION]["lane_group_ids"].apply( lambda x: lane_group_id_mapping.map_list(x) @@ -240,14 +240,18 @@ def _map_ids_to_integer(map_dfs: Dict[MapLayer, gpd.GeoDataFrame]) -> None: # 4. Remap ids in other layers if len(map_dfs[MapLayer.WALKWAY]) > 0: - map_dfs[MapLayer.WALKWAY]["id"] = map_dfs[MapLayer.WALKWAY]["id"].map(walkway_id_mapping.str_to_int) + map_dfs[MapLayer.WALKWAY]["id"] = map_dfs[MapLayer.WALKWAY]["id"].apply(lambda x: walkway_id_mapping.map(x)) if len(map_dfs[MapLayer.CARPARK]) > 0: - map_dfs[MapLayer.CARPARK]["id"] = map_dfs[MapLayer.CARPARK]["id"].map(carpark_id_mapping.str_to_int) + map_dfs[MapLayer.CARPARK]["id"] = map_dfs[MapLayer.CARPARK]["id"].apply(lambda x: carpark_id_mapping.map(x)) if len(map_dfs[MapLayer.GENERIC_DRIVABLE]) > 0: - map_dfs[MapLayer.GENERIC_DRIVABLE]["id"] = map_dfs[MapLayer.GENERIC_DRIVABLE]["id"].map( - generic_drivable_id_mapping.str_to_int + map_dfs[MapLayer.GENERIC_DRIVABLE]["id"] = map_dfs[MapLayer.GENERIC_DRIVABLE]["id"].apply( + lambda x: generic_drivable_id_mapping.map(x) ) if len(map_dfs[MapLayer.ROAD_LINE]) > 0: - map_dfs[MapLayer.ROAD_LINE]["id"] = map_dfs[MapLayer.ROAD_LINE]["id"].map(road_line_id_mapping.str_to_int) + map_dfs[MapLayer.ROAD_LINE]["id"] = map_dfs[MapLayer.ROAD_LINE]["id"].apply( + lambda x: road_line_id_mapping.map(x) + ) if len(map_dfs[MapLayer.ROAD_EDGE]) > 0: - map_dfs[MapLayer.ROAD_EDGE]["id"] = map_dfs[MapLayer.ROAD_EDGE]["id"].map(road_edge_id_mapping.str_to_int) + map_dfs[MapLayer.ROAD_EDGE]["id"] = map_dfs[MapLayer.ROAD_EDGE]["id"].apply( + lambda x: road_edge_id_mapping.map(x) + ) diff --git a/src/py123d/conversion/map_writer/utils/gpkg_utils.py b/src/py123d/conversion/map_writer/utils/gpkg_utils.py index 6ad4e559..2b9ab334 100644 --- a/src/py123d/conversion/map_writer/utils/gpkg_utils.py +++ b/src/py123d/conversion/map_writer/utils/gpkg_utils.py @@ -1,7 +1,7 @@ from __future__ import annotations from dataclasses import dataclass -from typing import Dict, List, Optional +from typing import Any, Dict, List, Optional import pandas as pd @@ -16,14 +16,29 @@ def __post_init__(self): @classmethod def from_series(cls, series: pd.Series) -> IntIDMapping: - unique_ids = 
series.unique() + # Drop NaN values and convert all to strings + unique_ids = series.dropna().astype(str).unique() str_to_int = {str_id: idx for idx, str_id in enumerate(unique_ids)} return IntIDMapping(str_to_int) - def map_list(self, id_list: Optional[List[str]]) -> pd.Series: + def map(self, str_like: Any) -> Optional[int]: + # Handle NaN and None values + if pd.isna(str_like) or str_like is None: + return None + + # Convert to string for uniform handling + str_key = str(str_like) + return self.str_to_int.get(str_key, None) + + def map_list(self, id_list: Optional[List[str]]) -> List[int]: if id_list is None: return [] - return [self.str_to_int.get(id_str, -1) for id_str in id_list] + list_ = [] + for id_str in id_list: + mapped_id = self.map(id_str) + if mapped_id is not None: + list_.append(mapped_id) + return list_ class IncrementalIntIDMapping: diff --git a/src/py123d/datatypes/scene/arrow/arrow_scene_builder.py b/src/py123d/datatypes/scene/arrow/arrow_scene_builder.py index f5f9d067..2afebbba 100644 --- a/src/py123d/datatypes/scene/arrow/arrow_scene_builder.py +++ b/src/py123d/datatypes/scene/arrow/arrow_scene_builder.py @@ -102,22 +102,23 @@ def _get_scene_extraction_metadatas(log_path: Union[str, Path], filter: SceneFil recording_table = get_lru_cached_arrow_table(log_path) log_metadata = get_log_metadata_from_arrow(log_path) + start_idx = int(filter.history_s / log_metadata.timestep_seconds) if filter.history_s is not None else 0 + end_idx = ( + len(recording_table) - int(filter.duration_s / log_metadata.timestep_seconds) + if filter.duration_s is not None + else len(recording_table) + ) + # 1. Filter location if ( filter.locations is not None and log_metadata.map_metadata is not None and log_metadata.map_metadata.location not in filter.locations ): - return scene_extraction_metadatas + pass - start_idx = int(filter.history_s / log_metadata.timestep_seconds) if filter.history_s is not None else 0 - end_idx = ( - len(recording_table) - int(filter.duration_s / log_metadata.timestep_seconds) - if filter.duration_s is not None - else len(recording_table) - ) - if filter.duration_s is None: - return [ + elif filter.duration_s is None: + scene_extraction_metadatas.append( SceneExtractionMetadata( initial_uuid=str(recording_table["uuid"][start_idx].as_py()), initial_idx=start_idx, @@ -125,48 +126,75 @@ def _get_scene_extraction_metadatas(log_path: Union[str, Path], filter: SceneFil history_s=filter.history_s if filter.history_s is not None else 0.0, iteration_duration_s=log_metadata.timestep_seconds, ) - ] - - scene_uuid_set = set(filter.scene_uuids) if filter.scene_uuids is not None else None - - for idx in range(start_idx, end_idx): - scene_extraction_metadata: Optional[SceneExtractionMetadata] = None - - if scene_uuid_set is None: - scene_extraction_metadata = SceneExtractionMetadata( - initial_uuid=str(recording_table["uuid"][idx].as_py()), - initial_idx=idx, - duration_s=filter.duration_s, - history_s=filter.history_s, - iteration_duration_s=log_metadata.timestep_seconds, - ) - elif str(recording_table["uuid"][idx]) in scene_uuid_set: - scene_extraction_metadata = SceneExtractionMetadata( - initial_uuid=str(recording_table["uuid"][idx].as_py()), - initial_idx=idx, - duration_s=filter.duration_s, - history_s=filter.history_s, - iteration_duration_s=log_metadata.timestep_seconds, - ) + ) + else: + scene_uuid_set = set(filter.scene_uuids) if filter.scene_uuids is not None else None + for idx in range(start_idx, end_idx): + scene_extraction_metadata: Optional[SceneExtractionMetadata] 
= None + + if scene_uuid_set is None: + scene_extraction_metadata = SceneExtractionMetadata( + initial_uuid=str(recording_table["uuid"][idx].as_py()), + initial_idx=idx, + duration_s=filter.duration_s, + history_s=filter.history_s, + iteration_duration_s=log_metadata.timestep_seconds, + ) + elif str(recording_table["uuid"][idx]) in scene_uuid_set: + scene_extraction_metadata = SceneExtractionMetadata( + initial_uuid=str(recording_table["uuid"][idx].as_py()), + initial_idx=idx, + duration_s=filter.duration_s, + history_s=filter.history_s, + iteration_duration_s=log_metadata.timestep_seconds, + ) - if scene_extraction_metadata is not None: - # Check of timestamp threshold exceeded between previous scene, if specified in filter - if filter.timestamp_threshold_s is not None and len(scene_extraction_metadatas) > 0: - iteration_delta = idx - scene_extraction_metadatas[-1].initial_idx - if (iteration_delta * log_metadata.timestep_seconds) < filter.timestamp_threshold_s: + if scene_extraction_metadata is not None: + # Check of timestamp threshold exceeded between previous scene, if specified in filter + if filter.timestamp_threshold_s is not None and len(scene_extraction_metadatas) > 0: + iteration_delta = idx - scene_extraction_metadatas[-1].initial_idx + if (iteration_delta * log_metadata.timestep_seconds) < filter.timestamp_threshold_s: + continue + + scene_extraction_metadatas.append(scene_extraction_metadata) + + scene_extraction_metadatas_ = [] + for scene_extraction_metadata in scene_extraction_metadatas: + + add_scene = True + start_idx = scene_extraction_metadata.initial_idx + if filter.pinhole_camera_types is not None: + for pinhole_camera_type in filter.pinhole_camera_types: + column_name = f"{pinhole_camera_type.serialize()}_data" + + if ( + pinhole_camera_type in log_metadata.pinhole_camera_metadata + and column_name in recording_table.schema.names + and recording_table[column_name][start_idx].as_py() is not None + ): continue - - # Check if camera data is available for the scene, if specified in filter - # NOTE: We only check camera availability at the initial index of the scene. 
- if filter.pinhole_camera_types is not None: - cameras_available = [ - recording_table[f"{camera_type.serialize()}_data"][start_idx].as_py() is not None - for camera_type in filter.pinhole_camera_types - ] - if not all(cameras_available): + else: + add_scene = False + break + + if filter.fisheye_mei_camera_types is not None: + for fisheye_mei_camera_type in filter.fisheye_mei_camera_types: + column_name = f"{fisheye_mei_camera_type.serialize()}_data" + + if ( + fisheye_mei_camera_type in log_metadata.fisheye_mei_camera_metadata + and column_name in recording_table.schema.names + and recording_table[column_name][start_idx].as_py() is not None + ): continue + else: + add_scene = False + break + + if add_scene: + scene_extraction_metadatas_.append(scene_extraction_metadata) - scene_extraction_metadatas.append(scene_extraction_metadata) + scene_extraction_metadatas = scene_extraction_metadatas_ del recording_table, log_metadata return scene_extraction_metadatas diff --git a/src/py123d/datatypes/scene/arrow/utils/arrow_getters.py b/src/py123d/datatypes/scene/arrow/utils/arrow_getters.py index 946ffe1f..8aea7801 100644 --- a/src/py123d/datatypes/scene/arrow/utils/arrow_getters.py +++ b/src/py123d/datatypes/scene/arrow/utils/arrow_getters.py @@ -169,7 +169,6 @@ def get_lidar_from_arrow_table( if lidar_column_name in arrow_table.schema.names: lidar_data = arrow_table[lidar_column_name][index].as_py() - if isinstance(lidar_data, str): lidar_pc_dict = load_lidar_pcs_from_file(relative_path=lidar_data, log_metadata=log_metadata, index=index) if lidar_type == LiDARType.LIDAR_MERGED: diff --git a/src/py123d/datatypes/scene/scene_filter.py b/src/py123d/datatypes/scene/scene_filter.py index 5aa4ae42..62ad9301 100644 --- a/src/py123d/datatypes/scene/scene_filter.py +++ b/src/py123d/datatypes/scene/scene_filter.py @@ -31,13 +31,14 @@ class SceneFilter: shuffle: bool = False def __post_init__(self): - if self.pinhole_camera_types is not None: - assert isinstance(self.pinhole_camera_types, list), "camera_types must be a list of CameraType" + def _resolve_enum_arguments( + serial_enum_cls: SerialIntEnum, input: Optional[List[Union[int, str, SerialIntEnum]]] + ) -> List[SerialIntEnum]: - def _resolve_enum_arguments( - serial_enum_cls: SerialIntEnum, input: List[Union[int, str, SerialIntEnum]] - ) -> List[SerialIntEnum]: - return [serial_enum_cls.from_arbitrary(value) for value in input] + if input is None: + return None + assert isinstance(input, list), f"input must be a list of {serial_enum_cls.__name__}" + return [serial_enum_cls.from_arbitrary(value) for value in input] - self.pinhole_camera_types = _resolve_enum_arguments(PinholeCameraType, self.pinhole_camera_types) - self.fisheye_mei_camera_types = _resolve_enum_arguments(FisheyeMEICameraType, self.fisheye_mei_camera_types) + self.pinhole_camera_types = _resolve_enum_arguments(PinholeCameraType, self.pinhole_camera_types) + self.fisheye_mei_camera_types = _resolve_enum_arguments(FisheyeMEICameraType, self.fisheye_mei_camera_types) diff --git a/src/py123d/script/config/conversion/datasets/pandaset_dataset.yaml b/src/py123d/script/config/conversion/datasets/pandaset_dataset.yaml index e3e0b1ec..d70a2aab 100644 --- a/src/py123d/script/config/conversion/datasets/pandaset_dataset.yaml +++ b/src/py123d/script/config/conversion/datasets/pandaset_dataset.yaml @@ -24,7 +24,7 @@ pandaset_dataset: # LiDARs include_lidars: true - lidar_store_option: "path" + lidar_store_option: "binary" # Not available: include_map: false diff --git 
a/src/py123d/script/config/conversion/datasets/wopd_dataset.yaml b/src/py123d/script/config/conversion/datasets/wopd_dataset.yaml index ed8a16b7..3fb5acee 100644 --- a/src/py123d/script/config/conversion/datasets/wopd_dataset.yaml +++ b/src/py123d/script/config/conversion/datasets/wopd_dataset.yaml @@ -25,9 +25,6 @@ wopd_dataset: # Box Detections include_box_detections: true - # Traffic Lights - include_traffic_lights: false - # Pinhole Cameras include_pinhole_cameras: true pinhole_camera_store_option: "binary" # "path", "binary", "mp4" @@ -37,5 +34,6 @@ wopd_dataset: lidar_store_option: "binary" # "path", "path_merged", "binary" # Not available: + include_traffic_lights: false include_scenario_tags: false include_route: false diff --git a/test_viser.py b/test_viser.py index 1f467be0..3a1c887e 100644 --- a/test_viser.py +++ b/test_viser.py @@ -1,15 +1,16 @@ from py123d.common.multithreading.worker_sequential import Sequential from py123d.datatypes.scene.arrow.arrow_scene_builder import ArrowSceneBuilder from py123d.datatypes.scene.scene_filter import SceneFilter +from py123d.datatypes.sensors.pinhole_camera import PinholeCameraType from py123d.visualization.viser.viser_viewer import ViserViewer if __name__ == "__main__": - splits = ["kitti360_train"] + # splits = ["kitti360_train"] # splits = ["nuscenes-mini_val", "nuscenes-mini_train"] # splits = ["nuplan-mini_test", "nuplan-mini_train", "nuplan-mini_val"] # splits = ["nuplan_private_test"] # splits = ["carla_test"] - # splits = ["wopd_val"] + splits = ["wopd_val"] # splits = ["av2-sensor_train"] # splits = ["pandaset_test", "pandaset_val", "pandaset_train"] # log_names = ["2021.08.24.13.12.55_veh-45_00386_00472"] @@ -27,7 +28,7 @@ history_s=0.0, timestamp_threshold_s=None, shuffle=True, - # pinhole_camera_types=[PinholeCameraType.CAM_F0], + pinhole_camera_types=[PinholeCameraType.PCAM_F0], ) scene_builder = ArrowSceneBuilder() worker = Sequential() From 7879690d1dda34fce99937a2772202795cee2a07 Mon Sep 17 00:00:00 2001 From: Daniel Dauner Date: Mon, 3 Nov 2025 22:51:53 +0100 Subject: [PATCH 136/145] Dynamic configuration in GUI of viser server. 
--- .../datasets/nuplan/utils/nuplan_constants.py | 16 +-- .../datasets/nuscenes/nuscenes_converter.py | 6 +- .../detections/box_detection_types.py | 8 +- .../viser/elements/detection_elements.py | 39 +++--- .../viser/elements/map_elements.py | 3 +- .../viser/elements/sensor_elements.py | 12 +- .../visualization/viser/viser_config.py | 7 ++ .../visualization/viser/viser_viewer.py | 112 +++++++++++++++--- test_viser.py | 4 +- 9 files changed, 150 insertions(+), 57 deletions(-) diff --git a/src/py123d/conversion/datasets/nuplan/utils/nuplan_constants.py b/src/py123d/conversion/datasets/nuplan/utils/nuplan_constants.py index e2dab9a6..26697ec0 100644 --- a/src/py123d/conversion/datasets/nuplan/utils/nuplan_constants.py +++ b/src/py123d/conversion/datasets/nuplan/utils/nuplan_constants.py @@ -1,6 +1,6 @@ from typing import Dict, Final, List, Set -from py123d.datatypes.detections.box_detection_types import AbstractBoxDetectionType +from py123d.datatypes.detections.box_detection_types import BoxDetectionType from py123d.datatypes.detections.traffic_light_detections import TrafficLightStatus from py123d.datatypes.maps.map_datatypes import RoadLineType from py123d.datatypes.sensors.lidar import LiDARType @@ -16,13 +16,13 @@ NUPLAN_DETECTION_NAME_DICT = { - "vehicle": AbstractBoxDetectionType.VEHICLE, - "bicycle": AbstractBoxDetectionType.BICYCLE, - "pedestrian": AbstractBoxDetectionType.PEDESTRIAN, - "traffic_cone": AbstractBoxDetectionType.TRAFFIC_CONE, - "barrier": AbstractBoxDetectionType.BARRIER, - "czone_sign": AbstractBoxDetectionType.CZONE_SIGN, - "generic_object": AbstractBoxDetectionType.GENERIC_OBJECT, + "vehicle": BoxDetectionType.VEHICLE, + "bicycle": BoxDetectionType.BICYCLE, + "pedestrian": BoxDetectionType.PEDESTRIAN, + "traffic_cone": BoxDetectionType.TRAFFIC_CONE, + "barrier": BoxDetectionType.BARRIER, + "czone_sign": BoxDetectionType.CZONE_SIGN, + "generic_object": BoxDetectionType.GENERIC_OBJECT, } # https://github.com/motional/nuplan-devkit/blob/e9241677997dd86bfc0bcd44817ab04fe631405b/nuplan/database/nuplan_db_orm/utils.py#L1129-L1135 diff --git a/src/py123d/conversion/datasets/nuscenes/nuscenes_converter.py b/src/py123d/conversion/datasets/nuscenes/nuscenes_converter.py index 6c22d6ce..cc055e1d 100644 --- a/src/py123d/conversion/datasets/nuscenes/nuscenes_converter.py +++ b/src/py123d/conversion/datasets/nuscenes/nuscenes_converter.py @@ -8,7 +8,6 @@ from py123d.common.utils.dependencies import check_dependencies from py123d.conversion.abstract_dataset_converter import AbstractDatasetConverter from py123d.conversion.dataset_converter_config import DatasetConverterConfig -from py123d.conversion.datasets.nuplan.nuplan_converter import TARGET_DT from py123d.conversion.datasets.nuscenes.nuscenes_map_conversion import NUSCENES_MAPS, write_nuscenes_map from py123d.conversion.datasets.nuscenes.utils.nuscenes_constants import ( NUSCENES_CAMERA_TYPES, @@ -77,7 +76,6 @@ def __init__( self._use_lanelet2 = use_lanelet2 self._version = version self._scene_tokens_per_split: Dict[str, List[str]] = self._collect_scene_tokens() - self._target_dt: float = TARGET_DT def _collect_scene_tokens(self) -> Dict[str, List[str]]: @@ -153,7 +151,7 @@ def convert_log(self, log_index: int, log_writer: AbstractLogWriter) -> None: split=split, log_name=scene["name"], location=log_record["location"], - timestep_seconds=TARGET_DT, + timestep_seconds=NUSCENES_DT, vehicle_parameters=get_nuscenes_renault_zoe_parameters(), pinhole_camera_metadata=_get_nuscenes_pinhole_camera_metadata(nusc, scene, 
self.dataset_converter_config), lidar_metadata=_get_nuscenes_lidar_metadata(nusc, scene, self.dataset_converter_config), @@ -166,7 +164,7 @@ def convert_log(self, log_index: int, log_writer: AbstractLogWriter) -> None: if log_needs_writing: can_bus = NuScenesCanBus(dataroot=str(self._nuscenes_data_root)) - step_interval = max(1, int(TARGET_DT / NUSCENES_DT)) + step_interval = max(1, int(NUSCENES_DT / NUSCENES_DT)) sample_count = 0 # Traverse all samples in the scene diff --git a/src/py123d/datatypes/detections/box_detection_types.py b/src/py123d/datatypes/detections/box_detection_types.py index 0ffd47ce..05f5aadc 100644 --- a/src/py123d/datatypes/detections/box_detection_types.py +++ b/src/py123d/datatypes/detections/box_detection_types.py @@ -1,6 +1,6 @@ from __future__ import annotations -from importlib import abc +import abc from py123d.common.utils.enums import SerialIntEnum @@ -17,12 +17,6 @@ class BoxDetectionType(AbstractBoxDetectionType): Enum for agents in py123d. """ - # TODO: - # - Add detection types compatible with other datasets - # - Add finer detection types (e.g. bicycle, motorcycle) and add generic types (e.g. two-wheeled vehicle) for general use. - - # NOTE: Current types strongly aligned with nuPlan. - VEHICLE = 0 # Includes all four or more wheeled vehicles, as well as trailers. BICYCLE = 1 # Includes bicycles, motorcycles and tricycles. PEDESTRIAN = 2 # Pedestrians, incl. strollers and wheelchairs. diff --git a/src/py123d/visualization/viser/elements/detection_elements.py b/src/py123d/visualization/viser/elements/detection_elements.py index bdd76e00..41b7001d 100644 --- a/src/py123d/visualization/viser/elements/detection_elements.py +++ b/src/py123d/visualization/viser/elements/detection_elements.py @@ -1,3 +1,5 @@ +from typing import Optional, Union + import numpy as np import numpy.typing as npt import trimesh @@ -22,39 +24,48 @@ def add_box_detections_to_viser_server( initial_ego_state: EgoStateSE3, viser_server: viser.ViserServer, viser_config: ViserConfig, + box_detection_handles: Optional[Union[viser.GlbHandle, viser.LineSegmentsHandle]], ) -> None: + visible_handle_keys = [] if viser_config.bounding_box_visible: if viser_config.bounding_box_type == "mesh": mesh = _get_bounding_box_meshes(scene, scene_interation, initial_ego_state) - viser_server.scene.add_mesh_trimesh( + box_detection_handles["mesh"] = viser_server.scene.add_mesh_trimesh( "box_detections", mesh=mesh, visible=True, ) + visible_handle_keys.append("mesh") elif viser_config.bounding_box_type == "lines": lines, colors, se3_array = _get_bounding_box_outlines(scene, scene_interation, initial_ego_state) - viser_server.scene.add_line_segments( + box_detection_handles["lines"] = viser_server.scene.add_line_segments( "box_detections", points=lines, colors=colors, line_width=viser_config.bounding_box_line_width, + visible=True, ) - viser_server.scene.add_batched_axes( - "frames", - batched_wxyzs=se3_array[:-1, StateSE3Index.QUATERNION], - batched_positions=se3_array[:-1, StateSE3Index.XYZ], - ) - ego_rear_axle_se3 = scene.get_ego_state_at_iteration(scene_interation).rear_axle_se3.array - ego_rear_axle_se3[StateSE3Index.XYZ] -= initial_ego_state.center_se3.array[StateSE3Index.XYZ] - viser_server.scene.add_frame( - "ego_rear_axle", - position=ego_rear_axle_se3[StateSE3Index.XYZ], - wxyz=ego_rear_axle_se3[StateSE3Index.QUATERNION], - ) + # viser_server.scene.add_batched_axes( + # "frames", + # batched_wxyzs=se3_array[:-1, StateSE3Index.QUATERNION], + # batched_positions=se3_array[:-1, StateSE3Index.XYZ], + # 
             )
+            # ego_rear_axle_se3 = scene.get_ego_state_at_iteration(scene_interation).rear_axle_se3.array
+            # ego_rear_axle_se3[StateSE3Index.XYZ] -= initial_ego_state.center_se3.array[StateSE3Index.XYZ]
+            # viser_server.scene.add_frame(
+            #     "ego_rear_axle",
+            #     position=ego_rear_axle_se3[StateSE3Index.XYZ],
+            #     wxyz=ego_rear_axle_se3[StateSE3Index.QUATERNION],
+            # )
+            visible_handle_keys.append("lines")
     else:
         raise ValueError(f"Unknown bounding box type: {viser_config.bounding_box_type}")
 
+    for key in box_detection_handles:
+        if key not in visible_handle_keys and box_detection_handles[key] is not None:
+            box_detection_handles[key].visible = False
+
 
 def _get_bounding_box_meshes(scene: AbstractScene, iteration: int, initial_ego_state: EgoStateSE3) -> trimesh.Trimesh:
diff --git a/src/py123d/visualization/viser/elements/map_elements.py b/src/py123d/visualization/viser/elements/map_elements.py
index 57eefe58..6e16726e 100644
--- a/src/py123d/visualization/viser/elements/map_elements.py
+++ b/src/py123d/visualization/viser/elements/map_elements.py
@@ -29,7 +29,7 @@ def add_map_to_viser_server(
 
     map_trimesh_dict: Optional[Dict[MapLayer, trimesh.Trimesh]] = None
 
-    if len(map_handles) == 0:
+    if len(map_handles) == 0 or viser_config._force_map_update:
         current_ego_state = initial_ego_state
         map_trimesh_dict = _get_map_trimesh_dict(
             scene,
@@ -38,6 +38,7 @@ def add_map_to_viser_server(
             viser_config,
         )
         last_query_position = current_ego_state.center_se3.point_3d
+        viser_config._force_map_update = False
 
     elif viser_config.map_requery:
         current_ego_state = scene.get_ego_state_at_iteration(iteration)
diff --git a/src/py123d/visualization/viser/elements/sensor_elements.py b/src/py123d/visualization/viser/elements/sensor_elements.py
index 2dd02c23..d6a1b5d9 100644
--- a/src/py123d/visualization/viser/elements/sensor_elements.py
+++ b/src/py123d/visualization/viser/elements/sensor_elements.py
@@ -103,7 +103,7 @@ def add_lidar_pc_to_viser_server(
     initial_ego_state: EgoStateSE3,
     viser_server: viser.ViserServer,
     viser_config: ViserConfig,
-    lidar_pc_handle: Optional[viser.PointCloudHandle],
+    lidar_pc_handles: Dict[LiDARType, Optional[viser.PointCloudHandle]],
 ) -> None:
 
     if viser_config.lidar_visible:
@@ -151,17 +151,19 @@ def _load_lidar_points(lidar_type: LiDARType) -> npt.NDArray[np.float32]:
             #     wxyz=lidar_extrinsic[StateSE3Index.QUATERNION],
             # )
 
-        if lidar_pc_handle is not None:
-            lidar_pc_handle.points = points
-            lidar_pc_handle.colors = colors
+        if lidar_pc_handles[LiDARType.LIDAR_MERGED] is not None:
+            lidar_pc_handles[LiDARType.LIDAR_MERGED].points = points
+            lidar_pc_handles[LiDARType.LIDAR_MERGED].colors = colors
         else:
-            lidar_pc_handle = viser_server.scene.add_point_cloud(
+            lidar_pc_handles[LiDARType.LIDAR_MERGED] = viser_server.scene.add_point_cloud(
                 "lidar_points",
                 points=points,
                 colors=colors,
                 point_size=viser_config.lidar_point_size,
                 point_shape=viser_config.lidar_point_shape,
             )
+    elif lidar_pc_handles[LiDARType.LIDAR_MERGED] is not None:
+        lidar_pc_handles[LiDARType.LIDAR_MERGED].visible = False
 
 
 def _get_camera_values(
diff --git a/src/py123d/visualization/viser/viser_config.py b/src/py123d/visualization/viser/viser_config.py
index 510151f2..e8182729 100644
--- a/src/py123d/visualization/viser/viser_config.py
+++ b/src/py123d/visualization/viser/viser_config.py
@@ -46,6 +46,10 @@ class ViserConfig:
     theme_show_share_button: bool = True
     theme_brand_color: Optional[Tuple[int, int, int]] = ELLIS_5[0].rgb
 
+    # Play Controls
+    is_playing: bool = False
+    playback_speed: float = 1.0  # Multiplier for real-time speed
+
     # Map
     map_visible: bool = True
     map_radius: float = 100.0  # [m]
@@ -74,3 +78,6 @@ class ViserConfig:
     lidar_types: List[LiDARType] = field(default_factory=lambda: all_lidar_types.copy())
     lidar_point_size: float = 0.05
     lidar_point_shape: Literal["square", "diamond", "circle", "rounded", "sparkle"] = "circle"
+
+    # internal use
+    _force_map_update: bool = False
diff --git a/src/py123d/visualization/viser/viser_viewer.py b/src/py123d/visualization/viser/viser_viewer.py
index e6333f81..0236e313 100644
--- a/src/py123d/visualization/viser/viser_viewer.py
+++ b/src/py123d/visualization/viser/viser_viewer.py
@@ -1,7 +1,7 @@
 import io
 import logging
 import time
-from typing import Dict, List, Optional
+from typing import Dict, List, Optional, Union
 
 import imageio.v3 as iio
 import viser
@@ -10,6 +10,7 @@
 from py123d.datatypes.maps.map_datatypes import MapLayer
 from py123d.datatypes.scene.abstract_scene import AbstractScene
+from py123d.datatypes.sensors.lidar import LiDARType
 from py123d.datatypes.sensors.pinhole_camera import PinholeCameraType
 from py123d.datatypes.vehicle_state.ego_state import EgoStateSE3
 from py123d.visualization.viser.elements import (
@@ -114,10 +115,33 @@ def set_scene(self, scene: AbstractScene) -> None:
         gui_next_frame = self._viser_server.gui.add_button("Next Frame", disabled=True)
         gui_prev_frame = self._viser_server.gui.add_button("Prev Frame", disabled=True)
         gui_next_scene = self._viser_server.gui.add_button("Next Scene", disabled=False)
-        gui_playing = self._viser_server.gui.add_checkbox("Playing", True)
-        gui_framerate = self._viser_server.gui.add_slider("FPS", min=1, max=100, step=1, initial_value=10)
-        gui_framerate_options = self._viser_server.gui.add_button_group(
-            "FPS options", ("10", "25", "50", "75", "100")
+        gui_playing = self._viser_server.gui.add_checkbox("Playing", self._viser_config.is_playing)
+        gui_speed = self._viser_server.gui.add_slider(
+            "Playback speed", min=0.1, max=10.0, step=0.1, initial_value=self._viser_config.playback_speed
+        )
+        gui_speed_options = self._viser_server.gui.add_button_group(
+            "Options.", ("0.5", "1.0", "2.0", "5.0", "10.0")
+        )
+
+        with self._viser_server.gui.add_folder("Modalities", expand_by_default=True):
+            modalities_map_visible = self._viser_server.gui.add_checkbox("Map", self._viser_config.map_visible)
+            modalities_bounding_box_visible = self._viser_server.gui.add_checkbox(
+                "Bounding Boxes", self._viser_config.bounding_box_visible
+            )
+            modalities_camera_frustum_visible = self._viser_server.gui.add_checkbox(
+                "Camera Frustums", self._viser_config.camera_frustum_visible
+            )
+            modalities_lidar_visible = self._viser_server.gui.add_checkbox("Lidar", self._viser_config.lidar_visible)
+
+        with self._viser_server.gui.add_folder("Options", expand_by_default=False):
+            option_bounding_box_type = self._viser_server.gui.add_dropdown(
+                "Bounding Box Type", ("mesh", "lines"), initial_value=self._viser_config.bounding_box_type
+            )
+            options_map_radius_slider = self._viser_server.gui.add_slider(
+                "Map Radius", min=10.0, max=1000.0, step=1.0, initial_value=self._viser_config.map_radius
+            )
+            options_map_radius_options = self._viser_server.gui.add_button_group(
+                "Map Radius Options.", ("25", "50", "100", "500")
             )
 
         with self._viser_server.gui.add_folder("Render", expand_by_default=False):
@@ -125,7 +149,49 @@ def set_scene(self, scene: AbstractScene) -> None:
             render_view = self._viser_server.gui.add_dropdown(
                 "View", ["3rd Person", "BEV", "Manual"], initial_value="3rd Person"
             )
-            button = self._viser_server.gui.add_button("Render Scene")
+            render_button = self._viser_server.gui.add_button("Render Scene")
+
+        # Options:
+        @modalities_map_visible.on_update
+        def _(_) -> None:
+            for map_handle in map_handles.values():
+                map_handle.visible = modalities_map_visible.value
+            self._viser_config.map_visible = modalities_map_visible.value
+
+        @modalities_bounding_box_visible.on_update
+        def _(_) -> None:
+            if box_detection_handles["lines"] is not None:
+                box_detection_handles["lines"].visible = modalities_bounding_box_visible.value
+            if box_detection_handles["mesh"] is not None:
+                box_detection_handles["mesh"].visible = modalities_bounding_box_visible.value
+            self._viser_config.bounding_box_visible = modalities_bounding_box_visible.value
+
+        @modalities_camera_frustum_visible.on_update
+        def _(_) -> None:
+            for frustum_handle in camera_frustum_handles.values():
+                frustum_handle.visible = modalities_camera_frustum_visible.value
+            self._viser_config.camera_frustum_visible = modalities_camera_frustum_visible.value
+
+        @modalities_lidar_visible.on_update
+        def _(_) -> None:
+            for lidar_pc_handle in lidar_pc_handles.values():
+                if lidar_pc_handle is not None:
+                    lidar_pc_handle.visible = modalities_lidar_visible.value
+            self._viser_config.lidar_visible = modalities_lidar_visible.value
+
+        @option_bounding_box_type.on_update
+        def _(_) -> None:
+            self._viser_config.bounding_box_type = option_bounding_box_type.value
+
+        @options_map_radius_slider.on_update
+        def _(_) -> None:
+            self._viser_config.map_radius = options_map_radius_slider.value
+            self._viser_config._force_map_update = True
+
+        @options_map_radius_options.on_click
+        def _(_) -> None:
+            options_map_radius_slider.value = float(options_map_radius_options.value)
+            self._viser_config._force_map_update = True
 
         # Frame step buttons.
         @gui_next_frame.on_click
@@ -147,11 +213,12 @@ def _(_) -> None:
             gui_timestep.disabled = gui_playing.value
             gui_next_frame.disabled = gui_playing.value
             gui_prev_frame.disabled = gui_playing.value
+            self._viser_config.is_playing = gui_playing.value
 
         # Set the framerate when we click one of the options.
-        @gui_framerate_options.on_click
+        @gui_speed_options.on_click
         def _(_) -> None:
-            gui_framerate.value = int(gui_framerate_options.value)
+            gui_speed.value = float(gui_speed_options.value)
 
         # Toggle frame visibility when the timestep slider changes.
         @gui_timestep.on_update
@@ -163,6 +230,7 @@ def _(_) -> None:
                 initial_ego_state,
                 self._viser_server,
                 self._viser_config,
+                box_detection_handles,
             )
             add_camera_frustums_to_viser_server(
                 scene,
@@ -185,7 +253,7 @@ def _(_) -> None:
                 initial_ego_state,
                 self._viser_server,
                 self._viser_config,
-                lidar_pc_handle,
+                lidar_pc_handles,
             )
             add_map_to_viser_server(
                 scene,
@@ -196,11 +264,16 @@ def _(_) -> None:
                 map_handles,
             )
             rendering_time = time.perf_counter() - start
-            sleep_time = 1.0 / gui_framerate.value - rendering_time
+
+            # Calculate sleep time based on speed factor
+            base_frame_time = scene.log_metadata.timestep_seconds
+            target_frame_time = base_frame_time / gui_speed.value
+            sleep_time = target_frame_time - rendering_time
+
             if sleep_time > 0 and not server_rendering:
                 time.sleep(max(sleep_time, 0.0))
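For intuition, the frame pacing added above ties playback to the log's native rate rather than a fixed FPS slider: the target frame time is the log timestep divided by the speed multiplier, and the loop sleeps whatever that target leaves after rendering. A standalone sketch of the arithmetic with illustrative values (the numbers are assumptions, not taken from a real log):

    timestep_seconds = 0.1  # log recorded at 10 Hz
    speed = 2.0  # "Playback speed" slider value
    rendering_time = 0.03  # seconds spent updating the scene this frame

    target_frame_time = timestep_seconds / speed  # 0.05 s per frame at 2x speed
    sleep_time = target_frame_time - rendering_time  # 0.02 s left to sleep

A frame that renders slower than the target yields a non-positive sleep_time, so it is shown immediately and playback simply lags behind wall-clock time.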
-        @button.on_click
+        @render_button.on_click
         def _(event: viser.GuiEvent) -> None:
             nonlocal server_rendering
             client = event.client
@@ -233,9 +308,13 @@ def _(event: viser.GuiEvent) -> None:
                 client.send_file_download(f"{scene_name}.{format}", content, save_immediately=True)
             server_rendering = False
 
+        box_detection_handles: Dict[str, Union[viser.GlbHandle, viser.LineSegmentsHandle]] = {
+            "mesh": None,
+            "lines": None,
+        }
         camera_frustum_handles: Dict[PinholeCameraType, viser.CameraFrustumHandle] = {}
         camera_gui_handles: Dict[PinholeCameraType, viser.GuiImageHandle] = {}
-        lidar_pc_handle: Optional[viser.PointCloudHandle] = None
+        lidar_pc_handles: Dict[LiDARType, Optional[viser.PointCloudHandle]] = {LiDARType.LIDAR_MERGED: None}
         map_handles: Dict[MapLayer, viser.MeshHandle] = {}
 
         add_box_detections_to_viser_server(
             scene,
@@ -244,6 +323,7 @@ def _(event: viser.GuiEvent) -> None:
             initial_ego_state,
             self._viser_server,
             self._viser_config,
+            box_detection_handles,
         )
         add_camera_frustums_to_viser_server(
             scene,
@@ -266,7 +346,7 @@ def _(event: viser.GuiEvent) -> None:
             initial_ego_state,
             self._viser_server,
             self._viser_config,
-            lidar_pc_handle,
+            lidar_pc_handles,
         )
         add_map_to_viser_server(
             scene,
@@ -279,11 +359,11 @@ def _(event: viser.GuiEvent) -> None:
 
         # Playback update loop.
         while server_playing:
-
             if gui_playing.value and not server_rendering:
                 gui_timestep.value = (gui_timestep.value + 1) % num_frames
-            else:
-                time.sleep(0.1)
+
+            # update config
+            self._viser_config.playback_speed = gui_speed.value
 
             self._viser_server.flush()
             self.next()
diff --git a/test_viser.py b/test_viser.py
index 3a1c887e..ee35330c 100644
--- a/test_viser.py
+++ b/test_viser.py
@@ -7,10 +7,10 @@
 if __name__ == "__main__":
 
     # splits = ["kitti360_train"]
     # splits = ["nuscenes-mini_val", "nuscenes-mini_train"]
-    # splits = ["nuplan-mini_test", "nuplan-mini_train", "nuplan-mini_val"]
+    splits = ["nuplan-mini_test", "nuplan-mini_train", "nuplan-mini_val"]
     # splits = ["nuplan_private_test"]
     # splits = ["carla_test"]
-    splits = ["wopd_val"]
+    # splits = ["wopd_val"]
     # splits = ["av2-sensor_train"]
     # splits = ["pandaset_test", "pandaset_val", "pandaset_train"]
     # log_names = ["2021.08.24.13.12.55_veh-45_00386_00472"]

From 0be8c97d30b5b21f9749eacf327c5224280b94c0 Mon Sep 17 00:00:00 2001
From: Daniel Dauner
Date: Tue, 4 Nov 2025 10:09:24 +0100
Subject: [PATCH 137/145] Add point cloud to lidar writing data (simplifies
 CARLA data saving)

---
 .../log_writer/abstract_log_writer.py         | 16 ++++++++++++++--
 .../conversion/log_writer/arrow_log_writer.py | 19 ++++++++++++-------
 2 files changed, 26 insertions(+), 9 deletions(-)

diff --git a/src/py123d/conversion/log_writer/abstract_log_writer.py b/src/py123d/conversion/log_writer/abstract_log_writer.py
index b367ea69..8f839a29 100644
--- a/src/py123d/conversion/log_writer/abstract_log_writer.py
+++ b/src/py123d/conversion/log_writer/abstract_log_writer.py
@@ -5,6 +5,9 @@
 from pathlib import Path
 from typing import Any, Dict, List, Optional, Tuple, Union
 
+import numpy as np
+import numpy.typing as npt
+
 from py123d.conversion.dataset_converter_config import DatasetConverterConfig
 from py123d.datatypes.detections.box_detections import BoxDetectionWrapper
 from py123d.datatypes.detections.traffic_light_detections import TrafficLightDetectionWrapper
@@ -63,11 +66,20 @@ class LiDARData:
     iteration: Optional[int] = None
     dataset_root: Optional[Union[str, Path]] = None
     relative_path: Optional[Union[str, Path]] = None
+    point_cloud: Optional[npt.NDArray[np.float32]] = None
 
     def __post_init__(self):
-        has_file_path = self.dataset_root is not None and self.relative_path is not None
+        assert (
+            self.has_file_path or self.has_point_cloud
+        ), "Either file path (dataset_root and relative_path) or point_cloud must be provided for LiDARData."
+
+    @property
+    def has_file_path(self) -> bool:
+        return self.dataset_root is not None and self.relative_path is not None
 
-        assert has_file_path, "Either file path (dataset_root and relative_path) must be provided for LiDARData."
+    @property
+    def has_point_cloud(self) -> bool:
+        return self.point_cloud is not None
 
 
 @dataclass
diff --git a/src/py123d/conversion/log_writer/arrow_log_writer.py b/src/py123d/conversion/log_writer/arrow_log_writer.py
index 446b1126..335fc9e3 100644
--- a/src/py123d/conversion/log_writer/arrow_log_writer.py
+++ b/src/py123d/conversion/log_writer/arrow_log_writer.py
@@ -234,6 +234,7 @@ def write(
             # LiDAR point clouds in a single file. In this case, writing the file path several times is wasteful.
             # Instead, we store the file path once, and divide the point clouds during reading.
             assert len(lidars) == 1, "Exactly one LiDAR data must be provided for merged LiDAR storage."
+            assert lidars[0].has_file_path, "LiDAR data must provide file path for merged LiDAR storage."
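The LiDARData change above means a converter can now hand the log writer either a file reference or an in-memory array. A minimal sketch of the two construction modes (the lidar_type keyword and the (N, 4) float32 point layout are assumptions for illustration; the diff itself only shows the dataset_root/relative_path and point_cloud fields):

    import numpy as np

    from py123d.conversion.log_writer.abstract_log_writer import LiDARData
    from py123d.datatypes.sensors.lidar import LiDARType

    # Mode 1: reference a point cloud file on disk, as converters did before this patch.
    by_path = LiDARData(lidar_type=LiDARType.LIDAR_TOP, dataset_root="/data/logs", relative_path="lidar/0001.bin")

    # Mode 2: hand over the points directly, e.g. streamed out of a CARLA sensor.
    points = np.zeros((1024, 4), dtype=np.float32)  # shape assumed for illustration
    by_points = LiDARData(lidar_type=LiDARType.LIDAR_TOP, point_cloud=points)

    assert by_path.has_file_path and not by_path.has_point_cloud
    assert by_points.has_point_cloud and not by_points.has_file_path

Note that the merged and "path" store options below still assert a file path, so in-memory points are only consumed by the "binary" branch.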
 
             merged_lidar_data: Optional[str] = str(lidars[0].relative_path)
             record_batch_data[f"{LiDARType.LIDAR_MERGED.serialize()}_data"] = [merged_lidar_data]
 
@@ -393,6 +394,7 @@ def _prepare_lidar_data_dict(self, lidars: List[LiDARData]) -> Dict[LiDARType, U
 
         if self._dataset_converter_config.lidar_store_option == "path":
             for lidar_data in lidars:
+                assert lidar_data.has_file_path, "LiDAR data must provide file path for path storage."
                 lidar_data_dict[lidar_data.lidar_type] = str(lidar_data.relative_path)
 
         elif self._dataset_converter_config.lidar_store_option == "binary":
@@ -400,14 +402,17 @@ def _prepare_lidar_data_dict(self, lidars: List[LiDARData]) -> Dict[LiDARType, U
 
             # 1. Load point clouds from files
             for lidar_data in lidars:
-                lidar_pcs_dict.update(
-                    load_lidar_pcs_from_file(
-                        lidar_data.relative_path,
-                        self._log_metadata,
-                        lidar_data.iteration,
-                        lidar_data.dataset_root,
+                if lidar_data.has_point_cloud:
+                    lidar_pcs_dict[lidar_data.lidar_type] = lidar_data.point_cloud
+                elif lidar_data.has_file_path:
+                    lidar_pcs_dict.update(
+                        load_lidar_pcs_from_file(
+                            lidar_data.relative_path,
+                            self._log_metadata,
+                            lidar_data.iteration,
+                            lidar_data.dataset_root,
+                        )
                     )
-                )
 
             # 2. Compress the point clouds to bytes
             for lidar_type, point_cloud in lidar_pcs_dict.items():

From 8997c9c2e7312746bab546a4702380ab2a76925c Mon Sep 17 00:00:00 2001
From: Daniel Dauner
Date: Tue, 4 Nov 2025 13:29:10 +0100
Subject: [PATCH 138/145] Add a few config helpers to viser.

---
 src/py123d/visualization/viser/viser_config.py |  4 ++--
 src/py123d/visualization/viser/viser_viewer.py | 14 +++++++++++++-
 2 files changed, 15 insertions(+), 3 deletions(-)

diff --git a/src/py123d/visualization/viser/viser_config.py b/src/py123d/visualization/viser/viser_config.py
index e8182729..a9c3c5e1 100644
--- a/src/py123d/visualization/viser/viser_config.py
+++ b/src/py123d/visualization/viser/viser_config.py
@@ -52,13 +52,13 @@ class ViserConfig:
 
     # Map
     map_visible: bool = True
-    map_radius: float = 100.0  # [m]
+    map_radius: float = 500.0  # [m]
     map_non_road_z_offset: float = 0.1  # small z-translation to place crosswalks, parking, etc. on top of the road
     map_requery: bool = True  # Re-query map when ego vehicle moves out of current map bounds
 
     # Bounding boxes
     bounding_box_visible: bool = True
-    bounding_box_type: Literal["mesh", "lines"] = "lines"
+    bounding_box_type: Literal["mesh", "lines"] = "mesh"
     bounding_box_line_width: float = 4.0
 
     # Cameras
diff --git a/src/py123d/visualization/viser/viser_viewer.py b/src/py123d/visualization/viser/viser_viewer.py
index 0236e313..6c0123b7 100644
--- a/src/py123d/visualization/viser/viser_viewer.py
+++ b/src/py123d/visualization/viser/viser_viewer.py
@@ -104,6 +104,8 @@ def set_scene(self, scene: AbstractScene) -> None:
         server_rendering = False
 
         with self._viser_server.gui.add_folder("Playback"):
+            gui_info = self._viser_server.gui.add_markdown(content=_get_scene_info_markdown(scene))
+
             gui_timestep = self._viser_server.gui.add_slider(
                 "Timestep",
                 min=0,
@@ -133,7 +135,7 @@ def set_scene(self, scene: AbstractScene) -> None:
             )
             modalities_lidar_visible = self._viser_server.gui.add_checkbox("Lidar", self._viser_config.lidar_visible)
 
-        with self._viser_server.gui.add_folder("Options", expand_by_default=False):
+        with self._viser_server.gui.add_folder("Options", expand_by_default=True):
             option_bounding_box_type = self._viser_server.gui.add_dropdown(
                 "Bounding Box Type", ("mesh", "lines"), initial_value=self._viser_config.bounding_box_type
             )
@@ -367,3 +369,13 @@ def _(event: viser.GuiEvent) -> None:
 
         self._viser_server.flush()
         self.next()
+
+
+def _get_scene_info_markdown(scene: AbstractScene) -> str:
+    markdown = f"""
+    - Dataset: {scene.log_metadata.split}
+    - Location: {scene.log_metadata.location if scene.log_metadata.location else 'N/A'}
+    - UUID: {scene.uuid}
+    """
+    # - Log name: {scene.log_name}
+    return markdown

From f416414f8cc8c171245812f23e700fec6ba77b3d Mon Sep 17 00:00:00 2001
From: Daniel Dauner
Date: Tue, 4 Nov 2025 17:31:19 +0100
Subject: [PATCH 139/145] Use dataset specific label with registry structure,
 instead of only storing the unified semantic bounding box type. (#61)

---
 notebooks/bev_matplotlib.ipynb                |  47 +--
 .../datasets/av2/av2_sensor_converter.py      |  18 +-
 .../datasets/av2/utils/av2_constants.py       |  38 --
 .../datasets/kitti360/kitti360_converter.py   |  22 +-
 .../kitti360/utils/kitti360_helper.py         |   4 +-
 .../kitti360/utils/kitti360_labels.py         |  67 ++--
 .../kitti360/utils/preprocess_detection.py    |   8 +-
 .../datasets/nuplan/nuplan_converter.py       |   2 +
 .../datasets/nuplan/utils/nuplan_constants.py |  16 +-
 .../nuplan/utils/nuplan_sql_helper.py         |   8 +-
 .../datasets/nuscenes/nuscenes_converter.py   |  14 +-
 .../nuscenes/utils/nuscenes_constants.py      |  49 +--
 .../datasets/pandaset/pandaset_converter.py   |  21 +-
 .../pandaset/utils/pandaset_constants.py      |  89 ++---
 .../datasets/wopd/utils/wopd_constants.py     |  14 +-
 .../datasets/wopd/wopd_converter.py           |   4 +-
 .../conversion/log_writer/arrow_log_writer.py |   8 +-
 .../registry/box_detection_label_registry.py  | 351 ++++++++++++++++++
 .../registry/box_detection_type_registry.py   | 107 ------
 .../detections/box_detection_types.py         |  48 ---
 .../datatypes/detections/box_detections.py    |  21 +-
 .../datatypes/scene/arrow/arrow_scene.py      |   6 +-
 .../scene/arrow/utils/arrow_getters.py        |  14 +-
 src/py123d/datatypes/scene/scene_metadata.py  |  16 +-
 .../datatypes/vehicle_state/ego_state.py      |   6 +-
 src/py123d/visualization/color/default.py     |  22 +-
 src/py123d/visualization/matplotlib/camera.py |  14 +-
 .../visualization/matplotlib/observation.py   |  12 +-
 .../viser/elements/detection_elements.py      |  14 +-
 test_viser.py                                 |   7 +-
 30 files changed, 578 insertions(+), 489 deletions(-)
 create mode 100644 src/py123d/conversion/registry/box_detection_label_registry.py
 delete mode 100644 src/py123d/conversion/registry/box_detection_type_registry.py
 delete mode 100644 src/py123d/datatypes/detections/box_detection_types.py

diff --git a/notebooks/bev_matplotlib.ipynb b/notebooks/bev_matplotlib.ipynb
index 94234bd5..e7bbc334 100644
--- a/notebooks/bev_matplotlib.ipynb
+++ b/notebooks/bev_matplotlib.ipynb
@@ -12,7 +12,7 @@
     "\n",
     "\n",
     "from py123d.common.multithreading.worker_sequential import Sequential\n",
-    "# from py123d.datatypes.sensors.pinhole_camera import PinholeCameraType "
+    "# from py123d.datatypes.sensors.pinhole_camera_type import PinholeCameraType"
    ]
   },
  {
@@ -22,14 +22,13 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "splits = [\"kitti360_train\"]\n",
+    "# splits = [\"kitti360_train\"]\n",
     "# splits = [\"nuscenes-mini_val\", \"nuscenes-mini_train\"]\n",
     "# splits = [\"nuplan-mini_test\", \"nuplan-mini_train\", \"nuplan-mini_val\"]\n",
-    "# splits = [\"nuplan_private_test\"]\n",
     "# splits = [\"carla_test\"]\n",
     "# splits = [\"wopd_val\"]\n",
     "# splits = [\"av2-sensor_train\"]\n",
-    "# splits = [\"pandaset_test\", \"pandaset_val\", \"pandaset_train\"]\n",
+    "splits = [\"pandaset_test\", \"pandaset_val\", \"pandaset_train\"]\n",
     "log_names = None\n",
     "scene_uuids = None\n",
     "\n",
     "scene_filter = SceneFilter(\n",
     "    split_names=splits,\n",
     "    log_names=log_names,\n",
     "    scene_uuids=scene_uuids,\n",
-    "    duration_s=30.0,\n",
+    "    duration_s=None,\n",
     "    history_s=0.0,\n",
     "    timestamp_threshold_s=30.0,\n",
     "    shuffle=True,\n",
@@ -216,10 +215,10 @@
     "    point_2d = ego_vehicle_state.bounding_box.center.state_se2.point_2d\n",
     "    if map_api is not None:\n",
-    "        add_debug_map_on_ax(ax, scene.get_map_api(), point_2d, radius=radius, route_lane_group_ids=None)\n",
+    "        # add_debug_map_on_ax(ax, scene.get_map_api(), point_2d, radius=radius, route_lane_group_ids=None)\n",
     "\n",
     "\n",
-    "    # add_default_map_on_ax(ax, map_api, point_2d, radius=radius, route_lane_group_ids=None)\n",
+    "        add_default_map_on_ax(ax, map_api, point_2d, radius=radius, route_lane_group_ids=None)\n",
     "    # add_traffic_lights_to_ax(ax, traffic_light_detections, scene.get_map_api())\n",
     "\n",
     "    add_box_detections_to_ax(ax, box_detections)\n",
@@ -264,40 +263,6 @@
    "id": "3",
    "metadata": {},
    "outputs": [],
-   "source": [
-    "\n",
-    "map_api = scene.get_map_api()\n",
-    "map_api: AbstractMap\n",
-    "\n",
-    "intersection = map_api.get_map_object(\"562\", MapLayer.INTERSECTION)\n",
-    "\n",
-    "\n",
-    "lane_groups = intersection.lane_groups\n",
-    "\n"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "id": "4",
-   "metadata": {},
-   "outputs": [],
-   "source": []
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "id": "5",
-   "metadata": {},
-   "outputs": [],
-   "source": []
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "id": "6",
-   "metadata": {},
-   "outputs": [],
    "source": []
   }
  ],
diff --git a/src/py123d/conversion/datasets/av2/av2_sensor_converter.py b/src/py123d/conversion/datasets/av2/av2_sensor_converter.py
index aebebd19..2bace3a1 100644
--- a/src/py123d/conversion/datasets/av2/av2_sensor_converter.py
+++ b/src/py123d/conversion/datasets/av2/av2_sensor_converter.py
@@ -7,12 +7,7 @@
 from py123d.conversion.abstract_dataset_converter import AbstractDatasetConverter
 from py123d.conversion.dataset_converter_config import DatasetConverterConfig
 from py123d.conversion.datasets.av2.av2_map_conversion import convert_av2_map
-from py123d.conversion.datasets.av2.utils.av2_constants import (
-    AV2_CAMERA_TYPE_MAPPING,
-    AV2_SENSOR_SPLITS,
-    AV2_TO_DETECTION_TYPE,
-    AV2SensorBoxDetectionType,
-)
+from py123d.conversion.datasets.av2.utils.av2_constants import AV2_CAMERA_TYPE_MAPPING, AV2_SENSOR_SPLITS
 from py123d.conversion.datasets.av2.utils.av2_helper import (
     build_sensor_dataframe,
     build_synchronization_dataframe,
@@ -21,8 +16,8 @@
 )
 from py123d.conversion.log_writer.abstract_log_writer import AbstractLogWriter, LiDARData
 from py123d.conversion.map_writer.abstract_map_writer import AbstractMapWriter
+from py123d.conversion.registry.box_detection_label_registry import AV2SensorBoxDetectionLabel
 from py123d.conversion.registry.lidar_index_registry import AVSensorLiDARIndex
-from py123d.datatypes.detections.box_detection_types import BoxDetectionType
 from py123d.datatypes.detections.box_detections import BoxDetectionMetadata, BoxDetectionSE3, BoxDetectionWrapper
 from py123d.datatypes.maps.map_metadata import MapMetadata
 from py123d.datatypes.scene.scene_metadata import LogMetadata
@@ -118,6 +113,7 @@ def convert_log(self, log_index: int, log_writer: AbstractLogWriter) -> None:
             log_name=source_log_path.name,
             location=map_metadata.location,
             timestep_seconds=0.1,
+            box_detection_label_class=AV2SensorBoxDetectionLabel,
             vehicle_parameters=get_av2_ford_fusion_hybrid_parameters(),
             pinhole_camera_metadata=_get_av2_pinhole_camera_metadata(source_log_path, self.dataset_converter_config),
             lidar_metadata=_get_av2_lidar_metadata(source_log_path, self.dataset_converter_config),
@@ -258,7 +254,7 @@ def _extract_av2_sensor_box_detections(
     detections_state = np.zeros((num_detections, len(BoundingBoxSE3Index)), dtype=np.float64)
     detections_velocity = np.zeros((num_detections, len(Vector3DIndex)), dtype=np.float64)
     detections_token: List[str] = annotations_slice["track_uuid"].tolist()
-    detections_types: List[BoxDetectionType] = []
+    detections_labels: List[AV2SensorBoxDetectionLabel] = []
 
     for detection_idx, (_, row) in enumerate(annotations_slice.iterrows()):
         row = row.to_dict()
@@ -267,8 +263,8 @@ def _extract_av2_sensor_box_detections(
         detections_state[detection_idx, BoundingBoxSE3Index.QUATERNION] = [row["qw"], row["qx"], row["qy"], row["qz"]]
         detections_state[detection_idx, BoundingBoxSE3Index.EXTENT] = [row["length_m"], row["width_m"], row["height_m"]]
 
-        av2_detection_type = AV2SensorBoxDetectionType.deserialize(row["category"])
-        detections_types.append(AV2_TO_DETECTION_TYPE[av2_detection_type])
+        detections_label = AV2SensorBoxDetectionLabel.deserialize(row["category"])
+        detections_labels.append(detections_label)
 
     detections_state[:, BoundingBoxSE3Index.STATE_SE3] = convert_relative_to_absolute_se3_array(
         origin=ego_state_se3.rear_axle_se3,
@@ -280,7 +276,7 @@ def _extract_av2_sensor_box_detections(
         box_detections.append(
             BoxDetectionSE3(
                 metadata=BoxDetectionMetadata(
-                    box_detection_type=detections_types[detection_idx],
+                    label=detections_labels[detection_idx],
                     timepoint=None,
                     track_token=detections_token[detection_idx],
                     confidence=None,
diff --git a/src/py123d/conversion/datasets/av2/utils/av2_constants.py b/src/py123d/conversion/datasets/av2/utils/av2_constants.py
index e68f7cef..30b59fa2 100644
--- a/src/py123d/conversion/datasets/av2/utils/av2_constants.py
+++ b/src/py123d/conversion/datasets/av2/utils/av2_constants.py
@@ -1,49 +1,11 @@
 from typing import Dict, Final, Set
 
-from py123d.conversion.registry.box_detection_type_registry import AV2SensorBoxDetectionType
-from py123d.datatypes.detections.box_detection_types import BoxDetectionType
 from py123d.datatypes.maps.map_datatypes import RoadLineType
 from py123d.datatypes.sensors.pinhole_camera import PinholeCameraType
 
 AV2_SENSOR_SPLITS: Set[str] = {"av2-sensor_train", "av2-sensor_val", "av2-sensor_test"}
 
-# Mapping from AV2SensorBoxDetectionType to general DetectionType
-# TODO: Change the detection types. Multiple mistakes, e.g. animals/dogs are not generic objects.
-AV2_TO_DETECTION_TYPE = {
-    AV2SensorBoxDetectionType.ANIMAL: BoxDetectionType.GENERIC_OBJECT,
-    AV2SensorBoxDetectionType.ARTICULATED_BUS: BoxDetectionType.VEHICLE,
-    AV2SensorBoxDetectionType.BICYCLE: BoxDetectionType.BICYCLE,
-    AV2SensorBoxDetectionType.BICYCLIST: BoxDetectionType.PEDESTRIAN,
-    AV2SensorBoxDetectionType.BOLLARD: BoxDetectionType.BARRIER,
-    AV2SensorBoxDetectionType.BOX_TRUCK: BoxDetectionType.VEHICLE,
-    AV2SensorBoxDetectionType.BUS: BoxDetectionType.VEHICLE,
-    AV2SensorBoxDetectionType.CONSTRUCTION_BARREL: BoxDetectionType.BARRIER,
-    AV2SensorBoxDetectionType.CONSTRUCTION_CONE: BoxDetectionType.TRAFFIC_CONE,
-    AV2SensorBoxDetectionType.DOG: BoxDetectionType.GENERIC_OBJECT,
-    AV2SensorBoxDetectionType.LARGE_VEHICLE: BoxDetectionType.VEHICLE,
-    AV2SensorBoxDetectionType.MESSAGE_BOARD_TRAILER: BoxDetectionType.VEHICLE,
-    AV2SensorBoxDetectionType.MOBILE_PEDESTRIAN_CROSSING_SIGN: BoxDetectionType.CZONE_SIGN,
-    AV2SensorBoxDetectionType.MOTORCYCLE: BoxDetectionType.BICYCLE,
-    AV2SensorBoxDetectionType.MOTORCYCLIST: BoxDetectionType.BICYCLE,
-    AV2SensorBoxDetectionType.OFFICIAL_SIGNALER: BoxDetectionType.PEDESTRIAN,
-    AV2SensorBoxDetectionType.PEDESTRIAN: BoxDetectionType.PEDESTRIAN,
-    AV2SensorBoxDetectionType.RAILED_VEHICLE: BoxDetectionType.VEHICLE,
-    AV2SensorBoxDetectionType.REGULAR_VEHICLE: BoxDetectionType.VEHICLE,
-    AV2SensorBoxDetectionType.SCHOOL_BUS: BoxDetectionType.VEHICLE,
-    AV2SensorBoxDetectionType.SIGN: BoxDetectionType.SIGN,
-    AV2SensorBoxDetectionType.STOP_SIGN: BoxDetectionType.SIGN,
-    AV2SensorBoxDetectionType.STROLLER: BoxDetectionType.PEDESTRIAN,
-    AV2SensorBoxDetectionType.TRAFFIC_LIGHT_TRAILER: BoxDetectionType.VEHICLE,
-    AV2SensorBoxDetectionType.TRUCK: BoxDetectionType.VEHICLE,
-    AV2SensorBoxDetectionType.TRUCK_CAB: BoxDetectionType.VEHICLE,
-    AV2SensorBoxDetectionType.VEHICULAR_TRAILER: BoxDetectionType.VEHICLE,
-    AV2SensorBoxDetectionType.WHEELCHAIR: BoxDetectionType.PEDESTRIAN,
-    AV2SensorBoxDetectionType.WHEELED_DEVICE: BoxDetectionType.GENERIC_OBJECT,
-    AV2SensorBoxDetectionType.WHEELED_RIDER: BoxDetectionType.BICYCLE,
-}
-
-
 AV2_CAMERA_TYPE_MAPPING: Dict[str, PinholeCameraType] = {
     "ring_front_center": PinholeCameraType.PCAM_F0,
     "ring_front_left": PinholeCameraType.PCAM_L0,
diff --git a/src/py123d/conversion/datasets/kitti360/kitti360_converter.py b/src/py123d/conversion/datasets/kitti360/kitti360_converter.py
index 8bb9b497..c236c0e4 100644
--- a/src/py123d/conversion/datasets/kitti360/kitti360_converter.py
+++ b/src/py123d/conversion/datasets/kitti360/kitti360_converter.py
@@ -20,12 +20,13 @@
 )
 from py123d.conversion.datasets.kitti360.utils.kitti360_labels import (
     BBOX_LABLES_TO_DETECTION_NAME_DICT,
     KITTI360_DETECTION_NAME_DICT,
     kittiId2label,
 )
 from py123d.conversion.datasets.kitti360.utils.preprocess_detection import process_detection
 from py123d.conversion.log_writer.abstract_log_writer import AbstractLogWriter, LiDARData
 from py123d.conversion.map_writer.abstract_map_writer import AbstractMapWriter
+from py123d.conversion.registry.box_detection_label_registry import KITTI360BoxDetectionLabel
 from py123d.conversion.registry.lidar_index_registry import Kitti360LiDARIndex
 from py123d.datatypes.detections.box_detections import (
     BoxDetectionMetadata,
@@ -240,6 +241,7 @@ def convert_log(self, log_index: int, log_writer: AbstractLogWriter) -> None:
             log_name=log_name,
             location=log_name,
             timestep_seconds=KITTI360_DT,
+            box_detection_label_class=KITTI360BoxDetectionLabel,
             vehicle_parameters=get_kitti360_vw_passat_parameters(),
             pinhole_camera_metadata=_get_kitti360_pinhole_camera_metadata(
                 self._kitti360_folders,
@@ -566,7 +568,7 @@ def _extract_kitti360_box_detections_all(
     detections_states: List[List[List[float]]] = [[] for _ in range(ts_len)]
     detections_velocity: List[List[List[float]]] = [[] for _ in range(ts_len)]
     detections_tokens: List[List[str]] = [[] for _ in range(ts_len)]
-    detections_types: List[List[int]] = [[] for _ in range(ts_len)]
+    detections_labels: List[List[int]] = [[] for _ in range(ts_len)]
 
     if log_name == "2013_05_28_drive_0004_sync":
         bbox_3d_path = kitti360_folders[DIR_3D_BBOX] / "train_full" / f"{log_name}.xml"
@@ -600,9 +602,9 @@ def _extract_kitti360_box_detections_all(
             semanticIdKITTI = int(child.find("semanticId").text)
             name = kittiId2label[semanticIdKITTI].name
         else:
-            lable = child.find("label").text
-            name = BBOX_LABLES_TO_DETECTION_NAME_DICT.get(lable, "unknown")
+            label = child.find("label").text
+            name = BBOX_LABLES_TO_DETECTION_NAME_DICT.get(label, "unknown")
         if child.find("transform") is None or name not in KITTI360_DETECTION_NAME_DICT.keys():
             continue
         obj = KITTI360Bbox3D()
         obj.parseBbox(child)
@@ -618,7 +620,7 @@ def _extract_kitti360_box_detections_all(
                 detections_states[frame].append(obj.get_state_array())
                 detections_velocity[frame].append(np.array([0.0, 0.0, 0.0]))
                 detections_tokens[frame].append(str(obj.globalID))
-                detections_types[frame].append(KITTI360_DETECTION_NAME_DICT[obj.name])
+                detections_labels[frame].append(KITTI360_DETECTION_NAME_DICT[obj.name])
         else:
             global_ID = obj.globalID
             dynamic_objs[global_ID].append(obj)
@@ -655,21 +657,21 @@ def _extract_kitti360_box_detections_all(
             detections_states[frame].append(obj.get_state_array())
             detections_velocity[frame].append(vel)
             detections_tokens[frame].append(str(obj.globalID))
-            detections_types[frame].append(KITTI360_DETECTION_NAME_DICT[obj.name])
+            detections_labels[frame].append(KITTI360_DETECTION_NAME_DICT[obj.name])
 
     box_detection_wrapper_all: List[BoxDetectionWrapper] = []
     for frame in range(ts_len):
         box_detections: List[BoxDetectionSE3] = []
-        for state, velocity, token, detection_type in zip(
+        for state, velocity, token, detection_label in zip(
             detections_states[frame],
             detections_velocity[frame],
             detections_tokens[frame],
-            detections_types[frame],
+            detections_labels[frame],
         ):
             if state is None:
                 break
             detection_metadata = BoxDetectionMetadata(
-                box_detection_type=detection_type,
+                label=detection_label,
                 timepoint=None,
                 track_token=token,
                 confidence=None,
diff --git a/src/py123d/conversion/datasets/kitti360/utils/kitti360_helper.py b/src/py123d/conversion/datasets/kitti360/utils/kitti360_helper.py
index ef3511c4..fa3afa77 100644
--- a/src/py123d/conversion/datasets/kitti360/utils/kitti360_helper.py
+++ b/src/py123d/conversion/datasets/kitti360/utils/kitti360_helper.py
@@ -6,9 +6,7 @@
 from scipy.linalg import polar
 
 from py123d.conversion.datasets.kitti360.utils.kitti360_labels import BBOX_LABLES_TO_DETECTION_NAME_DICT, kittiId2label
-from py123d.geometry import BoundingBoxSE3, StateSE3
-from py123d.geometry.polyline import Polyline3D
-from py123d.geometry.rotation import EulerAngles
+from py123d.geometry import BoundingBoxSE3, EulerAngles, Polyline3D, StateSE3
 
 # KITTI360_DATA_ROOT = Path(os.environ["KITTI360_DATA_ROOT"])
 # DIR_CALIB = "calibration"
diff --git a/src/py123d/conversion/datasets/kitti360/utils/kitti360_labels.py b/src/py123d/conversion/datasets/kitti360/utils/kitti360_labels.py
index 6feafc1d..a40cffca 100644
--- a/src/py123d/conversion/datasets/kitti360/utils/kitti360_labels.py
+++ b/src/py123d/conversion/datasets/kitti360/utils/kitti360_labels.py
@@ -5,6 +5,8 @@
 
 from collections import namedtuple
 
+from py123d.conversion.registry.box_detection_label_registry import KITTI360BoxDetectionLabel
+
 # --------------------------------------------------------------------------------
 # Definitions
 # --------------------------------------------------------------------------------
@@ -161,8 +163,6 @@ def assureSingleInstanceName(name):
     return name
 
 
-from py123d.datatypes.detections.box_detection_types import BoxDetectionType
-
 BBOX_LABLES_TO_DETECTION_NAME_DICT = {
     "car": "car",
     "truck": "truck",
@@ -176,42 +176,33 @@ def assureSingleInstanceName(name):
     "trafficSign": "traffic sign",
     "rider": "rider",
     "caravan": "caravan",
+    "box": "box",
+    "lamp": "lamp",
+    "pole": "pole",
+    "smallpole": "smallpole",
+    "train": "train",
+    "trashBin": "trash bin",
+    "vendingMachine": "vending machine",
 }
 
-KITTI360_DETECTION_NAME_DICT = {
-    "traffic light": BoxDetectionType.SIGN,
-    "traffic sign": BoxDetectionType.SIGN,
-    "person": BoxDetectionType.PEDESTRIAN,
-    "rider": BoxDetectionType.BICYCLE,
-    "car": BoxDetectionType.VEHICLE,
-    "truck": BoxDetectionType.VEHICLE,
-    "bus": BoxDetectionType.VEHICLE,
-    "caravan": BoxDetectionType.VEHICLE,
-    "trailer": BoxDetectionType.VEHICLE,
-    "train": BoxDetectionType.VEHICLE,
-    "motorcycle": BoxDetectionType.BICYCLE,
-    "bicycle": BoxDetectionType.BICYCLE,
-    "stop": BoxDetectionType.SIGN,
+KITTI360_DETECTION_NAME_DICT = {
+    "bicycle": KITTI360BoxDetectionLabel.BICYCLE,
+    "box": KITTI360BoxDetectionLabel.BOX,
+    "bus": KITTI360BoxDetectionLabel.BUS,
+    "car": KITTI360BoxDetectionLabel.CAR,
+    "caravan": KITTI360BoxDetectionLabel.CARAVAN,
+    "lamp": KITTI360BoxDetectionLabel.LAMP,
+    "motorcycle": KITTI360BoxDetectionLabel.MOTORCYCLE,
+    "person": KITTI360BoxDetectionLabel.PERSON,
+    "pole": KITTI360BoxDetectionLabel.POLE,
+    "rider": KITTI360BoxDetectionLabel.RIDER,
+    "smallpole": KITTI360BoxDetectionLabel.SMALLPOLE,
+    "stop": KITTI360BoxDetectionLabel.STOP,
+    "traffic light": KITTI360BoxDetectionLabel.TRAFFIC_LIGHT,
+    "traffic sign": KITTI360BoxDetectionLabel.TRAFFIC_SIGN,
+    "trailer": KITTI360BoxDetectionLabel.TRAILER,
+    "train": KITTI360BoxDetectionLabel.TRAIN,
+    "trash bin": KITTI360BoxDetectionLabel.TRASH_BIN,
+    "truck": KITTI360BoxDetectionLabel.TRUCK,
+    "vending machine": KITTI360BoxDetectionLabel.VENDING_MACHINE,
 }
-
-# KIITI360_DETECTION_NAME_DICT = {
-#     "pole": DetectionType.GENERIC_OBJECT,
-#     "traffic light": DetectionType.SIGN,
-#     "traffic sign": DetectionType.SIGN,
-#     "person": DetectionType.PEDESTRIAN,
-#     "rider": DetectionType.BICYCLE,
-#     "car": DetectionType.VEHICLE,
-#     "truck": DetectionType.VEHICLE,
-#     "bus": DetectionType.VEHICLE,
-#     "caravan": DetectionType.VEHICLE,
-#     "trailer": DetectionType.VEHICLE,
-#     "train": DetectionType.VEHICLE,
-#     "motorcycle": DetectionType.BICYCLE,
-#     "bicycle": DetectionType.BICYCLE,
-#     "stop": DetectionType.SIGN,
-#     "smallpole": DetectionType.GENERIC_OBJECT,
-#     "lamp": DetectionType.GENERIC_OBJECT,
-#     "trash bin": DetectionType.GENERIC_OBJECT,
-#     "vending machine": DetectionType.GENERIC_OBJECT,
-#     "box": DetectionType.GENERIC_OBJECT,
-# }
diff --git a/src/py123d/conversion/datasets/kitti360/utils/preprocess_detection.py b/src/py123d/conversion/datasets/kitti360/utils/preprocess_detection.py
index 3f65b375..1c2beeca 100644
--- a/src/py123d/conversion/datasets/kitti360/utils/preprocess_detection.py
+++ b/src/py123d/conversion/datasets/kitti360/utils/preprocess_detection.py
@@ -28,7 +28,7 @@
 )
 from py123d.conversion.datasets.kitti360.utils.kitti360_labels import (
     BBOX_LABLES_TO_DETECTION_NAME_DICT,
     KITTI360_DETECTION_NAME_DICT,
     kittiId2label,
 )
 
@@ -73,10 +73,10 @@ def _collect_static_objects(kitti360_dataset_root: Path, log_name: str) -> List[
             semanticIdKITTI = int(child.find("semanticId").text)
             name = kittiId2label[semanticIdKITTI].name
         else:
-            lable = child.find("label").text
-            name = BBOX_LABLES_TO_DETECTION_NAME_DICT.get(lable, "unknown")
+            label = child.find("label").text
+            name = BBOX_LABLES_TO_DETECTION_NAME_DICT.get(label, "unknown")
         timestamp = int(child.find("timestamp").text)  # -1 for static objects
-        if child.find("transform") is None or name not in KITTI360_DETECTION_NAME_DICT or timestamp != -1:
+        if child.find("transform") is None or name not in KITTI360_DETECTION_NAME_DICT.keys() or timestamp != -1:
             continue
         obj = KITTI360Bbox3D()
         obj.parseBbox(child)
diff --git a/src/py123d/conversion/datasets/nuplan/nuplan_converter.py b/src/py123d/conversion/datasets/nuplan/nuplan_converter.py
index 8c77169c..92863acd 100644
--- a/src/py123d/conversion/datasets/nuplan/nuplan_converter.py
+++ b/src/py123d/conversion/datasets/nuplan/nuplan_converter.py
@@ -24,6 +24,7 @@
 )
 from py123d.conversion.log_writer.abstract_log_writer import AbstractLogWriter, LiDARData
 from py123d.conversion.map_writer.abstract_map_writer import AbstractMapWriter
+from py123d.conversion.registry.box_detection_label_registry import NuPlanBoxDetectionLabel
 from py123d.conversion.registry.lidar_index_registry import NuPlanLiDARIndex
 from py123d.datatypes.detections.box_detections import BoxDetectionSE3, BoxDetectionWrapper
 from py123d.datatypes.detections.traffic_light_detections import TrafficLightDetection, TrafficLightDetectionWrapper
@@ -178,6 +179,7 @@ def convert_log(self, log_index: int, log_writer: AbstractLogWriter) -> None:
             location=nuplan_log_db.log.map_version,
             timestep_seconds=TARGET_DT,
             vehicle_parameters=get_nuplan_chrysler_pacifica_parameters(),
+            box_detection_label_class=NuPlanBoxDetectionLabel,
             pinhole_camera_metadata=_get_nuplan_camera_metadata(
                 source_log_path, self._nuplan_sensor_root, self.dataset_converter_config
             ),
diff --git a/src/py123d/conversion/datasets/nuplan/utils/nuplan_constants.py b/src/py123d/conversion/datasets/nuplan/utils/nuplan_constants.py
index 26697ec0..d904a698 100644
--- a/src/py123d/conversion/datasets/nuplan/utils/nuplan_constants.py
+++ b/src/py123d/conversion/datasets/nuplan/utils/nuplan_constants.py
@@ -1,6 +1,6 @@
 from typing import Dict, Final, List, Set
 
-from py123d.datatypes.detections.box_detection_types import BoxDetectionType
+from py123d.conversion.registry.box_detection_label_registry import NuPlanBoxDetectionLabel
 from py123d.datatypes.detections.traffic_light_detections import TrafficLightStatus
 from py123d.datatypes.maps.map_datatypes import RoadLineType
 from py123d.datatypes.sensors.lidar import LiDARType
@@ -16,13 +16,13 @@
 
 
 NUPLAN_DETECTION_NAME_DICT = {
-    "vehicle": BoxDetectionType.VEHICLE,
-    "bicycle": BoxDetectionType.BICYCLE,
-    "pedestrian": BoxDetectionType.PEDESTRIAN,
-    "traffic_cone": BoxDetectionType.TRAFFIC_CONE,
-    "barrier": BoxDetectionType.BARRIER,
-    "czone_sign": BoxDetectionType.CZONE_SIGN,
-    "generic_object": BoxDetectionType.GENERIC_OBJECT,
+    "vehicle": NuPlanBoxDetectionLabel.VEHICLE,
+    "bicycle": NuPlanBoxDetectionLabel.BICYCLE,
+    "pedestrian": NuPlanBoxDetectionLabel.PEDESTRIAN,
+    "traffic_cone": NuPlanBoxDetectionLabel.TRAFFIC_CONE,
+    "barrier": NuPlanBoxDetectionLabel.BARRIER,
+    "czone_sign": NuPlanBoxDetectionLabel.CZONE_SIGN,
+    "generic_object": NuPlanBoxDetectionLabel.GENERIC_OBJECT,
 }
 
 # https://github.com/motional/nuplan-devkit/blob/e9241677997dd86bfc0bcd44817ab04fe631405b/nuplan/database/nuplan_db_orm/utils.py#L1129-L1135
diff --git a/src/py123d/conversion/datasets/nuplan/utils/nuplan_sql_helper.py b/src/py123d/conversion/datasets/nuplan/utils/nuplan_sql_helper.py
index 3f2089e2..866246ba 100644
--- a/src/py123d/conversion/datasets/nuplan/utils/nuplan_sql_helper.py
+++ b/src/py123d/conversion/datasets/nuplan/utils/nuplan_sql_helper.py
@@ -51,13 +51,13 @@ def get_box_detections_for_lidarpc_token_from_db(log_file: str, token: str) -> L
                 qy=quaternion.qy,
                 qz=quaternion.qz,
             ),
-            length=row["length"],  # nuPlan uses length,
-            width=row["width"],  # width,
-            height=row["height"]  # height
+            length=row["length"],
+            width=row["width"],
+            height=row["height"]
         )
         box_detection = BoxDetectionSE3(
             metadata=BoxDetectionMetadata(
-                box_detection_type=NUPLAN_DETECTION_NAME_DICT[row["category_name"]],
+                label=NUPLAN_DETECTION_NAME_DICT[row["category_name"]],
                 track_token=row["track_token"].hex(),
             ),
             bounding_box_se3=bounding_box,
diff --git a/src/py123d/conversion/datasets/nuscenes/nuscenes_converter.py b/src/py123d/conversion/datasets/nuscenes/nuscenes_converter.py
index cc055e1d..22921491 100644
--- a/src/py123d/conversion/datasets/nuscenes/nuscenes_converter.py
+++ b/src/py123d/conversion/datasets/nuscenes/nuscenes_converter.py
@@ -17,6 +17,7 @@
 )
 from py123d.conversion.log_writer.abstract_log_writer import AbstractLogWriter, LiDARData
 from py123d.conversion.map_writer.abstract_map_writer import AbstractMapWriter
+from py123d.conversion.registry.box_detection_label_registry import NuScenesBoxDetectionLabel
 from py123d.conversion.registry.lidar_index_registry import NuScenesLiDARIndex
 from py123d.datatypes.detections.box_detections import BoxDetectionMetadata, BoxDetectionSE3, BoxDetectionWrapper
 from py123d.datatypes.maps.map_metadata import MapMetadata
@@ -153,6 +154,7 @@ def convert_log(self, log_index: int, log_writer: AbstractLogWriter) -> None:
             location=log_record["location"],
             timestep_seconds=NUSCENES_DT,
             vehicle_parameters=get_nuscenes_renault_zoe_parameters(),
+            box_detection_label_class=NuScenesBoxDetectionLabel,
             pinhole_camera_metadata=_get_nuscenes_pinhole_camera_metadata(nusc, scene, self.dataset_converter_config),
             lidar_metadata=_get_nuscenes_lidar_metadata(nusc, scene, self.dataset_converter_config),
             map_metadata=_get_nuscenes_map_metadata(log_record["location"]),
@@ -357,22 +359,14 @@ def _extract_nuscenes_box_detections(nusc: NuScenes, sample: Dict[str, Any]) ->
         bounding_box = BoundingBoxSE3(center, box.wlh[1], box.wlh[0], box.wlh[2])
 
         # Get detection type
         category = ann["category_name"]
-        det_type = None
-        for key, value in NUSCENES_DETECTION_NAME_DICT.items():
-            if category.startswith(key):
-                det_type = value
-                break
-
-        if det_type is None:
-            print(f"Warning: Unmapped nuScenes category: {category}, skipping")
-            continue
+        label = NUSCENES_DETECTION_NAME_DICT[category]
 
         # Get velocity if available
         velocity = nusc.box_velocity(ann_token)
         velocity_3d = Vector3D(x=velocity[0], y=velocity[1], z=velocity[2] if len(velocity) > 2 else 0.0)
 
         metadata = BoxDetectionMetadata(
-            box_detection_type=det_type,
+            label=label,
             track_token=ann["instance_token"],
             timepoint=TimePoint.from_us(sample["timestamp"]),
             confidence=1.0,  # nuScenes annotations are ground truth
diff --git a/src/py123d/conversion/datasets/nuscenes/utils/nuscenes_constants.py b/src/py123d/conversion/datasets/nuscenes/utils/nuscenes_constants.py
index 8878401d..5cafb870 100644
--- a/src/py123d/conversion/datasets/nuscenes/utils/nuscenes_constants.py
+++ b/src/py123d/conversion/datasets/nuscenes/utils/nuscenes_constants.py
@@ -1,6 +1,6 @@
 from typing import Final, List
 
-from py123d.datatypes.detections.box_detection_types import BoxDetectionType
+from py123d.conversion.registry.box_detection_label_registry import NuScenesBoxDetectionLabel
 from py123d.datatypes.sensors.pinhole_camera import PinholeCameraType
 
 NUSCENES_MAPS: List[str] = ["boston-seaport", "singapore-hollandvillage", "singapore-onenorth", "singapore-queenstown"]
@@ -18,35 +18,36 @@
 SORT_BY_TIMESTAMP: Final[bool] = True
 
 NUSCENES_DETECTION_NAME_DICT = {
     # Vehicles (4+ wheels)
-    "vehicle.car": BoxDetectionType.VEHICLE,
-    "vehicle.truck": BoxDetectionType.VEHICLE,
-    "vehicle.bus.bendy": BoxDetectionType.VEHICLE,
-    "vehicle.bus.rigid": BoxDetectionType.VEHICLE,
-    "vehicle.construction": BoxDetectionType.VEHICLE,
-    "vehicle.emergency.ambulance": BoxDetectionType.VEHICLE,
-    "vehicle.emergency.police": BoxDetectionType.VEHICLE,
-    "vehicle.trailer": BoxDetectionType.VEHICLE,
+    "vehicle.car": NuScenesBoxDetectionLabel.VEHICLE_CAR,
+    "vehicle.truck": NuScenesBoxDetectionLabel.VEHICLE_TRUCK,
+    "vehicle.bus.bendy": NuScenesBoxDetectionLabel.VEHICLE_BUS_BENDY,
+    "vehicle.bus.rigid": NuScenesBoxDetectionLabel.VEHICLE_BUS_RIGID,
+    "vehicle.construction": NuScenesBoxDetectionLabel.VEHICLE_CONSTRUCTION,
+    "vehicle.emergency.ambulance": NuScenesBoxDetectionLabel.VEHICLE_EMERGENCY_AMBULANCE,
+    "vehicle.emergency.police": NuScenesBoxDetectionLabel.VEHICLE_EMERGENCY_POLICE,
+    "vehicle.trailer": NuScenesBoxDetectionLabel.VEHICLE_TRAILER,
     # Bicycles / Motorcycles
-    "vehicle.bicycle": BoxDetectionType.BICYCLE,
-    "vehicle.motorcycle": BoxDetectionType.BICYCLE,
+    "vehicle.bicycle": NuScenesBoxDetectionLabel.VEHICLE_BICYCLE,
+    "vehicle.motorcycle": NuScenesBoxDetectionLabel.VEHICLE_MOTORCYCLE,
     # Pedestrians (all subtypes)
-    "human.pedestrian.adult": BoxDetectionType.PEDESTRIAN,
-    "human.pedestrian.child": BoxDetectionType.PEDESTRIAN,
-    "human.pedestrian.construction_worker": BoxDetectionType.PEDESTRIAN,
-    "human.pedestrian.personal_mobility": BoxDetectionType.PEDESTRIAN,
-    "human.pedestrian.police_officer": BoxDetectionType.PEDESTRIAN,
-    "human.pedestrian.stroller": BoxDetectionType.PEDESTRIAN,
-    "human.pedestrian.wheelchair": BoxDetectionType.PEDESTRIAN,
+    "human.pedestrian.adult": NuScenesBoxDetectionLabel.HUMAN_PEDESTRIAN_ADULT,
+    "human.pedestrian.child": NuScenesBoxDetectionLabel.HUMAN_PEDESTRIAN_CHILD,
+    "human.pedestrian.construction_worker": NuScenesBoxDetectionLabel.HUMAN_PEDESTRIAN_CONSTRUCTION_WORKER,
+    "human.pedestrian.personal_mobility": NuScenesBoxDetectionLabel.HUMAN_PEDESTRIAN_PERSONAL_MOBILITY,
+    "human.pedestrian.police_officer": NuScenesBoxDetectionLabel.HUMAN_PEDESTRIAN_POLICE_OFFICER,
+    "human.pedestrian.stroller": NuScenesBoxDetectionLabel.HUMAN_PEDESTRIAN_STROLLER,
+    "human.pedestrian.wheelchair": NuScenesBoxDetectionLabel.HUMAN_PEDESTRIAN_WHEELCHAIR,
     # Traffic cone / barrier
-    "movable_object.trafficcone": BoxDetectionType.TRAFFIC_CONE,
-    "movable_object.barrier": BoxDetectionType.BARRIER,
+    "movable_object.trafficcone": NuScenesBoxDetectionLabel.MOVABLE_OBJECT_TRAFFICCONE,
+    "movable_object.barrier": NuScenesBoxDetectionLabel.MOVABLE_OBJECT_BARRIER,
     # Generic objects
-    "movable_object.pushable_pullable": BoxDetectionType.GENERIC_OBJECT,
-    "movable_object.debris": BoxDetectionType.GENERIC_OBJECT,
-    "static_object.bicycle_rack": BoxDetectionType.GENERIC_OBJECT,
-    "animal": BoxDetectionType.GENERIC_OBJECT,
+    "movable_object.pushable_pullable": NuScenesBoxDetectionLabel.MOVABLE_OBJECT_PUSHABLE_PULLABLE,
+    "movable_object.debris": NuScenesBoxDetectionLabel.MOVABLE_OBJECT_DEBRIS,
+    "static_object.bicycle_rack": NuScenesBoxDetectionLabel.STATIC_OBJECT_BICYCLE_RACK,
+    "animal": NuScenesBoxDetectionLabel.ANIMAL,
 }
+
 
 NUSCENES_CAMERA_TYPES = {
     PinholeCameraType.PCAM_F0: "CAM_FRONT",
     PinholeCameraType.PCAM_B0: "CAM_BACK",
diff --git a/src/py123d/conversion/datasets/pandaset/pandaset_converter.py b/src/py123d/conversion/datasets/pandaset/pandaset_converter.py
index 0f177af1..9c244ae1 100644
--- a/src/py123d/conversion/datasets/pandaset/pandaset_converter.py
+++ b/src/py123d/conversion/datasets/pandaset/pandaset_converter.py
@@ -8,7 +8,6 @@
 from py123d.conversion.dataset_converter_config import DatasetConverterConfig
 from py123d.conversion.datasets.pandaset.utils.pandaset_constants import (
     PANDASET_BOX_DETECTION_FROM_STR,
-    PANDASET_BOX_DETECTION_TO_DEFAULT,
     PANDASET_CAMERA_DISTORTIONS,
     PANDASET_CAMERA_MAPPING,
     PANDASET_LIDAR_EXTRINSICS,
@@ -25,26 +24,20 @@
 )
 from py123d.conversion.log_writer.abstract_log_writer import AbstractLogWriter, LiDARData
 from py123d.conversion.map_writer.abstract_map_writer import AbstractMapWriter
+from py123d.conversion.registry.box_detection_label_registry import PandasetBoxDetectionLabel
 from py123d.conversion.registry.lidar_index_registry import PandasetLiDARIndex
 from py123d.datatypes.detections.box_detections import BoxDetectionMetadata, BoxDetectionSE3, BoxDetectionWrapper
 from py123d.datatypes.scene.scene_metadata import LogMetadata
 from py123d.datatypes.sensors.lidar import LiDARMetadata, LiDARType
-from py123d.datatypes.sensors.pinhole_camera import (
-    PinholeCameraMetadata,
-    PinholeCameraType,
-    PinholeIntrinsics,
-)
+from py123d.datatypes.sensors.pinhole_camera import PinholeCameraMetadata, PinholeCameraType, PinholeIntrinsics
 from py123d.datatypes.time.time_point import TimePoint
 from py123d.datatypes.vehicle_state.ego_state import DynamicStateSE3, EgoStateSE3
 from py123d.datatypes.vehicle_state.vehicle_parameters import (
     get_pandaset_chrysler_pacifica_parameters,
     rear_axle_se3_to_center_se3,
 )
-from py123d.geometry import BoundingBoxSE3, StateSE3, Vector3D
-from py123d.geometry.geometry_index import BoundingBoxSE3Index, EulerAnglesIndex
-from py123d.geometry.transform.transform_se3 import (
-    convert_absolute_to_relative_se3_array,
-)
+from py123d.geometry import BoundingBoxSE3, BoundingBoxSE3Index, EulerAnglesIndex, StateSE3, Vector3D
+from py123d.geometry.transform.transform_se3 import convert_absolute_to_relative_se3_array
 from py123d.geometry.utils.constants import DEFAULT_PITCH, DEFAULT_ROLL
 from py123d.geometry.utils.rotation_utils import get_quaternion_array_from_euler_array
 
@@ -115,6 +108,7 @@ def convert_log(self, log_index: int, log_writer: AbstractLogWriter) -> None:
             location=None,  # TODO: Add location information.
             timestep_seconds=0.1,
             vehicle_parameters=get_pandaset_chrysler_pacifica_parameters(),
+            box_detection_label_class=PandasetBoxDetectionLabel,
             pinhole_camera_metadata=_get_pandaset_camera_metadata(source_log_path, self.dataset_converter_config),
             lidar_metadata=_get_pandaset_lidar_metadata(source_log_path, self.dataset_converter_config),
             map_metadata=None,  # NOTE: Pandaset does not have maps.
@@ -311,8 +305,7 @@ def _extract_pandaset_box_detections(
         if sensor_ids[box_idx] == 1 and sibling_ids[box_idx] in top_lidar_uuids:
             continue
 
-        pandaset_box_detection_type = PANDASET_BOX_DETECTION_FROM_STR[box_label_names[box_idx]]
-        box_detection_type = PANDASET_BOX_DETECTION_TO_DEFAULT[pandaset_box_detection_type]
+        pandaset_box_detection_label = PANDASET_BOX_DETECTION_FROM_STR[box_label_names[box_idx]]
 
         # Convert coordinates to ISO 8855
         # NOTE: This would be faster over a batch operation.
@@ -322,7 +315,7 @@ def _extract_pandaset_box_detections(
 
         box_detection_se3 = BoxDetectionSE3(
             metadata=BoxDetectionMetadata(
-                box_detection_type=box_detection_type,
+                label=pandaset_box_detection_label,
                 track_token=box_uuids[box_idx],
             ),
             bounding_box_se3=BoundingBoxSE3.from_array(box_se3_array[box_idx]),
diff --git a/src/py123d/conversion/datasets/pandaset/utils/pandaset_constants.py b/src/py123d/conversion/datasets/pandaset/utils/pandaset_constants.py
index e771060b..8428aadd 100644
--- a/src/py123d/conversion/datasets/pandaset/utils/pandaset_constants.py
+++ b/src/py123d/conversion/datasets/pandaset/utils/pandaset_constants.py
@@ -1,7 +1,6 @@
 from typing import Dict, List
 
-from py123d.conversion.registry.box_detection_type_registry import PandasetBoxDetectionType
-from py123d.datatypes.detections.box_detection_types import BoxDetectionType
+from py123d.conversion.registry.box_detection_label_registry import PandasetBoxDetectionLabel
 from py123d.datatypes.sensors.lidar import LiDARType
 from py123d.datatypes.sensors.pinhole_camera import PinholeCameraType, PinholeDistortion, PinholeIntrinsics
 from py123d.geometry import StateSE3
@@ -20,67 +19,37 @@
 
 PANDASET_LIDAR_MAPPING: Dict[str, LiDARType] = {"main_pandar64": LiDARType.LIDAR_TOP, "front_gt": LiDARType.LIDAR_FRONT}
 
-PANDASET_BOX_DETECTION_FROM_STR: Dict[str, PandasetBoxDetectionType] = {
-    "Animals - Bird": PandasetBoxDetectionType.ANIMALS_BIRD,
-    "Animals - Other": PandasetBoxDetectionType.ANIMALS_OTHER,
-    "Bicycle": PandasetBoxDetectionType.BICYCLE,
-    "Bus": PandasetBoxDetectionType.BUS,
-    "Car": PandasetBoxDetectionType.CAR,
-    "Cones": PandasetBoxDetectionType.CONES,
-    "Construction Signs": PandasetBoxDetectionType.CONSTRUCTION_SIGNS,
-    "Emergency Vehicle": PandasetBoxDetectionType.EMERGENCY_VEHICLE,
-    "Medium-sized Truck": PandasetBoxDetectionType.MEDIUM_SIZED_TRUCK,
-    "Motorcycle": PandasetBoxDetectionType.MOTORCYCLE,
-    "Motorized Scooter": PandasetBoxDetectionType.MOTORIZED_SCOOTER,
-    "Other Vehicle - Construction Vehicle": PandasetBoxDetectionType.OTHER_VEHICLE_CONSTRUCTION_VEHICLE,
-    "Other Vehicle - Pedicab": PandasetBoxDetectionType.OTHER_VEHICLE_PEDICAB,
-    "Other Vehicle - Uncommon": PandasetBoxDetectionType.OTHER_VEHICLE_UNCOMMON,
-    "Pedestrian": PandasetBoxDetectionType.PEDESTRIAN,
-    "Pedestrian with Object": PandasetBoxDetectionType.PEDESTRIAN_WITH_OBJECT,
-    "Personal Mobility Device": PandasetBoxDetectionType.PERSONAL_MOBILITY_DEVICE,
-    "Pickup Truck": PandasetBoxDetectionType.PICKUP_TRUCK,
-    "Pylons": PandasetBoxDetectionType.PYLONS,
-    "Road Barriers": PandasetBoxDetectionType.ROAD_BARRIERS,
-    "Rolling Containers": PandasetBoxDetectionType.ROLLING_CONTAINERS,
-    "Semi-truck": PandasetBoxDetectionType.SEMI_TRUCK,
-    "Signs": PandasetBoxDetectionType.SIGNS,
-    "Temporary Construction Barriers": PandasetBoxDetectionType.TEMPORARY_CONSTRUCTION_BARRIERS,
-    "Towed Object": PandasetBoxDetectionType.TOWED_OBJECT,
-    "Train": PandasetBoxDetectionType.TRAIN,
-    "Tram / Subway": PandasetBoxDetectionType.TRAM_SUBWAY,
+PANDASET_BOX_DETECTION_FROM_STR: Dict[str, PandasetBoxDetectionLabel] = {
+    "Animals - Bird": PandasetBoxDetectionLabel.ANIMALS_BIRD,
+    "Animals - Other": PandasetBoxDetectionLabel.ANIMALS_OTHER,
+    "Bicycle": PandasetBoxDetectionLabel.BICYCLE,
+    "Bus": PandasetBoxDetectionLabel.BUS,
+    "Car": PandasetBoxDetectionLabel.CAR,
+    "Cones": PandasetBoxDetectionLabel.CONES,
+    "Construction Signs": PandasetBoxDetectionLabel.CONSTRUCTION_SIGNS,
+    "Emergency Vehicle": PandasetBoxDetectionLabel.EMERGENCY_VEHICLE,
+    "Medium-sized Truck": PandasetBoxDetectionLabel.MEDIUM_SIZED_TRUCK,
+    "Motorcycle": PandasetBoxDetectionLabel.MOTORCYCLE,
+    "Motorized Scooter": PandasetBoxDetectionLabel.MOTORIZED_SCOOTER,
+    "Other Vehicle - Construction Vehicle": PandasetBoxDetectionLabel.OTHER_VEHICLE_CONSTRUCTION_VEHICLE,
+    "Other Vehicle - Pedicab": PandasetBoxDetectionLabel.OTHER_VEHICLE_PEDICAB,
+    "Other Vehicle - Uncommon": PandasetBoxDetectionLabel.OTHER_VEHICLE_UNCOMMON,
+    "Pedestrian": PandasetBoxDetectionLabel.PEDESTRIAN,
+    "Pedestrian with Object": PandasetBoxDetectionLabel.PEDESTRIAN_WITH_OBJECT,
+    "Personal Mobility Device": PandasetBoxDetectionLabel.PERSONAL_MOBILITY_DEVICE,
+    "Pickup Truck": PandasetBoxDetectionLabel.PICKUP_TRUCK,
+    "Pylons": PandasetBoxDetectionLabel.PYLONS,
+    "Road Barriers": PandasetBoxDetectionLabel.ROAD_BARRIERS,
+    "Rolling Containers": PandasetBoxDetectionLabel.ROLLING_CONTAINERS,
+    "Semi-truck": PandasetBoxDetectionLabel.SEMI_TRUCK,
+    "Signs": PandasetBoxDetectionLabel.SIGNS,
+    "Temporary Construction Barriers": PandasetBoxDetectionLabel.TEMPORARY_CONSTRUCTION_BARRIERS,
+    "Towed Object": PandasetBoxDetectionLabel.TOWED_OBJECT,
+    "Train": PandasetBoxDetectionLabel.TRAIN,
+    "Tram / Subway": PandasetBoxDetectionLabel.TRAM_SUBWAY,
 }
 
-PANDASET_BOX_DETECTION_TO_DEFAULT: Dict[PandasetBoxDetectionType, BoxDetectionType] = {
-    PandasetBoxDetectionType.ANIMALS_BIRD: BoxDetectionType.GENERIC_OBJECT,  # TODO: Adjust default types
-    PandasetBoxDetectionType.ANIMALS_OTHER: BoxDetectionType.GENERIC_OBJECT,  # TODO: Adjust default types
-    PandasetBoxDetectionType.BICYCLE: BoxDetectionType.BICYCLE,
-    PandasetBoxDetectionType.BUS: BoxDetectionType.VEHICLE,
-    PandasetBoxDetectionType.CAR: BoxDetectionType.VEHICLE,
-    PandasetBoxDetectionType.CONES: BoxDetectionType.TRAFFIC_CONE,
-    PandasetBoxDetectionType.CONSTRUCTION_SIGNS: BoxDetectionType.CZONE_SIGN,
-    PandasetBoxDetectionType.EMERGENCY_VEHICLE: BoxDetectionType.VEHICLE,
-    PandasetBoxDetectionType.MEDIUM_SIZED_TRUCK: BoxDetectionType.VEHICLE,
-    PandasetBoxDetectionType.MOTORCYCLE: BoxDetectionType.BICYCLE,
-    PandasetBoxDetectionType.MOTORIZED_SCOOTER: BoxDetectionType.BICYCLE,
-    PandasetBoxDetectionType.OTHER_VEHICLE_CONSTRUCTION_VEHICLE: BoxDetectionType.VEHICLE,
-    PandasetBoxDetectionType.OTHER_VEHICLE_PEDICAB: BoxDetectionType.BICYCLE,
-    PandasetBoxDetectionType.OTHER_VEHICLE_UNCOMMON: BoxDetectionType.VEHICLE,
-    PandasetBoxDetectionType.PEDESTRIAN: BoxDetectionType.PEDESTRIAN,
-    PandasetBoxDetectionType.PEDESTRIAN_WITH_OBJECT: BoxDetectionType.PEDESTRIAN,
-    PandasetBoxDetectionType.PERSONAL_MOBILITY_DEVICE: BoxDetectionType.BICYCLE,
-    PandasetBoxDetectionType.PICKUP_TRUCK: BoxDetectionType.VEHICLE,
-    PandasetBoxDetectionType.PYLONS: BoxDetectionType.TRAFFIC_CONE,
-    PandasetBoxDetectionType.ROAD_BARRIERS: BoxDetectionType.BARRIER,
-    PandasetBoxDetectionType.ROLLING_CONTAINERS: BoxDetectionType.GENERIC_OBJECT,
-    PandasetBoxDetectionType.SEMI_TRUCK: BoxDetectionType.VEHICLE,
-    PandasetBoxDetectionType.SIGNS: BoxDetectionType.SIGN,
-    PandasetBoxDetectionType.TEMPORARY_CONSTRUCTION_BARRIERS: BoxDetectionType.BARRIER,
-    PandasetBoxDetectionType.TOWED_OBJECT: BoxDetectionType.VEHICLE,
-    PandasetBoxDetectionType.TRAIN: BoxDetectionType.GENERIC_OBJECT,  # TODO: Adjust default types
-    PandasetBoxDetectionType.TRAM_SUBWAY: BoxDetectionType.GENERIC_OBJECT,  # TODO: Adjust default types
-}
-
 # https://github.com/scaleapi/pandaset-devkit/blob/master/docs/static_extrinsic_calibration.yaml
 PANDASET_LIDAR_EXTRINSICS: Dict[str, StateSE3] = {
     "front_gt": StateSE3(
diff --git a/src/py123d/conversion/datasets/wopd/utils/wopd_constants.py b/src/py123d/conversion/datasets/wopd/utils/wopd_constants.py
index e617d2aa..efa577d6 100644
--- a/src/py123d/conversion/datasets/wopd/utils/wopd_constants.py
+++ b/src/py123d/conversion/datasets/wopd/utils/wopd_constants.py
@@ -1,6 +1,6 @@
 from typing import Dict, List
 
-from py123d.conversion.registry.box_detection_type_registry import WOPDBoxDetectionType
+from py123d.conversion.registry.box_detection_label_registry import WOPDBoxDetectionLabel
 from py123d.datatypes.maps.map_datatypes import LaneType, RoadEdgeType, RoadLineType
 from py123d.datatypes.sensors.lidar import LiDARType
 from py123d.datatypes.sensors.pinhole_camera import PinholeCameraType
@@ -12,12 +12,12 @@
 ]
 
 # https://github.com/waymo-research/waymo-open-dataset/blob/master/src/waymo_open_dataset/label.proto#L63
-WOPD_DETECTION_NAME_DICT: Dict[int, WOPDBoxDetectionType] = {
-    0: WOPDBoxDetectionType.TYPE_UNKNOWN,
-    1: WOPDBoxDetectionType.TYPE_VEHICLE,
-    2: WOPDBoxDetectionType.TYPE_PEDESTRIAN,
-    3: WOPDBoxDetectionType.TYPE_SIGN,
-    4: WOPDBoxDetectionType.TYPE_CYCLIST,
+WOPD_DETECTION_NAME_DICT: Dict[int, WOPDBoxDetectionLabel] = {
+    0: WOPDBoxDetectionLabel.TYPE_UNKNOWN,
+    1: WOPDBoxDetectionLabel.TYPE_VEHICLE,
+    2: WOPDBoxDetectionLabel.TYPE_PEDESTRIAN,
+    3: WOPDBoxDetectionLabel.TYPE_SIGN,
+    4: WOPDBoxDetectionLabel.TYPE_CYCLIST,
 }
 
 # https://github.com/waymo-research/waymo-open-dataset/blob/master/src/waymo_open_dataset/dataset.proto#L50
diff --git a/src/py123d/conversion/datasets/wopd/wopd_converter.py b/src/py123d/conversion/datasets/wopd/wopd_converter.py
index cc42ab4d..2622dc66 100644
--- a/src/py123d/conversion/datasets/wopd/wopd_converter.py
+++ b/src/py123d/conversion/datasets/wopd/wopd_converter.py
@@ -19,6 +19,7 @@
 from py123d.conversion.datasets.wopd.waymo_map_utils.wopd_map_utils import convert_wopd_map
 from py123d.conversion.log_writer.abstract_log_writer import AbstractLogWriter, LiDARData
 from py123d.conversion.map_writer.abstract_map_writer import AbstractMapWriter
+from py123d.conversion.registry.box_detection_label_registry import WOPDBoxDetectionLabel
 from py123d.conversion.registry.lidar_index_registry import DefaultLiDARIndex, WOPDLiDARIndex
 from py123d.conversion.utils.sensor_utils.camera_conventions import CameraConvention, convert_camera_convention
 from py123d.datatypes.detections.box_detections import BoxDetectionMetadata, BoxDetectionSE3, BoxDetectionWrapper
@@ -143,6 +144,7 @@ def convert_log(self, log_index: int, log_writer: AbstractLogWriter) -> None:
             location=str(initial_frame.context.stats.location),
timestep_seconds=0.1, vehicle_parameters=get_wopd_chrysler_pacifica_parameters(), + box_detection_label_class=WOPDBoxDetectionLabel, pinhole_camera_metadata=_get_wopd_camera_metadata( initial_frame, self.dataset_converter_config, @@ -362,7 +364,7 @@ def _extract_wopd_box_detections( box_detections.append( BoxDetectionSE3( metadata=BoxDetectionMetadata( - box_detection_type=detections_types[detection_idx], + label=detections_types[detection_idx], timepoint=None, track_token=detections_token[detection_idx], confidence=None, diff --git a/src/py123d/conversion/log_writer/arrow_log_writer.py b/src/py123d/conversion/log_writer/arrow_log_writer.py index 335fc9e3..5f25edb7 100644 --- a/src/py123d/conversion/log_writer/arrow_log_writer.py +++ b/src/py123d/conversion/log_writer/arrow_log_writer.py @@ -126,19 +126,19 @@ def write( box_detection_state = [] box_detection_velocity = [] box_detection_token = [] - box_detection_type = [] + box_detection_label = [] for box_detection in box_detections: box_detection_state.append(box_detection.bounding_box.array) box_detection_velocity.append(box_detection.velocity.array) # TODO: make optional box_detection_token.append(box_detection.metadata.track_token) - box_detection_type.append(int(box_detection.metadata.box_detection_type)) + box_detection_label.append(int(box_detection.metadata.label)) # Add to record batch data record_batch_data["box_detection_state"] = [box_detection_state] record_batch_data["box_detection_velocity"] = [box_detection_velocity] record_batch_data["box_detection_token"] = [box_detection_token] - record_batch_data["box_detection_type"] = [box_detection_type] + record_batch_data["box_detection_label"] = [box_detection_label] # -------------------------------------------------------------------------------------------------------------- # Traffic Lights @@ -306,7 +306,7 @@ def _build_schema(dataset_converter_config: DatasetConverterConfig, log_metadata ("box_detection_state", pa.list_(pa.list_(pa.float64(), len(BoundingBoxSE3Index)))), ("box_detection_velocity", pa.list_(pa.list_(pa.float64(), len(Vector3DIndex)))), ("box_detection_token", pa.list_(pa.string())), - ("box_detection_type", pa.list_(pa.int16())), + ("box_detection_label", pa.list_(pa.int16())), ] ) diff --git a/src/py123d/conversion/registry/box_detection_label_registry.py b/src/py123d/conversion/registry/box_detection_label_registry.py new file mode 100644 index 00000000..0d39e2d3 --- /dev/null +++ b/src/py123d/conversion/registry/box_detection_label_registry.py @@ -0,0 +1,351 @@ +from __future__ import annotations + +import abc + +from py123d.common.utils.enums import SerialIntEnum + +BOX_DETECTION_LABEL_REGISTRY = {} + + +def register_box_detection_label(enum_class): + BOX_DETECTION_LABEL_REGISTRY[enum_class.__name__] = enum_class + return enum_class + + +class BoxDetectionLabel(SerialIntEnum): + + @abc.abstractmethod + def to_default(self) -> DefaultBoxDetectionLabel: + raise NotImplementedError("Subclasses must implement this method.") + + +@register_box_detection_label +class DefaultBoxDetectionLabel(BoxDetectionLabel): + """ + Enum for agents in py123d. 
+    """
+
+    VEHICLE = 0
+    BICYCLE = 1
+    PEDESTRIAN = 2
+
+    TRAFFIC_CONE = 3
+    BARRIER = 4
+    CZONE_SIGN = 5
+    GENERIC_OBJECT = 6
+
+    EGO = 7
+    SIGN = 8  # TODO: Remove or extend
+
+    def to_default(self) -> DefaultBoxDetectionLabel:
+        """Inherited, see superclass."""
+        return self
+
+
+@register_box_detection_label
+class AV2SensorBoxDetectionLabel(BoxDetectionLabel):
+    """Sensor dataset annotation categories."""
+
+    ANIMAL = 1
+    ARTICULATED_BUS = 2
+    BICYCLE = 3
+    BICYCLIST = 4
+    BOLLARD = 5
+    BOX_TRUCK = 6
+    BUS = 7
+    CONSTRUCTION_BARREL = 8
+    CONSTRUCTION_CONE = 9
+    DOG = 10
+    LARGE_VEHICLE = 11
+    MESSAGE_BOARD_TRAILER = 12
+    MOBILE_PEDESTRIAN_CROSSING_SIGN = 13
+    MOTORCYCLE = 14
+    MOTORCYCLIST = 15
+    OFFICIAL_SIGNALER = 16
+    PEDESTRIAN = 17
+    RAILED_VEHICLE = 18
+    REGULAR_VEHICLE = 19
+    SCHOOL_BUS = 20
+    SIGN = 21
+    STOP_SIGN = 22
+    STROLLER = 23
+    TRAFFIC_LIGHT_TRAILER = 24
+    TRUCK = 25
+    TRUCK_CAB = 26
+    VEHICULAR_TRAILER = 27
+    WHEELCHAIR = 28
+    WHEELED_DEVICE = 29
+    WHEELED_RIDER = 30
+
+    def to_default(self) -> DefaultBoxDetectionLabel:
+        """Inherited, see superclass."""
+        mapping = {
+            AV2SensorBoxDetectionLabel.ANIMAL: DefaultBoxDetectionLabel.GENERIC_OBJECT,
+            AV2SensorBoxDetectionLabel.ARTICULATED_BUS: DefaultBoxDetectionLabel.VEHICLE,
+            AV2SensorBoxDetectionLabel.BICYCLE: DefaultBoxDetectionLabel.BICYCLE,
+            AV2SensorBoxDetectionLabel.BICYCLIST: DefaultBoxDetectionLabel.PEDESTRIAN,
+            AV2SensorBoxDetectionLabel.BOLLARD: DefaultBoxDetectionLabel.BARRIER,
+            AV2SensorBoxDetectionLabel.BOX_TRUCK: DefaultBoxDetectionLabel.VEHICLE,
+            AV2SensorBoxDetectionLabel.BUS: DefaultBoxDetectionLabel.VEHICLE,
+            AV2SensorBoxDetectionLabel.CONSTRUCTION_BARREL: DefaultBoxDetectionLabel.BARRIER,
+            AV2SensorBoxDetectionLabel.CONSTRUCTION_CONE: DefaultBoxDetectionLabel.TRAFFIC_CONE,
+            AV2SensorBoxDetectionLabel.DOG: DefaultBoxDetectionLabel.GENERIC_OBJECT,
+            AV2SensorBoxDetectionLabel.LARGE_VEHICLE: DefaultBoxDetectionLabel.VEHICLE,
+            AV2SensorBoxDetectionLabel.MESSAGE_BOARD_TRAILER: DefaultBoxDetectionLabel.VEHICLE,
+            AV2SensorBoxDetectionLabel.MOBILE_PEDESTRIAN_CROSSING_SIGN: DefaultBoxDetectionLabel.CZONE_SIGN,
+            AV2SensorBoxDetectionLabel.MOTORCYCLE: DefaultBoxDetectionLabel.BICYCLE,
+            AV2SensorBoxDetectionLabel.MOTORCYCLIST: DefaultBoxDetectionLabel.BICYCLE,
+            AV2SensorBoxDetectionLabel.OFFICIAL_SIGNALER: DefaultBoxDetectionLabel.PEDESTRIAN,
+            AV2SensorBoxDetectionLabel.PEDESTRIAN: DefaultBoxDetectionLabel.PEDESTRIAN,
+            AV2SensorBoxDetectionLabel.RAILED_VEHICLE: DefaultBoxDetectionLabel.VEHICLE,
+            AV2SensorBoxDetectionLabel.REGULAR_VEHICLE: DefaultBoxDetectionLabel.VEHICLE,
+            AV2SensorBoxDetectionLabel.SCHOOL_BUS: DefaultBoxDetectionLabel.VEHICLE,
+            AV2SensorBoxDetectionLabel.SIGN: DefaultBoxDetectionLabel.SIGN,
+            AV2SensorBoxDetectionLabel.STOP_SIGN: DefaultBoxDetectionLabel.SIGN,
+            AV2SensorBoxDetectionLabel.STROLLER: DefaultBoxDetectionLabel.PEDESTRIAN,
+            AV2SensorBoxDetectionLabel.TRAFFIC_LIGHT_TRAILER: DefaultBoxDetectionLabel.VEHICLE,
+            AV2SensorBoxDetectionLabel.TRUCK: DefaultBoxDetectionLabel.VEHICLE,
+            AV2SensorBoxDetectionLabel.TRUCK_CAB: DefaultBoxDetectionLabel.VEHICLE,
+            AV2SensorBoxDetectionLabel.VEHICULAR_TRAILER: DefaultBoxDetectionLabel.VEHICLE,
+            AV2SensorBoxDetectionLabel.WHEELCHAIR: DefaultBoxDetectionLabel.PEDESTRIAN,
+            AV2SensorBoxDetectionLabel.WHEELED_DEVICE: DefaultBoxDetectionLabel.GENERIC_OBJECT,
+            AV2SensorBoxDetectionLabel.WHEELED_RIDER: DefaultBoxDetectionLabel.BICYCLE,
+        }
+        return mapping[self]
+
+
+@register_box_detection_label
+class
KITTI360BoxDetectionLabel(BoxDetectionLabel): + + BICYCLE = 0 + BOX = 1 + BUS = 2 + CAR = 3 + CARAVAN = 4 + LAMP = 5 + MOTORCYCLE = 6 + PERSON = 7 + POLE = 8 + RIDER = 9 + SMALLPOLE = 10 + STOP = 11 + TRAFFIC_LIGHT = 12 + TRAFFIC_SIGN = 13 + TRAILER = 14 + TRAIN = 15 + TRASH_BIN = 16 + TRUCK = 17 + VENDING_MACHINE = 18 + + def to_default(self) -> DefaultBoxDetectionLabel: + mapping = { + KITTI360BoxDetectionLabel.BICYCLE: DefaultBoxDetectionLabel.BICYCLE, + KITTI360BoxDetectionLabel.BOX: DefaultBoxDetectionLabel.GENERIC_OBJECT, + KITTI360BoxDetectionLabel.BUS: DefaultBoxDetectionLabel.VEHICLE, + KITTI360BoxDetectionLabel.CAR: DefaultBoxDetectionLabel.VEHICLE, + KITTI360BoxDetectionLabel.CARAVAN: DefaultBoxDetectionLabel.VEHICLE, + KITTI360BoxDetectionLabel.LAMP: DefaultBoxDetectionLabel.GENERIC_OBJECT, + KITTI360BoxDetectionLabel.MOTORCYCLE: DefaultBoxDetectionLabel.BICYCLE, + KITTI360BoxDetectionLabel.PERSON: DefaultBoxDetectionLabel.PEDESTRIAN, + KITTI360BoxDetectionLabel.POLE: DefaultBoxDetectionLabel.GENERIC_OBJECT, + KITTI360BoxDetectionLabel.RIDER: DefaultBoxDetectionLabel.BICYCLE, + KITTI360BoxDetectionLabel.SMALLPOLE: DefaultBoxDetectionLabel.GENERIC_OBJECT, + KITTI360BoxDetectionLabel.STOP: DefaultBoxDetectionLabel.SIGN, + KITTI360BoxDetectionLabel.TRAFFIC_LIGHT: DefaultBoxDetectionLabel.SIGN, + KITTI360BoxDetectionLabel.TRAFFIC_SIGN: DefaultBoxDetectionLabel.SIGN, + KITTI360BoxDetectionLabel.TRAILER: DefaultBoxDetectionLabel.VEHICLE, + KITTI360BoxDetectionLabel.TRAIN: DefaultBoxDetectionLabel.VEHICLE, + KITTI360BoxDetectionLabel.TRASH_BIN: DefaultBoxDetectionLabel.GENERIC_OBJECT, + KITTI360BoxDetectionLabel.TRUCK: DefaultBoxDetectionLabel.VEHICLE, + KITTI360BoxDetectionLabel.VENDING_MACHINE: DefaultBoxDetectionLabel.GENERIC_OBJECT, + } + return mapping[self] + + +@register_box_detection_label +class NuPlanBoxDetectionLabel(BoxDetectionLabel): + """ + Semantic labels for nuPlan bounding box detections. + + Descriptions in `.db` files: + - vehicle: Includes all four or more wheeled vehicles, as well as trailers. + - bicycle: Includes bicycles, motorcycles and tricycles. + - pedestrian: All types of pedestrians, incl. strollers and wheelchairs. + - traffic_cone: Cones that are temporarily placed to control the flow of traffic. + - barrier: Solid barriers that can be either temporary or permanent. + - czone_sign: Temporary signs that indicate construction zones. + - generic_object: Animals, debris, pushable/pullable objects, permanent poles. + """ + + VEHICLE = 0 + BICYCLE = 1 + PEDESTRIAN = 2 + TRAFFIC_CONE = 3 + BARRIER = 4 + CZONE_SIGN = 5 + GENERIC_OBJECT = 6 + + def to_default(self) -> DefaultBoxDetectionLabel: + mapping = { + NuPlanBoxDetectionLabel.VEHICLE: DefaultBoxDetectionLabel.VEHICLE, + NuPlanBoxDetectionLabel.BICYCLE: DefaultBoxDetectionLabel.BICYCLE, + NuPlanBoxDetectionLabel.PEDESTRIAN: DefaultBoxDetectionLabel.PEDESTRIAN, + NuPlanBoxDetectionLabel.TRAFFIC_CONE: DefaultBoxDetectionLabel.TRAFFIC_CONE, + NuPlanBoxDetectionLabel.BARRIER: DefaultBoxDetectionLabel.BARRIER, + NuPlanBoxDetectionLabel.CZONE_SIGN: DefaultBoxDetectionLabel.CZONE_SIGN, + NuPlanBoxDetectionLabel.GENERIC_OBJECT: DefaultBoxDetectionLabel.GENERIC_OBJECT, + } + return mapping[self] + + +@register_box_detection_label +class NuScenesBoxDetectionLabel(BoxDetectionLabel): + """ + Semantic labels for nuScenes bounding box detections. 
+ [1] https://github.com/nutonomy/nuscenes-devkit/blob/master/docs/instructions_nuscenes.md#labels + """ + + VEHICLE_CAR = 0 + VEHICLE_TRUCK = 1 + VEHICLE_BUS_BENDY = 2 + VEHICLE_BUS_RIGID = 3 + VEHICLE_CONSTRUCTION = 4 + VEHICLE_EMERGENCY_AMBULANCE = 5 + VEHICLE_EMERGENCY_POLICE = 6 + VEHICLE_TRAILER = 7 + VEHICLE_BICYCLE = 8 + VEHICLE_MOTORCYCLE = 9 + HUMAN_PEDESTRIAN_ADULT = 10 + HUMAN_PEDESTRIAN_CHILD = 11 + HUMAN_PEDESTRIAN_CONSTRUCTION_WORKER = 12 + HUMAN_PEDESTRIAN_PERSONAL_MOBILITY = 13 + HUMAN_PEDESTRIAN_POLICE_OFFICER = 14 + HUMAN_PEDESTRIAN_STROLLER = 15 + HUMAN_PEDESTRIAN_WHEELCHAIR = 16 + MOVABLE_OBJECT_TRAFFICCONE = 17 + MOVABLE_OBJECT_BARRIER = 18 + MOVABLE_OBJECT_PUSHABLE_PULLABLE = 19 + MOVABLE_OBJECT_DEBRIS = 20 + STATIC_OBJECT_BICYCLE_RACK = 21 + ANIMAL = 22 + + def to_default(self): + mapping = { + NuScenesBoxDetectionLabel.VEHICLE_CAR: DefaultBoxDetectionLabel.VEHICLE, + NuScenesBoxDetectionLabel.VEHICLE_TRUCK: DefaultBoxDetectionLabel.VEHICLE, + NuScenesBoxDetectionLabel.VEHICLE_BUS_BENDY: DefaultBoxDetectionLabel.VEHICLE, + NuScenesBoxDetectionLabel.VEHICLE_BUS_RIGID: DefaultBoxDetectionLabel.VEHICLE, + NuScenesBoxDetectionLabel.VEHICLE_CONSTRUCTION: DefaultBoxDetectionLabel.VEHICLE, + NuScenesBoxDetectionLabel.VEHICLE_EMERGENCY_AMBULANCE: DefaultBoxDetectionLabel.VEHICLE, + NuScenesBoxDetectionLabel.VEHICLE_EMERGENCY_POLICE: DefaultBoxDetectionLabel.VEHICLE, + NuScenesBoxDetectionLabel.VEHICLE_TRAILER: DefaultBoxDetectionLabel.VEHICLE, + NuScenesBoxDetectionLabel.VEHICLE_BICYCLE: DefaultBoxDetectionLabel.BICYCLE, + NuScenesBoxDetectionLabel.VEHICLE_MOTORCYCLE: DefaultBoxDetectionLabel.BICYCLE, + NuScenesBoxDetectionLabel.HUMAN_PEDESTRIAN_ADULT: DefaultBoxDetectionLabel.PEDESTRIAN, + NuScenesBoxDetectionLabel.HUMAN_PEDESTRIAN_CHILD: DefaultBoxDetectionLabel.PEDESTRIAN, + NuScenesBoxDetectionLabel.HUMAN_PEDESTRIAN_CONSTRUCTION_WORKER: DefaultBoxDetectionLabel.PEDESTRIAN, + NuScenesBoxDetectionLabel.HUMAN_PEDESTRIAN_PERSONAL_MOBILITY: DefaultBoxDetectionLabel.PEDESTRIAN, + NuScenesBoxDetectionLabel.HUMAN_PEDESTRIAN_POLICE_OFFICER: DefaultBoxDetectionLabel.PEDESTRIAN, + NuScenesBoxDetectionLabel.HUMAN_PEDESTRIAN_STROLLER: DefaultBoxDetectionLabel.PEDESTRIAN, + NuScenesBoxDetectionLabel.HUMAN_PEDESTRIAN_WHEELCHAIR: DefaultBoxDetectionLabel.PEDESTRIAN, + NuScenesBoxDetectionLabel.MOVABLE_OBJECT_TRAFFICCONE: DefaultBoxDetectionLabel.TRAFFIC_CONE, + NuScenesBoxDetectionLabel.MOVABLE_OBJECT_BARRIER: DefaultBoxDetectionLabel.BARRIER, + NuScenesBoxDetectionLabel.MOVABLE_OBJECT_PUSHABLE_PULLABLE: DefaultBoxDetectionLabel.GENERIC_OBJECT, + NuScenesBoxDetectionLabel.MOVABLE_OBJECT_DEBRIS: DefaultBoxDetectionLabel.GENERIC_OBJECT, + NuScenesBoxDetectionLabel.STATIC_OBJECT_BICYCLE_RACK: DefaultBoxDetectionLabel.GENERIC_OBJECT, + NuScenesBoxDetectionLabel.ANIMAL: DefaultBoxDetectionLabel.GENERIC_OBJECT, + } + return mapping[self] + + +@register_box_detection_label +class PandasetBoxDetectionLabel(BoxDetectionLabel): + """ + Semantic labels for Pandaset bounding box detections. 
+ [1] https://github.com/scaleapi/pandaset-devkit/blob/master/docs/annotation_instructions_cuboids.pdf + """ + + ANIMALS_BIRD = 0 + ANIMALS_OTHER = 1 + BICYCLE = 2 + BUS = 3 + CAR = 4 + CONES = 5 + CONSTRUCTION_SIGNS = 6 + EMERGENCY_VEHICLE = 7 + MEDIUM_SIZED_TRUCK = 8 + MOTORCYCLE = 9 + MOTORIZED_SCOOTER = 10 + OTHER_VEHICLE_CONSTRUCTION_VEHICLE = 11 + OTHER_VEHICLE_PEDICAB = 12 + OTHER_VEHICLE_UNCOMMON = 13 + PEDESTRIAN = 14 + PEDESTRIAN_WITH_OBJECT = 15 + PERSONAL_MOBILITY_DEVICE = 16 + PICKUP_TRUCK = 17 + PYLONS = 18 + ROAD_BARRIERS = 19 + ROLLING_CONTAINERS = 20 + SEMI_TRUCK = 21 + SIGNS = 22 + TEMPORARY_CONSTRUCTION_BARRIERS = 23 + TOWED_OBJECT = 24 + TRAIN = 25 + TRAM_SUBWAY = 26 + + def to_default(self) -> DefaultBoxDetectionLabel: + mapping = { + PandasetBoxDetectionLabel.ANIMALS_BIRD: DefaultBoxDetectionLabel.GENERIC_OBJECT, # TODO: Adjust default types + PandasetBoxDetectionLabel.ANIMALS_OTHER: DefaultBoxDetectionLabel.GENERIC_OBJECT, # TODO: Adjust default types + PandasetBoxDetectionLabel.BICYCLE: DefaultBoxDetectionLabel.BICYCLE, + PandasetBoxDetectionLabel.BUS: DefaultBoxDetectionLabel.VEHICLE, + PandasetBoxDetectionLabel.CAR: DefaultBoxDetectionLabel.VEHICLE, + PandasetBoxDetectionLabel.CONES: DefaultBoxDetectionLabel.TRAFFIC_CONE, + PandasetBoxDetectionLabel.CONSTRUCTION_SIGNS: DefaultBoxDetectionLabel.CZONE_SIGN, + PandasetBoxDetectionLabel.EMERGENCY_VEHICLE: DefaultBoxDetectionLabel.VEHICLE, + PandasetBoxDetectionLabel.MEDIUM_SIZED_TRUCK: DefaultBoxDetectionLabel.VEHICLE, + PandasetBoxDetectionLabel.MOTORCYCLE: DefaultBoxDetectionLabel.BICYCLE, + PandasetBoxDetectionLabel.MOTORIZED_SCOOTER: DefaultBoxDetectionLabel.BICYCLE, + PandasetBoxDetectionLabel.OTHER_VEHICLE_CONSTRUCTION_VEHICLE: DefaultBoxDetectionLabel.VEHICLE, + PandasetBoxDetectionLabel.OTHER_VEHICLE_PEDICAB: DefaultBoxDetectionLabel.BICYCLE, + PandasetBoxDetectionLabel.OTHER_VEHICLE_UNCOMMON: DefaultBoxDetectionLabel.VEHICLE, + PandasetBoxDetectionLabel.PEDESTRIAN: DefaultBoxDetectionLabel.PEDESTRIAN, + PandasetBoxDetectionLabel.PEDESTRIAN_WITH_OBJECT: DefaultBoxDetectionLabel.PEDESTRIAN, + PandasetBoxDetectionLabel.PERSONAL_MOBILITY_DEVICE: DefaultBoxDetectionLabel.BICYCLE, + PandasetBoxDetectionLabel.PICKUP_TRUCK: DefaultBoxDetectionLabel.VEHICLE, + PandasetBoxDetectionLabel.PYLONS: DefaultBoxDetectionLabel.TRAFFIC_CONE, + PandasetBoxDetectionLabel.ROAD_BARRIERS: DefaultBoxDetectionLabel.BARRIER, + PandasetBoxDetectionLabel.ROLLING_CONTAINERS: DefaultBoxDetectionLabel.GENERIC_OBJECT, + PandasetBoxDetectionLabel.SEMI_TRUCK: DefaultBoxDetectionLabel.VEHICLE, + PandasetBoxDetectionLabel.SIGNS: DefaultBoxDetectionLabel.SIGN, + PandasetBoxDetectionLabel.TEMPORARY_CONSTRUCTION_BARRIERS: DefaultBoxDetectionLabel.BARRIER, + PandasetBoxDetectionLabel.TOWED_OBJECT: DefaultBoxDetectionLabel.VEHICLE, + PandasetBoxDetectionLabel.TRAIN: DefaultBoxDetectionLabel.GENERIC_OBJECT, # TODO: Adjust default types + PandasetBoxDetectionLabel.TRAM_SUBWAY: DefaultBoxDetectionLabel.GENERIC_OBJECT, # TODO: Adjust default types + } + return mapping[self] + + +@register_box_detection_label +class WOPDBoxDetectionLabel(BoxDetectionLabel): + """ + Semantic labels for Waymo Open Dataset bounding box detections. 
+ [1] https://github.com/waymo-research/waymo-open-dataset/blob/master/docs/labeling_specifications.md + [2] https://github.com/waymo-research/waymo-open-dataset/blob/master/src/waymo_open_dataset/label.proto#L63-L69 + """ + + TYPE_UNKNOWN = 0 + TYPE_VEHICLE = 1 + TYPE_PEDESTRIAN = 2 + TYPE_SIGN = 3 + TYPE_CYCLIST = 4 + + def to_default(self) -> DefaultBoxDetectionLabel: + mapping = { + WOPDBoxDetectionLabel.TYPE_UNKNOWN: DefaultBoxDetectionLabel.GENERIC_OBJECT, + WOPDBoxDetectionLabel.TYPE_VEHICLE: DefaultBoxDetectionLabel.VEHICLE, + WOPDBoxDetectionLabel.TYPE_PEDESTRIAN: DefaultBoxDetectionLabel.PEDESTRIAN, + WOPDBoxDetectionLabel.TYPE_SIGN: DefaultBoxDetectionLabel.SIGN, + WOPDBoxDetectionLabel.TYPE_CYCLIST: DefaultBoxDetectionLabel.BICYCLE, + } + return mapping[self] diff --git a/src/py123d/conversion/registry/box_detection_type_registry.py b/src/py123d/conversion/registry/box_detection_type_registry.py deleted file mode 100644 index e1f38091..00000000 --- a/src/py123d/conversion/registry/box_detection_type_registry.py +++ /dev/null @@ -1,107 +0,0 @@ -from py123d.datatypes.detections.box_detection_types import AbstractBoxDetectionType - -BOX_DETECTION_TYPE_REGISTRY = {} - - -def register_box_detection_type(enum_class): - BOX_DETECTION_TYPE_REGISTRY[enum_class.__name__] = enum_class - return enum_class - - -@register_box_detection_type -class AV2SensorBoxDetectionType(AbstractBoxDetectionType): - """Sensor dataset annotation categories.""" - - ANIMAL = 1 - ARTICULATED_BUS = 2 - BICYCLE = 3 - BICYCLIST = 4 - BOLLARD = 5 - BOX_TRUCK = 6 - BUS = 7 - CONSTRUCTION_BARREL = 8 - CONSTRUCTION_CONE = 9 - DOG = 10 - LARGE_VEHICLE = 11 - MESSAGE_BOARD_TRAILER = 12 - MOBILE_PEDESTRIAN_CROSSING_SIGN = 13 - MOTORCYCLE = 14 - MOTORCYCLIST = 15 - OFFICIAL_SIGNALER = 16 - PEDESTRIAN = 17 - RAILED_VEHICLE = 18 - REGULAR_VEHICLE = 19 - SCHOOL_BUS = 20 - SIGN = 21 - STOP_SIGN = 22 - STROLLER = 23 - TRAFFIC_LIGHT_TRAILER = 24 - TRUCK = 25 - TRUCK_CAB = 26 - VEHICULAR_TRAILER = 27 - WHEELCHAIR = 28 - WHEELED_DEVICE = 29 - WHEELED_RIDER = 30 - - -@register_box_detection_type -class KITTI360BoxDetectionType(AbstractBoxDetectionType): - pass - - -@register_box_detection_type -class NuPlanBoxDetectionType(AbstractBoxDetectionType): - - VEHICLE = 0 - BICYCLE = 1 - PEDESTRIAN = 2 - TRAFFIC_CONE = 3 - BARRIER = 4 - CZONE_SIGN = 5 - GENERIC_OBJECT = 6 - - -@register_box_detection_type -class NuScenesBoxDetectionType(AbstractBoxDetectionType): - pass - - -class PandasetBoxDetectionType(AbstractBoxDetectionType): - - ANIMALS_BIRD = 0 - ANIMALS_OTHER = 1 - BICYCLE = 2 - BUS = 3 - CAR = 4 - CONES = 5 - CONSTRUCTION_SIGNS = 6 - EMERGENCY_VEHICLE = 7 - MEDIUM_SIZED_TRUCK = 8 - MOTORCYCLE = 9 - MOTORIZED_SCOOTER = 10 - OTHER_VEHICLE_CONSTRUCTION_VEHICLE = 11 - OTHER_VEHICLE_PEDICAB = 12 - OTHER_VEHICLE_UNCOMMON = 13 - PEDESTRIAN = 14 - PEDESTRIAN_WITH_OBJECT = 15 - PERSONAL_MOBILITY_DEVICE = 16 - PICKUP_TRUCK = 17 - PYLONS = 18 - ROAD_BARRIERS = 19 - ROLLING_CONTAINERS = 20 - SEMI_TRUCK = 21 - SIGNS = 22 - TEMPORARY_CONSTRUCTION_BARRIERS = 23 - TOWED_OBJECT = 24 - TRAIN = 25 - TRAM_SUBWAY = 26 - - -class WOPDBoxDetectionType(AbstractBoxDetectionType): - # https://github.com/waymo-research/waymo-open-dataset/blob/master/src/waymo_open_dataset/label.proto#L63-L69 - - TYPE_UNKNOWN = 0 - TYPE_VEHICLE = 1 - TYPE_PEDESTRIAN = 2 - TYPE_SIGN = 3 - TYPE_CYCLIST = 4 diff --git a/src/py123d/datatypes/detections/box_detection_types.py b/src/py123d/datatypes/detections/box_detection_types.py deleted file mode 100644 index 
05f5aadc..00000000 --- a/src/py123d/datatypes/detections/box_detection_types.py +++ /dev/null @@ -1,48 +0,0 @@ -from __future__ import annotations - -import abc - -from py123d.common.utils.enums import SerialIntEnum - - -class AbstractBoxDetectionType(SerialIntEnum): - - @abc.abstractmethod - def to_default_type(self): - raise NotImplementedError("Subclasses must implement this method.") - - -class BoxDetectionType(AbstractBoxDetectionType): - """ - Enum for agents in py123d. - """ - - VEHICLE = 0 # Includes all four or more wheeled vehicles, as well as trailers. - BICYCLE = 1 # Includes bicycles, motorcycles and tricycles. - PEDESTRIAN = 2 # Pedestrians, incl. strollers and wheelchairs. - - TRAFFIC_CONE = 3 # Cones that are temporarily placed to control the flow of traffic. - BARRIER = 4 # Solid barriers that can be either temporary or permanent. - CZONE_SIGN = 5 # Temporary signs that indicate construction zones. - GENERIC_OBJECT = 6 # Animals, debris, pushable/pullable objects, permanent poles. - - EGO = 7 - SIGN = 8 # TODO: Remove or extent - - def to_default_type(self): - """Inherited, see superclass.""" - return self - - -DYNAMIC_DETECTION_TYPES: set[BoxDetectionType] = { - BoxDetectionType.VEHICLE, - BoxDetectionType.BICYCLE, - BoxDetectionType.PEDESTRIAN, -} - -STATIC_DETECTION_TYPES: set[BoxDetectionType] = { - BoxDetectionType.TRAFFIC_CONE, - BoxDetectionType.BARRIER, - BoxDetectionType.CZONE_SIGN, - BoxDetectionType.GENERIC_OBJECT, -} diff --git a/src/py123d/datatypes/detections/box_detections.py b/src/py123d/datatypes/detections/box_detections.py index 9527c6a5..64ebb851 100644 --- a/src/py123d/datatypes/detections/box_detections.py +++ b/src/py123d/datatypes/detections/box_detections.py @@ -1,10 +1,10 @@ from dataclasses import dataclass from functools import cached_property -from typing import Iterable, List, Optional, Union +from typing import List, Optional, Union import shapely -from py123d.datatypes.detections.box_detection_types import BoxDetectionType +from py123d.conversion.registry.box_detection_label_registry import BoxDetectionLabel, DefaultBoxDetectionLabel from py123d.datatypes.time.time_point import TimePoint from py123d.geometry import BoundingBoxSE2, BoundingBoxSE3, OccupancyMap2D, StateSE2, StateSE3, Vector2D, Vector3D @@ -12,15 +12,15 @@ @dataclass class BoxDetectionMetadata: - box_detection_type: BoxDetectionType + label: BoxDetectionLabel track_token: str - confidence: Optional[float] = None # Confidence score of the detection, if available - num_lidar_points: Optional[int] = None # Number of LiDAR points within the bounding box - timepoint: Optional[TimePoint] = None # TimePoint when the detection was made, if available + confidence: Optional[float] = None + num_lidar_points: Optional[int] = None + timepoint: Optional[TimePoint] = None @property - def default_box_detection_type(self) -> BoxDetectionType: - return self.box_detection_type.to_default_type() + def default_label(self) -> DefaultBoxDetectionLabel: + return self.label.to_default() @dataclass @@ -96,11 +96,6 @@ def __len__(self) -> int: def __iter__(self): return iter(self.box_detections) - def get_box_detections_by_types(self, detection_types: Iterable[BoxDetectionType]) -> List[BoxDetection]: - return [ - detection for detection in self.box_detections if detection.metadata.box_detection_type in detection_types - ] - def get_detection_by_track_token(self, track_token: str) -> Optional[BoxDetection]: box_detection: Optional[BoxDetection] = None for detection in self.box_detections: diff --git 
a/src/py123d/datatypes/scene/arrow/arrow_scene.py b/src/py123d/datatypes/scene/arrow/arrow_scene.py index 79fd4d87..06c6c1b0 100644 --- a/src/py123d/datatypes/scene/arrow/arrow_scene.py +++ b/src/py123d/datatypes/scene/arrow/arrow_scene.py @@ -114,7 +114,11 @@ def get_ego_state_at_iteration(self, iteration: int) -> Optional[EgoStateSE3]: ) def get_box_detections_at_iteration(self, iteration: int) -> Optional[BoxDetectionWrapper]: - return get_box_detections_from_arrow_table(self._get_recording_table(), self._get_table_index(iteration)) + return get_box_detections_from_arrow_table( + self._get_recording_table(), + self._get_table_index(iteration), + self.log_metadata, + ) def get_traffic_light_detections_at_iteration(self, iteration: int) -> Optional[TrafficLightDetectionWrapper]: return get_traffic_light_detections_from_arrow_table( diff --git a/src/py123d/datatypes/scene/arrow/utils/arrow_getters.py b/src/py123d/datatypes/scene/arrow/utils/arrow_getters.py index 8aea7801..46c23f40 100644 --- a/src/py123d/datatypes/scene/arrow/utils/arrow_getters.py +++ b/src/py123d/datatypes/scene/arrow/utils/arrow_getters.py @@ -11,7 +11,6 @@ from py123d.conversion.sensor_io.lidar.draco_lidar_io import load_lidar_from_draco_binary from py123d.conversion.sensor_io.lidar.file_lidar_io import load_lidar_pcs_from_file from py123d.conversion.sensor_io.lidar.laz_lidar_io import load_lidar_from_laz_binary -from py123d.datatypes.detections.box_detection_types import BoxDetectionType from py123d.datatypes.detections.box_detections import ( BoxDetection, BoxDetectionMetadata, @@ -59,19 +58,24 @@ def get_ego_vehicle_state_from_arrow_table( ) -def get_box_detections_from_arrow_table(arrow_table: pa.Table, index: int) -> BoxDetectionWrapper: +def get_box_detections_from_arrow_table( + arrow_table: pa.Table, + index: int, + log_metadata: LogMetadata, +) -> BoxDetectionWrapper: timepoint = get_timepoint_from_arrow_table(arrow_table, index) box_detections: List[BoxDetection] = [] + box_detection_label_class = log_metadata.box_detection_label_class - for detection_state, detection_velocity, detection_token, detection_type in zip( + for detection_state, detection_velocity, detection_token, detection_label in zip( arrow_table["box_detection_state"][index].as_py(), arrow_table["box_detection_velocity"][index].as_py(), arrow_table["box_detection_token"][index].as_py(), - arrow_table["box_detection_type"][index].as_py(), + arrow_table["box_detection_label"][index].as_py(), ): box_detection = BoxDetectionSE3( metadata=BoxDetectionMetadata( - box_detection_type=BoxDetectionType(detection_type), + label=box_detection_label_class(detection_label), timepoint=timepoint, track_token=detection_token, confidence=None, diff --git a/src/py123d/datatypes/scene/scene_metadata.py b/src/py123d/datatypes/scene/scene_metadata.py index 751b9e04..2bc271f1 100644 --- a/src/py123d/datatypes/scene/scene_metadata.py +++ b/src/py123d/datatypes/scene/scene_metadata.py @@ -1,9 +1,10 @@ from __future__ import annotations from dataclasses import asdict, dataclass, field -from typing import Dict, Optional +from typing import Dict, Optional, Type import py123d +from py123d.conversion.registry.box_detection_label_registry import BOX_DETECTION_LABEL_REGISTRY, BoxDetectionLabel from py123d.datatypes.maps.map_metadata import MapMetadata from py123d.datatypes.sensors.fisheye_mei_camera import FisheyeMEICameraMetadata, FisheyeMEICameraType from py123d.datatypes.sensors.lidar import LiDARMetadata, LiDARType @@ -21,6 +22,7 @@ class LogMetadata: timestep_seconds: 
float vehicle_parameters: Optional[VehicleParameters] = None + box_detection_label_class: Optional[Type[BoxDetectionLabel]] = None pinhole_camera_metadata: Dict[PinholeCameraType, PinholeCameraMetadata] = field(default_factory=dict) fisheye_mei_camera_metadata: Dict[FisheyeMEICameraType, FisheyeMEICameraMetadata] = field(default_factory=dict) lidar_metadata: Dict[LiDARType, LiDARMetadata] = field(default_factory=dict) @@ -35,6 +37,16 @@ def from_dict(cls, data_dict: Dict) -> LogMetadata: if data_dict["vehicle_parameters"] is not None: data_dict["vehicle_parameters"] = VehicleParameters.from_dict(data_dict["vehicle_parameters"]) + # Box detection label class specific to the dataset + if data_dict["box_detection_label_class"] in BOX_DETECTION_LABEL_REGISTRY: + data_dict["box_detection_label_class"] = BOX_DETECTION_LABEL_REGISTRY[ + data_dict["box_detection_label_class"] + ] + elif data_dict["box_detection_label_class"] is None: + data_dict["box_detection_label_class"] = None + else: + raise ValueError(f"Unknown box detection label class: {data_dict['box_detection_label_class']}") + # Pinhole Camera Metadata pinhole_camera_metadata = {} for key, value in data_dict.get("pinhole_camera_metadata", {}).items(): @@ -64,6 +76,8 @@ def from_dict(cls, data_dict: Dict) -> LogMetadata: def to_dict(self) -> Dict: data_dict = asdict(self) data_dict["vehicle_parameters"] = self.vehicle_parameters.to_dict() if self.vehicle_parameters else None + if self.box_detection_label_class is not None: + data_dict["box_detection_label_class"] = self.box_detection_label_class.__name__ data_dict["pinhole_camera_metadata"] = { key.serialize(): value.to_dict() for key, value in self.pinhole_camera_metadata.items() } diff --git a/src/py123d/datatypes/vehicle_state/ego_state.py b/src/py123d/datatypes/vehicle_state/ego_state.py index fcc6f8fe..3ddc09a5 100644 --- a/src/py123d/datatypes/vehicle_state/ego_state.py +++ b/src/py123d/datatypes/vehicle_state/ego_state.py @@ -8,7 +8,7 @@ import numpy.typing as npt from py123d.common.utils.enums import classproperty -from py123d.datatypes.detections.box_detection_types import BoxDetectionType +from py123d.conversion.registry.box_detection_label_registry import DefaultBoxDetectionLabel from py123d.datatypes.detections.box_detections import BoxDetectionMetadata, BoxDetectionSE2, BoxDetectionSE3 from py123d.datatypes.time.time_point import TimePoint from py123d.datatypes.vehicle_state.vehicle_parameters import ( @@ -148,7 +148,7 @@ def bounding_box_se2(self) -> BoundingBoxSE2: def box_detection(self) -> BoxDetectionSE3: return BoxDetectionSE3( metadata=BoxDetectionMetadata( - box_detection_type=BoxDetectionType.EGO, + label=DefaultBoxDetectionLabel.EGO, timepoint=self.timepoint, track_token=EGO_TRACK_TOKEN, confidence=1.0, @@ -231,7 +231,7 @@ def bounding_box_se2(self) -> BoundingBoxSE2: def box_detection(self) -> BoxDetectionSE2: return BoxDetectionSE2( metadata=BoxDetectionMetadata( - box_detection_type=BoxDetectionType.EGO, + label=DefaultBoxDetectionLabel.EGO, timepoint=self.timepoint, track_token=EGO_TRACK_TOKEN, confidence=1.0, diff --git a/src/py123d/visualization/color/default.py b/src/py123d/visualization/color/default.py index df0691f9..ed5ccd25 100644 --- a/src/py123d/visualization/color/default.py +++ b/src/py123d/visualization/color/default.py @@ -1,6 +1,6 @@ from typing import Dict -from py123d.datatypes.detections.box_detection_types import BoxDetectionType +from py123d.conversion.registry.box_detection_label_registry import DefaultBoxDetectionLabel from 
py123d.datatypes.detections.traffic_light_detections import TrafficLightStatus from py123d.datatypes.maps.map_datatypes import MapLayer from py123d.visualization.color.color import ( @@ -83,8 +83,8 @@ ), } -BOX_DETECTION_CONFIG: Dict[BoxDetectionType, PlotConfig] = { - BoxDetectionType.VEHICLE: PlotConfig( +BOX_DETECTION_CONFIG: Dict[DefaultBoxDetectionLabel, PlotConfig] = { + DefaultBoxDetectionLabel.VEHICLE: PlotConfig( fill_color=ELLIS_5[4], fill_color_alpha=1.0, line_color=BLACK, @@ -95,7 +95,7 @@ marker_size=1.0, zorder=3, ), - BoxDetectionType.PEDESTRIAN: PlotConfig( + DefaultBoxDetectionLabel.PEDESTRIAN: PlotConfig( fill_color=NEW_TAB_10[6], fill_color_alpha=1.0, line_color=BLACK, @@ -106,7 +106,7 @@ marker_size=1.0, zorder=2, ), - BoxDetectionType.BICYCLE: PlotConfig( + DefaultBoxDetectionLabel.BICYCLE: PlotConfig( fill_color=ELLIS_5[3], fill_color_alpha=1.0, line_color=BLACK, @@ -117,7 +117,7 @@ marker_size=1.0, zorder=2, ), - BoxDetectionType.TRAFFIC_CONE: PlotConfig( + DefaultBoxDetectionLabel.TRAFFIC_CONE: PlotConfig( fill_color=NEW_TAB_10[5], fill_color_alpha=1.0, line_color=BLACK, @@ -127,7 +127,7 @@ marker_style=None, zorder=2, ), - BoxDetectionType.BARRIER: PlotConfig( + DefaultBoxDetectionLabel.BARRIER: PlotConfig( fill_color=NEW_TAB_10[5], fill_color_alpha=1.0, line_color=BLACK, @@ -137,7 +137,7 @@ marker_style=None, zorder=2, ), - BoxDetectionType.CZONE_SIGN: PlotConfig( + DefaultBoxDetectionLabel.CZONE_SIGN: PlotConfig( fill_color=NEW_TAB_10[5], fill_color_alpha=1.0, line_color=BLACK, @@ -147,7 +147,7 @@ marker_style=None, zorder=2, ), - BoxDetectionType.GENERIC_OBJECT: PlotConfig( + DefaultBoxDetectionLabel.GENERIC_OBJECT: PlotConfig( fill_color=NEW_TAB_10[5], fill_color_alpha=1.0, line_color=BLACK, @@ -157,7 +157,7 @@ marker_style=None, zorder=2, ), - BoxDetectionType.SIGN: PlotConfig( + DefaultBoxDetectionLabel.SIGN: PlotConfig( fill_color=NEW_TAB_10[8], fill_color_alpha=1.0, line_color=BLACK, @@ -167,7 +167,7 @@ marker_style=None, zorder=2, ), - BoxDetectionType.EGO: PlotConfig( + DefaultBoxDetectionLabel.EGO: PlotConfig( fill_color=ELLIS_5[0], fill_color_alpha=1.0, line_color=BLACK, diff --git a/src/py123d/visualization/matplotlib/camera.py b/src/py123d/visualization/matplotlib/camera.py index aadd0baf..5155aae4 100644 --- a/src/py123d/visualization/matplotlib/camera.py +++ b/src/py123d/visualization/matplotlib/camera.py @@ -10,7 +10,7 @@ # from PIL import ImageColor from pyquaternion import Quaternion -from py123d.datatypes.detections.box_detection_types import BoxDetectionType +from py123d.conversion.registry.box_detection_label_registry import DefaultBoxDetectionLabel from py123d.datatypes.detections.box_detections import BoxDetectionSE3, BoxDetectionWrapper from py123d.datatypes.sensors.pinhole_camera import PinholeCamera, PinholeIntrinsics from py123d.datatypes.vehicle_state.ego_state import EgoStateSE3 @@ -77,8 +77,8 @@ def add_box_detections_to_camera_ax( ) -> plt.Axes: box_detection_array = np.zeros((len(box_detections.box_detections), len(BoundingBoxSE3Index)), dtype=np.float64) - detection_types = np.array( - [detection.metadata.box_detection_type for detection in box_detections.box_detections], dtype=object + default_labels = np.array( + [detection.metadata.default_label for detection in box_detections.box_detections], dtype=object ) for idx, box_detection in enumerate(box_detections.box_detections): assert isinstance( @@ -109,8 +109,8 @@ def add_box_detections_to_camera_ax( corners_pc_in_fov = corners_pc_in_fov.reshape(-1, 8) valid_corners = 
corners_pc_in_fov.any(-1) - box_corners, detection_types = box_corners[valid_corners], detection_types[valid_corners] - image = _plot_rect_3d_on_img(camera.image.copy(), box_corners, detection_types) + box_corners, default_labels = box_corners[valid_corners], default_labels[valid_corners] + image = _plot_rect_3d_on_img(camera.image.copy(), box_corners, default_labels) if return_image: # ax.imshow(image) @@ -211,7 +211,7 @@ def _rotation_3d_in_axis(points: npt.NDArray[np.float32], angles: npt.NDArray[np def _plot_rect_3d_on_img( image: npt.NDArray[np.float32], box_corners: npt.NDArray[np.float32], - detection_types: List[BoxDetectionType], + labels: List[DefaultBoxDetectionLabel], thickness: int = 3, ) -> npt.NDArray[np.uint8]: """ @@ -238,7 +238,7 @@ def _plot_rect_3d_on_img( (6, 7), ) for i in range(len(box_corners)): - color = BOX_DETECTION_CONFIG[detection_types[i]].fill_color.rgb + color = BOX_DETECTION_CONFIG[labels[i]].fill_color.rgb corners = box_corners[i].astype(np.int64) for start, end in line_indices: cv2.line( diff --git a/src/py123d/visualization/matplotlib/observation.py b/src/py123d/visualization/matplotlib/observation.py index 03c0d711..e0a826f2 100644 --- a/src/py123d/visualization/matplotlib/observation.py +++ b/src/py123d/visualization/matplotlib/observation.py @@ -4,7 +4,7 @@ import numpy as np import shapely.geometry as geom -from py123d.datatypes.detections.box_detection_types import BoxDetectionType +from py123d.conversion.registry.box_detection_label_registry import DefaultBoxDetectionLabel from py123d.datatypes.detections.box_detections import BoxDetectionWrapper from py123d.datatypes.detections.traffic_light_detections import TrafficLightDetectionWrapper from py123d.datatypes.maps.abstract_map import AbstractMap @@ -85,7 +85,7 @@ def add_box_detections_to_ax(ax: plt.Axes, box_detections: BoxDetectionWrapper) # TODO: Optionally, continue on boxes outside of plot. 
        # if box_detection.metadata.detection_type == DetectionType.GENERIC_OBJECT:
        #     continue
-        plot_config = BOX_DETECTION_CONFIG[box_detection.metadata.box_detection_type]
+        plot_config = BOX_DETECTION_CONFIG[box_detection.metadata.default_label]
         add_bounding_box_to_ax(ax, box_detection.bounding_box, plot_config)
 
@@ -96,7 +96,7 @@ def add_box_future_detections_to_ax(ax: plt.Axes, scene: AbstractScene, iteratio
     agents_poses = {
         agent.metadata.track_token: [agent.center_se3]
         for agent in initial_agents
-        if agent.metadata.box_detection_type == BoxDetectionType.VEHICLE
+        if agent.metadata.default_label == DefaultBoxDetectionLabel.VEHICLE
     }
     frequency = 1
     for iteration in range(iteration + frequency, scene.number_of_iterations, frequency):
@@ -115,10 +115,10 @@ def add_box_future_detections_to_ax(ax: plt.Axes, scene: AbstractScene, iteratio
         ax.plot(
             poses[i : i + 2, 0],
             poses[i : i + 2, 1],
-            color=BOX_DETECTION_CONFIG[BoxDetectionType.VEHICLE].fill_color.hex,
+            color=BOX_DETECTION_CONFIG[DefaultBoxDetectionLabel.VEHICLE].fill_color.hex,
             alpha=alphas[i + 1],
-            linewidth=BOX_DETECTION_CONFIG[BoxDetectionType.VEHICLE].line_width * 5,
-            zorder=BOX_DETECTION_CONFIG[BoxDetectionType.VEHICLE].zorder,
+            linewidth=BOX_DETECTION_CONFIG[DefaultBoxDetectionLabel.VEHICLE].line_width * 5,
+            zorder=BOX_DETECTION_CONFIG[DefaultBoxDetectionLabel.VEHICLE].zorder,
         )
 
diff --git a/src/py123d/visualization/viser/elements/detection_elements.py b/src/py123d/visualization/viser/elements/detection_elements.py
index 41b7001d..be08021b 100644
--- a/src/py123d/visualization/viser/elements/detection_elements.py
+++ b/src/py123d/visualization/viser/elements/detection_elements.py
@@ -5,7 +5,7 @@
 import trimesh
 import viser
 
-from py123d.datatypes.detections.box_detection_types import BoxDetectionType
+from py123d.conversion.registry.box_detection_label_registry import DefaultBoxDetectionLabel
 from py123d.datatypes.scene.abstract_scene import AbstractScene
 from py123d.datatypes.vehicle_state.ego_state import EgoStateSE3
 from py123d.geometry.geometry_index import BoundingBoxSE3Index, Corners3DIndex, StateSE3Index
@@ -74,7 +74,7 @@ def _get_bounding_box_meshes(scene: AbstractScene, iteration: int, initial_ego_s
 
     # Load boxes to visualize, including ego vehicle at the last position
     boxes = [bd.bounding_box_se3 for bd in box_detections.box_detections] + [ego_vehicle_state.bounding_box_se3]
-    boxes_type = [bd.metadata.box_detection_type for bd in box_detections.box_detections] + [BoxDetectionType.EGO]
+    boxes_labels = [bd.metadata.default_label for bd in box_detections.box_detections] + [DefaultBoxDetectionLabel.EGO]
 
     # create meshes for all boxes
     box_se3_array = np.array([box.array for box in boxes])
@@ -84,8 +84,8 @@ def _get_bounding_box_meshes(scene: AbstractScene, iteration: int, initial_ego_s
 
     # Create colors for each box based on detection type
     box_colors = []
-    for box_type in boxes_type:
-        box_colors.append(BOX_DETECTION_CONFIG[box_type].fill_color.rgba)
+    for box_label in boxes_labels:
+        box_colors.append(BOX_DETECTION_CONFIG[box_label].fill_color.rgba)
 
     # Convert to numpy array and repeat for each vertex
     box_colors = np.array(box_colors)
@@ -135,7 +135,7 @@ def _get_bounding_box_outlines(
 
     # Load boxes to visualize, including ego vehicle at the last position
     boxes = [bd.bounding_box_se3 for bd in box_detections.box_detections] + [ego_vehicle_state.bounding_box_se3]
-    boxes_type = [bd.metadata.box_detection_type for bd in box_detections.box_detections] + [BoxDetectionType.EGO]
+    boxes_labels = [bd.metadata.default_label for bd in
box_detections.box_detections] + [DefaultBoxDetectionLabel.EGO] # Create lines for all boxes box_se3_array = np.array([box.array for box in boxes]) @@ -145,8 +145,8 @@ def _get_bounding_box_outlines( # Create colors for all boxes box_colors = np.zeros(box_outlines.shape, dtype=np.float32) - for i, box_type in enumerate(boxes_type): - box_colors[i, ...] = BOX_DETECTION_CONFIG[box_type].fill_color.rgb_norm + for i, box_label in enumerate(boxes_labels): + box_colors[i, ...] = BOX_DETECTION_CONFIG[box_label].fill_color.rgb_norm box_outlines = box_outlines.reshape(-1, *box_outlines.shape[2:]) box_colors = box_colors.reshape(-1, *box_colors.shape[2:]) diff --git a/test_viser.py b/test_viser.py index ee35330c..544e539b 100644 --- a/test_viser.py +++ b/test_viser.py @@ -1,7 +1,8 @@ from py123d.common.multithreading.worker_sequential import Sequential from py123d.datatypes.scene.arrow.arrow_scene_builder import ArrowSceneBuilder from py123d.datatypes.scene.scene_filter import SceneFilter -from py123d.datatypes.sensors.pinhole_camera import PinholeCameraType + +# from py123d.datatypes.sensors.pinhole_camera import PinholeCameraType from py123d.visualization.viser.viser_viewer import ViserViewer if __name__ == "__main__": @@ -17,7 +18,7 @@ # log_names = ["2013_05_28_drive_0000_sync"] # log_names = ["2013_05_28_drive_0000_sync"] log_names = None - # scene_uuids = ["60a37beb-6df4-5413-b753-9280125020cf"] + # scene_uuids = ["87bf69e4-f2fb-5491-99fa-8b7e89fb697c"] scene_uuids = None scene_filter = SceneFilter( @@ -28,7 +29,7 @@ history_s=0.0, timestamp_threshold_s=None, shuffle=True, - pinhole_camera_types=[PinholeCameraType.PCAM_F0], + # pinhole_camera_types=[PinholeCameraType.PCAM_F0], ) scene_builder = ArrowSceneBuilder() worker = Sequential() From 1a184aa5c9110dcab0fe964a86a589bfd00f4150 Mon Sep 17 00:00:00 2001 From: Daniel Dauner Date: Tue, 4 Nov 2025 22:17:50 +0100 Subject: [PATCH 140/145] Add camera convention helper. --- .../conversion/utils/sensor_utils/camera_conventions.py | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/src/py123d/conversion/utils/sensor_utils/camera_conventions.py b/src/py123d/conversion/utils/sensor_utils/camera_conventions.py index 49e3a533..75837932 100644 --- a/src/py123d/conversion/utils/sensor_utils/camera_conventions.py +++ b/src/py123d/conversion/utils/sensor_utils/camera_conventions.py @@ -79,6 +79,14 @@ def convert_camera_convention( ], dtype=np.float64, ).T, + (CameraConvention.pZmYpX, CameraConvention.pXpZmY): np.array( + [ + [0.0, 0.0, 1.0], # new X = old Z (right) + [-1.0, 0.0, 0.0], # new Y = old -X (down) + [0.0, -1.0, 0.0], # new Z = old -Y (forward) + ], + dtype=np.float64, + ).T, } pose_transformation = from_pose.transformation_matrix.copy() From 30813104eb9294c7af31d78b4d843319bc2a6045 Mon Sep 17 00:00:00 2001 From: Daniel Dauner Date: Wed, 5 Nov 2025 12:33:08 +0100 Subject: [PATCH 141/145] Fix loading of fisheye mei camera and add to viser. 
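The fisheye cameras in this patch use the MEI unified camera model: a 3D point is shifted along its viewing ray by the mirror parameter xi before a pinhole-style projection with focal terms gamma1/gamma2 and principal point (u0, v0). As a rough standalone sketch of the undistorted forward projection (numpy only; the helper name and the placeholder intrinsics are illustrative, not part of this patch):

import numpy as np

def project_mei(point_3d, xi, gamma1, gamma2, u0, v0):
    # Undistorted MEI projection: shift the projection center by xi along the
    # unit sphere, then apply a pinhole-style mapping (camera frame, z forward).
    x, y, z = point_3d
    z_s = z + xi * np.linalg.norm(point_3d)
    return np.array([gamma1 * x / z_s + u0, gamma2 * y / z_s + v0])

# A point straight ahead projects onto the principal point, independent of xi
# (all calibration values below are arbitrary placeholders):
assert np.allclose(
    project_mei(np.array([0.0, 0.0, 5.0]), xi=0.9, gamma1=500.0, gamma2=500.0, u0=700.0, v0=700.0),
    [700.0, 700.0],
)

The calculate_fov helper added to sensor_elements.py in this commit inverts exactly this relation at the image corners to estimate the field of view.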
--- .../datasets/kitti360/kitti360_converter.py | 2 +- .../datatypes/scene/arrow/arrow_scene.py | 2 +- .../scene/arrow/utils/arrow_getters.py | 5 +- .../datatypes/sensors/fisheye_mei_camera.py | 4 + .../conversion/datasets/kitti360_dataset.yaml | 2 +- .../viser/elements/sensor_elements.py | 137 +++++++++++++++++- .../visualization/viser/viser_config.py | 17 ++- .../visualization/viser/viser_viewer.py | 19 +++ test_viser.py | 4 +- 9 files changed, 181 insertions(+), 11 deletions(-) diff --git a/src/py123d/conversion/datasets/kitti360/kitti360_converter.py b/src/py123d/conversion/datasets/kitti360/kitti360_converter.py index c236c0e4..c3494d0b 100644 --- a/src/py123d/conversion/datasets/kitti360/kitti360_converter.py +++ b/src/py123d/conversion/datasets/kitti360/kitti360_converter.py @@ -387,7 +387,7 @@ def _get_kitti360_fisheye_mei_camera_metadata( camera_type=fcam_type, width=fisheye_result[fcam_name]["image_width"], height=fisheye_result[fcam_name]["image_height"], - mirror_parameter=fisheye_result[fcam_name]["mirror_parameters"], + mirror_parameter=float(fisheye_result[fcam_name]["mirror_parameters"]["xi"]), distortion=distortion, projection=projection, ) diff --git a/src/py123d/datatypes/scene/arrow/arrow_scene.py b/src/py123d/datatypes/scene/arrow/arrow_scene.py index 06c6c1b0..ee26a5f4 100644 --- a/src/py123d/datatypes/scene/arrow/arrow_scene.py +++ b/src/py123d/datatypes/scene/arrow/arrow_scene.py @@ -149,7 +149,7 @@ def get_fisheye_mei_camera_at_iteration( self, iteration: int, camera_type: FisheyeMEICameraType ) -> Optional[FisheyeMEICamera]: fisheye_mei_camera: Optional[FisheyeMEICamera] = None - if camera_type in self.available_pinhole_camera_types: + if camera_type in self.available_fisheye_mei_camera_types: fisheye_mei_camera = get_camera_from_arrow_table( self._get_recording_table(), self._get_table_index(iteration), diff --git a/src/py123d/datatypes/scene/arrow/utils/arrow_getters.py b/src/py123d/datatypes/scene/arrow/utils/arrow_getters.py index 46c23f40..6ce6701a 100644 --- a/src/py123d/datatypes/scene/arrow/utils/arrow_getters.py +++ b/src/py123d/datatypes/scene/arrow/utils/arrow_getters.py @@ -141,14 +141,15 @@ def get_camera_from_arrow_table( else: raise NotImplementedError("Only string file paths for camera data are supported.") - camera_metadata = log_metadata.pinhole_camera_metadata[camera_type] - if hasattr(camera_metadata, "mirror_parameter") and camera_metadata.mirror_parameter is not None: + if camera_name.startswith("fcam"): + camera_metadata = log_metadata.fisheye_mei_camera_metadata[camera_type] return FisheyeMEICamera( metadata=camera_metadata, image=image, extrinsic=extrinsic, ) else: + camera_metadata = log_metadata.pinhole_camera_metadata[camera_type] return PinholeCamera( metadata=camera_metadata, image=image, diff --git a/src/py123d/datatypes/sensors/fisheye_mei_camera.py b/src/py123d/datatypes/sensors/fisheye_mei_camera.py index d8f53f14..4a98afad 100644 --- a/src/py123d/datatypes/sensors/fisheye_mei_camera.py +++ b/src/py123d/datatypes/sensors/fisheye_mei_camera.py @@ -150,6 +150,10 @@ def from_dict(cls, data_dict: Dict[str, Any]) -> FisheyeMEICameraMetadata: ) return FisheyeMEICameraMetadata(**data_dict) + @property + def aspect_ratio(self) -> float: + return self.width / self.height + def to_dict(self) -> Dict[str, Any]: data_dict = asdict(self) data_dict["camera_type"] = int(self.camera_type) diff --git a/src/py123d/script/config/conversion/datasets/kitti360_dataset.yaml b/src/py123d/script/config/conversion/datasets/kitti360_dataset.yaml index 
4919ed79..f15509e3 100644
--- a/src/py123d/script/config/conversion/datasets/kitti360_dataset.yaml
+++ b/src/py123d/script/config/conversion/datasets/kitti360_dataset.yaml
@@ -34,7 +34,7 @@ kitti360_dataset:
     pinhole_camera_store_option: "path"
 
     # Fisheye Cameras
-    include_fisheye_mei_cameras: false
+    include_fisheye_mei_cameras: true
     fisheye_mei_camera_store_option: "path"
 
     # LiDARs
diff --git a/src/py123d/visualization/viser/elements/sensor_elements.py b/src/py123d/visualization/viser/elements/sensor_elements.py
index d6a1b5d9..2cc6bacb 100644
--- a/src/py123d/visualization/viser/elements/sensor_elements.py
+++ b/src/py123d/visualization/viser/elements/sensor_elements.py
@@ -7,6 +7,7 @@
 import viser
 
 from py123d.datatypes.scene.abstract_scene import AbstractScene
+from py123d.datatypes.sensors.fisheye_mei_camera import FisheyeMEICamera, FisheyeMEICameraMetadata, FisheyeMEICameraType
 from py123d.datatypes.sensors.lidar import LiDARType
 from py123d.datatypes.sensors.pinhole_camera import PinholeCamera, PinholeCameraType
 from py123d.datatypes.vehicle_state.ego_state import EgoStateSE3
@@ -50,7 +51,7 @@ def _add_camera_frustums_to_viser_server(camera_type: PinholeCameraType) -> None
                 f"camera_frustums/{camera_type.serialize()}",
                 fov=camera.metadata.fov_y,
                 aspect=camera.metadata.aspect_ratio,
-                scale=viser_config.camera_frustum_frustum_scale,
+                scale=viser_config.camera_frustum_scale,
                 image=camera_image,
                 position=camera_position,
                 wxyz=camera_quaternion,
@@ -74,6 +75,60 @@ def _add_camera_frustums_to_viser_server(camera_type: PinholeCameraType) -> None
     return None
 
 
+def add_fisheye_frustums_to_viser_server(
+    scene: AbstractScene,
+    scene_iteration: int,
+    initial_ego_state: EgoStateSE3,
+    viser_server: viser.ViserServer,
+    viser_config: ViserConfig,
+    fisheye_frustum_handles: Dict[FisheyeMEICameraType, viser.CameraFrustumHandle],
+) -> None:
+    if viser_config.fisheye_frustum_visible:
+        scene_center_array = initial_ego_state.center.point_3d.array
+        ego_pose = scene.get_ego_state_at_iteration(scene_iteration).rear_axle_se3.array
+        ego_pose[StateSE3Index.XYZ] -= scene_center_array
+
+        def _add_fisheye_frustums_to_viser_server(fisheye_camera_type: FisheyeMEICameraType) -> None:
+            camera = scene.get_fisheye_mei_camera_at_iteration(scene_iteration, fisheye_camera_type)
+            if camera is not None:
+                fcam_position, fcam_quaternion, fcam_image = _get_fisheye_camera_values(
+                    camera,
+                    ego_pose.copy(),
+                    viser_config.fisheye_frustum_image_scale,
+                )
+                if fisheye_camera_type in fisheye_frustum_handles:
+                    fisheye_frustum_handles[fisheye_camera_type].position = fcam_position
+                    fisheye_frustum_handles[fisheye_camera_type].wxyz = fcam_quaternion
+                    fisheye_frustum_handles[fisheye_camera_type].image = fcam_image
+                else:
+                    # NOTE @DanielDauner: The FOV is just taken as a static value here.
+                    # The function se
+                    fisheye_frustum_handles[fisheye_camera_type] = viser_server.scene.add_camera_frustum(
+                        f"camera_frustums/{fisheye_camera_type.serialize()}",
+                        fov=185,  # vertical fov
+                        aspect=camera.metadata.aspect_ratio,
+                        scale=viser_config.fisheye_frustum_scale,
+                        image=fcam_image,
+                        position=fcam_position,
+                        wxyz=fcam_quaternion,
+                    )
+
+            return None
+
+        # NOTE: In order to speed up adding camera frustums, we use multithreading and resize the images.
+        with concurrent.futures.ThreadPoolExecutor(
+            max_workers=len(viser_config.fisheye_mei_camera_frustum_types)
+        ) as executor:
+            future_to_camera = {
+                executor.submit(_add_fisheye_frustums_to_viser_server, fcam_type): fcam_type
+                for fcam_type in viser_config.fisheye_mei_camera_frustum_types
+            }
+            for future in concurrent.futures.as_completed(future_to_camera):
+                _ = future.result()
+
+    return None
+
+
 def add_camera_gui_to_viser_server(
     scene: AbstractScene,
     scene_interation: int,
@@ -183,6 +238,23 @@ def _get_camera_values(
     return camera_position, camera_rotation, camera_image
 
 
+def _get_fisheye_camera_values(
+    camera: FisheyeMEICamera,
+    ego_pose: npt.NDArray[np.float64],
+    resize_factor: Optional[float] = None,
+) -> Tuple[npt.NDArray[np.float64], npt.NDArray[np.float64], npt.NDArray[np.uint8]]:
+    assert ego_pose.ndim == 1 and len(ego_pose) == len(StateSE3Index)
+
+    rel_camera_pose = camera.extrinsic.array
+    abs_camera_pose = convert_relative_to_absolute_se3_array(origin=ego_pose, se3_array=rel_camera_pose)
+
+    camera_position = abs_camera_pose[StateSE3Index.XYZ]
+    camera_rotation = abs_camera_pose[StateSE3Index.QUATERNION]
+
+    camera_image = _rescale_image(camera.image, resize_factor)
+    return camera_position, camera_rotation, camera_image
+
+
 def _rescale_image(image: npt.NDArray[np.uint8], scale: float) -> npt.NDArray[np.uint8]:
     if scale == 1.0:
         return image
@@ -190,3 +262,61 @@ def _rescale_image(image: npt.NDArray[np.uint8], scale: float) -> npt.NDArray[np
     new_height = int(image.shape[0] * scale)
     downscaled_image = cv2.resize(image, (new_width, new_height), interpolation=cv2.INTER_LINEAR)
    return downscaled_image
+
+
+def calculate_fov(metadata: FisheyeMEICameraMetadata) -> tuple[float, float]:
+    """
+    Calculate horizontal and vertical FOV in radians.
+
+    Returns:
+        (horizontal_fov, vertical_fov) in radians
+    """
+    xi = metadata.mirror_parameter
+    gamma1 = metadata.projection.gamma1
+    gamma2 = metadata.projection.gamma2
+    u0 = metadata.projection.u0
+    v0 = metadata.projection.v0
+
+    width = metadata.width
+    height = metadata.height
+
+    # Calculate corner positions (furthest from principal point)
+    corners = np.array([[0, 0], [width, 0], [0, height], [width, height]])
+
+    # Convert to normalized coordinates
+    x_norm = (corners[:, 0] - u0) / gamma1
+    y_norm = (corners[:, 1] - v0) / gamma2
+
+    # For MEI model, inverse projection (ignoring distortion for FOV estimate):
+    # r² = x² + y²
+    # θ = arctan(r / (1 - ξ·√(1 + r²)))
+
+    r_squared = x_norm**2 + y_norm**2
+    r = np.sqrt(r_squared)
+
+    # Calculate incident angle for each corner
+    # From MEI model: r = (X/Z_s) where Z_s = Z + ξ·√(X² + Y² + Z²)
+    # This gives: θ = arctan(r·√(1 + (1-ξ²)r²) / (1 - ξ²·r²))
+    # Simplified approximation:
+
+    if xi < 1e-6:  # Perspective camera
+        theta = np.arctan(r)
+    else:
+        # For small angles or as approximation
+        denominator = 1 - xi * np.sqrt(1 + r_squared)
+        theta = np.arctan2(r, denominator)
+
+    # Calculate horizontal and vertical FOV separately
+    x_max = np.max(np.abs(x_norm))
+    y_max = np.max(np.abs(y_norm))
+
+    if xi < 1e-6:
+        h_fov = 2 * np.arctan(x_max)
+        v_fov = 2 * np.arctan(y_max)
+    else:
+        denom_h = 1 - xi * np.sqrt(1 + x_max**2)
+        denom_v = 1 - xi * np.sqrt(1 + y_max**2)
+        h_fov = 2 * np.arctan2(x_max, denom_h)
+        v_fov = 2 * np.arctan2(y_max, denom_v)
+
+    return h_fov, v_fov
diff --git a/src/py123d/visualization/viser/viser_config.py b/src/py123d/visualization/viser/viser_config.py
index a9c3c5e1..b96ff96a 100644
--- a/src/py123d/visualization/viser/viser_config.py
+++ b/src/py123d/visualization/viser/viser_config.py
@@ -1,6 +1,7 @@
 from dataclasses import dataclass, field
 from typing import List, Literal, Optional, Tuple
 
+from py123d.datatypes.sensors.fisheye_mei_camera import FisheyeMEICameraType
 from py123d.datatypes.sensors.lidar import LiDARType
 from py123d.datatypes.sensors.pinhole_camera import PinholeCameraType
 from py123d.visualization.color.color import ELLIS_5
@@ -52,7 +53,7 @@ class ViserConfig:
 
     # Map
     map_visible: bool = True
-    map_radius: float = 500.0  # [m]
+    map_radius: float = 200.0  # [m]
     map_non_road_z_offset: float = 0.1  # small z-translation to place crosswalks, parking, etc. on top of the road
     map_requery: bool = True  # Re-query map when ego vehicle moves out of current map bounds
@@ -61,11 +62,11 @@
     bounding_box_type: Literal["mesh", "lines"] = "mesh"
     bounding_box_line_width: float = 4.0
 
-    # Cameras
+    # Pinhole Cameras
     # -> Frustum
     camera_frustum_visible: bool = True
     camera_frustum_types: List[PinholeCameraType] = field(default_factory=lambda: all_camera_types.copy())
-    camera_frustum_frustum_scale: float = 1.0
+    camera_frustum_scale: float = 1.0
     camera_frustum_image_scale: float = 0.25  # Resize factor for the camera image shown on the frustum (<1.0 for speed)
 
     # -> GUI
@@ -73,6 +74,16 @@
     camera_gui_visible: bool = True
     camera_gui_types: List[PinholeCameraType] = field(default_factory=lambda: [PinholeCameraType.PCAM_F0].copy())
     camera_gui_image_scale: float = 0.25  # Resize factor for the camera image shown in the GUI (<1.0 for speed)
 
+    # Fisheye MEI Cameras
+    # -> Frustum
+    fisheye_frustum_visible: bool = True
+    fisheye_mei_camera_frustum_visible: bool = True
+    fisheye_mei_camera_frustum_types: List[FisheyeMEICameraType] = field(
+        default_factory=lambda: [fcam for fcam in FisheyeMEICameraType]
+    )
+    fisheye_frustum_scale: float = 1.0
+    fisheye_frustum_image_scale: float = 0.25  # Resize factor for the camera image shown on the frustum
+
     # LiDAR
     lidar_visible: bool = True
     lidar_types: List[LiDARType] = field(default_factory=lambda: all_lidar_types.copy())
diff --git a/src/py123d/visualization/viser/viser_viewer.py b/src/py123d/visualization/viser/viser_viewer.py
index 6c0123b7..3d5dc044 100644
--- a/src/py123d/visualization/viser/viser_viewer.py
+++ b/src/py123d/visualization/viser/viser_viewer.py
@@ -10,6 +10,7 @@
 
 from py123d.datatypes.maps.map_datatypes import MapLayer
 from py123d.datatypes.scene.abstract_scene import AbstractScene
+from py123d.datatypes.sensors.fisheye_mei_camera import FisheyeMEICameraType
 from py123d.datatypes.sensors.lidar import LiDARType
 from py123d.datatypes.sensors.pinhole_camera import PinholeCameraType
 from py123d.datatypes.vehicle_state.ego_state import EgoStateSE3
@@ -24,6 +25,7 @@
     get_ego_3rd_person_view_position,
     get_ego_bev_view_position,
 )
+from py123d.visualization.viser.elements.sensor_elements import add_fisheye_frustums_to_viser_server
 from py123d.visualization.viser.viser_config import ViserConfig
 
 logger = logging.getLogger(__name__)
@@ -249,6 +251,14 @@ def _(_) -> None:
                 self._viser_config,
                 camera_gui_handles,
             )
+            add_fisheye_frustums_to_viser_server(
+                scene,
+                gui_timestep.value,
+                initial_ego_state,
+                self._viser_server,
+                self._viser_config,
+                fisheye_frustum_handles,
+            )
             add_lidar_pc_to_viser_server(
                 scene,
                 gui_timestep.value,
@@ -315,6 +325,7 @@ def _(event: viser.GuiEvent) -> None:
             "lines": None,
         }
         camera_frustum_handles: Dict[PinholeCameraType, viser.CameraFrustumHandle] = {}
+        fisheye_frustum_handles: Dict[FisheyeMEICameraType, viser.CameraFrustumHandle] = {}
         camera_gui_handles: Dict[PinholeCameraType, viser.GuiImageHandle] = {}
         lidar_pc_handles: Dict[LiDARType, Optional[viser.PointCloudHandle]] = {LiDARType.LIDAR_MERGED: None}
         map_handles: Dict[MapLayer, viser.MeshHandle] = {}
@@ -342,6 +353,14 @@ def _(event: viser.GuiEvent) -> None:
             self._viser_config,
             camera_gui_handles,
         )
+        add_fisheye_frustums_to_viser_server(
+            scene,
+            gui_timestep.value,
+            initial_ego_state,
+            self._viser_server,
+            self._viser_config,
+            fisheye_frustum_handles,
+        )
         add_lidar_pc_to_viser_server(
             scene,
             gui_timestep.value,
diff --git a/test_viser.py b/test_viser.py
index 544e539b..f7ae44bb 100644
--- a/test_viser.py
+++ b/test_viser.py
@@ -7,8 +7,8 @@ if __name__ == "__main__": # splits = ["kitti360_train"] - # splits = ["nuscenes-mini_val", "nuscenes-mini_train"] - splits = ["nuplan-mini_test", "nuplan-mini_train", "nuplan-mini_val"] + splits = ["nuscenes-mini_val", "nuscenes-mini_train"] + # splits = ["nuplan-mini_test", "nuplan-mini_train", "nuplan-mini_val"] # splits = ["nuplan_private_test"] # splits = ["carla_test"] # splits = ["wopd_val"] From 8480cdf85c9d3818f3ff45dc61ccfcd78b90a71d Mon Sep 17 00:00:00 2001 From: Daniel Dauner Date: Wed, 5 Nov 2025 17:49:51 +0100 Subject: [PATCH 142/145] Add mp4 compression and reading for pinhole and fisheye cameras. --- .../conversion/dataset_converter_config.py | 5 +- .../datasets/av2/av2_sensor_converter.py | 26 ++- .../datasets/kitti360/kitti360_converter.py | 62 +++---- .../datasets/nuplan/nuplan_converter.py | 25 ++- .../datasets/nuscenes/nuscenes_converter.py | 30 ++-- .../datasets/pandaset/pandaset_converter.py | 27 ++- .../datasets/wopd/waymo_sensor_io.py | 6 +- .../datasets/wopd/wopd_converter.py | 17 +- .../log_writer/abstract_log_writer.py | 34 +++- .../conversion/log_writer/arrow_log_writer.py | 168 ++++++++++++++---- .../sensor_io/camera/jpeg_camera_io.py | 64 +++---- .../sensor_io/camera/mp4_camera_io.py | 117 ++++-------- .../scene/arrow/utils/arrow_getters.py | 24 ++- .../conversion/datasets/kitti360_dataset.yaml | 6 +- .../conversion/datasets/nuplan_dataset.yaml | 4 +- .../datasets/nuplan_mini_dataset.yaml | 4 +- .../conversion/datasets/nuscenes_dataset.yaml | 4 +- .../datasets/nuscenes_mini_dataset.yaml | 4 +- .../conversion/datasets/pandaset_dataset.yaml | 2 +- .../conversion/datasets/wopd_dataset.yaml | 2 +- test_viser.py | 9 +- 21 files changed, 355 insertions(+), 285 deletions(-) diff --git a/src/py123d/conversion/dataset_converter_config.py b/src/py123d/conversion/dataset_converter_config.py index d4924b01..f9264cd7 100644 --- a/src/py123d/conversion/dataset_converter_config.py +++ b/src/py123d/conversion/dataset_converter_config.py @@ -41,12 +41,11 @@ class DatasetConverterConfig: include_route: bool = False def __post_init__(self): - assert ( - self.pinhole_camera_store_option != "mp4" - ), "MP4 format is not yet supported, but planned for future releases." + assert self.pinhole_camera_store_option in [ "path", "binary", + "mp4", ], f"Invalid camera store option, got {self.pinhole_camera_store_option}." 
assert self.lidar_store_option in [ diff --git a/src/py123d/conversion/datasets/av2/av2_sensor_converter.py b/src/py123d/conversion/datasets/av2/av2_sensor_converter.py index 2bace3a1..1e5a192a 100644 --- a/src/py123d/conversion/datasets/av2/av2_sensor_converter.py +++ b/src/py123d/conversion/datasets/av2/av2_sensor_converter.py @@ -14,7 +14,7 @@ find_closest_target_fpath, get_slice_with_timestamp_ns, ) -from py123d.conversion.log_writer.abstract_log_writer import AbstractLogWriter, LiDARData +from py123d.conversion.log_writer.abstract_log_writer import AbstractLogWriter, CameraData, LiDARData from py123d.conversion.map_writer.abstract_map_writer import AbstractMapWriter from py123d.conversion.registry.box_detection_label_registry import AV2SensorBoxDetectionLabel from py123d.conversion.registry.lidar_index_registry import AVSensorLiDARIndex @@ -322,9 +322,9 @@ def _extract_av2_sensor_pinhole_cameras( synchronization_df: pd.DataFrame, source_log_path: Path, dataset_converter_config: DatasetConverterConfig, -) -> Dict[PinholeCameraType, Tuple[Union[str, bytes], StateSE3]]: +) -> List[CameraData]: - camera_dict: Dict[PinholeCameraType, Tuple[Union[str, bytes], StateSE3]] = {} + camera_data_list: List[CameraData] = [] split = source_log_path.parent.name log_id = source_log_path.name @@ -351,17 +351,15 @@ def _extract_av2_sensor_pinhole_cameras( absolute_image_path = av2_sensor_data_root / relative_image_path assert absolute_image_path.exists() - # TODO: Adjust for finer IMU timestamps to correct the camera extrinsic. - camera_extrinsic = _row_dict_to_state_se3(row) - camera_data = None - if dataset_converter_config.pinhole_camera_store_option == "path": - camera_data = str(relative_image_path) - elif dataset_converter_config.pinhole_camera_store_option == "binary": - with open(absolute_image_path, "rb") as f: - camera_data = f.read() - camera_dict[pinhole_camera_type] = camera_data, camera_extrinsic - - return camera_dict + camera_data = CameraData( + camera_type=pinhole_camera_type, + extrinsic=_row_dict_to_state_se3(row), + dataset_root=av2_sensor_data_root, + relative_path=relative_image_path, + ) + camera_data_list.append(camera_data) + + return camera_data_list def _extract_av2_sensor_lidars( diff --git a/src/py123d/conversion/datasets/kitti360/kitti360_converter.py b/src/py123d/conversion/datasets/kitti360/kitti360_converter.py index c3494d0b..22d84229 100644 --- a/src/py123d/conversion/datasets/kitti360/kitti360_converter.py +++ b/src/py123d/conversion/datasets/kitti360/kitti360_converter.py @@ -24,7 +24,7 @@ kittiId2label, ) from py123d.conversion.datasets.kitti360.utils.preprocess_detection import process_detection -from py123d.conversion.log_writer.abstract_log_writer import AbstractLogWriter, LiDARData +from py123d.conversion.log_writer.abstract_log_writer import AbstractLogWriter, CameraData, LiDARData from py123d.conversion.map_writer.abstract_map_writer import AbstractMapWriter from py123d.conversion.registry.box_detection_label_registry import KITTI360BoxDetectionLabel from py123d.conversion.registry.lidar_index_registry import Kitti360LiDARIndex @@ -304,12 +304,9 @@ def convert_log(self, log_index: int, log_writer: AbstractLogWriter) -> None: timestamp=ts_list[valid_idx], ego_state=ego_state_all[idx], box_detections=box_detection_wrapper_all[valid_idx], - traffic_lights=None, pinhole_cameras=pinhole_cameras, fisheye_mei_cameras=fisheye_cameras, lidars=lidars, - scenario_tags=None, - route_lane_group_ids=None, ) log_writer.close() @@ -724,26 +721,24 @@ def 
_extract_kitti360_pinhole_cameras( camera_calibration: Dict[str, StateSE3], kitti360_folders: Dict[str, Path], data_converter_config: DatasetConverterConfig, -) -> Dict[Union[PinholeCameraType, FisheyeMEICameraType], Optional[Tuple[Union[str, bytes], StateSE3]]]: +) -> List[CameraData]: - pinhole_camera_dict: Dict[PinholeCameraType, Optional[Tuple[Union[str, bytes], StateSE3]]] = {} + pinhole_camera_data_list: List[CameraData] = [] if data_converter_config.include_pinhole_cameras: - for camera_type, cam_dir_name in KITTI360_PINHOLE_CAMERA_TYPES.items(): img_path_png = kitti360_folders[DIR_2D_RAW] / log_name / cam_dir_name / "data_rect" / f"{idx:010d}.png" camera_extrinsic = camera_calibration[cam_dir_name] - if img_path_png.exists(): - if data_converter_config.pinhole_camera_store_option == "path": - camera_data = str(img_path_png) - elif data_converter_config.pinhole_camera_store_option == "binary": - with open(img_path_png, "rb") as f: - camera_data = f.read() - else: - camera_data = None + pinhole_camera_data_list.append( + CameraData( + camera_type=camera_type, + extrinsic=camera_extrinsic, + dataset_root=kitti360_folders[DIR_ROOT], + relative_path=img_path_png.relative_to(kitti360_folders[DIR_ROOT]), + ) + ) - pinhole_camera_dict[camera_type] = camera_data, camera_extrinsic - return pinhole_camera_dict + return pinhole_camera_data_list def _extract_kitti360_fisheye_mei_cameras( @@ -752,22 +747,23 @@ def _extract_kitti360_fisheye_mei_cameras( camera_calibration: Dict[str, StateSE3], kitti360_folders: Dict[str, Path], data_converter_config: DatasetConverterConfig, -) -> Dict[Union[PinholeCameraType, FisheyeMEICameraType], Optional[Tuple[Union[str, bytes], StateSE3]]]: - - fisheye_camera_dict: Dict[FisheyeMEICameraType, Optional[Tuple[Union[str, bytes], StateSE3]]] = {} - for camera_type, cam_dir_name in KITTI360_FISHEYE_MEI_CAMERA_TYPES.items(): - img_path_png = kitti360_folders[DIR_2D_RAW] / log_name / cam_dir_name / "data_rgb" / f"{idx:010d}.png" - camera_extrinsic = camera_calibration[cam_dir_name] - if img_path_png.exists(): - if data_converter_config.pinhole_camera_store_option == "path": - camera_data = str(img_path_png) - elif data_converter_config.pinhole_camera_store_option == "binary": - with open(img_path_png, "rb") as f: - camera_data = f.read() - else: - camera_data = None - fisheye_camera_dict[camera_type] = camera_data, camera_extrinsic - return fisheye_camera_dict +) -> List[CameraData]: + + fisheye_camera_data_list: List[CameraData] = [] + if data_converter_config.include_fisheye_mei_cameras: + for camera_type, cam_dir_name in KITTI360_FISHEYE_MEI_CAMERA_TYPES.items(): + img_path_png = kitti360_folders[DIR_2D_RAW] / log_name / cam_dir_name / "data_rgb" / f"{idx:010d}.png" + camera_extrinsic = camera_calibration[cam_dir_name] + if img_path_png.exists(): + fisheye_camera_data_list.append( + CameraData( + camera_type=camera_type, + extrinsic=camera_extrinsic, + dataset_root=kitti360_folders[DIR_ROOT], + relative_path=img_path_png.relative_to(kitti360_folders[DIR_ROOT]), + ) + ) + return fisheye_camera_data_list def _load_kitti_360_calibration(kitti_360_data_root: Path) -> Dict[str, StateSE3]: diff --git a/src/py123d/conversion/datasets/nuplan/nuplan_converter.py b/src/py123d/conversion/datasets/nuplan/nuplan_converter.py index 92863acd..36fa3a25 100644 --- a/src/py123d/conversion/datasets/nuplan/nuplan_converter.py +++ b/src/py123d/conversion/datasets/nuplan/nuplan_converter.py @@ -22,7 +22,7 @@ get_box_detections_for_lidarpc_token_from_db, 
get_nearest_ego_pose_for_timestamp_from_db, ) -from py123d.conversion.log_writer.abstract_log_writer import AbstractLogWriter, LiDARData +from py123d.conversion.log_writer.abstract_log_writer import AbstractLogWriter, CameraData, LiDARData from py123d.conversion.map_writer.abstract_map_writer import AbstractMapWriter from py123d.conversion.registry.box_detection_label_registry import NuPlanBoxDetectionLabel from py123d.conversion.registry.lidar_index_registry import NuPlanLiDARIndex @@ -356,9 +356,9 @@ def _extract_nuplan_cameras( source_log_path: Path, nuplan_sensor_root: Path, dataset_converter_config: DatasetConverterConfig, -) -> Dict[PinholeCameraType, Tuple[Union[str, bytes], StateSE3]]: +) -> List[CameraData]: - camera_dict: Dict[str, Union[str, bytes]] = {} + camera_data_list: List[CameraData] = [] if dataset_converter_config.include_pinhole_cameras: log_cam_infos = {camera.token: camera for camera in nuplan_log_db.log.cameras} @@ -395,18 +395,17 @@ def _extract_nuplan_cameras( c2e = img_e2e @ c2img_e extrinsic = StateSE3.from_transformation_matrix(c2e) - # Store camera data, either as path or binary - camera_data: Optional[Union[str, bytes]] = None - if dataset_converter_config.pinhole_camera_store_option == "path": - camera_data = str(filename_jpg) - elif dataset_converter_config.pinhole_camera_store_option == "binary": - with open(filename_jpg, "rb") as f: - camera_data = f.read() - # Store in dictionary - camera_dict[camera_type] = camera_data, extrinsic + camera_data_list.append( + CameraData( + camera_type=camera_type, + extrinsic=extrinsic, + dataset_root=nuplan_sensor_root, + relative_path=filename_jpg.relative_to(nuplan_sensor_root), + ) + ) - return camera_dict + return camera_data_list def _extract_nuplan_lidars( diff --git a/src/py123d/conversion/datasets/nuscenes/nuscenes_converter.py b/src/py123d/conversion/datasets/nuscenes/nuscenes_converter.py index 22921491..7cc39d34 100644 --- a/src/py123d/conversion/datasets/nuscenes/nuscenes_converter.py +++ b/src/py123d/conversion/datasets/nuscenes/nuscenes_converter.py @@ -1,6 +1,6 @@ import gc from pathlib import Path -from typing import Any, Dict, List, Tuple, Union +from typing import Any, Dict, List, Union import numpy as np from pyquaternion import Quaternion @@ -15,7 +15,7 @@ NUSCENES_DETECTION_NAME_DICT, NUSCENES_DT, ) -from py123d.conversion.log_writer.abstract_log_writer import AbstractLogWriter, LiDARData +from py123d.conversion.log_writer.abstract_log_writer import AbstractLogWriter, CameraData, LiDARData from py123d.conversion.map_writer.abstract_map_writer import AbstractMapWriter from py123d.conversion.registry.box_detection_label_registry import NuScenesBoxDetectionLabel from py123d.conversion.registry.lidar_index_registry import NuScenesLiDARIndex @@ -388,8 +388,8 @@ def _extract_nuscenes_cameras( sample: Dict[str, Any], nuscenes_data_root: Path, dataset_converter_config: DatasetConverterConfig, -) -> Dict[PinholeCameraType, Tuple[Union[str, bytes], StateSE3]]: - camera_dict: Dict[PinholeCameraType, Tuple[Union[str, bytes], StateSE3]] = {} +) -> List[CameraData]: + camera_data_list: List[CameraData] = [] if dataset_converter_config.include_pinhole_cameras: for camera_type, camera_channel in NUSCENES_CAMERA_TYPES.items(): @@ -409,20 +409,20 @@ def _extract_nuscenes_cameras( extrinsic_matrix[:3, 3] = translation extrinsic = StateSE3.from_transformation_matrix(extrinsic_matrix) - cam_path = nuscenes_data_root / cam_data["filename"] + cam_path = nuscenes_data_root / str(cam_data["filename"]) if cam_path.exists() 
and cam_path.is_file(): - if dataset_converter_config.pinhole_camera_store_option == "path": - camera_data = str(cam_path) - elif dataset_converter_config.pinhole_camera_store_option == "binary": - with open(cam_path, "rb") as f: - camera_data = f.read() - else: - continue - - camera_dict[camera_type] = (camera_data, extrinsic) + # camera_dict[camera_type] = (camera_data, extrinsic) + camera_data_list.append( + CameraData( + camera_type=camera_type, + extrinsic=extrinsic, + relative_path=cam_path.relative_to(nuscenes_data_root), + dataset_root=nuscenes_data_root, + ) + ) - return camera_dict + return camera_data_list def _extract_nuscenes_lidars( diff --git a/src/py123d/conversion/datasets/pandaset/pandaset_converter.py b/src/py123d/conversion/datasets/pandaset/pandaset_converter.py index 9c244ae1..008ab4e0 100644 --- a/src/py123d/conversion/datasets/pandaset/pandaset_converter.py +++ b/src/py123d/conversion/datasets/pandaset/pandaset_converter.py @@ -22,7 +22,7 @@ read_pkl_gz, rotate_pandaset_pose_to_iso_coordinates, ) -from py123d.conversion.log_writer.abstract_log_writer import AbstractLogWriter, LiDARData +from py123d.conversion.log_writer.abstract_log_writer import AbstractLogWriter, CameraData, LiDARData from py123d.conversion.map_writer.abstract_map_writer import AbstractMapWriter from py123d.conversion.registry.box_detection_label_registry import PandasetBoxDetectionLabel from py123d.conversion.registry.lidar_index_registry import PandasetLiDARIndex @@ -332,9 +332,8 @@ def _extract_pandaset_sensor_camera( ego_state_se3: EgoStateSE3, camera_poses: Dict[str, List[Dict[str, Dict[str, float]]]], dataset_converter_config: DatasetConverterConfig, -) -> Dict[PinholeCameraType, Tuple[Union[str, bytes], StateSE3]]: - - camera_dict: Dict[PinholeCameraType, Tuple[Union[str, bytes], StateSE3]] = {} +) -> List[CameraData]: + camera_data_list: List[CameraData] = [] iteration_str = f"{iteration:02d}" if dataset_converter_config.include_pinhole_cameras: @@ -346,22 +345,20 @@ def _extract_pandaset_sensor_camera( camera_pose_dict = camera_poses[camera_name][iteration] camera_extrinsic = pandaset_pose_dict_to_state_se3(camera_pose_dict) - # camera_extrinsic = rotate_pandaset_pose_to_iso_coordinates(camera_extrinsic) camera_extrinsic = StateSE3.from_array( convert_absolute_to_relative_se3_array(ego_state_se3.rear_axle_se3, camera_extrinsic.array), copy=True ) + camera_data_list.append( + CameraData( + camera_type=camera_type, + extrinsic=camera_extrinsic, + dataset_root=source_log_path.parent, + relative_path=image_abs_path.relative_to(source_log_path.parent), + ) + ) - camera_data = None - if dataset_converter_config.pinhole_camera_store_option == "path": - pandaset_data_root = source_log_path.parent - camera_data = str(image_abs_path.relative_to(pandaset_data_root)) - elif dataset_converter_config.pinhole_camera_store_option == "binary": - with open(image_abs_path, "rb") as f: - camera_data = f.read() - camera_dict[camera_type] = camera_data, camera_extrinsic - - return camera_dict + return camera_data_list def _extract_pandaset_lidar( diff --git a/src/py123d/conversion/datasets/wopd/waymo_sensor_io.py b/src/py123d/conversion/datasets/wopd/waymo_sensor_io.py index cf25274c..cca4bc53 100644 --- a/src/py123d/conversion/datasets/wopd/waymo_sensor_io.py +++ b/src/py123d/conversion/datasets/wopd/waymo_sensor_io.py @@ -28,7 +28,11 @@ def _get_frame_at_iteration(filepath: Path, iteration: int) -> Optional[dataset_ return frame -def load_jpeg_binary_from_file(tf_record_path: Path, iteration: int, 
pinhole_camera_type: PinholeCameraType) -> bytes: +def load_jpeg_binary_from_tf_record_file( + tf_record_path: Path, + iteration: int, + pinhole_camera_type: PinholeCameraType, +) -> bytes: frame = _get_frame_at_iteration(tf_record_path, iteration) assert frame is not None, f"Frame at iteration {iteration} not found in Waymo file: {tf_record_path}" diff --git a/src/py123d/conversion/datasets/wopd/wopd_converter.py b/src/py123d/conversion/datasets/wopd/wopd_converter.py index 2622dc66..c5d7a411 100644 --- a/src/py123d/conversion/datasets/wopd/wopd_converter.py +++ b/src/py123d/conversion/datasets/wopd/wopd_converter.py @@ -17,7 +17,7 @@ WOPD_LIDAR_TYPES, ) from py123d.conversion.datasets.wopd.waymo_map_utils.wopd_map_utils import convert_wopd_map -from py123d.conversion.log_writer.abstract_log_writer import AbstractLogWriter, LiDARData +from py123d.conversion.log_writer.abstract_log_writer import AbstractLogWriter, CameraData, LiDARData from py123d.conversion.map_writer.abstract_map_writer import AbstractMapWriter from py123d.conversion.registry.box_detection_label_registry import WOPDBoxDetectionLabel from py123d.conversion.registry.lidar_index_registry import DefaultLiDARIndex, WOPDLiDARIndex @@ -379,9 +379,9 @@ def _extract_wopd_box_detections( def _extract_wopd_cameras( frame: dataset_pb2.Frame, dataset_converter_config: DatasetConverterConfig -) -> Dict[PinholeCameraType, Tuple[Union[str, bytes], StateSE3]]: +) -> List[CameraData]: - camera_dict: Dict[PinholeCameraType, Tuple[Union[str, bytes], StateSE3]] = {} + camera_data_list: List[CameraData] = [] if dataset_converter_config.include_pinhole_cameras: @@ -404,10 +404,15 @@ def _extract_wopd_cameras( for image_proto in frame.images: camera_type = WOPD_CAMERA_TYPES[image_proto.name] - camera_bytes: bytes = image_proto.image - camera_dict[camera_type] = camera_bytes, camera_extrinsic[camera_type] + camera_data_list.append( + CameraData( + camera_type=camera_type, + extrinsic=camera_extrinsic[camera_type], + jpeg_binary=image_proto.image, + ) + ) - return camera_dict + return camera_data_list def _extract_wopd_lidars( diff --git a/src/py123d/conversion/log_writer/abstract_log_writer.py b/src/py123d/conversion/log_writer/abstract_log_writer.py index 8f839a29..238919d6 100644 --- a/src/py123d/conversion/log_writer/abstract_log_writer.py +++ b/src/py123d/conversion/log_writer/abstract_log_writer.py @@ -3,7 +3,7 @@ import abc from dataclasses import dataclass from pathlib import Path -from typing import Any, Dict, List, Optional, Tuple, Union +from typing import List, Optional, Union import numpy as np import numpy.typing as npt @@ -17,6 +17,7 @@ from py123d.datatypes.sensors.pinhole_camera import PinholeCameraType from py123d.datatypes.time.time_point import TimePoint from py123d.datatypes.vehicle_state.ego_state import EgoStateSE3 +from py123d.geometry import StateSE3 class AbstractLogWriter(abc.ABC): @@ -43,8 +44,8 @@ def write( ego_state: Optional[EgoStateSE3] = None, box_detections: Optional[BoxDetectionWrapper] = None, traffic_lights: Optional[TrafficLightDetectionWrapper] = None, - pinhole_cameras: Optional[Dict[PinholeCameraType, Tuple[Any, ...]]] = None, - fisheye_mei_cameras: Optional[Dict[FisheyeMEICameraType, Tuple[Any, ...]]] = None, + pinhole_cameras: Optional[List[CameraData]] = None, + fisheye_mei_cameras: Optional[List[CameraData]] = None, lidars: Optional[List[LiDARData]] = None, scenario_tags: Optional[List[str]] = None, route_lane_group_ids: Optional[List[int]] = None, @@ -85,17 +86,32 @@ def has_point_cloud(self) -> 
bool: @dataclass class CameraData: - camera_type: PinholeCameraType + camera_type: Union[PinholeCameraType, FisheyeMEICameraType] + extrinsic: StateSE3 timestamp: Optional[TimePoint] = None jpeg_binary: Optional[bytes] = None + numpy_image: Optional[npt.NDArray[np.uint8]] = None dataset_root: Optional[Union[str, Path]] = None relative_path: Optional[Union[str, Path]] = None def __post_init__(self): - has_file_path = self.dataset_root is not None and self.relative_path is not None - has_jpeg_binary = self.jpeg_binary is not None - assert ( - has_file_path or has_jpeg_binary - ), "Either file path (dataset_root and relative_path) or jpeg_binary must be provided for CameraData." + self.has_file_path or self.has_jpeg_binary or self.has_numpy_image + ), "Either file path (dataset_root and relative_path) or jpeg_binary or numpy_image must be provided for CameraData." + + if self.has_file_path: + absolute_path = Path(self.dataset_root) / self.relative_path + assert absolute_path.exists(), f"Camera file not found: {absolute_path}" + + @property + def has_file_path(self) -> bool: + return self.dataset_root is not None and self.relative_path is not None + + @property + def has_jpeg_binary(self) -> bool: + return self.jpeg_binary is not None + + @property + def has_numpy_image(self) -> bool: + return self.numpy_image is not None diff --git a/src/py123d/conversion/log_writer/arrow_log_writer.py b/src/py123d/conversion/log_writer/arrow_log_writer.py index 5f25edb7..acbcbb3b 100644 --- a/src/py123d/conversion/log_writer/arrow_log_writer.py +++ b/src/py123d/conversion/log_writer/arrow_log_writer.py @@ -1,13 +1,19 @@ from pathlib import Path from typing import Any, Dict, List, Literal, Optional, Tuple, Union -import cv2 import numpy as np import pyarrow as pa from py123d.common.utils.uuid_utils import create_deterministic_uuid from py123d.conversion.abstract_dataset_converter import AbstractLogWriter, DatasetConverterConfig -from py123d.conversion.log_writer.abstract_log_writer import LiDARData +from py123d.conversion.log_writer.abstract_log_writer import CameraData, LiDARData +from py123d.conversion.sensor_io.camera.jpeg_camera_io import ( + decode_image_from_jpeg_binary, + encode_image_as_jpeg_binary, + load_image_from_jpeg_file, + load_jpeg_binary_from_jpeg_file, +) +from py123d.conversion.sensor_io.camera.mp4_camera_io import MP4Writer from py123d.conversion.sensor_io.lidar.draco_lidar_io import encode_lidar_pc_as_draco_binary from py123d.conversion.sensor_io.lidar.file_lidar_io import load_lidar_pcs_from_file from py123d.conversion.sensor_io.lidar.laz_lidar_io import encode_lidar_pc_as_laz_binary @@ -15,7 +21,6 @@ from py123d.datatypes.detections.traffic_light_detections import TrafficLightDetectionWrapper from py123d.datatypes.scene.arrow.utils.arrow_metadata_utils import add_log_metadata_to_arrow_schema from py123d.datatypes.scene.scene_metadata import LogMetadata -from py123d.datatypes.sensors.fisheye_mei_camera import FisheyeMEICameraType from py123d.datatypes.sensors.lidar import LiDARType from py123d.datatypes.sensors.pinhole_camera import PinholeCameraType from py123d.datatypes.time.time_point import TimePoint @@ -23,17 +28,33 @@ from py123d.geometry import BoundingBoxSE3Index, StateSE3, StateSE3Index, Vector3DIndex +def _get_logs_root() -> Path: + from py123d.script.utils.dataset_path_utils import get_dataset_paths + + DATASET_PATHS = get_dataset_paths() + return Path(DATASET_PATHS.py123d_logs_root) + + +def _get_sensors_root() -> Path: + from py123d.script.utils.dataset_path_utils import 
get_dataset_paths + + DATASET_PATHS = get_dataset_paths() + return Path(DATASET_PATHS.py123d_sensors_root) + + class ArrowLogWriter(AbstractLogWriter): def __init__( self, - logs_root: Union[str, Path], + logs_root: Optional[Union[str, Path]] = None, + sensors_root: Optional[Union[str, Path]] = None, ipc_compression: Optional[Literal["lz4", "zstd"]] = None, ipc_compression_level: Optional[int] = None, lidar_compression: Optional[Literal["draco", "laz"]] = "draco", ) -> None: - self._logs_root = Path(logs_root) + self._logs_root = Path(logs_root) if logs_root is not None else _get_logs_root() + self._sensors_root = Path(sensors_root) if sensors_root is not None else _get_sensors_root() self._ipc_compression = ipc_compression self._ipc_compression_level = ipc_compression_level self._lidar_compression = lidar_compression @@ -44,6 +65,8 @@ def __init__( self._schema: Optional[LogMetadata] = None self._source: Optional[pa.NativeFile] = None self._record_batch_writer: Optional[pa.ipc.RecordBatchWriter] = None + self._pinhole_mp4_writers: Dict[str, MP4Writer] = {} + self._fisheye_mei_mp4_writers: Dict[str, MP4Writer] = {} def reset(self, dataset_converter_config: DatasetConverterConfig, log_metadata: LogMetadata) -> bool: @@ -76,6 +99,9 @@ def reset(self, dataset_converter_config: DatasetConverterConfig, log_metadata: self._source = pa.OSFile(str(sink_log_path), "wb") self._record_batch_writer = pa.ipc.new_file(self._source, schema=self._schema, options=options) + self._pinhole_mp4_writers = {} + self._fisheye_mei_mp4_writers = {} + return log_needs_writing def write( @@ -84,8 +110,8 @@ def write( ego_state: Optional[EgoStateSE3] = None, box_detections: Optional[BoxDetectionWrapper] = None, traffic_lights: Optional[TrafficLightDetectionWrapper] = None, - pinhole_cameras: Optional[Dict[PinholeCameraType, Tuple[Any, ...]]] = None, - fisheye_mei_cameras: Optional[Dict[FisheyeMEICameraType, Tuple[Any, ...]]] = None, + pinhole_cameras: Optional[List[CameraData]] = None, + fisheye_mei_cameras: Optional[List[CameraData]] = None, lidars: Optional[List[LiDARData]] = None, scenario_tags: Optional[List[str]] = None, route_lane_group_ids: Optional[List[int]] = None, @@ -164,8 +190,14 @@ def write( # -------------------------------------------------------------------------------------------------------------- if self._dataset_converter_config.include_pinhole_cameras: assert pinhole_cameras is not None, "Pinhole camera data is required but not provided." - provided_pinhole_cameras = set(pinhole_cameras.keys()) + provided_pinhole_data = self._prepare_camera_data_dict( + pinhole_cameras, self._dataset_converter_config.pinhole_camera_store_option + ) + provided_pinhole_extrinsics = { + camera_data.camera_type: camera_data.extrinsic for camera_data in pinhole_cameras + } expected_pinhole_cameras = set(self._log_metadata.pinhole_camera_metadata.keys()) + for pinhole_camera_type in expected_pinhole_cameras: pinhole_camera_name = pinhole_camera_type.serialize() @@ -173,18 +205,9 @@ def write( # In this case, we write None/null to the arrow table. pinhole_camera_data: Optional[Any] = None pinhole_camera_pose: Optional[StateSE3] = None - if pinhole_camera_type in provided_pinhole_cameras: - pinhole_camera_data, pinhole_camera_pose = pinhole_cameras[pinhole_camera_type] - - # TODO: Refactor how camera data handed to the writer. - # This should be combined with configurations to write to log, sensor_root, or sensor_root as mp4. 
- if isinstance(pinhole_camera_data, Path) or isinstance(pinhole_camera_data, str): - pinhole_camera_data = str(pinhole_camera_data) - elif isinstance(pinhole_camera_data, bytes): - pinhole_camera_data = pinhole_camera_data - elif isinstance(pinhole_camera_data, np.ndarray): - _, encoded_img = cv2.imencode(".jpg", pinhole_camera_data) - pinhole_camera_data = encoded_img.tobytes() + if pinhole_camera_type in provided_pinhole_data: + pinhole_camera_data = provided_pinhole_data[pinhole_camera_type] + pinhole_camera_pose = provided_pinhole_extrinsics[pinhole_camera_type] record_batch_data[f"{pinhole_camera_name}_data"] = [pinhole_camera_data] record_batch_data[f"{pinhole_camera_name}_extrinsic"] = [ @@ -196,8 +219,14 @@ def write( # -------------------------------------------------------------------------------------------------------------- if self._dataset_converter_config.include_fisheye_mei_cameras: assert fisheye_mei_cameras is not None, "Fisheye MEI camera data is required but not provided." - provided_fisheye_mei_cameras = set(fisheye_mei_cameras.keys()) + provided_fisheye_mei_data = self._prepare_camera_data_dict( + fisheye_mei_cameras, self._dataset_converter_config.fisheye_mei_camera_store_option + ) + provided_fisheye_mei_extrinsics = { + camera_data.camera_type: camera_data.extrinsic for camera_data in fisheye_mei_cameras + } expected_fisheye_mei_cameras = set(self._log_metadata.fisheye_mei_camera_metadata.keys()) + for fisheye_mei_camera_type in expected_fisheye_mei_cameras: fisheye_mei_camera_name = fisheye_mei_camera_type.serialize() @@ -205,18 +234,9 @@ def write( # In this case, we write None/null to the arrow table. fisheye_mei_camera_data: Optional[Any] = None fisheye_mei_camera_pose: Optional[StateSE3] = None - if fisheye_mei_camera_type in provided_fisheye_mei_cameras: - fisheye_mei_camera_data, fisheye_mei_camera_pose = fisheye_mei_cameras[fisheye_mei_camera_type] - - # TODO: Refactor how camera data handed to the writer. - # This should be combined with configurations to write to log, sensor_root, or sensor_root as mp4. 
-                    if isinstance(fisheye_mei_camera_data, Path) or isinstance(fisheye_mei_camera_data, str):
-                        fisheye_mei_camera_data = str(fisheye_mei_camera_data)
-                    elif isinstance(fisheye_mei_camera_data, bytes):
-                        fisheye_mei_camera_data = fisheye_mei_camera_data
-                    elif isinstance(fisheye_mei_camera_data, np.ndarray):
-                        _, encoded_img = cv2.imencode(".jpg", fisheye_mei_camera_data)
-                        fisheye_mei_camera_data = encoded_img.tobytes()
+                if fisheye_mei_camera_type in provided_fisheye_mei_data:
+                    fisheye_mei_camera_data = provided_fisheye_mei_data[fisheye_mei_camera_type]
+                    fisheye_mei_camera_pose = provided_fisheye_mei_extrinsics[fisheye_mei_camera_type]
 
                 record_batch_data[f"{fisheye_mei_camera_name}_data"] = [fisheye_mei_camera_data]
                 record_batch_data[f"{fisheye_mei_camera_name}_extrinsic"] = [
@@ -279,6 +299,13 @@ def close(self) -> None:
         self._log_metadata: Optional[LogMetadata] = None
         self._schema: Optional[LogMetadata] = None
 
+        for mp4_writer in self._pinhole_mp4_writers.values():
+            mp4_writer.close()
+        self._pinhole_mp4_writers = {}
+        for mp4_writer in self._fisheye_mei_mp4_writers.values():
+            mp4_writer.close()
+        self._fisheye_mei_mp4_writers = {}
+
     @staticmethod
     def _build_schema(dataset_converter_config: DatasetConverterConfig, log_metadata: LogMetadata) -> pa.Schema:
@@ -336,7 +363,7 @@ def _build_schema(dataset_converter_config: DatasetConverterConfig, log_metadata
                 schema_list.append((f"{pinhole_camera_name}_data", pa.binary()))
 
             elif dataset_converter_config.pinhole_camera_store_option == "mp4":
-                raise NotImplementedError("MP4 format is not yet supported, but planned for future releases.")
+                schema_list.append((f"{pinhole_camera_name}_data", pa.int64()))
 
             # Add camera pose
             schema_list.append((f"{pinhole_camera_name}_extrinsic", pa.list_(pa.float64(), len(StateSE3Index))))
@@ -356,7 +383,7 @@ def _build_schema(dataset_converter_config: DatasetConverterConfig, log_metadata
                 schema_list.append((f"{fisheye_mei_camera_name}_data", pa.binary()))
 
             elif dataset_converter_config.fisheye_mei_camera_store_option == "mp4":
-                raise NotImplementedError("MP4 format is not yet supported, but planned for future releases.")
+                schema_list.append((f"{fisheye_mei_camera_name}_data", pa.int64()))
 
             # Add camera pose
             schema_list.append((f"{fisheye_mei_camera_name}_extrinsic", pa.list_(pa.float64(), len(StateSE3Index))))
@@ -425,3 +452,74 @@ def _prepare_lidar_data_dict(self, lidars: List[LiDARData]) -> Dict[LiDARType, U
             lidar_data_dict[lidar_type] = binary
 
         return lidar_data_dict
+
+    def _prepare_camera_data_dict(
+        self, cameras: List[CameraData], store_option: Literal["path", "binary", "mp4"]
+    ) -> Dict[PinholeCameraType, Union[str, int, bytes]]:
+        camera_data_dict: Dict[PinholeCameraType, Union[str, int, bytes]] = {}
+
+        for camera_data in cameras:
+            if store_option == "path":
+                if camera_data.has_file_path:
+                    camera_data_dict[camera_data.camera_type] = str(camera_data.relative_path)
+                else:
+                    raise NotImplementedError("Camera data must include a file path for the 'path' store option.")
+            elif store_option == "binary":
+                camera_data_dict[camera_data.camera_type] = _get_jpeg_binary_from_camera_data(camera_data)
+            elif store_option == "mp4":
+                camera_name = camera_data.camera_type.serialize()
+                if camera_name not in self._pinhole_mp4_writers:
+                    mp4_path = (
+                        self._sensors_root
+                        / self._log_metadata.split
+                        / self._log_metadata.log_name
+                        / f"{camera_name}.mp4"
+                    )
+                    frame_interval = self._log_metadata.timestep_seconds
+                    self._pinhole_mp4_writers[camera_name] = MP4Writer(mp4_path, fps=1 / frame_interval)
+
+                image =
_get_numpy_image_from_camera_data(camera_data) + camera_data_dict[camera_data.camera_type] = self._pinhole_mp4_writers[camera_name].write_frame(image) + + else: + raise NotImplementedError(f"Unsupported camera store option: {store_option}") + + return camera_data_dict + + +def _get_jpeg_binary_from_camera_data(camera_data: CameraData) -> Optional[bytes]: + jpeg_binary: Optional[bytes] = None + + if camera_data.has_jpeg_binary: + jpeg_binary = camera_data.jpeg_binary + elif camera_data.has_numpy_image: + jpeg_binary = encode_image_as_jpeg_binary(camera_data.numpy_image) + elif camera_data.has_file_path: + absolute_path = Path(camera_data.dataset_root) / camera_data.relative_path + + if absolute_path.suffix.lower() in [".jpg", ".jpeg"]: + jpeg_binary = load_jpeg_binary_from_jpeg_file(absolute_path) + else: + raise NotImplementedError(f"Unsupported camera file format: {absolute_path.suffix} for binary storage.") + else: + raise NotImplementedError("Camera data must provide jpeg_binary, numpy_image, or file path for binary storage.") + + assert jpeg_binary is not None + return jpeg_binary + + +def _get_numpy_image_from_camera_data(camera_data: CameraData) -> Optional[np.ndarray]: + numpy_image: Optional[np.ndarray] = None + + if camera_data.has_numpy_image: + numpy_image = camera_data.numpy_image + elif camera_data.has_jpeg_binary: + numpy_image = decode_image_from_jpeg_binary(camera_data.jpeg_binary) + elif camera_data.has_file_path: + absolute_path = Path(camera_data.dataset_root) / camera_data.relative_path + numpy_image = load_image_from_jpeg_file(absolute_path) + else: + raise NotImplementedError("Camera data must provide numpy_image, jpeg_binary, or file path for numpy image.") + + assert numpy_image is not None + return numpy_image diff --git a/src/py123d/conversion/sensor_io/camera/jpeg_camera_io.py b/src/py123d/conversion/sensor_io/camera/jpeg_camera_io.py index 4e9684e7..d942e729 100644 --- a/src/py123d/conversion/sensor_io/camera/jpeg_camera_io.py +++ b/src/py123d/conversion/sensor_io/camera/jpeg_camera_io.py @@ -1,39 +1,29 @@ from pathlib import Path -from typing import Dict, Optional - -from omegaconf import DictConfig -from pyparsing import Union - -from py123d.datatypes.sensors.pinhole_camera import PinholeCamera, PinholeCameraMetadata -from py123d.script.utils.dataset_path_utils import get_dataset_paths - -DATASET_PATHS: DictConfig = get_dataset_paths() -DATASET_SENSOR_ROOT: Dict[str, Path] = { - "nuplan": DATASET_PATHS.nuplan_sensor_root, - "av2-sensor": DATASET_PATHS.av2_sensor_data_root, - "wopd": DATASET_PATHS.wopd_data_root, - "pandaset": DATASET_PATHS.pandaset_data_root, -} - - -def load_image_from_jpeg_file( - dataset_name: str, - dataset_root: Path, - relative_path: Union[str, Path], - camera_metadata: PinholeCameraMetadata, - iteration: Optional[int] = None, -) -> PinholeCamera: - assert relative_path is not None, "Relative path to camera JPEG file must be provided." - - -def load_image_from_jpeg_binary( - dataset_name: str, - relative_path: Union[str, Path], - pinhole_camera_metadata: PinholeCameraMetadata, - iteration: Optional[int] = None, -) -> PinholeCamera: - assert relative_path is not None, "Relative path to camera JPEG file must be provided." 
-    absolute_path = Path(dataset_name) / relative_path
-    with open(absolute_path, "rb") as f:
+
+import cv2
+import numpy as np
+import numpy.typing as npt
+
+
+def encode_image_as_jpeg_binary(image: npt.NDArray[np.uint8]) -> bytes:
+    _, encoded_img = cv2.imencode(".jpg", image)
+    jpeg_binary = encoded_img.tobytes()
+    return jpeg_binary
+
+
+def decode_image_from_jpeg_binary(jpeg_binary: bytes) -> npt.NDArray[np.uint8]:
+    image = cv2.imdecode(np.frombuffer(jpeg_binary, np.uint8), cv2.IMREAD_UNCHANGED)
+    image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
+    return image
+
+
+def load_jpeg_binary_from_jpeg_file(jpeg_path: Path) -> bytes:
+    with open(jpeg_path, "rb") as f:
         jpeg_binary = f.read()
-    return PinholeCamera(metadata=pinhole_camera_metadata, jpeg_binary=jpeg_binary)
+    return jpeg_binary
+
+
+def load_image_from_jpeg_file(jpeg_path: Path) -> npt.NDArray[np.uint8]:
+    image = cv2.imread(str(jpeg_path), cv2.IMREAD_COLOR)
+    image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
+    return image
diff --git a/src/py123d/conversion/sensor_io/camera/mp4_camera_io.py b/src/py123d/conversion/sensor_io/camera/mp4_camera_io.py
index 626d7a63..9cb91aad 100644
--- a/src/py123d/conversion/sensor_io/camera/mp4_camera_io.py
+++ b/src/py123d/conversion/sensor_io/camera/mp4_camera_io.py
@@ -3,8 +3,9 @@ def load_image_from_mp4_file() -> None:
     raise NotImplementedError
 
+from functools import lru_cache
 from pathlib import Path
-from typing import Optional
+from typing import Optional, Union
 
 import cv2
 import numpy as np
@@ -13,7 +14,7 @@ def load_image_from_mp4_file() -> None:
 class MP4Writer:
     """Write images sequentially to an MP4 video file."""
 
-    def __init__(self, output_path: str, fps: float = 30.0, codec: str = "mp4v"):
+    def __init__(self, output_path: Union[str, Path], fps: float = 30.0, codec: str = "mp4v"):
         """
         Initialize MP4 writer.
 
@@ -22,32 +23,36 @@ def __init__(self, output_path: str, fps: float = 30.0, codec: str = "mp4v"):
             fps: Frames per second
             codec: Video codec ('mp4v', 'avc1', 'h264')
         """
-        self.output_path = output_path
+        self.output_path = Path(output_path)
         self.fps = fps
         self.codec = codec
         self.writer = None
         self.frame_size = None
         self.frame_count = 0
 
-    def write_frame(self, frame: np.ndarray):
+    def write_frame(self, frame: np.ndarray) -> int:
         """
         Write a single frame to the video.
 
         Args:
-            frame: Image as numpy array (BGR format)
+            frame: Image as numpy array (RGB format)
         """
+        frame_idx = int(self.frame_count)
         if self.writer is None:
             # Initialize writer with first frame's dimensions
             h, w = frame.shape[:2]
             self.frame_size = (w, h)
             fourcc = cv2.VideoWriter_fourcc(*self.codec)
-            self.writer = cv2.VideoWriter(self.output_path, fourcc, self.fps, self.frame_size)
+            self.output_path.parent.mkdir(parents=True, exist_ok=True)
+            self.writer = cv2.VideoWriter(str(self.output_path), fourcc, self.fps, self.frame_size)
 
         if frame.shape[:2][::-1] != self.frame_size:
             raise ValueError(f"Frame size {frame.shape[:2][::-1]} doesn't match " f"video size {self.frame_size}")
 
+        frame = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)
         self.writer.write(frame)
         self.frame_count += 1
+        return frame_idx
 
     def close(self):
         """Release the video writer."""
@@ -55,17 +60,11 @@ def close(self):
             self.writer.release()
             self.writer = None
 
-    def __enter__(self):
-        return self
-
-    def __exit__(self, exc_type, exc_val, exc_tb):
-        self.close()
-
 
 class MP4Reader:
     """Read MP4 video with random frame access."""
 
-    def __init__(self, video_path: str):
+    def __init__(self, video_path: Union[str, Path], read_all: bool = False):
         """
         Initialize MP4 reader.
@@ -85,6 +84,18 @@ def __init__(self, video_path: str):
         self.fps = self.cap.get(cv2.CAP_PROP_FPS)
         self.width = int(self.cap.get(cv2.CAP_PROP_FRAME_WIDTH))
         self.height = int(self.cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
+        self.read_all = read_all
+
+        if read_all:
+            self.frames = []
+            for _ in range(self.frame_count):
+                ret, frame = self.cap.read()
+                if not ret:
+                    break
+                frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
+                self.frames.append(frame)
+            self.cap.release()
+            self.cap = None
 
     def get_frame(self, frame_index: int) -> Optional[np.ndarray]:
         """
@@ -94,87 +105,29 @@ def get_frame(self, frame_index: int) -> Optional[np.ndarray]:
             frame_index: Zero-based frame index
 
         Returns:
-            Frame as numpy array (BGR format) or None if invalid index
+            Frame as numpy array (RGB format) or None if invalid index
         """
+
         if frame_index < 0 or frame_index >= self.frame_count:
             raise IndexError(f"Frame index {frame_index} out of range " f"[0, {self.frame_count})")
+
+        if self.read_all:
+            return self.frames[frame_index]
 
         # Set the frame position
         self.cap.set(cv2.CAP_PROP_POS_FRAMES, frame_index)
 
+        # Read the frame and convert OpenCV's BGR output to RGB
         ret, frame = self.cap.read()
+        if ret:
+            frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
         return frame if ret else None
 
-    def read_sequential(self) -> Optional[np.ndarray]:
-        """
-        Read next frame sequentially.
-
-        Returns:
-            Frame as numpy array or None if end of video
-        """
-        ret, frame = self.cap.read()
-        return frame if ret else None
-
-    def reset(self):
-        """Reset to beginning of video."""
-        self.cap.set(cv2.CAP_PROP_POS_FRAMES, 0)
-
-    def close(self):
-        """Release the video capture."""
-        if self.cap is not None:
-            self.cap.release()
-            self.cap = None
-
-    def __enter__(self):
-        return self
-
-    def __exit__(self, exc_type, exc_val, exc_tb):
-        self.close()
-
-    def __len__(self):
-        return self.frame_count
-
     def __getitem__(self, index: int) -> np.ndarray:
         """Allow indexing like reader[10]"""
         return self.get_frame(index)
 
-# Example usage
-if __name__ == "__main__":
-    # Create sample video
-    print("Creating sample video...")
-    with MP4Writer("output.mp4", fps=30.0) as writer:
-        for i in range(100):
-            # Create colored frames
-            frame = np.zeros((480, 640, 3), dtype=np.uint8)
-            color = int(255 * i / 100)
-            frame[:, :] = (color, 255 - color, 128)
-
-            # Add frame number text
-            cv2.putText(frame, f"Frame {i}", (50, 240), cv2.FONT_HERSHEY_SIMPLEX, 2, (255, 255, 255), 3)
-
-            writer.write_frame(frame)
-
-        print(f"Video created with {writer.frame_count} frames")
-
-    # Read video with indexing
-    print("\nReading video with random access...")
-    with MP4Reader("output.mp4") as reader:
-        print(f"Video info: {len(reader)} frames, " f"{reader.width}x{reader.height}, {reader.fps} fps")
-
-        # Read specific frames
-        frames_to_read = [0, 25, 50, 75, 99]
-        for idx in frames_to_read:
-            frame = reader[idx]
-            if frame is not None:
-                print(f"Successfully read frame {idx}")
-            else:
-                print(f"Failed to read frame {idx}")
-
-    # Sequential reading example
-    print("\nReading first 5 frames sequentially...")
-    reader.reset()
-    for i in range(5):
-        frame = reader.read_sequential()
-        if frame is not None:
-            print(f"Read sequential frame {i}")
+
+@lru_cache(maxsize=64)
+def get_mp4_reader_from_path(mp4_path: str) -> MP4Reader:
+    return MP4Reader(mp4_path, read_all=False)
diff --git a/src/py123d/datatypes/scene/arrow/utils/arrow_getters.py b/src/py123d/datatypes/scene/arrow/utils/arrow_getters.py
index 6ce6701a..71c08154 100644
---
a/src/py123d/datatypes/scene/arrow/utils/arrow_getters.py +++ b/src/py123d/datatypes/scene/arrow/utils/arrow_getters.py @@ -8,6 +8,8 @@ from omegaconf import DictConfig from py123d.conversion.registry.lidar_index_registry import DefaultLiDARIndex +from py123d.conversion.sensor_io.camera.jpeg_camera_io import decode_image_from_jpeg_binary +from py123d.conversion.sensor_io.camera.mp4_camera_io import get_mp4_reader_from_path from py123d.conversion.sensor_io.lidar.draco_lidar_io import load_lidar_from_draco_binary from py123d.conversion.sensor_io.lidar.file_lidar_io import load_lidar_pcs_from_file from py123d.conversion.sensor_io.lidar.laz_lidar_io import load_lidar_from_laz_binary @@ -136,10 +138,13 @@ def get_camera_from_arrow_table( image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB) elif isinstance(table_data, bytes): - image = cv2.imdecode(np.frombuffer(table_data, np.uint8), cv2.IMREAD_UNCHANGED) - image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB) + image = decode_image_from_jpeg_binary(table_data) + elif isinstance(table_data, int): + image = _unoptimized_demo_mp4_read(log_metadata, camera_name, table_data) else: - raise NotImplementedError("Only string file paths for camera data are supported.") + raise NotImplementedError( + f"Only string file paths, bytes, or int frame indices are supported for camera data, got {type(table_data)}" + ) if camera_name.startswith("fcam"): camera_metadata = log_metadata.fisheye_mei_camera_metadata[camera_type] @@ -210,3 +215,16 @@ def get_lidar_from_arrow_table( ) return lidar + + +def _unoptimized_demo_mp4_read(log_metadata: LogMetadata, camera_name: str, frame_index: int) -> Optional[np.ndarray]: + """A quick and dirty MP4 reader for testing purposes only. Not optimized for performance.""" + image: Optional[npt.NDArray[np.uint8]] = None + + py123d_sensor_root = Path(DATASET_PATHS.py123d_sensors_root) + mp4_path = py123d_sensor_root / log_metadata.split / log_metadata.log_name / f"{camera_name}.mp4" + if mp4_path.exists(): + reader = get_mp4_reader_from_path(str(mp4_path)) + image = reader.get_frame(frame_index) + + return image diff --git a/src/py123d/script/config/conversion/datasets/kitti360_dataset.yaml b/src/py123d/script/config/conversion/datasets/kitti360_dataset.yaml index f15509e3..544eb879 100644 --- a/src/py123d/script/config/conversion/datasets/kitti360_dataset.yaml +++ b/src/py123d/script/config/conversion/datasets/kitti360_dataset.yaml @@ -31,11 +31,11 @@ kitti360_dataset: # Pinhole Cameras include_pinhole_cameras: true - pinhole_camera_store_option: "path" + pinhole_camera_store_option: "binary" # Fisheye Cameras include_fisheye_mei_cameras: true - fisheye_mei_camera_store_option: "path" + fisheye_mei_camera_store_option: "binary" # LiDARs include_lidars: true @@ -46,8 +46,6 @@ kitti360_dataset: include_route: false include_scenario_tags: false - # NOTE: Pandaset does not have official splits, so we create our own here. - # We use 80% of the logs for training, 10% for validation, and 10% for testing. 
train_sequences: - "2013_05_28_drive_0000_sync" - "2013_05_28_drive_0002_sync" diff --git a/src/py123d/script/config/conversion/datasets/nuplan_dataset.yaml b/src/py123d/script/config/conversion/datasets/nuplan_dataset.yaml index 19b0d0f2..1241c2ae 100644 --- a/src/py123d/script/config/conversion/datasets/nuplan_dataset.yaml +++ b/src/py123d/script/config/conversion/datasets/nuplan_dataset.yaml @@ -28,11 +28,11 @@ nuplan_dataset: # Pinhole Cameras include_pinhole_cameras: true - pinhole_camera_store_option: "path" # "path", "binary", "mp4" + pinhole_camera_store_option: "binary" # "path", "binary", "mp4" # LiDARs include_lidars: true - lidar_store_option: "path_merged" # "path", "path_merged", "binary" + lidar_store_option: "binary" # "path", "path_merged", "binary" # Scenario tag / Route include_scenario_tags: true diff --git a/src/py123d/script/config/conversion/datasets/nuplan_mini_dataset.yaml b/src/py123d/script/config/conversion/datasets/nuplan_mini_dataset.yaml index 50aea778..fb7818b2 100644 --- a/src/py123d/script/config/conversion/datasets/nuplan_mini_dataset.yaml +++ b/src/py123d/script/config/conversion/datasets/nuplan_mini_dataset.yaml @@ -28,11 +28,11 @@ nuplan_mini_dataset: # Pinhole Cameras include_pinhole_cameras: true - pinhole_camera_store_option: "path" # "path", "binary", "mp4" + pinhole_camera_store_option: "binary" # "path", "binary", "mp4" # LiDARs include_lidars: true - lidar_store_option: "path_merged" # "path", "path_merged", "binary" + lidar_store_option: "binary" # "path", "path_merged", "binary" # Scenario tag / Route include_scenario_tags: true diff --git a/src/py123d/script/config/conversion/datasets/nuscenes_dataset.yaml b/src/py123d/script/config/conversion/datasets/nuscenes_dataset.yaml index 7ad5834f..52ce8207 100644 --- a/src/py123d/script/config/conversion/datasets/nuscenes_dataset.yaml +++ b/src/py123d/script/config/conversion/datasets/nuscenes_dataset.yaml @@ -26,11 +26,11 @@ nuscenes_dataset: # Pinhole Cameras include_pinhole_cameras: true - pinhole_camera_store_option: "path" + pinhole_camera_store_option: "binary" # LiDARs include_lidars: true - lidar_store_option: "path" + lidar_store_option: "binary" # Not available: include_fisheye_mei_cameras: false diff --git a/src/py123d/script/config/conversion/datasets/nuscenes_mini_dataset.yaml b/src/py123d/script/config/conversion/datasets/nuscenes_mini_dataset.yaml index e7181c47..9bc7f019 100644 --- a/src/py123d/script/config/conversion/datasets/nuscenes_mini_dataset.yaml +++ b/src/py123d/script/config/conversion/datasets/nuscenes_mini_dataset.yaml @@ -26,11 +26,11 @@ nuscenes_dataset: # Pinhole Cameras include_pinhole_cameras: true - pinhole_camera_store_option: "path" + pinhole_camera_store_option: "binary" # LiDARs include_lidars: true - lidar_store_option: "path" + lidar_store_option: "binary" # Not available: include_fisheye_mei_cameras: false diff --git a/src/py123d/script/config/conversion/datasets/pandaset_dataset.yaml b/src/py123d/script/config/conversion/datasets/pandaset_dataset.yaml index d70a2aab..30747b42 100644 --- a/src/py123d/script/config/conversion/datasets/pandaset_dataset.yaml +++ b/src/py123d/script/config/conversion/datasets/pandaset_dataset.yaml @@ -20,7 +20,7 @@ pandaset_dataset: # Pinhole Cameras include_pinhole_cameras: true - pinhole_camera_store_option: "path" + pinhole_camera_store_option: "binary" # LiDARs include_lidars: true diff --git a/src/py123d/script/config/conversion/datasets/wopd_dataset.yaml b/src/py123d/script/config/conversion/datasets/wopd_dataset.yaml index 
3fb5acee..14a643d8 100644 --- a/src/py123d/script/config/conversion/datasets/wopd_dataset.yaml +++ b/src/py123d/script/config/conversion/datasets/wopd_dataset.yaml @@ -30,7 +30,7 @@ wopd_dataset: pinhole_camera_store_option: "binary" # "path", "binary", "mp4" # LiDARs - include_lidars: true + include_lidars: false lidar_store_option: "binary" # "path", "path_merged", "binary" # Not available: diff --git a/test_viser.py b/test_viser.py index f7ae44bb..a91005cf 100644 --- a/test_viser.py +++ b/test_viser.py @@ -1,17 +1,16 @@ from py123d.common.multithreading.worker_sequential import Sequential from py123d.datatypes.scene.arrow.arrow_scene_builder import ArrowSceneBuilder from py123d.datatypes.scene.scene_filter import SceneFilter - -# from py123d.datatypes.sensors.pinhole_camera import PinholeCameraType +from py123d.datatypes.sensors.pinhole_camera import PinholeCameraType from py123d.visualization.viser.viser_viewer import ViserViewer if __name__ == "__main__": # splits = ["kitti360_train"] - splits = ["nuscenes-mini_val", "nuscenes-mini_train"] + # splits = ["nuscenes-mini_val", "nuscenes-mini_train"] # splits = ["nuplan-mini_test", "nuplan-mini_train", "nuplan-mini_val"] # splits = ["nuplan_private_test"] # splits = ["carla_test"] - # splits = ["wopd_val"] + splits = ["wopd_val"] # splits = ["av2-sensor_train"] # splits = ["pandaset_test", "pandaset_val", "pandaset_train"] # log_names = ["2021.08.24.13.12.55_veh-45_00386_00472"] @@ -29,7 +28,7 @@ history_s=0.0, timestamp_threshold_s=None, shuffle=True, - # pinhole_camera_types=[PinholeCameraType.PCAM_F0], + pinhole_camera_types=[PinholeCameraType.PCAM_F0], ) scene_builder = ArrowSceneBuilder() worker = Sequential() From 7695b272a17899073824ccaae2763b5351a08499 Mon Sep 17 00:00:00 2001 From: Daniel Dauner Date: Thu, 6 Nov 2025 10:00:15 +0100 Subject: [PATCH 143/145] Add viser to CLI and refactorings. 
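
A usage sketch for the new entry point (illustrative, not part of the diff:
the compose-API calls and the config-module path are assumptions based on
the files touched below; `cfg.viser_config` matches run_viser.py):

    from hydra import compose, initialize_config_module

    from py123d.script.builders.viser_config_builder import build_viser_config

    # Compose the same config tree that src/py123d/script/run_viser.py loads.
    with initialize_config_module(config_module="py123d.script.config.viser", version_base=None):
        cfg = compose(config_name="default_viser")

    # Validate and instantiate the ViserConfig dataclass from the `viser_config` node.
    viser_config = build_viser_config(cfg.viser_config)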
--- test_viser.py => examples/01_viser.py | 0 .../script/builders/viser_config_builder.py | 22 ++++ .../script/config/common/default_common.yaml | 19 +-- .../config/common/default_experiment.yaml | 2 +- .../{all_scenes.yaml => nuplan_logs.yaml} | 15 ++- ..._mini_train.yaml => nuplan_mini_logs.yaml} | 8 +- .../common/scene_filter/nuplan_mini_val.yaml | 17 --- .../common/scene_filter/nuplan_sim_agent.yaml | 117 ------------------ .../common/scene_filter/viser_scenes.yaml | 8 +- .../config/conversion/default_conversion.yaml | 2 +- .../script/config/viser/default_viser.yaml | 61 ++++++++- src/py123d/script/run_conversion.py | 2 +- src/py123d/script/run_viser.py | 6 +- .../visualization/viser/viser_config.py | 32 ++++- .../visualization/viser/viser_viewer.py | 13 +- 15 files changed, 142 insertions(+), 182 deletions(-) rename test_viser.py => examples/01_viser.py (100%) create mode 100644 src/py123d/script/builders/viser_config_builder.py rename src/py123d/script/config/common/scene_filter/{all_scenes.yaml => nuplan_logs.yaml} (69%) rename src/py123d/script/config/common/scene_filter/{nuplan_mini_train.yaml => nuplan_mini_logs.yaml} (69%) delete mode 100644 src/py123d/script/config/common/scene_filter/nuplan_mini_val.yaml delete mode 100644 src/py123d/script/config/common/scene_filter/nuplan_sim_agent.yaml diff --git a/test_viser.py b/examples/01_viser.py similarity index 100% rename from test_viser.py rename to examples/01_viser.py diff --git a/src/py123d/script/builders/viser_config_builder.py b/src/py123d/script/builders/viser_config_builder.py new file mode 100644 index 00000000..a94fcf7a --- /dev/null +++ b/src/py123d/script/builders/viser_config_builder.py @@ -0,0 +1,22 @@ +import logging + +from hydra.utils import instantiate +from omegaconf import DictConfig + +from py123d.script.builders.utils.utils_type import validate_type +from py123d.visualization.viser.viser_config import ViserConfig + +logger = logging.getLogger(__name__) + + +def build_viser_config(cfg: DictConfig) -> ViserConfig: + """ + Builds the config dataclass for the viser viewer. + :param cfg: DictConfig. Configuration that is used to run the viewer. + :return: Instance of ViserConfig. + """ + logger.info("Building ViserConfig...") + viser_config: ViserConfig = instantiate(cfg) + validate_type(viser_config, ViserConfig) + logger.info("Building ViserConfig...DONE!") + return viser_config diff --git a/src/py123d/script/config/common/default_common.yaml b/src/py123d/script/config/common/default_common.yaml index 5012ff14..a43a5c2d 100644 --- a/src/py123d/script/config/common/default_common.yaml +++ b/src/py123d/script/config/common/default_common.yaml @@ -3,23 +3,6 @@ defaults: - worker: ray_distributed - scene_filter: all_scenes - scene_builder: default_scene_builder - - override hydra/job_logging: colorlog + - override hydra/job_logging: disabled - override hydra/hydra_logging: colorlog - _self_ - -distributed_timeout_seconds: 7200 # Sets how long to wait while synchronizing across worker nodes in a distributed context. -selected_simulation_metrics: null - -# Sets verbosity level, in particular determines if progress bars are shown or not. 
-verbose: false - -# Logger -logger_level: info # Level of logger -logger_format_string: null # Logger format string, set null to use the default format string - -# Execution -max_number_of_workers: null # Set null to disable threading for simulation execution -gpu: true # Whether to use available GPUs during training/simulation - - -seed: 42 diff --git a/src/py123d/script/config/common/default_experiment.yaml b/src/py123d/script/config/common/default_experiment.yaml index 049ad4cc..b6f236c9 100644 --- a/src/py123d/script/config/common/default_experiment.yaml +++ b/src/py123d/script/config/common/default_experiment.yaml @@ -1,8 +1,8 @@ defaults: + - _self_ - default_dataset_paths - override hydra/job_logging: colorlog - override hydra/hydra_logging: colorlog - - _self_ # Cache parameters experiment_name: ??? diff --git a/src/py123d/script/config/common/scene_filter/all_scenes.yaml b/src/py123d/script/config/common/scene_filter/nuplan_logs.yaml similarity index 69% rename from src/py123d/script/config/common/scene_filter/all_scenes.yaml rename to src/py123d/script/config/common/scene_filter/nuplan_logs.yaml index 83360b6d..a9dc8275 100644 --- a/src/py123d/script/config/common/scene_filter/all_scenes.yaml +++ b/src/py123d/script/config/common/scene_filter/nuplan_logs.yaml @@ -2,7 +2,10 @@ _target_: py123d.datatypes.scene.scene_filter.SceneFilter _convert_: 'all' split_types: null -split_names: null +split_names: + - "nuplan_train" + - "nuplan_val" + - "nuplan_test" log_names: null @@ -10,11 +13,7 @@ locations: null scene_uuids: null timestamp_threshold_s: null ego_displacement_minimum_m: null - -duration_s: 9.2 -history_s: 3.0 - -camera_types: null - max_num_scenes: null -shuffle: false + +duration_s: null +history_s: null diff --git a/src/py123d/script/config/common/scene_filter/nuplan_mini_train.yaml b/src/py123d/script/config/common/scene_filter/nuplan_mini_logs.yaml similarity index 69% rename from src/py123d/script/config/common/scene_filter/nuplan_mini_train.yaml rename to src/py123d/script/config/common/scene_filter/nuplan_mini_logs.yaml index 62bb8789..9fd43e0d 100644 --- a/src/py123d/script/config/common/scene_filter/nuplan_mini_train.yaml +++ b/src/py123d/script/config/common/scene_filter/nuplan_mini_logs.yaml @@ -4,14 +4,16 @@ _convert_: 'all' split_types: null split_names: - "nuplan_mini_train" + - "nuplan_mini_val" + - "nuplan_mini_test" log_names: null locations: null scene_uuids: null -timestamp_threshold_s: 1.0 +timestamp_threshold_s: null ego_displacement_minimum_m: null max_num_scenes: null -duration_s: 8.1 -history_s: 1.0 +duration_s: null +history_s: null diff --git a/src/py123d/script/config/common/scene_filter/nuplan_mini_val.yaml b/src/py123d/script/config/common/scene_filter/nuplan_mini_val.yaml deleted file mode 100644 index f767bbcc..00000000 --- a/src/py123d/script/config/common/scene_filter/nuplan_mini_val.yaml +++ /dev/null @@ -1,17 +0,0 @@ -_target_: py123d.datatypes.scene.scene_filter.SceneFilter -_convert_: 'all' - -split_types: null -split_names: - - "nuplan_mini_val" -log_names: null - - -locations: null -scene_uuids: null -timestamp_threshold_s: 1.0 -ego_displacement_minimum_m: null -max_num_scenes: null - -duration_s: 8.1 -history_s: 1.0 diff --git a/src/py123d/script/config/common/scene_filter/nuplan_sim_agent.yaml b/src/py123d/script/config/common/scene_filter/nuplan_sim_agent.yaml deleted file mode 100644 index 8e2b3c49..00000000 --- a/src/py123d/script/config/common/scene_filter/nuplan_sim_agent.yaml +++ /dev/null @@ -1,117 +0,0 @@ -_target_: 
diff --git a/src/py123d/script/config/common/scene_filter/nuplan_sim_agent.yaml b/src/py123d/script/config/common/scene_filter/nuplan_sim_agent.yaml
deleted file mode 100644
index 8e2b3c49..00000000
--- a/src/py123d/script/config/common/scene_filter/nuplan_sim_agent.yaml
+++ /dev/null
@@ -1,117 +0,0 @@
-_target_: py123d.datatypes.scene.scene_filter.SceneFilter
-_convert_: 'all'
-
-split_types: null
-split_names:
- - "nuplan_mini_test"
-log_names: null
-
-
-locations: null
-scene_uuids:
-- "796266a84fd65c71"
-- "1ef8b2f08cd65f9a"
-- "8922138735b45195"
-- "d851006613b85e7b"
-- "38ef797d14445e44"
-- "a317258b9849542f"
-- "83ca370d430e59bc"
-- "3159261edbf95519"
-- "d3d80c3089905994"
-- "8d3b06d2311955cb"
-- "1d33208c85c95470"
-- "e333523147ba5cc9"
-- "94fa6640d9f25c43"
-- "0eb46626e2345bae"
-- "c20a47af332e58a0"
-- "c4153e20ef9f5429"
-- "51dc7988d78d545f"
-- "7348ea10ac0d58fd"
-- "32abded47532514d"
-- "4c8bd4c5b1015b82"
-- "24334adc21055f43"
-- "c1afafec7c1f55c6"
-- "61af5c7ad0a75a17"
-- "4dc74226975f5060"
-- "f3b9c29ceb205216"
-- "6fd1090347e05103"
-- "8564bae63ebf5e42"
-- "4656fe861aed509e"
-- "f4aa5f3bf2095608"
-- "5dc7c5ab6ad450cb"
-- "852e81b1ce2d5ce0"
-- "9634e64f32b0584d"
-- "765a3a60360950d8"
-- "d726695b080d5662"
-- "646d3f76770a5159"
-- "cc0ba43b91395054"
-- "5a30039504185834"
-- "396ced8ecffe5d92"
-- "2583343f0e095b76"
-- "940480c560a459dd"
-- "c106ea1d18fa5b46"
-- "8a1f92741cf25e94"
-- "1e1d33afe06a5dc9"
-- "cd1295e8085d5f8a"
-- "6c68b2a6d8cc5ea9"
-- "be3c5bf59f675e00"
-- "053be6c85efa5e17"
-- "14756446a3a65d51"
-- "f8b10bcaa98a53e8"
-- "d72ab3f1878c51a0"
-- "0ee073448a2657dc"
-- "5726da45814a5649"
-- "f8aaded37e735549"
-- "c7f921589ed956f3"
-- "3852b5feb1215add"
-- "fc931d0c156057b5"
-- "a8d21fc93a3755c6"
-- "3ee7077fadab55aa"
-- "5845b5db282858b9"
-- "36908fc5872d538d"
-- "a075179737ad5140"
-- "754820401e085625"
-- "469cb3e6d6055f87"
-- "230150f0e8ff5d4d"
-- "7a8c727978005f14"
-- "50038631e81b58c8"
-- "b7ca09eed06d5090"
-- "780103da783e597b"
-- "0c72e0f43f9e5dc5"
-- "68ad7730c5885243"
-- "2cdec17051d25bb4"
-- "fe17bdf412f755e4"
-- "53632970c56a5207"
-- "6ba4abec7990550c"
-- "023028c423715317"
-- "963ff52c54685dd3"
-- "fdef7ee7067859d3"
-- "79d99553e8c15302"
-- "e3eaafd948b15069"
-- "123dfe0afe85550f"
-- "956bfd38d4e75fea"
-- "c979bfba27385ba1"
-- "74e40b83c2e55f8d"
-- "2f6a2c0802b75922"
-- "a190fbdd675052e7"
-- "4c59ff2f17cd5b12"
-- "adb469e65ae256aa"
-- "00273245ce175544"
-- "49ce38623a9d56fc"
-- "e00f8ae7c6fd548e"
-- "c525191820d65766"
-- "2e190d8d1d435577"
-- "2e8e6e5238495570"
-- "6629f76b367b5305"
-- "de8590ad0fc15d29"
-- "f520d009788d5346"
-- "e50e1bb8fce354b6"
-- "da55b0a04d465781"
-- "652ae5b39e575205"
-- "2d767bf67a655fb6"
-
-timestamp_threshold_s: null
-ego_displacement_minimum_m: null
-
-duration_s: 15.1
-history_s: 1.0
diff --git a/src/py123d/script/config/common/scene_filter/viser_scenes.yaml b/src/py123d/script/config/common/scene_filter/viser_scenes.yaml
index c52ec40d..1b619af1 100644
--- a/src/py123d/script/config/common/scene_filter/viser_scenes.yaml
+++ b/src/py123d/script/config/common/scene_filter/viser_scenes.yaml
@@ -8,13 +8,13 @@ log_names: null
 
 locations: null
 scene_uuids: null
-timestamp_threshold_s: 10.0
+timestamp_threshold_s: null
 ego_displacement_minimum_m: null
 
-duration_s: 10.0
-history_s: 0.0
+duration_s: null
+history_s: null
 
-camera_types: null
+pinhole_camera_types: null
 
 max_num_scenes: null
 shuffle: True
diff --git a/src/py123d/script/config/conversion/default_conversion.yaml b/src/py123d/script/config/conversion/default_conversion.yaml
index 4adf788b..48e55dcc 100644
--- a/src/py123d/script/config/conversion/default_conversion.yaml
+++ b/src/py123d/script/config/conversion/default_conversion.yaml
@@ -1,7 +1,7 @@
 hydra:
+  output_subdir: null
   run:
     dir: .
-  output_subdir: null
   searchpath:
     - pkg://py123d.script.config
     - pkg://py123d.script.config.common
diff --git a/src/py123d/script/config/viser/default_viser.yaml b/src/py123d/script/config/viser/default_viser.yaml
index 4a1c88bd..b9ff6f9b 100644
--- a/src/py123d/script/config/viser/default_viser.yaml
+++ b/src/py123d/script/config/viser/default_viser.yaml
@@ -1,7 +1,7 @@
 hydra:
+  output_subdir: null
 run:
   dir: .
-  output_subdir: null
 searchpath:
   - pkg://py123d.script.config
   - pkg://py123d.script.config.common
@@ -14,4 +14,61 @@ defaults:
   - override scene_filter: viser_scenes
   - _self_
 
-port_number: 8080
+viser_config:
+  _target_: py123d.visualization.viser.viser_config.ViserConfig
+  _convert_: 'all'
+
+  # Server
+  server_host: "localhost"
+  server_port: 8080
+  server_label: "123D Viser Server"
+  server_verbose: true
+
+  # Theme
+  theme_control_layout: "floating" # ["floating", "collapsible", "fixed"]
+  theme_control_width: "large" # ["small", "medium", "large"]
+  theme_dark_mode: false
+  theme_show_logo: true
+  theme_show_share_button: true
+  # theme_brand_color: # Loads ellis blue by default
+
+  # Play Controls
+  is_playing: false
+  playback_speed: 1.0
+
+  # Map
+  map_visible: true
+  map_radius: 200.0
+  map_non_road_z_offset: 0.1
+  map_requery: true
+
+  # Bounding boxes
+  bounding_box_visible: true
+  bounding_box_type: "mesh" # ["mesh", "lines"]
+  bounding_box_line_width: 4.0
+
+  # Pinhole Cameras
+  # -> Frustum
+  camera_frustum_visible: true
+  # camera_frustum_types: null # Loads all by default
+  camera_frustum_scale: 1.0
+  camera_frustum_image_scale: 0.25
+
+  # -> GUI
+  camera_gui_visible: true
+  camera_gui_types: []
+  camera_gui_image_scale: 0.25
+
+  # Fisheye MEI Cameras
+  # -> Frustum
+  fisheye_frustum_visible: true
+  fisheye_mei_camera_frustum_visible: true
+  # fisheye_mei_camera_frustum_types: [] # Loads all by default
+  fisheye_frustum_scale: 1.0
+  fisheye_frustum_image_scale: 0.25
+
+  # LiDAR
+  lidar_visible: true
+  # lidar_types: [] # Loads all by default
+  lidar_point_size: 0.05
+  lidar_point_shape: "circle" # ["square", "diamond", "circle", "rounded", "sparkle"]
diff --git a/src/py123d/script/run_conversion.py b/src/py123d/script/run_conversion.py
index c2510b9b..52ed5cd6 100644
--- a/src/py123d/script/run_conversion.py
+++ b/src/py123d/script/run_conversion.py
@@ -13,7 +13,7 @@ from py123d.script.utils.dataset_path_utils import setup_dataset_paths
 
 logging.basicConfig(level=logging.INFO)
-logger = logging.getLogger(__name__)
+logger = logging.getLogger()
 
 CONFIG_PATH = "config/conversion"
 CONFIG_NAME = "default_conversion"
diff --git a/src/py123d/script/run_viser.py b/src/py123d/script/run_viser.py
index cc89e024..df14382c 100644
--- a/src/py123d/script/run_viser.py
+++ b/src/py123d/script/run_viser.py
@@ -5,6 +5,7 @@
 from py123d.script.builders.scene_builder_builder import build_scene_builder
 from py123d.script.builders.scene_filter_builder import build_scene_filter
+from py123d.script.builders.viser_config_builder import build_viser_config
 from py123d.script.run_conversion import build_worker
 from py123d.script.utils.dataset_path_utils import setup_dataset_paths
 from py123d.visualization.viser.viser_viewer import ViserViewer
 
@@ -31,8 +32,11 @@ def main(cfg: DictConfig) -> None:
     # Get scenes from scene builder
     scenes = scene_builder.get_scenes(scene_filter, worker=worker)
 
+    # Build Viser config
+    viser_config = build_viser_config(cfg.viser_config)
+
     # Launch Viser viewer with the scenes
-    ViserViewer(scenes=scenes)
+    ViserViewer(scenes=scenes, viser_config=viser_config)
 
 
 if __name__ == "__main__":
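
Note: because the viewer settings now live under the `viser_config` group, individual fields can be overridden on the command line (e.g. `python src/py123d/script/run_viser.py viser_config.server_port=7007`), or the dataclass can be built directly when bypassing Hydra. A sketch, assuming `ViserConfig` keeps defaults for every field listed above:

    # Direct construction without Hydra; field names/defaults are those shown in
    # default_viser.yaml and viser_config.py in this patch.
    from py123d.visualization.viser.viser_config import ViserConfig

    viser_config = ViserConfig(
        server_port=7007,      # instead of the default 8080
        theme_dark_mode=True,  # defaults to False
        lidar_point_size=0.1,  # defaults to 0.05
    )
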
diff --git a/src/py123d/visualization/viser/viser_config.py b/src/py123d/visualization/viser/viser_config.py
index b96ff96a..be22f7f5 100644
--- a/src/py123d/visualization/viser/viser_config.py
+++ b/src/py123d/visualization/viser/viser_config.py
@@ -1,6 +1,7 @@
 from dataclasses import dataclass, field
-from typing import List, Literal, Optional, Tuple
+from typing import List, Literal, Optional, Tuple, Type, Union
 
+from py123d.common.utils.enums import SerialIntEnum
 from py123d.datatypes.sensors.fisheye_mei_camera import FisheyeMEICameraType
 from py123d.datatypes.sensors.lidar import LiDARType
 from py123d.datatypes.sensors.pinhole_camera import PinholeCameraType
@@ -45,7 +46,7 @@ class ViserConfig:
     theme_dark_mode: bool = False
     theme_show_logo: bool = True
     theme_show_share_button: bool = True
-    theme_brand_color: Optional[Tuple[int, int, int]] = ELLIS_5[0].rgb
+    theme_brand_color: Optional[Tuple[int, int, int]] = ELLIS_5[4].rgb
 
     # Play Controls
     is_playing: bool = False
@@ -92,3 +93,30 @@
 
     # internal use
     _force_map_update: bool = False
+
+    def __post_init__(self):
+        def _resolve_enum_arguments(
+            serial_enum_cls: Type[SerialIntEnum], values: Optional[List[Union[int, str, SerialIntEnum]]]
+        ) -> Optional[List[SerialIntEnum]]:
+            # Coerce ints, strings, or enum members into the given enum class.
+            if values is None:
+                return None
+            assert isinstance(values, list), f"values must be a list of {serial_enum_cls.__name__}"
+            return [serial_enum_cls.from_arbitrary(value) for value in values]
+
+        self.camera_frustum_types = _resolve_enum_arguments(
+            PinholeCameraType,
+            self.camera_frustum_types,
+        )
+        self.camera_gui_types = _resolve_enum_arguments(
+            PinholeCameraType,
+            self.camera_gui_types,
+        )
+        self.fisheye_mei_camera_frustum_types = _resolve_enum_arguments(
+            FisheyeMEICameraType,
+            self.fisheye_mei_camera_frustum_types,
+        )
+        self.lidar_types = _resolve_enum_arguments(
+            LiDARType,
+            self.lidar_types,
+        )
diff --git a/src/py123d/visualization/viser/viser_viewer.py b/src/py123d/visualization/viser/viser_viewer.py
index 3d5dc044..1fb5559e 100644
--- a/src/py123d/visualization/viser/viser_viewer.py
+++ b/src/py123d/visualization/viser/viser_viewer.py
@@ -43,24 +43,24 @@
         TitlebarButton(
             text="Getting Started",
             icon=None,
-            href="https://danieldauner.github.io/123d",
+            href="https://danieldauner.github.io/py123d",
         ),
         TitlebarButton(
             text="Github",
             icon="GitHub",
-            href="https://github.com/DanielDauner/123d",
+            href="https://github.com/DanielDauner/py123d",
         ),
         TitlebarButton(
             text="Documentation",
             icon="Description",
-            href="https://danieldauner.github.io/123d",
+            href="https://danieldauner.github.io/py123d",
         ),
     )
     image = TitlebarImage(
-        image_url_light="https://danieldauner.github.io/123d/_static/logo_black.png",
-        image_url_dark="https://danieldauner.github.io/123d/_static/logo_white.png",
+        image_url_light="https://danieldauner.github.io/py123d/_static/logo_black.png",
+        image_url_dark="https://danieldauner.github.io/py123d/_static/logo_white.png",
         image_alt="123D",
-        href="https://danieldauner.github.io/123d/",
+        href="https://danieldauner.github.io/py123d/",
     )
 
     titlebar_theme = TitlebarConfig(buttons=buttons, image=image)
@@ -396,5 +396,4 @@ def _get_scene_info_markdown(scene: AbstractScene) -> str:
     - Location: {scene.log_metadata.location if scene.log_metadata.location else 'N/A'}
     - UUID: {scene.uuid}
     """
-    # - UUID: {scene.log_name}
     return markdown
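
Note: the `__post_init__` hook above means sensor-type lists no longer have to be enum instances when the config is instantiated; ints, strings, and enum members are all accepted. A sketch of the accepted inputs — the concrete member names/values (`"CAM_F0"`, `1`, `CAM_L0`) are illustrative assumptions, not taken from this patch:

    # Mixed inputs are coerced via SerialIntEnum.from_arbitrary in __post_init__.
    from py123d.datatypes.sensors.pinhole_camera import PinholeCameraType
    from py123d.visualization.viser.viser_config import ViserConfig

    config = ViserConfig(camera_frustum_types=["CAM_F0", 1, PinholeCameraType.CAM_L0])
    assert all(isinstance(t, PinholeCameraType) for t in config.camera_frustum_types)
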
From a7052f860eedfbe43339f818c0c14c6c89efdeea Mon Sep 17 00:00:00 2001
From: Daniel Dauner
Date: Thu, 6 Nov 2025 10:52:55 +0100
Subject: [PATCH 144/145] Minor change to the `README.md`

---
 README.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/README.md b/README.md
index 0e7e22c4..46440e04 100644
--- a/README.md
+++ b/README.md
@@ -4,5 +4,5 @@
 Logo
-
-      123D: One Library for 2D and 3D Driving Dataset
-
+
+      123D: One Library for 2D and 3D Driving Datasets
+
From 72a6bd39c674cebbe4dc8f983419b05848e33d39 Mon Sep 17 00:00:00 2001
From: Daniel Dauner
Date: Thu, 6 Nov 2025 10:55:15 +0100
Subject: [PATCH 145/145] Change branch for docs.

---
 .github/workflows/deploy-docs.yaml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/.github/workflows/deploy-docs.yaml b/.github/workflows/deploy-docs.yaml
index 1bf5e6ab..aff1e7dc 100644
--- a/.github/workflows/deploy-docs.yaml
+++ b/.github/workflows/deploy-docs.yaml
@@ -3,7 +3,7 @@ name: docs
 on:
   push:
     branches:
-      - dev_v0.0.7 # Change this to your branch name (e.g., docs, dev, etc.)
+      - main # Change this to your branch name (e.g., docs, dev, etc.)
   workflow_dispatch: # Allows manual triggering
 
 permissions: