-
I'm running the toy example; I went through the tutorial to generate the scene: https://github.com/openMVG/openMVG/wiki/OpenMVG-on-your-image-dataset

I'm not sure I'm parsing the poses correctly, but the results I get don't look right, whether due to a mistake of mine or something else. I attach a screenshot and the code I'm using to visualize.

UPDATE (11/02/2023): code and image updated with the pose fixed.

Code:

```python
"""Example script to load data from openMVG and log it to rerun."""
from __future__ import annotations

import argparse
import json
from pathlib import Path

import kornia as K
import rerun as rr
import torch


def _find_extrinsics(id_pose: int, all_extrinsics: list[dict]) -> dict | None:
    """Find the extrinsics corresponding to the given pose id.

    Args:
        id_pose: The id of the pose.
        all_extrinsics: The list of extrinsics entries.

    Returns:
        The extrinsics corresponding to the given pose id, or None if not found.
    """
    for extrinsics in all_extrinsics:
        if extrinsics["key"] == id_pose:
            return extrinsics["value"]
    return None


def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("--data", type=Path, required=True, help="Path to the openMVG json file.")
    parser.add_argument("--data-images", type=Path, help="Path to the images folder.")
    args = parser.parse_args()

    rr.init("rerun_example_my_data")
    rr.connect()
    rr.log("/", rr.ViewCoordinates.RIGHT_HAND_Y_DOWN, timeless=True)

    # Load data.
    with Path(args.data).open(encoding="utf-8") as f:
        data = json.load(f)

    # Add cameras and images.
    for view in data["views"]:
        view_data = view["value"]["ptr_wrapper"]["data"]
        rr.set_time_sequence("view", view_data["id_view"])

        extrinsics: dict | None = _find_extrinsics(view_data["id_pose"], data["extrinsics"])
        if extrinsics is None:
            continue

        # openMVG stores the rotation R and the camera center C;
        # the world-to-camera translation is t = -R @ C.
        rotation = torch.tensor(extrinsics["rotation"]).reshape(3, 3)
        quat_wxyz = K.geometry.quaternion.Quaternion.from_matrix(rotation)
        quat_xyzw = quat_wxyz.data.roll(-1)  # kornia uses wxyz; rerun expects xyzw
        center = torch.tensor(extrinsics["center"])
        translation = -rotation @ center

        rr.log(
            "world/camera",
            rr.Transform3D(
                translation=translation.detach().numpy(),
                rotation=rr.Quaternion(xyzw=quat_xyzw.detach().numpy()),
                from_parent=True,
            ),
        )
        rr.log("world/camera", rr.ViewCoordinates.RDF, timeless=True)  # X=Right, Y=Down, Z=Forward

        intrinsics = data["intrinsics"][view_data["id_intrinsic"]]["value"]["ptr_wrapper"]["data"]
        rr.log(
            "world/camera",
            rr.Pinhole(
                resolution=[intrinsics["width"], intrinsics["height"]],
                focal_length=intrinsics["focal_length"],
                principal_point=intrinsics["principal_point"],
            ),
        )

        # Add the 2D keypoints observed in this view.
        keypoints = [
            obs["value"]["x"]
            for structure in data["structure"]
            for obs in structure["value"]["observations"]
            if obs["key"] == view_data["id_view"]
        ]
        rr.log("world/camera/image/keypoints", rr.Points2D(keypoints, colors=[34, 138, 167]))

        # Add image data if available.
        if args.data_images:
            image_path = args.data_images / view_data["filename"]
            rr.log("world/camera/image", rr.ImageEncoded(path=image_path))

    # Add 3D points.
    points = [structure["value"]["X"] for structure in data["structure"]]
    rr.log("world/points", rr.Points3D(points))


if __name__ == "__main__":
    main()
```

JSON file added: sfm_data.json
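For anyone comparing against their own export, here is a minimal sketch of the sfm_data.json layout the script assumes. The field names are taken from the keys the code reads; the values are hypothetical placeholders, and a real file contains more fields:

```python
# Hypothetical, abbreviated sfm_data.json content shown as a Python dict;
# only the keys the script reads, with placeholder values.
sfm_data = {
    "views": [
        {"value": {"ptr_wrapper": {"data": {
            "id_view": 0, "id_pose": 0, "id_intrinsic": 0,
            "filename": "image_000.jpg",
        }}}},
    ],
    "intrinsics": [
        {"value": {"ptr_wrapper": {"data": {
            "width": 1920, "height": 1080,
            "focal_length": 1200.0,
            "principal_point": [960.0, 540.0],
        }}}},
    ],
    "extrinsics": [
        {"key": 0, "value": {
            "rotation": [[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]],
            "center": [0.0, 0.0, 0.0],  # camera center in world coordinates
        }},
    ],
    "structure": [
        {"value": {
            "X": [0.1, 0.2, 0.3],  # 3D point in world coordinates
            "observations": [
                {"key": 0, "value": {"x": [640.0, 360.0]}},  # 2D keypoint in view 0
            ],
        }},
    ],
}
```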
Replies: 3 comments
-
You can see here the rerun logger I made for rerun 0.9: beccf88. The point cloud and camera positions are correct; it is just a world-to-camera vs. camera-to-world issue. See how I set the transform there to adjust your code.
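For anyone hitting the same thing, a minimal sketch of the two conventions (hypothetical values, not the code from that commit): openMVG stores the world-to-camera rotation R and the camera center C, and the two directions are related by a simple inversion.

```python
import torch

# Hypothetical stand-ins for one openMVG extrinsics entry.
R = torch.eye(3)                   # world-to-camera rotation
C = torch.tensor([1.0, 2.0, 3.0])  # camera center in world coordinates

# World-to-camera: x_cam = R @ (x_world - C) = R @ x_world + t, with t = -R @ C.
t_world_to_cam = -R @ C

# Camera-to-world (the inverse): x_world = R.T @ x_cam + C.
R_cam_to_world = R.T
t_cam_to_world = C
```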
-
I will see if I can update this sample to fit the new rerun API and merge it into develop.
-
@pmoulon thanks, I managed to make it work with your fix.