-
Notifications
You must be signed in to change notification settings - Fork 97
Commit
This commit does not belong to any branch on this repository, and may belong to a fork outside of the repository.
* New branch created for instance camera/merging of Point-Cloud LiDAR * Finished instance camera and related APIs * Formatted * Fixing for tests * Fixing for format * Accommodate according to PR review by Quanyi * Formatting
- Loading branch information
1 parent
cc3ad2e
commit 19ade86
Showing
3 changed files
with
238 additions
and
0 deletions.
There are no files selected for viewing
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,48 @@ | ||
from metadrive.component.sensors.semantic_camera import SemanticCamera | ||
import cv2 | ||
from panda3d.core import GeoMipTerrain, PNMImage | ||
from panda3d.core import RenderState, LightAttrib, ColorAttrib, ShaderAttrib, TextureAttrib, LVecBase4, MaterialAttrib | ||
from metadrive.constants import Semantics | ||
from metadrive.component.sensors.base_camera import BaseCamera | ||
from metadrive.constants import CamMask | ||
from metadrive.constants import RENDER_MODE_NONE | ||
from metadrive.engine.asset_loader import AssetLoader | ||
from metadrive.engine.engine_utils import get_engine | ||
import random | ||
|
||
|
||
class InstanceCamera(SemanticCamera):
    """
    Camera rendering an instance-segmentation view: every spawned object is
    flat-shaded with the unique color assigned to it in the engine's id->color
    mapping, so each pixel identifies the object instance it belongs to.
    """
    # Reuses the semantic camera's render mask so both cameras see the same scene graph.
    CAM_MASK = CamMask.SemanticCam

    def __init__(self, width, height, engine, *, cuda=False):
        """
        Args:
            width: horizontal resolution of the rendered image, in pixels.
            height: vertical resolution of the rendered image, in pixels.
            engine: the engine owning this sensor.
            cuda: keyword-only; enable CUDA-accelerated image retrieval.
        """
        super().__init__(width, height, engine, cuda=cuda)

    def track(self, base_object):
        """Re-apply per-instance tag states before delegating tracking to the parent camera."""
        self._setup_effect()
        super().track(base_object)

    def _setup_effect(self):
        """
        Use tag to apply color to different object class
        Returns: None
        """
        # setup camera
        engine = get_engine()  # hoisted: original called get_engine() three times
        if engine is None:
            # No engine available yet: fall back to the semantic coloring scheme.
            super()._setup_effect()
        else:
            mapping = engine.id_c
            spawned_objects = engine.get_objects()
            # Tag each spawned object with its id so the camera can pick a per-object state.
            for obj_id, obj in spawned_objects.items():  # renamed: `id` shadowed the builtin
                obj.origin.setTag("id", obj_id)
            cam = self.get_cam().node()
            cam.setTagStateKey("id")
            # Default state: flat black with shaders/lights/textures off, so anything
            # without an id tag renders as background.
            cam.setInitialState(
                RenderState.make(
                    ShaderAttrib.makeOff(), LightAttrib.makeAllOff(), TextureAttrib.makeOff(),
                    ColorAttrib.makeFlat((0, 0, 0, 1)), 1
                )
            )
            # One flat-color render state per object id, from the engine's id->color mapping.
            for obj_id, c in mapping.items():
                cam.setTagState(obj_id, RenderState.make(ColorAttrib.makeFlat((c[0], c[1], c[2], 1)), 1))
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,86 @@ | ||
import pytest | ||
|
||
from metadrive.component.sensors.instance_camera import InstanceCamera | ||
from metadrive.envs.metadrive_env import MetaDriveEnv | ||
import numpy as np | ||
# Configurations exercised by the blackbox shape test; keys become the pytest ids.
blackbox_test_configs = {
    # "standard": {"stack_size": 3, "width": 256, "height": 128, "rgb_clip": True},
    "small": {"stack_size": 1, "width": 64, "height": 32, "rgb_clip": True},
}
|
||
|
||
@pytest.mark.parametrize("config", list(blackbox_test_configs.values()), ids=list(blackbox_test_configs.keys()))
def test_instance_cam(config, render=False):
    """
    Test the output shape of Instance camera. This can NOT make sure the correctness of rendered image but only for
    checking the shape of image output and image retrieve pipeline

    Args:
        config: test parameter
        render: render with cv2

    Returns: None
    """
    env = MetaDriveEnv(
        {
            "num_scenarios": 1,
            "traffic_density": 0.1,
            "map": "S",
            "show_terrain": False,
            "start_seed": 4,
            "stack_size": config["stack_size"],
            "vehicle_config": dict(image_source="camera"),
            "sensors": {
                "camera": (InstanceCamera, config["width"], config["height"])
            },
            "interface_panel": ["dashboard", "camera"],
            "image_observation": True,  # it is a switch telling metadrive to use rgb as observation
            "rgb_clip": config["rgb_clip"],  # clip rgb to range(0,1) instead of (0, 255)
        }
    )
    env.reset()
    # The engine partitions a fixed palette of 4096 colors into free and occupied sets.
    base_free = len(env.engine.COLORS_FREE)
    base_occupied = len(env.engine.COLORS_OCCUPIED)
    assert base_free + base_occupied == 4096
    try:
        import cv2
        import time
        num_steps = 10
        start = time.time()
        for _ in range(num_steps):
            o, r, tm, tc, info = env.step([0, 1])
            assert env.observation_space.contains(o)
            # Observation is stacked channel-last: (height, width, channels, stack).
            assert o["image"].shape == (
                config["height"], config["width"], InstanceCamera.num_channels, config["stack_size"]
            )
            image = o["image"][..., -1]
            image = image.reshape(-1, 3)
            unique_colors = np.unique(image, axis=0)
            # Making sure every color observed corresponds to an object.
            for unique_color in unique_colors:
                # BUGFIX: use .any() so only pure black (the background) is skipped.
                # The original `(unique_color != (0, 0, 0)).all()` also skipped every
                # color that merely contained one zero channel, silently dropping
                # those instances from verification.
                if (unique_color != 0).any():
                    color = unique_color.tolist()
                    # Channel order is reversed to match the engine's keys
                    # (presumably BGR image vs RGB keys — TODO confirm);
                    # in engine, we use 5-digit floats for keys.
                    color = (
                        round(color[2], 5), round(color[1], 5), round(color[0], 5)
                    )
                    assert color in env.engine.COLORS_OCCUPIED
                    assert color not in env.engine.COLORS_FREE
                    assert color in env.engine.c_id.keys()
                    assert env.engine.id_c[env.engine.c_id[color]] == color  # Making sure the color-id is a bijection
            assert len(env.engine.c_id.keys()) == len(env.engine.COLORS_OCCUPIED)
            assert len(env.engine.id_c.keys()) == len(env.engine.COLORS_OCCUPIED)
            assert len(env.engine.COLORS_FREE) + len(env.engine.COLORS_OCCUPIED) == 4096
            # Making sure every object in the engine (not necessarily observable) has a corresponding color.
            for obj_id in env.engine.get_objects():  # renamed: `id`/`object` shadowed builtins
                assert obj_id in env.engine.id_c.keys()
            if render:
                cv2.imshow('img', o["image"][..., -1])
                cv2.waitKey(1)
        # BUGFIX: the original printed 10 / elapsed while stepping only 9 times (range(1, 10)).
        print("FPS:", num_steps / (time.time() - start))
    finally:
        env.close()
|
||
|
||
# Manual entry point: run the small config with a live cv2 preview window.
if __name__ == '__main__':
    test_instance_cam(config=blackbox_test_configs["small"], render=True)
    # NOTE(review): this assignment is never read — looks like leftover scratch code; confirm and remove.
    my_dict = {(0, 0, 0): "Hello, World!"}