diff --git a/src/opendr/perception/panoptic_segmentation/README.md b/src/opendr/perception/panoptic_segmentation/README.md
index 87e95ef627..edf907319f 100644
--- a/src/opendr/perception/panoptic_segmentation/README.md
+++ b/src/opendr/perception/panoptic_segmentation/README.md
@@ -39,7 +39,7 @@ Please note that the original repository is heavily based on
 
 **Prepare the downloaded Cityscapes dataset** (see the [datasets' readme](./datasets/README.md) as well)
 ```python
-from opendr.perception.panoptic_segmentation.datasets import CityscapesDataset
+from opendr.perception.panoptic_segmentation import CityscapesDataset
 DOWNLOAD_PATH = '~/data/cityscapes_raw'
 DATA_ROOT = '~/data/cityscapes'
 CityscapesDataset.prepare_data(DOWNLOAD_PATH, DATA_ROOT)
@@ -49,7 +49,7 @@ CityscapesDataset.prepare_data(DOWNLOAD_PATH, DATA_ROOT)
 ```python
 import mmcv
 from opendr.engine.data import Image
-from opendr.perception.panoptic_segmentation.efficient_ps import EfficientPsLearner
+from opendr.perception.panoptic_segmentation import EfficientPsLearner
 DATA_ROOT = '~/data/cityscapes'
 image_filenames = [
     f'{DATA_ROOT}/val/images/lindau_000001_000019.png',
@@ -58,7 +58,7 @@ image_filenames = [
 ]
 images = [Image(mmcv.imread(f)) for f in image_filenames]
 learner = EfficientPsLearner()
-learner.load('model.pth')
+learner.load('model.pth') # alternatively, one can just specify the path to the folder
 predictions = learner.infer(images)
 for image, prediction in zip(images, predictions):
     EfficientPsLearner.visualize(image, prediction)
@@ -66,19 +66,17 @@ for image, prediction in zip(images, predictions):
 
 **Run evaluation**
 ```python
-from opendr.perception.panoptic_segmentation.datasets import CityscapesDataset
-from opendr.perception.panoptic_segmentation.efficient_ps import EfficientPsLearner
+from opendr.perception.panoptic_segmentation import EfficientPsLearner, CityscapesDataset
 DATA_ROOT = '~/data/cityscapes'
 val_dataset = CityscapesDataset(path=f'{DATA_ROOT}/val')
 learner = EfficientPsLearner()
-learner.load('model.pth')
+learner.load('model.pth') # alternatively, one can just specify the path to the folder
 learner.eval(val_dataset, print_results=True)
 ```
 
 **Run training**
 ```python
-from opendr.perception.panoptic_segmentation.datasets import CityscapesDataset
-from opendr.perception.panoptic_segmentation.efficient_ps import EfficientPsLearner
+from opendr.perception.panoptic_segmentation import EfficientPsLearner, CityscapesDataset
 DATA_ROOT = '~/data/cityscapes'
 train_dataset = CityscapesDataset(path=f'{DATA_ROOT}/training')
 val_dataset = CityscapesDataset(path=f'{DATA_ROOT}/val')
diff --git a/src/opendr/perception/panoptic_segmentation/__init__.py b/src/opendr/perception/panoptic_segmentation/__init__.py
new file mode 100644
index 0000000000..f046d14245
--- /dev/null
+++ b/src/opendr/perception/panoptic_segmentation/__init__.py
@@ -0,0 +1,4 @@
+from opendr.perception.panoptic_segmentation.datasets import CityscapesDataset, KittiDataset
+from opendr.perception.panoptic_segmentation.efficient_ps import EfficientPsLearner
+
+__all__ = ['CityscapesDataset', 'KittiDataset', 'EfficientPsLearner']
diff --git a/src/opendr/perception/panoptic_segmentation/datasets/README.md b/src/opendr/perception/panoptic_segmentation/datasets/README.md
index 67c235d550..6e06697acc 100644
--- a/src/opendr/perception/panoptic_segmentation/datasets/README.md
+++ b/src/opendr/perception/panoptic_segmentation/datasets/README.md
@@ -6,7 +6,7 @@
 2. Extract both files.
 3. Convert the files to the expected folder structure and generate panoptic ground truth data for evaluation
 ```python
-from opendr.perception.panoptic_segmentation.datasets import CityscapesDataset
+from opendr.perception.panoptic_segmentation import CityscapesDataset
 DOWNLOAD_PATH = '~/data/cityscapes_raw'
 DATA_ROOT = '~/data/cityscapes'
 CityscapesDataset.prepare_data(DOWNLOAD_PATH, DATA_ROOT)
@@ -18,7 +18,7 @@ CityscapesDataset.prepare_data(DOWNLOAD_PATH, DATA_ROOT)
 2. Extract the file.
 3. Convert the files to the expected folder structure and generate panoptic ground truth data for evaluation
 ```python
-from opendr.perception.panoptic_segmentation.datasets import KittiDataset
+from opendr.perception.panoptic_segmentation import KittiDataset
 DOWNLOAD_PATH = '~/data/KITTI-panoptic-segmentation-dataset'
 DATA_ROOT = '~/data/kitti'
 KittiDataset.prepare_data(DOWNLOAD_PATH, DATA_ROOT)
diff --git a/src/opendr/perception/panoptic_segmentation/efficient_ps/example_usage.py b/src/opendr/perception/panoptic_segmentation/efficient_ps/example_usage.py
index 608a0da72e..ec0dddb353 100644
--- a/src/opendr/perception/panoptic_segmentation/efficient_ps/example_usage.py
+++ b/src/opendr/perception/panoptic_segmentation/efficient_ps/example_usage.py
@@ -16,8 +16,7 @@
 import cv2
 
 from opendr.engine.data import Image
-from opendr.perception.panoptic_segmentation.datasets import CityscapesDataset, KittiDataset
-from opendr.perception.panoptic_segmentation.efficient_ps import EfficientPsLearner
+from opendr.perception.panoptic_segmentation import EfficientPsLearner, CityscapesDataset, KittiDataset
 
 DATA_ROOT = '/home/USER/data/efficientPS'
 CITYSCAPES_ROOT = f'{DATA_ROOT}/converted_datasets/cityscapes'
diff --git a/tests/sources/tools/perception/panoptic_segmentation/efficient_ps/test_efficient_ps.py b/tests/sources/tools/perception/panoptic_segmentation/efficient_ps/test_efficient_ps.py
index 81580cc9d4..d15e1d4f5a 100644
--- a/tests/sources/tools/perception/panoptic_segmentation/efficient_ps/test_efficient_ps.py
+++ b/tests/sources/tools/perception/panoptic_segmentation/efficient_ps/test_efficient_ps.py
@@ -21,8 +21,7 @@
 
 from opendr.engine.data import Image
 from opendr.engine.target import Heatmap
-from opendr.perception.panoptic_segmentation.datasets import CityscapesDataset
-from opendr.perception.panoptic_segmentation.efficient_ps import EfficientPsLearner
+from opendr.perception.panoptic_segmentation import EfficientPsLearner, CityscapesDataset
 
 
 def rmfile(path):
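For reference, below is a minimal sketch of how the new top-level exports introduced by `__init__.py` are meant to be used together. It only combines imports and calls that already appear in the updated READMEs (`load`, `infer`, `eval`, `CityscapesDataset(path=...)`); the checkpoint path `model.pth` and the data root `~/data/cityscapes` are placeholders from the README, not files shipped with this change.

```python
# Sketch of the simplified import surface added by the new
# opendr.perception.panoptic_segmentation.__init__ module.
# Assumes (placeholders): a trained EfficientPS checkpoint at 'model.pth' and a
# Cityscapes split prepared under '~/data/cityscapes' as described in datasets/README.md.
import mmcv

from opendr.engine.data import Image
from opendr.perception.panoptic_segmentation import EfficientPsLearner, CityscapesDataset

learner = EfficientPsLearner()
learner.load('model.pth')  # alternatively, the path to the folder containing the model files

# Single-image inference, mirroring the README example.
image = Image(mmcv.imread('~/data/cityscapes/val/images/lindau_000001_000019.png'))
predictions = learner.infer([image])

# Evaluation on the converted validation split.
val_dataset = CityscapesDataset(path='~/data/cityscapes/val')
learner.eval(val_dataset, print_results=True)
```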