diff --git a/src/superannotate/lib/core/usecases/models.py b/src/superannotate/lib/core/usecases/models.py
index 4bc6d1f52..83c8048ee 100644
--- a/src/superannotate/lib/core/usecases/models.py
+++ b/src/superannotate/lib/core/usecases/models.py
@@ -479,7 +479,7 @@ def execute(self):
         images = self._service_provider.items.list_by_names(
             project=self._project, folder=self._folder, names=self._images_list
         ).data
-        image_ids = [image.uuid for image in images]
+        image_ids = [image.id for image in images]
         image_names = [image.name for image in images]
 
         if not len(image_names):
@@ -502,36 +502,36 @@ def execute(self):
             ml_model_id=ml_model.id,
             image_ids=image_ids,
         )
-        if not res.ok:
-            return self._response.data
-
-        success_images = []
-        failed_images = []
-        while len(success_images) + len(failed_images) != len(image_ids):
-            images_metadata = self._service_provider.items.list_by_names(
-                project=self._project, folder=self._folder, names=self._images_list
-            ).data
-
-            success_images = [
-                img.name
-                for img in images_metadata
-                if img.prediction_status
-                == constances.SegmentationStatus.COMPLETED.value
-            ]
-            failed_images = [
-                img.name
-                for img in images_metadata
-                if img.prediction_status
-                == constances.SegmentationStatus.FAILED.value
-            ]
-
-            complete_images = success_images + failed_images
-            logger.info(
-                f"prediction complete on {len(complete_images)} / {len(image_ids)} images"
-            )
-            time.sleep(5)
+        if res.ok:
+            success_images = []
+            failed_images = []
+            while len(success_images) + len(failed_images) != len(image_ids):
+                images_metadata = self._service_provider.items.list_by_names(
+                    project=self._project, folder=self._folder, names=self._images_list
+                ).data
+
+                success_images = [
+                    img.name
+                    for img in images_metadata
+                    if img.prediction_status
+                    == constances.SegmentationStatus.COMPLETED.value
+                ]
+                failed_images = [
+                    img.name
+                    for img in images_metadata
+                    if img.prediction_status
+                    == constances.SegmentationStatus.FAILED.value
+                ]
+
+                complete_images = success_images + failed_images
+                logger.info(
+                    f"prediction complete on {len(complete_images)} / {len(image_ids)} images"
+                )
+                time.sleep(5)
 
-        self._response.data = (success_images, failed_images)
+            self._response.data = (success_images, failed_images)
+        else:
+            self._response.errors = res.error
         return self._response
diff --git a/src/superannotate/lib/infrastructure/repositories.py b/src/superannotate/lib/infrastructure/repositories.py
index 3357bcf26..83c827f5c 100644
--- a/src/superannotate/lib/infrastructure/repositories.py
+++ b/src/superannotate/lib/infrastructure/repositories.py
@@ -1,8 +1,4 @@
 import io
-from typing import List
-
-from lib.core.conditions import Condition
-from lib.core.entities import ProjectEntity
 from lib.core.entities import S3FileEntity
 from lib.core.repositories import BaseS3Repository
@@ -22,12 +18,3 @@ def insert(self, entity: S3FileEntity) -> S3FileEntity:
         data["Metadata"] = temp
         self.bucket.put_object(**data)
         return entity
-
-    def update(self, entity: ProjectEntity):
-        self._service.update_project(entity.to_dict())
-
-    def delete(self, uuid: int):
-        self._service.delete_project(uuid)
-
-    def get_all(self, condition: Condition = None) -> List[ProjectEntity]:
-        pass
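
Note: the reworked execute() above polls item metadata until every requested image reaches a terminal prediction status. A minimal standalone sketch of that pattern, assuming a hypothetical fetch_statuses callable and stand-in status codes in place of constances.SegmentationStatus:

import time

# Stand-in terminal statuses; the real code compares against
# constances.SegmentationStatus.COMPLETED.value / .FAILED.value.
COMPLETED, FAILED = "completed", "failed"

def wait_for_predictions(fetch_statuses, image_ids, poll_interval=5):
    """Poll until every requested image reports a terminal status.

    fetch_statuses is a hypothetical callable returning a
    {image_name: status} dict; the use case above obtains the same
    information via service_provider.items.list_by_names().
    """
    success, failed = [], []
    while len(success) + len(failed) != len(image_ids):
        statuses = fetch_statuses(image_ids)
        success = [name for name, status in statuses.items() if status == COMPLETED]
        failed = [name for name, status in statuses.items() if status == FAILED]
        # Mirrors the logger.info() progress line in the use case.
        print(f"prediction complete on {len(success) + len(failed)} / {len(image_ids)} images")
        time.sleep(poll_interval)
    return success, failed
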
"post", data={ + "team_id": project.team_id, "project_id": project.id, "ml_model_id": ml_model_id, "image_ids": image_ids, diff --git a/tests/integration/test_ml_funcs.py b/tests/integration/test_ml_funcs.py index 02167694f..3a39a8180 100644 --- a/tests/integration/test_ml_funcs.py +++ b/tests/integration/test_ml_funcs.py @@ -1,10 +1,10 @@ import os -import time from os.path import dirname -import pytest from src.superannotate import SAClient +from src.superannotate import AppException from tests.integration.base import BaseTestCase +import pytest sa = SAClient() @@ -23,17 +23,16 @@ def folder_path(self): return os.path.join(dirname(dirname(__file__)), self.TEST_FOLDER_PATH) def test_run_prediction_with_non_exist_images(self): - with pytest.raises(Exception) as e: + with self.assertRaisesRegexp(AppException, 'No valid image names were provided.'): sa.run_prediction( - self.PROJECT_NAME, ["NonExistantImage.jpg"], self.MODEL_NAME + self.PROJECT_NAME, ["NotExistingImage.jpg"], self.MODEL_NAME ) - @pytest.mark.skip(reason="Need to adjust") + @pytest.mark.skip(reason="Test skipped due to long execution") def test_run_prediction_for_all_images(self): sa.upload_images_from_folder_to_project( project=self.PROJECT_NAME, folder_path=self.folder_path ) - time.sleep(2) image_names_vector = [i["name"] for i in sa.search_items(self.PROJECT_NAME)] succeeded_images, failed_images = sa.run_prediction( self.PROJECT_NAME, image_names_vector, self.MODEL_NAME