diff --git a/pytest.ini b/pytest.ini index 46f818c07..66f5fc363 100644 --- a/pytest.ini +++ b/pytest.ini @@ -2,5 +2,5 @@ minversion = 3.7 log_cli=true python_files = test_*.py -pytest_plugins = ['pytest_profiling'] -;addopts = -n auto --dist=loadscope \ No newline at end of file +;pytest_plugins = ['pytest_profiling'] +addopts = -n auto --dist=loadscope \ No newline at end of file diff --git a/requirements.txt b/requirements.txt index ee1a6f573..512184269 100644 --- a/requirements.txt +++ b/requirements.txt @@ -4,7 +4,7 @@ requests==2.26.0 requests-toolbelt>=0.9.1 aiohttp>=3.8.1 tqdm==4.64.0 -pillow>=7.2.0<9.0.1 +pillow>=7.2.0, <9.0.1 matplotlib>=3.3.1 xmltodict==0.12.0 opencv-python>=4.4.0.42 diff --git a/src/superannotate/lib/app/interface/sdk_interface.py b/src/superannotate/lib/app/interface/sdk_interface.py index 871e33a12..d40b0757d 100644 --- a/src/superannotate/lib/app/interface/sdk_interface.py +++ b/src/superannotate/lib/app/interface/sdk_interface.py @@ -3,6 +3,7 @@ import io import json import os +import sys import tempfile from pathlib import Path from typing import Callable @@ -11,39 +12,43 @@ from typing import List from typing import Optional from typing import Tuple +from typing import TypeVar from typing import Union +import pydantic +from typing_extensions import Literal + +if sys.version_info < (3, 11): + from typing_extensions import TypedDict, NotRequired # noqa +else: + from typing import TypedDict, NotRequired # noqa + import boto3 +from pydantic import StrictBool +from pydantic import conlist +from pydantic import constr +from pydantic import parse_obj_as +from pydantic.error_wrappers import ValidationError +from tqdm import tqdm + import lib.core as constants from lib.app.helpers import get_annotation_paths from lib.app.helpers import get_name_url_duplicated_from_csv from lib.app.helpers import wrap_error as wrap_validation_errors from lib.app.interface.base_interface import BaseInterfaceFacade from lib.app.interface.base_interface import 
TrackableMeta -from lib.app.interface.types import AnnotationStatuses -from lib.app.interface.types import AnnotationType -from lib.app.interface.types import AnnotatorRole -from lib.app.interface.types import ApprovalStatuses -from lib.app.interface.types import AttachmentArg -from lib.app.interface.types import AttachmentDict -from lib.app.interface.types import ClassType from lib.app.interface.types import EmailStr -from lib.app.interface.types import FolderStatusEnum -from lib.app.interface.types import ImageQualityChoices -from lib.app.interface.types import NotEmptyStr -from lib.app.interface.types import ProjectStatusEnum -from lib.app.interface.types import ProjectTypes -from lib.app.interface.types import Setting from lib.app.serializers import BaseSerializer from lib.app.serializers import FolderSerializer from lib.app.serializers import ItemSerializer from lib.app.serializers import ProjectSerializer from lib.app.serializers import SettingsSerializer from lib.app.serializers import TeamSerializer -from lib.core import entities from lib.core import LIMITED_FUNCTIONS -from lib.core.conditions import Condition +from lib.core import entities +from lib.core import enums from lib.core.conditions import CONDITION_EQ as EQ +from lib.core.conditions import Condition from lib.core.conditions import EmptyCondition from lib.core.entities import AttachmentEntity from lib.core.entities import SettingEntity @@ -53,20 +58,62 @@ from lib.core.enums import ImageQuality from lib.core.exceptions import AppException from lib.core.types import MLModel -from lib.core.types import PriorityScore +from lib.core.types import PriorityScoreEntity from lib.core.types import Project from lib.infrastructure.utils import extract_project_folder from lib.infrastructure.validators import wrap_error -from pydantic import conlist -from pydantic import parse_obj_as -from pydantic import StrictBool -from pydantic.error_wrappers import ValidationError from superannotate.logger import 
get_default_logger -from tqdm import tqdm logger = get_default_logger() +NotEmptyStr = TypeVar("NotEmptyStr", bound=constr(strict=True, min_length=1)) + + +PROJECT_STATUS = Literal["Undefined", "NotStarted", "InProgress", "Completed", "OnHold"] + +PROJECT_TYPE = Literal[ + "Vector", "Pixel", "Video", "Document", "Tiled", "Other", "PointCloud" +] + +CLASS_TYPE = Literal["object", "tag"] + +ANNOTATION_STATUS = Literal[ + "NotStarted", "InProgress", "QualityCheck", "Returned", "Completed", "Skipped" +] + +APPROVAL_STATUS = Literal["Approved", "Disapproved"] + +IMAGE_QUALITY = Literal["compressed", "original"] + +ANNOTATION_TYPE = Literal["bbox", "polygon", "point", "tag"] + +ANNOTATOR_ROLE = Literal["Admin", "Annotator", "QA"] + +FOLDER_STATUS = Literal[ + "Undefined", + "NotStarted", + "InProgress", + "Completed", + "OnHold", +] + + +class Setting(TypedDict): + attribute: str + value: Union[str, float, int] + + +class PriorityScore(TypedDict): + name: str + priority: float + + +class Attachment(TypedDict): + url: str + name: NotRequired[str] # noqa + + class SAClient(BaseInterfaceFacade, metaclass=TrackableMeta): """Create SAClient instance to authorize SDK in a team scope. In case of no argument has been provided, SA_TOKEN environmental variable @@ -187,7 +234,7 @@ def search_projects( name: Optional[NotEmptyStr] = None, return_metadata: bool = False, include_complete_image_count: bool = False, - status: Optional[Union[ProjectStatusEnum, List[ProjectStatusEnum]]] = None, + status: Optional[Union[PROJECT_STATUS, List[PROJECT_STATUS]]] = None, ): """ Project name based case-insensitive search for projects. @@ -244,7 +291,7 @@ def create_project( self, project_name: NotEmptyStr, project_description: NotEmptyStr, - project_type: NotEmptyStr, + project_type: PROJECT_TYPE, settings: List[Setting] = None, ): """Create a new project in the team. 
@@ -268,12 +315,6 @@ def create_project( settings = parse_obj_as(List[SettingEntity], settings) else: settings = [] - try: - ProjectTypes.validate(project_type) - except TypeError: - raise AppException( - f"Please provide a valid project type: {', '.join(constants.ProjectType.titles())}." - ) response = self.controller.projects.create( entities.ProjectEntity( name=project_name, @@ -300,12 +341,11 @@ def create_project_from_metadata(self, project_metadata: Project): :rtype: dict """ project_metadata = project_metadata.dict() - try: - ProjectTypes.validate(project_metadata["type"]) - except TypeError: + if project_metadata["type"] not in enums.ProjectType.titles(): raise AppException( "Please provide a valid project type: Vector, Pixel, Document, or Video." ) + response = self.controller.projects.create( entities.ProjectEntity( name=project_metadata["name"], @@ -465,7 +505,7 @@ def search_folders( self, project: NotEmptyStr, folder_name: Optional[NotEmptyStr] = None, - status: Optional[Union[FolderStatusEnum, List[FolderStatusEnum]]] = None, + status: Optional[Union[FOLDER_STATUS, List[FOLDER_STATUS]]] = None, return_metadata: Optional[StrictBool] = False, ): """Folder name based case-insensitive search for folders in project. @@ -835,14 +875,15 @@ def upload_images_from_folder_to_project( :type from_s3_bucket: str :param exclude_file_patterns: filename patterns to exclude from uploading, - default value is to exclude SuperAnnotate export related ["___save.png", "___fuse.png"] + default value is to exclude SuperAnnotate export related ["___save.png", "___fuse.png"] :type exclude_file_patterns: list or tuple of strs :param recursive_subfolders: enable recursive subfolder parsing :type recursive_subfolders: bool :param image_quality_in_editor: image quality be seen in SuperAnnotate web annotation editor. - Can be either "compressed" or "original". If None then the default value in project settings will be used. + Can be either "compressed" or "original". 
+ If None then the default value in project settings will be used. :type image_quality_in_editor: str :return: uploaded, could-not-upload, existing-images filepaths @@ -852,7 +893,8 @@ def upload_images_from_folder_to_project( project_name, folder_name = extract_project_folder(project) if recursive_subfolders: logger.info( - "When using recursive subfolder parsing same name images in different subfolders will overwrite each other." + "When using recursive subfolder parsing same name images" + " in different subfolders will overwrite each other." ) if not isinstance(extensions, (list, tuple)): print(extensions) @@ -991,7 +1033,7 @@ def prepare_export( self, project: Union[NotEmptyStr, dict], folder_names: Optional[List[NotEmptyStr]] = None, - annotation_statuses: Optional[List[AnnotationStatuses]] = None, + annotation_statuses: Optional[List[ANNOTATION_STATUS]] = None, include_fuse: Optional[StrictBool] = False, only_pinned=False, ): @@ -1051,8 +1093,8 @@ def upload_videos_from_folder_to_project( target_fps: Optional[int] = None, start_time: Optional[float] = 0.0, end_time: Optional[float] = None, - annotation_status: Optional[AnnotationStatuses] = "NotStarted", - image_quality_in_editor: Optional[ImageQualityChoices] = None, + annotation_status: Optional[ANNOTATION_STATUS] = "NotStarted", + image_quality_in_editor: Optional[IMAGE_QUALITY] = None, ): """Uploads image frames from all videos with given extensions from folder_path to the project. Sets status of all the uploaded images to set_status if it is not None. @@ -1129,8 +1171,8 @@ def upload_video_to_project( target_fps: Optional[int] = None, start_time: Optional[float] = 0.0, end_time: Optional[float] = None, - annotation_status: Optional[AnnotationStatuses] = "NotStarted", - image_quality_in_editor: Optional[ImageQualityChoices] = None, + annotation_status: Optional[ANNOTATION_STATUS] = "NotStarted", + image_quality_in_editor: Optional[IMAGE_QUALITY] = None, ): """Uploads image frames from video to platform. 
Uploaded images will have names "_.jpg". @@ -1150,7 +1192,8 @@ def upload_video_to_project( video frames NotStarted InProgress QualityCheck Returned Completed Skipped :type annotation_status: str :param image_quality_in_editor: image quality be seen in SuperAnnotate web annotation editor. - Can be either "compressed" or "original". If None then the default value in project settings will be used. + Can be either "compressed" or "original". + If None then the default value in project settings will be used. :type image_quality_in_editor: str :return: filenames of uploaded images @@ -1179,7 +1222,7 @@ def create_annotation_class( name: NotEmptyStr, color: NotEmptyStr, attribute_groups: Optional[List[AttributeGroup]] = None, - class_type: ClassType = "object", + class_type: CLASS_TYPE = "object", ): """Create annotation class in project @@ -1293,7 +1336,7 @@ def delete_annotation_class( try: annotation_class = AnnotationClassEntity( name=annotation_class, - color="#ffffff", # Random, just need to serialize + color="#ffffff", # noqa Random, just need to serialize ) except ValidationError as e: raise AppException(wrap_error(e)) @@ -1415,9 +1458,9 @@ def set_project_workflow( """Sets project's workflow. new_workflow example: [{ "step" : , "className" : , "tool" : , - "attribute":[{"attribute" : {"name" : , "attribute_group" : {"name": }}}, - ...] - },...] + "attribute":[{"attribute" : {"name" : , "attribute_group" : {"name": }}}, + ...] + },...] 
:param project: project name or metadata :type project: str or dict @@ -1690,10 +1733,11 @@ def benchmark( folder_names: List[NotEmptyStr], export_root: Optional[Union[str, Path]] = None, image_list=None, - annot_type: Optional[AnnotationType] = "bbox", + annot_type: Optional[ANNOTATION_TYPE] = "bbox", show_plots=False, ): - """Computes benchmark score for each instance of given images that are present both gt_project_name project and projects in folder_names list: + """Computes benchmark score for each instance of given images that are present both gt_project_name project + and projects in folder_names list: :param project: project name or metadata of the project :type project: str or dict @@ -1703,14 +1747,17 @@ def benchmark( :type folder_names: list of str :param export_root: root export path of the projects :type export_root: Path-like (str or Path) - :param image_list: List of image names from the projects list that must be used. If None, then all images from the projects list will be used. Default: None + :param image_list: List of image names from the projects list that must be used. If None, + then all images from the projects list will be used. Default: None :type image_list: list - :param annot_type: Type of annotation instances to consider. Available candidates are: ["bbox", "polygon", "point"] + :param annot_type: Type of annotation instances to consider. + Available candidates are: ["bbox", "polygon", "point"] :type annot_type: str :param show_plots: If True, show plots based on results of consensus computation. 
Default: False :type show_plots: bool - :return: Pandas DateFrame with columns (creatorEmail, QA, imageName, instanceId, className, area, attribute, folderName, score) + :return: Pandas DataFrame with columns + (creatorEmail, QA, imageName, instanceId, className, area, attribute, folderName, score) :rtype: pandas DataFrame """ project_name = project @@ -1750,11 +1797,12 @@ def benchmark( def consensus( self, project: NotEmptyStr, - folder_names: List[NotEmptyStr], + folder_names: List[NotEmptyStr], image_list: Optional[List[NotEmptyStr]] = None, - annotation_type: Optional[AnnotationType] = "bbox", + annotation_type: Optional[ANNOTATION_TYPE] = "bbox", ): - """Computes consensus score for each instance of given images that are present in at least 2 of the given projects: + """Computes consensus score for each instance of given images + that are present in at least 2 of the given projects: :param project: project name :type project: str @@ -1762,13 +1810,16 @@ def consensus( :param folder_names: list of folder names in the project for which the scores will be computed :type folder_names: list of str - :param image_list: List of image names from the projects list that must be used. If None, then all images from the projects list will be used. Default: None + :param image_list: List of image names from the projects list that must be used. + If None, then all images from the projects list will be used. Default: None :type image_list: list - :param annotation_type: Type of annotation instances to consider. Available candidates are: ["bbox", "polygon", "point"] + :param annotation_type: Type of annotation instances to consider. 
+ Available candidates are: ["bbox", "polygon", "point"] :type annotation_type: str - :return: Pandas DateFrame with columns (creatorEmail, QA, imageName, instanceId, className, area, attribute, folderName, score) + :return: Pandas DateFrame with columns + (creatorEmail, QA, imageName, instanceId, className, area, attribute, folderName, score) :rtype: pandas DataFrame """ @@ -1788,7 +1839,8 @@ def run_prediction( images_list: List[NotEmptyStr], model: Union[NotEmptyStr, dict], ): - """This function runs smart prediction on given list of images from a given project using the neural network of your choice + """This function runs smart prediction on given list of images from a given project + using the neural network of your choice :param project: the project in which the target images are uploaded. :type project: str or dict @@ -1796,7 +1848,7 @@ def run_prediction( :type images_list: list of str :param model: the name of the model that should be used for running smart prediction :type model: str or dict - :return: tupe of two lists, list of images on which the prediction has succeded and failed respectively + :return: tuple of two lists, list of images on which the prediction has succeeded and failed respectively :rtype: tuple """ project_name = None @@ -1825,7 +1877,7 @@ def upload_image_to_project( project: NotEmptyStr, img, image_name: Optional[NotEmptyStr] = None, - annotation_status: Optional[AnnotationStatuses] = "NotStarted", + annotation_status: Optional[ANNOTATION_STATUS] = "NotStarted", from_s3_bucket=None, image_quality_in_editor: Optional[NotEmptyStr] = None, ): @@ -1839,12 +1891,14 @@ def upload_image_to_project( :param image_name: image name to set on platform. 
If None and img is filepath, image name will be set to filename of the path :type image_name: str - :param annotation_status: value to set the annotation statuses of the uploaded image NotStarted InProgress QualityCheck Returned Completed Skipped + :param annotation_status: value to set the annotation statuses of the uploaded image + NotStarted InProgress QualityCheck Returned Completed Skipped :type annotation_status: str :param from_s3_bucket: AWS S3 bucket to use. If None then folder_path is in local filesystem :type from_s3_bucket: str :param image_quality_in_editor: image quality be seen in SuperAnnotate web annotation editor. - Can be either "compressed" or "original". If None then the default value in project settings will be used. + Can be either "compressed" or "original". + If None then the default value in project settings will be used. :type image_quality_in_editor: str """ project_name, folder_name = extract_project_folder(project) @@ -1908,9 +1962,9 @@ def upload_images_to_project( self, project: NotEmptyStr, img_paths: List[NotEmptyStr], - annotation_status: Optional[AnnotationStatuses] = "NotStarted", + annotation_status: Optional[ANNOTATION_STATUS] = "NotStarted", from_s3_bucket=None, - image_quality_in_editor: Optional[ImageQualityChoices] = None, + image_quality_in_editor: Optional[IMAGE_QUALITY] = None, ): """Uploads all images given in list of path objects in img_paths to the project. Sets status of all the uploaded images to set_status if it is not None. 
@@ -1923,12 +1977,14 @@ def upload_images_to_project( :type project: str :param img_paths: list of Path-like (str or Path) objects to upload :type img_paths: list - :param annotation_status: value to set the annotation statuses of the uploaded images NotStarted InProgress QualityCheck Returned Completed Skipped + :param annotation_status: value to set the annotation statuses of the uploaded images + NotStarted InProgress QualityCheck Returned Completed Skipped :type annotation_status: str :param from_s3_bucket: AWS S3 bucket to use. If None then folder_path is in local filesystem :type from_s3_bucket: str :param image_quality_in_editor: image quality be seen in SuperAnnotate web annotation editor. - Can be either "compressed" or "original". If None then the default value in project settings will be used. + Can be either "compressed" or "original". + If None then the default value in project settings will be used. :type image_quality_in_editor: str :return: uploaded, could-not-upload, existing-images filepaths @@ -1970,7 +2026,7 @@ def upload_images_to_project( @staticmethod def aggregate_annotations_as_df( project_root: Union[NotEmptyStr, Path], - project_type: ProjectTypes, + project_type: PROJECT_TYPE, folder_names: Optional[List[Union[Path, NotEmptyStr]]] = None, ): """Aggregate annotations as pandas dataframe from project root. @@ -1990,12 +2046,8 @@ def aggregate_annotations_as_df( """ from superannotate.lib.app.analytics.aggregators import DataAggregator - try: - ProjectTypes.validate(project_type) - except TypeError as e: - raise AppException(e) return DataAggregator( - project_type=project_type, + project_type=project_type, # noqa project_root=project_root, folder_names=folder_names, ).aggregate_annotations_as_df() @@ -2021,7 +2073,7 @@ def delete_annotations( def validate_annotations( self, - project_type: ProjectTypes, + project_type: PROJECT_TYPE, annotations_json: Union[NotEmptyStr, Path, dict], ): """Validates given annotation JSON. 
@@ -2052,7 +2104,7 @@ def add_contributors_to_project( self, project: NotEmptyStr, emails: conlist(EmailStr, min_items=1), - role: AnnotatorRole, + role: ANNOTATOR_ROLE, ) -> Tuple[List[str], List[str]]: """Add contributors to project. @@ -2156,6 +2208,7 @@ def upload_priority_scores(self, project: NotEmptyStr, scores: List[PriorityScor :return: lists of uploaded, skipped items :rtype: tuple (2 members) of lists of strs """ + scores = parse_obj_as(List[PriorityScoreEntity], scores) project_name, folder_name = extract_project_folder(project) project_folder_name = project project, folder = self.controller.get_project_folder(project_name, folder_name) @@ -2176,7 +2229,9 @@ def get_integrations(self): if response.errors: raise AppException(response.errors) integrations = response.data - return BaseSerializer.serialize_iterable(integrations, ("name", "type", "root")) + return BaseSerializer.serialize_iterable( + integrations, ("name", "type", "root") # noqa + ) def attach_items_from_integrated_storage( self, @@ -2309,7 +2364,7 @@ def search_items( self, project: NotEmptyStr, name_contains: NotEmptyStr = None, - annotation_status: Optional[AnnotationStatuses] = None, + annotation_status: Optional[ANNOTATION_STATUS] = None, annotator_email: Optional[NotEmptyStr] = None, qa_email: Optional[NotEmptyStr] = None, recursive: bool = False, @@ -2415,8 +2470,8 @@ def search_items( def attach_items( self, project: Union[NotEmptyStr, dict], - attachments: AttachmentArg, - annotation_status: Optional[AnnotationStatuses] = "NotStarted", + attachments: Union[NotEmptyStr, Path, conlist(Attachment, min_items=1)], + annotation_status: Optional[ANNOTATION_STATUS] = "NotStarted", ): """Link items from external storage to SuperAnnotate using URLs. 
@@ -2435,16 +2490,17 @@ def attach_items( “Skipped” :type annotation_status: str """ - attachments = attachments.data + project_name, folder_name = extract_project_folder(project) - if attachments and isinstance(attachments[0], AttachmentDict): + try: + attachments = parse_obj_as(List[AttachmentEntity], attachments) unique_attachments = set(attachments) duplicate_attachments = [ item for item, count in collections.Counter(attachments).items() if count > 1 ] - else: + except pydantic.ValidationError: ( unique_attachments, duplicate_attachments, @@ -2562,7 +2618,7 @@ def move_items( def set_annotation_statuses( self, project: Union[NotEmptyStr, dict], - annotation_status: AnnotationStatuses, + annotation_status: ANNOTATION_STATUS, items: Optional[List[NotEmptyStr]] = None, ): """Sets annotation statuses of items @@ -2608,16 +2664,19 @@ def download_annotations( :param project: project name or folder path (e.g., “project1/folder1”). :type project: str - :param path: local directory path where the annotations will be downloaded. If none, the current directory is used. + :param path: local directory path where the annotations will be downloaded. + If none, the current directory is used. :type path: Path-like (str or Path) :param items: list of item names whose annotations will be downloaded - (e.g., ["Image_1.jpeg", "Image_2.jpeg"]). If the value is None, then all the annotations of the given directory will be downloaded. + (e.g., ["Image_1.jpeg", "Image_2.jpeg"]). If the value is None, + then all the annotations of the given directory will be downloaded. :type items: list of str - :param recursive: download annotations from the project’s root and all of its folders with the preserved structure. - If False download only from the project’s root or given directory. + :param recursive: download annotations from the project’s root + and all of its folders with the preserved structure. + If False download only from the project’s root or given directory. 
:type recursive: bool :param callback: a function that allows you to modify each annotation’s dict before downloading. @@ -2784,7 +2843,7 @@ def get_custom_fields(self, project: NotEmptyStr): return response.data def delete_custom_fields( - self, project: NotEmptyStr, fields: conlist(NotEmptyStr, min_items=1) + self, project: NotEmptyStr, fields: conlist(str, min_items=1) ): """Remove custom fields from a project’s custom metadata schema. @@ -2953,10 +3012,12 @@ def add_items_to_subset( :param project: project name (e.g., “project1”) :type project: str - :param subset: a name of an existing/new subset to associate items with. New subsets will be automatically created. + :param subset: a name of an existing/new subset to associate items with. + New subsets will be automatically created. :type subset: str - :param items: list of items metadata. Required keys are 'name' and 'path' if the 'id' key is not provided in the dict. + :param items: list of items metadata. + Required keys are 'name' and 'path' if the 'id' key is not provided in the dict. :type items: list of dicts :return: dictionary with succeeded, skipped and failed items lists. 
@@ -3025,7 +3086,7 @@ def add_items_to_subset( def set_approval_statuses( self, project: NotEmptyStr, - approval_status: Union[ApprovalStatuses, None], + approval_status: Union[APPROVAL_STATUS, None], items: Optional[List[NotEmptyStr]] = None, ): """Sets annotation statuses of items diff --git a/src/superannotate/lib/app/interface/types.py b/src/superannotate/lib/app/interface/types.py index fac2c4346..0f8faf8ef 100644 --- a/src/superannotate/lib/app/interface/types.py +++ b/src/superannotate/lib/app/interface/types.py @@ -1,35 +1,17 @@ -import uuid from functools import wraps -from pathlib import Path -from typing import Optional from typing import Union -from lib.core.enums import AnnotationStatus -from lib.core.enums import ApprovalStatus from lib.core.enums import BaseTitledEnum -from lib.core.enums import ClassTypeEnum -from lib.core.enums import FolderStatus -from lib.core.enums import ProjectStatus -from lib.core.enums import ProjectType -from lib.core.enums import UserRole from lib.core.exceptions import AppException from lib.infrastructure.validators import wrap_error -from pydantic import BaseModel -from pydantic import conlist from pydantic import constr from pydantic import errors -from pydantic import Extra -from pydantic import Field -from pydantic import parse_obj_as -from pydantic import root_validator from pydantic import StrictStr from pydantic import validate_arguments as pydantic_validate_arguments from pydantic import ValidationError from pydantic.errors import PydanticTypeError from pydantic.errors import StrRegexError -NotEmptyStr = constr(strict=True, min_length=1) - class EnumMemberError(PydanticTypeError): code = "enum" @@ -54,174 +36,20 @@ def validate(cls, value: Union[str]) -> Union[str]: regex=r"^(?=.{1,254}$)(?=.{1,64}@)[a-zA-Z0-9!#$%&'*+/=?^_`{|}~-]+(?:\.[a-zA-Z0-9!#$%&'*+/=?^_`{|}~-]+)" r"*@[a-zA-Z0-9](?:[a-zA-Z0-9-]{0,61}[a-zA-Z0-9])?(?:\.[a-zA-Z0-9](?:[a-zA-Z0-9-]{0,61}" r"[a-zA-Z0-9])?)*$" - ).validate(value) + ).validate( # noqa 
+ value + ) except StrRegexError: raise ValueError("Invalid email") return value -class ProjectStatusEnum(StrictStr): - @classmethod - def validate(cls, value: Union[str]) -> Union[str]: - if cls.curtail_length and len(value) > cls.curtail_length: - value = value[: cls.curtail_length] - if value.lower() not in ProjectStatus.values(): - raise TypeError( - f"Available statuses is {', '.join(ProjectStatus.titles())}. " - ) - return value - - -class FolderStatusEnum(StrictStr): - @classmethod - def validate(cls, value: Union[str]) -> Union[str]: - if cls.curtail_length and len(value) > cls.curtail_length: - value = value[: cls.curtail_length] - if value.lower() not in FolderStatus.values(): - raise TypeError( - f"Available statuses is {', '.join(FolderStatus.titles())}. " - ) - return value - - -class AnnotatorRole(StrictStr): - ANNOTATOR_ROLES = (UserRole.ADMIN.name, UserRole.ANNOTATOR.name, UserRole.QA.name) - - @classmethod - def validate(cls, value: Union[str]) -> Union[str]: - if cls.curtail_length and len(value) > cls.curtail_length: - value = value[: cls.curtail_length] - if value.lower() not in [role.lower() for role in cls.ANNOTATOR_ROLES]: - raise TypeError( - f"Invalid user role provided. Please specify one of {', '.join(cls.ANNOTATOR_ROLES)}. " - ) - return value - - -class AnnotationType(StrictStr): - VALID_TYPES = ["bbox", "polygon", "point", "tag"] - - @classmethod - def validate(cls, value: Union[str]) -> Union[str]: - if value.lower() not in cls.VALID_TYPES: - raise TypeError( - f"Available annotation_types are {', '.join(cls.VALID_TYPES)}. 
" - ) - return value - - -class AttachmentDict(BaseModel): - url: StrictStr - name: Optional[StrictStr] = Field(default_factory=lambda: str(uuid.uuid4())) - - class Config: - extra = Extra.ignore - - def __hash__(self): - return hash(self.name) - - def __eq__(self, other): - return self.url == other.url and self.name.strip() == other.name.strip() - - -AttachmentArgType = Union[NotEmptyStr, Path, conlist(AttachmentDict, min_items=1)] - - -class Setting(BaseModel): - attribute: NotEmptyStr - value: Union[NotEmptyStr, float, int] - - class Config: - extra = Extra.ignore - - -class AttachmentArg(BaseModel): - __root__: AttachmentArgType - - def __getitem__(self, index): - return self.__root__[index] - - @property - def data(self): - return self.__root__ - - @root_validator(pre=True) - def validate_root(cls, values): - try: - parse_obj_as(AttachmentArgType, values["__root__"]) - except ValidationError: - raise ValueError( - "The value must be str, path, or list of dicts with the required 'url' and optional 'name' keys" - ) - return values - - -class ImageQualityChoices(StrictStr): - VALID_CHOICES = ["compressed", "original"] - - @classmethod - def validate(cls, value: Union[str]) -> Union[str]: - super().validate(value) - if value.lower() not in cls.VALID_CHOICES: - raise TypeError( - f"Image quality available choices are {', '.join(cls.VALID_CHOICES)}." - ) - return value.lower() - - -class ProjectTypes(StrictStr): - @classmethod - def validate(cls, value: Union[str]) -> Union[str]: - if value.lower() not in ProjectType.values(): - raise TypeError( - f" Available project types are {', '.join(ProjectType.titles())}. " - ) - return value - - -class ClassType(StrictStr): - @classmethod - def validate(cls, value: Union[str]) -> Union[str]: - enum_values = [e.name.lower() for e in ClassTypeEnum] - if value.lower() not in enum_values: - raise TypeError( - f"Invalid type provided. Please specify one of the {', '.join(enum_values)}. 
" - ) - return value.lower() - - -class AnnotationStatuses(StrictStr): - @classmethod - def validate(cls, value: Union[str]) -> Union[str]: - if value.lower() not in AnnotationStatus.values(): - raise TypeError( - f"Available an notation_statuses are {', '.join(AnnotationStatus.titles())}. " - ) - return value - - -class ApprovalStatuses(StrictStr): - @classmethod - def validate(cls, value: Union[str]) -> Union[str]: - if value is None: - return value - if value.lower() not in ApprovalStatus.values() or not isinstance(value, str): - raise TypeError( - f"Available approval_status options are {', '.join(map(str, ApprovalStatus.titles()))}." - ) - return value - - @classmethod - def __get_validators__(cls): - yield cls.validate - - def validate_arguments(func): @wraps(func) def wrapped(self, *args, **kwargs): try: return pydantic_validate_arguments(func)(self, *args, **kwargs) except ValidationError as e: - raise AppException(wrap_error(e)) + raise AppException(wrap_error(e)) from e return wrapped diff --git a/src/superannotate/lib/core/entities/project.py b/src/superannotate/lib/core/entities/project.py index 83526898d..97004ad6e 100644 --- a/src/superannotate/lib/core/entities/project.py +++ b/src/superannotate/lib/core/entities/project.py @@ -47,6 +47,9 @@ class AttachmentEntity(BaseModel): class Config: extra = Extra.ignore + def __hash__(self): + return hash(self.name) + class WorkflowEntity(BaseModel): id: Optional[int] diff --git a/src/superannotate/lib/core/enums.py b/src/superannotate/lib/core/enums.py index d3222966b..a98314812 100644 --- a/src/superannotate/lib/core/enums.py +++ b/src/superannotate/lib/core/enums.py @@ -1,3 +1,4 @@ +import typing from enum import Enum from types import DynamicClassAttribute @@ -27,6 +28,9 @@ def choices(cls): def name(self) -> str: return self.__doc__ + def __unicode__(self): + return self.__doc__ + @DynamicClassAttribute def value(self): return super().value @@ -51,8 +55,8 @@ def values(cls): return [enum.__doc__.lower() 
if enum else None for enum in list(cls)] @classmethod - def titles(cls): - return [enum.__doc__ for enum in list(cls)] + def titles(cls) -> typing.Tuple: + return tuple(enum.__doc__ for enum in list(cls)) def equals(self, other: Enum): return self.__doc__.lower() == other.__doc__.lower() @@ -60,6 +64,9 @@ def equals(self, other: Enum): def __eq__(self, other): return super().__eq__(other) + def __repr__(self): + return self.name + def __hash__(self): return hash(self.name) diff --git a/src/superannotate/lib/core/types.py b/src/superannotate/lib/core/types.py index 0c735ad98..d49f630b0 100644 --- a/src/superannotate/lib/core/types.py +++ b/src/superannotate/lib/core/types.py @@ -25,7 +25,7 @@ class Config: extra = Extra.allow -class PriorityScore(BaseModel): +class PriorityScoreEntity(BaseModel): name: NotEmptyStr priority: float diff --git a/src/superannotate/lib/core/usecases/annotations.py b/src/superannotate/lib/core/usecases/annotations.py index b92fb7c47..d3b09eeee 100644 --- a/src/superannotate/lib/core/usecases/annotations.py +++ b/src/superannotate/lib/core/usecases/annotations.py @@ -39,7 +39,7 @@ from lib.core.response import Response from lib.core.service_types import UploadAnnotationAuthData from lib.core.serviceproviders import BaseServiceProvider -from lib.core.types import PriorityScore +from lib.core.types import PriorityScoreEntity from lib.core.usecases.base import BaseReportableUseCase from lib.core.video_convertor import VideoFrameGenerator from pydantic import BaseModel @@ -585,6 +585,7 @@ def get_existing_name_item_mapping( @property def annotation_upload_data(self) -> UploadAnnotationAuthData: + CHUNK_SIZE = UploadAnnotationsFromFolderUseCase.CHUNK_SIZE_PATHS if self._annotation_upload_data: @@ -598,7 +599,7 @@ def annotation_upload_data(self) -> UploadAnnotationAuthData: item_ids=self._item_ids[i : i + CHUNK_SIZE], ) if not tmp.ok: - raise AppException(tmp.errors) + raise AppException(tmp.error) else: images.update(tmp.data.images) @@ 
-1195,7 +1196,7 @@ def __init__( reporter, project: ProjectEntity, folder: FolderEntity, - scores: List[PriorityScore], + scores: List[PriorityScoreEntity], project_folder_name: str, service_provider: BaseServiceProvider, ): diff --git a/src/superannotate/lib/core/usecases/base.py b/src/superannotate/lib/core/usecases/base.py index 06f95c907..4592df432 100644 --- a/src/superannotate/lib/core/usecases/base.py +++ b/src/superannotate/lib/core/usecases/base.py @@ -64,11 +64,11 @@ def __init__(self, reporter: Reporter): self.reporter = reporter -class BaseUserBasedUseCase(BaseReportableUseCase, metaclass=ABCMeta): +class BaseUserBasedUseCase(BaseUseCase, metaclass=ABCMeta): """ class contain validation of unique emails """ - def __init__(self, reporter: Reporter, emails: List[str]): - super().__init__(reporter) + def __init__(self, emails: List[str]): + super().__init__() self._emails = emails diff --git a/src/superannotate/lib/core/usecases/classes.py b/src/superannotate/lib/core/usecases/classes.py index 3d21fef93..7738381af 100644 --- a/src/superannotate/lib/core/usecases/classes.py +++ b/src/superannotate/lib/core/usecases/classes.py @@ -7,10 +7,12 @@ from lib.core.entities import ProjectEntity from lib.core.enums import ProjectType from lib.core.exceptions import AppException -from lib.core.reporter import Reporter +from lib.core.reporter import Spinner from lib.core.serviceproviders import BaseServiceProvider -from lib.core.usecases.base import BaseReportableUseCase from lib.core.usecases.base import BaseUseCase +from superannotate.logger import get_default_logger + +logger = get_default_logger() class GetAnnotationClassesUseCase(BaseUseCase): @@ -32,15 +34,14 @@ def execute(self): return self._response -class CreateAnnotationClassUseCase(BaseReportableUseCase): +class CreateAnnotationClassUseCase(BaseUseCase): def __init__( self, - reporter: Reporter, service_provider: BaseServiceProvider, annotation_class: AnnotationClassEntity, project: ProjectEntity, ): - 
super().__init__(reporter) + super().__init__() self._service_provider = service_provider self._annotation_class = annotation_class self._project = project @@ -63,7 +64,7 @@ def validate_project_type(self): and self._annotation_class.type == "tag" ): raise AppException( - f"Predefined tagging functionality is not supported for projects" + "Predefined tagging functionality is not supported for projects" f" of type {ProjectType.get_name(self._project.type)}." ) @@ -90,21 +91,20 @@ def execute(self): response.error.replace(". ", ".\n") ) else: - self.reporter.log_error("This class name already exists. Skipping.") + logger.error("This class name already exists. Skipping.") return self._response -class CreateAnnotationClassesUseCase(BaseReportableUseCase): +class CreateAnnotationClassesUseCase(BaseUseCase): CHUNK_SIZE = 500 def __init__( self, - reporter: Reporter, service_provider: BaseServiceProvider, annotation_classes: List[AnnotationClassEntity], project: ProjectEntity, ): - super().__init__(reporter) + super().__init__() self._project = project self._service_provider = service_provider self._annotation_classes = annotation_classes @@ -145,12 +145,12 @@ def execute(self): unique_annotation_classes ) if not_unique_classes_count: - self.reporter.log_warning( + logger.warning( f"{not_unique_classes_count} annotation classes already exist.Skipping." ) created = [] chunk_failed = False - with self.reporter.spinner: + with Spinner(): # this is in reverse order because of the front-end for i in range(len(unique_annotation_classes), 0, -self.CHUNK_SIZE): response = ( @@ -166,7 +166,7 @@ def execute(self): else: chunk_failed = True if created: - self.reporter.log_info( + logger.info( f"{len(created)} annotation classes were successfully created in {self._project.name}." 
) if chunk_failed: @@ -177,21 +177,20 @@ def execute(self): return self._response -class DownloadAnnotationClassesUseCase(BaseReportableUseCase): +class DownloadAnnotationClassesUseCase(BaseUseCase): def __init__( self, - reporter: Reporter, download_path: str, project: ProjectEntity, service_provider: BaseServiceProvider, ): - super().__init__(reporter) + super().__init__() self._download_path = download_path self._project = project self._service_provider = service_provider def execute(self): - self.reporter.log_info( + logger.info( f"Downloading classes.json from project {self._project.name} to folder {str(self._download_path)}." ) response = self._service_provider.annotation_classes.list( diff --git a/src/superannotate/lib/core/usecases/items.py b/src/superannotate/lib/core/usecases/items.py index a62ffc1c5..ebb8ca514 100644 --- a/src/superannotate/lib/core/usecases/items.py +++ b/src/superannotate/lib/core/usecases/items.py @@ -1,15 +1,15 @@ import copy import traceback from collections import defaultdict -from concurrent.futures import ThreadPoolExecutor from concurrent.futures import as_completed +from concurrent.futures import ThreadPoolExecutor from typing import Dict from typing import List from typing import Optional import superannotate.lib.core as constants -from lib.core.conditions import CONDITION_EQ as EQ from lib.core.conditions import Condition +from lib.core.conditions import CONDITION_EQ as EQ from lib.core.entities import AttachmentEntity from lib.core.entities import BaseItemEntity from lib.core.entities import DocumentEntity @@ -43,7 +43,7 @@ def __init__(self, item_id, project, service_provider): super().__init__() def execute( - self, + self, ): try: @@ -65,13 +65,13 @@ def execute( class GetItem(BaseReportableUseCase): def __init__( - self, - reporter: Reporter, - project: ProjectEntity, - folder: FolderEntity, - service_provider: BaseServiceProvider, - item_name: str, - include_custom_metadata: bool, + self, + reporter: Reporter, + 
project: ProjectEntity, + folder: FolderEntity, + service_provider: BaseServiceProvider, + item_name: str, + include_custom_metadata: bool, ): super().__init__(reporter) self._project = project @@ -82,8 +82,8 @@ def __init__( def validate_project_type(self): if ( - self._project.type == constants.ProjectType.PIXEL.value - and self._include_custom_metadata + self._project.type == constants.ProjectType.PIXEL.value + and self._include_custom_metadata ): raise AppException(constants.METADATA_DEPRICATED_FOR_PIXEL) @@ -109,10 +109,10 @@ def serialize_entity(entity: BaseItemEntity, project: ProjectEntity): def execute(self) -> Response: if self.is_valid(): condition = ( - Condition("name", self._item_name, EQ) - & Condition("project_id", self._project.id, EQ) - & Condition("folder_id", self._folder.id, EQ) - & Condition("includeCustomMetadata", self._include_custom_metadata, EQ) + Condition("name", self._item_name, EQ) + & Condition("project_id", self._project.id, EQ) + & Condition("folder_id", self._folder.id, EQ) + & Condition("includeCustomMetadata", self._include_custom_metadata, EQ) ) response = self._service_provider.items.list(condition) if not response.ok: @@ -130,13 +130,13 @@ def execute(self) -> Response: class QueryEntitiesUseCase(BaseReportableUseCase): def __init__( - self, - reporter: Reporter, - project: ProjectEntity, - folder: FolderEntity, - service_provider: BaseServiceProvider, - query: str, - subset: str = None, + self, + reporter: Reporter, + project: ProjectEntity, + folder: FolderEntity, + service_provider: BaseServiceProvider, + query: str, + subset: str = None, ): super().__init__(reporter) self._project = project @@ -218,13 +218,13 @@ def execute(self) -> Response: class ListItems(BaseUseCase): def __init__( - self, - project: ProjectEntity, - folder: FolderEntity, - service_provider: BaseServiceProvider, - search_condition: Condition, - recursive: bool = False, - include_custom_metadata: bool = False, + self, + project: ProjectEntity, + 
folder: FolderEntity, + service_provider: BaseServiceProvider, + search_condition: Condition, + recursive: bool = False, + include_custom_metadata: bool = False, ): super().__init__() self._project = project @@ -240,8 +240,8 @@ def validate_recursive_case(self): def validate_project_type(self): if ( - self._project.type == constants.ProjectType.PIXEL.value - and self._include_custom_metadata + self._project.type == constants.ProjectType.PIXEL.value + and self._include_custom_metadata ): raise AppException(constants.METADATA_DEPRICATED_FOR_PIXEL) @@ -288,12 +288,12 @@ class AssignItemsUseCase(BaseUseCase): CHUNK_SIZE = 500 def __init__( - self, - service_provider: BaseServiceProvider, - project: ProjectEntity, - folder: FolderEntity, - item_names: list, - user: str, + self, + service_provider: BaseServiceProvider, + project: ProjectEntity, + folder: FolderEntity, + item_names: list, + user: str, ): super().__init__() self._project = project @@ -303,7 +303,7 @@ def __init__( self._service_provider = service_provider def validate_item_names( - self, + self, ): self._item_names = list(set(self._item_names)) @@ -316,7 +316,7 @@ def execute(self): project=self._project, folder=self._folder, user=self._user, - item_names=self._item_names[i: i + self.CHUNK_SIZE], # noqa: E203 + item_names=self._item_names[i : i + self.CHUNK_SIZE], # noqa: E203 ) if not response.ok and response.error: # User not found self._response.errors += response.error @@ -333,11 +333,11 @@ class UnAssignItemsUseCase(BaseUseCase): CHUNK_SIZE = 500 def __init__( - self, - service_provider: BaseServiceProvider, - project: ProjectEntity, - folder: FolderEntity, - item_names: list, + self, + service_provider: BaseServiceProvider, + project: ProjectEntity, + folder: FolderEntity, + item_names: list, ): super().__init__() self._project = project @@ -351,7 +351,7 @@ def execute(self): response = self._service_provider.projects.un_assign_items( project=self._project, folder=self._folder, - 
item_names=self._item_names[i: i + self.CHUNK_SIZE], # noqa: E203 + item_names=self._item_names[i : i + self.CHUNK_SIZE], # noqa: E203 ) if not response.ok: self._response.errors = AppException( @@ -365,14 +365,14 @@ class AttachItems(BaseReportableUseCase): CHUNK_SIZE = 500 def __init__( - self, - reporter: Reporter, - project: ProjectEntity, - folder: FolderEntity, - attachments: List[AttachmentEntity], - annotation_status: str, - service_provider: BaseServiceProvider, - upload_state_code: int = constants.UploadState.EXTERNAL.value, + self, + reporter: Reporter, + project: ProjectEntity, + folder: FolderEntity, + attachments: List[AttachmentEntity], + annotation_status: str, + service_provider: BaseServiceProvider, + upload_state_code: int = constants.UploadState.EXTERNAL.value, ): super().__init__(reporter) self._project = project @@ -403,8 +403,8 @@ def validate_limitations(self): elif attachments_count > response.data.project_limit.remaining_image_count: raise AppValidationException(constants.ATTACH_PROJECT_LIMIT_ERROR_MESSAGE) elif ( - response.data.user_limit - and attachments_count > response.data.user_limit.remaining_image_count + response.data.user_limit + and attachments_count > response.data.user_limit.remaining_image_count ): raise AppValidationException(constants.ATTACH_USER_LIMIT_ERROR_MESSAGE) @@ -422,7 +422,7 @@ def execute(self) -> Response: attached = [] self.reporter.start_progress(self.attachments_count, "Attaching URLs") for i in range(0, self.attachments_count, self.CHUNK_SIZE): - attachments = self._attachments[i: i + self.CHUNK_SIZE] # noqa: E203 + attachments = self._attachments[i : i + self.CHUNK_SIZE] # noqa: E203 response = self._service_provider.items.list_by_names( project=self._project, folder=self._folder, @@ -468,14 +468,14 @@ class CopyItems(BaseReportableUseCase): CHUNK_SIZE = 500 def __init__( - self, - reporter: Reporter, - project: ProjectEntity, - from_folder: FolderEntity, - to_folder: FolderEntity, - item_names: List[str], 
- service_provider: BaseServiceProvider, - include_annotations: bool, + self, + reporter: Reporter, + project: ProjectEntity, + from_folder: FolderEntity, + to_folder: FolderEntity, + item_names: List[str], + service_provider: BaseServiceProvider, + include_annotations: bool, ): super().__init__(reporter) self._project = project @@ -518,7 +518,7 @@ def execute(self): cand_items = self._service_provider.items.list_by_names( project=self._project, folder=self._to_folder, - names=items[i: i + self.CHUNK_SIZE], # noqa + names=items[i : i + self.CHUNK_SIZE], # noqa ).data if isinstance(cand_items, dict): continue @@ -533,7 +533,7 @@ def execute(self): return self._response if items_to_copy: for i in range(0, len(items_to_copy), self.CHUNK_SIZE): - chunk_to_copy = items_to_copy[i: i + self.CHUNK_SIZE] # noqa: E203 + chunk_to_copy = items_to_copy[i : i + self.CHUNK_SIZE] # noqa: E203 response = self._service_provider.items.copy_multiple( project=self._project, from_folder=self._from_folder, @@ -558,7 +558,7 @@ def execute(self): cand_items = self._service_provider.items.list_by_names( project=self._project, folder=self._to_folder, - names=items[i: i + self.CHUNK_SIZE], # noqa + names=items[i : i + self.CHUNK_SIZE], # noqa ) if isinstance(cand_items, dict): continue @@ -583,13 +583,13 @@ class MoveItems(BaseReportableUseCase): CHUNK_SIZE = 1000 def __init__( - self, - reporter: Reporter, - project: ProjectEntity, - from_folder: FolderEntity, - to_folder: FolderEntity, - item_names: List[str], - service_provider: BaseServiceProvider, + self, + reporter: Reporter, + project: ProjectEntity, + from_folder: FolderEntity, + to_folder: FolderEntity, + item_names: List[str], + service_provider: BaseServiceProvider, ): super().__init__(reporter) self._project = project @@ -637,7 +637,7 @@ def execute(self): project=self._project, from_folder=self._from_folder, to_folder=self._to_folder, - item_names=items[i: i + self.CHUNK_SIZE], # noqa: E203 + item_names=items[i : i + 
self.CHUNK_SIZE], # noqa: E203 ) if response.ok and response.data.get("done"): moved_images.extend(response.data["done"]) @@ -657,13 +657,13 @@ class SetAnnotationStatues(BaseReportableUseCase): ERROR_MESSAGE = "Failed to change status" def __init__( - self, - reporter: Reporter, - project: ProjectEntity, - folder: FolderEntity, - annotation_status: str, - service_provider: BaseServiceProvider, - item_names: List[str] = None, + self, + reporter: Reporter, + project: ProjectEntity, + folder: FolderEntity, + annotation_status: str, + service_provider: BaseServiceProvider, + item_names: List[str] = None, ): super().__init__(reporter) self._project = project @@ -685,7 +685,7 @@ def validate_items(self): return existing_items = [] for i in range(0, len(self._item_names), self.CHUNK_SIZE): - search_names = self._item_names[i: i + self.CHUNK_SIZE] # noqa + search_names = self._item_names[i : i + self.CHUNK_SIZE] # noqa response = self._service_provider.items.list_by_names( project=self._project, folder=self._folder, @@ -709,7 +709,7 @@ def execute(self): status_changed = self._service_provider.items.set_statuses( project=self._project, folder=self._folder, - item_names=self._item_names[i: i + self.CHUNK_SIZE], # noqa: E203, + item_names=self._item_names[i : i + self.CHUNK_SIZE], # noqa: E203, annotation_status=self._annotation_status_code, ) if not status_changed: @@ -723,13 +723,13 @@ class SetApprovalStatues(BaseReportableUseCase): ERROR_MESSAGE = "Failed to change approval status." 
def __init__( - self, - reporter: Reporter, - project: ProjectEntity, - folder: FolderEntity, - approval_status: str, - service_provider: BaseServiceProvider, - item_names: List[str] = None, + self, + reporter: Reporter, + project: ProjectEntity, + folder: FolderEntity, + approval_status: str, + service_provider: BaseServiceProvider, + item_names: List[str] = None, ): super().__init__(reporter) self._project = project @@ -757,7 +757,7 @@ def validate_items(self): self._item_names = list(_tmp) existing_items = [] for i in range(0, len(self._item_names), self.CHUNK_SIZE): - search_names = self._item_names[i: i + self.CHUNK_SIZE] # noqa + search_names = self._item_names[i : i + self.CHUNK_SIZE] # noqa response = self._service_provider.items.list_by_names( project=self._project, folder=self._folder, @@ -781,13 +781,15 @@ def execute(self): response = self._service_provider.items.set_approval_statuses( project=self._project, folder=self._folder, - item_names=self._item_names[i: i + self.CHUNK_SIZE], # noqa: E203, + item_names=self._item_names[i : i + self.CHUNK_SIZE], # noqa: E203, approval_status=self._approval_status_code, ) if not response.ok: - if response.error == 'Unsupported project type.': - self._response.errors = f"The function is not supported for" \ - f" {constants.ProjectType.get_name(self._project.type)} projects." + if response.error == "Unsupported project type.": + self._response.errors = ( + f"The function is not supported for" + f" {constants.ProjectType.get_name(self._project.type)} projects." 
+ ) else: self._response.errors = self.ERROR_MESSAGE return self._response @@ -803,11 +805,11 @@ class DeleteItemsUseCase(BaseUseCase): CHUNK_SIZE = 1000 def __init__( - self, - project: ProjectEntity, - folder: FolderEntity, - service_provider: BaseServiceProvider, - item_names: List[str] = None, + self, + project: ProjectEntity, + folder: FolderEntity, + service_provider: BaseServiceProvider, + item_names: List[str] = None, ): super().__init__() self._project = project @@ -838,7 +840,7 @@ def execute(self): for i in range(0, len(item_ids), self.CHUNK_SIZE): self._service_provider.items.delete_multiple( project=self._project, - item_ids=item_ids[i: i + self.CHUNK_SIZE], # noqa: E203 + item_ids=item_ids[i : i + self.CHUNK_SIZE], # noqa: E203 ) logger.info( f"Items deleted in project {self._project.name}{'/' + self._folder.name if not self._folder.is_root else ''}" @@ -851,13 +853,13 @@ class AddItemsToSubsetUseCase(BaseUseCase): CHUNK_SIZE = 5000 def __init__( - self, - reporter: Reporter, - project: ProjectEntity, - subset_name: str, - items: List[dict], - service_provider: BaseServiceProvider, - root_folder: FolderEntity, + self, + reporter: Reporter, + project: ProjectEntity, + subset_name: str, + items: List[dict], + service_provider: BaseServiceProvider, + root_folder: FolderEntity, ): self.reporter = reporter self.project = project @@ -871,7 +873,7 @@ def __init__( super().__init__() def __filter_duplicates( - self, + self, ): def uniqueQ(item, seen): result = True @@ -893,7 +895,7 @@ def uniqueQ(item, seen): return uniques def __filter_invalid_items( - self, + self, ): def validQ(item): if "id" in item: @@ -908,7 +910,7 @@ def validQ(item): return filtered_items def __separate_to_paths( - self, + self, ): for item in self.items: if "id" in item: @@ -1021,7 +1023,7 @@ def __distribute_to_results(self, item_id, response, item): self.results["failed"].append(item) def validate_items( - self, + self, ): filtered_items = self.__filter_duplicates() @@ -1039,7 
+1041,7 @@ def validate_project(self): raise AppException(response.error) def execute( - self, + self, ): if self.is_valid(): @@ -1078,7 +1080,7 @@ def execute( for i in range(0, len(self.item_ids), self.CHUNK_SIZE): tmp_response = self._service_provider.subsets.add_items( project=self.project, - item_ids=self.item_ids[i: i + self.CHUNK_SIZE], # noqa + item_ids=self.item_ids[i : i + self.CHUNK_SIZE], # noqa subset=subset, ) diff --git a/src/superannotate/lib/core/usecases/projects.py b/src/superannotate/lib/core/usecases/projects.py index 3166156e2..1666367bd 100644 --- a/src/superannotate/lib/core/usecases/projects.py +++ b/src/superannotate/lib/core/usecases/projects.py @@ -13,10 +13,8 @@ from lib.core.entities import TeamEntity from lib.core.exceptions import AppException from lib.core.exceptions import AppValidationException -from lib.core.reporter import Reporter from lib.core.response import Response from lib.core.serviceproviders import BaseServiceProvider -from lib.core.usecases.base import BaseReportableUseCase from lib.core.usecases.base import BaseUseCase from lib.core.usecases.base import BaseUserBasedUseCase from requests.exceptions import RequestException @@ -65,7 +63,7 @@ def execute(self): if response.ok: self._response.data = response.data else: - self._response.errors = response.errors + self._response.errors = response.error return self._response @@ -102,10 +100,9 @@ def execute(self): return self._response -class GetProjectMetaDataUseCase(BaseReportableUseCase): +class GetProjectMetaDataUseCase(BaseUseCase): def __init__( self, - reporter: Reporter, project: ProjectEntity, service_provider: BaseServiceProvider, include_annotation_classes: bool, @@ -114,7 +111,7 @@ def __init__( include_contributors: bool, include_complete_image_count: bool, ): - super().__init__(reporter) + super().__init__() self._project = project self._service_provider = service_provider @@ -191,7 +188,8 @@ def validate_settings(self): raise AppValidationException( "FrameRate 
is available only for Video projects" ) - if isinstance(setting.value, (float, int)): + try: + setting.value = float(setting.value) if ( not (0.0001 < setting.value < 120) or decimal.Decimal(str(setting.value)).as_tuple().exponent < -3 @@ -211,7 +209,7 @@ def validate_settings(self): ) else: frame_mode.value = 1 - else: + except ValueError: raise AppValidationException("The FrameRate value should be float") def validate_project_name(self): @@ -256,6 +254,9 @@ def execute(self): if instructions_link: entity.instructions_link = instructions_link self._service_provider.projects.update(entity) + if not entity: + self._response.errors = AppException("Failed to create project.") + return self._response self._response.data = entity data = {} # TODO delete @@ -297,10 +298,8 @@ def execute(self): self._response.errors = set_workflow_response.errors logger.info( - "Created project %s (ID %s) with type %s", - self._response.data.name, - self._response.data.id, - constances.ProjectType.get_name(self._response.data.type), + f"Created project {entity.name} (ID {entity.id}) " + f"with type {constances.ProjectType.get_name(self._response.data.type)}" ) return self._response @@ -376,7 +375,7 @@ def execute(self): if self.is_valid(): response = self._service_provider.projects.update(self._project) if not response.ok: - self._response.errors = response.errors + self._response.errors = response.error else: self._response.data = response.data return self._response @@ -927,14 +926,13 @@ class AddContributorsToProject(BaseUserBasedUseCase): def __init__( self, - reporter: Reporter, team: TeamEntity, project: ProjectEntity, emails: list, role: str, service_provider: BaseServiceProvider, ): - super().__init__(reporter, emails) + super().__init__(emails) self._team = team self._project = project self._role = role @@ -973,7 +971,7 @@ def execute(self): to_skip = list(set(self._emails).difference(to_add)) if to_skip: - self.reporter.log_warning( + logger.warning( f"Skipped 
{len(to_skip)}/{len(self._emails)} " "contributors that are out of the team scope or already have access to the project." ) @@ -986,7 +984,7 @@ def execute(self): ], ) if response and not response.data.get("invalidUsers"): - self.reporter.log_info( + logger.info( f"Added {len(to_add)}/{len(self._emails)} " f"contributors to the project {self._project.name} with the {self._role} role." ) @@ -1001,13 +999,12 @@ class InviteContributorsToTeam(BaseUserBasedUseCase): def __init__( self, - reporter: Reporter, team: TeamEntity, emails: list, set_admin: bool, service_provider: BaseServiceProvider, ): - super().__init__(reporter, emails) + super().__init__(emails) self._team = team self._set_admin = set_admin self._service_provider = service_provider @@ -1026,7 +1023,7 @@ def execute(self): to_add = list(emails.difference(to_skip)) invited, failed = [], to_skip if to_skip: - self.reporter.log_warning( + logger.warning( f"Found {len(to_skip)}/{len(self._emails)} existing members of the team." ) if to_add: @@ -1043,14 +1040,14 @@ def execute(self): response.data["failed"]["emails"], ) if invited: - self.reporter.log_info( + logger.info( f"Sent team {'admin' if self._set_admin else 'contributor'} invitations" f" to {len(invited)}/{len(self._emails)} users." ) if failed: to_skip = set(to_skip) to_skip.update(set(failed)) - self.reporter.log_info( + logger.info( f"Skipped team {'admin' if self._set_admin else 'contributor'} " f"invitations for {len(failed)}/{len(self._emails)} users." 
) @@ -1058,14 +1055,13 @@ def execute(self): return self._response -class ListSubsetsUseCase(BaseReportableUseCase): +class ListSubsetsUseCase(BaseUseCase): def __init__( self, - reporter: Reporter, project: ProjectEntity, service_provider: BaseServiceProvider, ): - super().__init__(reporter) + super().__init__() self._project = project self._service_provider = service_provider diff --git a/src/superannotate/lib/infrastructure/controller.py b/src/superannotate/lib/infrastructure/controller.py index e02edfaca..2332f8183 100644 --- a/src/superannotate/lib/infrastructure/controller.py +++ b/src/superannotate/lib/infrastructure/controller.py @@ -73,7 +73,6 @@ def get_metadata( include_complete_image_count: bool = False, ): use_case = usecases.GetProjectMetaDataUseCase( - reporter=Reporter(), project=project, service_provider=self.service_provider, include_annotation_classes=include_annotation_classes, @@ -160,7 +159,6 @@ def set_workflows(self, project: ProjectEntity, steps: List): def add_contributors(self, project: ProjectEntity, team, emails: list, role: str): project = self.get_metadata(project).data use_case = usecases.AddContributorsToProject( - reporter=Reporter(), team=team, project=project, emails=emails, @@ -233,7 +231,6 @@ def _get_s3_repository(self, project: ProjectEntity, folder: FolderEntity): def create(self, project: ProjectEntity, annotation_class: AnnotationClassEntity): use_case = usecases.CreateAnnotationClassUseCase( - reporter=Reporter(), annotation_class=annotation_class, project=project, service_provider=self.service_provider, @@ -244,7 +241,6 @@ def create_multiple( self, project: ProjectEntity, annotation_classes: List[AnnotationClassEntity] ): use_case = usecases.CreateAnnotationClassesUseCase( - reporter=Reporter(), service_provider=self.service_provider, annotation_classes=annotation_classes, project=project, @@ -293,7 +289,6 @@ def copy_multiple( def download(self, project: ProjectEntity, download_path: str): use_case = 
usecases.DownloadAnnotationClassesUseCase( project=project, - reporter=Reporter(), download_path=download_path, service_provider=self.service_provider, ) @@ -375,7 +370,7 @@ def get_by_name( def get_by_id(self, item_id: int, project: ProjectEntity): use_case = usecases.GetItemByIDUseCase( item_id=item_id, - project=project.data, + project=project, service_provider=self.service_provider, ) return use_case.execute() @@ -763,7 +758,6 @@ def attach_items( class SubsetManager(BaseManager): def list(self, project: ProjectEntity): use_case = usecases.ListSubsetsUseCase( - reporter=Reporter(), project=project, service_provider=self.service_provider, ) @@ -1224,7 +1218,6 @@ def validate_annotations(self, project_type: str, annotation: dict): def invite_contributors_to_team(self, emails: list, set_admin: bool): use_case = usecases.InviteContributorsToTeam( - reporter=self.get_default_reporter(), team=self.team, emails=emails, set_admin=set_admin, diff --git a/src/superannotate/lib/infrastructure/validators.py b/src/superannotate/lib/infrastructure/validators.py index 33d48d63d..55c2faa08 100644 --- a/src/superannotate/lib/infrastructure/validators.py +++ b/src/superannotate/lib/infrastructure/validators.py @@ -1,7 +1,64 @@ import os +import typing from collections import defaultdict from pydantic import ValidationError +from pydantic import validators +from pydantic.errors import WrongConstantError + + +def wrong_constant_error(self): + permitted = ", ".join(repr(v) for v in self.permitted) # type: ignore + return f"Available values are {permitted}." + + +WrongConstantError.__str__ = wrong_constant_error + + +def make_literal_validator( + type_: typing.Any, +) -> typing.Callable[[typing.Any], typing.Any]: + """ + Adding ability to input literal in the lower case. 
+ """ + permitted_choices = validators.all_literal_values(type_) + allowed_choices = {v.lower(): v for v in permitted_choices} + + def literal_validator(v: typing.Any) -> typing.Any: + try: + return allowed_choices[v.lower()] + except KeyError: + raise WrongConstantError(given=v, permitted=permitted_choices) + + return literal_validator + + +def make_typeddict_validator( + typeddict_cls: typing.Type["TypedDict"], config: typing.Type["BaseConfig"] # type: ignore[valid-type] +) -> typing.Callable[[typing.Any], typing.Dict[str, typing.Any]]: + """ + Wrapping to ignore extra keys + """ + from pydantic.annotated_types import create_model_from_typeddict + from pydantic import Extra + + config.extra = Extra.ignore + + TypedDictModel = create_model_from_typeddict( + typeddict_cls, + __config__=config, + __module__=typeddict_cls.__module__, + ) + typeddict_cls.__pydantic_model__ = TypedDictModel # type: ignore[attr-defined] + + def typeddict_validator(values: "TypedDict") -> typing.Dict[str, typing.Any]: # type: ignore[valid-type] + return TypedDictModel.parse_obj(values).dict(exclude_unset=True) + + return typeddict_validator + + +validators.make_literal_validator = make_literal_validator +validators.make_typeddict_validator = make_typeddict_validator def get_tabulation() -> int: diff --git a/tests/convertors/__init__.py b/tests/convertors/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/tests/convertors/test_coco.py b/tests/convertors/test_coco.py deleted file mode 100644 index 3a4057ecc..000000000 --- a/tests/convertors/test_coco.py +++ /dev/null @@ -1,112 +0,0 @@ -# TODO refactor -# from pathlib import Path -# -# import superannotate as sa -# -# from ..common import upload_project -# -# -# def test_coco_vector_instance(tmpdir): -# project_name = "coco2sa_vector_instance" -# -# input_dir = Path( -# "tests" -# ) / "converter_test" / "COCO" / "input" / "toSuperAnnotate" / "instance_segmentation" -# out_dir = Path(tmpdir) / project_name -# 
sa.import_annotation( -# input_dir, out_dir, "COCO", "instances_test", "Vector", -# "instance_segmentation" -# ) -# -# description = 'coco vector instance segmentation' -# ptype = 'Vector' -# upload_project(out_dir, project_name, description, ptype) -# -# -# def test_coco_vector_object(tmpdir): -# project_name = "coco2sa_vector_object" -# -# input_dir = Path( -# "tests" -# ) / "converter_test" / "COCO" / "input" / "toSuperAnnotate" / "instance_segmentation" -# out_dir = Path(tmpdir) / project_name -# sa.import_annotation( -# input_dir, out_dir, "COCO", "instances_test", "Vector", -# "object_detection" -# ) -# -# description = 'coco vector object detection' -# ptype = 'Vector' -# upload_project(out_dir, project_name, description, ptype) -# -# -# def test_coco_vector_keypoint(tmpdir): -# project_name = "coco2sa_keypoint" -# -# input_dir = Path( -# "tests" -# ) / "converter_test" / "COCO" / "input" / "toSuperAnnotate" / "keypoint_detection/" -# out_dir = Path(tmpdir) / project_name -# sa.import_annotation( -# input_dir, out_dir, "COCO", "person_keypoints_test", "Vector", -# "keypoint_detection" -# ) -# -# description = 'coco vector keypoint detection' -# ptype = 'Vector' -# upload_project(out_dir, project_name, description, ptype) -# -# -# def test_coco_panoptic(tmpdir): -# project_name = "coco2sa_panoptic" -# -# input_dir = Path( -# "tests" -# ) / "converter_test" / "COCO" / "input" / "toSuperAnnotate" / "panoptic_segmentation" -# out_dir = Path(tmpdir) / project_name -# sa.import_annotation( -# input_dir, out_dir, "COCO", "panoptic_test", "Pixel", -# "panoptic_segmentation" -# ) -# -# description = 'coco pixel panoptic segmentation' -# ptype = 'Pixel' -# upload_project(out_dir, project_name, description, ptype) -# -# -# def test_coco_pixel_instance(tmpdir): -# project_name = "coco2sa_pixel_instance" -# -# input_dir = Path( -# "tests" -# ) / "converter_test" / "COCO" / "input" / "toSuperAnnotate" / "instance_segmentation" -# out_dir = Path(tmpdir) / project_name -# 
sa.import_annotation( -# input_dir, out_dir, "COCO", "instances_test", "Pixel", -# "instance_segmentation" -# ) -# -# description = 'coco pixel instance segmentation' -# ptype = 'Pixel' -# upload_project(out_dir, project_name, description, ptype) -# -# -# def test_sa_to_coco_to_sa(tmpdir): -# input_dir = Path("tests") / "sample_project_pixel" -# output1 = Path(tmpdir) / 'to_coco' -# output2 = Path(tmpdir) / 'to_sa' -# -# sa.export_annotation( -# input_dir, output1, "COCO", "object_test", "Pixel", -# "instance_segmentation" -# ) -# -# sa.import_annotation( -# output1, output2, "COCO", "object_test", "Pixel", -# "instance_segmentation", 'image_set' -# ) -# -# project_name = 'coco_pipeline_new' -# description = 'test_instane' -# ptype = 'Pixel' -# upload_project(output2, project_name, description, ptype) diff --git a/tests/convertors/test_consensus.py b/tests/convertors/test_consensus.py deleted file mode 100644 index 52244e062..000000000 --- a/tests/convertors/test_consensus.py +++ /dev/null @@ -1,102 +0,0 @@ -import os -import tempfile -import time -from os.path import dirname - -from src.superannotate import SAClient -sa = SAClient() -from tests.integration.base import BaseTestCase - - -class TestConsensus(BaseTestCase): - PROJECT_NAME = "consensus" - PROJECT_DESCRIPTION = "Desc" - PROJECT_TYPE = "Vector" - NEW_PROJECT_NAME = "new" - TEST_FOLDER_PTH = "data_set" - TEST_FOLDER_PATH = "data_set/sample_project_vector" - MODEL_NAME = "Instance segmentation (trained on COCO)" - TEST_EXPORT_ROOT = "data_set/consensus_benchmark/consensus_test_data" - CONSENSUS_PREFIX = "consensus_" - - @property - def folder_path(self): - return os.path.join(dirname(dirname(__file__)), self.TEST_FOLDER_PATH) - - @property - def export_path(self): - return os.path.join(dirname(dirname(__file__)), self.TEST_EXPORT_ROOT) - - def test_consensus(self): - annot_types = ["polygon", "bbox", "point"] - folder_names = ["consensus_1", "consensus_2", "consensus_3"] - df_column_names = [ - 
"creatorEmail", - "imageName", - "instanceId", - "area", - "className", - "attributes", - "folderName", - "score", - ] - - with tempfile.TemporaryDirectory() as tmpdir: - temp_export_path = str(tmpdir) - - for i in range(1, 4): - sa.create_folder(self.PROJECT_NAME, "consensus_" + str(i)) - sa.create_annotation_classes_from_classes_json( - self.PROJECT_NAME, self.export_path + "/classes/classes.json" - ) - sa.upload_images_from_folder_to_project( - self.PROJECT_NAME, - self.export_path + "/images", - annotation_status="Completed", - ) - for i in range(1, 4): - sa.upload_images_from_folder_to_project( - self.PROJECT_NAME + f"/{self.CONSENSUS_PREFIX}" + str(i), - self.export_path + "/images", - annotation_status="Completed", - ) - time.sleep(2) - sa.upload_annotations_from_folder_to_project( - self.PROJECT_NAME, self.export_path - ) - for i in range(1, 4): - sa.upload_annotations_from_folder_to_project( - self.PROJECT_NAME + f"/{self.CONSENSUS_PREFIX}" + str(i), - self.export_path + f"/{self.CONSENSUS_PREFIX}" + str(i), - ) - - for annot_type in annot_types: - res_df = sa.consensus( - self.PROJECT_NAME, folder_names, annot_type=annot_type - ) - # test content of projectName column - assert sorted(res_df["folderName"].unique()) == folder_names - - # test structure of resulting DataFrame - assert sorted(res_df.columns) == sorted(df_column_names) - - # test lower bound of the score - assert (res_df["score"] >= 0).all() - - # test upper bound of the score - assert (res_df["score"] <= 1).all() - - image_names = [ - "bonn_000000_000019_leftImg8bit.png", - "bielefeld_000000_000321_leftImg8bit.png", - ] - - # test filtering images with given image names list - res_images = sa.consensus( - self.PROJECT_NAME, - folder_names, - export_root=temp_export_path, - image_list=image_names, - ) - - assert sorted(res_images["imageName"].unique()) == sorted(image_names) diff --git a/tests/convertors/test_conversion.py b/tests/convertors/test_conversion.py deleted file mode 100644 index 
390420945..000000000 --- a/tests/convertors/test_conversion.py +++ /dev/null @@ -1,264 +0,0 @@ -import json -import os -import tempfile -from os.path import dirname -from pathlib import Path -from unittest import TestCase -import pytest - -from src.superannotate import SAClient -sa = SAClient() - - -class TestCocoSplit(TestCase): - TEST_FOLDER_PATH = "data_set/converter_test/COCO/input/toSuperAnnotate" - TEST_BASE_FOLDER_PATH = "data_set/converter_test" - - @property - def base_folder_path(self): - return Path( - Path(os.path.join(dirname(dirname(__file__)), self.TEST_BASE_FOLDER_PATH)) - ) - - @property - def folder_path(self): - return Path( - Path(os.path.join(dirname(dirname(__file__)), self.TEST_FOLDER_PATH)) - ) - - def test_panoptic_segmentation_coco2sa(self): - with tempfile.TemporaryDirectory() as tmp_dir: - input_dir = self.folder_path / "panoptic_segmentation" - out_path = Path(tmp_dir) / "toSuperAnnotate" / "panoptic_test" - sa.import_annotation( - input_dir, - out_path, - "COCO", - "panoptic_test", - "Pixel", - "panoptic_segmentation", - ) - - def test_keypoint_detection_coco2sa(self): - """ - test keypoint-detection - """ - with tempfile.TemporaryDirectory() as tmp_dir: - input_dir = self.folder_path / "keypoint_detection" - out_path = Path(tmp_dir) / "toSuperAnnotate" / "keypoint_test" - sa.import_annotation( - input_dir, - out_path, - "COCO", - "person_keypoints_test", - "Vector", - "keypoint_detection", - ) - - def test_keypoint_detection_coco2sa_multi_template(self): - with tempfile.TemporaryDirectory() as tmp_dir: - input_dir = self.folder_path / "keypoint_detection_multi_template" - out_path = ( - Path(tmp_dir) / "toSuperAnnotate" / "keypoint_detection_multi_template" - ) - - sa.import_annotation( - input_dir, - out_path, - "COCO", - "keypoint_multi_template_test", - "Vector", - "keypoint_detection", - ) - import json - - with open(str(Path(input_dir) / "truth.json")) as f: - truth = json.loads(f.read()) - - with open( - str( - Path(out_path) - 
/ "68307_47130_68308_47130_68307_47131_68308_47131_0.png___objects.json" - ) - ) as f: - data = json.loads(f.read()) - self.assertEqual(data, truth) - - def test_instance_segmentation_coco2sa(self): - """ - test instance segmentation - """ - with tempfile.TemporaryDirectory() as tmp_dir: - input_dir = self.folder_path / "instance_segmentation" - out_path = Path(tmp_dir) / "toSuperAnnotate" / "instances_test" - sa.import_annotation( - input_dir, - out_path, - "COCO", - "instances_test", - "Vector", - "instance_segmentation", - ) - - def test_pan_optic_segmentation_sa2coco(self): - """ - # SA to COCO - # test panoptic segmentation - """ - with tempfile.TemporaryDirectory() as tmp_dir: - input_dir = ( - self.base_folder_path - / "COCO" - / "input" - / "fromSuperAnnotate" - / "cats_dogs_panoptic_segm" - ) - out_path = Path(tmp_dir) / "fromSuperAnnotate" / "panoptic_test" - sa.export_annotation( - input_dir, - out_path, - "COCO", - "panoptic_test", - "Pixel", - "panoptic_segmentation", - ) - - def test_keypoint_detection_sa2coco(self): - with tempfile.TemporaryDirectory() as tmp_dir: - input_dir = ( - self.base_folder_path - / "COCO" - / "input" - / "fromSuperAnnotate" - / "cats_dogs_vector_keypoint_det" - ) - out_path = Path(tmp_dir) / "fromSuperAnnotate" / "keypoint_test_vector" - sa.export_annotation( - input_dir, - out_path, - "COCO", - "keypoint_test_vector", - "Vector", - "keypoint_detection", - ) - - def test_instance_segmentation_sa2coco_pixel(self): - with tempfile.TemporaryDirectory() as tmp_dir: - input_dir = ( - self.base_folder_path - / "COCO" - / "input" - / "fromSuperAnnotate" - / "cats_dogs_pixel_instance_segm" - ) - out_path = Path(tmp_dir) / "fromSuperAnnotate" / "instance_test_pixel" - sa.export_annotation( - input_dir, - out_path, - "COCO", - "instance_test_pixel", - "Pixel", - "instance_segmentation", - ) - - def test_instance_segmentation_sa2coco_vector(self): - with tempfile.TemporaryDirectory() as tmp_dir: - input_dir = ( - self.base_folder_path 
- / "COCO" - / "input" - / "fromSuperAnnotate" - / "cats_dogs_vector_instance_segm" - ) - out_path = Path(tmp_dir) / "fromSuperAnnotate" / "instance_test_vector" - sa.export_annotation( - input_dir, - out_path, - "COCO", - "instance_test_vector", - "Vector", - "instance_segmentation", - ) - - def test_instance_segmentation_sa2coco_vector_empty_array(self): - with tempfile.TemporaryDirectory() as tmp_dir: - input_dir = ( - self.base_folder_path - / "COCO" - / "input" - / "fromSuperAnnotate" - / "cats_dogs_vector_instance_segm_empty_array" - ) - out_path = ( - Path(tmp_dir) - / "empty_array" - / "fromSuperAnnotate" - / "instance_test_vector" - ) - sa.export_annotation( - input_dir, - out_path, - "COCO", - "instance_test_vector", - "Vector", - "instance_segmentation", - ) - json_path = out_path / "instance_test_vector.json" - with open(json_path) as f: - data = json.loads(f.read()) - truth_path = input_dir / "truth.json" - with open(truth_path) as f: - truth = json.loads(f.read()) - data["info"]["date_created"] = 0 - truth["info"]["date_created"] = 0 - self.assertEqual(truth, data) - - def test_instance_segmentation_sa2coco_vector_empty_name(self): - with tempfile.TemporaryDirectory() as tmp_dir: - input_dir = ( - self.base_folder_path - / "COCO" - / "input" - / "fromSuperAnnotate" - / "vector_no_name" - ) - out_path = ( - Path(tmp_dir) - / "empty_name" - / "fromSuperAnnotate" - / "instance_test_vector" - ) - sa.export_annotation( - input_dir, - out_path, - "COCO", - "instance_test_vector", - "Vector", - "instance_segmentation", - ) - - @pytest.mark.skip(reason="Need to adjust") - def test_upload_annotations_with_template_id(self): - with tempfile.TemporaryDirectory() as tmp_dir: - tmpdir = Path(tmp_dir) - project_name = "test_templates" - for project in sa.search_projects(project_name): - sa.delete_project(project) - project = sa.create_project(project_name, "test", "Vector") - input_dir = self.base_folder_path / "sample_coco_with_templates" - 
sa.upload_images_from_folder_to_project(project, input_dir) - out_path = ( - Path(tmpdir) / "toSuperAnnotate" / "keypoint_detection_multi_template" - ) - - sa.import_annotation( - input_dir, - out_path, - "COCO", - "sample_coco", - "Vector", - "keypoint_detection", - ) - sa.upload_annotations_from_folder_to_project(project, out_path) - annotations = sa.get_annotations(project_name, "t.png")[0] - assert annotations[0]["instances"][0]["templateId"] == -1 diff --git a/tests/convertors/test_dataloop.py b/tests/convertors/test_dataloop.py deleted file mode 100644 index ba0e71399..000000000 --- a/tests/convertors/test_dataloop.py +++ /dev/null @@ -1,54 +0,0 @@ -from pathlib import Path - -import pytest -import superannotate as sa - - -@pytest.mark.skip(reason="Need to adjust") -def test_dataloop_convert_vector(tmpdir): - project_name = "dataloop2sa_vector_annotation" - - input_dir = ( - Path("tests") / "converter_test" / "DataLoop" / "input" / "toSuperAnnotate" - ) - out_dir = Path(tmpdir) / project_name - sa.import_annotation( - input_dir, out_dir, "DataLoop", "", "Vector", "vector_annotation" - ) - - description = "dataloop vector annotation" - ptype = "Vector" - # upload_project(out_dir, project_name, description, ptype) - - -@pytest.mark.skip(reason="Need to adjust") -def test_dataloop_convert_object(tmpdir): - project_name = "dataloop2sa_vector_object" - - input_dir = ( - Path("tests") / "converter_test" / "DataLoop" / "input" / "toSuperAnnotate" - ) - out_dir = Path(tmpdir) / project_name - sa.import_annotation( - input_dir, out_dir, "DataLoop", "", "Vector", "object_detection" - ) - - description = "dataloop object detection" - ptype = "Vector" - # upload_project(out_dir, project_name, description, ptype) - - -@pytest.mark.skip(reason="Need to adjust") -def test_dataloop_convert_instance(tmpdir): - project_name = "dataloop2sa_vector_instance" - - input_dir = ( - Path("tests") / "converter_test" / "DataLoop" / "input" / "toSuperAnnotate" - ) - out_dir = 
Path(tmpdir) / project_name - sa.import_annotation( - input_dir, out_dir, "DataLoop", "", "Vector", "instance_segmentation" - ) - description = "dataloop instance segmentation" - ptype = "Vector" - # upload_project(out_dir, project_name, description, ptype) diff --git a/tests/convertors/test_googlecloud.py b/tests/convertors/test_googlecloud.py deleted file mode 100644 index 67fe6f441..000000000 --- a/tests/convertors/test_googlecloud.py +++ /dev/null @@ -1,26 +0,0 @@ -from pathlib import Path - -import pytest -import superannotate as sa - - -@pytest.mark.skip(reason="Need to adjust") -def test_googlecloud_convert_web(tmpdir): - project_name = "googlcloud_object" - - input_dir = ( - Path("tests") / "converter_test" / "GoogleCloud" / "input" / "toSuperAnnotate" - ) - out_dir = Path(tmpdir) / project_name - sa.import_annotation( - input_dir, - out_dir, - "GoogleCloud", - "image_object_detection", - "Vector", - "object_detection", - ) - - description = "googlecloud object detection" - ptype = "Vector" - # upload_project(out_dir, project_name, description, ptype) diff --git a/tests/convertors/test_json_version_conversion.py b/tests/convertors/test_json_version_conversion.py deleted file mode 100644 index 0b3927446..000000000 --- a/tests/convertors/test_json_version_conversion.py +++ /dev/null @@ -1,55 +0,0 @@ -import json -import os -import tempfile -from os.path import dirname -from pathlib import Path -from unittest import TestCase - -from src.superannotate import SAClient -sa = SAClient() - - -class TestVersionConversion(TestCase): - TEST_BASE_FOLDER_PATH = "data_set/converter_test" - - @property - def base_folder_path(self): - return Path( - Path(os.path.join(dirname(dirname(__file__)), self.TEST_BASE_FOLDER_PATH)) - ) - - def test_json_version_conversion(self): - with tempfile.TemporaryDirectory() as tmp_dir: - input_dir = self.base_folder_path / "sa_json_versions" / "version2" - temp_dir = Path(tmp_dir) / "tmp_dir" - output_dir = Path(tmp_dir) / "output_dir" - - 
converted_files_old = sa.convert_json_version(input_dir, temp_dir, 1) - converted_files_new = sa.convert_json_version(temp_dir, output_dir, 2) - - assert len(converted_files_old) == len(converted_files_new) - files_list = input_dir.glob("*.json") - - metadata_keys = ["height", "width", "status", "pinned"] - comments_keys = ["x", "y", "resolved"] - for file in files_list: - input_data = json.load(open(file)) - output_data = json.load(open(output_dir / file.name)) - for key in metadata_keys: - assert input_data["metadata"][key] == output_data["metadata"][key] - - assert len(input_data["instances"]) == len(output_data["instances"]) - - assert len(input_data["comments"]) == len(output_data["comments"]) - for in_com, out_com in zip( - input_data["comments"], output_data["comments"] - ): - assert len(in_com["correspondence"]) == len( - out_com["correspondence"] - ) - for key in comments_keys: - assert in_com[key] == out_com[key] - - assert len(input_data["tags"]) == len(output_data["tags"]) - for in_tag, out_tag in zip(input_data["tags"], output_data["tags"]): - assert in_tag == out_tag diff --git a/tests/convertors/test_labelbox.py b/tests/convertors/test_labelbox.py deleted file mode 100644 index a3ca4eab7..000000000 --- a/tests/convertors/test_labelbox.py +++ /dev/null @@ -1,93 +0,0 @@ -from pathlib import Path - -import pytest - -import superannotate as sa - - -@pytest.mark.skip(reason="Need to adjust") -def test_labelbox_convert_vector(tmpdir): - project_name = "labelbox_vector_annotation" - - input_dir = ( - Path("tests") - / "converter_test" - / "LabelBox" - / "vector_annotations" - / "toSuperAnnotate" - ) - out_dir = Path(tmpdir) / project_name - dataset_name = "labelbox_example" - sa.import_annotation( - input_dir, out_dir, "LabelBox", dataset_name, "Vector", "vector_annotation" - ) - - description = "labelbox vector vector annotation" - ptype = "Vector" - # upload_project(out_dir, project_name, description, ptype) - - -@pytest.mark.skip(reason="Need to adjust") 
-def test_labelbox_convert_object(tmpdir): - project_name = "labelbox_object_vector" - - input_dir = ( - Path("tests") - / "converter_test" - / "LabelBox" - / "vector_annotations" - / "toSuperAnnotate" - ) - out_dir = Path(tmpdir) / project_name - dataset_name = "labelbox_example" - sa.import_annotation( - input_dir, out_dir, "LabelBox", dataset_name, "Vector", "object_detection" - ) - - description = "labelbox vector object detection" - ptype = "Vector" - # upload_project(out_dir, project_name, description, ptype) - - -@pytest.mark.skip(reason="Need to adjust") -def test_labelbox_convert_instance(tmpdir): - project_name = "labelbox_vector_instance" - - input_dir = ( - Path("tests") - / "converter_test" - / "LabelBox" - / "vector_annotations" - / "toSuperAnnotate" - ) - out_dir = Path(tmpdir) / project_name - dataset_name = "labelbox_example" - sa.import_annotation( - input_dir, out_dir, "LabelBox", dataset_name, "Vector", "instance_segmentation" - ) - - description = "labelbox vector instance_segmentation" - ptype = "Vector" - # upload_project(out_dir, project_name, description, ptype) - - -@pytest.mark.skip(reason="Need to adjust") -def test_labelbox_convert_instance_pixel(tmpdir): - project_name = "labelbox_pixel_instance" - - input_dir = ( - Path("tests") - / "converter_test" - / "LabelBox" - / "instance_segmentation" - / "toSuperAnnotate" - ) - out_dir = Path(tmpdir) / project_name - dataset_name = "labelbox_example" - sa.import_annotation( - input_dir, out_dir, "LabelBox", dataset_name, "Pixel", "instance_segmentation" - ) - - description = "labelbox pixel instance_segmentation" - ptype = "Pixel" - # upload_project(out_dir, project_name, description, ptype) diff --git a/tests/convertors/test_project_converter.py b/tests/convertors/test_project_converter.py deleted file mode 100644 index 109021b46..000000000 --- a/tests/convertors/test_project_converter.py +++ /dev/null @@ -1,73 +0,0 @@ -import json -import os -import tempfile -from os.path import dirname 
-from pathlib import Path -from unittest import TestCase - -from src.superannotate import SAClient -sa = SAClient() - - -class TestCocoSplit(TestCase): - TEST_FOLDER_PATH = "data_set" - - @property - def folder_path(self): - return Path( - Path(os.path.join(dirname(dirname(__file__)), self.TEST_FOLDER_PATH)) - ) - - @staticmethod - def compare_jsons(json_gen, input_dir): - for path in json_gen: - final_json = json.load(open(str(path))) - input_path = input_dir.joinpath(path.name) - init_json = json.load(open(str(input_path))) - - for init, final in zip(init_json["instances"], final_json["instances"]): - for key in init.keys(): - if key == "parts": - continue - init_value = init[key] - final_value = final[key] - assert init_value == final_value - - def test_pixel_vector_pixel(self): - input_dir = Path() - input_dir = input_dir.joinpath( - self.folder_path, - "converter_test", - "COCO", - "input", - "fromSuperAnnotate", - "cats_dogs_pixel_instance_segm", - ) - with tempfile.TemporaryDirectory() as tmp_dir: - temp_dir = Path(tmp_dir) / "output" - final_dir = Path(tmp_dir) / "output2" - - sa.convert_project_type(input_dir, temp_dir) - sa.convert_project_type(temp_dir, final_dir) - - gen = final_dir.glob("*.json") - self.compare_jsons(gen, input_dir) - - def test_vector_pixel_vector(self): - input_dir = ( - self.folder_path - / "converter_test" - / "COCO" - / "input" - / "fromSuperAnnotate" - / "cats_dogs_vector_instance_segm" - ) - with tempfile.TemporaryDirectory() as tmp_dir: - temp_dir = Path(tmp_dir) / "output" - final_dir = Path(tmp_dir) / "output2" - - sa.convert_project_type(input_dir, temp_dir) - sa.convert_project_type(temp_dir, final_dir) - - gen = input_dir.glob("*.json") - self.compare_jsons(gen, input_dir) diff --git a/tests/convertors/test_sagemaker.py b/tests/convertors/test_sagemaker.py deleted file mode 100644 index 4b561aff6..000000000 --- a/tests/convertors/test_sagemaker.py +++ /dev/null @@ -1,53 +0,0 @@ -from pathlib import Path - -import pytest 
-import superannotate as sa - - -@pytest.mark.skip(reason="Need to adjust") -def test_sagemaker_instance_segmentation(tmpdir): - project_name = "sagemaker_instance_pixel" - - input_dir = ( - Path("tests") - / "converter_test" - / "SageMaker" - / "input" - / "toSuperAnnotate" - / "instance_segmentation" - ) - out_dir = Path(tmpdir) / project_name - sa.import_annotation( - input_dir, - out_dir, - "SageMaker", - "test-obj-detect", - "Pixel", - "instance_segmentation", - ) - - description = "sagemaker vector instance segmentation" - ptype = "Pixel" - upload_project(out_dir, project_name, description, ptype) - - -@pytest.mark.skip(reason="Need to adjust") -def test_sagemaker_object_detection(tmpdir): - project_name = "sagemaker_object_vector" - - input_dir = ( - Path("tests") - / "converter_test" - / "SageMaker" - / "input" - / "toSuperAnnotate" - / "object_detection" - ) - out_dir = Path(tmpdir) / project_name - sa.import_annotation( - input_dir, out_dir, "SageMaker", "test-obj-detect", "Vector", "object_detection" - ) - - description = "sagemaker object detection" - ptype = "Vector" - # upload_project(out_dir, project_name, description, ptype) diff --git a/tests/convertors/test_supervisely.py b/tests/convertors/test_supervisely.py deleted file mode 100644 index 9ec6d0c01..000000000 --- a/tests/convertors/test_supervisely.py +++ /dev/null @@ -1,116 +0,0 @@ -from pathlib import Path - -import pytest -import superannotate as sa - - -@pytest.mark.skip(reason="Need to adjust") -def test_supervisely_convert_vector(tmpdir): - project_name = "supervisely_test_vector_basic" - - input_dir = ( - Path("tests") - / "converter_test" - / "Supervisely" - / "input" - / "toSuperAnnotate" - / "vector" - ) - out_dir = Path(tmpdir) / project_name - sa.import_annotation( - input_dir, out_dir, "Supervisely", "", "Vector", "vector_annotation" - ) - - description = "supervisely vector annotation" - ptype = "Vector" - # upload_project(out_dir, project_name, description, ptype) - - 
-@pytest.mark.skip(reason="Need to adjust") -def test_supervisely_convert_object(tmpdir): - project_name = "supervisely_test_object" - - input_dir = ( - Path("tests") - / "converter_test" - / "Supervisely" - / "input" - / "toSuperAnnotate" - / "vector" - ) - out_dir = Path(tmpdir) / project_name - sa.import_annotation( - input_dir, out_dir, "Supervisely", "", "Vector", "object_detection" - ) - - description = "supervisely object detection" - ptype = "Vector" - # upload_project(out_dir, project_name, description, ptype) - - -@pytest.mark.skip(reason="Need to adjust") -def test_supervisely_convert_instance(tmpdir): - project_name = "supervisely_test_vector_instance" - input_dir = ( - Path("tests") - / "converter_test" - / "Supervisely" - / "input" - / "toSuperAnnotate" - / "vector" - ) - out_dir = Path(tmpdir) / project_name - sa.import_annotation( - input_dir, out_dir, "Supervisely", "", "Vector", "instance_segmentation" - ) - - description = "supervisely instance segmentation" - ptype = "Vector" - # upload_project(out_dir, project_name, description, ptype) - - -@pytest.mark.skip(reason="Need to adjust") -def test_supervisely_convert_keypoint(tmpdir): - project_name = "supervisely_test_keypoint" - - input_dir = ( - Path("tests") - / "converter_test" - / "Supervisely" - / "input" - / "toSuperAnnotate" - / "keypoints" - ) - out_dir = Path(tmpdir) / project_name - sa.import_annotation( - input_dir, out_dir, "Supervisely", "", "Vector", "keypoint_detection" - ) - - description = "supervisely keypoint" - ptype = "Vector" - # upload_project(out_dir, project_name, description, ptype) - - -# - - -@pytest.mark.skip(reason="Need to adjust") -def test_supervisely_convert_instance_pixel(tmpdir): - project_name = "supervisely_test_instance_pixel" - - input_dir = ( - Path("tests") - / "converter_test" - / "Supervisely" - / "input" - / "toSuperAnnotate" - / "instance" - ) - out_dir = Path(tmpdir) / project_name - sa.import_annotation( - input_dir, out_dir, "Supervisely", "", 
"Pixel", "instance_segmentation" - ) - - description = "supervisely instance segmentation" - ptype = "Pixel" - # upload_project(out_dir, project_name, description, ptype) diff --git a/tests/convertors/test_vgg.py b/tests/convertors/test_vgg.py deleted file mode 100644 index 6a4a24b30..000000000 --- a/tests/convertors/test_vgg.py +++ /dev/null @@ -1,49 +0,0 @@ -from pathlib import Path - -import pytest -import superannotate as sa - - -@pytest.mark.skip(reason="Need to adjust") -def test_vgg_convert_object(tmpdir): - project_name = "vgg_test_object" - - input_dir = Path("tests") / "converter_test" / "VGG" / "input" / "toSuperAnnotate" - out_dir = Path(tmpdir) / project_name - sa.import_annotation( - input_dir, out_dir, "VGG", "vgg_test", "Vector", "object_detection" - ) - - description = "vgg object detection" - ptype = "Vector" - # upload_project(out_dir, project_name, description, ptype) - - -@pytest.mark.skip(reason="Need to adjust") -def test_vgg_convert_instance(tmpdir): - project_name = "vgg_test_instance" - - input_dir = Path("tests") / "converter_test" / "VGG" / "input" / "toSuperAnnotate" - out_dir = Path(tmpdir) / project_name - sa.import_annotation( - input_dir, out_dir, "VGG", "vgg_test", "Vector", "instance_segmentation" - ) - - description = "vgg instance segmentation" - ptype = "Vector" - # upload_project(out_dir, project_name, description, ptype) - - -@pytest.mark.skip(reason="Need to adjust") -def test_vgg_convert_vector(tmpdir): - project_name = "vgg_test_vector" - - input_dir = Path("tests") / "converter_test" / "VGG" / "input" / "toSuperAnnotate" - out_dir = Path(tmpdir) / project_name - sa.import_annotation( - input_dir, out_dir, "VGG", "vgg_test", "Vector", "vector_annotation" - ) - - description = "vgg vector annotation" - ptype = "Vector" - # upload_project(out_dir, project_name, description, ptype) diff --git a/tests/convertors/test_voc.py b/tests/convertors/test_voc.py deleted file mode 100644 index 24e23a236..000000000 --- 
a/tests/convertors/test_voc.py +++ /dev/null @@ -1,70 +0,0 @@ -import os -from os.path import dirname -from pathlib import Path - -import pytest -import superannotate as sa -from tests import DATA_SET_PATH - - -@pytest.mark.skip(reason="Need to adjust") -def test_voc_vector_instance(tmpdir): - project_name = "voc2sa_vector_instance" - - input_dir = ( - Path("tests") - / f"{DATA_SET_PATH}/converter_test" - / "VOC" - / "input" - / "fromPascalVOCToSuperAnnotate" - / "VOC2012" - ) - out_dir = Path(tmpdir) / project_name - sa.import_annotation( - input_dir, out_dir, "VOC", "", "Vector", "instance_segmentation" - ) - - description = "voc vector instance segmentation" - ptype = "Vector" - # upload_project(out_dir, project_name, description, ptype) - - -@pytest.mark.skip(reason="Need to adjust") -def test_voc_vector_object(tmpdir): - project_name = "voc2sa_vector_object" - - input_dir = ( - Path("tests") - / "converter_test" - / "VOC" - / "input" - / "fromPascalVOCToSuperAnnotate" - / "VOC2012" - ) - out_dir = Path(tmpdir) / project_name - sa.import_annotation(input_dir, out_dir, "VOC", "", "Vector", "object_detection") - - description = "voc vector object detection" - ptype = "Vector" - # upload_project(out_dir, project_name, description, ptype) - - -@pytest.mark.skip(reason="Need to adjust") -def test_voc_pixel(tmpdir): - project_name = "voc2sa_pixel_instance" - input_dir = ( - Path("tests") - / "converter_test" - / "VOC" - / "input" - / "fromPascalVOCToSuperAnnotate" - / "VOC2012" - ) - out_dir = Path(tmpdir) / project_name - sa.import_annotation( - input_dir, out_dir, "VOC", "", "Pixel", "instance_segmentation" - ) - - description = "voc pixel instance segmentation" - ptype = "Pixel" - # upload_project(out_dir, project_name, description, ptype) diff --git a/tests/convertors/test_vott.py b/tests/convertors/test_vott.py deleted file mode 100644 index 9b39c032c..000000000 --- a/tests/convertors/test_vott.py +++ /dev/null @@ -1,43 +0,0 @@ -from pathlib import Path - 
-import pytest -import superannotate as sa - - -@pytest.mark.skip(reason="Need to adjust") -def test_vott_convert_object(tmpdir): - project_name = "vott_object" - input_dir = Path("tests") / "converter_test" / "VoTT" / "input" / "toSuperAnnotate" - out_dir = Path(tmpdir) / project_name - sa.import_annotation(input_dir, out_dir, "VoTT", "", "Vector", "object_detection") - - description = "vott object detection" - ptype = "Vector" - # upload_project(out_dir, project_name, description, ptype) - - -@pytest.mark.skip(reason="Need to adjust") -def test_vott_convert_instance(tmpdir): - project_name = "vott_vector_instance" - input_dir = Path("tests") / "converter_test" / "VoTT" / "input" / "toSuperAnnotate" - out_dir = Path(tmpdir) / project_name - sa.import_annotation( - input_dir, out_dir, "VoTT", "", "Vector", "instance_segmentation" - ) - - description = "vott instance segmentation" - ptype = "Vector" - # upload_project(out_dir, project_name, description, ptype) - - -@pytest.mark.skip(reason="Need to adjust") -def test_vott_convert_vector(tmpdir): - project_name = "vott_vector_annotation" - - input_dir = Path("tests") / "converter_test" / "VoTT" / "input" / "toSuperAnnotate" - out_dir = Path(tmpdir) / project_name - sa.import_annotation(input_dir, out_dir, "VoTT", "", "Vector", "vector_annotation") - - description = "vott vector annotation" - ptype = "Vector" - # upload_project(out_dir, project_name, description, ptype) diff --git a/tests/convertors/test_yolo.py b/tests/convertors/test_yolo.py deleted file mode 100644 index 7bb8f8c8b..000000000 --- a/tests/convertors/test_yolo.py +++ /dev/null @@ -1,17 +0,0 @@ -from pathlib import Path - -import pytest -import superannotate as sa - - -@pytest.mark.skip(reason="Need to adjust") -def test_yolo_object_detection_web(tmpdir): - project_name = "yolo_object_detection" - - input_dir = Path("tests") / "converter_test" / "YOLO" / "input" / "toSuperAnnotate" - out_dir = Path(tmpdir) / project_name - 
sa.import_annotation(input_dir, out_dir, "YOLO", "", "Vector", "object_detection") - - description = "yolo object detection" - ptype = "Vector" - # upload_project(out_dir, project_name, description, ptype) diff --git a/tests/integration/annotations/validations/test_vector_annotation_validation.py b/tests/integration/annotations/validations/test_vector_annotation_validation.py index f095273c6..fc0da367f 100644 --- a/tests/integration/annotations/validations/test_vector_annotation_validation.py +++ b/tests/integration/annotations/validations/test_vector_annotation_validation.py @@ -7,24 +7,24 @@ class TestVectorValidators(TestCase): - PROJECT_TYPE = "vector" + PROJECT_TYPE = "Vector" @patch('builtins.print') def test_validate_annotation_without_metadata(self, mock_print): - is_valid = sa.validate_annotations("vector", {"instances": []}) + is_valid = sa.validate_annotations("Vector", {"instances": []}) assert not is_valid mock_print.assert_any_call('\'metadata\' is a required property') @patch('builtins.print') def test_validate_annotation_with_invalid_metadata(self, mock_print): - is_valid = sa.validate_annotations("vector", {"metadata": {"name": 12}}) + is_valid = sa.validate_annotations("Vector", {"metadata": {"name": 12}}) assert not is_valid mock_print.assert_any_call("metadata.name 12 is not of type 'string'") @patch('builtins.print') def test_validate_instances(self, mock_print): is_valid = sa.validate_annotations( - "vector", + "Vector", { "metadata": {"name": "12"}, "instances": [{"type": "invalid_type"}, {"type": "bbox"}] @@ -39,7 +39,7 @@ def test_validate_instances(self, mock_print): @patch('builtins.print') def test_validate_create_dby(self, mock_print): is_valid = sa.validate_annotations( - "vector", + "Vector", { "metadata": {"name": "12"}, "instances": [ diff --git a/tests/integration/annotations/validations/test_video_annotation_validation.py b/tests/integration/annotations/validations/test_video_annotation_validation.py index f50274f36..97f0ffb54 
100644 --- a/tests/integration/annotations/validations/test_video_annotation_validation.py +++ b/tests/integration/annotations/validations/test_video_annotation_validation.py @@ -18,6 +18,6 @@ class TestVectorValidators(TestCase): # @patch('builtins.print') def test_free_text_numeric_invalid(self): json_data = json.load(open(os.path.join(DATA_SET_PATH, self.ANNOTATIONS_PATH))) - is_valid = sa.validate_annotations("video", json_data) + is_valid = sa.validate_annotations("Video", json_data) assert not is_valid diff --git a/tests/integration/base.py b/tests/integration/base.py index c57c8a28d..564fa5c71 100644 --- a/tests/integration/base.py +++ b/tests/integration/base.py @@ -9,7 +9,7 @@ class BaseTestCase(TestCase): PROJECT_NAME = "" PROJECT_DESCRIPTION = "Desc" - PROJECT_TYPE = "Type" + PROJECT_TYPE = "Vector" TEST_FOLDER_PATH = "data_set" def __init__(self, *args, **kwargs): diff --git a/tests/integration/items/test_set_annotation_statuses.py b/tests/integration/items/test_set_annotation_statuses.py index f18045f92..3b3c2e684 100644 --- a/tests/integration/items/test_set_annotation_statuses.py +++ b/tests/integration/items/test_set_annotation_statuses.py @@ -27,7 +27,7 @@ class TestSetAnnotationStatuses(BaseTestCase): }, { "url": "1SfGcn9hdkVM35ZP0S93eStsE7Ti4GtHU", - "path": "123" + "name": "123" }, { "url": "https://drive.google.com/uc?export=download&id=1geS2YtQiTYuiduEirKVYxBujHJaIWA3V", diff --git a/tests/integration/items/test_set_approval_statuses.py b/tests/integration/items/test_set_approval_statuses.py index adff1ba10..3979d89f8 100644 --- a/tests/integration/items/test_set_approval_statuses.py +++ b/tests/integration/items/test_set_approval_statuses.py @@ -12,7 +12,7 @@ class TestSetApprovalStatuses(BaseTestCase): PROJECT_NAME = "TestSetApprovalStatuses" PROJECT_DESCRIPTION = "TestSetApprovalStatuses" - PROJECT_TYPE = "Document" + PROJECT_TYPE = "Vector" FOLDER_NAME = "test_folder" CSV_PATH = "data_set/attach_urls.csv" EXAMPLE_IMAGE_1 = 
"6022a74d5384c50017c366b3" @@ -28,7 +28,7 @@ class TestSetApprovalStatuses(BaseTestCase): }, { "url": "1SfGcn9hdkVM35ZP0S93eStsE7Ti4GtHU", - "path": "123" + "name": "123" }, { "url": "https://drive.google.com/uc?export=download&id=1geS2YtQiTYuiduEirKVYxBujHJaIWA3V", @@ -68,7 +68,7 @@ def test_image_approval_status_via_invalid_names(self): sa.attach_items( self.PROJECT_NAME, self.ATTACHMENT_LIST, "InProgress" ) - with self.assertRaisesRegexp(AppException, SetApprovalStatues.ERROR_MESSAGE): + with self.assertRaisesRegexp(AppException, "No items found."): sa.set_approval_statuses( self.PROJECT_NAME, "Approved", ["self.EXAMPLE_IMAGE_1", "self.EXAMPLE_IMAGE_2"] ) @@ -87,7 +87,7 @@ def test_set_invalid_approval_statuses(self): sa.attach_items( self.PROJECT_NAME, [self.ATTACHMENT_LIST[0]] ) - with self.assertRaisesRegexp(AppException, 'Available approval_status options are None, Disapproved, Approved.'): + with self.assertRaisesRegexp(AppException, "Available values are 'Approved', 'Disapproved'."): sa.set_approval_statuses( self.PROJECT_NAME, approval_status="aaa", items=[self.ATTACHMENT_LIST[0]["name"]] ) diff --git a/tests/integration/projects/test_create_project.py b/tests/integration/projects/test_create_project.py index 2a4039b17..85ac8200a 100644 --- a/tests/integration/projects/test_create_project.py +++ b/tests/integration/projects/test_create_project.py @@ -46,7 +46,7 @@ def test_create_project_without_settings(self): def test_create_project_wrong_type(self): with self.assertRaisesRegexp(AppException, - "Please provide a valid project type: Vector, Pixel, Video, Document, Tiled, Other, PointCloud."): + "Available values are 'Vector', 'Pixel', 'Video', 'Document', 'Tiled', 'Other', 'PointCloud'."): sa.create_project(self.PROJECT_1, "desc", "wrong_type") def test_create_project_with_settings(self): @@ -76,7 +76,7 @@ def test_create_project_without_settings(self): def test_create_project_with_settings(self): sa.create_project( self.PROJECT_1, "desc", 
self.PROJECT_TYPE, - [{"attribute": "FrameRate", "value": 1}] + [{"attribute": "FrameRate", "value": 1.0}] ) project = sa.get_project_metadata(self.PROJECT_1, include_settings=True) for setting in project["settings"]: diff --git a/tests/integration/settings/test_settings.py b/tests/integration/settings/test_settings.py index 7f3aff239..a8b34841f 100644 --- a/tests/integration/settings/test_settings.py +++ b/tests/integration/settings/test_settings.py @@ -156,7 +156,7 @@ def test_frame_rate_invalid_str_value(self): self.PROJECT_NAME, self.PROJECT_DESCRIPTION, self.PROJECT_TYPE, - [{"attribute": "FrameRate", "value": "1"}]) + [{"attribute": "FrameRate", "value": "one"}]) def test_frames_reset(self): sa.create_project( diff --git a/tests/integration/test_limitations.py b/tests/integration/test_limitations.py index a45d575c1..f105075ef 100644 --- a/tests/integration/test_limitations.py +++ b/tests/integration/test_limitations.py @@ -4,8 +4,8 @@ from src.superannotate import AppException from src.superannotate import SAClient -from src.superannotate.lib.core import UPLOAD_FOLDER_LIMIT_ERROR_MESSAGE from src.superannotate.lib.core import UPLOAD_PROJECT_LIMIT_ERROR_MESSAGE +from src.superannotate.lib.core import UPLOAD_FOLDER_LIMIT_ERROR_MESSAGE from src.superannotate.lib.core import UPLOAD_USER_LIMIT_ERROR_MESSAGE from tests.integration.base import BaseTestCase from tests.moks.limitatoins import folder_limit_response