From 1c108885d848bd1abd96ae74dd328a51c3a00a5a Mon Sep 17 00:00:00 2001
From: "Eric O. Korman"
Date: Mon, 13 May 2024 14:06:46 -0500
Subject: [PATCH] expose values with getattr (#572)

---
 client/unit-tests/coretypes/test_core.py      |   8 +-
 client/unit-tests/coretypes/test_filtering.py |   2 +-
 .../symbolic/collections/test_dictionary.py   |   6 +-
 .../collections/test_static_collection.py     |  15 ++-
 .../symbolic/collections/test_structures.py   |   2 +-
 client/unit-tests/symbolic/test_operators.py  |  16 ++-
 .../symbolic/types/test_symbolic_types.py     |   2 +-
 .../unit-tests}/test_typing.py                |  40 +++----
 client/unit-tests/test_viz.py                 |   2 +-
 client/valor/coretypes.py                     | 111 ++++++++----------
 client/valor/metatypes.py                     |   8 +-
 client/valor/schemas/symbolic/collections.py  |  22 ++--
 client/valor/schemas/symbolic/types.py        |  24 +++-
 client/valor/viz.py                           |   8 +-
 .../client/datasets/test_dataset.py           |   6 +-
 .../client/datasets/test_groundtruth.py       |   4 +-
 .../client/datatype/test_data_generation.py   |   6 +-
 integration_tests/client/test_client.py       |   4 +-
 integration_tests/conftest.py                 |   6 +-
 19 files changed, 154 insertions(+), 138 deletions(-)
 rename {integration_tests/client => client/unit-tests}/test_typing.py (78%)

diff --git a/client/unit-tests/coretypes/test_core.py b/client/unit-tests/coretypes/test_core.py
index cae060707..d994fdb70 100644
--- a/client/unit-tests/coretypes/test_core.py
+++ b/client/unit-tests/coretypes/test_core.py
@@ -212,13 +212,11 @@ def test_prediction():
         ),
     ]
 
-    # valid
-    Prediction(datum=datum, annotations=pds)
-
-    string = str(Prediction(datum=datum, annotations=pds))
+    pred = Prediction(datum=datum, annotations=pds)
+    string = str(pred)
     assert (
         string
-        == "{'datum': {'uid': 'somefile', 'metadata': {}}, 'annotations': [{'task_type': 'classification', 'metadata': {}, 'labels': [{'key': 'test', 'value': 'value', 'score': 1.0}], 'bounding_box': None, 'polygon': None, 'raster': None, 'embedding': None}, {'task_type': 'classification', 'metadata': {}, 'labels': [{'key': 'test', 'value': 'value', 'score': 1.0}], 'bounding_box': None, 'polygon': None, 'raster': None, 'embedding': None}]}"
+        == "{'datum': {'uid': 'somefile', 'metadata': {}}, 'annotations': [{'task_type': <TaskType.CLASSIFICATION: 'classification'>, 'metadata': {}, 'labels': [{'key': 'test', 'value': 'value', 'score': 1.0}], 'bounding_box': None, 'polygon': None, 'raster': None, 'embedding': None}, {'task_type': <TaskType.CLASSIFICATION: 'classification'>, 'metadata': {}, 'labels': [{'key': 'test', 'value': 'value', 'score': 1.0}], 'bounding_box': None, 'polygon': None, 'raster': None, 'embedding': None}]}"
     )
 
     assert "dataset_name" not in string
diff --git a/client/unit-tests/coretypes/test_filtering.py b/client/unit-tests/coretypes/test_filtering.py
index 8ffb91e2a..2059014f6 100644
--- a/client/unit-tests/coretypes/test_filtering.py
+++ b/client/unit-tests/coretypes/test_filtering.py
@@ -76,7 +76,7 @@ def test__format_filter(geojson, polygon):
             Dataset.metadata["some_str"] == "foobar",
             Dataset.metadata["some_float"] >= 0.123,
             Dataset.metadata["some_datetime"] > datetime.timedelta(days=1),
-            Dataset.metadata["some_geospatial"].intersects(polygon),
+            Dataset.metadata["some_geospatial"].intersects(polygon),  # type: ignore
         ]
     )
 
diff --git a/client/unit-tests/symbolic/collections/test_dictionary.py b/client/unit-tests/symbolic/collections/test_dictionary.py
index 0c1169dd8..49701ff7c 100644
--- a/client/unit-tests/symbolic/collections/test_dictionary.py
+++ b/client/unit-tests/symbolic/collections/test_dictionary.py
@@ -2,7 +2,7 @@
 
 import pytest
 
-from valor.schemas import Dictionary, Float, Integer
+from valor.schemas import Dictionary
 
 
 def test_validate_metadata():
@@ -14,8 +14,8 @@ def test_validate_metadata():
         Dictionary({123: 123})  # type: ignore
 
     # Check int to float conversion
-    assert type(Dictionary({"test": 1})["test"]) is Integer
-    assert type(Dictionary({"test": 1.0})["test"]) is Float
+    assert type(Dictionary({"test": 1})["test"]) is int
+    assert type(Dictionary({"test": 1.0})["test"]) is float
 
 
 def test_init_dictionary_from_builtin_dict():
diff --git a/client/unit-tests/symbolic/collections/test_static_collection.py b/client/unit-tests/symbolic/collections/test_static_collection.py
index d74552eef..ae30e31fb 100644
--- a/client/unit-tests/symbolic/collections/test_static_collection.py
+++ b/client/unit-tests/symbolic/collections/test_static_collection.py
@@ -107,10 +107,17 @@ class A(StaticCollection):
     }
 
     # test value members
-    assert v1.w.to_dict() == {"type": "integer", "value": 101}
-    assert v1.x.to_dict() == {"type": "float", "value": 0.123}
-    assert v1.y.to_dict() == {"type": "string", "value": "foobar"}
-    assert v1.z.to_dict() == {"type": "bool", "value": True}
+    assert isinstance(v1.w, int)
+    assert v1.w == 101
+
+    assert isinstance(v1.x, float)
+    assert v1.x == 0.123
+
+    assert isinstance(v1.y, str)
+    assert v1.y == "foobar"
+
+    assert isinstance(v1.z, bool)
+    assert v1.z is True
 
 
 def test__get_static_types():
diff --git a/client/unit-tests/symbolic/collections/test_structures.py b/client/unit-tests/symbolic/collections/test_structures.py
index ce63a2c18..3000ca4dd 100644
--- a/client/unit-tests/symbolic/collections/test_structures.py
+++ b/client/unit-tests/symbolic/collections/test_structures.py
@@ -199,7 +199,7 @@ def test_list():
 
     # test creating valued lists
     variable = List[Float]([0.1, 0.2, 0.3])
-    assert variable.__str__() == "[0.1, 0.2, 0.3]"
+    assert variable.__str__() == "[Float(0.1), Float(0.2), Float(0.3)]"
     assert variable.to_dict() == {
         "type": "list[float]",
         "value": [0.1, 0.2, 0.3],
diff --git a/client/unit-tests/symbolic/test_operators.py b/client/unit-tests/symbolic/test_operators.py
index 42ab568a9..06807322a 100644
--- a/client/unit-tests/symbolic/test_operators.py
+++ b/client/unit-tests/symbolic/test_operators.py
@@ -27,8 +27,14 @@ def test_function(variables):
     x, y, z = variables
 
     # test stringify
-    assert Function(x, y, z).__repr__() == "Function(1, '2', 0.3)"
-    assert Function(x, y, z).__str__() == "Function(1, '2', 0.3)"
+    assert (
+        Function(x, y, z).__repr__()
+        == "Function(Integer(1), String('2'), Float(0.3))"
+    )
+    assert (
+        Function(x, y, z).__str__()
+        == "Function(Integer(1), String('2'), Float(0.3))"
+    )
 
     # test dictionary generation
     assert Function(x, y, z).to_dict() == {
@@ -43,8 +49,10 @@ def test_function(variables):
     # test stringify w/ operator
     assert issubclass(And, Function)
     assert And._operator is not None
-    assert And(x, y, z).__repr__() == "And(1, '2', 0.3)"
-    assert And(x, y, z).__str__() == "(1 & '2' & 0.3)"
+    assert (
+        And(x, y, z).__repr__() == "And(Integer(1), String('2'), Float(0.3))"
+    )
+    assert And(x, y, z).__str__() == "(Integer(1) & String('2') & Float(0.3))"
 
     # test logical operators
     assert type(Function(x) & Function(y)) is And
diff --git a/client/unit-tests/symbolic/types/test_symbolic_types.py b/client/unit-tests/symbolic/types/test_symbolic_types.py
index d6343e9ba..af255014a 100644
--- a/client/unit-tests/symbolic/types/test_symbolic_types.py
+++ b/client/unit-tests/symbolic/types/test_symbolic_types.py
@@ -90,7 +90,7 @@ def test_symbol():
 def _test_symbolic_outputs(v, s=Symbol(name="test")):
     assert s.to_dict() == v.to_dict()
     assert s.to_dict() == v.get_symbol().to_dict()
-    assert s.__repr__() == v.__repr__()
+    assert f"Variable({s.__repr__()})" == v.__repr__()
     assert s.__str__() == v.__str__()
 
     assert v.is_symbolic and not v.is_value
 
diff --git a/integration_tests/client/test_typing.py b/client/unit-tests/test_typing.py
similarity index 78%
rename from integration_tests/client/test_typing.py
rename to client/unit-tests/test_typing.py
index cd41c004e..12d5877c7 100644
--- a/integration_tests/client/test_typing.py
+++ b/client/unit-tests/test_typing.py
@@ -28,19 +28,19 @@ def test_label_typing():
     assert type(Label.score) is Float
 
     label = Label(key="k1", value="v1")
-    assert type(label.key) is String
-    assert type(label.value) is String
-    assert type(label.score) is Float
+    assert type(label.key) is str
+    assert type(label.value) is str
+    assert label.score is None
 
     label = Label(key="k1", value="v1", score=None)
-    assert type(label.key) is String
-    assert type(label.value) is String
-    assert type(label.score) is Float
+    assert type(label.key) is str
+    assert type(label.value) is str
+    assert label.score is None
 
     label = Label(key="k1", value="v1", score=1.0)
-    assert type(label.key) is String
-    assert type(label.value) is String
-    assert type(label.score) is Float
+    assert type(label.key) is str
+    assert type(label.value) is str
+    assert type(label.score) is float
 
 
 def test_annotation_typing():
@@ -55,12 +55,12 @@ def test_annotation_typing():
         task_type=enums.TaskType.CLASSIFICATION,
         labels=[],
     )
-    assert type(annotation.task_type) is TaskTypeEnum
+    assert type(annotation.task_type) is enums.TaskType
     assert type(annotation.labels) is List[Label]
     assert type(annotation.metadata) is Dictionary
-    assert type(annotation.bounding_box) is Box
-    assert type(annotation.polygon) is Polygon
-    assert type(annotation.raster) is Raster
+    assert annotation.bounding_box is None
+    assert annotation.polygon is None
+    assert annotation.raster is None
 
     bbox = Box.from_extrema(0, 1, 0, 1)
     polygon = Polygon([bbox.boundary])
@@ -73,7 +73,7 @@ def test_annotation_typing():
         polygon=polygon,
         raster=raster,
     )
-    assert type(annotation.task_type) is TaskTypeEnum
+    assert type(annotation.task_type) is enums.TaskType
     assert type(annotation.labels) is List[Label]
     assert type(annotation.metadata) is Dictionary
     assert type(annotation.bounding_box) is Box
@@ -86,11 +86,11 @@ def test_datum_typing():
     assert type(Datum.metadata) is Dictionary
 
     datum = Datum(uid="test")
-    assert type(datum.uid) is String
+    assert type(datum.uid) is str
     assert type(datum.metadata) is Dictionary
 
     datum = Datum(uid="test", metadata={})
-    assert type(datum.uid) is String
+    assert type(datum.uid) is str
     assert type(datum.metadata) is Dictionary
 
 
@@ -113,11 +113,11 @@ def test_dataset_typing():
     assert type(Dataset.metadata) is Dictionary
 
     dataset = Dataset(name="test")
-    assert type(dataset.name) is String
+    assert type(dataset.name) is str
     assert type(dataset.metadata) is Dictionary
 
     dataset = Dataset(name="test", metadata={})
-    assert type(dataset.name) is String
+    assert type(dataset.name) is str
     assert type(dataset.metadata) is Dictionary
 
 
@@ -126,9 +126,9 @@ def test_model_typing():
     assert type(Model.metadata) is Dictionary
 
     model = Model(name="test")
-    assert type(model.name) is String
+    assert type(model.name) is str
     assert type(model.metadata) is Dictionary
 
     model = Model(name="test", metadata={})
-    assert type(model.name) is String
+    assert type(model.name) is str
     assert type(model.metadata) is Dictionary
diff --git a/client/unit-tests/test_viz.py b/client/unit-tests/test_viz.py
index e0ce6dbfe..79f487f52 100644
--- a/client/unit-tests/test_viz.py
+++ b/client/unit-tests/test_viz.py
@@ -176,7 +176,7 @@ def test_draw_detections_on_image(bounding_poly: Polygon):
             Annotation(
                 task_type=TaskType.OBJECT_DETECTION,
                 labels=[Label(key="k", value="v")],
-                polygon=Polygon(bounding_poly.get_value()),
+                polygon=bounding_poly,
             )
         ],
     ),
diff --git a/client/valor/coretypes.py b/client/valor/coretypes.py
index cbb5df28b..964f3e8a9 100644
--- a/client/valor/coretypes.py
+++ b/client/valor/coretypes.py
@@ -102,7 +102,7 @@ def __init__(
 
         for annotation in self.annotations:
             for label in annotation.labels:
-                if label.score.get_value() is not None:
+                if label.score is not None:
                     raise ValueError(
                         "GroundTruth labels should not have scores."
                     )
@@ -160,13 +160,13 @@ def __init__(
 
         # validation
        for annotation in self.annotations:
-            task_type = annotation.task_type.get_value()
+            task_type = annotation.task_type
             if task_type in [
                 TaskType.CLASSIFICATION,
                 TaskType.OBJECT_DETECTION,
             ]:
                 for label in annotation.labels:
-                    label_score = label.score._value
+                    label_score = label.score
                     if label_score is None:
                         raise ValueError(
                             f"For task type `{task_type}` prediction labels must have scores, but got `None`"
@@ -175,8 +175,8 @@ def __init__(
 
                 label_keys_to_sum = {}
                 for scored_label in annotation.labels:
-                    label_key = scored_label.key.get_value()
-                    label_score = scored_label.score.get_value()
+                    label_key = scored_label.key
+                    label_score = scored_label.score
                     if label_key not in label_keys_to_sum:
                         label_keys_to_sum[label_key] = 0.0
                     label_keys_to_sum[label_key] += label_score
@@ -551,9 +551,6 @@ def get_groundtruth(
         """
         return Client(self.conn).get_groundtruth(dataset=self, datum=datum)
 
-    def get_name(self) -> str:
-        return self.name.get_value()
-
     def get_labels(
         self,
     ) -> List[Label]:
@@ -591,7 +588,7 @@ def get_datums(
             raise ValueError(
                 "Cannot filter by dataset_names when calling `Dataset.get_datums`."
             )
-        filter_["dataset_names"] = [self.get_name()]
+        filter_["dataset_names"] = [self.name]  # type: ignore
         return Client(self.conn).get_datums(filter_by=filter_)
 
     def get_evaluations(
@@ -649,7 +646,7 @@ def get_summary(self) -> DatasetSummary:
             groundtruth_annotation_metadata: list of the unique metadata dictionaries in the dataset that are associated to annotations
         """
-        return Client(self.conn).get_dataset_summary(self.get_name())
+        return Client(self.conn).get_dataset_summary(self.name)  # type: ignore
 
     def finalize(
         self,
@@ -671,7 +668,7 @@ def delete(
         timeout : int, default=0
             Sets a timeout in seconds.
         """
-        Client(self.conn).delete_dataset(self.get_name(), timeout)
+        Client(self.conn).delete_dataset(self.name, timeout)  # type: ignore
 
 
 class Model(StaticCollection):
@@ -808,9 +805,6 @@ def add_predictions(
             predictions=predictions,
         )
 
-    def get_name(self) -> str:
-        return self.name.get_value()
-
     def get_prediction(
         self, dataset: Union[Dataset, str], datum: Union[Datum, str]
     ) -> Union[Prediction, None]:
@@ -851,11 +845,9 @@ def _format_constraints(
         # get list of dataset names
         dataset_names_from_obj = []
         if isinstance(datasets, list):
-            dataset_names_from_obj = [
-                dataset.get_name() for dataset in datasets
-            ]
+            dataset_names_from_obj = [dataset.name for dataset in datasets]
         elif isinstance(datasets, Dataset):
-            dataset_names_from_obj = [datasets.get_name()]
+            dataset_names_from_obj = [datasets.name]
 
         # create a 'schemas.Filter' object from the constraints.
        filter_ = _format_filter(filter_by)
@@ -867,7 +859,7 @@ def _format_constraints(
         # set dataset names
         if not filter_.dataset_names:
             filter_.dataset_names = []
-        filter_.dataset_names.extend(dataset_names_from_obj)
+        filter_.dataset_names.extend(dataset_names_from_obj)  # type: ignore
         return filter_
 
     def _create_label_map(
@@ -892,18 +884,15 @@ def _create_label_map(
         for key, value in label_map.items():
             if not all(
                 [
-                    (
-                        isinstance(v.key._value, str)
-                        and isinstance(v.value._value, str)
-                    )
+                    (isinstance(v.key, str) and isinstance(v.value, str))
                     for v in [key, value]
                 ]
             ):
                 raise TypeError
             return_value.append(
                 [
-                    [key.key._value, key.value._value],
-                    [value.key._value, value.value._value],
+                    [key.key, key.value],
+                    [value.key, value.value],
                 ]
             )
         return return_value
@@ -945,7 +934,7 @@ def evaluate_classification(
         # format request
         datum_filter = self._format_constraints(datasets, filter_by)
         request = EvaluationRequest(
-            model_names=[self.get_name()],
+            model_names=[self.name],  # type: ignore
             datum_filter=datum_filter,
             parameters=EvaluationParameters(
                 task_type=TaskType.CLASSIFICATION,
@@ -1028,7 +1017,7 @@ def evaluate_detection(
         )
         datum_filter = self._format_constraints(datasets, filter_by)
         request = EvaluationRequest(
-            model_names=[self.get_name()],
+            model_names=[self.name],  # type: ignore
             datum_filter=datum_filter,
             parameters=parameters,
             meta={},
@@ -1071,7 +1060,7 @@ def evaluate_segmentation(
         # format request
         datum_filter = self._format_constraints(datasets, filter_by)
         request = EvaluationRequest(
-            model_names=[self.get_name()],
+            model_names=[self.name],  # type: ignore
             datum_filter=datum_filter,
             parameters=EvaluationParameters(
                 task_type=TaskType.SEMANTIC_SEGMENTATION,
@@ -1097,7 +1086,7 @@ def delete(self, timeout: int = 0):
         timeout : int, default=0
             Sets a timeout in seconds.
         """
-        Client(self.conn).delete_model(self.get_name(), timeout)
+        Client(self.conn).delete_model(self.name, timeout)  # type: ignore
 
     def get_labels(
         self,
@@ -1210,11 +1199,11 @@ def get_labels_from_dataset(
             A list of labels.
         """
         dataset_name = (
-            dataset.get_name() if isinstance(dataset, Dataset) else dataset
+            dataset.name if isinstance(dataset, Dataset) else dataset
         )
         return [
             Label(**label)
-            for label in self.conn.get_labels_from_dataset(dataset_name)
+            for label in self.conn.get_labels_from_dataset(dataset_name)  # type: ignore
         ]
 
     def get_labels_from_model(self, model: Union[Model, str]) -> List[Label]:
@@ -1231,10 +1220,10 @@ def get_labels_from_model(self, model: Union[Model, str]) -> List[Label]:
         List[valor.Label]
             A list of labels.
         """
-        model_name = model.get_name() if isinstance(model, Model) else model
+        model_name = model.name if isinstance(model, Model) else model
         return [
             Label(**label)
-            for label in self.conn.get_labels_from_model(model_name)
+            for label in self.conn.get_labels_from_model(model_name)  # type: ignore
         ]
 
     def create_dataset(
@@ -1278,7 +1267,7 @@ def create_groundtruths(
             if not isinstance(groundtruth.annotations._value, list):
                 raise TypeError
             groundtruth_dict = groundtruth.encode_value()
-            groundtruth_dict["dataset_name"] = dataset.get_name()
+            groundtruth_dict["dataset_name"] = dataset.name
             groundtruths_json.append(groundtruth_dict)
         self.conn.create_groundtruths(groundtruths_json)
 
@@ -1303,12 +1292,12 @@ def get_groundtruth(
             The matching ground truth or 'None' if it doesn't exist.
""" dataset_name = ( - dataset.get_name() if isinstance(dataset, Dataset) else dataset + dataset.name if isinstance(dataset, Dataset) else dataset ) - datum_uid = datum.get_uid() if isinstance(datum, Datum) else datum + datum_uid = datum.uid if isinstance(datum, Datum) else datum try: resp = self.conn.get_groundtruth( - dataset_name=dataset_name, datum_uid=datum_uid + dataset_name=dataset_name, datum_uid=datum_uid # type: ignore ) resp.pop("dataset_name") return GroundTruth.decode_value(resp) @@ -1327,9 +1316,9 @@ def finalize_dataset(self, dataset: Union[Dataset, str]) -> None: The dataset to be finalized. """ dataset_name = ( - dataset.get_name() if isinstance(dataset, Dataset) else dataset + dataset.name if isinstance(dataset, Dataset) else dataset ) - return self.conn.finalize_dataset(name=dataset_name) + return self.conn.finalize_dataset(name=dataset_name) # type: ignore def get_dataset( self, @@ -1427,9 +1416,9 @@ def get_datum( The requested datum or 'None' if it doesn't exist. """ dataset_name = ( - dataset.get_name() if isinstance(dataset, Dataset) else dataset + dataset.name if isinstance(dataset, Dataset) else dataset ) - resp = self.conn.get_datum(dataset_name=dataset_name, uid=uid) + resp = self.conn.get_datum(dataset_name=dataset_name, uid=uid) # type: ignore return Datum.decode_value(resp) def get_dataset_status( @@ -1539,8 +1528,8 @@ def create_predictions( if not isinstance(prediction.annotations._value, list): raise TypeError prediction_dict = prediction.encode_value() - prediction_dict["dataset_name"] = dataset.get_name() - prediction_dict["model_name"] = model.get_name() + prediction_dict["dataset_name"] = dataset.name + prediction_dict["model_name"] = model.name predictions_json.append(prediction_dict) self.conn.create_predictions(predictions_json) @@ -1568,15 +1557,15 @@ def get_prediction( The matching prediction or 'None' if it doesn't exist. """ dataset_name = ( - dataset.get_name() if isinstance(dataset, Dataset) else dataset + dataset.name if isinstance(dataset, Dataset) else dataset ) - model_name = model.get_name() if isinstance(model, Model) else model - datum_uid = datum.get_uid() if isinstance(datum, Datum) else datum + model_name = model.name if isinstance(model, Model) else model + datum_uid = datum.uid if isinstance(datum, Datum) else datum resp = self.conn.get_prediction( - dataset_name=dataset_name, - model_name=model_name, - datum_uid=datum_uid, + dataset_name=dataset_name, # type: ignore + model_name=model_name, # type: ignore + datum_uid=datum_uid, # type: ignore ) resp.pop("dataset_name") resp.pop("model_name") @@ -1589,12 +1578,12 @@ def finalize_inferences( Finalizes a model-dataset pairing such that new predictions cannot be added to it. """ dataset_name = ( - dataset.get_name() if isinstance(dataset, Dataset) else dataset + dataset.name if isinstance(dataset, Dataset) else dataset ) - model_name = model.get_name() if isinstance(model, Model) else model + model_name = model.name if isinstance(model, Model) else model return self.conn.finalize_inferences( - dataset_name=dataset_name, - model_name=model_name, + dataset_name=dataset_name, # type: ignore + model_name=model_name, # type: ignore ) def get_model( @@ -1694,10 +1683,10 @@ def get_model_eval_requests( List[Evaluation] A list of evaluations. 
""" - model_name = model.get_name() if isinstance(model, Model) else model + model_name = model.name if isinstance(model, Model) else model return [ Evaluation(**evaluation, connection=self.conn) - for evaluation in self.conn.get_model_eval_requests(model_name) + for evaluation in self.conn.get_model_eval_requests(model_name) # type: ignore ] def delete_model(self, name: str, timeout: int = 0) -> None: @@ -1754,21 +1743,21 @@ def get_evaluations( A list of evaluations. """ if isinstance(datasets, list): - datasets = [ - element.get_name() if isinstance(element, Dataset) else element + datasets = [ # type: ignore + element.name if isinstance(element, Dataset) else element for element in datasets ] if isinstance(models, list): - models = [ - element.get_name() if isinstance(element, Model) else element + models = [ # type: ignore + element.name if isinstance(element, Model) else element for element in models ] return [ Evaluation(connection=self.conn, **evaluation) for evaluation in self.conn.get_evaluations( evaluation_ids=evaluation_ids, - models=models, - datasets=datasets, + models=models, # type: ignore + datasets=datasets, # type: ignore metrics_to_sort_by=metrics_to_sort_by, ) ] diff --git a/client/valor/metatypes.py b/client/valor/metatypes.py index 43f808879..6d49f6b99 100644 --- a/client/valor/metatypes.py +++ b/client/valor/metatypes.py @@ -34,10 +34,6 @@ def __init__(self, datum: Datum): elif datum.is_symbolic: raise ValueError - height = int(datum.metadata.get_value()["height"].get_value()) - width = int(datum.metadata.get_value()["width"].get_value()) - datum.metadata["height"] = Integer(height) - datum.metadata["width"] = Integer(width) self.datum = datum @classmethod @@ -92,7 +88,7 @@ def from_pil(cls, image: Image, uid: str): @property def height(self) -> int: """Returns image height in pixels.""" - value = self.datum.metadata["height"].get_value() + value = self.datum.metadata["height"] if not isinstance(value, int): raise TypeError return int(value) @@ -100,7 +96,7 @@ def height(self) -> int: @property def width(self) -> int: """Returns image width in pixels.""" - value = self.datum.metadata["width"].get_value() + value = self.datum.metadata["width"] if not isinstance(value, int): raise TypeError return int(value) diff --git a/client/valor/schemas/symbolic/collections.py b/client/valor/schemas/symbolic/collections.py index d5710b7f4..e8a0f417b 100644 --- a/client/valor/schemas/symbolic/collections.py +++ b/client/valor/schemas/symbolic/collections.py @@ -17,6 +17,7 @@ String, TaskTypeEnum, Variable, + _convert_simple_variables_to_standard_types, get_type_by_name, ) @@ -112,6 +113,12 @@ def format(self, __name: str, __value: Any) -> Any: def __setattr__(self, __name: str, __value: Any) -> None: super().__setattr__(__name, self.format(__name, __value)) + def __getattribute__(self, __name: str) -> Any: + ret = super().__getattribute__(__name) + if isinstance(ret, Variable) and ret.is_value: + return _convert_simple_variables_to_standard_types(ret) + return ret + @classmethod def __validate__(cls, value: Any): """Validate typing.""" @@ -136,7 +143,8 @@ def decode_value(cls, value: dict): def encode_value(self): """Encode object to JSON compatible dictionary.""" return { - k: v.encode_value() for k, v in self._get_dynamic_values().items() + k: (v.encode_value() if hasattr(v, "encode_value") else v) + for k, v in self._get_dynamic_values().items() } @classmethod @@ -165,7 +173,7 @@ def _get_dynamic_values(self): def __repr__(self): if self.is_symbolic: return super().__repr__() 
-        return self.encode_value().__repr__()
+        return f"{self.__class__.__name__}({self.encode_value().__repr__()})"
 
     def __str__(self):
         if self.is_symbolic:
@@ -234,11 +242,7 @@ def tuple(self):
         tuple
             A tuple of the `Label's` arguments.
         """
-        return (
-            self.key.get_value(),
-            self.value.get_value(),
-            self.score.get_value(),
-        )
+        return (self.key, self.value, self.score)
 
 
 class Annotation(StaticCollection):
@@ -418,7 +422,3 @@ def __init__(
             A dictionary of metadata that describes the datum.
         """
         super().__init__(uid=uid, metadata=metadata if metadata else dict())
-
-    def get_uid(self) -> str:
-        """Extracts the uid from a datum instance."""
-        return self.uid.get_value()
diff --git a/client/valor/schemas/symbolic/types.py b/client/valor/schemas/symbolic/types.py
index 1f78fb265..cf50e1de4 100644
--- a/client/valor/schemas/symbolic/types.py
+++ b/client/valor/schemas/symbolic/types.py
@@ -30,6 +30,21 @@
 )
 
 
+def _convert_simple_variables_to_standard_types(var: typing.Any):
+    """Converts a variable to a standard type. This operates recursively
+    in the case that the variable represents a dictionary.
+    """
+    from valor.schemas.symbolic.collections import StaticCollection
+
+    if isinstance(var, StaticCollection):
+        return var
+    if isinstance(var, Variable):
+        val = var.get_value()
+        if isinstance(val, (str, int, float, bool, type(None))):
+            var = val
+    return var
+
+
 class Symbol:
     """
     A symbol contains no value and is defined by the tuple (owner, name, key, attribute).
@@ -128,7 +143,7 @@ def __init__(
         self._value = value
 
     def __repr__(self) -> str:
-        return self._value.__repr__()
+        return f"{self.__class__.__name__}({self._value.__repr__()})"
 
     def __str__(self) -> str:
         return str(self._value)
@@ -213,6 +228,7 @@ def preprocess(cls, value: typing.Any):
                 owner=value._owner,
             )
         elif cls.supports(value):
+            # return cls(value=value)
             raise TypeError(
                 f"{cls.__name__} does not support operations with value '{value}' of type '{type(value).__name__}'."
@@ -442,7 +458,7 @@ def __eq__(self, value: typing.Any) -> typing.Union["Bool", Eq]:
             elif rhs is None:
                 return Bool(lhs is None)
             else:
-                return Bool(lhs == rhs)
+                return Bool(bool(lhs == rhs))
         return Eq(self, other)
 
     def __ne__(self, value: typing.Any) -> typing.Union["Bool", Ne]:
@@ -1669,6 +1685,8 @@ def encode_value(self) -> dict:
                 ),
             ):
                 encoding[k] = v.encode_value()
+            elif isinstance(v, (bool, int, float, str)):
+                encoding[k] = v
             else:
                 encoding[k] = v.to_dict()
         return encoding
@@ -1686,7 +1704,7 @@ def __getitem__(self, __key: str):
         value = self.get_value()
         if not value:
             raise KeyError(__key)
-        return value[__key]
+        return _convert_simple_variables_to_standard_types(value[__key])
 
     def __setitem__(self, __key: str, __value: typing.Any):
         if not isinstance(__value, Variable):
diff --git a/client/valor/viz.py b/client/valor/viz.py
index de5997667..715c9fbc3 100644
--- a/client/valor/viz.py
+++ b/client/valor/viz.py
@@ -130,15 +130,15 @@ def create_combined_segmentation_mask(
     # unpack raster annotations
     annotations: List[Annotation] = []
     for annotation in annotated_datum.annotations:
-        if annotation.task_type.get_value() in task_types:
+        if annotation.task_type in task_types:
             annotations.append(annotation)
 
     # unpack label values
     label_values = []
     for annotation in annotations:
         for label in annotation.labels:
-            if label.key.get_value() == label_key:
-                label_values.append(label.value.get_value())
+            if label.key == label_key:
+                label_values.append(label.value)
     if not label_values:
         raise RuntimeError(
             f"Annotation doesn't have a label with key `{label_key}`"
@@ -255,7 +255,7 @@ def _draw_detection_on_image(
     )
     box = detection.bounding_box
     polygon = detection.polygon
-    if polygon.get_value() is not None:
+    if polygon is not None:
         img = _draw_bounding_polygon_on_image(
             polygon,
             img,
diff --git a/integration_tests/client/datasets/test_dataset.py b/integration_tests/client/datasets/test_dataset.py
index 4cad426a0..056f85c1f 100644
--- a/integration_tests/client/datasets/test_dataset.py
+++ b/integration_tests/client/datasets/test_dataset.py
@@ -162,9 +162,9 @@ def test_create_image_dataset_with_segmentations(
     semantic_segs = []
     for seg in segs:
         assert isinstance(seg, Annotation)
-        if seg.task_type.get_value() == TaskType.OBJECT_DETECTION:
+        if seg.task_type == TaskType.OBJECT_DETECTION:
             instance_segs.append(seg)
-        elif seg.task_type.get_value() == TaskType.SEMANTIC_SEGMENTATION:
+        elif seg.task_type == TaskType.SEMANTIC_SEGMENTATION:
             semantic_segs.append(seg)
 
     # should have one instance segmentation that's a rectangle
@@ -381,7 +381,7 @@ def test_get_summary(
         TaskType.SEMANTIC_SEGMENTATION,
     ]
 
-    summary.labels.sort(key=lambda x: x.key.get_value())
+    summary.labels.sort(key=lambda x: x.key)
     assert summary.labels == [
         Label(key="k1", value="v1"),
         Label(key="k2", value="v2"),
diff --git a/integration_tests/client/datasets/test_groundtruth.py b/integration_tests/client/datasets/test_groundtruth.py
index 2b6cc57d5..976522766 100644
--- a/integration_tests/client/datasets/test_groundtruth.py
+++ b/integration_tests/client/datasets/test_groundtruth.py
@@ -88,13 +88,13 @@ def test_create_gt_detections_as_bbox_or_poly(
             [
                 det
                 for det in detections.annotations
-                if det.bounding_box.get_value() is not None
+                if det.bounding_box is not None
             ]
         )
         == 1
     )
     for det in detections.annotations:
-        if det.bounding_box.get_value():
+        if det.bounding_box:
             assert det.to_dict() == gt.annotations[0].to_dict()
         else:
             assert det.to_dict() == gt.annotations[1].to_dict()
diff --git a/integration_tests/client/datatype/test_data_generation.py b/integration_tests/client/datatype/test_data_generation.py
index 49ffc4075..af5978efa 100644
--- a/integration_tests/client/datatype/test_data_generation.py
+++ b/integration_tests/client/datatype/test_data_generation.py
@@ -276,8 +276,8 @@ def generate_prediction_data(
 
     datums = dataset.get_datums()
     for datum in datums:
-        height = cast(int, datum.metadata["height"].get_value())
-        width = cast(int, datum.metadata["width"].get_value())
+        height = cast(int, datum.metadata["height"])
+        width = cast(int, datum.metadata["width"])
         prediction = _generate_prediction(
             datum=datum,
             height=int(height),
@@ -319,7 +319,7 @@ def test_generate_segmentation_data(
     ), "Number of images doesn't match the test input"
 
     for image in dataset.get_datums():
-        uid = image.get_uid()
+        uid = image.uid
         sample_gt = dataset.get_groundtruth(uid)
 
         assert sample_gt
diff --git a/integration_tests/client/test_client.py b/integration_tests/client/test_client.py
index 16b6391e7..17cd67548 100644
--- a/integration_tests/client/test_client.py
+++ b/integration_tests/client/test_client.py
@@ -193,14 +193,14 @@ def test_get_labels(
     )
     assert len(high_score_labels) == 5
     for label in high_score_labels:
-        assert int(label.value.get_value()) % 2 == 1
+        assert int(label.value) % 2 == 1
 
     low_score_labels = client.get_labels(
         Filter(label_scores=[Constraint(value=0.5, operator="<")])
     )
     assert len(low_score_labels) == 5
     for label in low_score_labels:
-        assert int(label.value.get_value()) % 2 == 0
+        assert int(label.value) % 2 == 0
 
     # check that the content-range header exists on the raw response
     requests_method = getattr(requests, "get")
diff --git a/integration_tests/conftest.py b/integration_tests/conftest.py
index 492fa3b3b..775081a1e 100644
--- a/integration_tests/conftest.py
+++ b/integration_tests/conftest.py
@@ -55,7 +55,7 @@ def db(connection: ClientConnection) -> Iterator[Session]:
     if len(client.get_datasets()) > 0:
         raise RuntimeError(
             "Tests should be run on an empty valor back end but found existing datasets.",
-            [ds.get_name() for ds in client.get_datasets()],
+            [ds.name for ds in client.get_datasets()],
         )
 
     if len(client.get_models()) > 0:
@@ -84,13 +84,13 @@ def db(connection: ClientConnection) -> Iterator[Session]:
 
     for model in client.get_models():
         try:
-            client.delete_model(model.get_name(), timeout=360)
+            client.delete_model(model.name, timeout=360)
         except exceptions.ModelDoesNotExistError:
             continue
 
     for dataset in client.get_datasets():
         try:
-            client.delete_dataset(dataset.get_name(), timeout=360)
+            client.delete_dataset(dataset.name, timeout=360)
         except exceptions.DatasetDoesNotExistError:
             continue
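
A quick illustration of the behavior this patch introduces (not part of the
diff itself). With `StaticCollection.__getattribute__` and
`Dictionary.__getitem__` now routing values through
`_convert_simple_variables_to_standard_types`, simple value-typed members come
back as plain Python types, so call sites no longer need `.get_value()`. A
minimal sketch, assuming `Label` and `Datum` are importable from the top-level
`valor` package as in the tests above; the uid and metadata values are made up
for the example:

    from valor import Datum, Label

    label = Label(key="k1", value="v1", score=0.5)

    # Before this patch these attributes were symbolic wrappers (String/Float)
    # that had to be unwrapped with e.g. label.key.get_value(); they are now
    # exposed directly as standard types.
    assert isinstance(label.key, str)      # "k1"
    assert isinstance(label.score, float)  # 0.5

    # Dictionary lookups behave the same way: an int entry comes back as a
    # plain int rather than a symbolic Integer.
    datum = Datum(uid="img_01", metadata={"height": 100})
    assert datum.metadata["height"] == 100

This is also why the `get_name()`/`get_uid()` helpers are deleted throughout
`coretypes.py` and `metatypes.py`: `dataset.name`, `model.name`, and
`datum.uid` now return the underlying strings directly.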