diff --git a/sdk/search/azure-search-documents/azure/search/documents/_generated/_serialization.py b/sdk/search/azure-search-documents/azure/search/documents/_generated/_serialization.py index ce17d1798ce7..9490137183da 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/_generated/_serialization.py +++ b/sdk/search/azure-search-documents/azure/search/documents/_generated/_serialization.py @@ -456,7 +456,7 @@ def deserialize(cls: Type[ModelType], data: Any, content_type: Optional[str] = N :param str data: A str using RestAPI structure. JSON by default. :param str content_type: JSON by default, set application/xml if XML. :returns: An instance of this model - :raises: DeserializationError if something went wrong + :raises DeserializationError: if something went wrong :rtype: ModelType """ deserializer = Deserializer(cls._infer_class_models()) @@ -479,7 +479,7 @@ def from_dict( :param function key_extractors: A key extractor function. :param str content_type: JSON by default, set application/xml if XML. :returns: An instance of this model - :raises: DeserializationError if something went wrong + :raises DeserializationError: if something went wrong :rtype: ModelType """ deserializer = Deserializer(cls._infer_class_models()) @@ -626,7 +626,7 @@ def _serialize( # pylint: disable=too-many-nested-blocks, too-many-branches, to :param object target_obj: The data to be serialized. :param str data_type: The type to be serialized from. :rtype: str, dict - :raises: SerializationError if serialization fails. + :raises SerializationError: if serialization fails. :returns: The serialized data. """ key_transformer = kwargs.get("key_transformer", self.key_transformer) @@ -736,8 +736,8 @@ def body(self, data, data_type, **kwargs): :param object data: The data to be serialized. :param str data_type: The type to be serialized from. :rtype: dict - :raises: SerializationError if serialization fails. - :raises: ValueError if data is None + :raises SerializationError: if serialization fails. + :raises ValueError: if data is None :returns: The serialized request body """ @@ -781,8 +781,8 @@ def url(self, name, data, data_type, **kwargs): :param str data_type: The type to be serialized from. :rtype: str :returns: The serialized URL path - :raises: TypeError if serialization fails. - :raises: ValueError if data is None + :raises TypeError: if serialization fails. + :raises ValueError: if data is None """ try: output = self.serialize_data(data, data_type, **kwargs) @@ -805,8 +805,8 @@ def query(self, name, data, data_type, **kwargs): :param object data: The data to be serialized. :param str data_type: The type to be serialized from. :rtype: str, list - :raises: TypeError if serialization fails. - :raises: ValueError if data is None + :raises TypeError: if serialization fails. + :raises ValueError: if data is None :returns: The serialized query parameter """ try: @@ -835,8 +835,8 @@ def header(self, name, data, data_type, **kwargs): :param object data: The data to be serialized. :param str data_type: The type to be serialized from. :rtype: str - :raises: TypeError if serialization fails. - :raises: ValueError if data is None + :raises TypeError: if serialization fails. + :raises ValueError: if data is None :returns: The serialized header """ try: @@ -855,9 +855,9 @@ def serialize_data(self, data, data_type, **kwargs): :param object data: The data to be serialized. :param str data_type: The type to be serialized from. - :raises: AttributeError if required data is None. 
- :raises: ValueError if data is None - :raises: SerializationError if serialization fails. + :raises AttributeError: if required data is None. + :raises ValueError: if data is None + :raises SerializationError: if serialization fails. :returns: The serialized data. :rtype: str, int, float, bool, dict, list """ @@ -1192,7 +1192,7 @@ def serialize_rfc(attr, **kwargs): # pylint: disable=unused-argument :param Datetime attr: Object to be serialized. :rtype: str - :raises: TypeError if format invalid. + :raises TypeError: if format invalid. :return: serialized rfc """ try: @@ -1218,7 +1218,7 @@ def serialize_iso(attr, **kwargs): # pylint: disable=unused-argument :param Datetime attr: Object to be serialized. :rtype: str - :raises: SerializationError if format invalid. + :raises SerializationError: if format invalid. :return: serialized iso """ if isinstance(attr, str): @@ -1251,7 +1251,7 @@ def serialize_unix(attr, **kwargs): # pylint: disable=unused-argument :param Datetime attr: Object to be serialized. :rtype: int - :raises: SerializationError if format invalid + :raises SerializationError: if format invalid :return: serialied unix """ if isinstance(attr, int): @@ -1488,7 +1488,7 @@ def __call__(self, target_obj, response_data, content_type=None): :param str target_obj: Target data type to deserialize to. :param requests.Response response_data: REST response object. :param str content_type: Swagger "produces" if available. - :raises: DeserializationError if deserialization fails. + :raises DeserializationError: if deserialization fails. :return: Deserialized object. :rtype: object """ @@ -1502,7 +1502,7 @@ def _deserialize(self, target_obj, data): # pylint: disable=inconsistent-return :param str target_obj: Target data type to deserialize to. :param object data: Object to deserialize. - :raises: DeserializationError if deserialization fails. + :raises DeserializationError: if deserialization fails. :return: Deserialized object. :rtype: object """ @@ -1713,7 +1713,7 @@ def deserialize_data(self, data, data_type): # pylint: disable=too-many-return- :param str data: The response string to be deserialized. :param str data_type: The type to deserialize to. - :raises: DeserializationError if deserialization fails. + :raises DeserializationError: if deserialization fails. :return: Deserialized object. :rtype: object """ @@ -1795,7 +1795,7 @@ def deserialize_object(self, attr, **kwargs): # pylint: disable=too-many-return :param dict attr: Dictionary to be deserialized. :return: Deserialized object. :rtype: dict - :raises: TypeError if non-builtin datatype encountered. + :raises TypeError: if non-builtin datatype encountered. """ if attr is None: return None @@ -1841,7 +1841,7 @@ def deserialize_basic(self, attr, data_type): # pylint: disable=too-many-return :param str data_type: deserialization data type. :return: Deserialized basic type. :rtype: str, int, float or bool - :raises: TypeError if string format is not valid. + :raises TypeError: if string format is not valid. """ # If we're here, data is supposed to be a basic type. # If it's still an XML node, take the text @@ -1932,7 +1932,7 @@ def deserialize_bytearray(attr): :param str attr: response string to be deserialized. :return: Deserialized bytearray :rtype: bytearray - :raises: TypeError if string format invalid. + :raises TypeError: if string format invalid. """ if isinstance(attr, ET.Element): attr = attr.text @@ -1945,7 +1945,7 @@ def deserialize_base64(attr): :param str attr: response string to be deserialized. 
:return: Deserialized base64 string :rtype: bytearray - :raises: TypeError if string format invalid. + :raises TypeError: if string format invalid. """ if isinstance(attr, ET.Element): attr = attr.text @@ -1960,7 +1960,7 @@ def deserialize_decimal(attr): :param str attr: response string to be deserialized. :return: Deserialized decimal - :raises: DeserializationError if string format invalid. + :raises DeserializationError: if string format invalid. :rtype: decimal """ if isinstance(attr, ET.Element): @@ -1978,7 +1978,7 @@ def deserialize_long(attr): :param str attr: response string to be deserialized. :return: Deserialized int :rtype: long or int - :raises: ValueError if string format invalid. + :raises ValueError: if string format invalid. """ if isinstance(attr, ET.Element): attr = attr.text @@ -1991,7 +1991,7 @@ def deserialize_duration(attr): :param str attr: response string to be deserialized. :return: Deserialized duration :rtype: TimeDelta - :raises: DeserializationError if string format invalid. + :raises DeserializationError: if string format invalid. """ if isinstance(attr, ET.Element): attr = attr.text @@ -2009,7 +2009,7 @@ def deserialize_date(attr): :param str attr: response string to be deserialized. :return: Deserialized date :rtype: Date - :raises: DeserializationError if string format invalid. + :raises DeserializationError: if string format invalid. """ if isinstance(attr, ET.Element): attr = attr.text @@ -2025,7 +2025,7 @@ def deserialize_time(attr): :param str attr: response string to be deserialized. :return: Deserialized time :rtype: datetime.time - :raises: DeserializationError if string format invalid. + :raises DeserializationError: if string format invalid. """ if isinstance(attr, ET.Element): attr = attr.text @@ -2040,7 +2040,7 @@ def deserialize_rfc(attr): :param str attr: response string to be deserialized. :return: Deserialized RFC datetime :rtype: Datetime - :raises: DeserializationError if string format invalid. + :raises DeserializationError: if string format invalid. """ if isinstance(attr, ET.Element): attr = attr.text @@ -2063,7 +2063,7 @@ def deserialize_iso(attr): :param str attr: response string to be deserialized. :return: Deserialized ISO datetime :rtype: Datetime - :raises: DeserializationError if string format invalid. + :raises DeserializationError: if string format invalid. """ if isinstance(attr, ET.Element): attr = attr.text @@ -2101,7 +2101,7 @@ def deserialize_unix(attr): :param int attr: Object to be serialized. 
:return: Deserialized datetime :rtype: Datetime - :raises: DeserializationError if format invalid + :raises DeserializationError: if format invalid """ if isinstance(attr, ET.Element): attr = int(attr.text) # type: ignore diff --git a/sdk/search/azure-search-documents/azure/search/documents/_paging.py b/sdk/search/azure-search-documents/azure/search/documents/_paging.py index 73b7a7f0b03d..eb6db166f402 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/_paging.py +++ b/sdk/search/azure-search-documents/azure/search/documents/_paging.py @@ -43,6 +43,7 @@ def unpack_continuation_token(token): class SearchItemPaged(ItemPaged[ReturnType]): + """A pageable list of search results.""" def __init__(self, *args, **kwargs) -> None: super(SearchItemPaged, self).__init__(*args, **kwargs) self._first_page_iterator_instance: Optional[SearchPageIterator] = None @@ -116,6 +117,7 @@ def wrapper(self, *args, **kw): class SearchPageIterator(PageIterator): + """An iterator over search results.""" def __init__(self, client, initial_query, kwargs, continuation_token=None) -> None: super(SearchPageIterator, self).__init__( get_next=self._get_next_cb, diff --git a/sdk/search/azure-search-documents/azure/search/documents/_queries.py b/sdk/search/azure-search-documents/azure/search/documents/_queries.py index c6e4f22cbbb7..bc24fbe7db91 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/_queries.py +++ b/sdk/search/azure-search-documents/azure/search/documents/_queries.py @@ -58,10 +58,8 @@ def order_by(self, *fields: Union[str, List[str]]) -> None: :param fields: A list of fields for the query result to be ordered by. :type fields: str or list[str] - :raises: ValueError + :raises ValueError: If no fields are provided. """ - if not fields: - raise ValueError("At least one field must be provided") if not fields: raise ValueError("At least one field must be provided") selects = [] @@ -78,7 +76,7 @@ def select(self, *fields: Union[str, List[str]]) -> None: :param fields: A list of fields for the query result to return. :type fields: str or list[str] - :raises: ValueError + :raises ValueError: If no fields are provided. """ if not fields: raise ValueError("At least one field must be provided") @@ -104,10 +102,8 @@ def order_by(self, *fields: Union[str, List[str]]) -> None: :param fields: A list of fields for the query result to be ordered by. :type fields: str or list[str] - :raises: ValueError + :raises ValueError: If no fields are provided. """ - if not fields: - raise ValueError("At least one field must be provided") if not fields: raise ValueError("At least one field must be provided") selects = [] @@ -124,7 +120,7 @@ def select(self, *fields: Union[str, List[str]]) -> None: :param fields: A list of fields for the query result to return. :type fields: str or list[str] - :raises: ValueError + :raises ValueError: If no fields are provided. """ if not fields: raise ValueError("At least one field must be provided") diff --git a/sdk/search/azure-search-documents/azure/search/documents/_search_client.py b/sdk/search/azure-search-documents/azure/search/documents/_search_client.py index c99fd4e463f4..0751ab32e0c2 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/_search_client.py +++ b/sdk/search/azure-search-documents/azure/search/documents/_search_client.py @@ -101,6 +101,7 @@ def __repr__(self) -> str: def close(self) -> None: """Close the session. 
+ :return: None :rtype: None """ diff --git a/sdk/search/azure-search-documents/azure/search/documents/_search_indexing_buffered_sender.py b/sdk/search/azure-search-documents/azure/search/documents/_search_indexing_buffered_sender.py index 5648bd49fa2c..e0f1f41ac8f4 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/_search_indexing_buffered_sender.py +++ b/sdk/search/azure-search-documents/azure/search/documents/_search_indexing_buffered_sender.py @@ -111,6 +111,7 @@ def actions(self) -> List[IndexAction]: @distributed_trace def close(self, **kwargs) -> None: # pylint: disable=unused-argument """Close the session. + :return: None :rtype: None """ diff --git a/sdk/search/azure-search-documents/azure/search/documents/aio/_paging.py b/sdk/search/azure-search-documents/azure/search/documents/aio/_paging.py index 87e5ef5028b2..4b7d77f344a5 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/aio/_paging.py +++ b/sdk/search/azure-search-documents/azure/search/documents/aio/_paging.py @@ -17,6 +17,7 @@ class AsyncSearchItemPaged(AsyncItemPaged[ReturnType]): + """A pageable list of search results.""" def __init__(self, *args, **kwargs) -> None: super(AsyncSearchItemPaged, self).__init__(*args, **kwargs) self._first_page_iterator_instance: Optional[AsyncSearchPageIterator] = None @@ -99,6 +100,7 @@ async def wrapper(self, *args, **kw): class AsyncSearchPageIterator(AsyncPageIterator[ReturnType]): + """An iterator of search results.""" def __init__(self, client, initial_query, kwargs, continuation_token=None) -> None: super(AsyncSearchPageIterator, self).__init__( get_next=self._get_next_cb, diff --git a/sdk/search/azure-search-documents/azure/search/documents/aio/_search_client_async.py b/sdk/search/azure-search-documents/azure/search/documents/aio/_search_client_async.py index 974976c10808..366a12ee4c62 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/aio/_search_client_async.py +++ b/sdk/search/azure-search-documents/azure/search/documents/aio/_search_client_async.py @@ -103,6 +103,7 @@ def __repr__(self) -> str: async def close(self) -> None: """Close the session. + :return: None :rtype: None """ diff --git a/sdk/search/azure-search-documents/azure/search/documents/aio/_search_indexing_buffered_sender_async.py b/sdk/search/azure-search-documents/azure/search/documents/aio/_search_indexing_buffered_sender_async.py index ca53c7d6910e..5aca7abcb6e6 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/aio/_search_indexing_buffered_sender_async.py +++ b/sdk/search/azure-search-documents/azure/search/documents/aio/_search_indexing_buffered_sender_async.py @@ -102,6 +102,7 @@ def __repr__(self) -> str: @property def actions(self) -> List[IndexAction]: """The list of currently index actions in queue to index. + :return: The list of currently index actions in queue to index. :rtype: list[IndexAction] """ @@ -110,6 +111,7 @@ def actions(self) -> List[IndexAction]: @distributed_trace_async async def close(self, **kwargs: Any) -> None: # pylint: disable=unused-argument """Close the session. + :return: None :rtype: None """ @@ -119,10 +121,11 @@ async def close(self, **kwargs: Any) -> None: # pylint: disable=unused-argument @distributed_trace_async async def flush(self, timeout: int = 86400, **kwargs) -> bool: # pylint:disable=unused-argument """Flush the batch. + :param int timeout: time out setting. Default is 86400s (one day) :return: True if there are errors. 
Else False :rtype: bool - :raises ~azure.core.exceptions.ServiceResponseTimeoutError: + :raises ~azure.core.exceptions.ServiceResponseTimeoutError: if there is a timeout """ has_error = False begin_time = int(time.time()) @@ -197,6 +200,7 @@ async def _process_if_needed(self) -> bool: will be triggered. It checks the actions already queued and flushes them if: 1. Auto_flush is on 2. There are self._batch_action_count actions queued + :return: True if proces is needed, False otherwise :rtype: bool """ @@ -220,6 +224,7 @@ def _reset_timer(self): @distributed_trace_async async def upload_documents(self, documents: List[Dict], **kwargs: Any) -> None: # pylint: disable=unused-argument """Queue upload documents actions. + :param documents: A list of documents to upload. :type documents: list[dict] """ @@ -230,6 +235,7 @@ async def upload_documents(self, documents: List[Dict], **kwargs: Any) -> None: @distributed_trace_async async def delete_documents(self, documents: List[Dict], **kwargs: Any) -> None: # pylint: disable=unused-argument """Queue delete documents actions + :param documents: A list of documents to delete. :type documents: list[Dict] """ @@ -240,6 +246,7 @@ async def delete_documents(self, documents: List[Dict], **kwargs: Any) -> None: @distributed_trace_async async def merge_documents(self, documents: List[Dict], **kwargs: Any) -> None: # pylint: disable=unused-argument """Queue merge documents actions + :param documents: A list of documents to merge. :type documents: list[dict] """ @@ -251,6 +258,7 @@ async def merge_documents(self, documents: List[Dict], **kwargs: Any) -> None: async def merge_or_upload_documents(self, documents: List[Dict], **kwargs: Any) -> None: # pylint: disable=unused-argument """Queue merge documents or upload documents actions + :param documents: A list of documents to merge or upload. :type documents: list[dict] """ @@ -266,8 +274,7 @@ async def index_documents(self, batch: IndexDocumentsBatch, **kwargs: Any) -> Li :type batch: IndexDocumentsBatch :return: Indexing result for each action in the batch. :rtype: list[IndexingResult] - - :raises ~azure.search.documents.RequestEntityTooLargeError + :raises ~azure.search.documents.RequestEntityTooLargeError: The request is too large. """ return await self._index_documents_actions(actions=batch.actions, **kwargs) diff --git a/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/_serialization.py b/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/_serialization.py index ce17d1798ce7..f24208713601 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/_serialization.py +++ b/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/_serialization.py @@ -456,7 +456,7 @@ def deserialize(cls: Type[ModelType], data: Any, content_type: Optional[str] = N :param str data: A str using RestAPI structure. JSON by default. :param str content_type: JSON by default, set application/xml if XML. :returns: An instance of this model - :raises: DeserializationError if something went wrong + :raises DeserializationError: if something went wrong :rtype: ModelType """ deserializer = Deserializer(cls._infer_class_models()) @@ -479,7 +479,7 @@ def from_dict( :param function key_extractors: A key extractor function. :param str content_type: JSON by default, set application/xml if XML. 
:returns: An instance of this model - :raises: DeserializationError if something went wrong + :raises DeserializationError: if something went wrong :rtype: ModelType """ deserializer = Deserializer(cls._infer_class_models()) @@ -626,7 +626,7 @@ def _serialize( # pylint: disable=too-many-nested-blocks, too-many-branches, to :param object target_obj: The data to be serialized. :param str data_type: The type to be serialized from. :rtype: str, dict - :raises: SerializationError if serialization fails. + :raises SerializationError: if serialization fails. :returns: The serialized data. """ key_transformer = kwargs.get("key_transformer", self.key_transformer) @@ -736,8 +736,8 @@ def body(self, data, data_type, **kwargs): :param object data: The data to be serialized. :param str data_type: The type to be serialized from. :rtype: dict - :raises: SerializationError if serialization fails. - :raises: ValueError if data is None + :raises SerializationError: if serialization fails. + :raises ValueError: if data is None :returns: The serialized request body """ @@ -781,8 +781,8 @@ def url(self, name, data, data_type, **kwargs): :param str data_type: The type to be serialized from. :rtype: str :returns: The serialized URL path - :raises: TypeError if serialization fails. - :raises: ValueError if data is None + :raises TypeError: if serialization fails. + :raises ValueError: if data is None """ try: output = self.serialize_data(data, data_type, **kwargs) @@ -805,8 +805,8 @@ def query(self, name, data, data_type, **kwargs): :param object data: The data to be serialized. :param str data_type: The type to be serialized from. :rtype: str, list - :raises: TypeError if serialization fails. - :raises: ValueError if data is None + :raises TypeError: if serialization fails. + :raises ValueError: if data is None :returns: The serialized query parameter """ try: @@ -835,8 +835,8 @@ def header(self, name, data, data_type, **kwargs): :param object data: The data to be serialized. :param str data_type: The type to be serialized from. :rtype: str - :raises: TypeError if serialization fails. - :raises: ValueError if data is None + :raises TypeError: if serialization fails. + :raises ValueError: if data is None :returns: The serialized header """ try: @@ -855,9 +855,9 @@ def serialize_data(self, data, data_type, **kwargs): :param object data: The data to be serialized. :param str data_type: The type to be serialized from. - :raises: AttributeError if required data is None. - :raises: ValueError if data is None - :raises: SerializationError if serialization fails. + :raises AttributeError: if required data is None. + :raises ValueError: if data is None + :raises SerializationError: if serialization fails. :returns: The serialized data. :rtype: str, int, float, bool, dict, list """ @@ -1192,7 +1192,7 @@ def serialize_rfc(attr, **kwargs): # pylint: disable=unused-argument :param Datetime attr: Object to be serialized. :rtype: str - :raises: TypeError if format invalid. + :raises TypeError: if format invalid. :return: serialized rfc """ try: @@ -1218,7 +1218,7 @@ def serialize_iso(attr, **kwargs): # pylint: disable=unused-argument :param Datetime attr: Object to be serialized. :rtype: str - :raises: SerializationError if format invalid. + :raises SerializationError: if format invalid. :return: serialized iso """ if isinstance(attr, str): @@ -1251,7 +1251,7 @@ def serialize_unix(attr, **kwargs): # pylint: disable=unused-argument :param Datetime attr: Object to be serialized. 
:rtype: int - :raises: SerializationError if format invalid + :raises SerializationError: if format is invalid :return: serialied unix """ if isinstance(attr, int): @@ -1488,7 +1488,7 @@ def __call__(self, target_obj, response_data, content_type=None): :param str target_obj: Target data type to deserialize to. :param requests.Response response_data: REST response object. :param str content_type: Swagger "produces" if available. - :raises: DeserializationError if deserialization fails. + :raises DeserializationError: if deserialization fails. :return: Deserialized object. :rtype: object """ @@ -1502,7 +1502,7 @@ def _deserialize(self, target_obj, data): # pylint: disable=inconsistent-return :param str target_obj: Target data type to deserialize to. :param object data: Object to deserialize. - :raises: DeserializationError if deserialization fails. + :raises DeserializationError: if deserialization fails. :return: Deserialized object. :rtype: object """ @@ -1713,7 +1713,7 @@ def deserialize_data(self, data, data_type): # pylint: disable=too-many-return- :param str data: The response string to be deserialized. :param str data_type: The type to deserialize to. - :raises: DeserializationError if deserialization fails. + :raises DeserializationError: if deserialization fails. :return: Deserialized object. :rtype: object """ @@ -1795,7 +1795,7 @@ def deserialize_object(self, attr, **kwargs): # pylint: disable=too-many-return :param dict attr: Dictionary to be deserialized. :return: Deserialized object. :rtype: dict - :raises: TypeError if non-builtin datatype encountered. + :raises TypeError: if non-builtin datatype encountered. """ if attr is None: return None @@ -1841,7 +1841,7 @@ def deserialize_basic(self, attr, data_type): # pylint: disable=too-many-return :param str data_type: deserialization data type. :return: Deserialized basic type. :rtype: str, int, float or bool - :raises: TypeError if string format is not valid. + :raises TypeError: if string format invalid. """ # If we're here, data is supposed to be a basic type. # If it's still an XML node, take the text @@ -1932,7 +1932,7 @@ def deserialize_bytearray(attr): :param str attr: response string to be deserialized. :return: Deserialized bytearray :rtype: bytearray - :raises: TypeError if string format invalid. + :raises TypeError: if string format invalid. """ if isinstance(attr, ET.Element): attr = attr.text @@ -1945,7 +1945,7 @@ def deserialize_base64(attr): :param str attr: response string to be deserialized. :return: Deserialized base64 string :rtype: bytearray - :raises: TypeError if string format invalid. + :raises TypeError: if string format invalid. """ if isinstance(attr, ET.Element): attr = attr.text @@ -1960,7 +1960,7 @@ def deserialize_decimal(attr): :param str attr: response string to be deserialized. :return: Deserialized decimal - :raises: DeserializationError if string format invalid. + :raises DeserializationError: if string format invalid. :rtype: decimal """ if isinstance(attr, ET.Element): @@ -1978,7 +1978,7 @@ def deserialize_long(attr): :param str attr: response string to be deserialized. :return: Deserialized int :rtype: long or int - :raises: ValueError if string format invalid. + :raises TypeError: if string format invalid. """ if isinstance(attr, ET.Element): attr = attr.text @@ -1991,7 +1991,7 @@ def deserialize_duration(attr): :param str attr: response string to be deserialized. :return: Deserialized duration :rtype: TimeDelta - :raises: DeserializationError if string format invalid. 
+ :raises DeserializationError: if string format invalid. """ if isinstance(attr, ET.Element): attr = attr.text @@ -2009,7 +2009,7 @@ def deserialize_date(attr): :param str attr: response string to be deserialized. :return: Deserialized date :rtype: Date - :raises: DeserializationError if string format invalid. + :raises DeserializationError: if string format invalid. """ if isinstance(attr, ET.Element): attr = attr.text @@ -2025,7 +2025,7 @@ def deserialize_time(attr): :param str attr: response string to be deserialized. :return: Deserialized time :rtype: datetime.time - :raises: DeserializationError if string format invalid. + :raises DeserializationError: if string format invalid. """ if isinstance(attr, ET.Element): attr = attr.text @@ -2040,7 +2040,7 @@ def deserialize_rfc(attr): :param str attr: response string to be deserialized. :return: Deserialized RFC datetime :rtype: Datetime - :raises: DeserializationError if string format invalid. + :raises DeserializationError: if string format invalid. """ if isinstance(attr, ET.Element): attr = attr.text @@ -2063,7 +2063,7 @@ def deserialize_iso(attr): :param str attr: response string to be deserialized. :return: Deserialized ISO datetime :rtype: Datetime - :raises: DeserializationError if string format invalid. + :raises DeserializationError: if string format invalid. """ if isinstance(attr, ET.Element): attr = attr.text @@ -2101,7 +2101,7 @@ def deserialize_unix(attr): :param int attr: Object to be serialized. :return: Deserialized datetime :rtype: Datetime - :raises: DeserializationError if format invalid + :raises DeserializationError: if format is invalid """ if isinstance(attr, ET.Element): attr = int(attr.text) # type: ignore diff --git a/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/models/_models_py3.py b/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/models/_models_py3.py index 59b8cc728349..5afb2aed6acc 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/models/_models_py3.py +++ b/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/models/_models_py3.py @@ -402,10 +402,10 @@ class AnalyzeRequest(_serialization.Model): :vartype normalizer: str or ~azure.search.documents.indexes.models.LexicalNormalizerName :ivar token_filters: An optional list of token filters to use when breaking the given text. This parameter can only be set when using the tokenizer parameter. - :vartype token_filters: list[str or ~azure.search.documents.indexes.models.TokenFilterName] + :vartype token_filters: list[str] or list[~azure.search.documents.indexes.models.TokenFilterName] :ivar char_filters: An optional list of character filters to use when breaking the given text. This parameter can only be set when using the tokenizer parameter. - :vartype char_filters: list[str or ~azure.search.documents.indexes.models.CharFilterName] + :vartype char_filters: list[str] or list[~azure.search.documents.indexes.models.CharFilterName] """ _validation = { @@ -466,10 +466,10 @@ def __init__( :paramtype normalizer: str or ~azure.search.documents.indexes.models.LexicalNormalizerName :keyword token_filters: An optional list of token filters to use when breaking the given text. This parameter can only be set when using the tokenizer parameter. 
- :paramtype token_filters: list[str or ~azure.search.documents.indexes.models.TokenFilterName] + :paramtype token_filters: list[str] or list[~azure.search.documents.indexes.models.TokenFilterName] :keyword char_filters: An optional list of character filters to use when breaking the given text. This parameter can only be set when using the tokenizer parameter. - :paramtype char_filters: list[str or ~azure.search.documents.indexes.models.CharFilterName] + :paramtype char_filters: list[str] or list[~azure.search.documents.indexes.models.CharFilterName] """ super().__init__(**kwargs) self.text = text @@ -1657,8 +1657,8 @@ class CjkBigramTokenFilter(TokenFilter): characters. Required. :vartype name: str :ivar ignore_scripts: The scripts to ignore. - :vartype ignore_scripts: list[str or - ~azure.search.documents.indexes.models.CjkBigramTokenFilterScripts] + :vartype ignore_scripts: list[str] or + list[~azure.search.documents.indexes.models.CjkBigramTokenFilterScripts] :ivar output_unigrams: A value indicating whether to output both unigrams and bigrams (if true), or just bigrams (if false). Default is false. :vartype output_unigrams: bool @@ -1690,8 +1690,8 @@ def __init__( 128 characters. Required. :paramtype name: str :keyword ignore_scripts: The scripts to ignore. - :paramtype ignore_scripts: list[str or - ~azure.search.documents.indexes.models.CjkBigramTokenFilterScripts] + :paramtype ignore_scripts: list[str] or + list[~azure.search.documents.indexes.models.CjkBigramTokenFilterScripts] :keyword output_unigrams: A value indicating whether to output both unigrams and bigrams (if true), or just bigrams (if false). Default is false. :paramtype output_unigrams: bool @@ -2120,11 +2120,11 @@ class CustomAnalyzer(LexicalAnalyzer): :ivar token_filters: A list of token filters used to filter out or modify the tokens generated by a tokenizer. For example, you can specify a lowercase filter that converts all characters to lowercase. The filters are run in the order in which they are listed. - :vartype token_filters: list[str or ~azure.search.documents.indexes.models.TokenFilterName] + :vartype token_filters: list[str] or list[~azure.search.documents.indexes.models.TokenFilterName] :ivar char_filters: A list of character filters used to prepare input text before it is processed by the tokenizer. For instance, they can replace certain characters or symbols. The filters are run in the order in which they are listed. - :vartype char_filters: list[str or ~azure.search.documents.indexes.models.CharFilterName] + :vartype char_filters: list[str] or list[~azure.search.documents.indexes.models.CharFilterName] """ _validation = { @@ -2164,11 +2164,11 @@ def __init__( :keyword token_filters: A list of token filters used to filter out or modify the tokens generated by a tokenizer. For example, you can specify a lowercase filter that converts all characters to lowercase. The filters are run in the order in which they are listed. - :paramtype token_filters: list[str or ~azure.search.documents.indexes.models.TokenFilterName] + :paramtype token_filters: list[str] or list[~azure.search.documents.indexes.models.TokenFilterName] :keyword char_filters: A list of character filters used to prepare input text before it is processed by the tokenizer. For instance, they can replace certain characters or symbols. The filters are run in the order in which they are listed. 
- :paramtype char_filters: list[str or ~azure.search.documents.indexes.models.CharFilterName] + :paramtype char_filters: list[str] or list[~azure.search.documents.indexes.models.CharFilterName] """ super().__init__(name=name, **kwargs) self.odata_type: str = "#Microsoft.Azure.Search.CustomAnalyzer" @@ -2571,11 +2571,11 @@ class CustomNormalizer(LexicalNormalizer): :ivar token_filters: A list of token filters used to filter out or modify the input token. For example, you can specify a lowercase filter that converts all characters to lowercase. The filters are run in the order in which they are listed. - :vartype token_filters: list[str or ~azure.search.documents.indexes.models.TokenFilterName] + :vartype token_filters: list[str] or list[~azure.search.documents.indexes.models.TokenFilterName] :ivar char_filters: A list of character filters used to prepare input text before it is processed. For instance, they can replace certain characters or symbols. The filters are run in the order in which they are listed. - :vartype char_filters: list[str or ~azure.search.documents.indexes.models.CharFilterName] + :vartype char_filters: list[str] or list[~azure.search.documents.indexes.models.CharFilterName] """ _validation = { @@ -2607,11 +2607,11 @@ def __init__( :keyword token_filters: A list of token filters used to filter out or modify the input token. For example, you can specify a lowercase filter that converts all characters to lowercase. The filters are run in the order in which they are listed. - :paramtype token_filters: list[str or ~azure.search.documents.indexes.models.TokenFilterName] + :paramtype token_filters: list[str] or list[~azure.search.documents.indexes.models.TokenFilterName] :keyword char_filters: A list of character filters used to prepare input text before it is processed. For instance, they can replace certain characters or symbols. The filters are run in the order in which they are listed. - :paramtype char_filters: list[str or ~azure.search.documents.indexes.models.CharFilterName] + :paramtype char_filters: list[str] or list[~azure.search.documents.indexes.models.CharFilterName] """ super().__init__(name=name, **kwargs) self.odata_type: str = "#Microsoft.Azure.Search.CustomNormalizer" @@ -3375,7 +3375,7 @@ class EdgeNGramTokenizer(LexicalTokenizer): :ivar max_gram: The maximum n-gram length. Default is 2. Maximum is 300. :vartype max_gram: int :ivar token_chars: Character classes to keep in the tokens. - :vartype token_chars: list[str or ~azure.search.documents.indexes.models.TokenCharacterKind] + :vartype token_chars: list[str] or list[~azure.search.documents.indexes.models.TokenCharacterKind] """ _validation = { @@ -3413,7 +3413,7 @@ def __init__( :keyword max_gram: The maximum n-gram length. Default is 2. Maximum is 300. :paramtype max_gram: int :keyword token_chars: Character classes to keep in the tokens. - :paramtype token_chars: list[str or ~azure.search.documents.indexes.models.TokenCharacterKind] + :paramtype token_chars: list[str] or list[~azure.search.documents.indexes.models.TokenCharacterKind] """ super().__init__(name=name, **kwargs) self.odata_type: str = "#Microsoft.Azure.Search.EdgeNGramTokenizer" @@ -3591,7 +3591,7 @@ class EntityRecognitionSkill(SearchIndexerSkill): be consumed as an input by another skill. Required. :vartype outputs: list[~azure.search.documents.indexes.models.OutputFieldMappingEntry] :ivar categories: A list of entity categories that should be extracted. 
- :vartype categories: list[str or ~azure.search.documents.indexes.models.EntityCategory] + :vartype categories: list[str] or list[~azure.search.documents.indexes.models.EntityCategory] :ivar default_language_code: A value indicating which language code to use. Default is ``en``. Known values are: "ar", "cs", "zh-Hans", "zh-Hant", "da", "nl", "en", "fi", "fr", "de", "el", "hu", "it", "ja", "ko", "no", "pl", "pt-PT", "pt-BR", "ru", "es", "sv", and "tr". @@ -3660,7 +3660,7 @@ def __init__( can be consumed as an input by another skill. Required. :paramtype outputs: list[~azure.search.documents.indexes.models.OutputFieldMappingEntry] :keyword categories: A list of entity categories that should be extracted. - :paramtype categories: list[str or ~azure.search.documents.indexes.models.EntityCategory] + :paramtype categories: list[str] or list[~azure.search.documents.indexes.models.EntityCategory] :keyword default_language_code: A value indicating which language code to use. Default is ``en``. Known values are: "ar", "cs", "zh-Hans", "zh-Hant", "da", "nl", "en", "fi", "fr", "de", "el", "hu", "it", "ja", "ko", "no", "pl", "pt-PT", "pt-BR", "ru", "es", "sv", and "tr". @@ -4374,9 +4374,9 @@ class ImageAnalysisSkill(SearchIndexerSkill): :vartype default_language_code: str or ~azure.search.documents.indexes.models.ImageAnalysisSkillLanguage :ivar visual_features: A list of visual features. - :vartype visual_features: list[str or ~azure.search.documents.indexes.models.VisualFeature] + :vartype visual_features: list[str] or list[~azure.search.documents.indexes.models.VisualFeature] :ivar details: A string indicating which domain-specific details to return. - :vartype details: list[str or ~azure.search.documents.indexes.models.ImageDetail] + :vartype details: list[str] or list[~azure.search.documents.indexes.models.ImageDetail] """ _validation = { @@ -4436,9 +4436,9 @@ def __init__( :paramtype default_language_code: str or ~azure.search.documents.indexes.models.ImageAnalysisSkillLanguage :keyword visual_features: A list of visual features. - :paramtype visual_features: list[str or ~azure.search.documents.indexes.models.VisualFeature] + :paramtype visual_features: list[str] or list[~azure.search.documents.indexes.models.VisualFeature] :keyword details: A string indicating which domain-specific details to return. - :paramtype details: list[str or ~azure.search.documents.indexes.models.ImageDetail] + :paramtype details: list[str] or list[~azure.search.documents.indexes.models.ImageDetail] """ super().__init__(name=name, description=description, context=context, inputs=inputs, outputs=outputs, **kwargs) self.odata_type: str = "#Microsoft.Skills.Vision.ImageAnalysisSkill" @@ -6302,7 +6302,7 @@ class NGramTokenizer(LexicalTokenizer): :ivar max_gram: The maximum n-gram length. Default is 2. Maximum is 300. :vartype max_gram: int :ivar token_chars: Character classes to keep in the tokens. - :vartype token_chars: list[str or ~azure.search.documents.indexes.models.TokenCharacterKind] + :vartype token_chars: list[str] or list[~azure.search.documents.indexes.models.TokenCharacterKind] """ _validation = { @@ -6340,7 +6340,7 @@ def __init__( :keyword max_gram: The maximum n-gram length. Default is 2. Maximum is 300. :paramtype max_gram: int :keyword token_chars: Character classes to keep in the tokens. 
- :paramtype token_chars: list[str or ~azure.search.documents.indexes.models.TokenCharacterKind] + :paramtype token_chars: list[str] or list[~azure.search.documents.indexes.models.TokenCharacterKind] """ super().__init__(name=name, **kwargs) self.odata_type: str = "#Microsoft.Azure.Search.NGramTokenizer" diff --git a/sdk/search/azure-search-documents/azure/search/documents/indexes/_search_index_client.py b/sdk/search/azure-search-documents/azure/search/documents/indexes/_search_index_client.py index 0a6b96137725..cbd64dfb3a67 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/indexes/_search_index_client.py +++ b/sdk/search/azure-search-documents/azure/search/documents/indexes/_search_index_client.py @@ -76,6 +76,7 @@ def __exit__(self, *args): def close(self) -> None: """Close the session. + :return: None :rtype: None """ @@ -109,7 +110,7 @@ def list_indexes(self, *, select: Optional[List[str]] = None, **kwargs: Any) -> :paramtype select: list[str] :return: List of indexes :rtype: ~azure.core.paging.ItemPaged[~azure.search.documents.indexes.models.SearchIndex] - :raises: ~azure.core.exceptions.HttpResponseError + :raises ~azure.core.exceptions.HttpResponseError: If the operation fails. """ kwargs["headers"] = self._merge_client_headers(kwargs.get("headers")) @@ -125,7 +126,7 @@ def list_index_names(self, **kwargs: Any) -> ItemPaged[str]: :return: List of index names :rtype: ~azure.core.paging.ItemPaged[str] - :raises: ~azure.core.exceptions.HttpResponseError + :raises ~azure.core.exceptions.HttpResponseError: If the operation fails. """ kwargs["headers"] = self._merge_client_headers(kwargs.get("headers")) @@ -135,13 +136,13 @@ def list_index_names(self, **kwargs: Any) -> ItemPaged[str]: @distributed_trace def get_index(self, name: str, **kwargs: Any) -> SearchIndex: - """ + """ Retrieve a named index in an Azure Search service :param name: The name of the index to retrieve. :type name: str :return: SearchIndex object :rtype: ~azure.search.documents.indexes.models.SearchIndex - :raises: ~azure.core.exceptions.HttpResponseError + :raises ~azure.core.exceptions.HttpResponseError: If the operation fails. .. admonition:: Example: @@ -165,7 +166,7 @@ def get_index_statistics(self, index_name: str, **kwargs: Any) -> MutableMapping :type index_name: str :return: Statistics for the given index, including a document count and storage usage. :rtype: Dict - :raises: ~azure.core.exceptions.HttpResponseError + :raises ~azure.core.exceptions.HttpResponseError: If the operation fails. """ kwargs["headers"] = self._merge_client_headers(kwargs.get("headers")) @@ -187,7 +188,7 @@ def delete_index( :type index: str or ~azure.search.documents.indexes.models.SearchIndex :keyword match_condition: The match condition to use upon the etag :paramtype match_condition: ~azure.core.MatchConditions - :raises: ~azure.core.exceptions.HttpResponseError + :raises ~azure.core.exceptions.HttpResponseError: If the operation fails. .. admonition:: Example: @@ -215,7 +216,7 @@ def create_index(self, index: SearchIndex, **kwargs: Any) -> SearchIndex: :type index: ~azure.search.documents.indexes.models.SearchIndex :return: The index created :rtype: ~azure.search.documents.indexes.models.SearchIndex - :raises: ~azure.core.exceptions.HttpResponseError + :raises ~azure.core.exceptions.HttpResponseError: If the operation fails. .. 
admonition:: Example: @@ -254,11 +255,11 @@ def create_or_update_index( :paramtype match_condition: ~azure.core.MatchConditions :return: The index created or updated :rtype: ~azure.search.documents.indexes.models.SearchIndex - :raises: ~azure.core.exceptions.ResourceNotFoundError or - ~azure.core.exceptions.ResourceModifiedError or - ~azure.core.exceptions.ResourceNotModifiedError or - ~azure.core.exceptions.ResourceNotFoundError or - ~azure.core.exceptions.ResourceExistsError + :raises ~azure.core.exceptions.ResourceNotFoundError: If the index doesn't exist. + :raises ~azure.core.exceptions.ResourceModifiedError: If the index has been modified in the server. + :raises ~azure.core.exceptions.ResourceNotModifiedError: If the index hasn't been modified in the server. + :raises ~azure.core.exceptions.ResourceNotFoundError: If the index doesn't exist. + :raises ~azure.core.exceptions.ResourceExistsError: If the index already exists. .. admonition:: Example: @@ -293,7 +294,7 @@ def analyze_text(self, index_name: str, analyze_request: AnalyzeTextOptions, **k :type analyze_request: ~azure.search.documents.indexes.models.AnalyzeTextOptions :return: AnalyzeResult :rtype: ~azure.search.documents.indexes.models.AnalyzeResult - :raises: ~azure.core.exceptions.HttpResponseError + :raises ~azure.core.exceptions.HttpResponseError: If the operation fails. .. admonition:: Example: @@ -322,7 +323,7 @@ def get_synonym_maps(self, *, select: Optional[List[str]] = None, **kwargs) -> L :paramtype select: list[str] :return: List of synonym maps :rtype: list[~azure.search.documents.indexes.models.SynonymMap] - :raises: ~azure.core.exceptions.HttpResponseError + :raises ~azure.core.exceptions.HttpResponseError: If the operation fails. .. admonition:: Example: @@ -348,7 +349,7 @@ def get_synonym_map_names(self, **kwargs: Any) -> List[str]: :return: List of synonym maps :rtype: list[str] - :raises: ~azure.core.exceptions.HttpResponseError + :raises ~azure.core.exceptions.HttpResponseError: If the operation fails. """ kwargs["headers"] = self._merge_client_headers(kwargs.get("headers")) @@ -364,7 +365,7 @@ def get_synonym_map(self, name: str, **kwargs: Any) -> SynonymMap: :type name: str :return: The retrieved Synonym Map :rtype: ~azure.search.documents.indexes.models.SynonymMap - :raises: ~azure.core.exceptions.ResourceNotFoundError + :raises ~azure.core.exceptions.ResourceNotFoundError: If the Synonym Map doesn't exist. .. admonition:: Example: @@ -493,7 +494,7 @@ def list_aliases(self, *, select: Optional[List[str]] = None, **kwargs: Any) -> :paramtype select: list[str] :return: List of Aliases :rtype: ~azure.core.paging.ItemPaged[~azure.search.documents.indexes.models.SearchAlias] - :raises: ~azure.core.exceptions.HttpResponseError + :raises ~azure.core.exceptions.HttpResponseError: If the operation fails. """ kwargs["headers"] = self._merge_client_headers(kwargs.get("headers")) if select: @@ -507,7 +508,7 @@ def list_alias_names(self, **kwargs: Any) -> ItemPaged[str]: :return: List of alias names :rtype: ~azure.core.paging.ItemPaged[str] - :raises: ~azure.core.exceptions.HttpResponseError + :raises ~azure.core.exceptions.HttpResponseError: If the operation fails. 
""" kwargs["headers"] = self._merge_client_headers(kwargs.get("headers")) @@ -523,7 +524,7 @@ def get_alias(self, name: str, **kwargs: Any) -> SearchAlias: :type name: str :return: SearchAlias object :rtype: ~azure.search.documents.indexes.models.SearchAlias - :raises: ~azure.core.exceptions.HttpResponseError + :raises ~azure.core.exceptions.HttpResponseError: If the operation fails. """ kwargs["headers"] = self._merge_client_headers(kwargs.get("headers")) result = self._client.aliases.get(name, **kwargs) @@ -544,7 +545,7 @@ def delete_alias( :type alias: str or ~azure.search.documents.indexes.models.SearchAlias :keyword match_condition: The match condition to use upon the etag :paramtype match_condition: ~azure.core.MatchConditions - :raises: ~azure.core.exceptions.HttpResponseError + :raises ~azure.core.exceptions.HttpResponseError: If the operation fails. .. admonition:: Example: @@ -572,7 +573,7 @@ def create_alias(self, alias: SearchAlias, **kwargs: Any) -> SearchAlias: :type alias: ~azure.search.documents.indexes.models.SearchAlias :return: The alias created :rtype: ~azure.search.documents.indexes.models.SearchAlias - :raises: ~azure.core.exceptions.HttpResponseError + :raises ~azure.core.exceptions.HttpResponseError: If the operation fails. .. admonition:: Example: @@ -600,11 +601,11 @@ def create_or_update_alias( :return: The index created or updated :rtype: ~azure.search.documents.indexes.models.SearchAlias - :raises: ~azure.core.exceptions.ResourceNotFoundError or - ~azure.core.exceptions.ResourceModifiedError or - ~azure.core.exceptions.ResourceNotModifiedError or - ~azure.core.exceptions.ResourceNotFoundError or - ~azure.core.exceptions.ResourceExistsError` + :raises ~azure.core.exceptions.ResourceNotFoundError: If the alias doesn't exist. + :raises ~azure.core.exceptions.ResourceModifiedError: If the alias has been modified in the server. + :raises ~azure.core.exceptions.ResourceNotModifiedError: If the alias hasn't been modified in the server. + :raises ~azure.core.exceptions.ResourceNotFoundError: If the alias doesn't exist. + :raises ~azure.core.exceptions.ResourceExistsError: If the alias already exists. .. admonition:: Example: diff --git a/sdk/search/azure-search-documents/azure/search/documents/indexes/_search_indexer_client.py b/sdk/search/azure-search-documents/azure/search/documents/indexes/_search_indexer_client.py index 592895702879..60030c646701 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/indexes/_search_indexer_client.py +++ b/sdk/search/azure-search-documents/azure/search/documents/indexes/_search_indexer_client.py @@ -78,6 +78,7 @@ def __exit__(self, *args) -> None: def close(self) -> None: """Close the session. + :return: None :rtype: None """ @@ -310,7 +311,7 @@ def reset_documents( keys or ids in this payload will be queued to be re-ingested. The default is false. :paramtype overwrite: bool :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError + :raises ~azure.core.exceptions.HttpResponseError: If there is an error in the REST request. """ kwargs["headers"] = self._merge_client_headers(kwargs.get("headers")) kwargs["keys_or_ids"] = keys_or_ids @@ -378,6 +379,7 @@ def create_or_update_data_source_connection( **kwargs: Any ) -> SearchIndexerDataSourceConnection: """Creates a new data source connection or updates a data source connection if it already exists. + :param data_source_connection: The definition of the data source connection to create or update. 
:type data_source_connection: ~azure.search.documents.indexes.models.SearchIndexerDataSourceConnection :keyword match_condition: The match condition to use upon the etag @@ -517,7 +519,7 @@ def get_skillsets(self, *, select: Optional[List[str]] = None, **kwargs: Any) -> :return: List of SearchIndexerSkillsets :rtype: list[~azure.search.documents.indexes.models.SearchIndexerSkillset] - :raises: ~azure.core.exceptions.HttpResponseError + :raises ~azure.core.exceptions.HttpResponseError: If there is an error in the REST request. """ kwargs["headers"] = self._merge_client_headers(kwargs.get("headers")) if select: @@ -532,7 +534,7 @@ def get_skillset_names(self, **kwargs: Any) -> List[str]: :return: List of SearchIndexerSkillset names :rtype: list[str] - :raises: ~azure.core.exceptions.HttpResponseError + :raises ~azure.core.exceptions.HttpResponseError: If there is an error in the REST request. """ kwargs["headers"] = self._merge_client_headers(kwargs.get("headers")) @@ -548,7 +550,7 @@ def get_skillset(self, name: str, **kwargs: Any) -> SearchIndexerSkillset: :type name: str :return: The retrieved SearchIndexerSkillset :rtype: ~azure.search.documents.indexes.models.SearchIndexerSkillset - :raises: ~azure.core.exceptions.ResourceNotFoundError + :raises ~azure.core.exceptions.ResourceNotFoundError: If the skillset cannot be found. """ kwargs["headers"] = self._merge_client_headers(kwargs.get("headers")) result = self._client.skillsets.get(name, **kwargs) @@ -652,7 +654,7 @@ def reset_skills(self, skillset: Union[str, SearchIndexerSkillset], skill_names: :type skill_names: List[str] :return: None, or the result of cls(response) :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError + :raises ~azure.core.exceptions.HttpResponseError: If there is an error in the REST request. """ kwargs["headers"] = self._merge_client_headers(kwargs.get("headers")) try: diff --git a/sdk/search/azure-search-documents/azure/search/documents/indexes/aio/_search_index_client.py b/sdk/search/azure-search-documents/azure/search/documents/indexes/aio/_search_index_client.py index c71e3e210282..737c08b829d4 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/indexes/aio/_search_index_client.py +++ b/sdk/search/azure-search-documents/azure/search/documents/indexes/aio/_search_index_client.py @@ -77,6 +77,7 @@ async def __aexit__(self, *args: Any) -> None: async def close(self) -> None: """Close the session. + :return: None :rtype: None """ @@ -109,7 +110,7 @@ def list_indexes(self, *, select: Optional[List[str]] = None, **kwargs) -> Async :paramtype select: list[str] :return: List of indexes :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.search.documents.indexes.models.SearchIndex] - :raises: ~azure.core.exceptions.HttpResponseError + :raises ~azure.core.exceptions.HttpResponseError: If the operation fails. """ kwargs["headers"] = self._merge_client_headers(kwargs.get("headers")) if select: @@ -124,7 +125,7 @@ def list_index_names(self, **kwargs: Any) -> AsyncItemPaged[str]: :return: List of index names :rtype: ~azure.core.async_paging.AsyncItemPaged[str] - :raises: ~azure.core.exceptions.HttpResponseError + :raises ~azure.core.exceptions.HttpResponseError: If the operation fails. 
""" kwargs["headers"] = self._merge_client_headers(kwargs.get("headers")) @@ -133,13 +134,13 @@ def list_index_names(self, **kwargs: Any) -> AsyncItemPaged[str]: @distributed_trace_async async def get_index(self, name: str, **kwargs: Any) -> SearchIndex: - """ + """ Retrieve a named index in an Azure Search service :param name: The name of the index to retrieve. :type name: str :return: SearchIndex object :rtype: ~azure.search.documents.indexes.models.SearchIndex - :raises: ~azure.core.exceptions.HttpResponseError + :raises ~azure.core.exceptions.HttpResponseError: If the operation fails. .. admonition:: Example: @@ -163,7 +164,7 @@ async def get_index_statistics(self, index_name: str, **kwargs: Any) -> MutableM :type index_name: str :return: Statistics for the given index, including a document count and storage usage. :rtype: Dict - :raises: ~azure.core.exceptions.HttpResponseError + :raises ~azure.core.exceptions.HttpResponseError: If the operation fails. """ kwargs["headers"] = self._merge_client_headers(kwargs.get("headers")) result = await self._client.indexes.get_statistics(index_name, **kwargs) @@ -184,7 +185,7 @@ async def delete_index( :type index: str or ~azure.search.documents.indexes.models.SearchIndex :keyword match_condition: The match condition to use upon the etag :paramtype match_condition: ~azure.core.MatchConditions - :raises: ~azure.core.exceptions.HttpResponseError + :raises ~azure.core.exceptions.HttpResponseError: If the operation fails. .. admonition:: Example: @@ -212,7 +213,7 @@ async def create_index(self, index: SearchIndex, **kwargs: Any) -> SearchIndex: :type index: ~azure.search.documents.indexes.models.SearchIndex :return: The index created :rtype: ~azure.search.documents.indexes.models.SearchIndex - :raises: ~azure.core.exceptions.HttpResponseError + :raises ~azure.core.exceptions.HttpResponseError: If the operation fails. .. admonition:: Example: @@ -251,11 +252,11 @@ async def create_or_update_index( :paramtype match_condition: ~azure.core.MatchConditions :return: The index created or updated :rtype: ~azure.search.documents.indexes.models.SearchIndex - :raises: ~azure.core.exceptions.ResourceNotFoundError or - ~azure.core.exceptions.ResourceModifiedError or - ~azure.core.exceptions.ResourceNotModifiedError or - ~azure.core.exceptions.ResourceNotFoundError or - ~azure.core.exceptions.ResourceExistsError + :raises ~azure.core.exceptions.ResourceNotFoundError: If the index doesn't exist. + :raises ~azure.core.exceptions.ResourceModifiedError: If the index has been modified. + :raises ~azure.core.exceptions.ResourceNotModifiedError: If the index has not been modified. + :raises ~azure.core.exceptions.ResourceNotFoundError: If the index is not found. + :raises ~azure.core.exceptions.ResourceExistsError: If the index already exists. .. admonition:: Example: @@ -290,7 +291,7 @@ async def analyze_text(self, index_name: str, analyze_request: AnalyzeTextOption :type analyze_request: ~azure.search.documents.indexes.models.AnalyzeTextOptions :return: AnalyzeResult :rtype: ~azure.search.documents.indexes.models.AnalyzeRequest - :raises: ~azure.core.exceptions.HttpResponseError + :raises ~azure.core.exceptions.HttpResponseError: If the operation fails. .. 
admonition:: Example: @@ -319,7 +320,7 @@ async def get_synonym_maps(self, *, select: Optional[List[str]] = None, **kwargs :paramtype select: List[str] :return: List of synonym maps :rtype: List[~azure.search.documents.indexes.models.SynonymMap] - :raises: ~azure.core.exceptions.HttpResponseError + :raises ~azure.core.exceptions.HttpResponseError: If the operation fails. .. admonition:: Example: @@ -345,7 +346,7 @@ async def get_synonym_map_names(self, **kwargs: Any) -> List[str]: :return: List of synonym map names :rtype: List[str] - :raises: ~azure.core.exceptions.HttpResponseError + :raises ~azure.core.exceptions.HttpResponseError: If the operation fails. """ kwargs["headers"] = self._merge_client_headers(kwargs.get("headers")) @@ -361,7 +362,7 @@ async def get_synonym_map(self, name: str, **kwargs: Any) -> SynonymMap: :type name: str :return: The retrieved Synonym Map :rtype: ~azure.search.documents.indexes.models.SynonymMap - :raises: ~azure.core.exceptions.ResourceNotFoundError + :raises ~azure.core.exceptions.ResourceNotFoundError: If the Synonym Map doesn't exist. .. admonition:: Example: @@ -489,7 +490,7 @@ def list_aliases(self, *, select: Optional[List[str]] = None, **kwargs) -> Async :paramtype select: list[str] :return: List of Aliases :rtype: ~azure.core.paging.AsyncItemPaged[~azure.search.documents.indexes.models.SearchAlias] - :raises: ~azure.core.exceptions.HttpResponseError + :raises ~azure.core.exceptions.HttpResponseError: If the operation fails. """ kwargs["headers"] = self._merge_client_headers(kwargs.get("headers")) if select: @@ -503,8 +504,7 @@ def list_alias_names(self, **kwargs) -> AsyncItemPaged[str]: :return: List of alias names :rtype: ~azure.core.paging.AsyncItemPaged[str] - :raises: ~azure.core.exceptions.HttpResponseError - + :raises ~azure.core.exceptions.HttpResponseError: If the operation fails. """ kwargs["headers"] = self._merge_client_headers(kwargs.get("headers")) @@ -513,13 +513,13 @@ def list_alias_names(self, **kwargs) -> AsyncItemPaged[str]: @distributed_trace_async async def get_alias(self, name: str, **kwargs) -> SearchAlias: - """ + """ Retrieve a named alias in an Azure Search service :param name: The name of the alias to retrieve. :type name: str :return: SearchAlias object :rtype: ~azure.search.documents.indexes.models.SearchAlias - :raises: ~azure.core.exceptions.HttpResponseError + :raises ~azure.core.exceptions.HttpResponseError: If the operation fails. """ kwargs["headers"] = self._merge_client_headers(kwargs.get("headers")) result = await self._client.aliases.get(name, **kwargs) @@ -535,11 +535,12 @@ async def delete_alias( ) -> None: """Deletes a search alias and its associated mapping to an index. This operation is permanent, with no recovery option. The mapped index is untouched by this operation + :param alias: The alias name or object to delete. :type alias: str or ~azure.search.documents.indexes.models.SearchAlias :keyword match_condition: The match condition to use upon the etag :paramtype match_condition: ~azure.core.MatchConditions - :raises: ~azure.core.exceptions.HttpResponseError + :raises ~azure.core.exceptions.HttpResponseError: If the operation fails. .. admonition:: Example: @@ -562,11 +563,12 @@ async def delete_alias( @distributed_trace_async async def create_alias(self, alias: SearchAlias, **kwargs: Any) -> SearchAlias: """Creates a new search alias. + :param alias: The alias object.
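A hedged sketch of the async alias calls documented above (get_alias followed by a conditional delete_alias); the endpoint, key, and alias name are placeholders:

import asyncio

from azure.core import MatchConditions
from azure.core.credentials import AzureKeyCredential
from azure.core.exceptions import HttpResponseError
from azure.search.documents.indexes.aio import SearchIndexClient

async def remove_alias():
    client = SearchIndexClient("https://<service>.search.windows.net", AzureKeyCredential("<api-key>"))
    async with client:
        try:
            alias = await client.get_alias("my-alias")
            # Only delete if the alias is unchanged since it was fetched.
            await client.delete_alias(alias, match_condition=MatchConditions.IfNotModified)
        except HttpResponseError as err:
            # The operation-failed case documented above.
            print("alias operation failed:", err.message)

asyncio.run(remove_alias())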
:type alias: ~azure.search.documents.indexes.models.SearchAlias :return: The alias created :rtype: ~azure.search.documents.indexes.models.SearchAlias - :raises: ~azure.core.exceptions.HttpResponseError + :raises ~azure.core.exceptions.HttpResponseError: If the operation fails. .. admonition:: Example: @@ -593,11 +595,11 @@ async def create_or_update_alias( :paramtype match_condition: ~azure.core.MatchConditions :return: The index created or updated :rtype: ~azure.search.documents.indexes.models.SearchAlias - :raises: ~azure.core.exceptions.ResourceNotFoundError - ~azure.core.exceptions.ResourceModifiedError or - ~azure.core.exceptions.ResourceNotModifiedError or - ~azure.core.exceptions.ResourceNotFoundError or - ~azure.core.exceptions.ResourceExistsError + :raises ~azure.core.exceptions.ResourceNotFoundError: If the alias doesn't exist. + :raises ~azure.core.exceptions.ResourceModifiedError: If the alias has been modified. + :raises ~azure.core.exceptions.ResourceNotModifiedError: If the alias has not been modified. + :raises ~azure.core.exceptions.ResourceNotFoundError: If the alias is not found. + :raises ~azure.core.exceptions.ResourceExistsError: If the alias already exists. .. admonition:: Example: diff --git a/sdk/search/azure-search-documents/azure/search/documents/indexes/aio/_search_indexer_client.py b/sdk/search/azure-search-documents/azure/search/documents/indexes/aio/_search_indexer_client.py index c83e536fc788..8456d3d2deb1 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/indexes/aio/_search_indexer_client.py +++ b/sdk/search/azure-search-documents/azure/search/documents/indexes/aio/_search_indexer_client.py @@ -73,6 +73,7 @@ async def __aexit__(self, *args) -> None: async def close(self) -> None: """Close the session. + :return: None :rtype: None """ @@ -296,7 +297,7 @@ async def reset_documents( keys or ids in this payload will be queued to be re-ingested. The default is false. :paramtype overwrite: bool :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError + :raises ~azure.core.exceptions.HttpResponseError: If there is an error in the REST request. """ kwargs["headers"] = self._merge_client_headers(kwargs.get("headers")) kwargs["keys_or_ids"] = keys_or_ids @@ -334,6 +335,7 @@ async def create_data_source_connection( self, data_source_connection: SearchIndexerDataSourceConnection, **kwargs: Any ) -> SearchIndexerDataSourceConnection: """Creates a new data source connection. + :param data_source_connection: The definition of the data source connection to create. :type data_source_connection: ~azure.search.documents.indexes.models.SearchIndexerDataSourceConnection :return: The created SearchIndexerDataSourceConnection @@ -364,6 +366,7 @@ async def create_or_update_data_source_connection( **kwargs: Any ) -> SearchIndexerDataSourceConnection: """Creates a new data source connection or updates a data source connection if it already exists. + :param data_source_connection: The definition of the data source connection to create or update. 
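To illustrate the data source connection upsert documented here, a minimal sketch; the connection string, container, and names are placeholders, and the exact model fields are assumed from the public API rather than taken from this change:

from azure.core.credentials import AzureKeyCredential
from azure.search.documents.indexes.aio import SearchIndexerClient
from azure.search.documents.indexes.models import (
    SearchIndexerDataContainer,
    SearchIndexerDataSourceConnection,
)

async def ensure_data_source():
    client = SearchIndexerClient("https://<service>.search.windows.net", AzureKeyCredential("<api-key>"))
    async with client:
        connection = SearchIndexerDataSourceConnection(
            name="hotels-blob-ds",  # hypothetical name
            type="azureblob",
            connection_string="<storage-connection-string>",
            container=SearchIndexerDataContainer(name="hotels"),
        )
        # Upserts the definition; create_data_source_connection would fail if it already exists.
        return await client.create_or_update_data_source_connection(connection)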
:type data_source_connection: ~azure.search.documents.indexes.models.SearchIndexerDataSourceConnection :keyword match_condition: The match condition to use upon the etag @@ -505,7 +508,7 @@ async def get_skillsets(self, *, select: Optional[List[str]] = None, **kwargs) - :paramtype select: list[str] :return: List of SearchIndexerSkillsets :rtype: list[~azure.search.documents.indexes.models.SearchIndexerSkillset] - :raises: ~azure.core.exceptions.HttpResponseError + :raises ~azure.core.exceptions.HttpResponseError: If there is an error in the REST request. """ kwargs["headers"] = self._merge_client_headers(kwargs.get("headers")) if select: @@ -520,7 +523,7 @@ async def get_skillset_names(self, **kwargs) -> List[str]: :return: List of SearchIndexerSkillset names :rtype: list[str] - :raises: ~azure.core.exceptions.HttpResponseError + :raises ~azure.core.exceptions.HttpResponseError: If there is an error in the REST request. """ kwargs["headers"] = self._merge_client_headers(kwargs.get("headers")) @@ -536,7 +539,7 @@ async def get_skillset(self, name: str, **kwargs) -> SearchIndexerSkillset: :type name: str :return: The retrieved SearchIndexerSkillset :rtype: ~azure.search.documents.indexes.models.SearchIndexerSkillset - :raises: ~azure.core.exceptions.ResourceNotFoundError + :raises ~azure.core.exceptions.ResourceNotFoundError: If the skillset doesn't exist. """ kwargs["headers"] = self._merge_client_headers(kwargs.get("headers")) result = await self._client.skillsets.get(name, **kwargs) @@ -637,7 +640,7 @@ async def reset_skills(self, skillset: Union[str, SearchIndexerSkillset], skill_ :type skill_names: List[str] :return: None, or the result of cls(response) :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError + :raises ~azure.core.exceptions.HttpResponseError: If there is an error in the REST request. """ kwargs["headers"] = self._merge_client_headers(kwargs.get("headers")) try: diff --git a/sdk/search/azure-search-documents/azure/search/documents/indexes/models/_index.py b/sdk/search/azure-search-documents/azure/search/documents/indexes/models/_index.py index 403504c1ed78..e45c062caaf6 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/indexes/models/_index.py +++ b/sdk/search/azure-search-documents/azure/search/documents/indexes/models/_index.py @@ -294,6 +294,7 @@ def _from_generated(cls, search_field) -> Optional[Self]: def serialize(self, keep_readonly: bool = False, **kwargs: Any) -> MutableMapping[str, Any]: """Return the JSON that would be sent to server from this model. + :param bool keep_readonly: If you want to serialize the readonly attributes :returns: A dict JSON compatible object :rtype: dict @@ -307,7 +308,7 @@ def deserialize(cls, data: Any, content_type: Optional[str] = None) -> Optional[ :param str data: A str using RestAPI structure. JSON by default. :param str content_type: JSON by default, set application/xml if XML. :returns: A SearchField instance - :raises: DeserializationError if something went wrong + :raises DeserializationError: if something went wrong """ return cls._from_generated(_SearchField.deserialize(data, content_type=content_type)) @@ -346,7 +347,7 @@ def from_dict( # type: ignore :param str content_type: JSON by default, set application/xml if XML. 
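For the async skillset methods above, a small usage sketch under the same placeholder assumptions (the skillset and skill names are hypothetical):

from azure.core.credentials import AzureKeyCredential
from azure.search.documents.indexes.aio import SearchIndexerClient

async def rerun_ocr_skill():
    client = SearchIndexerClient("https://<service>.search.windows.net", AzureKeyCredential("<api-key>"))
    async with client:
        names = await client.get_skillset_names()
        if "my-skillset" in names:
            # Ask the service to re-run just this skill on the next indexer execution.
            await client.reset_skills("my-skillset", skill_names=["my-ocr-skill"])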
:returns: A SearchField instance :rtype: SearchField - :raises: DeserializationError if something went wrong + :raises DeserializationError: if something went wrong """ return cls._from_generated( _SearchField.from_dict(data, content_type=content_type, key_extractors=key_extractors) @@ -586,8 +587,7 @@ def ComplexField( fields: Optional[List[SearchField]] = None, **kw # pylint:disable=unused-argument ) -> SearchField: - """Configure a Complex or Complex collection field for an Azure Search - Index + """Configure a Complex or Complex collection field for an Azure Search Index :keyword name: Required. The name of the field, which must be unique within the fields collection of the index or parent field. @@ -781,6 +781,7 @@ def _from_generated(cls, search_index) -> Optional[Self]: def serialize(self, keep_readonly: bool = False, **kwargs: Any) -> MutableMapping[str, Any]: """Return the JSON that would be sent to server from this model. + :param bool keep_readonly: If you want to serialize the readonly attributes :returns: A dict JSON compatible object :rtype: dict @@ -795,7 +796,7 @@ def deserialize(cls, data: Any, content_type: Optional[str] = None) -> Optional[ :param str content_type: JSON by default, set application/xml if XML. :returns: A SearchIndex instance :rtype: SearchIndex - :raises: DeserializationError if something went wrong + :raises DeserializationError: if something went wrong """ return cls._from_generated(_SearchIndex.deserialize(data, content_type=content_type)) @@ -834,7 +835,7 @@ def from_dict( # type: ignore :param str content_type: JSON by default, set application/xml if XML. :returns: A SearchIndex instance :rtype: SearchIndex - :raises: DeserializationError if something went wrong + :raises DeserializationError: if something went wrong """ return cls._from_generated( _SearchIndex.from_dict(data, content_type=content_type, key_extractors=key_extractors) diff --git a/sdk/search/azure-search-documents/azure/search/documents/indexes/models/_models.py b/sdk/search/azure-search-documents/azure/search/documents/indexes/models/_models.py index 2831c47bbc50..8e8a4d1d930c 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/indexes/models/_models.py +++ b/sdk/search/azure-search-documents/azure/search/documents/indexes/models/_models.py @@ -142,6 +142,7 @@ def _from_generated(cls, skillset) -> Optional[Self]: def serialize(self, keep_readonly: bool = False, **kwargs: Any) -> MutableMapping[str, Any]: """Return the JSON that would be sent to server from this model. + :param bool keep_readonly: If you want to serialize the readonly attributes :returns: A dict JSON compatible object :rtype: dict @@ -156,7 +157,7 @@ def deserialize(cls, data: Any, content_type: Optional[str] = None) -> Optional[ :param str content_type: JSON by default, set application/xml if XML. :returns: A SearchIndexerSkillset instance :rtype: SearchIndexerSkillset - :raises: DeserializationError if something went wrong + :raises DeserializationError: if something went wrong """ return cls._from_generated(_SearchIndexerSkillset.deserialize(data, content_type=content_type)) @@ -195,7 +196,7 @@ def from_dict( # type: ignore :param str content_type: JSON by default, set application/xml if XML. 
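To make the serialize/deserialize contract above concrete, a hedged round-trip sketch on a SearchIndex; the field layout is illustrative only:

import json

from azure.search.documents.indexes.models import (
    ComplexField,
    SearchableField,
    SearchFieldDataType,
    SearchIndex,
    SimpleField,
)

index = SearchIndex(
    name="hotels-sample",  # hypothetical index name
    fields=[
        SimpleField(name="hotelId", type=SearchFieldDataType.String, key=True),
        SearchableField(name="description"),
        ComplexField(name="address", fields=[SearchableField(name="city")]),
    ],
)

payload = index.serialize()                              # dict in REST API shape
restored = SearchIndex.deserialize(json.dumps(payload))  # raises DeserializationError on malformed input
assert restored is not None and restored.name == index.name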
:returns: A SearchIndexerSkillset instance :rtype: SearchIndexerSkillset - :raises: DeserializationError if something went wrong + :raises DeserializationError: if something went wrong """ return cls._from_generated( _SearchIndexerSkillset.from_dict(data, content_type=content_type, key_extractors=key_extractors) @@ -237,7 +238,7 @@ class EntityRecognitionSkill(SearchIndexerSkill): that can be consumed as an input by another skill. :vartype outputs: list[~azure.search.documents.indexes.models.OutputFieldMappingEntry] :ivar categories: A list of entity categories that should be extracted. - :vartype categories: list[str or ~azure.search.documents.indexes.models.EntityCategory] + :vartype categories: list[str] or list[~azure.search.documents.indexes.models.EntityCategory] :ivar default_language_code: A value indicating which language code to use. Default is en. Possible values include: "ar", "cs", "zh-Hans", "zh-Hant", "da", "nl", "en", "fi", "fr", "de", "el", "hu", "it", "ja", "ko", "no", "pl", "pt-PT", "pt-BR", "ru", "es", "sv", "tr". @@ -440,46 +441,7 @@ def _from_generated(cls, skill): class AnalyzeTextOptions(_serialization.Model): - """Specifies some text and analysis components used to break that text into tokens. - - All required parameters must be populated in order to send to Azure. - - :ivar text: Required. The text to break into tokens. - :vartype text: str - :ivar analyzer_name: The name of the analyzer to use to break the given text. If this parameter is - not specified, you must specify a tokenizer instead. The tokenizer and analyzer parameters are - mutually exclusive. Possible values include: "ar.microsoft", "ar.lucene", "hy.lucene", - "bn.microsoft", "eu.lucene", "bg.microsoft", "bg.lucene", "ca.microsoft", "ca.lucene", "zh- - Hans.microsoft", "zh-Hans.lucene", "zh-Hant.microsoft", "zh-Hant.lucene", "hr.microsoft", - "cs.microsoft", "cs.lucene", "da.microsoft", "da.lucene", "nl.microsoft", "nl.lucene", - "en.microsoft", "en.lucene", "et.microsoft", "fi.microsoft", "fi.lucene", "fr.microsoft", - "fr.lucene", "gl.lucene", "de.microsoft", "de.lucene", "el.microsoft", "el.lucene", - "gu.microsoft", "he.microsoft", "hi.microsoft", "hi.lucene", "hu.microsoft", "hu.lucene", - "is.microsoft", "id.microsoft", "id.lucene", "ga.lucene", "it.microsoft", "it.lucene", - "ja.microsoft", "ja.lucene", "kn.microsoft", "ko.microsoft", "ko.lucene", "lv.microsoft", - "lv.lucene", "lt.microsoft", "ml.microsoft", "ms.microsoft", "mr.microsoft", "nb.microsoft", - "no.lucene", "fa.lucene", "pl.microsoft", "pl.lucene", "pt-BR.microsoft", "pt-BR.lucene", "pt- - PT.microsoft", "pt-PT.lucene", "pa.microsoft", "ro.microsoft", "ro.lucene", "ru.microsoft", - "ru.lucene", "sr-cyrillic.microsoft", "sr-latin.microsoft", "sk.microsoft", "sl.microsoft", - "es.microsoft", "es.lucene", "sv.microsoft", "sv.lucene", "ta.microsoft", "te.microsoft", - "th.microsoft", "th.lucene", "tr.microsoft", "tr.lucene", "uk.microsoft", "ur.microsoft", - "vi.microsoft", "standard.lucene", "standardasciifolding.lucene", "keyword", "pattern", - "simple", "stop", "whitespace". - :vartype analyzer_name: str or ~azure.search.documents.indexes.models.LexicalAnalyzerName - :ivar tokenizer_name: The name of the tokenizer to use to break the given text. If this parameter - is not specified, you must specify an analyzer instead. The tokenizer and analyzer parameters - are mutually exclusive. 
Possible values include: "classic", "edgeNGram", "keyword_v2", - "letter", "lowercase", "microsoft_language_tokenizer", "microsoft_language_stemming_tokenizer", - "nGram", "path_hierarchy_v2", "pattern", "standard_v2", "uax_url_email", "whitespace". - :vartype tokenizer_name: str or ~azure.search.documents.indexes.models.LexicalTokenizerName - :ivar token_filters: An optional list of token filters to use when breaking the given text. - This parameter can only be set when using the tokenizer parameter. - :vartype token_filters: list[str or ~azure.search.documents.indexes.models.TokenFilterName] - :ivar char_filters: An optional list of character filters to use when breaking the given text. - This parameter can only be set when using the tokenizer parameter. - :vartype char_filters: list[str] - """ - + """Specifies some text and analysis components used to break that text into tokens.""" def __init__( self, *, @@ -491,6 +453,42 @@ def __init__( char_filters: Optional[List[str]] = None, **kwargs ): + """ + :keyword text: Required. The text to break into tokens. + :paramtype text: str + :keyword analyzer_name: The name of the analyzer to use to break the given text. If this parameter is + not specified, you must specify a tokenizer instead. The tokenizer and analyzer parameters are + mutually exclusive. Possible values include: "ar.microsoft", "ar.lucene", "hy.lucene", + "bn.microsoft", "eu.lucene", "bg.microsoft", "bg.lucene", "ca.microsoft", "ca.lucene", "zh- + Hans.microsoft", "zh-Hans.lucene", "zh-Hant.microsoft", "zh-Hant.lucene", "hr.microsoft", + "cs.microsoft", "cs.lucene", "da.microsoft", "da.lucene", "nl.microsoft", "nl.lucene", + "en.microsoft", "en.lucene", "et.microsoft", "fi.microsoft", "fi.lucene", "fr.microsoft", + "fr.lucene", "gl.lucene", "de.microsoft", "de.lucene", "el.microsoft", "el.lucene", + "gu.microsoft", "he.microsoft", "hi.microsoft", "hi.lucene", "hu.microsoft", "hu.lucene", + "is.microsoft", "id.microsoft", "id.lucene", "ga.lucene", "it.microsoft", "it.lucene", + "ja.microsoft", "ja.lucene", "kn.microsoft", "ko.microsoft", "ko.lucene", "lv.microsoft", + "lv.lucene", "lt.microsoft", "ml.microsoft", "ms.microsoft", "mr.microsoft", "nb.microsoft", + "no.lucene", "fa.lucene", "pl.microsoft", "pl.lucene", "pt-BR.microsoft", "pt-BR.lucene", "pt- + PT.microsoft", "pt-PT.lucene", "pa.microsoft", "ro.microsoft", "ro.lucene", "ru.microsoft", + "ru.lucene", "sr-cyrillic.microsoft", "sr-latin.microsoft", "sk.microsoft", "sl.microsoft", + "es.microsoft", "es.lucene", "sv.microsoft", "sv.lucene", "ta.microsoft", "te.microsoft", + "th.microsoft", "th.lucene", "tr.microsoft", "tr.lucene", "uk.microsoft", "ur.microsoft", + "vi.microsoft", "standard.lucene", "standardasciifolding.lucene", "keyword", "pattern", + "simple", "stop", "whitespace". + :paramtype analyzer_name: str or ~azure.search.documents.indexes.models.LexicalAnalyzerName + :keyword tokenizer_name: The name of the tokenizer to use to break the given text. If this parameter + is not specified, you must specify an analyzer instead. The tokenizer and analyzer parameters + are mutually exclusive. Possible values include: "classic", "edgeNGram", "keyword_v2", + "letter", "lowercase", "microsoft_language_tokenizer", "microsoft_language_stemming_tokenizer", + "nGram", "path_hierarchy_v2", "pattern", "standard_v2", "uax_url_email", "whitespace". 
+ :paramtype tokenizer_name: str or ~azure.search.documents.indexes.models.LexicalTokenizerName + :keyword token_filters: An optional list of token filters to use when breaking the given text. + This parameter can only be set when using the tokenizer parameter. + :paramtype token_filters: list[str] or list[~azure.search.documents.indexes.models.TokenFilterName] + :keyword char_filters: An optional list of character filters to use when breaking the given text. + This parameter can only be set when using the tokenizer parameter. + :paramtype char_filters: list[str] + """ super().__init__(**kwargs) self.text = text self.analyzer_name = analyzer_name @@ -524,6 +522,7 @@ def _from_analyze_request(cls, analyze_request) -> Optional[Self]: def serialize(self, keep_readonly: bool = False, **kwargs: Any) -> MutableMapping[str, Any]: """Return the JSON that would be sent to server from this model. + :param bool keep_readonly: If you want to serialize the readonly attributes :returns: A dict JSON compatible object :rtype: dict @@ -538,7 +537,7 @@ def deserialize(cls, data: Any, content_type: Optional[str] = None) -> Optional[ :param str content_type: JSON by default, set application/xml if XML. :returns: A AnalyzeTextOptions instance :rtype: AnalyzeTextOptions - :raises: DeserializationError if something went wrong + :raises DeserializationError: if something went wrong """ return cls._from_analyze_request(AnalyzeRequest.deserialize(data, content_type=content_type)) @@ -577,7 +576,7 @@ def from_dict( # type: ignore :param str content_type: JSON by default, set application/xml if XML. :returns: A AnalyzeTextOptions instance :rtype: AnalyzeTextOptions - :raises: DeserializationError if something went wrong + :raises DeserializationError: if something went wrong """ return cls._from_analyze_request( AnalyzeRequest.from_dict(data, content_type=content_type, key_extractors=key_extractors) @@ -608,7 +607,7 @@ class CustomAnalyzer(LexicalAnalyzer): :ivar token_filters: A list of token filters used to filter out or modify the tokens generated by a tokenizer. For example, you can specify a lowercase filter that converts all characters to lowercase. The filters are run in the order in which they are listed. - :vartype token_filters: list[str or ~azure.search.documents.indexes.models.TokenFilterName] + :vartype token_filters: list[str] or list[~azure.search.documents.indexes.models.TokenFilterName] :ivar char_filters: A list of character filters used to prepare input text before it is processed by the tokenizer. For instance, they can replace certain characters or symbols. The filters are run in the order in which they are listed. @@ -868,6 +867,7 @@ def _from_generated(cls, search_resource_encryption_key) -> Optional[Self]: def serialize(self, keep_readonly: bool = False, **kwargs: Any) -> MutableMapping[str, Any]: """Return the JSON that would be sent to server from this model. + :param bool keep_readonly: If you want to serialize the readonly attributes :returns: A dict JSON compatible object :rtype: dict @@ -881,7 +881,7 @@ def deserialize(cls, data: Any, content_type: Optional[str] = None) -> Optional[ :param str data: A str using RestAPI structure. JSON by default. :param str content_type: JSON by default, set application/xml if XML. 
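The analyzer/tokenizer exclusivity described in the AnalyzeTextOptions docstring above can be shown with a short sketch; the index name, text, and service details are placeholders:

from azure.core.credentials import AzureKeyCredential
from azure.search.documents.indexes import SearchIndexClient
from azure.search.documents.indexes.models import AnalyzeTextOptions

client = SearchIndexClient("https://<service>.search.windows.net", AzureKeyCredential("<api-key>"))

# analyzer_name and tokenizer_name are mutually exclusive: set exactly one of them.
options = AnalyzeTextOptions(text="Breaking this sentence into tokens", analyzer_name="en.lucene")
result = client.analyze_text("hotels-sample", options)
for token in result.tokens:
    print(token.token, token.start_offset, token.end_offset)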
:returns: A SearchResourceEncryptionKey instance - :raises: DeserializationError if something went wrong + :raises DeserializationError: if something went wrong """ return cls._from_generated( # type: ignore _SearchResourceEncryptionKey.deserialize(data, content_type=content_type) @@ -922,7 +922,7 @@ def from_dict( # type: ignore :param str content_type: JSON by default, set application/xml if XML. :returns: A SearchResourceEncryptionKey instance :rtype: SearchResourceEncryptionKey - :raises: DeserializationError if something went wrong + :raises DeserializationError: if something went wrong """ return cls._from_generated( _SearchResourceEncryptionKey.from_dict(data, content_type=content_type, key_extractors=key_extractors) @@ -998,6 +998,7 @@ def _from_generated(cls, synonym_map) -> Optional[Self]: def serialize(self, keep_readonly: bool = False, **kwargs: Any) -> MutableMapping[str, Any]: """Return the JSON that would be sent to server from this model. + :param bool keep_readonly: If you want to serialize the readonly attributes :returns: A dict JSON compatible object :rtype: dict @@ -1012,7 +1013,7 @@ def deserialize(cls, data: Any, content_type: Optional[str] = None) -> Optional[ :param str content_type: JSON by default, set application/xml if XML. :returns: A SynonymMap instance :rtype: SynonymMap - :raises: DeserializationError if something went wrong + :raises DeserializationError: if something went wrong """ return cls._from_generated(_SynonymMap.deserialize(data, content_type=content_type)) @@ -1051,7 +1052,7 @@ def from_dict( # type: ignore :param str content_type: JSON by default, set application/xml if XML. :returns: A SynonymMap instance :rtype: SynonymMap - :raises: DeserializationError if something went wrong + :raises DeserializationError: if something went wrong """ return cls._from_generated( _SynonymMap.from_dict(data, content_type=content_type, key_extractors=key_extractors) @@ -1174,6 +1175,7 @@ def _from_generated(cls, search_indexer_data_source) -> Optional[Self]: def serialize(self, keep_readonly: bool = False, **kwargs: Any) -> MutableMapping[str, Any]: """Return the JSON that would be sent to server from this model. + :param bool keep_readonly: If you want to serialize the readonly attributes :returns: A dict JSON compatible object :rtype: dict @@ -1188,7 +1190,7 @@ def deserialize(cls, data: Any, content_type: Optional[str] = None) -> Optional[ :param str content_type: JSON by default, set application/xml if XML. :returns: A SearchIndexerDataSourceConnection instance :rtype: SearchIndexerDataSourceConnection - :raises: DeserializationError if something went wrong + :raises DeserializationError: if something went wrong """ return cls._from_generated(_SearchIndexerDataSource.deserialize(data, content_type=content_type)) @@ -1227,7 +1229,7 @@ def from_dict( # type: ignore :param str content_type: JSON by default, set application/xml if XML. :returns: A SearchIndexerDataSourceConnection instance :rtype: SearchIndexerDataSourceConnection - :raises: DeserializationError if something went wrong + :raises DeserializationError: if something went wrong """ return cls._from_generated( _SearchIndexerDataSource.from_dict(data, content_type=content_type, key_extractors=key_extractors) @@ -1412,6 +1414,7 @@ def _from_generated(cls, search_indexer) -> Optional[Self]: def serialize(self, keep_readonly: bool = False, **kwargs: Any) -> MutableMapping[str, Any]: """Return the JSON that would be sent to server from this model. 
+ :param bool keep_readonly: If you want to serialize the readonly attributes :returns: A dict JSON compatible object :rtype: dict @@ -1426,6 +1429,6 @@ def deserialize(cls, data: Any, content_type: Optional[str] = None) -> Optional[ :param str content_type: JSON by default, set application/xml if XML. :returns: A SearchIndexer instance :rtype: SearchIndexer - :raises: DeserializationError if something went wrong + :raises DeserializationError: if something went wrong """ return cls._from_generated(_SearchIndexer.deserialize(data, content_type=content_type))
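Finally, a small sketch of the SearchIndexer deserialize path whose docstring is fixed above; the JSON payload is a minimal illustrative definition whose keys are assumed to follow the service's REST casing:

import json

from azure.search.documents.indexes.models import SearchIndexer

raw = json.dumps({
    "name": "hotels-indexer",
    "dataSourceName": "hotels-blob-ds",
    "targetIndexName": "hotels-sample",
})

indexer = SearchIndexer.deserialize(raw)  # raises DeserializationError if the payload is malformed
if indexer is not None:
    print(indexer.name, indexer.target_index_name)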