This repository has been archived by the owner on Dec 17, 2023. It is now read-only.

feat(v2beta1): add the API of StreamingAnalyzeContent (#520)
* feat: add the API of StreamingAnalyzeContent

PiperOrigin-RevId: 446850583

Source-Link: googleapis/googleapis@b9927eb

Source-Link: googleapis/googleapis-gen@d442854
Copy-Tag: eyJwIjoiLmdpdGh1Yi8uT3dsQm90LnlhbWwiLCJoIjoiZDQ0Mjg1NDk5YTBlNzU3ZTY4ZDM5ZGMxOGY5MWQyODAwNjk0YWQ5MCJ9

* 🦉 Updates from OwlBot post-processor

See https://github.com/googleapis/repo-automation-bots/blob/main/packages/owl-bot/README.md

Co-authored-by: Owl Bot <gcf-owl-bot[bot]@users.noreply.github.com>
gcf-owl-bot[bot] committed May 9, 2022
1 parent 347e03f commit 0d3b413
Showing 15 changed files with 1,202 additions and 10 deletions.
6 changes: 6 additions & 0 deletions google/cloud/dialogflow_v2beta1/__init__.py
@@ -191,6 +191,7 @@
from .types.participant import DtmfParameters
from .types.participant import FaqAnswer
from .types.participant import GetParticipantRequest
from .types.participant import InputTextConfig
from .types.participant import ListParticipantsRequest
from .types.participant import ListParticipantsResponse
from .types.participant import ListSuggestionsRequest
@@ -201,6 +202,8 @@
from .types.participant import Participant
from .types.participant import ResponseMessage
from .types.participant import SmartReplyAnswer
from .types.participant import StreamingAnalyzeContentRequest
from .types.participant import StreamingAnalyzeContentResponse
from .types.participant import SuggestArticlesRequest
from .types.participant import SuggestArticlesResponse
from .types.participant import SuggestFaqAnswersRequest
@@ -368,6 +371,7 @@
"ImportDocumentsRequest",
"ImportDocumentsResponse",
"InputAudioConfig",
"InputTextConfig",
"Intent",
"IntentBatch",
"IntentView",
@@ -437,6 +441,8 @@
"SpeechToTextConfig",
"SpeechWordInfo",
"SsmlVoiceGender",
"StreamingAnalyzeContentRequest",
"StreamingAnalyzeContentResponse",
"StreamingDetectIntentRequest",
"StreamingDetectIntentResponse",
"StreamingRecognitionResult",
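The three names added above (InputTextConfig, StreamingAnalyzeContentRequest, StreamingAnalyzeContentResponse) are re-exported at the package root, so they are importable directly from google.cloud.dialogflow_v2beta1. A minimal sketch of that surface; the language_code field on InputTextConfig is an assumption from the proto documentation and is not shown in this diff:

    # Sketch: the newly exported types resolve from the package namespace.
    # `language_code` on InputTextConfig is assumed from the proto docs.
    from google.cloud import dialogflow_v2beta1

    text_config = dialogflow_v2beta1.InputTextConfig(language_code="en-US")

    request = dialogflow_v2beta1.StreamingAnalyzeContentRequest(
        participant="participant_value",
        text_config=text_config,
    )
    print(type(request).__name__)  # StreamingAnalyzeContentRequest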
10 changes: 10 additions & 0 deletions google/cloud/dialogflow_v2beta1/gapic_metadata.json
@@ -874,6 +874,11 @@
"list_suggestions"
]
},
"StreamingAnalyzeContent": {
"methods": [
"streaming_analyze_content"
]
},
"SuggestArticles": {
"methods": [
"suggest_articles"
@@ -929,6 +934,11 @@
"list_suggestions"
]
},
"StreamingAnalyzeContent": {
"methods": [
"streaming_analyze_content"
]
},
"SuggestArticles": {
"methods": [
"suggest_articles"
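Both transport sections of gapic_metadata.json gain the same entry, mapping the StreamingAnalyzeContent RPC to the streaming_analyze_content client method. A hypothetical lookup against a fragment shaped like the hunks above:

    # Hypothetical sketch: resolving the client method registered for an
    # RPC from a gapic_metadata.json fragment shaped like the hunk above.
    import json

    fragment = json.loads("""
    {
      "StreamingAnalyzeContent": {
        "methods": ["streaming_analyze_content"]
      }
    }
    """)

    print(fragment["StreamingAnalyzeContent"]["methods"][0])
    # -> streaming_analyze_content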
168 changes: 167 additions & 1 deletion google/cloud/dialogflow_v2beta1/services/participants/async_client.py
@@ -16,7 +16,18 @@
from collections import OrderedDict
import functools
import re
from typing import Dict, Mapping, Optional, Sequence, Tuple, Type, Union
from typing import (
Dict,
Mapping,
Optional,
AsyncIterable,
Awaitable,
AsyncIterator,
Sequence,
Tuple,
Type,
Union,
)
import pkg_resources
import warnings

@@ -761,6 +772,161 @@ async def sample_analyze_content():
# Done; return the response.
return response

def streaming_analyze_content(
self,
requests: AsyncIterator[participant.StreamingAnalyzeContentRequest] = None,
*,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> Awaitable[AsyncIterable[participant.StreamingAnalyzeContentResponse]]:
r"""Adds a text (e.g., chat) or audio (e.g., phone recording)
message from a participant into the conversation. Note: This
method is only available through the gRPC API (not REST).
The top-level message sent to the client by the server is
``StreamingAnalyzeContentResponse``. Multiple response messages
can be returned in order. The first one or more messages contain
the ``recognition_result`` field. Each result represents a more
complete transcript of what the user said. The next message
contains the ``reply_text`` field, and potentially the
``reply_audio`` and/or the ``automated_agent_reply`` fields.
Note: Always use agent versions for production traffic sent to
virtual agents. See `Versions and
environments <https://cloud.google.com/dialogflow/es/docs/agents-versions>`__.
.. code-block:: python
from google.cloud import dialogflow_v2beta1
async def sample_streaming_analyze_content():
# Create a client
client = dialogflow_v2beta1.ParticipantsAsyncClient()
# Initialize request argument(s)
request = dialogflow_v2beta1.StreamingAnalyzeContentRequest(
input_audio=b'input_audio_blob',
participant="participant_value",
)
# This method expects an iterator which contains
# 'dialogflow_v2beta1.StreamingAnalyzeContentRequest' objects
# Here we create a generator that yields a single `request` for
# demonstrative purposes.
requests = [request]
def request_generator():
for request in requests:
yield request
# Make the request
stream = await client.streaming_analyze_content(requests=request_generator())
# Handle the response
async for response in stream:
print(response)
Args:
requests (AsyncIterator[`google.cloud.dialogflow_v2beta1.types.StreamingAnalyzeContentRequest`]):
The request object AsyncIterator. The top-level message sent by the
client to the
[Participants.StreamingAnalyzeContent][google.cloud.dialogflow.v2beta1.Participants.StreamingAnalyzeContent]
method.
Multiple request messages should be sent in order:
1. The first message must contain
[participant][google.cloud.dialogflow.v2beta1.StreamingAnalyzeContentRequest.participant],
[config][google.cloud.dialogflow.v2beta1.StreamingAnalyzeContentRequest.config]
and optionally
[query_params][google.cloud.dialogflow.v2beta1.StreamingAnalyzeContentRequest.query_params].
If you want to receive an audio response, it should
also contain
[reply_audio_config][google.cloud.dialogflow.v2beta1.StreamingAnalyzeContentRequest.reply_audio_config].
The message must not contain
[input][google.cloud.dialogflow.v2beta1.StreamingAnalyzeContentRequest.input].
2. If
[config][google.cloud.dialogflow.v2beta1.StreamingAnalyzeContentRequest.config]
in the first message was set to
[audio_config][google.cloud.dialogflow.v2beta1.StreamingAnalyzeContentRequest.audio_config],
all subsequent messages must contain
[input_audio][google.cloud.dialogflow.v2beta1.StreamingAnalyzeContentRequest.input_audio]
to continue with Speech recognition.
If you decide to switch to analyzing text input after
you have already started Speech recognition, please
send a message with
[StreamingAnalyzeContentRequest.input_text][google.cloud.dialogflow.v2beta1.StreamingAnalyzeContentRequest.input_text].
However, note that:
* Dialogflow will bill you for the audio so far.
* Dialogflow discards all Speech recognition results
in favor of the text input.
3. If
[StreamingAnalyzeContentRequest.config][google.cloud.dialogflow.v2beta1.StreamingAnalyzeContentRequest.config]
in the first message was set to
[StreamingAnalyzeContentRequest.text_config][google.cloud.dialogflow.v2beta1.StreamingAnalyzeContentRequest.text_config],
then the second message must contain only
[input_text][google.cloud.dialogflow.v2beta1.StreamingAnalyzeContentRequest.input_text].
Moreover, you must not send more than two messages.
After you have sent all input, you must half-close or
abort the request stream.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
AsyncIterable[google.cloud.dialogflow_v2beta1.types.StreamingAnalyzeContentResponse]:
The top-level message returned from the
StreamingAnalyzeContent method.
Multiple response messages can be returned in order:
1. If the input was set to streaming audio, the first
one or more messages contain recognition_result.
Each recognition_result represents a more complete
transcript of what the user said. The last
recognition_result has is_final set to true.
2. In the virtual agent stage: if
enable_partial_automated_agent_reply is true, the
following N (currently 1 <= N <= 4) messages
contain automated_agent_reply and optionally
reply_audio returned by the virtual agent. The
first (N-1) automated_agent_reply messages have
automated_agent_reply_type set to PARTIAL; the
last one has automated_agent_reply_type set to
FINAL. If enable_partial_automated_agent_reply
is not enabled, the response stream contains only
the final reply.
In the human assist stage: the following N (N >= 1)
messages contain human_agent_suggestion_results,
end_user_suggestion_results, or message.
"""

# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.streaming_analyze_content,
default_timeout=220.0,
client_info=DEFAULT_CLIENT_INFO,
)

# Send the request.
response = rpc(
requests,
retry=retry,
timeout=timeout,
metadata=metadata,
)

# Done; return the response.
return response

async def suggest_articles(
self,
request: Union[participant.SuggestArticlesRequest, dict] = None,
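The generated sample above sends a single request carrying input_audio, which demonstrates the call shape but not the config-first message ordering the docstring requires. A fuller sketch of the documented audio flow; InputAudioConfig and AudioEncoding are exported by this package per the __init__.py diff, while the transcript field on the recognition result and the audio parameters are assumptions for illustration:

    # Sketch of the documented message ordering for the audio case:
    # one config-only message first, then audio-only messages.
    # The `transcript` field and audio parameters are assumptions.
    from google.cloud import dialogflow_v2beta1

    async def stream_audio(participant: str, audio_chunks):
        client = dialogflow_v2beta1.ParticipantsAsyncClient()

        async def request_generator():
            # 1. First message: participant and config, no input yet.
            yield dialogflow_v2beta1.StreamingAnalyzeContentRequest(
                participant=participant,
                audio_config=dialogflow_v2beta1.InputAudioConfig(
                    audio_encoding=dialogflow_v2beta1.AudioEncoding.AUDIO_ENCODING_LINEAR_16,
                    sample_rate_hertz=16000,
                    language_code="en-US",
                ),
            )
            # 2. Subsequent messages: raw audio only.
            async for chunk in audio_chunks:
                yield dialogflow_v2beta1.StreamingAnalyzeContentRequest(
                    input_audio=chunk,
                )
            # Exhausting the generator half-closes the request stream,
            # as the docstring requires.

        stream = await client.streaming_analyze_content(requests=request_generator())
        async for response in stream:
            transcript = response.recognition_result.transcript
            if transcript:
                print("transcript:", transcript)
            if response.reply_text:
                print("reply:", response.reply_text)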
