Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Original file line number Diff line number Diff line change
Expand Up @@ -136,23 +136,9 @@ message InputAudioConfig {
// for more details.
repeated SpeechContext speech_contexts = 11;

// Which Speech model to select for the given request. Select the
// model best suited to your domain to get best results. If a model is not
// explicitly specified, then we auto-select a model based on the parameters
// in the InputAudioConfig.
// If enhanced speech model is enabled for the agent and an enhanced
// version of the specified model for the language does not exist, then the
// speech is recognized using the standard version of the specified model.
// Refer to
// [Cloud Speech API
// documentation](https://cloud.google.com/speech-to-text/docs/basics#select-model)
// for more details.
// If you specify a model, the following models typically have the best
// performance:
//
// - phone_call (best for Agent Assist and telephony)
// - latest_short (best for Dialogflow non-telephony)
// - command_and_search (best for very short utterances and commands)
// Optional. Which Speech model to select for the given request.
// For more information, see
// [Speech models](https://cloud.google.com/dialogflow/es/docs/speech-models).
string model = 7;

// Which variant of the [Speech
Expand Down Expand Up @@ -271,13 +257,28 @@ message SpeechToTextConfig {
// error.
SpeechModelVariant speech_model_variant = 1;

// Which Speech model to select. Select the model best suited to your domain
// to get best results. If a model is not explicitly specified, then a default
// model is used.
// Which Speech model to select. Select the
// model best suited to your domain to get best results. If a model is not
// explicitly specified, then Dialogflow auto-selects a model based on other
// parameters in the SpeechToTextConfig and Agent settings.
// If enhanced speech model is enabled for the agent and an enhanced
// version of the specified model for the language does not exist, then the
// speech is recognized using the standard version of the specified model.
// Refer to
// [Cloud Speech API
// documentation](https://cloud.google.com/speech-to-text/docs/basics#select-model)
// for more details.
// If you specify a model, the following models typically have the best
// performance:
//
// - phone_call (best for Agent Assist and telephony)
// - latest_short (best for Dialogflow non-telephony)
// - command_and_search
//
// Leave this field unspecified to use
// [Agent Speech
// settings](https://cloud.google.com/dialogflow/cx/docs/concept/agent#settings-speech)
// for model selection.
string model = 2;

// Use timeout based endpointing, interpreting endpointer sensitivity as
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -638,6 +638,9 @@ message SearchKnowledgeResponse {
// Most relevant snippets extracted from articles in the given knowledge base,
// ordered by confidence.
repeated SearchKnowledgeAnswer answers = 2;

// The rewritten query used to search knowledge.
string rewritten_query = 3;
}

// Represents a SearchKnowledge answer.
Expand All @@ -647,11 +650,14 @@ message SearchKnowledgeAnswer {
// The answer has an unspecified type.
ANSWER_TYPE_UNSPECIFIED = 0;

// The answer is from FAQ doucments.
// The answer is from FAQ documents.
FAQ = 1;

// The answer is from generative model.
GENERATIVE = 2;

// The answer is from intent matching.
INTENT = 3;
}

// The sources of the answers.
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -383,10 +383,10 @@ message AutomatedAgentConfig {
}
];

// Optional. Sets Dialogflow CX session life time.
// Optional. Configure lifetime of the Dialogflow session.
// By default, a Dialogflow CX session remains active and its data is stored
// for 30 minutes after the last request is sent for the session. This value
// should be no longer than 1 day.
// for 30 minutes after the last request is sent for the session.
// This value should be no longer than 1 day.
google.protobuf.Duration session_ttl = 3
[(google.api.field_behavior) = OPTIONAL];
}
Expand Down Expand Up @@ -422,6 +422,11 @@ message HumanAgentAssistantConfig {
bool disable_agent_query_logging = 14
[(google.api.field_behavior) = OPTIONAL];

// Optional. Enable including conversation context during query answer
// generation. Supported features: KNOWLEDGE_SEARCH.
bool enable_conversation_augmented_query = 16
[(google.api.field_behavior) = OPTIONAL];

// Settings of suggestion trigger.
//
// Currently, only ARTICLE_SUGGESTION and FAQ will use this field.
Expand Down Expand Up @@ -538,6 +543,54 @@ message HumanAgentAssistantConfig {
bool drop_ivr_messages = 3;
}

// Custom sections to return when requesting a summary of a conversation.
// This is only supported when `baseline_model_version` == '2.0'.
//
// Supported features: CONVERSATION_SUMMARIZATION,
// CONVERSATION_SUMMARIZATION_VOICE.
message Sections {
// Selectable sections to return when requesting a summary of a
// conversation.
enum SectionType {
// Undefined section type, does not return anything.
SECTION_TYPE_UNSPECIFIED = 0;

// What the customer needs help with or has questions about.
// Section name: "situation".
SITUATION = 1;

// What the agent does to help the customer.
// Section name: "action".
ACTION = 2;

// Result of the customer service. A single word describing the result
// of the conversation.
// Section name: "resolution".
RESOLUTION = 3;

// Reason for cancellation if the customer requests for a cancellation.
// "N/A" otherwise.
// Section name: "reason_for_cancellation".
REASON_FOR_CANCELLATION = 4;

// "Unsatisfied" or "Satisfied" depending on the customer's feelings at
// the end of the conversation.
// Section name: "customer_satisfaction".
CUSTOMER_SATISFACTION = 5;

// Key entities extracted from the conversation, such as ticket number,
// order number, dollar amount, etc.
// Section names are prefixed by "entities/".
ENTITIES = 6;
}

// The selected sections chosen to return when requesting a summary of a
// conversation. A duplicate selected section will be treated as a single
// selected section. If section types are not provided, the default will
// be {SITUATION, ACTION, RESOLUTION}.
repeated SectionType section_types = 1;
}

// Source of query.
oneof query_source {
// Query from knowledgebase. It is used by:
Expand Down Expand Up @@ -581,6 +634,10 @@ message HumanAgentAssistantConfig {
// Determines how recent conversation context is filtered when generating
// suggestions. If unspecified, no messages will be dropped.
ContextFilterSettings context_filter_settings = 7;

// Optional. The customized sections chosen to return when requesting a
// summary of a conversation.
Sections sections = 8 [(google.api.field_behavior) = OPTIONAL];
}

// Custom conversation models used in agent assist feature.
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -181,23 +181,9 @@ message InputAudioConfig {
// for more details.
repeated SpeechContext speech_contexts = 11;

// Which Speech model to select for the given request. Select the
// model best suited to your domain to get best results. If a model is not
// explicitly specified, then we auto-select a model based on the parameters
// in the InputAudioConfig.
// If enhanced speech model is enabled for the agent and an enhanced
// version of the specified model for the language does not exist, then the
// speech is recognized using the standard version of the specified model.
// Refer to
// [Cloud Speech API
// documentation](https://cloud.google.com/speech-to-text/docs/basics#select-model)
// for more details.
// If you specify a model, the following models typically have the best
// performance:
//
// - phone_call (best for Agent Assist and telephony)
// - latest_short (best for Dialogflow non-telephony)
// - command_and_search (best for very short utterances and commands)
// Optional. Which Speech model to select for the given request.
// For more information, see
// [Speech models](https://cloud.google.com/dialogflow/es/docs/speech-models).
string model = 7;

// Which variant of the [Speech
Expand Down Expand Up @@ -322,13 +308,28 @@ message SpeechToTextConfig {
// error.
SpeechModelVariant speech_model_variant = 1;

// Which Speech model to select. Select the model best suited to your domain
// to get best results. If a model is not explicitly specified, then a default
// model is used.
// Which Speech model to select. Select the
// model best suited to your domain to get best results. If a model is not
// explicitly specified, then Dialogflow auto-selects a model based on other
// parameters in the SpeechToTextConfig and Agent settings.
// If enhanced speech model is enabled for the agent and an enhanced
// version of the specified model for the language does not exist, then the
// speech is recognized using the standard version of the specified model.
// Refer to
// [Cloud Speech API
// documentation](https://cloud.google.com/speech-to-text/docs/basics#select-model)
// for more details.
// If you specify a model, the following models typically have the best
// performance:
//
// - phone_call (best for Agent Assist and telephony)
// - latest_short (best for Dialogflow non-telephony)
// - command_and_search
//
// Leave this field unspecified to use
// [Agent Speech
// settings](https://cloud.google.com/dialogflow/cx/docs/concept/agent#settings-speech)
// for model selection.
string model = 2;

// Use timeout based endpointing, interpreting endpointer sensitivity as
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -698,6 +698,9 @@ message SearchKnowledgeResponse {
// Most relevant snippets extracted from articles in the given knowledge base,
// ordered by confidence.
repeated SearchKnowledgeAnswer answers = 2;

// The rewritten query used to search knowledge.
string rewritten_query = 3;
}

// Represents a SearchKnowledge answer.
Expand All @@ -707,11 +710,14 @@ message SearchKnowledgeAnswer {
// The answer has an unspecified type.
ANSWER_TYPE_UNSPECIFIED = 0;

// The answer is from FAQ doucments.
// The answer is from FAQ documents.
FAQ = 1;

// The answer is from generative model.
GENERATIVE = 2;

// The answer is from intent matching.
INTENT = 3;
}

// The sources of the answers.
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -296,10 +296,10 @@ message AutomatedAgentConfig {
}
];

// Optional. Sets Dialogflow CX session life time.
// Optional. Configure lifetime of the Dialogflow session.
// By default, a Dialogflow CX session remains active and its data is stored
// for 30 minutes after the last request is sent for the session. This value
// should be no longer than 1 day.
// for 30 minutes after the last request is sent for the session.
// This value should be no longer than 1 day.
google.protobuf.Duration session_ttl = 3
[(google.api.field_behavior) = OPTIONAL];
}
Expand Down Expand Up @@ -335,6 +335,11 @@ message HumanAgentAssistantConfig {
bool disable_agent_query_logging = 14
[(google.api.field_behavior) = OPTIONAL];

// Optional. Enable including conversation context during query answer
// generation. Supported features: KNOWLEDGE_SEARCH.
bool enable_conversation_augmented_query = 16
[(google.api.field_behavior) = OPTIONAL];

// Settings of suggestion trigger.
//
// Currently, only ARTICLE_SUGGESTION, FAQ, and DIALOGFLOW_ASSIST will use
Expand Down Expand Up @@ -451,6 +456,54 @@ message HumanAgentAssistantConfig {
bool drop_ivr_messages = 3;
}

// Custom sections to return when requesting a summary of a conversation.
// This is only supported when `baseline_model_version` == '2.0'.
//
// Supported features: CONVERSATION_SUMMARIZATION,
// CONVERSATION_SUMMARIZATION_VOICE.
message Sections {
// Selectable sections to return when requesting a summary of a
// conversation.
enum SectionType {
// Undefined section type, does not return anything.
SECTION_TYPE_UNSPECIFIED = 0;

// What the customer needs help with or has questions about.
// Section name: "situation".
SITUATION = 1;

// What the agent does to help the customer.
// Section name: "action".
ACTION = 2;

// Result of the customer service. A single word describing the result
// of the conversation.
// Section name: "resolution".
RESOLUTION = 3;

// Reason for cancellation if the customer requests for a cancellation.
// "N/A" otherwise.
// Section name: "reason_for_cancellation".
REASON_FOR_CANCELLATION = 4;

// "Unsatisfied" or "Satisfied" depending on the customer's feelings at
// the end of the conversation.
// Section name: "customer_satisfaction".
CUSTOMER_SATISFACTION = 5;

// Key entities extracted from the conversation, such as ticket number,
// order number, dollar amount, etc.
// Section names are prefixed by "entities/".
ENTITIES = 6;
}

// The selected sections chosen to return when requesting a summary of a
// conversation. A duplicate selected section will be treated as a single
// selected section. If section types are not provided, the default will
// be {SITUATION, ACTION, RESOLUTION}.
repeated SectionType section_types = 1;
}

// Source of query.
oneof query_source {
// Query from knowledgebase. It is used by:
Expand Down Expand Up @@ -495,6 +548,10 @@ message HumanAgentAssistantConfig {
// Determines how recent conversation context is filtered when generating
// suggestions. If unspecified, no messages will be dropped.
ContextFilterSettings context_filter_settings = 7;

// Optional. The customized sections chosen to return when requesting a
// summary of a conversation.
Sections sections = 8 [(google.api.field_behavior) = OPTIONAL];
}

// Custom conversation models used in agent assist feature.
Expand Down
Loading