Skip to content

Commit

Permalink
feat(api): OpenAPI spec update via Stainless API (#71)
Browse files Browse the repository at this point in the history
  • Loading branch information
stainless-app[bot] authored and stainless-bot committed Apr 26, 2024
1 parent d149267 commit a90064f
Show file tree
Hide file tree
Showing 3 changed files with 129 additions and 77 deletions.
156 changes: 102 additions & 54 deletions src/resources/agent.ts
Original file line number Diff line number Diff line change
Expand Up @@ -108,6 +108,24 @@ export interface AgentResponse {
| 'static-noise'
| null;

/**
* Only applicable when enable_backchannel is true. Controls how often the agent
* would backchannel when a backchannel is possible. Value ranging from [0,1].
* Lower value means less frequent backchannel, while higher value means more
* frequent backchannel. If unset, default value 0.8 will apply.
*/
backchannel_frequency?: number;

/**
* Only applicable when enable_backchannel is true. A list of words that the agent
* would use as backchannel. If not set, default backchannel words will apply.
* Check out
* [backchannel default words](/agent/interaction-configuration#backchannel) for
* more details. Note that certain voices do not work too well with certain words,
 * so it's recommended to experiment before adding any words.
*/
backchannel_words?: Array<string>;

/**
* Provide a customized list of keywords to bias the transcriber model, so that
* these words are more likely to get transcribed. Commonly used for names, brands,
Expand All @@ -133,25 +151,9 @@ export interface AgentResponse {
interruption_sensitivity?: number;

/**
* `Beta feature, use with caution.`
*
* This setting specifies the agent's operational language, including base language
* and dialect. Speech recognition considers both elements, but text-to-speech
* currently only recognizes the base language.
*
* Specifies what language (and dialect) the speech recognition will operate in.
* For instance, selecting `en-GB` optimizes speech recognition for British
* English, yet text-to-speech output will be in standard English. If
* dialect-specific text-to-speech is required, please contact us for support.
*
* If unset, will use default value `en-US`.
*
* - `11lab voices`: supports English(en), German(de), Spanish(es), Hindi(hi),
* Portuguese(pt)
*
* - `openAI voices`: supports English(en), German(de), Spanish(es), Hindi(hi),
* Portuguese(pt), Japanese(ja)
*
* - `deepgram voices`: supports English(en)
* English. If unset, will use default value `en-US`.
*/
language?:
| 'en-US'
Expand All @@ -173,6 +175,20 @@ export interface AgentResponse {
*/
opt_out_sensitive_data_storage?: boolean;

/**
* If set, controls how many times agent would remind user when user is
 * unresponsive. Must be a non-negative integer. If unset, default value of 1 will
* apply (remind once). Set to 0 to disable agent from reminding.
*/
reminder_max_count?: number;

/**
* If set (in milliseconds), will trigger a reminder to the agent to speak if the
* user has been silent for the specified duration after some agent speech. Must be
* a positive number. If unset, default value of 10000 ms (10 s) will apply.
*/
reminder_trigger_ms?: number;

/**
* Controls how responsive is the agent. Value ranging from [0,1]. Lower value
* means less responsive agent (wait more, respond slower), while higher value
Expand Down Expand Up @@ -256,6 +272,24 @@ export interface AgentCreateParams {
| 'static-noise'
| null;

/**
* Only applicable when enable_backchannel is true. Controls how often the agent
* would backchannel when a backchannel is possible. Value ranging from [0,1].
* Lower value means less frequent backchannel, while higher value means more
* frequent backchannel. If unset, default value 0.8 will apply.
*/
backchannel_frequency?: number;

/**
* Only applicable when enable_backchannel is true. A list of words that the agent
* would use as backchannel. If not set, default backchannel words will apply.
* Check out
* [backchannel default words](/agent/interaction-configuration#backchannel) for
* more details. Note that certain voices do not work too well with certain words,
 * so it's recommended to experiment before adding any words.
*/
backchannel_words?: Array<string>;

/**
* Provide a customized list of keywords to bias the transcriber model, so that
* these words are more likely to get transcribed. Commonly used for names, brands,
Expand All @@ -281,25 +315,9 @@ export interface AgentCreateParams {
interruption_sensitivity?: number;

/**
* `Beta feature, use with caution.`
*
* This setting specifies the agent's operational language, including base language
* and dialect. Speech recognition considers both elements, but text-to-speech
* currently only recognizes the base language.
*
* Specifies what language (and dialect) the speech recognition will operate in.
* For instance, selecting `en-GB` optimizes speech recognition for British
* English, yet text-to-speech output will be in standard English. If
* dialect-specific text-to-speech is required, please contact us for support.
*
* If unset, will use default value `en-US`.
*
* - `11lab voices`: supports English(en), German(de), Spanish(es), Hindi(hi),
* Portuguese(pt)
*
* - `openAI voices`: supports English(en), German(de), Spanish(es), Hindi(hi),
* Portuguese(pt), Japanese(ja)
*
* - `deepgram voices`: supports English(en)
* English. If unset, will use default value `en-US`.
*/
language?:
| 'en-US'
Expand All @@ -321,6 +339,20 @@ export interface AgentCreateParams {
*/
opt_out_sensitive_data_storage?: boolean;

/**
* If set, controls how many times agent would remind user when user is
 * unresponsive. Must be a non-negative integer. If unset, default value of 1 will
* apply (remind once). Set to 0 to disable agent from reminding.
*/
reminder_max_count?: number;

/**
* If set (in milliseconds), will trigger a reminder to the agent to speak if the
* user has been silent for the specified duration after some agent speech. Must be
* a positive number. If unset, default value of 10000 ms (10 s) will apply.
*/
reminder_trigger_ms?: number;

/**
* Controls how responsive is the agent. Value ranging from [0,1]. Lower value
* means less responsive agent (wait more, respond slower), while higher value
Expand Down Expand Up @@ -389,6 +421,24 @@ export interface AgentUpdateParams {
| 'static-noise'
| null;

/**
* Only applicable when enable_backchannel is true. Controls how often the agent
* would backchannel when a backchannel is possible. Value ranging from [0,1].
* Lower value means less frequent backchannel, while higher value means more
* frequent backchannel. If unset, default value 0.8 will apply.
*/
backchannel_frequency?: number;

/**
* Only applicable when enable_backchannel is true. A list of words that the agent
* would use as backchannel. If not set, default backchannel words will apply.
* Check out
* [backchannel default words](/agent/interaction-configuration#backchannel) for
* more details. Note that certain voices do not work too well with certain words,
 * so it's recommended to experiment before adding any words.
*/
backchannel_words?: Array<string>;

/**
* Provide a customized list of keywords to bias the transcriber model, so that
* these words are more likely to get transcribed. Commonly used for names, brands,
Expand All @@ -414,25 +464,9 @@ export interface AgentUpdateParams {
interruption_sensitivity?: number;

/**
* `Beta feature, use with caution.`
*
* This setting specifies the agent's operational language, including base language
* and dialect. Speech recognition considers both elements, but text-to-speech
* currently only recognizes the base language.
*
* Specifies what language (and dialect) the speech recognition will operate in.
* For instance, selecting `en-GB` optimizes speech recognition for British
* English, yet text-to-speech output will be in standard English. If
* dialect-specific text-to-speech is required, please contact us for support.
*
* If unset, will use default value `en-US`.
*
* - `11lab voices`: supports English(en), German(de), Spanish(es), Hindi(hi),
* Portuguese(pt)
*
* - `openAI voices`: supports English(en), German(de), Spanish(es), Hindi(hi),
* Portuguese(pt), Japanese(ja)
*
* - `deepgram voices`: supports English(en)
* English. If unset, will use default value `en-US`.
*/
language?:
| 'en-US'
Expand Down Expand Up @@ -461,6 +495,20 @@ export interface AgentUpdateParams {
*/
opt_out_sensitive_data_storage?: boolean;

/**
* If set, controls how many times agent would remind user when user is
 * unresponsive. Must be a non-negative integer. If unset, default value of 1 will
* apply (remind once). Set to 0 to disable agent from reminding.
*/
reminder_max_count?: number;

/**
* If set (in milliseconds), will trigger a reminder to the agent to speak if the
* user has been silent for the specified duration after some agent speech. Must be
* a positive number. If unset, default value of 10000 ms (10 s) will apply.
*/
reminder_trigger_ms?: number;

/**
* Controls how responsive is the agent. Value ranging from [0,1]. Lower value
* means less responsive agent (wait more, respond slower), while higher value
Expand Down
46 changes: 23 additions & 23 deletions src/resources/call.ts
Original file line number Diff line number Diff line change
Expand Up @@ -51,7 +51,7 @@ export interface CallResponse extends RegisterCallResponse {
* completion status and other metrics. Available after call ends. Subscribe to
* `call_analyzed` webhook event type to receive it once ready.
*/
conversation_eval?: CallResponse.ConversationEval;
call_analysis?: CallResponse.CallAnalysis;

/**
* The reason for the disconnection of the call. Debug using explanation in docs
Expand Down Expand Up @@ -150,39 +150,39 @@ export namespace CallResponse {
* completion status and other metrics. Available after call ends. Subscribe to
* `call_analyzed` webhook event type to receive it once ready.
*/
export interface ConversationEval {
export interface CallAnalysis {
/**
* Evaluate agent task completion status, whether the agent has completed his task.
* Sentiment of the agent in the call.
*/
agent_task_completion?: 'Completed' | 'Incomplete' | 'Partial';
agent_sentiment?: 'Negative' | 'Positive' | 'Neutral';

/**
* Reason for the agent task completion status.
* Evaluate agent task completion status, whether the agent has completed his task.
*/
agent_task_completion_reason?: string;
agent_task_completion_rating?: 'Complete' | 'Incomplete' | 'Partial';

/**
* Sentiment of the agent in the conversation.
* Reason for the agent task completion status.
*/
agnet_sentiment?: 'Negative' | 'Positive' | 'Neutral';
agent_task_completion_rating_reason?: string;

/**
* Evaluate whether the conversation ended normally or was cut off.
* Evaluate whether the call ended normally or was cut off.
*/
conversation_completion?: 'Completed' | 'Incomplete' | 'Partial';
call_completion_rating?: 'Complete' | 'Incomplete' | 'Partial';

/**
* Reason for the conversation completion status.
* Reason for the call completion status.
*/
conversation_completion_reason?: string;
call_completion_rating_reason?: string;

/**
* A high level summary of the conversation conversation.
* A high level summary of the call.
*/
conversation_summary?: string;
call_summary?: string;

/**
* Sentiment of the user in the conversation.
* Sentiment of the user in the call.
*/
user_sentiment?: 'Negative' | 'Positive' | 'Neutral';
}
Expand Down Expand Up @@ -326,9 +326,9 @@ export namespace CallResponse {
role: 'agent' | 'user';

/**
* Array of words in the utternace with the word timestamp. Useful for
* Array of words in the utterance with the word timestamp. Useful for
* understanding what word was spoken at what time. Note that the word timestamp is
* not guranteed to be accurate, it's more like an approximation.
* not guaranteed to be accurate, it's more like an approximation.
*/
words: Array<TranscriptObject.Word>;
}
Expand Down Expand Up @@ -366,9 +366,9 @@ export namespace CallResponse {
role: 'agent' | 'user';

/**
* Array of words in the utternace with the word timestamp. Useful for
* Array of words in the utterance with the word timestamp. Useful for
* understanding what word was spoken at what time. Note that the word timestamp is
* not guranteed to be accurate, it's more like an approximation.
* not guaranteed to be accurate, it's more like an approximation.
*/
words: Array<Utterance.Word>;
}
Expand Down Expand Up @@ -689,10 +689,10 @@ export interface CallRegisterParams {
from_number?: string;

/**
* An abtriary object for storage purpose only. You can put anything here like your
* own id for the call, twilio SID, internal customer id. Not used for processing,
* when we connect to your LLM websocket server, you can then get it from the call
* object.
* An arbitrary object for storage purpose only. You can put anything here like
* your own id for the call, twilio SID, internal customer id. Not used for
* processing, when we connect to your LLM websocket server, you can then get it
* from the call object.
*/
metadata?: unknown;

Expand Down
4 changes: 4 additions & 0 deletions tests/api-resources/agent.test.ts
Original file line number Diff line number Diff line change
Expand Up @@ -29,11 +29,15 @@ describe('resource agent', () => {
voice_id: '11labs-Adrian',
agent_name: 'Jarvis',
ambient_sound: 'coffee-shop',
backchannel_frequency: 0.9,
backchannel_words: ['yeah', 'uh-huh'],
boosted_keywords: ['retell', 'kroger'],
enable_backchannel: true,
interruption_sensitivity: 1,
language: 'en-US',
opt_out_sensitive_data_storage: true,
reminder_max_count: 2,
reminder_trigger_ms: 10000,
responsiveness: 1,
voice_speed: 1,
voice_temperature: 1,
Expand Down

0 comments on commit a90064f

Please sign in to comment.