From 1bc946b6e53cf75c03d8c51f0fba613615dae3bb Mon Sep 17 00:00:00 2001 From: Angelo Paparazzi Date: Tue, 18 May 2021 13:53:55 -0400 Subject: [PATCH] feat(stt-tts): generation release changes --- speech-to-text/v1-generated.ts | 501 ++++++++++----- test/integration/text-to-speech.test.js | 110 ++++ test/resources/tts_audio.wav | Bin 0 -> 75726 bytes test/unit/speech-to-text.v1.test.js | 8 +- test/unit/text-to-speech.v1.test.js | 578 ++++++++++++++++- text-to-speech/v1-generated.ts | 788 +++++++++++++++++++++++- 6 files changed, 1818 insertions(+), 167 deletions(-) create mode 100644 test/resources/tts_audio.wav diff --git a/speech-to-text/v1-generated.ts b/speech-to-text/v1-generated.ts index 3673feefea..5deebd1bf8 100644 --- a/speech-to-text/v1-generated.ts +++ b/speech-to-text/v1-generated.ts @@ -1,5 +1,5 @@ /** - * (C) Copyright IBM Corp. 2017, 2020. + * (C) Copyright IBM Corp. 2017, 2021. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -15,9 +15,9 @@ */ /** - * IBM OpenAPI SDK Code Generator Version: 99-SNAPSHOT-be3b4618-20201221-123327 + * IBM OpenAPI SDK Code Generator Version: 99-SNAPSHOT-902c9336-20210507-162723 */ - + import * as extend from 'extend'; import { IncomingHttpHeaders, OutgoingHttpHeaders } from 'http'; @@ -27,9 +27,14 @@ import { getSdkHeaders } from '../lib/common'; /** * The IBM Watson™ Speech to Text service provides APIs that use IBM's speech-recognition capabilities to produce * transcripts of spoken audio. The service can transcribe speech from various languages and audio formats. In addition - * to basic transcription, the service can produce detailed information about many different aspects of the audio. For - * most languages, the service supports two sampling rates, broadband and narrowband. It returns all JSON response - * content in the UTF-8 character set. + * to basic transcription, the service can produce detailed information about many different aspects of the audio. It + * returns all JSON response content in the UTF-8 character set. + * + * The service supports two types of models: previous-generation models that include the terms `Broadband` and + * `Narrowband` in their names, and beta next-generation models that include the terms `Multimedia` and `Telephony` in + * their names. Broadband and multimedia models have minimum sampling rates of 16 kHz. Narrowband and telephony models + * have minimum sampling rates of 8 kHz. The beta next-generation models currently support fewer languages and features, + * but they offer high throughput and greater transcription accuracy. * * For speech recognition, the service supports synchronous and asynchronous HTTP Representational State Transfer (REST) * interfaces. It also supports a WebSocket interface that provides a full-duplex, low-latency communication channel: @@ -41,8 +46,8 @@ import { getSdkHeaders } from '../lib/common'; * formal language specification that lets you restrict the phrases that the service can recognize. * * Language model customization and acoustic model customization are generally available for production use with all - * language models that are generally available. Grammars are beta functionality for all language models that support - * language model customization. + * previous-generation models that are generally available. Grammars are beta functionality for all previous-generation + * models that support language model customization. 
Next-generation models do not support customization at this time. */ class SpeechToTextV1 extends BaseService { @@ -89,7 +94,7 @@ class SpeechToTextV1 extends BaseService { * model and its minimum sampling rate in Hertz, among other things. The ordering of the list of models can change * from call to call; do not rely on an alphabetized or static list of models. * - * **See also:** [Languages and models](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-models#models). + * **See also:** [Listing models](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-models-list). * * @param {Object} [params] - The parameters to send to the service. * @param {OutgoingHttpHeaders} [params.headers] - Custom request headers @@ -121,7 +126,7 @@ class SpeechToTextV1 extends BaseService { * Gets information for a single specified language model that is available for use with the service. The information * includes the name of the model and its minimum sampling rate in Hertz, among other things. * - * **See also:** [Languages and models](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-models#models). + * **See also:** [Listing models](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-models-list). * * @param {Object} params - The parameters to send to the service. * @param {string} params.modelId - The identifier of the model in the form of its name from the output of the **Get a @@ -221,8 +226,36 @@ class SpeechToTextV1 extends BaseService { * the minimum required rate, the service down-samples the audio to the appropriate rate. If the sampling rate of the * audio is lower than the minimum required rate, the request fails. * - * **See also:** [Audio - * formats](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-audio-formats#audio-formats). + * **See also:** [Supported audio + * formats](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-audio-formats). + * + * ### Next-generation models + * + * **Note:** The next-generation language models are beta functionality. They support a limited number of languages + * and features at this time. The supported languages, models, and features will increase with future releases. + * + * The service supports next-generation `Multimedia` (16 kHz) and `Telephony` (8 kHz) models for many languages. + * Next-generation models have higher throughput than the service's previous generation of `Broadband` and + * `Narrowband` models. When you use next-generation models, the service can return transcriptions more quickly and + * also provide noticeably better transcription accuracy. + * + * You specify a next-generation model by using the `model` query parameter, as you do a previous-generation model. + * Next-generation models support the same request headers as previous-generation models, but they support only the + * following additional query parameters: + * * `background_audio_suppression` + * * `inactivity_timeout` + * * `profanity_filter` + * * `redaction` + * * `smart_formatting` + * * `speaker_labels` + * * `speech_detector_sensitivity` + * * `timestamps` + * + * Many next-generation models also support the beta `low_latency` parameter, which is not available with + * previous-generation models. + * + * **See also:** [Next-generation languages and + * models](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-models-ng). 
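+ *
+ * The following sketch is illustrative only and is not part of the generated documentation: a
+ * recognition request that uses a next-generation model with the beta `low_latency` parameter
+ * might look like this, assuming an already-configured `speechToText` client and a local FLAC
+ * file:
+ *
+ *     const fs = require('fs');
+ *     const response = await speechToText.recognize({
+ *       audio: fs.createReadStream('audio.flac'),
+ *       contentType: 'audio/flac',
+ *       model: 'en-US_Telephony',
+ *       lowLatency: true,
+ *     });
+ *     console.log(JSON.stringify(response.result, null, 2));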
* * ### Multipart speech recognition * @@ -246,24 +279,26 @@ class SpeechToTextV1 extends BaseService { * an audio format, see **Audio formats (content types)** in the method description. * @param {string} [params.model] - The identifier of the model that is to be used for the recognition request. * (**Note:** The model `ar-AR_BroadbandModel` is deprecated; use `ar-MS_BroadbandModel` instead.) See [Languages and - * models](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-models#models). + * models](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-models) and [Next-generation languages and + * models](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-models-ng). * @param {string} [params.languageCustomizationId] - The customization ID (GUID) of a custom language model that is * to be used with the recognition request. The base model of the specified custom language model must match the model * specified with the `model` parameter. You must make the request with credentials for the instance of the service - * that owns the custom model. By default, no custom language model is used. See [Custom - * models](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-input#custom-input). + * that owns the custom model. By default, no custom language model is used. See [Using a custom language model for + * speech recognition](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-languageUse). * * **Note:** Use this parameter instead of the deprecated `customization_id` parameter. * @param {string} [params.acousticCustomizationId] - The customization ID (GUID) of a custom acoustic model that is * to be used with the recognition request. The base model of the specified custom acoustic model must match the model * specified with the `model` parameter. You must make the request with credentials for the instance of the service - * that owns the custom model. By default, no custom acoustic model is used. See [Custom - * models](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-input#custom-input). + * that owns the custom model. By default, no custom acoustic model is used. See [Using a custom acoustic model for + * speech recognition](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-acousticUse). * @param {string} [params.baseModelVersion] - The version of the specified base model that is to be used with the * recognition request. Multiple versions of a base model can exist when a model is updated for internal improvements. * The parameter is intended primarily for use with custom models that have been upgraded for a new base model. The - * default value depends on whether the parameter is used with or without a custom model. See [Base model - * version](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-input#version). + * default value depends on whether the parameter is used with or without a custom model. See [Making speech + * recognition requests with upgraded custom + * models](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-custom-upgrade-use#custom-upgrade-use-recognition). * @param {number} [params.customizationWeight] - If you specify the customization ID (GUID) of a custom language * model with the recognition request, the customization weight tells the service how much weight to give to words * from the custom language model compared to those from the base model for the current request. 
@@ -276,7 +311,8 @@ class SpeechToTextV1 extends BaseService { * OOV words from the custom model. Use caution when setting the weight: a higher value can improve the accuracy of * phrases from the custom model's domain, but it can negatively affect performance on non-domain phrases. * - * See [Custom models](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-input#custom-input). + * See [Using customization + * weight](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-languageUse#weight). * @param {number} [params.inactivityTimeout] - The time in seconds after which, if only silence (no speech) is * detected in streaming audio, the connection is closed with a 400 error. The parameter is useful for stopping audio * submission from a live microphone when a user simply walks away. Use `-1` for infinity. See [Inactivity @@ -290,31 +326,31 @@ class SpeechToTextV1 extends BaseService { * characters, though the maximum effective length for double-byte languages might be shorter. Keywords are * case-insensitive. * - * See [Keyword spotting](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-output#keyword_spotting). + * See [Keyword spotting](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-spotting#keyword-spotting). * @param {number} [params.keywordsThreshold] - A confidence value that is the lower bound for spotting a keyword. A * word is considered to match a keyword if its confidence is greater than or equal to the threshold. Specify a * probability between 0.0 and 1.0. If you specify a threshold, you must also specify one or more keywords. The * service performs no keyword spotting if you omit either parameter. See [Keyword - * spotting](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-output#keyword_spotting). + * spotting](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-spotting#keyword-spotting). * @param {number} [params.maxAlternatives] - The maximum number of alternative transcripts that the service is to * return. By default, the service returns a single transcript. If you specify a value of `0`, the service uses the * default value, `1`. See [Maximum - * alternatives](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-output#max_alternatives). + * alternatives](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-metadata#max-alternatives). * @param {number} [params.wordAlternativesThreshold] - A confidence value that is the lower bound for identifying a * hypothesis as a possible word alternative (also known as "Confusion Networks"). An alternative word is considered * if its confidence is greater than or equal to the threshold. Specify a probability between 0.0 and 1.0. By default, * the service computes no alternative words. See [Word - * alternatives](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-output#word_alternatives). + * alternatives](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-spotting#word-alternatives). * @param {boolean} [params.wordConfidence] - If `true`, the service returns a confidence measure in the range of 0.0 * to 1.0 for each word. By default, the service returns no word confidence scores. See [Word - * confidence](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-output#word_confidence). + * confidence](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-metadata#word-confidence). * @param {boolean} [params.timestamps] - If `true`, the service returns time alignment for each word. 
By default, no * timestamps are returned. See [Word - * timestamps](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-output#word_timestamps). + * timestamps](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-metadata#word-timestamps). * @param {boolean} [params.profanityFilter] - If `true`, the service filters profanity from all output except for * keyword results by replacing inappropriate words with a series of asterisks. Set the parameter to `false` to return - * results with no censoring. Applies to US English transcription only. See [Profanity - * filtering](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-output#profanity_filter). + * results with no censoring. Applies to US English and Japanese transcription only. See [Profanity + * filtering](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-formatting#profanity-filtering). * @param {boolean} [params.smartFormatting] - If `true`, the service converts dates, times, series of digits and * numbers, phone numbers, currency values, and internet addresses into more readable, conventional representations in * the final transcript of a recognition request. For US English, the service also converts certain keyword strings to @@ -322,24 +358,26 @@ class SpeechToTextV1 extends BaseService { * * **Note:** Applies to US English, Japanese, and Spanish transcription only. * - * See [Smart formatting](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-output#smart_formatting). + * See [Smart formatting](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-formatting#smart-formatting). * @param {boolean} [params.speakerLabels] - If `true`, the response includes labels that identify which words were * spoken by which participants in a multi-person exchange. By default, the service returns no speaker labels. Setting * `speaker_labels` to `true` forces the `timestamps` parameter to be `true`, regardless of whether you specify * `false` for the parameter. + * * For previous-generation models, can be used for US English, Australian English, German, Japanese, Korean, and + * Spanish (both broadband and narrowband models) and UK English (narrowband model) transcription only. + * * For next-generation models, can be used for English (Australian, UK, and US), German, and Spanish transcription + * only. * - * **Note:** Applies to US English, Australian English, German, Japanese, Korean, and Spanish (both broadband and - * narrowband models) and UK English (narrowband model) transcription only. - * - * See [Speaker labels](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-output#speaker_labels). + * Restrictions and limitations apply to the use of speaker labels for both types of models. See [Speaker + * labels](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-speaker-labels). * @param {string} [params.customizationId] - **Deprecated.** Use the `language_customization_id` parameter to specify * the customization ID (GUID) of a custom language model that is to be used with the recognition request. Do not * specify both parameters with a request. * @param {string} [params.grammarName] - The name of a grammar that is to be used with the recognition request. If * you specify a grammar, you must also use the `language_customization_id` parameter to specify the name of the * custom language model for which the grammar is defined. 
The service recognizes only strings that are recognized by - * the specified grammar; it does not recognize other custom words from the model's words resource. See - * [Grammars](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-input#grammars-input). + * the specified grammar; it does not recognize other custom words from the model's words resource. See [Using a + * grammar for speech recognition](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-grammarUse). * @param {boolean} [params.redaction] - If `true`, the service redacts, or masks, numeric data from final * transcripts. The feature redacts any number that has three or more consecutive digits by replacing each digit with * an `X` character. It is intended to redact sensitive numeric data, such as credit card numbers. By default, the @@ -352,12 +390,13 @@ class SpeechToTextV1 extends BaseService { * * **Note:** Applies to US English, Japanese, and Korean transcription only. * - * See [Numeric redaction](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-output#redaction). + * See [Numeric + * redaction](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-formatting#numeric-redaction). * @param {boolean} [params.audioMetrics] - If `true`, requests detailed information about the signal characteristics * of the input audio. The service returns audio metrics with the final transcription results. By default, the service * returns no audio metrics. * - * See [Audio metrics](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-metrics#audio_metrics). + * See [Audio metrics](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-metrics#audio-metrics). * @param {number} [params.endOfPhraseSilenceTime] - If `true`, specifies the duration of the pause interval at which * the service splits a transcript into multiple final results. If the service detects pauses or extended silence * before it reaches the end of the audio stream, its response can include multiple final results. Silence indicates a @@ -371,7 +410,7 @@ class SpeechToTextV1 extends BaseService { * The default pause interval for most languages is 0.8 seconds; the default for Chinese is 0.6 seconds. * * See [End of phrase silence - * time](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-output#silence_time). + * time](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-parsing#silence-time). * @param {boolean} [params.splitTranscriptAtPhraseEnd] - If `true`, directs the service to split the transcript into * multiple final results based on semantic features of the input, for example, at the conclusion of meaningful * phrases such as sentences. The service bases its understanding of semantic features on the base language model that @@ -379,7 +418,7 @@ class SpeechToTextV1 extends BaseService { * transcript. By default, the service splits transcripts based solely on the pause interval. * * See [Split transcript at phrase - * end](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-output#split_transcript). + * end](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-parsing#split-transcript). * @param {number} [params.speechDetectorSensitivity] - The sensitivity of speech activity detection that the service * is to perform. Use the parameter to suppress word insertions from music, coughing, and other non-speech events. 
The * service biases the audio it passes for speech recognition by evaluating the input audio against prior models of @@ -390,8 +429,8 @@ class SpeechToTextV1 extends BaseService { * * 0.5 (the default) provides a reasonable compromise for the level of sensitivity. * * 1.0 suppresses no audio (speech detection sensitivity is disabled). * - * The values increase on a monotonic curve. See [Speech Activity - * Detection](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-input#detection). + * The values increase on a monotonic curve. See [Speech detector + * sensitivity](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-detection#detection-parameters-sensitivity). * @param {number} [params.backgroundAudioSuppression] - The level to which the service is to suppress background * audio based on its volume to prevent it from being transcribed as speech. Use the parameter to suppress side * conversations or background noise. @@ -401,8 +440,21 @@ class SpeechToTextV1 extends BaseService { * * 0.5 provides a reasonable level of audio suppression for general usage. * * 1.0 suppresses all audio (no audio is transcribed). * - * The values increase on a monotonic curve. See [Speech Activity - * Detection](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-input#detection). + * The values increase on a monotonic curve. See [Background audio + * suppression](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-detection#detection-parameters-suppression). + * @param {boolean} [params.lowLatency] - If `true` for next-generation `Multimedia` and `Telephony` models that + * support low latency, directs the service to produce results even more quickly than it usually does. Next-generation + * models produce transcription results faster than previous-generation models. The `low_latency` parameter causes the + * models to produce results even more quickly, though the results might be less accurate when the parameter is used. + * + * **Note:** The parameter is beta functionality. It is not available for previous-generation `Broadband` and + * `Narrowband` models. It is available only for some next-generation models. + * + * * For a list of next-generation models that support low latency, see [Supported language + * models](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-models-ng#models-ng-supported) for + * next-generation models. + * * For more information about the `low_latency` parameter, see [Low + * latency](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-interim#low-latency). * @param {OutgoingHttpHeaders} [params.headers] - Custom request headers * @returns {Promise>} */ @@ -439,7 +491,8 @@ class SpeechToTextV1 extends BaseService { 'end_of_phrase_silence_time': _params.endOfPhraseSilenceTime, 'split_transcript_at_phrase_end': _params.splitTranscriptAtPhraseEnd, 'speech_detector_sensitivity': _params.speechDetectorSensitivity, - 'background_audio_suppression': _params.backgroundAudioSuppression + 'background_audio_suppression': _params.backgroundAudioSuppression, + 'low_latency': _params.lowLatency }; const sdkHeaders = getSdkHeaders(SpeechToTextV1.DEFAULT_SERVICE_NAME, 'v1', 'recognize'); @@ -664,8 +717,36 @@ class SpeechToTextV1 extends BaseService { * the minimum required rate, the service down-samples the audio to the appropriate rate. If the sampling rate of the * audio is lower than the minimum required rate, the request fails. 
* - * **See also:** [Audio - * formats](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-audio-formats#audio-formats). + * **See also:** [Supported audio + * formats](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-audio-formats). + * + * ### Next-generation models + * + * **Note:** The next-generation language models are beta functionality. They support a limited number of languages + * and features at this time. The supported languages, models, and features will increase with future releases. + * + * The service supports next-generation `Multimedia` (16 kHz) and `Telephony` (8 kHz) models for many languages. + * Next-generation models have higher throughput than the service's previous generation of `Broadband` and + * `Narrowband` models. When you use next-generation models, the service can return transcriptions more quickly and + * also provide noticeably better transcription accuracy. + * + * You specify a next-generation model by using the `model` query parameter, as you do a previous-generation model. + * Next-generation models support the same request headers as previous-generation models, but they support only the + * following additional query parameters: + * * `background_audio_suppression` + * * `inactivity_timeout` + * * `profanity_filter` + * * `redaction` + * * `smart_formatting` + * * `speaker_labels` + * * `speech_detector_sensitivity` + * * `timestamps` + * + * Many next-generation models also support the beta `low_latency` parameter, which is not available with + * previous-generation models. + * + * **See also:** [Next-generation languages and + * models](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-models-ng). * * @param {Object} params - The parameters to send to the service. * @param {NodeJS.ReadableStream|Buffer} params.audio - The audio to transcribe. @@ -673,7 +754,8 @@ class SpeechToTextV1 extends BaseService { * an audio format, see **Audio formats (content types)** in the method description. * @param {string} [params.model] - The identifier of the model that is to be used for the recognition request. * (**Note:** The model `ar-AR_BroadbandModel` is deprecated; use `ar-MS_BroadbandModel` instead.) See [Languages and - * models](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-models#models). + * models](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-models) and [Next-generation languages and + * models](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-models-ng). * @param {string} [params.callbackUrl] - A URL to which callback notifications are to be sent. The URL must already * be successfully allowlisted by using the **Register a callback** method. You can include the same callback URL with * any number of job creation requests. Omit the parameter to poll the service for job completion and results. @@ -705,20 +787,21 @@ class SpeechToTextV1 extends BaseService { * @param {string} [params.languageCustomizationId] - The customization ID (GUID) of a custom language model that is * to be used with the recognition request. The base model of the specified custom language model must match the model * specified with the `model` parameter. You must make the request with credentials for the instance of the service - * that owns the custom model. By default, no custom language model is used. See [Custom - * models](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-input#custom-input). + * that owns the custom model. By default, no custom language model is used. 
See [Using a custom language model for + * speech recognition](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-languageUse). * * **Note:** Use this parameter instead of the deprecated `customization_id` parameter. * @param {string} [params.acousticCustomizationId] - The customization ID (GUID) of a custom acoustic model that is * to be used with the recognition request. The base model of the specified custom acoustic model must match the model * specified with the `model` parameter. You must make the request with credentials for the instance of the service - * that owns the custom model. By default, no custom acoustic model is used. See [Custom - * models](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-input#custom-input). + * that owns the custom model. By default, no custom acoustic model is used. See [Using a custom acoustic model for + * speech recognition](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-acousticUse). * @param {string} [params.baseModelVersion] - The version of the specified base model that is to be used with the * recognition request. Multiple versions of a base model can exist when a model is updated for internal improvements. * The parameter is intended primarily for use with custom models that have been upgraded for a new base model. The - * default value depends on whether the parameter is used with or without a custom model. See [Base model - * version](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-input#version). + * default value depends on whether the parameter is used with or without a custom model. See [Making speech + * recognition requests with upgraded custom + * models](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-custom-upgrade-use#custom-upgrade-use-recognition). * @param {number} [params.customizationWeight] - If you specify the customization ID (GUID) of a custom language * model with the recognition request, the customization weight tells the service how much weight to give to words * from the custom language model compared to those from the base model for the current request. @@ -731,7 +814,8 @@ class SpeechToTextV1 extends BaseService { * OOV words from the custom model. Use caution when setting the weight: a higher value can improve the accuracy of * phrases from the custom model's domain, but it can negatively affect performance on non-domain phrases. * - * See [Custom models](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-input#custom-input). + * See [Using customization + * weight](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-languageUse#weight). * @param {number} [params.inactivityTimeout] - The time in seconds after which, if only silence (no speech) is * detected in streaming audio, the connection is closed with a 400 error. The parameter is useful for stopping audio * submission from a live microphone when a user simply walks away. Use `-1` for infinity. See [Inactivity @@ -745,31 +829,31 @@ class SpeechToTextV1 extends BaseService { * characters, though the maximum effective length for double-byte languages might be shorter. Keywords are * case-insensitive. * - * See [Keyword spotting](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-output#keyword_spotting). + * See [Keyword spotting](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-spotting#keyword-spotting). * @param {number} [params.keywordsThreshold] - A confidence value that is the lower bound for spotting a keyword. 
A * word is considered to match a keyword if its confidence is greater than or equal to the threshold. Specify a * probability between 0.0 and 1.0. If you specify a threshold, you must also specify one or more keywords. The * service performs no keyword spotting if you omit either parameter. See [Keyword - * spotting](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-output#keyword_spotting). + * spotting](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-spotting#keyword-spotting). * @param {number} [params.maxAlternatives] - The maximum number of alternative transcripts that the service is to * return. By default, the service returns a single transcript. If you specify a value of `0`, the service uses the * default value, `1`. See [Maximum - * alternatives](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-output#max_alternatives). + * alternatives](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-metadata#max-alternatives). * @param {number} [params.wordAlternativesThreshold] - A confidence value that is the lower bound for identifying a * hypothesis as a possible word alternative (also known as "Confusion Networks"). An alternative word is considered * if its confidence is greater than or equal to the threshold. Specify a probability between 0.0 and 1.0. By default, * the service computes no alternative words. See [Word - * alternatives](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-output#word_alternatives). + * alternatives](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-spotting#word-alternatives). * @param {boolean} [params.wordConfidence] - If `true`, the service returns a confidence measure in the range of 0.0 * to 1.0 for each word. By default, the service returns no word confidence scores. See [Word - * confidence](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-output#word_confidence). + * confidence](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-metadata#word-confidence). * @param {boolean} [params.timestamps] - If `true`, the service returns time alignment for each word. By default, no * timestamps are returned. See [Word - * timestamps](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-output#word_timestamps). + * timestamps](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-metadata#word-timestamps). * @param {boolean} [params.profanityFilter] - If `true`, the service filters profanity from all output except for * keyword results by replacing inappropriate words with a series of asterisks. Set the parameter to `false` to return - * results with no censoring. Applies to US English transcription only. See [Profanity - * filtering](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-output#profanity_filter). + * results with no censoring. Applies to US English and Japanese transcription only. See [Profanity + * filtering](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-formatting#profanity-filtering). * @param {boolean} [params.smartFormatting] - If `true`, the service converts dates, times, series of digits and * numbers, phone numbers, currency values, and internet addresses into more readable, conventional representations in * the final transcript of a recognition request. For US English, the service also converts certain keyword strings to @@ -777,24 +861,26 @@ class SpeechToTextV1 extends BaseService { * * **Note:** Applies to US English, Japanese, and Spanish transcription only. 
* - * See [Smart formatting](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-output#smart_formatting). + * See [Smart formatting](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-formatting#smart-formatting). * @param {boolean} [params.speakerLabels] - If `true`, the response includes labels that identify which words were * spoken by which participants in a multi-person exchange. By default, the service returns no speaker labels. Setting * `speaker_labels` to `true` forces the `timestamps` parameter to be `true`, regardless of whether you specify * `false` for the parameter. + * * For previous-generation models, can be used for US English, Australian English, German, Japanese, Korean, and + * Spanish (both broadband and narrowband models) and UK English (narrowband model) transcription only. + * * For next-generation models, can be used for English (Australian, UK, and US), German, and Spanish transcription + * only. * - * **Note:** Applies to US English, Australian English, German, Japanese, Korean, and Spanish (both broadband and - * narrowband models) and UK English (narrowband model) transcription only. - * - * See [Speaker labels](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-output#speaker_labels). + * Restrictions and limitations apply to the use of speaker labels for both types of models. See [Speaker + * labels](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-speaker-labels). * @param {string} [params.customizationId] - **Deprecated.** Use the `language_customization_id` parameter to specify * the customization ID (GUID) of a custom language model that is to be used with the recognition request. Do not * specify both parameters with a request. * @param {string} [params.grammarName] - The name of a grammar that is to be used with the recognition request. If * you specify a grammar, you must also use the `language_customization_id` parameter to specify the name of the * custom language model for which the grammar is defined. The service recognizes only strings that are recognized by - * the specified grammar; it does not recognize other custom words from the model's words resource. See - * [Grammars](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-input#grammars-input). + * the specified grammar; it does not recognize other custom words from the model's words resource. See [Using a + * grammar for speech recognition](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-grammarUse). * @param {boolean} [params.redaction] - If `true`, the service redacts, or masks, numeric data from final * transcripts. The feature redacts any number that has three or more consecutive digits by replacing each digit with * an `X` character. It is intended to redact sensitive numeric data, such as credit card numbers. By default, the @@ -807,14 +893,15 @@ class SpeechToTextV1 extends BaseService { * * **Note:** Applies to US English, Japanese, and Korean transcription only. * - * See [Numeric redaction](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-output#redaction). + * See [Numeric + * redaction](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-formatting#numeric-redaction). * @param {boolean} [params.processingMetrics] - If `true`, requests processing metrics about the service's * transcription of the input audio. The service returns processing metrics at the interval specified by the * `processing_metrics_interval` parameter. 
It also returns processing metrics for transcription events, for example, * for final and interim results. By default, the service returns no processing metrics. * * See [Processing - * metrics](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-metrics#processing_metrics). + * metrics](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-metrics#processing-metrics). * @param {number} [params.processingMetricsInterval] - Specifies the interval in real wall-clock seconds at which the * service is to return processing metrics. The parameter is ignored unless the `processing_metrics` parameter is set * to `true`. @@ -827,12 +914,12 @@ class SpeechToTextV1 extends BaseService { * of the audio, the service returns processing metrics only for transcription events. * * See [Processing - * metrics](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-metrics#processing_metrics). + * metrics](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-metrics#processing-metrics). * @param {boolean} [params.audioMetrics] - If `true`, requests detailed information about the signal characteristics * of the input audio. The service returns audio metrics with the final transcription results. By default, the service * returns no audio metrics. * - * See [Audio metrics](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-metrics#audio_metrics). + * See [Audio metrics](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-metrics#audio-metrics). * @param {number} [params.endOfPhraseSilenceTime] - If `true`, specifies the duration of the pause interval at which * the service splits a transcript into multiple final results. If the service detects pauses or extended silence * before it reaches the end of the audio stream, its response can include multiple final results. Silence indicates a @@ -846,7 +933,7 @@ class SpeechToTextV1 extends BaseService { * The default pause interval for most languages is 0.8 seconds; the default for Chinese is 0.6 seconds. * * See [End of phrase silence - * time](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-output#silence_time). + * time](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-parsing#silence-time). * @param {boolean} [params.splitTranscriptAtPhraseEnd] - If `true`, directs the service to split the transcript into * multiple final results based on semantic features of the input, for example, at the conclusion of meaningful * phrases such as sentences. The service bases its understanding of semantic features on the base language model that @@ -854,7 +941,7 @@ class SpeechToTextV1 extends BaseService { * transcript. By default, the service splits transcripts based solely on the pause interval. * * See [Split transcript at phrase - * end](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-output#split_transcript). + * end](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-parsing#split-transcript). * @param {number} [params.speechDetectorSensitivity] - The sensitivity of speech activity detection that the service * is to perform. Use the parameter to suppress word insertions from music, coughing, and other non-speech events. The * service biases the audio it passes for speech recognition by evaluating the input audio against prior models of @@ -865,8 +952,8 @@ class SpeechToTextV1 extends BaseService { * * 0.5 (the default) provides a reasonable compromise for the level of sensitivity. * * 1.0 suppresses no audio (speech detection sensitivity is disabled). 
* - * The values increase on a monotonic curve. See [Speech Activity - * Detection](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-input#detection). + * The values increase on a monotonic curve. See [Speech detector + * sensitivity](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-detection#detection-parameters-sensitivity). * @param {number} [params.backgroundAudioSuppression] - The level to which the service is to suppress background * audio based on its volume to prevent it from being transcribed as speech. Use the parameter to suppress side * conversations or background noise. @@ -876,8 +963,21 @@ class SpeechToTextV1 extends BaseService { * * 0.5 provides a reasonable level of audio suppression for general usage. * * 1.0 suppresses all audio (no audio is transcribed). * - * The values increase on a monotonic curve. See [Speech Activity - * Detection](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-input#detection). + * The values increase on a monotonic curve. See [Background audio + * suppression](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-detection#detection-parameters-suppression). + * @param {boolean} [params.lowLatency] - If `true` for next-generation `Multimedia` and `Telephony` models that + * support low latency, directs the service to produce results even more quickly than it usually does. Next-generation + * models produce transcription results faster than previous-generation models. The `low_latency` parameter causes the + * models to produce results even more quickly, though the results might be less accurate when the parameter is used. + * + * **Note:** The parameter is beta functionality. It is not available for previous-generation `Broadband` and + * `Narrowband` models. It is available only for some next-generation models. + * + * * For a list of next-generation models that support low latency, see [Supported language + * models](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-models-ng#models-ng-supported) for + * next-generation models. + * * For more information about the `low_latency` parameter, see [Low + * latency](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-interim#low-latency). * @param {OutgoingHttpHeaders} [params.headers] - Custom request headers * @returns {Promise>} */ @@ -920,7 +1020,8 @@ class SpeechToTextV1 extends BaseService { 'end_of_phrase_silence_time': _params.endOfPhraseSilenceTime, 'split_transcript_at_phrase_end': _params.splitTranscriptAtPhraseEnd, 'speech_detector_sensitivity': _params.speechDetectorSensitivity, - 'background_audio_suppression': _params.backgroundAudioSuppression + 'background_audio_suppression': _params.backgroundAudioSuppression, + 'low_latency': _params.lowLatency }; const sdkHeaders = getSdkHeaders(SpeechToTextV1.DEFAULT_SERVICE_NAME, 'v1', 'createJob'); @@ -1355,6 +1456,9 @@ class SpeechToTextV1 extends BaseService { * * The value that you assign is used for all recognition requests that use the model. You can override it for any * recognition request by specifying a customization weight for that request. + * + * See [Using customization + * weight](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-languageUse#weight). * @param {OutgoingHttpHeaders} [params.headers] - Custom request headers * @returns {Promise>} */ @@ -1460,7 +1564,7 @@ class SpeechToTextV1 extends BaseService { * requests for the model until the upgrade completes. 
* * **See also:** [Upgrading a custom language - * model](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-customUpgrade#upgradeLanguage). + * model](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-custom-upgrade#custom-upgrade-language). * * @param {Object} params - The parameters to send to the service. * @param {string} params.customizationId - The customization ID (GUID) of the custom language model that is to be @@ -2795,7 +2899,7 @@ class SpeechToTextV1 extends BaseService { * was not trained with a custom language model. * * **See also:** [Upgrading a custom acoustic - * model](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-customUpgrade#upgradeAcoustic). + * model](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-custom-upgrade#custom-upgrade-acoustic). * * @param {Object} params - The parameters to send to the service. * @param {string} params.customizationId - The customization ID (GUID) of the custom acoustic model that is to be @@ -2809,7 +2913,7 @@ class SpeechToTextV1 extends BaseService { * has been modified since it was last trained. Use this parameter only to force the upgrade of a custom acoustic * model that is trained with a custom language model, and only if you receive a 400 response code and the message `No * input data modified since last training`. See [Upgrading a custom acoustic - * model](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-customUpgrade#upgradeAcoustic). + * model](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-custom-upgrade#custom-upgrade-acoustic). * @param {OutgoingHttpHeaders} [params.headers] - Custom request headers * @returns {Promise>} */ @@ -2965,8 +3069,8 @@ class SpeechToTextV1 extends BaseService { * higher than the minimum required rate, the service down-samples the audio to the appropriate rate. If the sampling * rate of the audio is lower than the minimum required rate, the service labels the audio file as `invalid`. * - * **See also:** [Audio - * formats](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-audio-formats#audio-formats). + * **See also:** [Supported audio + * formats](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-audio-formats). 
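+ *
+ * As a rough, illustrative sketch (not part of the generated documentation), adding a single
+ * audio-type resource to a custom acoustic model might look like this; the `customizationId`
+ * value is a placeholder, and `fs` and the `speechToText` client are assumed to be available
+ * as in the earlier sketch:
+ *
+ *     const response = await speechToText.addAudio({
+ *       customizationId: '{customization_id}',
+ *       audioName: 'audio1',
+ *       audioResource: fs.createReadStream('audio1.wav'),
+ *       contentType: 'audio/wav',
+ *     });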
* * ### Content types for archive-type resources * @@ -3296,15 +3400,21 @@ namespace SpeechToTextV1 { export enum ModelId { AR_AR_BROADBANDMODEL = 'ar-AR_BroadbandModel', AR_MS_BROADBANDMODEL = 'ar-MS_BroadbandModel', + AR_MS_TELEPHONY = 'ar-MS_Telephony', DE_DE_BROADBANDMODEL = 'de-DE_BroadbandModel', DE_DE_NARROWBANDMODEL = 'de-DE_NarrowbandModel', + DE_DE_TELEPHONY = 'de-DE_Telephony', EN_AU_BROADBANDMODEL = 'en-AU_BroadbandModel', EN_AU_NARROWBANDMODEL = 'en-AU_NarrowbandModel', + EN_AU_TELEPHONY = 'en-AU_Telephony', EN_GB_BROADBANDMODEL = 'en-GB_BroadbandModel', EN_GB_NARROWBANDMODEL = 'en-GB_NarrowbandModel', + EN_GB_TELEPHONY = 'en-GB_Telephony', EN_US_BROADBANDMODEL = 'en-US_BroadbandModel', + EN_US_MULTIMEDIA = 'en-US_Multimedia', EN_US_NARROWBANDMODEL = 'en-US_NarrowbandModel', EN_US_SHORTFORM_NARROWBANDMODEL = 'en-US_ShortForm_NarrowbandModel', + EN_US_TELEPHONY = 'en-US_Telephony', ES_AR_BROADBANDMODEL = 'es-AR_BroadbandModel', ES_AR_NARROWBANDMODEL = 'es-AR_NarrowbandModel', ES_CL_BROADBANDMODEL = 'es-CL_BroadbandModel', @@ -3313,16 +3423,20 @@ namespace SpeechToTextV1 { ES_CO_NARROWBANDMODEL = 'es-CO_NarrowbandModel', ES_ES_BROADBANDMODEL = 'es-ES_BroadbandModel', ES_ES_NARROWBANDMODEL = 'es-ES_NarrowbandModel', + ES_ES_TELEPHONY = 'es-ES_Telephony', ES_MX_BROADBANDMODEL = 'es-MX_BroadbandModel', ES_MX_NARROWBANDMODEL = 'es-MX_NarrowbandModel', ES_PE_BROADBANDMODEL = 'es-PE_BroadbandModel', ES_PE_NARROWBANDMODEL = 'es-PE_NarrowbandModel', FR_CA_BROADBANDMODEL = 'fr-CA_BroadbandModel', FR_CA_NARROWBANDMODEL = 'fr-CA_NarrowbandModel', + FR_CA_TELEPHONY = 'fr-CA_Telephony', FR_FR_BROADBANDMODEL = 'fr-FR_BroadbandModel', FR_FR_NARROWBANDMODEL = 'fr-FR_NarrowbandModel', + FR_FR_TELEPHONY = 'fr-FR_Telephony', IT_IT_BROADBANDMODEL = 'it-IT_BroadbandModel', IT_IT_NARROWBANDMODEL = 'it-IT_NarrowbandModel', + IT_IT_TELEPHONY = 'it-IT_Telephony', JA_JP_BROADBANDMODEL = 'ja-JP_BroadbandModel', JA_JP_NARROWBANDMODEL = 'ja-JP_NarrowbandModel', KO_KR_BROADBANDMODEL = 'ko-KR_BroadbandModel', @@ -3331,6 +3445,7 @@ namespace SpeechToTextV1 { NL_NL_NARROWBANDMODEL = 'nl-NL_NarrowbandModel', PT_BR_BROADBANDMODEL = 'pt-BR_BroadbandModel', PT_BR_NARROWBANDMODEL = 'pt-BR_NarrowbandModel', + PT_BR_TELEPHONY = 'pt-BR_Telephony', ZH_CN_BROADBANDMODEL = 'zh-CN_BroadbandModel', ZH_CN_NARROWBANDMODEL = 'zh-CN_NarrowbandModel', } @@ -3346,14 +3461,15 @@ namespace SpeechToTextV1 { contentType?: RecognizeConstants.ContentType | string; /** The identifier of the model that is to be used for the recognition request. (**Note:** The model * `ar-AR_BroadbandModel` is deprecated; use `ar-MS_BroadbandModel` instead.) See [Languages and - * models](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-models#models). + * models](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-models) and [Next-generation languages + * and models](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-models-ng). */ model?: RecognizeConstants.Model | string; /** The customization ID (GUID) of a custom language model that is to be used with the recognition request. The * base model of the specified custom language model must match the model specified with the `model` parameter. You * must make the request with credentials for the instance of the service that owns the custom model. By default, - * no custom language model is used. See [Custom - * models](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-input#custom-input). + * no custom language model is used. 
See [Using a custom language model for speech + * recognition](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-languageUse). * * **Note:** Use this parameter instead of the deprecated `customization_id` parameter. */ @@ -3361,15 +3477,16 @@ namespace SpeechToTextV1 { /** The customization ID (GUID) of a custom acoustic model that is to be used with the recognition request. The * base model of the specified custom acoustic model must match the model specified with the `model` parameter. You * must make the request with credentials for the instance of the service that owns the custom model. By default, - * no custom acoustic model is used. See [Custom - * models](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-input#custom-input). + * no custom acoustic model is used. See [Using a custom acoustic model for speech + * recognition](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-acousticUse). */ acousticCustomizationId?: string; /** The version of the specified base model that is to be used with the recognition request. Multiple versions * of a base model can exist when a model is updated for internal improvements. The parameter is intended primarily * for use with custom models that have been upgraded for a new base model. The default value depends on whether - * the parameter is used with or without a custom model. See [Base model - * version](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-input#version). + * the parameter is used with or without a custom model. See [Making speech recognition requests with upgraded + * custom + * models](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-custom-upgrade-use#custom-upgrade-use-recognition). */ baseModelVersion?: string; /** If you specify the customization ID (GUID) of a custom language model with the recognition request, the @@ -3384,7 +3501,8 @@ namespace SpeechToTextV1 { * of OOV words from the custom model. Use caution when setting the weight: a higher value can improve the accuracy * of phrases from the custom model's domain, but it can negatively affect performance on non-domain phrases. * - * See [Custom models](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-input#custom-input). + * See [Using customization + * weight](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-languageUse#weight). */ customizationWeight?: number; /** The time in seconds after which, if only silence (no speech) is detected in streaming audio, the connection @@ -3402,41 +3520,42 @@ namespace SpeechToTextV1 { * 1024 characters, though the maximum effective length for double-byte languages might be shorter. Keywords are * case-insensitive. * - * See [Keyword spotting](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-output#keyword_spotting). + * See [Keyword + * spotting](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-spotting#keyword-spotting). */ keywords?: string[]; /** A confidence value that is the lower bound for spotting a keyword. A word is considered to match a keyword * if its confidence is greater than or equal to the threshold. Specify a probability between 0.0 and 1.0. If you * specify a threshold, you must also specify one or more keywords. The service performs no keyword spotting if you * omit either parameter. See [Keyword - * spotting](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-output#keyword_spotting). 
+ * spotting](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-spotting#keyword-spotting). */ keywordsThreshold?: number; /** The maximum number of alternative transcripts that the service is to return. By default, the service returns * a single transcript. If you specify a value of `0`, the service uses the default value, `1`. See [Maximum - * alternatives](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-output#max_alternatives). + * alternatives](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-metadata#max-alternatives). */ maxAlternatives?: number; /** A confidence value that is the lower bound for identifying a hypothesis as a possible word alternative (also * known as "Confusion Networks"). An alternative word is considered if its confidence is greater than or equal to * the threshold. Specify a probability between 0.0 and 1.0. By default, the service computes no alternative words. * See [Word - * alternatives](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-output#word_alternatives). + * alternatives](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-spotting#word-alternatives). */ wordAlternativesThreshold?: number; /** If `true`, the service returns a confidence measure in the range of 0.0 to 1.0 for each word. By default, * the service returns no word confidence scores. See [Word - * confidence](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-output#word_confidence). + * confidence](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-metadata#word-confidence). */ wordConfidence?: boolean; /** If `true`, the service returns time alignment for each word. By default, no timestamps are returned. See - * [Word timestamps](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-output#word_timestamps). + * [Word timestamps](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-metadata#word-timestamps). */ timestamps?: boolean; /** If `true`, the service filters profanity from all output except for keyword results by replacing * inappropriate words with a series of asterisks. Set the parameter to `false` to return results with no - * censoring. Applies to US English transcription only. See [Profanity - * filtering](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-output#profanity_filter). + * censoring. Applies to US English and Japanese transcription only. See [Profanity + * filtering](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-formatting#profanity-filtering). */ profanityFilter?: boolean; /** If `true`, the service converts dates, times, series of digits and numbers, phone numbers, currency values, @@ -3446,17 +3565,20 @@ namespace SpeechToTextV1 { * * **Note:** Applies to US English, Japanese, and Spanish transcription only. * - * See [Smart formatting](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-output#smart_formatting). + * See [Smart + * formatting](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-formatting#smart-formatting). */ smartFormatting?: boolean; /** If `true`, the response includes labels that identify which words were spoken by which participants in a * multi-person exchange. By default, the service returns no speaker labels. Setting `speaker_labels` to `true` * forces the `timestamps` parameter to be `true`, regardless of whether you specify `false` for the parameter. 
+ * * For previous-generation models, can be used for US English, Australian English, German, Japanese, Korean, and + * Spanish (both broadband and narrowband models) and UK English (narrowband model) transcription only. + * * For next-generation models, can be used for English (Australian, UK, and US), German, and Spanish + * transcription only. * - * **Note:** Applies to US English, Australian English, German, Japanese, Korean, and Spanish (both broadband and - * narrowband models) and UK English (narrowband model) transcription only. - * - * See [Speaker labels](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-output#speaker_labels). + * Restrictions and limitations apply to the use of speaker labels for both types of models. See [Speaker + * labels](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-speaker-labels). */ speakerLabels?: boolean; /** **Deprecated.** Use the `language_customization_id` parameter to specify the customization ID (GUID) of a @@ -3467,8 +3589,8 @@ namespace SpeechToTextV1 { /** The name of a grammar that is to be used with the recognition request. If you specify a grammar, you must * also use the `language_customization_id` parameter to specify the name of the custom language model for which * the grammar is defined. The service recognizes only strings that are recognized by the specified grammar; it - * does not recognize other custom words from the model's words resource. See - * [Grammars](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-input#grammars-input). + * does not recognize other custom words from the model's words resource. See [Using a grammar for speech + * recognition](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-grammarUse). */ grammarName?: string; /** If `true`, the service redacts, or masks, numeric data from final transcripts. The feature redacts any @@ -3482,13 +3604,14 @@ namespace SpeechToTextV1 { * * **Note:** Applies to US English, Japanese, and Korean transcription only. * - * See [Numeric redaction](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-output#redaction). + * See [Numeric + * redaction](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-formatting#numeric-redaction). */ redaction?: boolean; /** If `true`, requests detailed information about the signal characteristics of the input audio. The service * returns audio metrics with the final transcription results. By default, the service returns no audio metrics. * - * See [Audio metrics](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-metrics#audio_metrics). + * See [Audio metrics](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-metrics#audio-metrics). */ audioMetrics?: boolean; /** If `true`, specifies the duration of the pause interval at which the service splits a transcript into @@ -3504,7 +3627,7 @@ namespace SpeechToTextV1 { * The default pause interval for most languages is 0.8 seconds; the default for Chinese is 0.6 seconds. * * See [End of phrase silence - * time](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-output#silence_time). + * time](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-parsing#silence-time). */ endOfPhraseSilenceTime?: number; /** If `true`, directs the service to split the transcript into multiple final results based on semantic @@ -3514,7 +3637,7 @@ namespace SpeechToTextV1 { * splits transcripts based solely on the pause interval. 
* * See [Split transcript at phrase - * end](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-output#split_transcript). + * end](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-parsing#split-transcript). */ splitTranscriptAtPhraseEnd?: boolean; /** The sensitivity of speech activity detection that the service is to perform. Use the parameter to suppress @@ -3526,8 +3649,8 @@ namespace SpeechToTextV1 { * * 0.5 (the default) provides a reasonable compromise for the level of sensitivity. * * 1.0 suppresses no audio (speech detection sensitivity is disabled). * - * The values increase on a monotonic curve. See [Speech Activity - * Detection](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-input#detection). + * The values increase on a monotonic curve. See [Speech detector + * sensitivity](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-detection#detection-parameters-sensitivity). */ speechDetectorSensitivity?: number; /** The level to which the service is to suppress background audio based on its volume to prevent it from being @@ -3538,10 +3661,25 @@ namespace SpeechToTextV1 { * * 0.5 provides a reasonable level of audio suppression for general usage. * * 1.0 suppresses all audio (no audio is transcribed). * - * The values increase on a monotonic curve. See [Speech Activity - * Detection](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-input#detection). + * The values increase on a monotonic curve. See [Background audio + * suppression](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-detection#detection-parameters-suppression). */ backgroundAudioSuppression?: number; + /** If `true` for next-generation `Multimedia` and `Telephony` models that support low latency, directs the + * service to produce results even more quickly than it usually does. Next-generation models produce transcription + * results faster than previous-generation models. The `low_latency` parameter causes the models to produce results + * even more quickly, though the results might be less accurate when the parameter is used. + * + * **Note:** The parameter is beta functionality. It is not available for previous-generation `Broadband` and + * `Narrowband` models. It is available only for some next-generation models. + * + * * For a list of next-generation models that support low latency, see [Supported language + * models](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-models-ng#models-ng-supported) for + * next-generation models. + * * For more information about the `low_latency` parameter, see [Low + * latency](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-interim#low-latency). + */ + lowLatency?: boolean; headers?: OutgoingHttpHeaders; } @@ -3566,19 +3704,25 @@ namespace SpeechToTextV1 { AUDIO_WEBM_CODECS_OPUS = 'audio/webm;codecs=opus', AUDIO_WEBM_CODECS_VORBIS = 'audio/webm;codecs=vorbis', } - /** The identifier of the model that is to be used for the recognition request. (**Note:** The model `ar-AR_BroadbandModel` is deprecated; use `ar-MS_BroadbandModel` instead.) See [Languages and models](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-models#models). */ + /** The identifier of the model that is to be used for the recognition request. (**Note:** The model `ar-AR_BroadbandModel` is deprecated; use `ar-MS_BroadbandModel` instead.) 
See [Languages and models](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-models) and [Next-generation languages and models](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-models-ng). */ export enum Model { AR_AR_BROADBANDMODEL = 'ar-AR_BroadbandModel', AR_MS_BROADBANDMODEL = 'ar-MS_BroadbandModel', + AR_MS_TELEPHONY = 'ar-MS_Telephony', DE_DE_BROADBANDMODEL = 'de-DE_BroadbandModel', DE_DE_NARROWBANDMODEL = 'de-DE_NarrowbandModel', + DE_DE_TELEPHONY = 'de-DE_Telephony', EN_AU_BROADBANDMODEL = 'en-AU_BroadbandModel', EN_AU_NARROWBANDMODEL = 'en-AU_NarrowbandModel', + EN_AU_TELEPHONY = 'en-AU_Telephony', EN_GB_BROADBANDMODEL = 'en-GB_BroadbandModel', EN_GB_NARROWBANDMODEL = 'en-GB_NarrowbandModel', + EN_GB_TELEPHONY = 'en-GB_Telephony', EN_US_BROADBANDMODEL = 'en-US_BroadbandModel', + EN_US_MULTIMEDIA = 'en-US_Multimedia', EN_US_NARROWBANDMODEL = 'en-US_NarrowbandModel', EN_US_SHORTFORM_NARROWBANDMODEL = 'en-US_ShortForm_NarrowbandModel', + EN_US_TELEPHONY = 'en-US_Telephony', ES_AR_BROADBANDMODEL = 'es-AR_BroadbandModel', ES_AR_NARROWBANDMODEL = 'es-AR_NarrowbandModel', ES_CL_BROADBANDMODEL = 'es-CL_BroadbandModel', @@ -3587,16 +3731,20 @@ namespace SpeechToTextV1 { ES_CO_NARROWBANDMODEL = 'es-CO_NarrowbandModel', ES_ES_BROADBANDMODEL = 'es-ES_BroadbandModel', ES_ES_NARROWBANDMODEL = 'es-ES_NarrowbandModel', + ES_ES_TELEPHONY = 'es-ES_Telephony', ES_MX_BROADBANDMODEL = 'es-MX_BroadbandModel', ES_MX_NARROWBANDMODEL = 'es-MX_NarrowbandModel', ES_PE_BROADBANDMODEL = 'es-PE_BroadbandModel', ES_PE_NARROWBANDMODEL = 'es-PE_NarrowbandModel', FR_CA_BROADBANDMODEL = 'fr-CA_BroadbandModel', FR_CA_NARROWBANDMODEL = 'fr-CA_NarrowbandModel', + FR_CA_TELEPHONY = 'fr-CA_Telephony', FR_FR_BROADBANDMODEL = 'fr-FR_BroadbandModel', FR_FR_NARROWBANDMODEL = 'fr-FR_NarrowbandModel', + FR_FR_TELEPHONY = 'fr-FR_Telephony', IT_IT_BROADBANDMODEL = 'it-IT_BroadbandModel', IT_IT_NARROWBANDMODEL = 'it-IT_NarrowbandModel', + IT_IT_TELEPHONY = 'it-IT_Telephony', JA_JP_BROADBANDMODEL = 'ja-JP_BroadbandModel', JA_JP_NARROWBANDMODEL = 'ja-JP_NarrowbandModel', KO_KR_BROADBANDMODEL = 'ko-KR_BroadbandModel', @@ -3605,6 +3753,7 @@ namespace SpeechToTextV1 { NL_NL_NARROWBANDMODEL = 'nl-NL_NarrowbandModel', PT_BR_BROADBANDMODEL = 'pt-BR_BroadbandModel', PT_BR_NARROWBANDMODEL = 'pt-BR_NarrowbandModel', + PT_BR_TELEPHONY = 'pt-BR_Telephony', ZH_CN_BROADBANDMODEL = 'zh-CN_BroadbandModel', ZH_CN_NARROWBANDMODEL = 'zh-CN_NarrowbandModel', } @@ -3643,7 +3792,8 @@ namespace SpeechToTextV1 { contentType?: CreateJobConstants.ContentType | string; /** The identifier of the model that is to be used for the recognition request. (**Note:** The model * `ar-AR_BroadbandModel` is deprecated; use `ar-MS_BroadbandModel` instead.) See [Languages and - * models](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-models#models). + * models](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-models) and [Next-generation languages + * and models](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-models-ng). */ model?: CreateJobConstants.Model | string; /** A URL to which callback notifications are to be sent. The URL must already be successfully allowlisted by @@ -3685,8 +3835,8 @@ namespace SpeechToTextV1 { /** The customization ID (GUID) of a custom language model that is to be used with the recognition request. The * base model of the specified custom language model must match the model specified with the `model` parameter. 
You * must make the request with credentials for the instance of the service that owns the custom model. By default, - * no custom language model is used. See [Custom - * models](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-input#custom-input). + * no custom language model is used. See [Using a custom language model for speech + * recognition](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-languageUse). * * **Note:** Use this parameter instead of the deprecated `customization_id` parameter. */ @@ -3694,15 +3844,16 @@ namespace SpeechToTextV1 { /** The customization ID (GUID) of a custom acoustic model that is to be used with the recognition request. The * base model of the specified custom acoustic model must match the model specified with the `model` parameter. You * must make the request with credentials for the instance of the service that owns the custom model. By default, - * no custom acoustic model is used. See [Custom - * models](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-input#custom-input). + * no custom acoustic model is used. See [Using a custom acoustic model for speech + * recognition](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-acousticUse). */ acousticCustomizationId?: string; /** The version of the specified base model that is to be used with the recognition request. Multiple versions * of a base model can exist when a model is updated for internal improvements. The parameter is intended primarily * for use with custom models that have been upgraded for a new base model. The default value depends on whether - * the parameter is used with or without a custom model. See [Base model - * version](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-input#version). + * the parameter is used with or without a custom model. See [Making speech recognition requests with upgraded + * custom + * models](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-custom-upgrade-use#custom-upgrade-use-recognition). */ baseModelVersion?: string; /** If you specify the customization ID (GUID) of a custom language model with the recognition request, the @@ -3717,7 +3868,8 @@ namespace SpeechToTextV1 { * of OOV words from the custom model. Use caution when setting the weight: a higher value can improve the accuracy * of phrases from the custom model's domain, but it can negatively affect performance on non-domain phrases. * - * See [Custom models](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-input#custom-input). + * See [Using customization + * weight](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-languageUse#weight). */ customizationWeight?: number; /** The time in seconds after which, if only silence (no speech) is detected in streaming audio, the connection @@ -3735,41 +3887,42 @@ namespace SpeechToTextV1 { * 1024 characters, though the maximum effective length for double-byte languages might be shorter. Keywords are * case-insensitive. * - * See [Keyword spotting](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-output#keyword_spotting). + * See [Keyword + * spotting](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-spotting#keyword-spotting). */ keywords?: string[]; /** A confidence value that is the lower bound for spotting a keyword. A word is considered to match a keyword * if its confidence is greater than or equal to the threshold. Specify a probability between 0.0 and 1.0. 
If you * specify a threshold, you must also specify one or more keywords. The service performs no keyword spotting if you * omit either parameter. See [Keyword - * spotting](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-output#keyword_spotting). + * spotting](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-spotting#keyword-spotting). */ keywordsThreshold?: number; /** The maximum number of alternative transcripts that the service is to return. By default, the service returns * a single transcript. If you specify a value of `0`, the service uses the default value, `1`. See [Maximum - * alternatives](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-output#max_alternatives). + * alternatives](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-metadata#max-alternatives). */ maxAlternatives?: number; /** A confidence value that is the lower bound for identifying a hypothesis as a possible word alternative (also * known as "Confusion Networks"). An alternative word is considered if its confidence is greater than or equal to * the threshold. Specify a probability between 0.0 and 1.0. By default, the service computes no alternative words. * See [Word - * alternatives](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-output#word_alternatives). + * alternatives](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-spotting#word-alternatives). */ wordAlternativesThreshold?: number; /** If `true`, the service returns a confidence measure in the range of 0.0 to 1.0 for each word. By default, * the service returns no word confidence scores. See [Word - * confidence](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-output#word_confidence). + * confidence](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-metadata#word-confidence). */ wordConfidence?: boolean; /** If `true`, the service returns time alignment for each word. By default, no timestamps are returned. See - * [Word timestamps](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-output#word_timestamps). + * [Word timestamps](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-metadata#word-timestamps). */ timestamps?: boolean; /** If `true`, the service filters profanity from all output except for keyword results by replacing * inappropriate words with a series of asterisks. Set the parameter to `false` to return results with no - * censoring. Applies to US English transcription only. See [Profanity - * filtering](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-output#profanity_filter). + * censoring. Applies to US English and Japanese transcription only. See [Profanity + * filtering](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-formatting#profanity-filtering). */ profanityFilter?: boolean; /** If `true`, the service converts dates, times, series of digits and numbers, phone numbers, currency values, @@ -3779,17 +3932,20 @@ namespace SpeechToTextV1 { * * **Note:** Applies to US English, Japanese, and Spanish transcription only. * - * See [Smart formatting](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-output#smart_formatting). + * See [Smart + * formatting](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-formatting#smart-formatting). */ smartFormatting?: boolean; /** If `true`, the response includes labels that identify which words were spoken by which participants in a * multi-person exchange. By default, the service returns no speaker labels. 
Setting `speaker_labels` to `true` * forces the `timestamps` parameter to be `true`, regardless of whether you specify `false` for the parameter. + * * For previous-generation models, can be used for US English, Australian English, German, Japanese, Korean, and + * Spanish (both broadband and narrowband models) and UK English (narrowband model) transcription only. + * * For next-generation models, can be used for English (Australian, UK, and US), German, and Spanish + * transcription only. * - * **Note:** Applies to US English, Australian English, German, Japanese, Korean, and Spanish (both broadband and - * narrowband models) and UK English (narrowband model) transcription only. - * - * See [Speaker labels](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-output#speaker_labels). + * Restrictions and limitations apply to the use of speaker labels for both types of models. See [Speaker + * labels](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-speaker-labels). */ speakerLabels?: boolean; /** **Deprecated.** Use the `language_customization_id` parameter to specify the customization ID (GUID) of a @@ -3800,8 +3956,8 @@ namespace SpeechToTextV1 { /** The name of a grammar that is to be used with the recognition request. If you specify a grammar, you must * also use the `language_customization_id` parameter to specify the name of the custom language model for which * the grammar is defined. The service recognizes only strings that are recognized by the specified grammar; it - * does not recognize other custom words from the model's words resource. See - * [Grammars](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-input#grammars-input). + * does not recognize other custom words from the model's words resource. See [Using a grammar for speech + * recognition](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-grammarUse). */ grammarName?: string; /** If `true`, the service redacts, or masks, numeric data from final transcripts. The feature redacts any @@ -3815,7 +3971,8 @@ namespace SpeechToTextV1 { * * **Note:** Applies to US English, Japanese, and Korean transcription only. * - * See [Numeric redaction](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-output#redaction). + * See [Numeric + * redaction](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-formatting#numeric-redaction). */ redaction?: boolean; /** If `true`, requests processing metrics about the service's transcription of the input audio. The service @@ -3824,7 +3981,7 @@ namespace SpeechToTextV1 { * service returns no processing metrics. * * See [Processing - * metrics](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-metrics#processing_metrics). + * metrics](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-metrics#processing-metrics). */ processingMetrics?: boolean; /** Specifies the interval in real wall-clock seconds at which the service is to return processing metrics. The @@ -3838,13 +3995,13 @@ namespace SpeechToTextV1 { * duration of the audio, the service returns processing metrics only for transcription events. * * See [Processing - * metrics](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-metrics#processing_metrics). + * metrics](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-metrics#processing-metrics). */ processingMetricsInterval?: number; /** If `true`, requests detailed information about the signal characteristics of the input audio. 
The service * returns audio metrics with the final transcription results. By default, the service returns no audio metrics. * - * See [Audio metrics](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-metrics#audio_metrics). + * See [Audio metrics](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-metrics#audio-metrics). */ audioMetrics?: boolean; /** If `true`, specifies the duration of the pause interval at which the service splits a transcript into @@ -3860,7 +4017,7 @@ namespace SpeechToTextV1 { * The default pause interval for most languages is 0.8 seconds; the default for Chinese is 0.6 seconds. * * See [End of phrase silence - * time](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-output#silence_time). + * time](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-parsing#silence-time). */ endOfPhraseSilenceTime?: number; /** If `true`, directs the service to split the transcript into multiple final results based on semantic @@ -3870,7 +4027,7 @@ namespace SpeechToTextV1 { * splits transcripts based solely on the pause interval. * * See [Split transcript at phrase - * end](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-output#split_transcript). + * end](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-parsing#split-transcript). */ splitTranscriptAtPhraseEnd?: boolean; /** The sensitivity of speech activity detection that the service is to perform. Use the parameter to suppress @@ -3882,8 +4039,8 @@ namespace SpeechToTextV1 { * * 0.5 (the default) provides a reasonable compromise for the level of sensitivity. * * 1.0 suppresses no audio (speech detection sensitivity is disabled). * - * The values increase on a monotonic curve. See [Speech Activity - * Detection](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-input#detection). + * The values increase on a monotonic curve. See [Speech detector + * sensitivity](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-detection#detection-parameters-sensitivity). */ speechDetectorSensitivity?: number; /** The level to which the service is to suppress background audio based on its volume to prevent it from being @@ -3894,10 +4051,25 @@ namespace SpeechToTextV1 { * * 0.5 provides a reasonable level of audio suppression for general usage. * * 1.0 suppresses all audio (no audio is transcribed). * - * The values increase on a monotonic curve. See [Speech Activity - * Detection](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-input#detection). + * The values increase on a monotonic curve. See [Background audio + * suppression](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-detection#detection-parameters-suppression). */ backgroundAudioSuppression?: number; + /** If `true` for next-generation `Multimedia` and `Telephony` models that support low latency, directs the + * service to produce results even more quickly than it usually does. Next-generation models produce transcription + * results faster than previous-generation models. The `low_latency` parameter causes the models to produce results + * even more quickly, though the results might be less accurate when the parameter is used. + * + * **Note:** The parameter is beta functionality. It is not available for previous-generation `Broadband` and + * `Narrowband` models. It is available only for some next-generation models. 
+ * + * * For a list of next-generation models that support low latency, see [Supported language + * models](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-models-ng#models-ng-supported) for + * next-generation models. + * * For more information about the `low_latency` parameter, see [Low + * latency](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-interim#low-latency). + */ + lowLatency?: boolean; headers?: OutgoingHttpHeaders; } @@ -3922,19 +4094,25 @@ namespace SpeechToTextV1 { AUDIO_WEBM_CODECS_OPUS = 'audio/webm;codecs=opus', AUDIO_WEBM_CODECS_VORBIS = 'audio/webm;codecs=vorbis', } - /** The identifier of the model that is to be used for the recognition request. (**Note:** The model `ar-AR_BroadbandModel` is deprecated; use `ar-MS_BroadbandModel` instead.) See [Languages and models](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-models#models). */ + /** The identifier of the model that is to be used for the recognition request. (**Note:** The model `ar-AR_BroadbandModel` is deprecated; use `ar-MS_BroadbandModel` instead.) See [Languages and models](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-models) and [Next-generation languages and models](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-models-ng). */ export enum Model { AR_AR_BROADBANDMODEL = 'ar-AR_BroadbandModel', AR_MS_BROADBANDMODEL = 'ar-MS_BroadbandModel', + AR_MS_TELEPHONY = 'ar-MS_Telephony', DE_DE_BROADBANDMODEL = 'de-DE_BroadbandModel', DE_DE_NARROWBANDMODEL = 'de-DE_NarrowbandModel', + DE_DE_TELEPHONY = 'de-DE_Telephony', EN_AU_BROADBANDMODEL = 'en-AU_BroadbandModel', EN_AU_NARROWBANDMODEL = 'en-AU_NarrowbandModel', + EN_AU_TELEPHONY = 'en-AU_Telephony', EN_GB_BROADBANDMODEL = 'en-GB_BroadbandModel', EN_GB_NARROWBANDMODEL = 'en-GB_NarrowbandModel', + EN_GB_TELEPHONY = 'en-GB_Telephony', EN_US_BROADBANDMODEL = 'en-US_BroadbandModel', + EN_US_MULTIMEDIA = 'en-US_Multimedia', EN_US_NARROWBANDMODEL = 'en-US_NarrowbandModel', EN_US_SHORTFORM_NARROWBANDMODEL = 'en-US_ShortForm_NarrowbandModel', + EN_US_TELEPHONY = 'en-US_Telephony', ES_AR_BROADBANDMODEL = 'es-AR_BroadbandModel', ES_AR_NARROWBANDMODEL = 'es-AR_NarrowbandModel', ES_CL_BROADBANDMODEL = 'es-CL_BroadbandModel', @@ -3943,16 +4121,20 @@ namespace SpeechToTextV1 { ES_CO_NARROWBANDMODEL = 'es-CO_NarrowbandModel', ES_ES_BROADBANDMODEL = 'es-ES_BroadbandModel', ES_ES_NARROWBANDMODEL = 'es-ES_NarrowbandModel', + ES_ES_TELEPHONY = 'es-ES_Telephony', ES_MX_BROADBANDMODEL = 'es-MX_BroadbandModel', ES_MX_NARROWBANDMODEL = 'es-MX_NarrowbandModel', ES_PE_BROADBANDMODEL = 'es-PE_BroadbandModel', ES_PE_NARROWBANDMODEL = 'es-PE_NarrowbandModel', FR_CA_BROADBANDMODEL = 'fr-CA_BroadbandModel', FR_CA_NARROWBANDMODEL = 'fr-CA_NarrowbandModel', + FR_CA_TELEPHONY = 'fr-CA_Telephony', FR_FR_BROADBANDMODEL = 'fr-FR_BroadbandModel', FR_FR_NARROWBANDMODEL = 'fr-FR_NarrowbandModel', + FR_FR_TELEPHONY = 'fr-FR_Telephony', IT_IT_BROADBANDMODEL = 'it-IT_BroadbandModel', IT_IT_NARROWBANDMODEL = 'it-IT_NarrowbandModel', + IT_IT_TELEPHONY = 'it-IT_Telephony', JA_JP_BROADBANDMODEL = 'ja-JP_BroadbandModel', JA_JP_NARROWBANDMODEL = 'ja-JP_NarrowbandModel', KO_KR_BROADBANDMODEL = 'ko-KR_BroadbandModel', @@ -3961,6 +4143,7 @@ namespace SpeechToTextV1 { NL_NL_NARROWBANDMODEL = 'nl-NL_NarrowbandModel', PT_BR_BROADBANDMODEL = 'pt-BR_BroadbandModel', PT_BR_NARROWBANDMODEL = 'pt-BR_NarrowbandModel', + PT_BR_TELEPHONY = 'pt-BR_Telephony', ZH_CN_BROADBANDMODEL = 'zh-CN_BroadbandModel', ZH_CN_NARROWBANDMODEL = 
'zh-CN_NarrowbandModel', } @@ -4159,6 +4342,9 @@ namespace SpeechToTextV1 { * * The value that you assign is used for all recognition requests that use the model. You can override it for any * recognition request by specifying a customization weight for that request. + * + * See [Using customization + * weight](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-languageUse#weight). */ customizationWeight?: number; headers?: OutgoingHttpHeaders; @@ -4629,7 +4815,7 @@ namespace SpeechToTextV1 { * was last trained. Use this parameter only to force the upgrade of a custom acoustic model that is trained with a * custom language model, and only if you receive a 400 response code and the message `No input data modified since * last training`. See [Upgrading a custom acoustic - * model](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-customUpgrade#upgradeAcoustic). + * model](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-custom-upgrade#custom-upgrade-acoustic). */ force?: boolean; headers?: OutgoingHttpHeaders; @@ -5340,8 +5526,10 @@ namespace SpeechToTextV1 { export interface SpeechRecognitionAlternative { /** A transcription of the audio. */ transcript: string; - /** A score that indicates the service's confidence in the transcript in the range of 0.0 to 1.0. A confidence - * score is returned only for the best alternative and only with results marked as final. + /** A score that indicates the service's confidence in the transcript in the range of 0.0 to 1.0. For speech + * recognition with previous-generation models, a confidence score is returned only for the best alternative and + * only with results marked as final. For speech recognition with next-generation models, a confidence score is + * never returned. */ confidence?: number; /** Time alignments for each word from the transcript as a list of lists. Each inner list consists of three @@ -5443,6 +5631,11 @@ namespace SpeechToTextV1 { * (narrowband model only). Speaker labels are not supported for any other models. */ speaker_labels: boolean; + /** Indicates whether the `low_latency` parameter can be used with a next-generation language model. The field + * is returned only for next-generation models. Previous-generation models do not support the `low_latency` + * parameter. + */ + low_latency?: boolean; } /** The response from training of a custom language or custom acoustic model. 
*/ diff --git a/test/integration/text-to-speech.test.js b/test/integration/text-to-speech.test.js index 75fbb1d3de..6b9bf69ff8 100644 --- a/test/integration/text-to-speech.test.js +++ b/test/integration/text-to-speech.test.js @@ -2,6 +2,8 @@ const { IamAuthenticator } = require('../../dist/auth'); const TextToSpeechV1 = require('../../dist/text-to-speech/v1'); +const fs = require('fs'); +const path = require('path'); const wav = require('wav'); const authHelper = require('../resources/auth_helper.js'); const describe = authHelper.describe; // this runs describe.skip if there is no auth.js file :) @@ -94,6 +96,66 @@ describe('text to speech_integration', () => { customizationId = result.customization_id; }); + describe('custom prompts', () => { + const promptId = 'Hello'; + + it('should addCustomPrompt()', async () => { + expect(customizationId).toBeTruthy(); + + const params = { + customizationId, + promptId, + metadata: { + prompt_text: 'Hello, how are you today?', + }, + file: fs.createReadStream(path.join(__dirname, '../resources/tts_audio.wav')), + filename: 'tts_audio.wav', + }; + + const res = await textToSpeech.addCustomPrompt(params); + const { result } = res || {}; + expect(result.prompt_id).toBe('Hello'); + }); + + it('should listCustomPrompts()', async () => { + expect(customizationId).toBeTruthy(); + + const params = { + customizationId, + }; + + const res = await textToSpeech.listCustomPrompts(params); + const { result } = res || {}; + expect(result.prompts.length).toBeTruthy(); + }); + + it('should getCustomPrompt()', async () => { + expect(customizationId).toBeTruthy(); + + const params = { + customizationId, + promptId, + }; + + const res = await textToSpeech.getCustomPrompt(params); + const { result } = res || {}; + expect(result.prompt_id).toBe('Hello'); + }); + + it('should deleteCustomPrompt()', async () => { + expect(customizationId).toBeTruthy(); + + const params = { + customizationId, + promptId, + }; + + const res = await textToSpeech.deleteCustomPrompt(params); + const { result } = res || {}; + expect(result).toBeDefined(); + }); + }); + it('should listCustomModels() with language', async () => { const params = { language: 'en-GB', @@ -220,4 +282,52 @@ describe('text to speech_integration', () => { expect(result).toBeDefined(); }); }); + + describe('speaker models', () => { + let speakerId; + + it('should createSpeakerModel()', async () => { + const params = { + speakerName: 'Angelo', + audio: fs.createReadStream(path.join(__dirname, '../resources/tts_audio.wav')), + }; + + const res = await textToSpeech.createSpeakerModel(params); + const { result } = res || {}; + expect(result.speaker_id).toBeDefined(); + speakerId = result.speaker_id; + }); + + it('should listSpeakerModels()', async () => { + expect(speakerId).toBeTruthy(); + + const res = await textToSpeech.listSpeakerModels(); + const { result } = res || {}; + expect(result.speakers.length).toBeTruthy(); + }); + + it('should getSpeakerModel()', async () => { + expect(speakerId).toBeTruthy(); + + const params = { + speakerId, + }; + + const res = await textToSpeech.getSpeakerModel(params); + const { result } = res || {}; + expect(result.customizations).toBeDefined(); + }); + + it('should deleteSpeakerModel()', async () => { + expect(speakerId).toBeTruthy(); + + const params = { + speakerId, + }; + + const res = await textToSpeech.deleteSpeakerModel(params); + const { result } = res || {}; + expect(result).toBeDefined(); + }); + }); }); diff --git a/test/resources/tts_audio.wav b/test/resources/tts_audio.wav new 
file mode 100644
index 0000000000000000000000000000000000000000..ba4760649ea3a46afb64ad02ef3f10adce8d24d3
GIT binary patch
literal 75726
[75,726 bytes of base85-encoded WAV audio data omitted]
z-{789H>+_`)kV(f;UP(7`o7BP)h|{4CihwTi1e0XzK(G&i+=ErFNNVQ-ul#{df zHm==R>RaM%QL(RmP|t2H_qW|qs8^4(Z1kdkUbMHjG_|kQR$5qWOa75d*S%KrL+YN` z3g1kor)K9KR#(8?RB2al&59Svd!8qLt;M-Qp5~qKw4hnN@m5*qI}eIeSs$(rzl|=8 z4;0?r!|qii{paLm`TwhBVMj<$BKfB)l4bGXaYM3P{`F(4Ir)1uUJQ_@_~;1PsY}I< z{VTp#Y_716Q}1@$8-%$JQeUDoozvaP?tr~kNaDLgKk<*K)qgd=e4xE> zOnkQ2?Hy%>g}8U8!k6}6svoWWE;BT~CAi!vRGeOOd2X>-#7olSs!pz&lm5ZlJ9nbB`}9ZeQ4baq-qnU*(y0z3L|)>B@&Uc+-;!{XN|^#hfY*?~W>Z7j#T2 z%uSzBy;tt<@%P2AOS>hToTJ=1se3YGa$jUxoH_CNao)Kho2h&yJ6ze_yWI2beZ}`b zYd@EIFE!nrk~|sg6U~axQGdFRB(2HvPYucKQ86g`XQ)Q&J?9>`!@4{AcW`8QjJ(!A zoW<6%c!&6D?dIRBW7sh30x<`5ai#jm1@SOhqpf5YM_RvAwbi%QoAS2<$yoK^+C^;J zTsxy&$)57okE>ROH+tpd37eDSRA(@^biLU8ue_O=KIf;>gT1#5EVC-BSJ(Dt4hT;z zyd2(`ZmNE+;>gtf_7~3OnJcoh({r5TrMErKmZ>21wD-BaCYX>vwC|MSJN93*J9^vY zk8JJSys=Q@U70$m^kheUXS4r;_r7~x!S1@LZ?*kOdQCf6-%6N`tR)6uYXK>T+`I1RBnzxrSMOyx$>0Skrgj_lf2(#8gd_| z`t9wK!BOlF_+BzRy*9Hbb~>M2`*O=1|F@M>D+m1UmKAH?>1<8T%x>W<%!9^@v(xjEouVDAOnOPh_?n}uYBJkdv;BFR^RueJ+O-_zUK?NF zpF9xvFD_meU+Vtqo@uS{4=cP>+$s9TJxd*b-*-My54t|_{u}LM>^f(s)G_HBQ@1jjZ4+J{o*q3MeWhr#!+PGn%>I+}nR|e`Sw8Px z>n?GhbEkVFogrfM_7KKWcSLc)$A&kFss6BdO8?Bh7ybIw303nm>x=hw+~4_>m2P@@ zBFF3O}Cx7k0Va1uzCg%^{L-r))t4}X|RNS(3S@^i?sIs^wdwnv# z_;5*Y!OVD7Z|8oSp5`23wZ%I}yT?CSuX#tOZ*)h*$Cp+N?2_NB^q=T>_ZjE+>igVT zP_{9AMln~1{i*9XS!Ff$iyZ4c`$6{x_bKlH?;2-fTpyhf-sa@IleNE|PNoE}`swiT z_GossoMrnWe^`Ez%^veL6t z_trhycvj5>Z$^|3{t_LlxNS*cM%Op(z5RFC)fE$*QpZO>ZNG9(_q^)&N1RYUBj3F4 z^cFw4Y|w+vd*yx&S44}m+tgoI`C(-BZ!TP#KBDp8jlZPwea)@?9Yg$sQodf_JCL~k z3jOiw-UJTKewRj2gkDtt@(?hUT1RZWb3D3 zPOwzHLK}i3OV9ZelS93SQoFe$l1=_u!5`vX-PhCCrML2eWT$wPy6>$JJ5gne?~ZDM zz5M;dSbsMKL8!OC3&~#}E!VlbFP(|$&|B`f%5UD1`lENPeRjNM{E?!v)8i+?{lf3V zg8xK*T~Bjg7|hN(b#p5|4zKMV+;gGbJou8a`wTld_ha~OxH0puy47`GW?xNSjb>SI zxRJ9tc(r#z+pDd=?Rz76E<4mcyE|O<&QBXV56NYQoL=`Ee_iVzIuEoTtiQbBkLk;T zDbZOKKQ;O_YvT5<*L#k#y!w3xWwUn{n%ie}&dKj;U6{(GBKL*lvGAATFU6gs8O}D@ z%PTfjURJp_U1QA(o(j_TTY95VL*?7)-^CXX?A&{F@d9hTn@{!+p7Fm4H${Vk1B=s2 zLz45o!!ymPg|;2f3b&8b?w&eddUX1ew`z4 zP3nNS)cSW^uda+W_7Z2Yv&sF$3A9T;w&vLt)|2t#@CN_<;Mi!gIM`={g9?ZDP40iX zG~L-kou+rFI49m&nfO!EgN9_rd_QDv#mM-(Xj5vRs<`^D^m*0^!Q|kL=)CaDLS0w5 zc}d&L{RhQYdZ$HGdQMr}v~i!pBQ?W^d{#BQ6m$&D|B(4!CxLcP9UqQz237r3_fTf< z(wkkoHBYSu6Kk8xoOgJfD*LP&T$^VzCh93xyif@a8(lx4)yEYl;y{k72{4w*7 z)OS`Q{P|KWkd`|l`?wwqIrJHL;Q z3}1+Qt@X|{?>DKZ(x=%>d*ZqFUG9O_y3)T3N$KTiPn~a@9L^}@du9)) zcVpGHjT5U*w`vAv6wl1;-n@L&@q=e(M~geZHanquQstJ;T>pl`y5g_ow?o%1)G zx&G$vq0WTdROg+ZbJp#!@!^56a^c_^^+(xr^E;P5OU1SC)(lRsj-IzCR!pmJs_cy3 z?|QTIuHqxL2U5Cq&uO!NIZi{(5J^U#p8*MI-Sk z%IVHao$I}!j+FZMQVw6UTUek-gTYT$xG3W;knTx@#5%EKV96TIM?s;&n!6ol>=`?9Xgv=pWP~P zOW*k`QxgW=Gwd$Kkeh`-K1@{kr5>E8H7&0=!8yquliIAj_d9MbnH??v6f$UyDm*7o_Ui#ud8df&-)sWHJ}(YVxM)hDKw z7jExZ+wqvcpEoR1VO=_~tb32{{RS3>_u83^pPruzyceA_+_~=6=_f1a)f`_pz3Q;+ zKfOiPtI1n>|IieBy#0PMrPQ2%zi(6jlf}NmUZoZOn7B{%uV2S;@IjPy@62qWlMeUf z)_Q-nZnA&lEc3Q@@3vOO3&OSW_n{v&mtGyP3pW;T2)~HcF(f%JjQrn)!<{43Bhnk~ zsg~{CkXu|iG;^0dGJZF1a%xjsr|X?*(YeI~3Qq*R;VxlC>8HH!KN($^Jm~M;H_&@) z>9Xv5^;_1@&z&7z=~r4Ss}39b&tbPW+>rV-S>oKBnU_7sIXrl}AnzGW3R9()-f?ZI zmUG&&`4^q}nf-0I|J1hLo~@l{8&(W@JGUvG86KMMu3cXHOzK$weE(o~S=H|1b=|?TGrl_nke0a|V9ud#|uI7^*7t`@JjD$7k+Kx6A6io2tuHW+zpoD(|k`JvYi- z7Ou!2k^jiwBe}*tD>|m|b>CHb(~Mu(wsd%~Ptqwl|ByPwIYn$wTk1CzcUAP}w#q!2 zI^MlUoQPP$=tBK>X#9bHVDXB9s|Nn8F6!rp?ZFS>!SOBeV`AE#vfi{8xdW;D^sbGa z(&N)3y(jHf)y=H1kMiF1erG+Xy4!~fOZ+{;U34z(9ltSn(O(tTM~8;1N)Hs~gb#Sf z)@-Q%u3|TPwEWYU%2NkFGc?sOAu~x??M3di^i1!haO;65`xX|~sJHeJg$3=SH(arC zS>IEs18NrKIt#bAwzp4=|Jrcn(9XIutw&4a?Ge?N*N2%qgE@r_@mUr5sw(gI18ZAX zwC_~dPVe(tWw#bb^xfIFqSzMy*A3IhWj@W!&J4~>%buk-$P}E@ylieo)!6EnQagqn 
z14j(3DE$&u=*-Ur{_^6bI@f(z;ef)QgX`=!(s$>sO`q&uBc`gtotNIf;-boBxw})1 z*2lr;+F@&>P0^hAtMHm|yI_JUSB@U&A2>BU!o4=LI5j$%>n~O1@-gW+vz=Z(euUOw zqFt?59sf42wI_Ltylb6PgG&lc1FCh7E{-pZwh8{Bir+26J>zr2u(YP|Myc9;y>e3H zMKyP%&Incn2c}yZ_ZU8>c~wPka%K2{by4b$)cf%vg{gga=BEqGEGg=Ye9P*Vi9H`A zODp!R9A^EbH}Py5xVGY!rVWjwG8aXEP9|hps$a++oYn(X$ODc&Bc`rIM)VfLL9}H}_QTyv*O-t>Xv$ zdy5N7M+g6io{VowK8P<0z9`(%Z|5J@3GM?jJ7rece+qy0#nX5bGO5fnsb;Ux19ridBy~ zHGLgA)x0?UdBdL@@5{cY_~+rw?`!^D*^)d_Tdk-gRM*`=&EG z8d0px$Hg^4M{=06ck21f((Fyy7t*hI3sfI=y>~)-NNz^%*i3A%438^*Quw7bTYTEA z^;i!Gd_I^J%If^J3~y_ZF*BT;sc5MK+VIPA#;qh&pu!!j0}%hw z%H-hS=fYnKr5le`BK{}ozE8jX@8X7&ONO3WzVsF)01PWo~_@m z`U3Zhc(%Q7W_@mb>TOjjUSfajUGLr*U7ugm*EMjl-yI#1>=s`Yj4%DYxRZZiJl0*G z-cIkcc`>!zYf}~W8L4#ozRaiD$=QMzTDwPo3IczAuzy$^)u>DQ@aPx+@ZuRd=RPG` z=p2_`oF4CeVm%gL5cgY?yrG$YWgbXv>nw=Jg~tV-gkzF*)_<%^qNOiqihjn3D*EjIbP_-k~6{pj$CurWN#w@Nks*YSy||EqYqBAdS3>9IaY zJ(v4O^-)zv=SHXNyv<^hezDGu4k^7`R2_4)M{-;+Cx1fsgFS!Aue07v-<~-?=`L*5 ze|oS_YFM>bnQ^}kcMgBHukyBZw~SW#PX(jw&Gy;WMWxm9fZGN;BvbXqmp0`{Cd)SO zZcX+^rmxQA(nqIvOZ_FaK(#`3&Y7x2{J>l49Ay11{I7qvPDj2K&rr^KMf9KWreKlk zwwjb7SgV~hHg%)Ry}pd6+1EL)JKf$&5*QQbbPlC1ni1a@^{W!>Kj9h5ex9rtp-R=q zmnnyKfnG2@&zb4&0Z5gX=E`DqU29HO?aUq|NihWYnuD6w~H4z+vu#|Ys#9P9!(1ORi^(3`;BCNv?w^b zG&1N8o{rD4Znv6M(fSAL37v4rThG~>Q=8J)rn|lE-JE-)JHg$?yMx8wlmeeP<5ei>rKArSkEN& z@oK$;G7Qg%s^S$=XPq>{GV&QRU9Ak7p)&$*uFP8#;fqe@-5b-`2ZkT9kdb zUFSTSqg!MhPm8_?SA-B~o-@4kF<+Lf^QqsHPE=YFPdoQ-sYWvnQ zRahLYT-KO)Z8A6hPC2q!$={`g--cH7RXo-@*S<+Ldix{~*)LhwyRSH(yCt!?H#qa1 z>y$NqT6nut*S{4%9V=@eg*taw5kC~IRK@3k%0f?4<=!o-uiBuy+>qR=`m)z#9k-SI zit#(~`|*bO=J<|yqjJ`Zl2=vlcYHjM+-$w3in5XRZ0ic;2iC}vexusMTkT6!3H_tNV4bD*871Ii4zhYo0P!U&og6i(lzH&vfg_WKDdO^}6`eL-d}9 zh0$`=iJfBqD>)`<&}yBp%=^XaFMg-8yt@j;ZmIm)GUe5Fwm0f*+>Umu>PB}H*V=3U z$3D?slpG$Pq%7Ns*4QMI?5UGOe^8ceYpu)aNrx)IPO_TyF06|b-#sjg)@gsGGo;hS zhuo7~rAX;8)ip1S;&4v%S~N#B4fV;m`1+_ZUJ>7%+^aWTKVee-_KZQo=cuQj+P9JpEU88@=`o%ibepCB%TjlTQ z8t}6!Z17HNuZ$Y^VT|1{;Tqi_lxh@(SA{NmR0(0 z2UU=qpls*)szv+0Tp6-DsnCjyQ=Qy4@)aX_TVuIn#~kY#WiGE$rP=?fu4$2~?=Dwv zvP(~yjDSN=MaVqm{i} zs;ZMclFjiIs_=Rh&Io4NHi?~h^?G~yONRlC{FMVD4i)|(08ts~#DwZx$ePFjT7q=$9-heh!$l%9h zy(&XH;;f`_hxPB|8LjFG>i9fg-z}8=nVF1Lj{jZN$<9)J*4?UptWw7JcGcT_qAbbh zsy%zeTA*z1*<$t^Rja#Dy#FVvZu_rR>|tw;r1OfZ5_XifovQg?ul(z1>C>6Yw2YM& zT&Iydp8TMCj{Eep(^V}rMYSVoeda7xP0f^U9i+;qv@+q_s@wH|s!@(qUCg1{^>h^a zST(orsk&|tRklr(EML|;;d@n$_!o`-Bx%Dds*d|n~=e53d6Z?Es^)3ZiT->l9A70RuT zR(^aJX~;0uC9G4Hd!(w~wv(0Z)D>@u-SG9~>AHqK7O$(GtWm$;UUF>JZ@Y9KTUVN1 z4nM0RuT?bw)QIk`9ek;3_BLvL+cl=&NXO1mrBovS`?9j}TWhovqz$XIujx(Mse6sq z_2V>p`rOi`hptM)G{P}@`ec1}ZSsij-6N^e2j+dP_-CrwqfJl5&`zGNyHlBHx>#D84PAl;RNb(#ETwjh zjSeP_(t>BDYu&1Bpvwn+12+9{*$AYf&^vG$Kj7+B= zTPs7wUrsZp3yZI@Ptec0BtgBvMfay~&>HFHCjF*D^}=uHxovu%XhC<vOD} z=_g1JXL^4j6S@#rNFH?jrkhiur_w8`R;##L=z-22)zX$~&7f2Bp|c2`vgpS}=g=R@ zpX$)})J6}{-B`D@BsxTrH~pNE0=;xc>8Gs5H_Ed9qUTfn{Hta`kBDl?dAM|dZXV5& z>{!iYleDNyxFUdihdjfYN;9U2Gy!s!S~cdAwTe3f<#UApOsO*f(KRZqc~V|{j# zp4+4yNarDXwAqr%SjmH2`6kW2N|HjmEq$79hjbXE6BK>3=oLuMY zp9!U+8S%F8zc?-EYh86t?u%Rp42GI zH$rRHD5=rot{{2RLuZ7(`&G|crrks5({5P=It8b-b6WH~x-@PhnU2)w=pR9dL(Vq z2htBC;`dSpduH`;-YNTfpvuEAu`&g@c znQmj-s?vChp3b#&728Jhrq0Nh7L3=mU+ekb=^k{Gqu*Yvd(f+gPJDEAp^pze`8s6_ zHfbg4bxcPXI=gf2=CX#Mqh0z-=NdW_)1xaVi4K>vs2QeLVOIN`4pMYE!+P^IJe5zac$3)zgI$DYk00sCsUb9O(5-hq5lsuu2+^ zrDfl^dR|8Jqst^66S3e$>Bl)zPh#fu9Hf&v zJUUh!C3`fAh7&eMHO?=w0?(&duwh@97Gkj)>dXR@AjE2XO>oo_6S z0vpgP9XC%k_3rdiL!;PFLp2Y2z4b~*IPpLSBzju*mFu|aA4z{w4Ok;$q;y6`7c%-8 zT~O(_ONT!C-{Obp#>gjX%d6BXdyVH~>}UmD8mWM%vtz$3K&56})M&UL9q%HI zo4$VZwsti?dZg3g93A3QbSR>GBD<5`-E>aoXRJM=XSdLQ*wN1l(&d%XIr{O?naiv= 
zo`&9ygC$pbO*ZN)bT_Ae=~G9CZu}cPY(rUpx|h;~2MOD%am(=MoLP~qJ_V+VHG6a-uNejMNga|eUIP2ruIJ#_ z=s$_>&~r1R=Ym`4e2vv&uaD5^(|Qv9De+2l{-RsvfJQ*)?oeaH{?eD6u9EaMrBflYaGl!M$)LR}iwmqYjw}l(%%!o|s))?z+ z=1X78;ra>^iZDH83!oUUN**c|XHx`p3qmrUrZ%39Dzl@7jinni!g#<>C8SHPiKs-g$Iaq$4`o&pt%o=z@(M1rgD!l&+$jiJ?E^gQ~U|Ew9|OR(fA?H_tC(qkMnL;p^^w=R`rStGg`gK5|!;4Q2Tx7~kw(8rjr<@A7L=h3l?Zqme5phmhcv%AnW(+M!t6R@L5+SZdDJps9I)>qt> zj*WCX<+qg@FIEbiz{s)O{G3kr%(F*Qr7Is^FOp>GxyK#`Wz!Lt4z3yf%No*?H>FR~ z5gJ{fBQ9r}=qqh$zLuWKSMnU4ZGIjLtI-);Q4}v2|c2 zPgkLnpx-*}Dr^j12&|39distIwT!J>D`0jDR@>-fP@WsTwy`sb^p@^=d~bRav!Ad; zbmOemJy<^wLZWNX%#_AYM@u|Yg`Ph|Kcgcc+QNJoH7iOlV5}v*Pmve+zC$B28bE*6 zD#?_SSf)oQ{k`c_8%qM6dKMNFYsh##JvFTvVuNzJ2J4JnW2M0&L=H%mKCr9?JxJ*u z?`mxHw`Cph1z1Zon!bSSAI@>n?G#(Z9st2JQ$~z+LO;Dh_?A~|*u5gsF> z-!i>))A}i0YfYEbzVh51?M-?(o0UPg7%5M|%2rCNxEm-8w9I~>_i}$3U4kV-NeVB< zUIy`UK8w?1APJ;H$IGmKTc?>Kc}|=43-4iDu{`uT$LcreNp<=c9pYq}xeK_HuE(9a z3w_SPBFw@s<1P9+CmKDmC02<2%YLEfb<6KT}^48KU@kVq!rUN<_6g$Ma-~otz@XYKqY&tuf_>#HM-<22TY^tPTv&9l55HyYu&(SY=1C`Fbi^u4&+GwX zld!t%Z#vo{S*#8y44uW-dYV^{<_==Ss_`jeFYFBcx$*0!Z$0ZxCs?oy=L+#W^eg7h z*jO+XBL;=g+mbtCwdkeF(^xgo7+Omlfuv3BOqL=LBj`CB6 z>UzcsmIVXA3DJ+ZsVp0iG=8H}&j)jY5o$F?Y%RUULF4o=E|&EG|BcnbM&tcisd~)< zw872=B~(bC(Y)@muJUZgNf$~y1^Q06Yp@{84(B-8-^axXBdl| zlFh~58>!)w@%Y@2KCak0?uG3mqQEzswQ1Ch(V0QzF|g};%g;1e65Gt_Xe8OFF-Q6u z{l)5oSwS%wU16*p^Ug{V%$N~@9{7ZMZj{%Ld*YjrB8*H?za!rCrCIn3b_;eE&yJ@` z=~LJ%dEE#+SPe$vAQcWz4mttM< zim@;~u^_x3NQ2RXgP0S$7yJ;*LLqO1l<+LbANGRvs@0VsKcZHwE^!0a2YJxVHKnIJ z`u`Ap1+D^dV{_ms*mc-AI^wb;(O0ZZuf_oa2Eh`ulr&z$4jEK#EGkh3(K7KBV?$4j zMXS`$@SX;pwd%fbLnans1z9QjTIcmlFgUiMzr43WomjyUnl0mPk|gksXf+H7z3o9n zSp85dz)!h5JBLW9x-2(-i$7-HV^!&uY=x`vOigr)%Ii z@q~_60ZHP^y7ffZEMjWL%39M)w@?4pYJAKE3&Yw~Nrnw&?Ws}JN(=>GRii6l@QA0d zci=Sa7LkR)<*Y6mX_x}y4SYqX=3#6jEE#wYuZ_QE*Ml74eULSt2no^`7re|)fomx0 zsg5*&@nPF?x(EH8Lw%;XERC%8?M7WoE%_DkBV zDw5cspXapqx-<`L9rI?qNRXHb-^Xcs;sK=mSf{8iG2o zp6qjYD#nM$MgL8#+)$P{HUngbUFBa+`+5wK4{6p^d~ld=f3iw&C%~MbN9BnS-;jGa%zctJ|Y1 zz!&5J%#Mep#V#P}N<9Y+HF7k(T&cW1=zgTpF!GYV#+KG=OtCUbu$y$8CqAs!6X~bS z-iGyIwnTNjy`itHyR%knv00J;1+&u@(OYdorB>p1Oc=5QPe7u4u7OYpgPreAR$b9LN4Lb;z0yBk8Bz`uS z083!-ud&@=641tAU5l2(4Umgjt?^C#u(Hva)u(0R)+W$HW!wg#V>9T*4sIdahZjPN z!DR;DCep|8Wgo|U&A)uq*4IQ~M)&$Po4WE9;6@@(>_|x(2g8C^fi1DXjJ!wtIMj1t zw6KRnz@PwwUS0XZu#Amtts=IL{0A%|2oY`o%_kzk@@>{iVxi!y)aqGxAEr-(Zx|DI zB+|~7Jql49xCc*!)%7JSd<9H=vpxq#hp|Av3erk=1lW801=s;}jE%;64QK`Vk2Qgz zV=m}0+6u3N$3xmge8ewUE^IbDGAspn2aRJ-p>r_3#3$@N>_mf}fwmf70?*j4XYi~x zjSl~eA5Ck9opbfdP>K-i$HfE7*ywsj-Ng#1HTXO(s`uD=Q2- zBp+#ZZmlE(gV?BNVPDCglE1}Q!^xp5mR6%A`C~uOEIc(CD{LguIFiAZ5gWJ3CNgq< zhp&R0h;>aUF0)ym1BYPG9LbHZoAkt5{hxL3(e=!QUC21_tR1=w%v_af5jJRcj0W74 zD(j}X29L;mvFlhn;w&;r^}3hA*NhJjzCm|{QOcI*$b1b;!5ZO*SZAynNYP+__6z(Y zY$VSmR#_u0p%MYjjU^3N|Blpi!B%8#&~)MvEF=6qtN~FAcBxmNY|vP-D|kG3L3{vc zGOyo(($RCGUSsKoNgv>Zv0je68QOvm^(8Nm3U}|+cie;jgNQ-k@D@#)vtbPCwZE~e z*aIws*@Xs&__8KdT3Pgsk#E+W(PiX7o)Hd(l{cIQs2;ljZpW^HnXu$=>lOOCOV8jM zxJHmKPlD0YJM?v5q9^1;&dk@>F!}Y$(1Tv^y)YJ;A}b4;c=&`LYy{ebsjZy79Ym_4up zRv*L&@7=GlHp*W#NK3ej;cT$QeR{5?j5)C<(La_DE`-dH(MPZyc#o)uC^fG;p`UOl z@OJ13IRf+mq>POKc^SS1OB|N%5?+gJT(_i$-0)gvZ)2H>;@Anq>BOTZ`wjL$n~1mZ z{gqnbK3xIJYa&8yu%-RLUH}bZ8(9b9JUlvHtD>Aa#@~S%(LaM%zy<8ZqOz}`ODZOK z9<>1MUhEu|8HTOFwo&WB9*5b2eZ}*kF<9SbeaewFhV>@Dz6q`dC}tpxrC zejh!=6L|XU5RI16;q_oED&ht{S!;Uung8U*b#6ce!smu zOAvlaR?1anLrQwbeTej7sfc`;3;PKV!rBKV9+o%q}mcY4Y@U1D+C-GW{8&}cIQrJB8Nj5Y_jqe8IQbIh#ZD(0Fq#N& z%KF1!AOVnrBQ0Z;NC*2!P8g(TPlvx-6ktbR0X;Q;#&PlvzAm!mD#*B~L*4jTuGGj^7Y4)-A^i1oq_z#OU`QrDq{u_VPVXFr2f z*%w4oNB~Vm!{O0cV|*If9&m^whGvjP42O*+hB*flfIzUTruxBTxX6yB^>qAbkF)_# 
z#HjhMzib7#1F;mgg{ShjsBw~qM>6cT2F=crW{`_**S;jHW8ygwA^sgkHm4_d=q|*5 zSQ{f%WDdUo2V(LWgEe261?*S5=7OnIisl{K5`MTBSceJBKBOJ z?n3;8m4neBMg?z?S28>~XcM*}rybs^yO6g8ub^wx?|?zlS`nh}qAYifMwOQ~f&sAS zuuDi34~Ru0W}(W0dNpho`v&ybs-J)!;Zv~IcoJeTqG>!TD_~GK$SWt*Z;$}Y#|VuP z|5MOCN|F^`k0%p15r;%&$>GV#NuoL6ax#itnhW=VOUJ5^R~e>%jd$l7yft=~{Q*+I z4)*KXp&A2d4?h7CW@Xu_)JcM5@c;NP&VDVr) zT}d2FgA_m~2CFkl)&PA$QpCa_NA``$zhPy_*up7d87hR}nzWuEM%Ip4AM}Q2$M-W5 z&@mnr%)yLf{U6?iJQOIMuLeslU>Nk4h=i}$(fCQKBn>l+M?msqNzklbJqu4w7Myqm zwt-dRYwRMnl^vd!?tw$V#Ap>he?WI2&o)@sg5u$5$VZzjDlt4*f{Yb2HP)QDfN6;J z(MS`uQB?z~CKADO!(Ewr3bJr8pR73^1iM8vhV3zVJdh_=0iRV@)&Q71{&Z?J84dmg zZjt(=-ZFaOCorpcS2%n$q*EGzhsM5vjLCANbwkVRY2pktfcTH>WvDxmYa!|<_QA7} zd!X(YONng+OOtN_%OGX4CT+SWJTdGZ*#hduisffxi(qZR<9rfbLc*XuVlb)$+skXi zZe^!%5B@a~>bmkOpc6!I#Mf9>;_<;+L9Ah&evb6Ozwm*u251SMnrcAS3lC(nYSaiA zz5tdG*^r$eKF2B&`xDJEChR?4k#$)w_Mh#JudFc(?`mSkSU9qcCK z&F}y)TjVV8nynf$zK3c+c507ePHLI3pU94!KYkP}jtAuV_#Ut&@jWa?q&05U_pE0u z%LjVqdgk0)UU|ld9KaOd?2P&^N-V*`Jn14Mjq*u)|rV%E=~QzXk3CR0Uj6yBMrf!%<`g|9`% zX3w)n!Fe#V$O2S}Hv=o-BVa;VP3FTYu;%O$@aI;P+>%fN6~I#12|#U8a*(+63&6WgVi?tHon5uE+dXI>@{{D-8R~c zwT9V8V&oD{d|))1c@aAh(=c1^Gep;suhS`<@+zT?)S$4x@YHxC!*>vE!K;AISVJRG zRvjh@DT7_v4IoIcG+NBAz{8s;1~!bCoHfIafz?1_tPj49$`tl>l*26N(4z(HQHcnQIjz4A%W%P4MXKCoszpRWWa= zuRm2qf1v98?aDtNsb_wz&vN#mUf01tVwv$CaCl_2sKy>DIdrSOmv>=f_c$Z*t*#rY z-&9LtYb1@48cBns%Sxi_@YI$1{TN+=59t#-=;*A8rKq`1tG`}%hPOm7$CPWV@$FxV zXBkyKCAD6Xg%L((X<^r4iXihkqtv2%kJHl(4g>LF;aj!##59}%>d>>+X$-aE6b9=_ zz0xd3xlT`P&>0U`Z_#d-Nz`iWTTAMnX_T8aPF52uZtBMHSlC2Mt8HsE_>ZCbJf}OTT_y%am#GOEq8aff z*3o(bJLdbce5v~WOnS?H8>vr#=!0?`$V_dGd8pQm%*_VLrA~6hHpAP2GVmAZEiqh& z)}>!hwR8roN7_6|Yflas%r`-I8L21zDET)_LRcCyQBC4R)!$U_3Fky2G3^-bs$Vs- zMvb;j>su+=^yqBXM%`t!ot72LH2rZS)&*so?)0Oh%IqxA82-2ttb&Y**z*i zj4$Kl4}Oezrcxs$i^Ma~=z`?K$(wOnFESNQDPgLTrSOWN@tNnyO^HdgnJ zB$aQp2Vj{8OH%BJpCtJ}bL>>_fnBrPd>viXP-Dfx5=__49q|~F|PSXhCDn8a+sx>=KRk0Fe z?3klpwtLu|Dy_k{dSXdWtk)A+$xWL7_xih1yW&vIl+$LLWj#PWLv?a(sTFWKv}uTRXq;pg==+Z~gKZ?eqV8kz z6cswpNwo&pv_qp9r4g)FB{#m1xD~dqS5MwTnm`2s9*@&koD$9*i1Vt0a>D{EVRs$M2XH)Q$L~*ON>uV*G!zDX>>?e`{Cq2fNqdm~g zubS_#vO2Yz3t2V1cDuBwO*%DB(m^xX`D=9dTFJ6Xy4;}?giVqRUU++r7u;_c4cKI| znA_-SYc+?TG;*TKF=cDtUe=gi*<#p0bel}xXkE)Gy9PaPgLIb+1Lr@$XCt+8i9YpX zd1j-esqlO-06*xz3jKDn=KiflN36m*J@z#=2>G(}iO%pZRHU`)PFTdWG-j~$x z?gmzQl%z@4#MV8~j-RzVCu+S*l1_!@+N#-7M>AaW;~aviLo#Qd@Co266PKL9U5bseg+nvEG;mV`X$Y|M(a9ES8>*a{u5*xM`|8L>EvKZ?HfG}zsFfP zn1`>m1Bn65sT(3o^6RY8R?<)Es#>&qoOs+@Pl7Wh?#DvFN)d$++nOkqoFq1-D19f> zpO(I_k(_ecGvFCeHoP9c+fut2-WHE(R)HvRljg&fhC?PFLY&ngt=w3CCM*Cr`A3Zz zIe-+=7P5szm(VLqHBeXiqnc8?_Xi)qJJ$fSgk!4?=}FXV2hRK)cu( z)}AT{d^2pCVPdJ40sRr{fHl#Ejvexq8AFW+ zd>XY3RQQ>AgskuyNt<&jCE0MSv85FRPwV5l3nvobqXzU0>Zc8|HW@OQMkLILsW+e+ zfb0wP0r0ONe(I>Ht;0`ll2ltXTF^FGW6llmJy|!-!(koC?2XsTfQdn}M452yL{n6~ zU<=@B+jJTn&eVkYpigO1&&as)XCLL z=J2W1526JyGH}=A2YrnkY2desWVsG4pzfPdVw>SHh)>{y%;`CNAbcxvy5T6`%Pc(+ zJH>7yt~Wai`vi_9c1El5F6cAWM_9j3J(ZY(GwEOZI~Z=2&0d;%4&b{ zPKDKy2JxOb)4pE6H7rcGu7UX>P6a&}mKvUgeSqD88Hdv%3ZV9kT28VD)K-I54L`=e z1`Q6qve0Ok|UmRPV;^M_l)x4~YL-4FGtVdYiCmY1~a*69gwu<+kROISfzBBD;>LvSwZ z3onL5v5i0IQ^RG`@keCEVD({!$l1bbqpifnyxjq0Y`ATh4ZINCrm644s*vjgHFoLl zX2xXR&6%KKk`#y^{+JpX6OB_N05=V1O5O{0)12inRrBx}tTwqwqyfk8%er8@$zY;4 zAa^LzoJOj}N z`6jXr)bqh3!l7abu>~-#tOjh;czvF_OppRN3*Mbi;6JfJz0F!6J@kyvz*FLxiRk$r4WU94?IfE<6lhp!BSW(-l74LfScn?5z-s1`x6zz z*^~VP@p8Tjjl-vzEFjjDJOy<~)UzNn-W9>~cm|9lEJcUzME;h&M>U4wO~DB~54}dC zksX-80=d% zP*_(gl-Nt`8N4bHAC`$(lJUnPqG{kl&;!ylr}a6Rg6AgJfG)M@T4ETqiCsZW8k+CP zDzTEBYA`jd%$7)i^@E#Z%%*a~)FPpiyeWYvv%(-jQ+-Cp5({n4PV!q;$!I;R1z*eh 
z@ijIC1Z?Vl86hVHsPZDaLDfRPRv%0R6M>~fQ}Jok0kikf9cIkxGX}JdN=;%{7-BpX z`xuK!wAG{g5l6!qqgT{F5pA+_`pY|j>OZPEU|J3KW=CS-V34qeuwLLcqDS&l%#Yjz z62&(Vu^<=LpF2|@X!85)PTo^v5Esve$z$zJ#WZ>YB0wYXyfD(Ba^g&wOWyfF#urU6 zK6#AhKx{>p&!8-xj8>9;FsDA?iQp=+8@#o^WJU0tFfQx>a1uBMy-W)iQXS44L>N2X z0=tD}GANRq0{F#bQ}A8j0y09>25^oMT!}nDV(d=C0x~PIU_`;4x{iBbgRm!5_f%_j z;2E*L_-pJeXQn`zL>b@~W`bvjlOa#R{sO@hU&C}`9kDd*Wp)Hr9;T`W@6RkuodOvN zvcpMuH^q6!7&}edd6FMytVSXd0`^s&JB@6@g_#50T?Ajgj4< zZ_Krfh|^x(+WAPCl_R#rc7qzosW4VnhIo{DS8G<_2lD17m%#gqz|y9^5O0QVVrlTl z#Alqp98?~IF4Ww}8KLV`W#D;AK_NX=o5{A zb%_Or=vkatMt*SIT%mvTNjwqKN2+)b=7crJx?}OMtE_}s19~q~k5XM;drpW@QNalg z&XK`2l40o3&*0GEL$Euhb`BjUKSLHB&Z1u9BHpMLD^jKVP*sdO07jg1V{yMxKAn~u-kYrGzsg^ z`7b;xnK3G~sn!}9maEXN={_-Kv}Racx5uVXgpCiQ8%*%f5H(kZ^jDZA?Cm; zFjFimn4Z(LCL3vDMw8#eJHmCEhz1XU1!Ske2%*1NWjr@5CYr*t$uy<34?zM%n^c{! zYFHQ06nqBrVm8Pa42@^NSF;yj>#@PuDNgyoVuAvQec74hoh(Ta)WV#|B!S$>!Q)%k z=~L`5o{p~sL$W(TJ|IDMDbbDL(7=6E1(H+Z8Q?WoIw}f4G31wEzp=vj79;>OVUhMO0OkM}YBx4WOD8#D^5|CBE)R9u@lnW(5(GqXc=v$D4P?uw&UV@UcYp zAVjb%IZ~JrYG$zZP5M1p2F}J)B<|=5! z!bt_94)8nriT?t1BA>zh2_O_gO{mSASxg(0yz+~lKsRcFgkcHyd*dh zX<{R=clb5-D0|K5;Qvlxb!xBkJ~Z|SnRNCMzLZ)_P!Pxhjta(uS}O9vXaun*kpb(< zzF_Y*YCP1Tg28#uLB7negFV1ISbF}YO4V2lRuSCDT-h7ExdYj;bI^S5&-jr&Ctf(8 zMl=LIsS{s|Wq~E2o{6&#{1p2F3x$MOX{3RbHfNW>?bu^102#d@Vm>*~Nd$}lWMYT`&7&y3jok|V!A}uY z!ok8eV^LUrBw=h2egaOAm>(7jh5*)ylLH3LW2Zp=^%^Z~Cb)@+11rY-!3yjW6PdA6 zShfGf44^}>9{V5E0+WLz;CawSb{BY`2nNqMLO+4g0QX^Guv6?8wHe?*G#pC%kxLuB?`M(;1O795We7_zX@FQzL@)13`dM$p~%GOa^N|lPjYx z3tLRx3=uk>1y6#$n(=b_6-#fdAG?>mfaJifpg|&NY>vq^pcCv{FbFlDXcs$!+#=`4 zHs}ddloA<`*CA4e%Qa|)7_~w&p<0;wEn*II02~81V4_PZh440DV0;qH0=zrA1!iU+ zf% { const splitTranscriptAtPhraseEnd = true; const speechDetectorSensitivity = 36.0; const backgroundAudioSuppression = 36.0; + const lowLatency = true; const params = { audio: audio, contentType: contentType, @@ -279,6 +280,7 @@ describe('SpeechToTextV1', () => { splitTranscriptAtPhraseEnd: splitTranscriptAtPhraseEnd, speechDetectorSensitivity: speechDetectorSensitivity, backgroundAudioSuppression: backgroundAudioSuppression, + lowLatency: lowLatency, }; const recognizeResult = speechToTextService.recognize(params); @@ -320,6 +322,7 @@ describe('SpeechToTextV1', () => { expect(options.qs['split_transcript_at_phrase_end']).toEqual(splitTranscriptAtPhraseEnd); expect(options.qs['speech_detector_sensitivity']).toEqual(speechDetectorSensitivity); expect(options.qs['background_audio_suppression']).toEqual(backgroundAudioSuppression); + expect(options.qs['low_latency']).toEqual(lowLatency); }); test('should prioritize user-given headers', () => { @@ -538,6 +541,7 @@ describe('SpeechToTextV1', () => { const splitTranscriptAtPhraseEnd = true; const speechDetectorSensitivity = 36.0; const backgroundAudioSuppression = 36.0; + const lowLatency = true; const params = { audio: audio, contentType: contentType, @@ -570,6 +574,7 @@ describe('SpeechToTextV1', () => { splitTranscriptAtPhraseEnd: splitTranscriptAtPhraseEnd, speechDetectorSensitivity: speechDetectorSensitivity, backgroundAudioSuppression: backgroundAudioSuppression, + lowLatency: lowLatency, }; const createJobResult = speechToTextService.createJob(params); @@ -617,6 +622,7 @@ describe('SpeechToTextV1', () => { expect(options.qs['split_transcript_at_phrase_end']).toEqual(splitTranscriptAtPhraseEnd); expect(options.qs['speech_detector_sensitivity']).toEqual(speechDetectorSensitivity); expect(options.qs['background_audio_suppression']).toEqual(backgroundAudioSuppression); + expect(options.qs['low_latency']).toEqual(lowLatency); }); test('should 
diff --git a/test/unit/text-to-speech.v1.test.js b/test/unit/text-to-speech.v1.test.js
index 4f30145972..4bb04b2b09 100644
--- a/test/unit/text-to-speech.v1.test.js
+++ b/test/unit/text-to-speech.v1.test.js
@@ -1,5 +1,5 @@
 /**
- * (C) Copyright IBM Corp. 2018, 2020.
+ * (C) Copyright IBM Corp. 2018, 2021.
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -1103,6 +1103,582 @@ describe('TextToSpeechV1', () => {
       });
     });
   });
+  describe('listCustomPrompts', () => {
+    describe('positive tests', () => {
+      test('should pass the right params to createRequest', () => {
+        // Construct the params object for operation listCustomPrompts
+        const customizationId = 'testString';
+        const params = {
+          customizationId: customizationId,
+        };
+
+        const listCustomPromptsResult = textToSpeechService.listCustomPrompts(params);
+
+        // all methods should return a Promise
+        expectToBePromise(listCustomPromptsResult);
+
+        // assert that create request was called
+        expect(createRequestMock).toHaveBeenCalledTimes(1);
+
+        const options = getOptions(createRequestMock);
+
+        checkUrlAndMethod(options, '/v1/customizations/{customization_id}/prompts', 'GET');
+        const expectedAccept = 'application/json';
+        const expectedContentType = undefined;
+        checkMediaHeaders(createRequestMock, expectedAccept, expectedContentType);
+        expect(options.path['customization_id']).toEqual(customizationId);
+      });
+
+      test('should prioritize user-given headers', () => {
+        // parameters
+        const customizationId = 'testString';
+        const userAccept = 'fake/accept';
+        const userContentType = 'fake/contentType';
+        const params = {
+          customizationId,
+          headers: {
+            Accept: userAccept,
+            'Content-Type': userContentType,
+          },
+        };
+
+        textToSpeechService.listCustomPrompts(params);
+        checkMediaHeaders(createRequestMock, userAccept, userContentType);
+      });
+    });
+
+    describe('negative tests', () => {
+      test('should enforce required parameters', async done => {
+        let err;
+        try {
+          await textToSpeechService.listCustomPrompts({});
+        } catch (e) {
+          err = e;
+        }
+
+        expect(err.message).toMatch(/Missing required parameters/);
+        done();
+      });
+
+      test('should reject promise when required params are not given', done => {
+        const listCustomPromptsPromise = textToSpeechService.listCustomPrompts();
+        expectToBePromise(listCustomPromptsPromise);
+
+        listCustomPromptsPromise.catch(err => {
+          expect(err.message).toMatch(/Missing required parameters/);
+          done();
+        });
+      });
+    });
+  });
+  describe('addCustomPrompt', () => {
+    describe('positive tests', () => {
+      // Request models needed by this operation.
+
+      // PromptMetadata
+      const promptMetadataModel = {
+        prompt_text: 'testString',
+        speaker_id: 'testString',
+      };
+
+      test('should pass the right params to createRequest', () => {
+        // Construct the params object for operation addCustomPrompt
+        const customizationId = 'testString';
+        const promptId = 'testString';
+        const metadata = promptMetadataModel;
+        const file = Buffer.from('This is a mock file.');
+        const filename = 'testString';
+        const params = {
+          customizationId: customizationId,
+          promptId: promptId,
+          metadata: metadata,
+          file: file,
+          filename: filename,
+        };
+
+        const addCustomPromptResult = textToSpeechService.addCustomPrompt(params);
+
+        // all methods should return a Promise
+        expectToBePromise(addCustomPromptResult);
+
+        // assert that create request was called
+        expect(createRequestMock).toHaveBeenCalledTimes(1);
+
+        const options = getOptions(createRequestMock);
+
+        checkUrlAndMethod(
+          options,
+          '/v1/customizations/{customization_id}/prompts/{prompt_id}',
+          'POST'
+        );
+        const expectedAccept = 'application/json';
+        const expectedContentType = 'multipart/form-data';
+        checkMediaHeaders(createRequestMock, expectedAccept, expectedContentType);
+        expect(options.formData['metadata']).toEqual(metadata);
+        expect(options.formData['file'].data).toEqual(file);
+        expect(options.formData['file'].filename).toEqual(filename);
+        expect(options.formData['file'].contentType).toEqual('audio/wav');
+        expect(options.path['customization_id']).toEqual(customizationId);
+        expect(options.path['prompt_id']).toEqual(promptId);
+      });
+
+      test('should prioritize user-given headers', () => {
+        // parameters
+        const customizationId = 'testString';
+        const promptId = 'testString';
+        const metadata = promptMetadataModel;
+        const file = Buffer.from('This is a mock file.');
+        const filename = 'testString';
+        const userAccept = 'fake/accept';
+        const userContentType = 'fake/contentType';
+        const params = {
+          customizationId,
+          promptId,
+          metadata,
+          file,
+          filename,
+          headers: {
+            Accept: userAccept,
+            'Content-Type': userContentType,
+          },
+        };
+
+        textToSpeechService.addCustomPrompt(params);
+        checkMediaHeaders(createRequestMock, userAccept, userContentType);
+      });
+    });
+
+    describe('negative tests', () => {
+      test('should enforce required parameters', async done => {
+        let err;
+        try {
+          await textToSpeechService.addCustomPrompt({});
+        } catch (e) {
+          err = e;
+        }
+
+        expect(err.message).toMatch(/Missing required parameters/);
+        done();
+      });
+
+      test('should reject promise when required params are not given', done => {
+        const addCustomPromptPromise = textToSpeechService.addCustomPrompt();
+        expectToBePromise(addCustomPromptPromise);
+
+        addCustomPromptPromise.catch(err => {
+          expect(err.message).toMatch(/Missing required parameters/);
+          done();
+        });
+      });
+    });
+  });
+  describe('getCustomPrompt', () => {
+    describe('positive tests', () => {
+      test('should pass the right params to createRequest', () => {
+        // Construct the params object for operation getCustomPrompt
+        const customizationId = 'testString';
+        const promptId = 'testString';
+        const params = {
+          customizationId: customizationId,
+          promptId: promptId,
+        };
+
+        const getCustomPromptResult = textToSpeechService.getCustomPrompt(params);
+
+        // all methods should return a Promise
+        expectToBePromise(getCustomPromptResult);
+
+        // assert that create request was called
+        expect(createRequestMock).toHaveBeenCalledTimes(1);
+
+        const options = getOptions(createRequestMock);
+
+        checkUrlAndMethod(
+          options,
+          '/v1/customizations/{customization_id}/prompts/{prompt_id}',
+          'GET'
+        );
+        const expectedAccept = 'application/json';
+        const expectedContentType = undefined;
+        checkMediaHeaders(createRequestMock, expectedAccept, expectedContentType);
+        expect(options.path['customization_id']).toEqual(customizationId);
+        expect(options.path['prompt_id']).toEqual(promptId);
+      });
+
+      test('should prioritize user-given headers', () => {
+        // parameters
+        const customizationId = 'testString';
+        const promptId = 'testString';
+        const userAccept = 'fake/accept';
+        const userContentType = 'fake/contentType';
+        const params = {
+          customizationId,
+          promptId,
+          headers: {
+            Accept: userAccept,
+            'Content-Type': userContentType,
+          },
+        };
+
+        textToSpeechService.getCustomPrompt(params);
+        checkMediaHeaders(createRequestMock, userAccept, userContentType);
+      });
+    });
+
+    describe('negative tests', () => {
+      test('should enforce required parameters', async done => {
+        let err;
+        try {
+          await textToSpeechService.getCustomPrompt({});
+        } catch (e) {
+          err = e;
+        }
+
+        expect(err.message).toMatch(/Missing required parameters/);
+        done();
+      });
+
+      test('should reject promise when required params are not given', done => {
+        const getCustomPromptPromise = textToSpeechService.getCustomPrompt();
+        expectToBePromise(getCustomPromptPromise);
+
+        getCustomPromptPromise.catch(err => {
+          expect(err.message).toMatch(/Missing required parameters/);
+          done();
+        });
+      });
+    });
+  });
+  describe('deleteCustomPrompt', () => {
+    describe('positive tests', () => {
+      test('should pass the right params to createRequest', () => {
+        // Construct the params object for operation deleteCustomPrompt
+        const customizationId = 'testString';
+        const promptId = 'testString';
+        const params = {
+          customizationId: customizationId,
+          promptId: promptId,
+        };
+
+        const deleteCustomPromptResult = textToSpeechService.deleteCustomPrompt(params);
+
+        // all methods should return a Promise
+        expectToBePromise(deleteCustomPromptResult);
+
+        // assert that create request was called
+        expect(createRequestMock).toHaveBeenCalledTimes(1);
+
+        const options = getOptions(createRequestMock);
+
+        checkUrlAndMethod(
+          options,
+          '/v1/customizations/{customization_id}/prompts/{prompt_id}',
+          'DELETE'
+        );
+        const expectedAccept = undefined;
+        const expectedContentType = undefined;
+        checkMediaHeaders(createRequestMock, expectedAccept, expectedContentType);
+        expect(options.path['customization_id']).toEqual(customizationId);
+        expect(options.path['prompt_id']).toEqual(promptId);
+      });
+
+      test('should prioritize user-given headers', () => {
+        // parameters
+        const customizationId = 'testString';
+        const promptId = 'testString';
+        const userAccept = 'fake/accept';
+        const userContentType = 'fake/contentType';
+        const params = {
+          customizationId,
+          promptId,
+          headers: {
+            Accept: userAccept,
+            'Content-Type': userContentType,
+          },
+        };
+
+        textToSpeechService.deleteCustomPrompt(params);
+        checkMediaHeaders(createRequestMock, userAccept, userContentType);
+      });
+    });
+
+    describe('negative tests', () => {
+      test('should enforce required parameters', async done => {
+        let err;
+        try {
+          await textToSpeechService.deleteCustomPrompt({});
+        } catch (e) {
+          err = e;
+        }
+
+        expect(err.message).toMatch(/Missing required parameters/);
+        done();
+      });
+
+      test('should reject promise when required params are not given', done => {
+        const deleteCustomPromptPromise = textToSpeechService.deleteCustomPrompt();
+        expectToBePromise(deleteCustomPromptPromise);
+
+        deleteCustomPromptPromise.catch(err => {
+          expect(err.message).toMatch(/Missing required parameters/);
+          done();
+        });
+      });
+    });
+  });
+  describe('listSpeakerModels', () => {
+    describe('positive tests', () => {
+      test('should pass the right params to createRequest', () => {
+        // Construct the params object for operation listSpeakerModels
+        const params = {};
+
+        const listSpeakerModelsResult = textToSpeechService.listSpeakerModels(params);
+
+        // all methods should return a Promise
+        expectToBePromise(listSpeakerModelsResult);
+
+        // assert that create request was called
+        expect(createRequestMock).toHaveBeenCalledTimes(1);
+
+        const options = getOptions(createRequestMock);
+
+        checkUrlAndMethod(options, '/v1/speakers', 'GET');
+        const expectedAccept = 'application/json';
+        const expectedContentType = undefined;
+        checkMediaHeaders(createRequestMock, expectedAccept, expectedContentType);
+      });
+
+      test('should prioritize user-given headers', () => {
+        // parameters
+        const userAccept = 'fake/accept';
+        const userContentType = 'fake/contentType';
+        const params = {
+          headers: {
+            Accept: userAccept,
+            'Content-Type': userContentType,
+          },
+        };
+
+        textToSpeechService.listSpeakerModels(params);
+        checkMediaHeaders(createRequestMock, userAccept, userContentType);
+      });
+
+      test('should not have any problems when no parameters are passed in', () => {
+        // invoke the method with no parameters
+        textToSpeechService.listSpeakerModels({});
+        checkForSuccessfulExecution(createRequestMock);
+      });
+    });
+  });
+  describe('createSpeakerModel', () => {
+    describe('positive tests', () => {
+      test('should pass the right params to createRequest', () => {
+        // Construct the params object for operation createSpeakerModel
+        const speakerName = 'testString';
+        const audio = Buffer.from('This is a mock file.');
+        const params = {
+          speakerName: speakerName,
+          audio: audio,
+        };
+
+        const createSpeakerModelResult = textToSpeechService.createSpeakerModel(params);
+
+        // all methods should return a Promise
+        expectToBePromise(createSpeakerModelResult);
+
+        // assert that create request was called
+        expect(createRequestMock).toHaveBeenCalledTimes(1);
+
+        const options = getOptions(createRequestMock);
+
+        checkUrlAndMethod(options, '/v1/speakers', 'POST');
+        const expectedAccept = 'application/json';
+        const expectedContentType = 'audio/wav';
+        checkMediaHeaders(createRequestMock, expectedAccept, expectedContentType);
+        expect(options.body).toEqual(audio);
+        expect(options.qs['speaker_name']).toEqual(speakerName);
+      });
+
+      test('should prioritize user-given headers', () => {
+        // parameters
+        const speakerName = 'testString';
+        const audio = Buffer.from('This is a mock file.');
+        const userAccept = 'fake/accept';
+        const userContentType = 'fake/contentType';
+        const params = {
+          speakerName,
+          audio,
+          headers: {
+            Accept: userAccept,
+            'Content-Type': userContentType,
+          },
+        };
+
+        textToSpeechService.createSpeakerModel(params);
+        checkMediaHeaders(createRequestMock, userAccept, userContentType);
+      });
+    });
+
+    describe('negative tests', () => {
+      test('should enforce required parameters', async done => {
+        let err;
+        try {
+          await textToSpeechService.createSpeakerModel({});
+        } catch (e) {
+          err = e;
+        }
+
+        expect(err.message).toMatch(/Missing required parameters/);
+        done();
+      });
+
+      test('should reject promise when required params are not given', done => {
+        const createSpeakerModelPromise = textToSpeechService.createSpeakerModel();
+        expectToBePromise(createSpeakerModelPromise);
+
+        createSpeakerModelPromise.catch(err => {
+          expect(err.message).toMatch(/Missing required parameters/);
+          done();
+        });
+      });
+    });
+  });
+  describe('getSpeakerModel', () => {
+    describe('positive tests', () => {
+      test('should pass the right params to createRequest', () => {
+        // Construct the params object for operation getSpeakerModel
+        const speakerId = 'testString';
+        const params = {
+          speakerId: speakerId,
+        };
+
+        const getSpeakerModelResult = textToSpeechService.getSpeakerModel(params);
+
+        // all methods should return a Promise
+        expectToBePromise(getSpeakerModelResult);
+
+        // assert that create request was called
+        expect(createRequestMock).toHaveBeenCalledTimes(1);
+
+        const options = getOptions(createRequestMock);
+
+        checkUrlAndMethod(options, '/v1/speakers/{speaker_id}', 'GET');
+        const expectedAccept = 'application/json';
+        const expectedContentType = undefined;
+        checkMediaHeaders(createRequestMock, expectedAccept, expectedContentType);
+        expect(options.path['speaker_id']).toEqual(speakerId);
+      });
+
+      test('should prioritize user-given headers', () => {
+        // parameters
+        const speakerId = 'testString';
+        const userAccept = 'fake/accept';
+        const userContentType = 'fake/contentType';
+        const params = {
+          speakerId,
+          headers: {
+            Accept: userAccept,
+            'Content-Type': userContentType,
+          },
+        };
+
+        textToSpeechService.getSpeakerModel(params);
+        checkMediaHeaders(createRequestMock, userAccept, userContentType);
+      });
+    });
+
+    describe('negative tests', () => {
+      test('should enforce required parameters', async done => {
+        let err;
+        try {
+          await textToSpeechService.getSpeakerModel({});
+        } catch (e) {
+          err = e;
+        }
+
+        expect(err.message).toMatch(/Missing required parameters/);
+        done();
+      });
+
+      test('should reject promise when required params are not given', done => {
+        const getSpeakerModelPromise = textToSpeechService.getSpeakerModel();
+        expectToBePromise(getSpeakerModelPromise);
+
+        getSpeakerModelPromise.catch(err => {
+          expect(err.message).toMatch(/Missing required parameters/);
+          done();
+        });
+      });
+    });
+  });
+  describe('deleteSpeakerModel', () => {
+    describe('positive tests', () => {
+      test('should pass the right params to createRequest', () => {
+        // Construct the params object for operation deleteSpeakerModel
+        const speakerId = 'testString';
+        const params = {
+          speakerId: speakerId,
+        };
+
+        const deleteSpeakerModelResult = textToSpeechService.deleteSpeakerModel(params);
+
+        // all methods should return a Promise
+        expectToBePromise(deleteSpeakerModelResult);
+
+        // assert that create request was called
+        expect(createRequestMock).toHaveBeenCalledTimes(1);
+
+        const options = getOptions(createRequestMock);
+
+        checkUrlAndMethod(options, '/v1/speakers/{speaker_id}', 'DELETE');
+        const expectedAccept = undefined;
+        const expectedContentType = undefined;
+        checkMediaHeaders(createRequestMock, expectedAccept, expectedContentType);
+        expect(options.path['speaker_id']).toEqual(speakerId);
+      });
+
+      test('should prioritize user-given headers', () => {
+        // parameters
+        const speakerId = 'testString';
+        const userAccept = 'fake/accept';
+        const userContentType = 'fake/contentType';
+        const params = {
+          speakerId,
+          headers: {
+            Accept: userAccept,
+            'Content-Type': userContentType,
+          },
+        };
+
+        textToSpeechService.deleteSpeakerModel(params);
+        checkMediaHeaders(createRequestMock, userAccept, userContentType);
+      });
+    });
+
+    describe('negative tests', () => {
+      test('should enforce required parameters', async done => {
+        let err;
+        try {
+          await textToSpeechService.deleteSpeakerModel({});
+        } catch (e) {
+          err = e;
+        }
+
+        expect(err.message).toMatch(/Missing required parameters/);
+        done();
+      });
+
+      test('should reject promise when required params are not given', done => {
+        const deleteSpeakerModelPromise = textToSpeechService.deleteSpeakerModel();
+        expectToBePromise(deleteSpeakerModelPromise);
+
+        deleteSpeakerModelPromise.catch(err => {
+          expect(err.message).toMatch(/Missing required parameters/);
+          done();
+        });
+      });
+    });
+  });
   describe('deleteUserData', () => {
     describe('positive tests', () => {
       test('should pass the right params to createRequest', () => {
diff --git a/text-to-speech/v1-generated.ts b/text-to-speech/v1-generated.ts
index d08401c0da..854265f1ae 100644
--- a/text-to-speech/v1-generated.ts
+++ b/text-to-speech/v1-generated.ts
@@ -1,5 +1,5 @@
 /**
- * (C) Copyright IBM Corp. 2017, 2020.
+ * (C) Copyright IBM Corp. 2017, 2021.
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -15,9 +15,9 @@
  */
 
 /**
- * IBM OpenAPI SDK Code Generator Version: 99-SNAPSHOT-be3b4618-20201221-123327
+ * IBM OpenAPI SDK Code Generator Version: 99-SNAPSHOT-902c9336-20210507-162723
  */
-
+
 import * as extend from 'extend';
 import { IncomingHttpHeaders, OutgoingHttpHeaders } from 'http';
@@ -40,6 +40,10 @@ import { getSdkHeaders } from '../lib/common';
  * translation is based on the SSML phoneme format for representing a word. You can specify a phonetic translation in
  * standard International Phonetic Alphabet (IPA) representation or in the proprietary IBM Symbolic Phonetic
  * Representation (SPR). The Arabic, Chinese, Dutch, Australian English, and Korean languages support only IPA.
+ *
+ * The service also offers a Tune by Example feature that lets you define custom prompts. You can also define speaker
+ * models to improve the quality of your custom prompts. The service supports custom prompts only for US English custom
+ * models and voices.
  */
 
 class TextToSpeechV1 extends BaseService {
@@ -480,9 +484,9 @@ class TextToSpeechV1 extends BaseService {
    * List custom models.
    *
    * Lists metadata such as the name and description for all custom models that are owned by an instance of the service.
-   * Specify a language to list the custom models for that language only. To see the words in addition to the metadata
-   * for a specific custom model, use the **List a custom model** method. You must use credentials for the instance of
-   * the service that owns a model to list information about it.
+   * Specify a language to list the custom models for that language only. To see the words and prompts in addition to
+   * the metadata for a specific custom model, use the **Get a custom model** method. You must use credentials for the
+   * instance of the service that owns a model to list information about it.
    *
    * **See also:** [Querying all custom
    * models](https://cloud.ibm.com/docs/text-to-speech?topic=text-to-speech-customModels#cuModelsQueryAll).
@@ -597,8 +601,9 @@ class TextToSpeechV1 extends BaseService {
    * Get a custom model.
    *
    * Gets all information about a specified custom model. In addition to metadata such as the name and description of
-   * the custom model, the output includes the words and their translations as defined in the model. To see just the
-   * metadata for a model, use the **List custom models** method.
+   * the custom model, the output includes the words and their translations that are defined for the model, as well as
+   * any prompts that are defined for the model. To see just the metadata for a model, use the **List custom models**
+   * method.
    *
    * **See also:** [Querying a custom
    * model](https://cloud.ibm.com/docs/text-to-speech?topic=text-to-speech-customModels#cuModelsQuery).
@@ -987,6 +992,544 @@ class TextToSpeechV1 extends BaseService {
     return this.createRequest(parameters);
   };
 
+  /*************************
+   * customPrompts
+   ************************/
+
+  /**
+   * List custom prompts.
+   *
+   * Lists information about all custom prompts that are defined for a custom model. The information includes the prompt
+   * ID, prompt text, status, and optional speaker ID for each prompt of the custom model. You must use credentials for
+   * the instance of the service that owns the custom model. The same information about all of the prompts for a custom
+   * model is also provided by the **Get a custom model** method. That method provides complete details about a
+   * specified custom model, including its language, owner, custom words, and more.
+   *
+   * **Beta:** Custom prompts are beta functionality that is supported only for use with US English custom models and
+   * voices.
+   *
+   * **See also:** [Listing custom
+   * prompts](https://cloud.ibm.com/docs/text-to-speech?topic=text-to-speech-tbe-custom-prompts#tbe-custom-prompts-list).
+   *
+   * @param {Object} params - The parameters to send to the service.
+   * @param {string} params.customizationId - The customization ID (GUID) of the custom model. You must make the request
+   * with credentials for the instance of the service that owns the custom model.
+   * @param {OutgoingHttpHeaders} [params.headers] - Custom request headers
+   * @returns {Promise<TextToSpeechV1.Response<TextToSpeechV1.Prompts>>}
+   */
+  public listCustomPrompts(params: TextToSpeechV1.ListCustomPromptsParams): Promise<TextToSpeechV1.Response<TextToSpeechV1.Prompts>> {
+    const _params = Object.assign({}, params);
+    const requiredParams = ['customizationId'];
+
+    const missingParams = getMissingParams(_params, requiredParams);
+    if (missingParams) {
+      return Promise.reject(missingParams);
+    }
+
+    const path = {
+      'customization_id': _params.customizationId
+    };
+
+    const sdkHeaders = getSdkHeaders(TextToSpeechV1.DEFAULT_SERVICE_NAME, 'v1', 'listCustomPrompts');
+
+    const parameters = {
+      options: {
+        url: '/v1/customizations/{customization_id}/prompts',
+        method: 'GET',
+        path,
+      },
+      defaultOptions: extend(true, {}, this.baseOptions, {
+        headers: extend(true, sdkHeaders, {
+          'Accept': 'application/json',
+        }, _params.headers),
+      }),
+    };
+
+    return this.createRequest(parameters);
+  };
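As a consumer-side illustration (not part of the generated source), here is a minimal sketch of calling the new method, assuming an authenticated `TextToSpeechV1` client named `textToSpeechService` and a placeholder customization ID:

// Inside an async function:
const { result } = await textToSpeechService.listCustomPrompts({
  customizationId: '{customization-guid}', // placeholder GUID
});
// `result.prompts` is expected to carry the ID, text, status, and optional
// speaker ID of each prompt, per the description above.
console.log(JSON.stringify(result, null, 2));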
+
+  /**
+   * Add a custom prompt.
+   *
+   * Adds a custom prompt to a custom model. A prompt is defined by the text that is to be spoken, the audio for that
+   * text, a unique user-specified ID for the prompt, and an optional speaker ID. The information is used to generate
+   * prosodic data that is not visible to the user. This data is used by the service to produce the synthesized audio
+   * upon request. You must use credentials for the instance of the service that owns a custom model to add a prompt to
+   * it. You can add a maximum of 1000 custom prompts to a single custom model.
+   *
+   * It is recommended that you assign meaningful values to prompt IDs. For example, use `goodbye` to identify a prompt
+   * that speaks a farewell message. Prompt IDs must be unique within a given custom model. You cannot define two
+   * prompts with the same name for the same custom model. If you provide the ID of an existing prompt, the previously
+   * uploaded prompt is replaced by the new information. The existing prompt is reprocessed by using the new text and
+   * audio and, if provided, the new speaker model, and the prosody data associated with the prompt is updated.
+   *
+   * The quality of a prompt is undefined if the language of a prompt does not match the language of its custom model.
+   * This is consistent with any text or SSML that is specified for a speech synthesis request. The service makes a
+   * best-effort attempt to render the specified text for the prompt; it does not validate that the language of the text
+   * matches the language of the model.
+   *
+   * Adding a prompt is an asynchronous operation. Although it accepts less audio than speaker enrollment, the service
+   * must align the audio with the provided text. The time that it takes to process a prompt depends on the prompt
+   * itself. The processing time for a reasonably sized prompt generally matches the length of the audio (for example,
+   * it takes 20 seconds to process a 20-second prompt).
+   *
+   * For shorter prompts, you can wait for a reasonable amount of time and then check the status of the prompt with the
+   * **Get a custom prompt** method. For longer prompts, consider using that method to poll the service every few
+   * seconds to determine when the prompt becomes available. No prompt can be used for speech synthesis if it is in the
+   * `processing` or `failed` state. Only prompts that are in the `available` state can be used for speech synthesis.
+   *
+   * When it processes a request, the service attempts to align the text and the audio that are provided for the prompt.
+   * The text that is passed with a prompt must match the spoken audio as closely as possible. Optimally, the text and
+   * audio match exactly. The service does its best to align the specified text with the audio, and it can often
+   * compensate for mismatches between the two. But if the service cannot effectively align the text and the audio,
+   * possibly because the magnitude of mismatches between the two is too great, processing of the prompt fails.
+   *
+   * ### Evaluating a prompt
+   *
+   * Always listen to and evaluate a prompt to determine its quality before using it in production. To evaluate a
+   * prompt, include only the single prompt in a speech synthesis request by using the following SSML extension, in this
+   * case for a prompt whose ID is `goodbye`:
+   *
+   * `<ibm:prompt id="goodbye"/>`
+   *
+   * In some cases, you might need to rerecord and resubmit a prompt as many as five times to address the following
+   * possible problems:
+   * * The service might fail to detect a mismatch between the prompt’s text and audio. The longer the prompt, the
+   * greater the chance for misalignment between its text and audio. Therefore, multiple shorter prompts are preferable
+   * to a single long prompt.
+   * * The text of a prompt might include a word that the service does not recognize. In this case, you can create a
+   * custom word and pronunciation pair to tell the service how to pronounce the word. You must then re-create the
+   * prompt.
+   * * The quality of the input audio might be insufficient or the service’s processing of the audio might fail to
+   * detect the intended prosody. Submitting new audio for the prompt can correct these issues.
+   *
+   * If a prompt that is created without a speaker ID does not adequately reflect the intended prosody, enrolling the
+   * speaker and providing a speaker ID for the prompt is one recommended means of potentially improving the quality of
+   * the prompt. This is especially important for shorter prompts such as "good-bye" or "thank you," where less audio
+   * data makes it more difficult to match the prosody of the speaker.
+   *
+   * **Beta:** Custom prompts are beta functionality that is supported only for use with US English custom models and
+   * voices.
+   *
+   * **See also:**
+   * * [Add a custom
+   * prompt](https://cloud.ibm.com/docs/text-to-speech?topic=text-to-speech-tbe-create#tbe-create-add-prompt)
+   * * [Evaluate a custom
+   * prompt](https://cloud.ibm.com/docs/text-to-speech?topic=text-to-speech-tbe-create#tbe-create-evaluate-prompt)
+   * * [Rules for creating custom
+   * prompts](https://cloud.ibm.com/docs/text-to-speech?topic=text-to-speech-tbe-rules#tbe-rules-prompts).
+   *
+   * @param {Object} params - The parameters to send to the service.
+   * @param {string} params.customizationId - The customization ID (GUID) of the custom model. You must make the request
+   * with credentials for the instance of the service that owns the custom model.
+   * @param {string} params.promptId - The identifier of the prompt that is to be added to the custom model:
+   * * Include a maximum of 49 characters in the ID.
+   * * Include only alphanumeric characters and `_` (underscores) in the ID.
+   * * Do not include XML sensitive characters (double quotes, single quotes, ampersands, angle brackets, and slashes)
+   * in the ID.
+   * * To add a new prompt, the ID must be unique for the specified custom model. Otherwise, the new information for the
+   * prompt overwrites the existing prompt that has that ID.
+   * @param {PromptMetadata} params.metadata - Information about the prompt that is to be added to a custom model. The
+   * following example of a `PromptMetadata` object includes both the required prompt text and an optional speaker model
+   * ID:
+   *
+   * `{ "prompt_text": "Thank you and good-bye!", "speaker_id": "823068b2-ed4e-11ea-b6e0-7b6456aa95cc" }`.
+   * @param {NodeJS.ReadableStream|Buffer} params.file - An audio file that speaks the text of the prompt with
+   * intonation and prosody that matches how you would like the prompt to be spoken.
+   * * The prompt audio must be in WAV format and must have a minimum sampling rate of 16 kHz. The service accepts audio
+   * with higher sampling rates. The service transcodes all audio to 16 kHz before processing it.
+   * * The length of the prompt audio is limited to 30 seconds.
+   * @param {string} params.filename - The filename for file.
+   * @param {OutgoingHttpHeaders} [params.headers] - Custom request headers
+   * @returns {Promise<TextToSpeechV1.Response<TextToSpeechV1.Prompt>>}
+   */
+  public addCustomPrompt(params: TextToSpeechV1.AddCustomPromptParams): Promise<TextToSpeechV1.Response<TextToSpeechV1.Prompt>> {
+    const _params = Object.assign({}, params);
+    const requiredParams = ['customizationId', 'promptId', 'metadata', 'file', 'filename'];
+
+    const missingParams = getMissingParams(_params, requiredParams);
+    if (missingParams) {
+      return Promise.reject(missingParams);
+    }
+
+    const formData = {
+      'metadata': _params.metadata,
+      'file': {
+        data: _params.file,
+        filename: _params.filename,
+        contentType: 'audio/wav'
+      }
+    };
+
+    const path = {
+      'customization_id': _params.customizationId,
+      'prompt_id': _params.promptId
+    };
+
+    const sdkHeaders = getSdkHeaders(TextToSpeechV1.DEFAULT_SERVICE_NAME, 'v1', 'addCustomPrompt');
+
+    const parameters = {
+      options: {
+        url: '/v1/customizations/{customization_id}/prompts/{prompt_id}',
+        method: 'POST',
+        path,
+        formData
+      },
+      defaultOptions: extend(true, {}, this.baseOptions, {
+        headers: extend(true, sdkHeaders, {
+          'Accept': 'application/json',
+          'Content-Type': 'multipart/form-data',
+        }, _params.headers),
+      }),
+    };
+
+    return this.createRequest(parameters);
+  };
+
+  /**
+   * Get a custom prompt.
+   *
+   * Gets information about a specified custom prompt for a specified custom model. The information includes the prompt
+   * ID, prompt text, status, and optional speaker ID of the prompt. You must use credentials for the instance of the
+   * service that owns the custom model.
+   *
+   * **Beta:** Custom prompts are beta functionality that is supported only for use with US English custom models and
+   * voices.
+   *
+   * **See also:** [Listing custom
+   * prompts](https://cloud.ibm.com/docs/text-to-speech?topic=text-to-speech-tbe-custom-prompts#tbe-custom-prompts-list).
+   *
+   * @param {Object} params - The parameters to send to the service.
+   * @param {string} params.customizationId - The customization ID (GUID) of the custom model. You must make the request
+   * with credentials for the instance of the service that owns the custom model.
+   * @param {string} params.promptId - The identifier (name) of the prompt.
+   * @param {OutgoingHttpHeaders} [params.headers] - Custom request headers
+   * @returns {Promise<TextToSpeechV1.Response<TextToSpeechV1.Prompt>>}
+   */
+  public getCustomPrompt(params: TextToSpeechV1.GetCustomPromptParams): Promise<TextToSpeechV1.Response<TextToSpeechV1.Prompt>> {
+    const _params = Object.assign({}, params);
+    const requiredParams = ['customizationId', 'promptId'];
+
+    const missingParams = getMissingParams(_params, requiredParams);
+    if (missingParams) {
+      return Promise.reject(missingParams);
+    }
+
+    const path = {
+      'customization_id': _params.customizationId,
+      'prompt_id': _params.promptId
+    };
+
+    const sdkHeaders = getSdkHeaders(TextToSpeechV1.DEFAULT_SERVICE_NAME, 'v1', 'getCustomPrompt');
+
+    const parameters = {
+      options: {
+        url: '/v1/customizations/{customization_id}/prompts/{prompt_id}',
+        method: 'GET',
+        path,
+      },
+      defaultOptions: extend(true, {}, this.baseOptions, {
+        headers: extend(true, sdkHeaders, {
+          'Accept': 'application/json',
+        }, _params.headers),
+      }),
+    };
+
+    return this.createRequest(parameters);
+  };
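Because prompt processing is asynchronous, a caller typically adds a prompt and then polls `getCustomPrompt` until the prompt leaves the `processing` state, as the documentation above suggests. A sketch under the same assumptions as the earlier examples (authenticated `textToSpeechService` client, `fs` import, placeholder IDs and WAV file):

async function addPromptAndWait(): Promise<void> {
  await textToSpeechService.addCustomPrompt({
    customizationId: '{customization-guid}',              // placeholder GUID
    promptId: 'goodbye',
    metadata: { prompt_text: 'Thank you and good-bye!' }, // speaker_id is optional
    file: fs.createReadStream('goodbye.wav'),             // placeholder 16 kHz WAV
    filename: 'goodbye.wav',
  });

  // Poll every few seconds until processing finishes.
  let status = 'processing';
  while (status === 'processing') {
    await new Promise(resolve => setTimeout(resolve, 5000));
    const { result } = await textToSpeechService.getCustomPrompt({
      customizationId: '{customization-guid}',
      promptId: 'goodbye',
    });
    status = result.status;
  }
  console.log(`prompt is ${status}`); // 'available' on success, 'failed' otherwise
}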
+
+ /**
+ * Delete a custom prompt.
+ *
+ * Deletes an existing custom prompt from a custom model. The service deletes the prompt with the specified ID. You
+ * must use credentials for the instance of the service that owns the custom model from which the prompt is to be
+ * deleted.
+ *
+ * **Caution:** Deleting a custom prompt elicits a 400 response code from synthesis requests that attempt to use the
+ * prompt. Make sure that you do not attempt to use a deleted prompt in a production application.
+ *
+ * **Beta:** Custom prompts are beta functionality that is supported only for use with US English custom models and
+ * voices.
+ *
+ * **See also:** [Deleting a custom
+ * prompt](https://cloud.ibm.com/docs/text-to-speech?topic=text-to-speech-tbe-custom-prompts#tbe-custom-prompts-delete).
+ *
+ * @param {Object} params - The parameters to send to the service.
+ * @param {string} params.customizationId - The customization ID (GUID) of the custom model. You must make the request
+ * with credentials for the instance of the service that owns the custom model.
+ * @param {string} params.promptId - The identifier (name) of the prompt that is to be deleted.
+ * @param {OutgoingHttpHeaders} [params.headers] - Custom request headers
+ * @returns {Promise<TextToSpeechV1.Response<TextToSpeechV1.Empty>>}
+ */
+ public deleteCustomPrompt(params: TextToSpeechV1.DeleteCustomPromptParams): Promise<TextToSpeechV1.Response<TextToSpeechV1.Empty>> {
+ const _params = Object.assign({}, params);
+ const requiredParams = ['customizationId', 'promptId'];
+
+ const missingParams = getMissingParams(_params, requiredParams);
+ if (missingParams) {
+ return Promise.reject(missingParams);
+ }
+
+ const path = {
+ 'customization_id': _params.customizationId,
+ 'prompt_id': _params.promptId
+ };
+
+ const sdkHeaders = getSdkHeaders(TextToSpeechV1.DEFAULT_SERVICE_NAME, 'v1', 'deleteCustomPrompt');
+
+ const parameters = {
+ options: {
+ url: '/v1/customizations/{customization_id}/prompts/{prompt_id}',
+ method: 'DELETE',
+ path,
+ },
+ defaultOptions: extend(true, {}, this.baseOptions, {
+ headers: extend(true, sdkHeaders, {
+ }, _params.headers),
+ }),
+ };
+
+ return this.createRequest(parameters);
+ };
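+
+ // A deletion sketch (assumptions as above). The promise resolves with an empty result;
+ // afterward, synthesis requests that reference the deleted prompt fail with a 400:
+ //
+ //   await textToSpeech.deleteCustomPrompt({
+ //     customizationId: '<customization-id>',
+ //     promptId: 'goodbye',
+ //   });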
+
+ /*************************
+ * speakerModels
+ ************************/
+
+ /**
+ * List speaker models.
+ *
+ * Lists information about all speaker models that are defined for a service instance. The information includes the
+ * speaker ID and speaker name of each defined speaker. You must use credentials for the instance of a service to list
+ * its speakers.
+ *
+ * **Beta:** Speaker models and the custom prompts with which they are used are beta functionality that is supported
+ * only for use with US English custom models and voices.
+ *
+ * **See also:** [Listing speaker
+ * models](https://cloud.ibm.com/docs/text-to-speech?topic=text-to-speech-tbe-speaker-models#tbe-speaker-models-list).
+ *
+ * @param {Object} [params] - The parameters to send to the service.
+ * @param {OutgoingHttpHeaders} [params.headers] - Custom request headers
+ * @returns {Promise<TextToSpeechV1.Response<TextToSpeechV1.Speakers>>}
+ */
+ public listSpeakerModels(params?: TextToSpeechV1.ListSpeakerModelsParams): Promise<TextToSpeechV1.Response<TextToSpeechV1.Speakers>> {
+ const _params = Object.assign({}, params);
+
+ const sdkHeaders = getSdkHeaders(TextToSpeechV1.DEFAULT_SERVICE_NAME, 'v1', 'listSpeakerModels');
+
+ const parameters = {
+ options: {
+ url: '/v1/speakers',
+ method: 'GET',
+ },
+ defaultOptions: extend(true, {}, this.baseOptions, {
+ headers: extend(true, sdkHeaders, {
+ 'Accept': 'application/json',
+ }, _params.headers),
+ }),
+ };
+
+ return this.createRequest(parameters);
+ };
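+
+ // A listing sketch (assumes an authenticated `textToSpeech` instance). The resolved
+ // `result` is a `Speakers` object whose `speakers` array may be empty:
+ //
+ //   const { result } = await textToSpeech.listSpeakerModels();
+ //   for (const speaker of result.speakers) {
+ //     console.log(`${speaker.name}: ${speaker.speaker_id}`);
+ //   }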
+
+ /**
+ * Create a speaker model.
+ *
+ * Creates a new speaker model, which is an optional enrollment token for users who are to add prompts to custom
+ * models. A speaker model contains information about a user's voice. The service extracts this information from a WAV
+ * audio sample that you pass as the body of the request. Associating a speaker model with a prompt is optional, but
+ * the information that is extracted from the speaker model helps the service learn about the speaker's voice.
+ *
+ * A speaker model can make an appreciable difference in the quality of prompts that are associated with that
+ * speaker, especially short prompts with relatively little audio. A speaker model can help the service produce a
+ * prompt with more confidence; the lack of a speaker model can potentially compromise the quality of a prompt.
+ *
+ * The gender of the speaker who creates a speaker model does not need to match the gender of a voice that is used
+ * with prompts that are associated with that speaker model. For example, a speaker model that is created by a male
+ * speaker can be associated with prompts that are spoken by female voices.
+ *
+ * You create a speaker model for a given instance of the service. The new speaker model is owned by the service
+ * instance whose credentials are used to create it. That same speaker can then be used to create prompts for all
+ * custom models within that service instance. No language is associated with a speaker model, but each custom model
+ * has a single specified language. You can add prompts only to US English models.
+ *
+ * You specify a name for the speaker when you create it. The name must be unique among all speaker names for the
+ * owning service instance. To re-create a speaker model for an existing speaker name, you must first delete the
+ * existing speaker model that has that name.
+ *
+ * Speaker enrollment is a synchronous operation. Although it accepts more audio data than a prompt, the process of
+ * adding a speaker is very fast. The service simply extracts information about the speaker’s voice from the audio.
+ * Unlike prompts, speaker models neither need nor accept a transcription of the audio. When the call returns, the
+ * audio is fully processed and the speaker enrollment is complete.
+ *
+ * The service returns a speaker ID in response to the request. A speaker ID is a globally unique identifier (GUID)
+ * that you use to identify the speaker in subsequent requests to the service.
+ *
+ * **Beta:** Speaker models and the custom prompts with which they are used are beta functionality that is supported
+ * only for use with US English custom models and voices.
+ *
+ * **See also:**
+ * * [Create a speaker
+ * model](https://cloud.ibm.com/docs/text-to-speech?topic=text-to-speech-tbe-create#tbe-create-speaker-model)
+ * * [Rules for creating speaker
+ * models](https://cloud.ibm.com/docs/text-to-speech?topic=text-to-speech-tbe-rules#tbe-rules-speakers).
+ *
+ * @param {Object} params - The parameters to send to the service.
+ * @param {string} params.speakerName - The name of the speaker that is to be added to the service instance.
+ * * Include a maximum of 49 characters in the name.
+ * * Include only alphanumeric characters and `_` (underscores) in the name.
+ * * Do not include XML sensitive characters (double quotes, single quotes, ampersands, angle brackets, and slashes)
+ * in the name.
+ * * Do not use the name of an existing speaker that is already defined for the service instance.
+ * @param {NodeJS.ReadableStream|Buffer} params.audio - An enrollment audio file that contains a sample of the
+ * speaker’s voice.
+ * * The enrollment audio must be in WAV format and must have a minimum sampling rate of 16 kHz. The service accepts
+ * audio with higher sampling rates. It transcodes all audio to 16 kHz before processing it.
+ * * The length of the enrollment audio is limited to 1 minute. Speaking one or two paragraphs of text that include
+ * five to ten sentences is recommended.
+ * @param {OutgoingHttpHeaders} [params.headers] - Custom request headers
+ * @returns {Promise<TextToSpeechV1.Response<TextToSpeechV1.SpeakerModel>>}
+ */
+ public createSpeakerModel(params: TextToSpeechV1.CreateSpeakerModelParams): Promise<TextToSpeechV1.Response<TextToSpeechV1.SpeakerModel>> {
+ const _params = Object.assign({}, params);
+ const requiredParams = ['speakerName', 'audio'];
+
+ const missingParams = getMissingParams(_params, requiredParams);
+ if (missingParams) {
+ return Promise.reject(missingParams);
+ }
+
+ const body = _params.audio;
+ const query = {
+ 'speaker_name': _params.speakerName
+ };
+
+ const sdkHeaders = getSdkHeaders(TextToSpeechV1.DEFAULT_SERVICE_NAME, 'v1', 'createSpeakerModel');
+
+ const parameters = {
+ options: {
+ url: '/v1/speakers',
+ method: 'POST',
+ body,
+ qs: query,
+ },
+ defaultOptions: extend(true, {}, this.baseOptions, {
+ headers: extend(true, sdkHeaders, {
+ 'Accept': 'application/json',
+ 'Content-Type': 'audio/wav',
+ }, _params.headers),
+ }),
+ };
+
+ return this.createRequest(parameters);
+ };
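+
+ // An enrollment sketch (assumptions as above; the speaker name and WAV file are
+ // placeholders). The returned `speaker_id` is the GUID to pass as `metadata.speaker_id`
+ // when adding prompts for this speaker:
+ //
+ //   const { result } = await textToSpeech.createSpeakerModel({
+ //     speakerName: 'elizabeth',
+ //     audio: createReadStream('enrollment.wav'),
+ //   });
+ //   console.log(result.speaker_id);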
+
+ /**
+ * Get a speaker model.
+ *
+ * Gets information about all prompts that are defined by a specified speaker for all custom models that are owned by
+ * a service instance. The information is grouped by the customization IDs of the custom models. For each custom
+ * model, the response lists information about each prompt that the speaker has defined for that custom model.
+ * You must use credentials for the instance of the service that owns a speaker model to list its prompts.
+ *
+ * **Beta:** Speaker models and the custom prompts with which they are used are beta functionality that is supported
+ * only for use with US English custom models and voices.
+ *
+ * **See also:** [Listing the custom prompts for a speaker
+ * model](https://cloud.ibm.com/docs/text-to-speech?topic=text-to-speech-tbe-speaker-models#tbe-speaker-models-list-prompts).
+ *
+ * @param {Object} params - The parameters to send to the service.
+ * @param {string} params.speakerId - The speaker ID (GUID) of the speaker model. You must make the request with
+ * service credentials for the instance of the service that owns the speaker model.
+ * @param {OutgoingHttpHeaders} [params.headers] - Custom request headers
+ * @returns {Promise<TextToSpeechV1.Response<TextToSpeechV1.SpeakerCustomModels>>}
+ */
+ public getSpeakerModel(params: TextToSpeechV1.GetSpeakerModelParams): Promise<TextToSpeechV1.Response<TextToSpeechV1.SpeakerCustomModels>> {
+ const _params = Object.assign({}, params);
+ const requiredParams = ['speakerId'];
+
+ const missingParams = getMissingParams(_params, requiredParams);
+ if (missingParams) {
+ return Promise.reject(missingParams);
+ }
+
+ const path = {
+ 'speaker_id': _params.speakerId
+ };
+
+ const sdkHeaders = getSdkHeaders(TextToSpeechV1.DEFAULT_SERVICE_NAME, 'v1', 'getSpeakerModel');
+
+ const parameters = {
+ options: {
+ url: '/v1/speakers/{speaker_id}',
+ method: 'GET',
+ path,
+ },
+ defaultOptions: extend(true, {}, this.baseOptions, {
+ headers: extend(true, sdkHeaders, {
+ 'Accept': 'application/json',
+ }, _params.headers),
+ }),
+ };
+
+ return this.createRequest(parameters);
+ };
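+
+ // A lookup sketch (assumptions as above; the speaker ID is a placeholder). The resolved
+ // `result` is a `SpeakerCustomModels` object grouped by customization ID:
+ //
+ //   const { result } = await textToSpeech.getSpeakerModel({ speakerId: '<speaker-id>' });
+ //   for (const model of result.customizations) {
+ //     console.log(model.customization_id, model.prompts.map((p) => p.prompt_id));
+ //   }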
+
+ /**
+ * Delete a speaker model.
+ *
+ * Deletes an existing speaker model from the service instance. The service deletes the enrolled speaker with the
+ * specified speaker ID. You must use credentials for the instance of the service that owns a speaker model to delete
+ * the speaker.
+ *
+ * Any prompts that are associated with the deleted speaker are not affected by the speaker's deletion. The prosodic
+ * data that defines the quality of a prompt is established when the prompt is created. A prompt is static and remains
+ * unaffected by deletion of its associated speaker. However, the prompt cannot be resubmitted or updated with its
+ * original speaker once that speaker is deleted.
+ *
+ * **Beta:** Speaker models and the custom prompts with which they are used are beta functionality that is supported
+ * only for use with US English custom models and voices.
+ *
+ * **See also:** [Deleting a speaker
+ * model](https://cloud.ibm.com/docs/text-to-speech?topic=text-to-speech-tbe-speaker-models#tbe-speaker-models-delete).
+ *
+ * @param {Object} params - The parameters to send to the service.
+ * @param {string} params.speakerId - The speaker ID (GUID) of the speaker model. You must make the request with
+ * service credentials for the instance of the service that owns the speaker model.
+ * @param {OutgoingHttpHeaders} [params.headers] - Custom request headers
+ * @returns {Promise<TextToSpeechV1.Response<TextToSpeechV1.Empty>>}
+ */
+ public deleteSpeakerModel(params: TextToSpeechV1.DeleteSpeakerModelParams): Promise<TextToSpeechV1.Response<TextToSpeechV1.Empty>> {
+ const _params = Object.assign({}, params);
+ const requiredParams = ['speakerId'];
+
+ const missingParams = getMissingParams(_params, requiredParams);
+ if (missingParams) {
+ return Promise.reject(missingParams);
+ }
+
+ const path = {
+ 'speaker_id': _params.speakerId
+ };
+
+ const sdkHeaders = getSdkHeaders(TextToSpeechV1.DEFAULT_SERVICE_NAME, 'v1', 'deleteSpeakerModel');
+
+ const parameters = {
+ options: {
+ url: '/v1/speakers/{speaker_id}',
+ method: 'DELETE',
+ path,
+ },
+ defaultOptions: extend(true, {}, this.baseOptions, {
+ headers: extend(true, sdkHeaders, {
+ }, _params.headers),
+ }),
+ };
+
+ return this.createRequest(parameters);
+ };
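+
+ // A deletion sketch (assumptions as above). Existing prompts that reference the speaker
+ // keep their prosodic data, but cannot be resubmitted once the speaker is gone:
+ //
+ //   await textToSpeech.deleteSpeakerModel({ speakerId: '<speaker-id>' });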
+
 /*************************
 * userData
 ************************/

@@ -1127,6 +1670,7 @@ namespace TextToSpeechV1 {
 ES_LA_SOFIAV3VOICE = 'es-LA_SofiaV3Voice',
 ES_US_SOFIAVOICE = 'es-US_SofiaVoice',
 ES_US_SOFIAV3VOICE = 'es-US_SofiaV3Voice',
+ FR_CA_LOUISEV3VOICE = 'fr-CA_LouiseV3Voice',
 FR_FR_NICOLASV3VOICE = 'fr-FR_NicolasV3Voice',
 FR_FR_RENEEVOICE = 'fr-FR_ReneeVoice',
 FR_FR_RENEEV3VOICE = 'fr-FR_ReneeV3Voice',
@@ -1221,6 +1765,7 @@ namespace TextToSpeechV1 {
 ES_LA_SOFIAV3VOICE = 'es-LA_SofiaV3Voice',
 ES_US_SOFIAVOICE = 'es-US_SofiaVoice',
 ES_US_SOFIAV3VOICE = 'es-US_SofiaV3Voice',
+ FR_CA_LOUISEV3VOICE = 'fr-CA_LouiseV3Voice',
 FR_FR_NICOLASV3VOICE = 'fr-FR_NicolasV3Voice',
 FR_FR_RENEEVOICE = 'fr-FR_ReneeVoice',
 FR_FR_RENEEV3VOICE = 'fr-FR_ReneeV3Voice',
@@ -1300,6 +1845,7 @@ namespace TextToSpeechV1 {
 ES_LA_SOFIAV3VOICE = 'es-LA_SofiaV3Voice',
 ES_US_SOFIAVOICE = 'es-US_SofiaVoice',
 ES_US_SOFIAV3VOICE = 'es-US_SofiaV3Voice',
+ FR_CA_LOUISEV3VOICE = 'fr-CA_LouiseV3Voice',
 FR_FR_NICOLASV3VOICE = 'fr-FR_NicolasV3Voice',
 FR_FR_RENEEVOICE = 'fr-FR_ReneeVoice',
 FR_FR_RENEEV3VOICE = 'fr-FR_ReneeV3Voice',
@@ -1352,6 +1898,7 @@ namespace TextToSpeechV1 {
 ES_ES = 'es-ES',
 ES_LA = 'es-LA',
 ES_US = 'es-US',
+ FR_CA = 'fr-CA',
 FR_FR = 'fr-FR',
 IT_IT = 'it-IT',
 JA_JP = 'ja-JP',
@@ -1383,6 +1930,7 @@ namespace TextToSpeechV1 {
 ES_ES = 'es-ES',
 ES_LA = 'es-LA',
 ES_US = 'es-US',
+ FR_CA = 'fr-CA',
 FR_FR = 'fr-FR',
 IT_IT = 'it-IT',
 JA_JP = 'ja-JP',
@@ -1524,6 +2072,113 @@ namespace TextToSpeechV1 {
 headers?: OutgoingHttpHeaders;
 }

+ /** Parameters for the `listCustomPrompts` operation. */
+ export interface ListCustomPromptsParams {
+ /** The customization ID (GUID) of the custom model. You must make the request with credentials for the instance
+ * of the service that owns the custom model.
+ */
+ customizationId: string;
+ headers?: OutgoingHttpHeaders;
+ }
+
+ /** Parameters for the `addCustomPrompt` operation. */
+ export interface AddCustomPromptParams {
+ /** The customization ID (GUID) of the custom model. You must make the request with credentials for the instance
+ * of the service that owns the custom model.
+ */
+ customizationId: string;
+ /** The identifier of the prompt that is to be added to the custom model:
+ * * Include a maximum of 49 characters in the ID.
+ * * Include only alphanumeric characters and `_` (underscores) in the ID.
+ * * Do not include XML sensitive characters (double quotes, single quotes, ampersands, angle brackets, and
+ * slashes) in the ID.
+ * * To add a new prompt, the ID must be unique for the specified custom model. Otherwise, the new information for
+ * the prompt overwrites the existing prompt that has that ID.
+ */
+ promptId: string;
+ /** Information about the prompt that is to be added to a custom model. The following example of a
+ * `PromptMetadata` object includes both the required prompt text and an optional speaker model ID:
+ *
+ * `{ "prompt_text": "Thank you and good-bye!", "speaker_id": "823068b2-ed4e-11ea-b6e0-7b6456aa95cc" }`.
+ */
+ metadata: PromptMetadata;
+ /** An audio file that speaks the text of the prompt with intonation and prosody that matches how you would like
+ * the prompt to be spoken.
+ * * The prompt audio must be in WAV format and must have a minimum sampling rate of 16 kHz. The service accepts
+ * audio with higher sampling rates. The service transcodes all audio to 16 kHz before processing it.
+ * * The length of the prompt audio is limited to 30 seconds.
+ */
+ file: NodeJS.ReadableStream|Buffer;
+ /** The filename for the file. */
+ filename: string;
+ headers?: OutgoingHttpHeaders;
+ }
+
+ /** Parameters for the `getCustomPrompt` operation. */
+ export interface GetCustomPromptParams {
+ /** The customization ID (GUID) of the custom model. You must make the request with credentials for the instance
+ * of the service that owns the custom model.
+ */
+ customizationId: string;
+ /** The identifier (name) of the prompt. */
+ promptId: string;
+ headers?: OutgoingHttpHeaders;
+ }
+
+ /** Parameters for the `deleteCustomPrompt` operation. */
+ export interface DeleteCustomPromptParams {
+ /** The customization ID (GUID) of the custom model. You must make the request with credentials for the instance
+ * of the service that owns the custom model.
+ */
+ customizationId: string;
+ /** The identifier (name) of the prompt that is to be deleted. */
+ promptId: string;
+ headers?: OutgoingHttpHeaders;
+ }
+
+ /** Parameters for the `listSpeakerModels` operation. */
+ export interface ListSpeakerModelsParams {
+ headers?: OutgoingHttpHeaders;
+ }
+
+ /** Parameters for the `createSpeakerModel` operation. */
+ export interface CreateSpeakerModelParams {
+ /** The name of the speaker that is to be added to the service instance.
+ * * Include a maximum of 49 characters in the name.
+ * * Include only alphanumeric characters and `_` (underscores) in the name.
+ * * Do not include XML sensitive characters (double quotes, single quotes, ampersands, angle brackets, and
+ * slashes) in the name.
+ * * Do not use the name of an existing speaker that is already defined for the service instance.
+ */
+ speakerName: string;
+ /** An enrollment audio file that contains a sample of the speaker’s voice.
+ * * The enrollment audio must be in WAV format and must have a minimum sampling rate of 16 kHz. The service
+ * accepts audio with higher sampling rates. It transcodes all audio to 16 kHz before processing it.
+ * * The length of the enrollment audio is limited to 1 minute. Speaking one or two paragraphs of text that include
+ * five to ten sentences is recommended.
+ */
+ audio: NodeJS.ReadableStream|Buffer;
+ headers?: OutgoingHttpHeaders;
+ }
+
+ /** Parameters for the `getSpeakerModel` operation. */
+ export interface GetSpeakerModelParams {
+ /** The speaker ID (GUID) of the speaker model. You must make the request with service credentials for the
+ * instance of the service that owns the speaker model.
+ */
+ speakerId: string;
+ headers?: OutgoingHttpHeaders;
+ }
+
+ /** Parameters for the `deleteSpeakerModel` operation. */
+ export interface DeleteSpeakerModelParams {
+ /** The speaker ID (GUID) of the speaker model. You must make the request with service credentials for the
+ * instance of the service that owns the speaker model.
+ */
+ speakerId: string;
+ headers?: OutgoingHttpHeaders;
+ }
+
 /** Parameters for the `deleteUserData` operation. */
 export interface DeleteUserDataParams {
 /** The customer ID for which all data is to be deleted. */
@@ -1559,11 +2214,15 @@ namespace TextToSpeechV1 {
 /** The description of the custom model. */
 description?: string;
 /** An array of `Word` objects that lists the words and their translations from the custom model. The words are
- * listed in alphabetical order, with uppercase letters listed before lowercase letters. The array is empty if the
- * custom model contains no words. This field is returned only by the **Get a voice** method and only when you
- * specify the customization ID of a custom model.
+ * listed in alphabetical order, with uppercase letters listed before lowercase letters. The array is empty if no
+ * words are defined for the custom model. This field is returned only by the **Get a custom model** method.
 */
 words?: Word[];
+ /** An array of `Prompt` objects that provides information about the prompts that are defined for the specified
+ * custom model. The array is empty if no prompts are defined for the custom model. This field is returned only by
+ * the **Get a custom model** method.
+ */
+ prompts?: Prompt[];
 }

 /** Information about existing custom models. */
@@ -1575,6 +2234,52 @@ namespace TextToSpeechV1 {
 customizations: CustomModel[];
 }

+ /** Information about a custom prompt. */
+ export interface Prompt {
+ /** The user-specified text of the prompt. */
+ prompt: string;
+ /** The user-specified identifier (name) of the prompt. */
+ prompt_id: string;
+ /** The status of the prompt:
+ * * `processing`: The service received the request to add the prompt and is analyzing the validity of the prompt.
+ * * `available`: The service successfully validated the prompt, which is now ready for use in a speech synthesis
+ * request.
+ * * `failed`: The service's validation of the prompt failed. The status of the prompt includes an `error` field
+ * that describes the reason for the failure.
+ */
+ status: string;
+ /** If the status of the prompt is `failed`, an error message that describes the reason for the failure. The
+ * field is omitted if no error occurred.
+ */
+ error?: string;
+ /** The speaker ID (GUID) of the speaker for which the prompt was defined. The field is omitted if no speaker ID
+ * was specified.
+ */
+ speaker_id?: string;
+ }
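+
+ // An illustrative (not service-verified) JSON payload that deserializes into this
+ // `Prompt` interface; the speaker ID echoes the placeholder GUID used elsewhere in
+ // these docs:
+ //
+ //   {
+ //     "prompt": "Thank you and good-bye!",
+ //     "prompt_id": "goodbye",
+ //     "status": "available",
+ //     "speaker_id": "823068b2-ed4e-11ea-b6e0-7b6456aa95cc"
+ //   }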
+
+ /** Information about the prompt that is to be added to a custom model. The following example of a `PromptMetadata` object includes both the required prompt text and an optional speaker model ID: `{ "prompt_text": "Thank you and good-bye!", "speaker_id": "823068b2-ed4e-11ea-b6e0-7b6456aa95cc" }`. */
+ export interface PromptMetadata {
+ /** The required written text of the spoken prompt. The length of a prompt's text is limited to a few sentences.
+ * Speaking one or two sentences of text is the recommended limit. A prompt cannot contain more than 1000
+ * characters of text. Escape any XML control characters (double quotes, single quotes, ampersands, angle brackets,
+ * and slashes) that appear in the text of the prompt.
+ */
+ prompt_text: string;
+ /** The optional speaker ID (GUID) of a previously defined speaker model that is to be associated with the
+ * prompt.
+ */
+ speaker_id?: string;
+ }
+
+ /** Information about the custom prompts that are defined for a custom model. */
+ export interface Prompts {
+ /** An array of `Prompt` objects that provides information about the prompts that are defined for the specified
+ * custom model. The array is empty if no prompts are defined for the custom model.
+ */
+ prompts: Prompt[];
+ }
+
 /** The pronunciation of the specified text. */
 export interface Pronunciation {
 /** The pronunciation of the specified text in the requested voice and format. If a custom model is specified,
 * the pronunciation also reflects that custom model.
 */
@@ -1583,6 +2288,67 @@ namespace TextToSpeechV1 {
 pronunciation: string;
 }

+ /** Information about a speaker model. */
+ export interface Speaker {
+ /** The speaker ID (GUID) of the speaker. */
+ speaker_id: string;
+ /** The user-defined name of the speaker. */
+ name: string;
+ }
+
+ /** A custom model for which the speaker has defined prompts. */
+ export interface SpeakerCustomModel {
+ /** The customization ID (GUID) of a custom model for which the speaker has defined one or more prompts. */
+ customization_id: string;
+ /** An array of `SpeakerPrompt` objects that provides information about each prompt that the user has defined
+ * for the custom model.
+ */
+ prompts: SpeakerPrompt[];
+ }
+
+ /** Custom models for which the speaker has defined prompts. */
+ export interface SpeakerCustomModels {
+ /** An array of `SpeakerCustomModel` objects. Each object provides information about the prompts that are
+ * defined for a specified speaker in the custom models that are owned by a specified service instance. The array
+ * is empty if no prompts are defined for the speaker.
+ */
+ customizations: SpeakerCustomModel[];
+ }
+
+ /** The speaker ID of the speaker model. */
+ export interface SpeakerModel {
+ /** The speaker ID (GUID) of the speaker model. */
+ speaker_id: string;
+ }
+
+ /** A prompt that a speaker has defined for a custom model. */
+ export interface SpeakerPrompt {
+ /** The user-specified text of the prompt. */
+ prompt: string;
+ /** The user-specified identifier (name) of the prompt. */
+ prompt_id: string;
+ /** The status of the prompt:
+ * * `processing`: The service received the request to add the prompt and is analyzing the validity of the prompt.
+ * * `available`: The service successfully validated the prompt, which is now ready for use in a speech synthesis
+ * request.
+ * * `failed`: The service's validation of the prompt failed. The status of the prompt includes an `error` field
+ * that describes the reason for the failure.
+ */ + status: string; + /** If the status of the prompt is `failed`, an error message that describes the reason for the failure. The + * field is omitted if no error occurred. + */ + error?: string; + } + + /** Information about all speaker models for the service instance. */ + export interface Speakers { + /** An array of `Speaker` objects that provides information about the speakers for the service instance. The + * array is empty if the service instance has no speakers. + */ + speakers: Speaker[]; + } + /** Additional service features that are supported with the voice. */ export interface SupportedFeatures { /** If `true`, the voice can be customized; if `false`, the voice cannot be customized. (Same as