forked from microsoft/BotFramework-WebChat
-
Notifications
You must be signed in to change notification settings - Fork 4
Commit
This commit does not belong to any branch on this repository, and may belong to a fork outside of the repository.
- Loading branch information
Showing
7 changed files
with
172 additions
and
15 deletions.
There are no files selected for viewing
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,19 @@ | ||
// Picks the voice to use for speech synthesis of an activity.
//
// Candidate locales are tried in priority order:
//   1. Locale as defined on the activity itself
//   2. Locale as passed into Web Chat (`language`)
//   3. The browser's locale
//   4. English (United States)
//   5. Otherwise, fall back to the first available voice
//
// Within each candidate locale, a voice powered by a deep neural network
// (keyword "neural" in the voice name) is preferred over a standard voice.
//
// @param {Array<{ lang: string, name: string }>} voices - Available voices.
// @param {{ language: string }} options - Web Chat options; only `language` is read.
// @param {{ locale?: string }} [activity] - The activity being spoken; may be absent.
// @returns {Object|undefined} The selected voice, or undefined if `voices` is empty.
export default function selectVoice(voices, { language }, activity) {
  // Optional chaining: some code paths may invoke this without an activity,
  // in which case the activity locale is simply skipped.
  const candidateLanguages = [activity?.locale, language, window.navigator.language, 'en-US'];

  return (
    candidateLanguages.reduce(
      (result, targetLanguage) =>
        result ||
        voices.find(({ lang, name }) => lang === targetLanguage && /neural/iu.test(name)) ||
        voices.find(({ lang }) => lang === targetLanguage),
      null
    ) || voices[0]
  );
}
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,43 @@ | ||
# Sample - Integrating with Cognitive Services Speech Services | ||
|
||
## Description | ||
|
||
A simple page with Web Chat integrated with speech-to-text and text-to-speech functionality and the ability to select different voices.
|
||
# Test out the hosted sample | ||
|
||
- [Try out MockBot](https://microsoft.github.io/BotFramework-WebChat/06.g.select-voice) | ||
|
||
# How to run | ||
|
||
- Fork this repository | ||
- Navigate to `/Your-Local-WebChat/samples/06.g.select-voice` in command line | ||
- Run `npx serve` | ||
- Browse to [http://localhost:5000/](http://localhost:5000/) | ||
|
||
# Things to try out | ||
|
||
- Click on microphone button | ||
- Say "Tell me a story." | ||
- It should recognize as "Tell me a story." | ||
- It should speak out two activities sent from the bot: one in English and the other in Cantonese.
|
||
# Code | ||
|
||
> Jump to [completed code](#completed-code) to see the end-result `index.html`. | ||
### Goals of this bot | ||
|
||
Demonstrates the ability to select a different voice for speech synthesis on the fly.
|
||
## Completed code | ||
|
||
(TBD) | ||
|
||
# Further reading | ||
|
||
- [Cognitive Services Speech Services website](https://azure.microsoft.com/en-us/services/cognitive-services/speech-services/)
|
||
## Full list of Web Chat Hosted Samples | ||
|
||
View the list of [available Web Chat samples](https://github.com/microsoft/BotFramework-WebChat/tree/master/samples) |
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,57 @@ | ||
<!DOCTYPE html>
<html lang="en-US">
  <head>
    <title>Web Chat: Select voice</title>
    <!-- Cognitive Services Speech Services adapter is only available in full bundle -->

    <meta name="viewport" content="width=device-width, initial-scale=1.0">
    <!--
      This CDN points to the latest official release of Web Chat. If you need to test against Web Chat's latest bits, please refer to pointing to Web Chat's MyGet feed:
      https://github.com/microsoft/BotFramework-WebChat#how-to-test-with-web-chats-latest-bits
    -->
    <script src="https://cdn.botframework.com/botframework-webchat/latest/webchat.js"></script>
    <style>
      html, body { height: 100% }
      body { margin: 0 }

      #webchat {
        height: 100%;
        width: 100%;
      }
    </style>
  </head>
  <body>
    <div id="webchat" role="main"></div>
    <script>
      (async function () {
        // In this demo, we are using Direct Line token from MockBot.
        // Your client code must provide either a secret or a token to talk to your bot.
        // Tokens are more secure. To learn about the differences between secrets and tokens
        // and to understand the risks associated with using secrets, visit https://docs.microsoft.com/en-us/azure/bot-service/rest-api/bot-framework-rest-direct-line-3-0-authentication?view=azure-bot-service-4.0

        const directLineTokenResponse = await fetch('https://webchat-mockbot.azurewebsites.net/directline/token', { method: 'POST' });
        const { token } = await directLineTokenResponse.json();

        // Fetch a short-lived authorization token and region for Cognitive Services Speech Services.
        const speechTokenResponse = await fetch('https://webchat-mockbot.azurewebsites.net/speechservices/token', { method: 'POST' });
        const { region, token: authorizationToken } = await speechTokenResponse.json();

        // FIX: declare with `const` — the original assignment created an implicit
        // global (and would throw a ReferenceError under strict mode).
        const webSpeechPonyfillFactory = await window.WebChat.createCognitiveServicesSpeechServicesPonyfillFactory({ authorizationToken, region });

        window.WebChat.renderWebChat({
          directLine: window.WebChat.createDirectLine({ token }),
          selectVoice: (voices, { language }, activity) =>
            // If the activity is in Cantonese, use a voice with keyword "TracyRUS".
            // Otherwise, use "JessaNeural" (preferred) or "Jessa".
            activity.locale === 'zh-HK' ?
              voices.find(({ name }) => /TracyRUS/iu.test(name))
            :
              voices.find(({ name }) => /JessaNeural/iu.test(name))
              || voices.find(({ name }) => /Jessa/iu.test(name)),
          webSpeechPonyfillFactory
        }, document.getElementById('webchat'));

        // Move keyboard focus into the transcript so the page is immediately usable.
        document.querySelector('#webchat > *').focus();
      })().catch(err => console.error(err));
    </script>
  </body>
</html>