import java.io.FileInputStream;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;

import com.ibm.watson.developer_cloud.http.HttpMediaType;
import com.ibm.watson.developer_cloud.speech_to_text.v1.model.SpeechResults;
|
|
11 | 13 | * Recognize using WebSockets a sample wav file and print the transcript into the console output.
|
12 | 14 | */
|
13 | 15 | public class RecognizeUsingWebSockets {
|
14 |
| - public static void main(String[] args) throws FileNotFoundException { |
| 16 | + private static CountDownLatch lock = new CountDownLatch(1); |
| 17 | + |
| 18 | + public static void main(String[] args) throws FileNotFoundException, InterruptedException { |
15 | 19 | SpeechToText service = new SpeechToText();
|
16 | 20 | service.setUsernameAndPassword("<username>", "<password>");
|
17 | 21 |
|
18 | 22 | FileInputStream audio = new FileInputStream("src/test/resources/speech_to_text/sample1.wav");
|
19 | 23 |
|
20 |
| - RecognizeOptions options = |
21 |
| - new RecognizeOptions().continuous(true).interimResults(true) |
22 |
| - .contentType(HttpMediaType.AUDIO_WAV); |
| 24 | + RecognizeOptions options = new RecognizeOptions(); |
| 25 | + options.continuous(true).interimResults(true).contentType(HttpMediaType.AUDIO_WAV); |
23 | 26 |
|
24 | 27 | service.recognizeUsingWebSockets(audio, options, new BaseRecognizeDelegate() {
|
25 |
| - |
26 | 28 | @Override
|
27 | 29 | public void onMessage(SpeechResults speechResults) {
|
28 | 30 | System.out.println(speechResults);
|
| 31 | + if (speechResults.isFinal()) |
| 32 | + lock.countDown(); |
29 | 33 | }
|
30 | 34 | });
|
| 35 | + |
| 36 | + lock.await(20000, TimeUnit.MILLISECONDS); |
31 | 37 | }
|
32 | 38 | }
|
0 commit comments