diff --git a/.gitignore b/.gitignore
index a4b7d6a3..59add15d 100644
--- a/.gitignore
+++ b/.gitignore
@@ -13,3 +13,5 @@ google-cloud-logging-winston-*.tgz
google-cloud-logging-bunyan-*.tgz
package-lock.json
__pycache__
+.vscode/
+dist/
diff --git a/.jsdoc.js b/.jsdoc.js
index e3e6ad03..a772b520 100644
--- a/.jsdoc.js
+++ b/.jsdoc.js
@@ -32,7 +32,7 @@ module.exports = {
source: {
excludePattern: '(^|\\/|\\\\)[._]',
include: [
- 'src'
+ 'build/src'
],
includePattern: '\\.js$'
},
diff --git a/package.json b/package.json
index 4a3c1f47..06a2030c 100644
--- a/package.json
+++ b/package.json
@@ -8,10 +8,10 @@
"node": ">=8.10.0"
},
"repository": "googleapis/nodejs-speech",
- "main": "./src/index.js",
+ "main": "./build/src/index.js",
"files": [
- "protos",
- "src",
+ "build/protos",
+ "build/src",
"AUTHORS",
"LICENSE"
],
@@ -29,40 +29,51 @@
"Google Cloud Speech API"
],
"scripts": {
- "cover": "nyc --reporter=lcov mocha test/*.js && nyc report",
"docs": "jsdoc -c .jsdoc.js",
- "lint": "eslint '**/*.js'",
+ "lint": "gts check && eslint samples/*.js samples/system-test/*.js system-test/*.js",
"samples-test": "cd samples/ && npm link ../ && npm test && cd ../",
- "system-test": "mocha system-test/*.js --timeout 600000",
- "test-no-cover": "mocha test/*.js",
- "test": "npm run cover",
- "fix": "eslint --fix '**/*.js'",
+ "system-test": "c8 mocha build/system-test/*.js --timeout 600000",
+ "test": "c8 mocha build/test/*.js",
+ "fix": "gts fix && eslint --fix samples/*.js samples/system-test/*.js system-test/*.js",
"docs-test": "linkinator docs",
- "predocs-test": "npm run docs"
+ "predocs-test": "npm run docs",
+ "compile": "tsc -p . && cp system-test/*.js build/system-test/ && cp -r protos build/",
+ "prepare": "npm run compile",
+ "pretest": "npm run compile"
},
"dependencies": {
"@google-cloud/common": "^2.0.0",
- "google-gax": "^1.7.5",
+ "google-gax": "^1.11.1",
"protobufjs": "^6.8.6",
"pumpify": "^2.0.0",
- "stream-events": "^1.0.4"
+ "stream-events": "^1.0.4",
+ "@types/pumpify": "^1.4.1"
},
"devDependencies": {
+ "@types/mocha": "^5.2.7",
+ "@types/node": "^12.0.0",
+ "@types/sinon": "^7.5.1",
+ "c8": "^6.0.0",
"codecov": "^3.0.2",
"eslint": "^6.0.0",
"eslint-config-prettier": "^6.0.0",
"eslint-plugin-node": "^10.0.0",
"eslint-plugin-prettier": "^3.0.0",
+ "gts": "^1.1.2",
"intelli-espower-loader": "^1.0.1",
"jsdoc": "^3.5.5",
"jsdoc-fresh": "^1.0.1",
"jsdoc-region-tag": "^1.0.2",
"linkinator": "^1.5.0",
"mocha": "^6.0.0",
- "nyc": "^14.0.0",
+ "null-loader": "^3.0.0",
+ "pack-n-play": "^1.0.0-2",
"power-assert": "^1.6.0",
"prettier": "^1.13.5",
- "proxyquire": "^2.1.0",
- "sinon": "^7.0.0"
+ "sinon": "^7.0.0",
+ "ts-loader": "^6.2.1",
+ "typescript": "~3.7.0",
+ "webpack": "^4.41.2",
+ "webpack-cli": "^3.3.10"
}
}
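Note: the new `compile` script runs `tsc -p .`, which presupposes a `tsconfig.json` at the repository root; that file is not part of this diff. A minimal sketch of a config consistent with these scripts (the `gts` base-config path is how gts 1.x ships it; the `include` globs are assumptions — system-test stays plain JavaScript and is copied into `build/` by the `compile` script rather than compiled):

    {
      "extends": "./node_modules/gts/tsconfig-google.json",
      "compilerOptions": {
        "rootDir": ".",
        "outDir": "build"
      },
      "include": ["src/*.ts", "test/*.ts"]
    }

With `rootDir` at the repo root, `src/index.ts` compiles to `build/src/index.js`, matching the updated `main` and `files` entries above.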
diff --git a/prettier.config.js b/prettier.config.js
new file mode 100644
index 00000000..a425d3f7
--- /dev/null
+++ b/prettier.config.js
@@ -0,0 +1,4 @@
+module.exports = {
+ singleQuote: true,
+ trailingComma: 'es5',
+};
diff --git a/protos/google/cloud/speech/v1beta1/cloud_speech.proto b/protos/google/cloud/speech/v1beta1/cloud_speech.proto
deleted file mode 100644
index 82b82d3c..00000000
--- a/protos/google/cloud/speech/v1beta1/cloud_speech.proto
+++ /dev/null
@@ -1,421 +0,0 @@
-// Copyright 2017 Google Inc.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-syntax = "proto3";
-
-package google.cloud.speech.v1beta1;
-
-import "google/api/annotations.proto";
-import "google/longrunning/operations.proto";
-import "google/protobuf/duration.proto";
-import "google/protobuf/timestamp.proto";
-import "google/rpc/status.proto";
-
-option go_package = "google.golang.org/genproto/googleapis/cloud/speech/v1beta1;speech";
-option java_multiple_files = true;
-option java_outer_classname = "SpeechProto";
-option java_package = "com.google.cloud.speech.v1beta1";
-
-
-// Service that implements Google Cloud Speech API.
-service Speech {
- // Performs synchronous speech recognition: receive results after all audio
- // has been sent and processed.
- rpc SyncRecognize(SyncRecognizeRequest) returns (SyncRecognizeResponse) {
- option (google.api.http) = { post: "/v1beta1/speech:syncrecognize" body: "*" };
- }
-
- // Performs asynchronous speech recognition: receive results via the
- // [google.longrunning.Operations]
- // (/speech/reference/rest/v1beta1/operations#Operation)
- // interface. Returns either an
- // `Operation.error` or an `Operation.response` which contains
- // an `AsyncRecognizeResponse` message.
- rpc AsyncRecognize(AsyncRecognizeRequest) returns (google.longrunning.Operation) {
- option (google.api.http) = { post: "/v1beta1/speech:asyncrecognize" body: "*" };
- }
-
- // Performs bidirectional streaming speech recognition: receive results while
- // sending audio. This method is only available via the gRPC API (not REST).
- rpc StreamingRecognize(stream StreamingRecognizeRequest) returns (stream StreamingRecognizeResponse);
-}
-
-// The top-level message sent by the client for the `SyncRecognize` method.
-message SyncRecognizeRequest {
- // *Required* Provides information to the recognizer that specifies how to
- // process the request.
- RecognitionConfig config = 1;
-
- // *Required* The audio data to be recognized.
- RecognitionAudio audio = 2;
-}
-
-// The top-level message sent by the client for the `AsyncRecognize` method.
-message AsyncRecognizeRequest {
- // *Required* Provides information to the recognizer that specifies how to
- // process the request.
- RecognitionConfig config = 1;
-
- // *Required* The audio data to be recognized.
- RecognitionAudio audio = 2;
-}
-
-// The top-level message sent by the client for the `StreamingRecognize` method.
-// Multiple `StreamingRecognizeRequest` messages are sent. The first message
-// must contain a `streaming_config` message and must not contain `audio` data.
-// All subsequent messages must contain `audio` data and must not contain a
-// `streaming_config` message.
-message StreamingRecognizeRequest {
- // The streaming request, which is either a streaming config or audio content.
- oneof streaming_request {
- // Provides information to the recognizer that specifies how to process the
- // request. The first `StreamingRecognizeRequest` message must contain a
- // `streaming_config` message.
- StreamingRecognitionConfig streaming_config = 1;
-
- // The audio data to be recognized. Sequential chunks of audio data are sent
- // in sequential `StreamingRecognizeRequest` messages. The first
- // `StreamingRecognizeRequest` message must not contain `audio_content` data
- // and all subsequent `StreamingRecognizeRequest` messages must contain
- // `audio_content` data. The audio bytes must be encoded as specified in
- // `RecognitionConfig`. Note: as with all bytes fields, protobuffers use a
- // pure binary representation (not base64). See
- // [audio limits](https://cloud.google.com/speech/limits#content).
- bytes audio_content = 2;
- }
-}
-
-// Provides information to the recognizer that specifies how to process the
-// request.
-message StreamingRecognitionConfig {
- // *Required* Provides information to the recognizer that specifies how to
- // process the request.
- RecognitionConfig config = 1;
-
- // *Optional* If `false` or omitted, the recognizer will perform continuous
- // recognition (continuing to wait for and process audio even if the user
- // pauses speaking) until the client closes the input stream (gRPC API) or
- // until the maximum time limit has been reached. May return multiple
- // `StreamingRecognitionResult`s with the `is_final` flag set to `true`.
- //
- // If `true`, the recognizer will detect a single spoken utterance. When it
- // detects that the user has paused or stopped speaking, it will return an
- // `END_OF_UTTERANCE` event and cease recognition. It will return no more than
- // one `StreamingRecognitionResult` with the `is_final` flag set to `true`.
- bool single_utterance = 2;
-
- // *Optional* If `true`, interim results (tentative hypotheses) may be
- // returned as they become available (these interim results are indicated with
- // the `is_final=false` flag).
- // If `false` or omitted, only `is_final=true` result(s) are returned.
- bool interim_results = 3;
-}
-
-// Provides information to the recognizer that specifies how to process the
-// request.
-message RecognitionConfig {
- // Audio encoding of the data sent in the audio message. All encodings support
- // only 1 channel (mono) audio. Only `FLAC` includes a header that describes
- // the bytes of audio that follow the header. The other encodings are raw
- // audio bytes with no header.
- //
- // For best results, the audio source should be captured and transmitted using
- // a lossless encoding (`FLAC` or `LINEAR16`). Recognition accuracy may be
- // reduced if lossy codecs (such as AMR, AMR_WB and MULAW) are used to capture
- // or transmit the audio, particularly if background noise is present.
- enum AudioEncoding {
- // Not specified. Will return result [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT].
- ENCODING_UNSPECIFIED = 0;
-
- // Uncompressed 16-bit signed little-endian samples (Linear PCM).
- // This is the only encoding that may be used by `AsyncRecognize`.
- LINEAR16 = 1;
-
- // This is the recommended encoding for `SyncRecognize` and
- // `StreamingRecognize` because it uses lossless compression; therefore
- // recognition accuracy is not compromised by a lossy codec.
- //
- // The stream FLAC (Free Lossless Audio Codec) encoding is specified at:
- // http://flac.sourceforge.net/documentation.html.
- // 16-bit and 24-bit samples are supported.
- // Not all fields in STREAMINFO are supported.
- FLAC = 2;
-
- // 8-bit samples that compand 14-bit audio samples using G.711 PCMU/mu-law.
- MULAW = 3;
-
- // Adaptive Multi-Rate Narrowband codec. `sample_rate` must be 8000 Hz.
- AMR = 4;
-
- // Adaptive Multi-Rate Wideband codec. `sample_rate` must be 16000 Hz.
- AMR_WB = 5;
- }
-
- // *Required* Encoding of audio data sent in all `RecognitionAudio` messages.
- AudioEncoding encoding = 1;
-
- // *Required* Sample rate in Hertz of the audio data sent in all
- // `RecognitionAudio` messages. Valid values are: 8000-48000.
- // 16000 is optimal. For best results, set the sampling rate of the audio
- // source to 16000 Hz. If that's not possible, use the native sample rate of
- // the audio source (instead of re-sampling).
- int32 sample_rate = 2;
-
- // *Optional* The language of the supplied audio as a BCP-47 language tag.
- // Example: "en-GB" https://www.rfc-editor.org/rfc/bcp/bcp47.txt
- // If omitted, defaults to "en-US". See
- // [Language Support](https://cloud.google.com/speech/docs/languages)
- // for a list of the currently supported language codes.
- string language_code = 3;
-
- // *Optional* Maximum number of recognition hypotheses to be returned.
- // Specifically, the maximum number of `SpeechRecognitionAlternative` messages
- // within each `SpeechRecognitionResult`.
- // The server may return fewer than `max_alternatives`.
- // Valid values are `0`-`30`. A value of `0` or `1` will return a maximum of
- // one. If omitted, will return a maximum of one.
- int32 max_alternatives = 4;
-
- // *Optional* If set to `true`, the server will attempt to filter out
- // profanities, replacing all but the initial character in each filtered word
- // with asterisks, e.g. "f***". If set to `false` or omitted, profanities
- // won't be filtered out.
- bool profanity_filter = 5;
-
- // *Optional* A means to provide context to assist the speech recognition.
- SpeechContext speech_context = 6;
-}
-
-// Provides "hints" to the speech recognizer to favor specific words and phrases
-// in the results.
-message SpeechContext {
- // *Optional* A list of strings containing words and phrases "hints" so that
- // the speech recognition is more likely to recognize them. This can be used
- // to improve the accuracy for specific words and phrases, for example, if
- // specific commands are typically spoken by the user. This can also be used
- // to add additional words to the vocabulary of the recognizer. See
- // [usage limits](https://cloud.google.com/speech/limits#content).
- repeated string phrases = 1;
-}
-
-// Contains audio data in the encoding specified in the `RecognitionConfig`.
-// Either `content` or `uri` must be supplied. Supplying both or neither
-// returns [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT]. See
-// [audio limits](https://cloud.google.com/speech/limits#content).
-message RecognitionAudio {
- // The audio source, which is either inline content or a GCS uri.
- oneof audio_source {
- // The audio data bytes encoded as specified in
- // `RecognitionConfig`. Note: as with all bytes fields, protobuffers use a
- // pure binary representation, whereas JSON representations use base64.
- bytes content = 1;
-
- // URI that points to a file that contains audio data bytes as specified in
- // `RecognitionConfig`. Currently, only Google Cloud Storage URIs are
- // supported, which must be specified in the following format:
- // `gs://bucket_name/object_name` (other URI formats return
- // [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT]). For more information, see
- // [Request URIs](https://cloud.google.com/storage/docs/reference-uris).
- string uri = 2;
- }
-}
-
-// The only message returned to the client by the `SyncRecognize` method. It
-// contains the result as zero or more sequential `SpeechRecognitionResult`
-// messages.
-message SyncRecognizeResponse {
- // *Output-only* Sequential list of transcription results corresponding to
- // sequential portions of audio.
- repeated SpeechRecognitionResult results = 2;
-}
-
-// The only message returned to the client by `AsyncRecognize`. It contains the
-// result as zero or more sequential `SpeechRecognitionResult` messages. It is
-// included in the `result.response` field of the `Operation` returned by the
-// `GetOperation` call of the `google::longrunning::Operations` service.
-message AsyncRecognizeResponse {
- // *Output-only* Sequential list of transcription results corresponding to
- // sequential portions of audio.
- repeated SpeechRecognitionResult results = 2;
-}
-
-// Describes the progress of a long-running `AsyncRecognize` call. It is
-// included in the `metadata` field of the `Operation` returned by the
-// `GetOperation` call of the `google::longrunning::Operations` service.
-message AsyncRecognizeMetadata {
- // Approximate percentage of audio processed thus far. Guaranteed to be 100
- // when the audio is fully processed and the results are available.
- int32 progress_percent = 1;
-
- // Time when the request was received.
- google.protobuf.Timestamp start_time = 2;
-
- // Time of the most recent processing update.
- google.protobuf.Timestamp last_update_time = 3;
-}
-
-// `StreamingRecognizeResponse` is the only message returned to the client by
-// `StreamingRecognize`. A series of one or more `StreamingRecognizeResponse`
-// messages are streamed back to the client.
-//
-// Here's an example of a series of ten `StreamingRecognizeResponse`s that might
-// be returned while processing audio:
-//
-// 1. endpointer_type: START_OF_SPEECH
-//
-// 2. results { alternatives { transcript: "tube" } stability: 0.01 }
-// result_index: 0
-//
-// 3. results { alternatives { transcript: "to be a" } stability: 0.01 }
-// result_index: 0
-//
-// 4. results { alternatives { transcript: "to be" } stability: 0.9 }
-// results { alternatives { transcript: " or not to be" } stability: 0.01 }
-// result_index: 0
-//
-// 5. results { alternatives { transcript: "to be or not to be"
-// confidence: 0.92 }
-// alternatives { transcript: "to bee or not to bee" }
-// is_final: true }
-// result_index: 0
-//
-// 6. results { alternatives { transcript: " that's" } stability: 0.01 }
-// result_index: 1
-//
-// 7. results { alternatives { transcript: " that is" } stability: 0.9 }
-// results { alternatives { transcript: " the question" } stability: 0.01 }
-// result_index: 1
-//
-// 8. endpointer_type: END_OF_SPEECH
-//
-// 9. results { alternatives { transcript: " that is the question"
-// confidence: 0.98 }
-// alternatives { transcript: " that was the question" }
-// is_final: true }
-// result_index: 1
-//
-// 10. endpointer_type: END_OF_AUDIO
-//
-// Notes:
-//
-// - Only two of the above responses, #5 and #9, contain final results; they
-//   are indicated by `is_final: true`. Concatenating these together generates
-//   the full transcript: "to be or not to be that is the question".
-//
-// - The others contain interim `results`. #4 and #7 contain two interim
-//   `results`: the first portion has a high stability and is less likely to
-//   change, while the second portion has a low stability and is very likely
-//   to change. A UI designer might choose to show only high stability `results`.
-//
-// - The specific `stability` and `confidence` values shown above are only for
-// illustrative purposes. Actual values may vary.
-//
-// - The `result_index` indicates the portion of audio that has had final
-// results returned, and is no longer being processed. For example, the
-// `results` in #6 and later correspond to the portion of audio after
-// "to be or not to be".
-message StreamingRecognizeResponse {
- // Indicates the type of endpointer event.
- enum EndpointerType {
- // No endpointer event specified.
- ENDPOINTER_EVENT_UNSPECIFIED = 0;
-
- // Speech has been detected in the audio stream, and the service is
- // beginning to process it.
- START_OF_SPEECH = 1;
-
- // Speech has ceased to be detected in the audio stream. (For example, the
- // user may have paused after speaking.) If `single_utterance` is `false`,
- // the service will continue to process audio, and if subsequent speech is
- // detected, will send another START_OF_SPEECH event.
- END_OF_SPEECH = 2;
-
- // This event is sent after the client has half-closed the input stream gRPC
- // connection and the server has received all of the audio. (The server may
- // still be processing the audio and may subsequently return additional
- // results.)
- END_OF_AUDIO = 3;
-
- // This event is only sent when `single_utterance` is `true`. It indicates
- // that the server has detected the end of the user's speech utterance and
- // expects no additional speech. Therefore, the server will not process
- // additional audio (although it may subsequently return additional
- // results). The client should stop sending additional audio data,
- // half-close the gRPC connection, and wait for any additional results
- // until the server closes the gRPC connection.
- END_OF_UTTERANCE = 4;
- }
-
- // *Output-only* If set, returns a [google.rpc.Status][google.rpc.Status] message that
- // specifies the error for the operation.
- google.rpc.Status error = 1;
-
- // *Output-only* This repeated list contains zero or more results that
- // correspond to consecutive portions of the audio currently being processed.
- // It contains zero or one `is_final=true` result (the newly settled portion),
- // followed by zero or more `is_final=false` results.
- repeated StreamingRecognitionResult results = 2;
-
- // *Output-only* Indicates the lowest index in the `results` array that has
- // changed. The repeated `StreamingRecognitionResult` results overwrite past
- // results at this index and higher.
- int32 result_index = 3;
-
- // *Output-only* Indicates the type of endpointer event.
- EndpointerType endpointer_type = 4;
-}
-
-// A streaming speech recognition result corresponding to a portion of the audio
-// that is currently being processed.
-message StreamingRecognitionResult {
- // *Output-only* May contain one or more recognition hypotheses (up to the
- // maximum specified in `max_alternatives`).
- repeated SpeechRecognitionAlternative alternatives = 1;
-
- // *Output-only* If `false`, this `StreamingRecognitionResult` represents an
- // interim result that may change. If `true`, this is the final time the
- // speech service will return this particular `StreamingRecognitionResult`,
- // the recognizer will not return any further hypotheses for this portion of
- // the transcript and corresponding audio.
- bool is_final = 2;
-
- // *Output-only* An estimate of the likelihood that the recognizer will not
- // change its guess about this interim result. Values range from 0.0
- // (completely unstable) to 1.0 (completely stable).
- // This field is only provided for interim results (`is_final=false`).
- // The default of 0.0 is a sentinel value indicating `stability` was not set.
- float stability = 3;
-}
-
-// A speech recognition result corresponding to a portion of the audio.
-message SpeechRecognitionResult {
- // *Output-only* May contain one or more recognition hypotheses (up to the
- // maximum specified in `max_alternatives`).
- repeated SpeechRecognitionAlternative alternatives = 1;
-}
-
-// Alternative hypotheses (a.k.a. n-best list).
-message SpeechRecognitionAlternative {
- // *Output-only* Transcript text representing the words that the user spoke.
- string transcript = 1;
-
- // *Output-only* The confidence estimate between 0.0 and 1.0. A higher number
- // indicates an estimated greater likelihood that the recognized words are
- // correct. This field is typically provided only for the top hypothesis, and
- // only for `is_final=true` results. Clients should not rely on the
- // `confidence` field as it is not guaranteed to be accurate, or even set, in
- // any of the results.
- // The default of 0.0 is a sentinel value indicating `confidence` was not set.
- float confidence = 2;
-}
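Removing this v1beta1 proto also drops its write-up of the streaming handshake: the first `StreamingRecognizeRequest` carries only `streaming_config`, and every subsequent request carries only `audio_content`. The v1 surface keeps the same two-phase shape; a rough sketch of that request sequence (camelCase field names as the Node.js client expects them; `nextAudioChunk()` is a hypothetical source of raw audio Buffers):

    // First request: configuration only, no audio bytes.
    const configRequest = {
      streamingConfig: {
        config: {encoding: 'LINEAR16', sampleRateHertz: 16000, languageCode: 'en-US'},
        interimResults: true,
      },
    };

    // All subsequent requests: audio bytes only, never another config.
    const audioRequest = {audioContent: nextAudioChunk()};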
diff --git a/protos/protos.js b/protos/protos.js
index d3ade13f..2f7e211f 100644
--- a/protos/protos.js
+++ b/protos/protos.js
@@ -1,3 +1,17 @@
+// Copyright 2019 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
/*eslint-disable block-scoped-var, id-length, no-control-regex, no-magic-numbers, no-prototype-builtins, no-redeclare, no-shadow, no-var, sort-vars*/
(function(global, factory) { /* global define, require, module */
diff --git a/samples/package.json b/samples/package.json
index f47e8735..1a23d90b 100644
--- a/samples/package.json
+++ b/samples/package.json
@@ -8,7 +8,7 @@
"node": ">=8"
},
"scripts": {
- "test": "mocha system-test --timeout 600000"
+ "test": "c8 mocha system-test --timeout 600000"
},
"dependencies": {
"@google-cloud/speech": "^3.4.0",
@@ -19,6 +19,7 @@
"yargs": "^15.0.0"
},
"devDependencies": {
+ "c8": "^6.0.0",
"chai": "^4.2.0",
"mocha": "^6.0.0",
"uuid": "^3.3.0"
diff --git a/src/helpers.js b/src/helpers.ts
similarity index 75%
rename from src/helpers.js
rename to src/helpers.ts
index 00ec9324..b5e410c4 100644
--- a/src/helpers.js
+++ b/src/helpers.ts
@@ -1,4 +1,4 @@
-/*!
+/*
* Copyright 2017 Google Inc. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
@@ -14,25 +14,18 @@
* limitations under the License.
*/
-'use strict';
-
-const common = require('@google-cloud/common');
-const pumpify = require('pumpify');
-const streamEvents = require('stream-events');
-const {PassThrough} = require('stream');
-
-/*!
- * Return a dictionary-like object with helpers to augment the Speech
- * GAPIC.
- */
-module.exports = () => {
- const methods = {};
+import * as common from '@google-cloud/common';
+import * as pumpify from 'pumpify';
+import * as streamEvents from 'stream-events';
+import {PassThrough} from 'stream';
+import * as protosTypes from '../protos/protos';
+import * as gax from 'google-gax';
+export class ImprovedStreamingClient {
/**
* Performs bidirectional streaming speech recognition: receive results while
* sending audio. This method is only available via the gRPC API (not REST).
*
- * @method v1.SpeechClient#streamingRecognize
* @param {object} config The configuration for the stream. This is
* appropriately wrapped and sent as the first argument. It should be an
* object conforming to the [StreamingRecognitionConfig]{@link StreamingRecognitionConfig}
@@ -63,21 +56,34 @@ module.exports = () => {
* // Write request objects.
* stream.write(request);
*/
- methods.streamingRecognize = function(streamingConfig, options) {
+ streamingRecognize(
+ streamingConfig?:
+ | protosTypes.google.cloud.speech.v1.IStreamingRecognitionConfig
+ | protosTypes.google.cloud.speech.v1p1beta1.IStreamingRecognitionConfig,
+ options?: gax.CallOptions
+ ) {
options = options || {};
streamingConfig = streamingConfig || {};
// Format the audio content as input request for pipeline
- const recognizeStream = streamEvents(pumpify.obj());
+ const recognizeStream = streamEvents(new pumpify.obj());
- const requestStream = this._innerApiCalls
- .streamingRecognize(options)
- .on('error', err => {
+ // tslint:disable-next-line no-any
+ const requestStream = (this as any)
+ ._streamingRecognize(options)
+ .on('error', (err: Error) => {
recognizeStream.destroy(err);
})
- .on('response', response => {
- recognizeStream.emit('response', response);
- });
+ .on(
+ 'response',
+ (
+ response:
+ | protosTypes.google.cloud.speech.v1.StreamingRecognizeResponse
+ | protosTypes.google.cloud.speech.v1p1beta1.StreamingRecognizeResponse
+ ) => {
+ recognizeStream.emit('response', response);
+ }
+ );
// Attach the events to the request stream, but only do so
// when the first write (of data) comes in.
@@ -98,7 +104,7 @@ module.exports = () => {
objectMode: true,
transform: (audioContent, _, next) => {
if (audioContent !== undefined) {
- next(null, {audioContent});
+ next(undefined, {audioContent});
return;
}
next();
@@ -112,14 +118,12 @@ module.exports = () => {
next(new common.util.ApiError(response.error));
return;
}
- next(null, response);
+ next(undefined, response);
},
}),
]);
});
return recognizeStream;
- };
-
- return methods;
-};
+ }
+}
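End-to-end, the rewritten helper is still consumed the way the JSDoc above describes: obtain a stream, listen for `data`, and write raw audio chunks (the `PassThrough` transform above wraps each chunk as `{audioContent}`). A minimal usage sketch in TypeScript, with illustrative config values:

    import {SpeechClient} from '@google-cloud/speech';

    declare const audioChunk: Buffer; // assumed: a chunk of LINEAR16 audio

    const client = new SpeechClient();
    const stream = client
      .streamingRecognize({
        config: {encoding: 'LINEAR16', sampleRateHertz: 16000, languageCode: 'en-US'},
        interimResults: false,
      })
      .on('error', console.error)
      .on('data', data => {
        // Each response may carry zero or more StreamingRecognitionResults.
        console.log(data.results[0]?.alternatives[0]?.transcript);
      });

    stream.write(audioChunk); // wrapped internally as {audioContent: audioChunk}
    stream.end();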
diff --git a/src/index.js b/src/index.js
deleted file mode 100644
index 87785e73..00000000
--- a/src/index.js
+++ /dev/null
@@ -1,103 +0,0 @@
-// Copyright 2018 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// https://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-/*!
- * DO NOT REMOVE THE FOLLOWING NAMESPACE DEFINITIONS
- */
-/**
- * @namespace google
- */
-/**
- * @namespace google.cloud
- */
-/**
- * @namespace google.cloud.speech
- */
-/**
- * @namespace google.cloud.speech.v1
- */
-/**
- * @namespace google.cloud.speech.v1p1beta1
- */
-/**
- * @namespace google.protobuf
- */
-/**
- * @namespace google.rpc
- */
-/**
- * @namespace google.longrunning
- */
-
-'use strict';
-
-const helpers = require('./helpers');
-
-// Import the clients for each version supported by this package.
-const gapic = Object.freeze({
- v1: require('./v1'),
- v1p1beta1: require('./v1p1beta1'),
-});
-
-// Augment the SpeechClient objects with the helpers.
-for (const gapicVersion of Object.keys(gapic)) {
- const clientProto = gapic[gapicVersion].SpeechClient.prototype;
- Object.assign(clientProto, helpers());
-}
-
-/**
- * The `@google-cloud/speech` package has the following named exports:
- *
- * - `SpeechClient` - Reference to {@link v1.SpeechClient}
- * - `v1` - This is used for selecting or pinning a particular backend service
- * version. It exports:
- * - `SpeechClient` - Reference to {@link v1.SpeechClient}
- * - `v1p1beta1` - This is used for selecting or pinning a beta backend service
- * version. It exports:
- * - `SpeechClient` - Reference to {@link v1p1beta1.SpeechClient}
- *
- * @module {object} @google-cloud/speech
- * @alias nodejs-speech
- *
- * @example Install the client library with npm:
- * npm install --save @google-cloud/speech
- *
- * @example Import the client library:
- * const speech = require('@google-cloud/speech');
- *
- * @example Create a client that uses Application Default Credentials (ADC):
- * const client = new speech.SpeechClient();
- *
- * @example Create a client with explicit credentials:
- * const client = new speech.SpeechClient({
- * projectId: 'your-project-id',
- * keyFilename: '/path/to/keyfile.json',
- * });
- */
-module.exports = gapic.v1;
-
-/**
- * @type {object}
- * @property {constructor} SpeechClient Reference to {@link v1.SpeechClient}.
- */
-module.exports.v1 = gapic.v1;
-
-/**
- * @type {object}
- * @property {constructor} SpeechClient Reference to {@link v1p1beta1.SpeechClient}.
- */
-module.exports.v1p1beta1 = gapic.v1p1beta1;
-
-// Alias `module.exports` as `module.exports.default`, for future-proofing.
-module.exports.default = Object.assign({}, module.exports);
diff --git a/src/index.ts b/src/index.ts
new file mode 100644
index 00000000..75bfdfe1
--- /dev/null
+++ b/src/index.ts
@@ -0,0 +1,42 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+import {ImprovedStreamingClient} from './helpers';
+import * as v1p1beta1 from './v1p1beta1';
+import * as v1 from './v1';
+
+// The following code is adapted from http://www.typescriptlang.org/docs/handbook/mixins.html
+// tslint:disable-next-line no-any
+Object.defineProperty(
+ v1.SpeechClient.prototype,
+ 'streamingRecognize',
+ Object.getOwnPropertyDescriptor(
+ ImprovedStreamingClient.prototype,
+ 'streamingRecognize'
+ )!
+);
+Object.defineProperty(
+ v1p1beta1.SpeechClient.prototype,
+ 'streamingRecognize',
+ Object.getOwnPropertyDescriptor(
+ ImprovedStreamingClient.prototype,
+ 'streamingRecognize'
+ )!
+);
+
+const SpeechClient = v1.SpeechClient;
+export {v1, v1p1beta1, SpeechClient};
+// For compatibility with JavaScript libraries we need to provide this default export:
+// tslint:disable-next-line no-default-export
+export default {v1, v1p1beta1, SpeechClient};
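`Object.defineProperty` with a property descriptor is used here (per the TypeScript handbook mixin recipe referenced above) in place of the old `Object.assign(clientProto, helpers())` from src/index.js. After the mixin and the default export, both module systems should resolve to the same clients; a sketch of the two import styles this preserves:

    // TypeScript / ES modules:
    import {SpeechClient, v1p1beta1} from '@google-cloud/speech';
    const client = new SpeechClient();         // v1 by default
    const beta = new v1p1beta1.SpeechClient(); // pin the beta surface

    // Plain JavaScript, as in the examples removed from src/index.js:
    //   const speech = require('@google-cloud/speech');
    //   const client = new speech.SpeechClient();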
diff --git a/src/v1/doc/google/cloud/speech/v1/doc_cloud_speech.js b/src/v1/doc/google/cloud/speech/v1/doc_cloud_speech.js
deleted file mode 100644
index df848d50..00000000
--- a/src/v1/doc/google/cloud/speech/v1/doc_cloud_speech.js
+++ /dev/null
@@ -1,984 +0,0 @@
-// Copyright 2019 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// https://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Note: this file is purely for documentation. Any contents are not expected
-// to be loaded as the JS file.
-
-/**
- * The top-level message sent by the client for the `Recognize` method.
- *
- * @property {Object} config
- * Required. Provides information to the recognizer that specifies how to
- * process the request.
- *
- * This object should have the same structure as [RecognitionConfig]{@link google.cloud.speech.v1.RecognitionConfig}
- *
- * @property {Object} audio
- * Required. The audio data to be recognized.
- *
- * This object should have the same structure as [RecognitionAudio]{@link google.cloud.speech.v1.RecognitionAudio}
- *
- * @typedef RecognizeRequest
- * @memberof google.cloud.speech.v1
- * @see [google.cloud.speech.v1.RecognizeRequest definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/speech/v1/cloud_speech.proto}
- */
-const RecognizeRequest = {
- // This is for documentation. Actual contents will be loaded by gRPC.
-};
-
-/**
- * The top-level message sent by the client for the `LongRunningRecognize`
- * method.
- *
- * @property {Object} config
- * Required. Provides information to the recognizer that specifies how to
- * process the request.
- *
- * This object should have the same structure as [RecognitionConfig]{@link google.cloud.speech.v1.RecognitionConfig}
- *
- * @property {Object} audio
- * Required. The audio data to be recognized.
- *
- * This object should have the same structure as [RecognitionAudio]{@link google.cloud.speech.v1.RecognitionAudio}
- *
- * @typedef LongRunningRecognizeRequest
- * @memberof google.cloud.speech.v1
- * @see [google.cloud.speech.v1.LongRunningRecognizeRequest definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/speech/v1/cloud_speech.proto}
- */
-const LongRunningRecognizeRequest = {
- // This is for documentation. Actual contents will be loaded by gRPC.
-};
-
-/**
- * The top-level message sent by the client for the `StreamingRecognize` method.
- * Multiple `StreamingRecognizeRequest` messages are sent. The first message
- * must contain a `streaming_config` message and must not contain
- * `audio_content`. All subsequent messages must contain `audio_content` and
- * must not contain a `streaming_config` message.
- *
- * @property {Object} streamingConfig
- * Provides information to the recognizer that specifies how to process the
- * request. The first `StreamingRecognizeRequest` message must contain a
- * `streaming_config` message.
- *
- * This object should have the same structure as [StreamingRecognitionConfig]{@link google.cloud.speech.v1.StreamingRecognitionConfig}
- *
- * @property {Buffer} audioContent
- * The audio data to be recognized. Sequential chunks of audio data are sent
- * in sequential `StreamingRecognizeRequest` messages. The first
- * `StreamingRecognizeRequest` message must not contain `audio_content` data
- * and all subsequent `StreamingRecognizeRequest` messages must contain
- * `audio_content` data. The audio bytes must be encoded as specified in
- * `RecognitionConfig`. Note: as with all bytes fields, proto buffers use a
- * pure binary representation (not base64). See
- * [content limits](https://cloud.google.com/speech-to-text/quotas#content).
- *
- * @typedef StreamingRecognizeRequest
- * @memberof google.cloud.speech.v1
- * @see [google.cloud.speech.v1.StreamingRecognizeRequest definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/speech/v1/cloud_speech.proto}
- */
-const StreamingRecognizeRequest = {
- // This is for documentation. Actual contents will be loaded by gRPC.
-};
-
-/**
- * Provides information to the recognizer that specifies how to process the
- * request.
- *
- * @property {Object} config
- * Required. Provides information to the recognizer that specifies how to
- * process the request.
- *
- * This object should have the same structure as [RecognitionConfig]{@link google.cloud.speech.v1.RecognitionConfig}
- *
- * @property {boolean} singleUtterance
- * If `false` or omitted, the recognizer will perform continuous
- * recognition (continuing to wait for and process audio even if the user
- * pauses speaking) until the client closes the input stream (gRPC API) or
- * until the maximum time limit has been reached. May return multiple
- * `StreamingRecognitionResult`s with the `is_final` flag set to `true`.
- *
- * If `true`, the recognizer will detect a single spoken utterance. When it
- * detects that the user has paused or stopped speaking, it will return an
- * `END_OF_SINGLE_UTTERANCE` event and cease recognition. It will return no
- * more than one `StreamingRecognitionResult` with the `is_final` flag set to
- * `true`.
- *
- * @property {boolean} interimResults
- * If `true`, interim results (tentative hypotheses) may be
- * returned as they become available (these interim results are indicated with
- * the `is_final=false` flag).
- * If `false` or omitted, only `is_final=true` result(s) are returned.
- *
- * @typedef StreamingRecognitionConfig
- * @memberof google.cloud.speech.v1
- * @see [google.cloud.speech.v1.StreamingRecognitionConfig definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/speech/v1/cloud_speech.proto}
- */
-const StreamingRecognitionConfig = {
- // This is for documentation. Actual contents will be loaded by gRPC.
-};
-
-/**
- * Provides information to the recognizer that specifies how to process the
- * request.
- *
- * @property {number} encoding
- * Encoding of audio data sent in all `RecognitionAudio` messages.
- * This field is optional for `FLAC` and `WAV` audio files and required
- * for all other audio formats. For details, see AudioEncoding.
- *
- * The number should be among the values of [AudioEncoding]{@link google.cloud.speech.v1.AudioEncoding}
- *
- * @property {number} sampleRateHertz
- * Sample rate in Hertz of the audio data sent in all
- * `RecognitionAudio` messages. Valid values are: 8000-48000.
- * 16000 is optimal. For best results, set the sampling rate of the audio
- * source to 16000 Hz. If that's not possible, use the native sample rate of
- * the audio source (instead of re-sampling).
- * This field is optional for FLAC and WAV audio files, but is
- * required for all other audio formats. For details, see AudioEncoding.
- *
- * @property {number} audioChannelCount
- * The number of channels in the input audio data.
- * ONLY set this for MULTI-CHANNEL recognition.
- * Valid values for LINEAR16 and FLAC are `1`-`8`.
- * Valid values for OGG_OPUS are '1'-'254'.
- * Valid value for MULAW, AMR, AMR_WB and SPEEX_WITH_HEADER_BYTE is only `1`.
- * If `0` or omitted, defaults to one channel (mono).
- * Note: We only recognize the first channel by default.
- * To perform independent recognition on each channel set
- * `enable_separate_recognition_per_channel` to 'true'.
- *
- * @property {boolean} enableSeparateRecognitionPerChannel
- * This needs to be set to `true` explicitly and `audio_channel_count` > 1
- * to get each channel recognized separately. The recognition result will
- * contain a `channel_tag` field to state which channel that result belongs
- * to. If this is not true, we will only recognize the first channel. The
- * request is billed cumulatively for all channels recognized:
- * `audio_channel_count` multiplied by the length of the audio.
- *
- * @property {string} languageCode
- * Required. The language of the supplied audio as a
- * [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag.
- * Example: "en-US".
- * See [Language
- * Support](https://cloud.google.com/speech-to-text/docs/languages) for a list
- * of the currently supported language codes.
- *
- * @property {number} maxAlternatives
- * Maximum number of recognition hypotheses to be returned.
- * Specifically, the maximum number of `SpeechRecognitionAlternative` messages
- * within each `SpeechRecognitionResult`.
- * The server may return fewer than `max_alternatives`.
- * Valid values are `0`-`30`. A value of `0` or `1` will return a maximum of
- * one. If omitted, will return a maximum of one.
- *
- * @property {boolean} profanityFilter
- * If set to `true`, the server will attempt to filter out
- * profanities, replacing all but the initial character in each filtered word
- * with asterisks, e.g. "f***". If set to `false` or omitted, profanities
- * won't be filtered out.
- *
- * @property {Object[]} speechContexts
- * Array of SpeechContext.
- * A means to provide context to assist the speech recognition. For more
- * information, see
- * [speech
- * adaptation](https://cloud.google.com/speech-to-text/docs/context-strength).
- *
- * This object should have the same structure as [SpeechContext]{@link google.cloud.speech.v1.SpeechContext}
- *
- * @property {boolean} enableWordTimeOffsets
- * If `true`, the top result includes a list of words and
- * the start and end time offsets (timestamps) for those words. If
- * `false`, no word-level time offset information is returned. The default is
- * `false`.
- *
- * @property {boolean} enableAutomaticPunctuation
- * If 'true', adds punctuation to recognition result hypotheses.
- * This feature is only available in select languages. Setting this for
- * requests in other languages has no effect at all.
- * The default 'false' value does not add punctuation to result hypotheses.
- * Note: This is currently offered as an experimental service, complimentary
- * to all users. In the future this may be exclusively available as a
- * premium feature.
- *
- * @property {Object} diarizationConfig
- * Config to enable speaker diarization and set additional
- * parameters to make diarization better suited for your application.
- * Note: When this is enabled, we send all the words from the beginning of the
- * audio for the top alternative in every consecutive STREAMING responses.
- * This is done in order to improve our speaker tags as our models learn to
- * identify the speakers in the conversation over time.
- * For non-streaming requests, the diarization results will be provided only
- * in the top alternative of the FINAL SpeechRecognitionResult.
- *
- * This object should have the same structure as [SpeakerDiarizationConfig]{@link google.cloud.speech.v1.SpeakerDiarizationConfig}
- *
- * @property {Object} metadata
- * Metadata regarding this request.
- *
- * This object should have the same structure as [RecognitionMetadata]{@link google.cloud.speech.v1.RecognitionMetadata}
- *
- * @property {string} model
- * Which model to select for the given request. Select the model
- * best suited to your domain to get best results. If a model is not
- * explicitly specified, then we auto-select a model based on the parameters
- * in the RecognitionConfig.
- *
- * Model              | Description
- * -------------------|------------------------------------------------------
- * command_and_search | Best for short queries such as voice commands or
- *                    | voice search.
- * phone_call         | Best for audio that originated from a phone call
- *                    | (typically recorded at an 8khz sampling rate).
- * video              | Best for audio that originated from video or includes
- *                    | multiple speakers. Ideally the audio is recorded at a
- *                    | 16khz or greater sampling rate. This is a premium
- *                    | model that costs more than the standard rate.
- * default            | Best for audio that is not one of the specific audio
- *                    | models. For example, long-form audio. Ideally the
- *                    | audio is high-fidelity, recorded at a 16khz or
- *                    | greater sampling rate.
- *
- * @property {boolean} useEnhanced
- * Set to true to use an enhanced model for speech recognition.
- * If `use_enhanced` is set to true and the `model` field is not set, then
- * an appropriate enhanced model is chosen if an enhanced model exists for
- * the audio.
- *
- * If `use_enhanced` is true and an enhanced version of the specified model
- * does not exist, then the speech is recognized using the standard version
- * of the specified model.
- *
- * @typedef RecognitionConfig
- * @memberof google.cloud.speech.v1
- * @see [google.cloud.speech.v1.RecognitionConfig definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/speech/v1/cloud_speech.proto}
- */
-const RecognitionConfig = {
- // This is for documentation. Actual contents will be loaded by gRPC.
-
- /**
- * The encoding of the audio data sent in the request.
- *
- * All encodings support only 1 channel (mono) audio, unless the
- * `audio_channel_count` and `enable_separate_recognition_per_channel` fields
- * are set.
- *
- * For best results, the audio source should be captured and transmitted using
- * a lossless encoding (`FLAC` or `LINEAR16`). The accuracy of the speech
- * recognition can be reduced if lossy codecs are used to capture or transmit
- * audio, particularly if background noise is present. Lossy codecs include
- * `MULAW`, `AMR`, `AMR_WB`, `OGG_OPUS`, `SPEEX_WITH_HEADER_BYTE`, and `MP3`.
- *
- * The `FLAC` and `WAV` audio file formats include a header that describes the
- * included audio content. You can request recognition for `WAV` files that
- * contain either `LINEAR16` or `MULAW` encoded audio.
- * If you send `FLAC` or `WAV` audio file format in
- * your request, you do not need to specify an `AudioEncoding`; the audio
- * encoding format is determined from the file header. If you specify
- * an `AudioEncoding` when you send `FLAC` or `WAV` audio, the
- * encoding configuration must match the encoding described in the audio
- * header; otherwise the request returns an
- * google.rpc.Code.INVALID_ARGUMENT error code.
- *
- * @enum {number}
- * @memberof google.cloud.speech.v1
- */
- AudioEncoding: {
-
- /**
- * Not specified.
- */
- ENCODING_UNSPECIFIED: 0,
-
- /**
- * Uncompressed 16-bit signed little-endian samples (Linear PCM).
- */
- LINEAR16: 1,
-
- /**
- * `FLAC` (Free Lossless Audio
- * Codec) is the recommended encoding because it is
- * lossless--therefore recognition is not compromised--and
- * requires only about half the bandwidth of `LINEAR16`. `FLAC` stream
- * encoding supports 16-bit and 24-bit samples, however, not all fields in
- * `STREAMINFO` are supported.
- */
- FLAC: 2,
-
- /**
- * 8-bit samples that compand 14-bit audio samples using G.711 PCMU/mu-law.
- */
- MULAW: 3,
-
- /**
- * Adaptive Multi-Rate Narrowband codec. `sample_rate_hertz` must be 8000.
- */
- AMR: 4,
-
- /**
- * Adaptive Multi-Rate Wideband codec. `sample_rate_hertz` must be 16000.
- */
- AMR_WB: 5,
-
- /**
- * Opus encoded audio frames in Ogg container
- * ([OggOpus](https://wiki.xiph.org/OggOpus)).
- * `sample_rate_hertz` must be one of 8000, 12000, 16000, 24000, or 48000.
- */
- OGG_OPUS: 6,
-
- /**
- * Although the use of lossy encodings is not recommended, if a very low
- * bitrate encoding is required, `OGG_OPUS` is highly preferred over
- * Speex encoding. The [Speex](https://speex.org/) encoding supported by
- * Cloud Speech API has a header byte in each block, as in MIME type
- * `audio/x-speex-with-header-byte`.
- * It is a variant of the RTP Speex encoding defined in
- * [RFC 5574](https://tools.ietf.org/html/rfc5574).
- * The stream is a sequence of blocks, one block per RTP packet. Each block
- * starts with a byte containing the length of the block, in bytes, followed
- * by one or more frames of Speex data, padded to an integral number of
- * bytes (octets) as specified in RFC 5574. In other words, each RTP header
- * is replaced with a single byte containing the block length. Only Speex
- * wideband is supported. `sample_rate_hertz` must be 16000.
- */
- SPEEX_WITH_HEADER_BYTE: 7
- }
-};
-
-/**
- * Config to enable speaker diarization.
- *
- * @property {boolean} enableSpeakerDiarization
- * If 'true', enables speaker detection for each recognized word in
- * the top alternative of the recognition result using a speaker_tag provided
- * in the WordInfo.
- *
- * @property {number} minSpeakerCount
- * Minimum number of speakers in the conversation. This range gives you more
- * flexibility by allowing the system to automatically determine the correct
- * number of speakers. If not set, the default value is 2.
- *
- * @property {number} maxSpeakerCount
- * Maximum number of speakers in the conversation. This range gives you more
- * flexibility by allowing the system to automatically determine the correct
- * number of speakers. If not set, the default value is 6.
- *
- * @property {number} speakerTag
- * Unused.
- *
- * @typedef SpeakerDiarizationConfig
- * @memberof google.cloud.speech.v1
- * @see [google.cloud.speech.v1.SpeakerDiarizationConfig definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/speech/v1/cloud_speech.proto}
- */
-const SpeakerDiarizationConfig = {
- // This is for documentation. Actual contents will be loaded by gRPC.
-};
-
-/**
- * Description of audio data to be recognized.
- *
- * @property {number} interactionType
- * The use case most closely describing the audio content to be recognized.
- *
- * The number should be among the values of [InteractionType]{@link google.cloud.speech.v1.InteractionType}
- *
- * @property {number} industryNaicsCodeOfAudio
- * The industry vertical to which this speech recognition request most
- * closely applies. This is most indicative of the topics contained
- * in the audio. Use the 6-digit NAICS code to identify the industry
- * vertical - see https://www.naics.com/search/.
- *
- * @property {number} microphoneDistance
- * The audio type that most closely describes the audio being recognized.
- *
- * The number should be among the values of [MicrophoneDistance]{@link google.cloud.speech.v1.MicrophoneDistance}
- *
- * @property {number} originalMediaType
- * The original media the speech was recorded on.
- *
- * The number should be among the values of [OriginalMediaType]{@link google.cloud.speech.v1.OriginalMediaType}
- *
- * @property {number} recordingDeviceType
- * The type of device the speech was recorded with.
- *
- * The number should be among the values of [RecordingDeviceType]{@link google.cloud.speech.v1.RecordingDeviceType}
- *
- * @property {string} recordingDeviceName
- * The device used to make the recording. Examples 'Nexus 5X' or
- * 'Polycom SoundStation IP 6000' or 'POTS' or 'VoIP' or
- * 'Cardioid Microphone'.
- *
- * @property {string} originalMimeType
- * Mime type of the original audio file. For example `audio/m4a`,
- * `audio/x-alaw-basic`, `audio/mp3`, `audio/3gpp`.
- * A list of possible audio mime types is maintained at
- * http://www.iana.org/assignments/media-types/media-types.xhtml#audio
- *
- * @property {string} audioTopic
- * Description of the content. Eg. "Recordings of federal supreme court
- * hearings from 2012".
- *
- * @typedef RecognitionMetadata
- * @memberof google.cloud.speech.v1
- * @see [google.cloud.speech.v1.RecognitionMetadata definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/speech/v1/cloud_speech.proto}
- */
-const RecognitionMetadata = {
- // This is for documentation. Actual contents will be loaded by gRPC.
-
- /**
- * Use case categories that the audio recognition request can be described
- * by.
- *
- * @enum {number}
- * @memberof google.cloud.speech.v1
- */
- InteractionType: {
-
- /**
- * Use case is either unknown or is something other than one of the other
- * values below.
- */
- INTERACTION_TYPE_UNSPECIFIED: 0,
-
- /**
- * Multiple people in a conversation or discussion. For example in a
- * meeting with two or more people actively participating. Typically
- * all the primary people speaking would be in the same room (if not,
- * see PHONE_CALL)
- */
- DISCUSSION: 1,
-
- /**
- * One or more persons lecturing or presenting to others, mostly
- * uninterrupted.
- */
- PRESENTATION: 2,
-
- /**
- * A phone-call or video-conference in which two or more people, who are
- * not in the same room, are actively participating.
- */
- PHONE_CALL: 3,
-
- /**
- * A recorded message intended for another person to listen to.
- */
- VOICEMAIL: 4,
-
- /**
- * Professionally produced audio (eg. TV Show, Podcast).
- */
- PROFESSIONALLY_PRODUCED: 5,
-
- /**
- * Transcribe spoken questions and queries into text.
- */
- VOICE_SEARCH: 6,
-
- /**
- * Transcribe voice commands, such as for controlling a device.
- */
- VOICE_COMMAND: 7,
-
- /**
- * Transcribe speech to text to create a written document, such as a
- * text-message, email or report.
- */
- DICTATION: 8
- },
-
- /**
- * Enumerates the types of capture settings describing an audio file.
- *
- * @enum {number}
- * @memberof google.cloud.speech.v1
- */
- MicrophoneDistance: {
-
- /**
- * Audio type is not known.
- */
- MICROPHONE_DISTANCE_UNSPECIFIED: 0,
-
- /**
- * The audio was captured from a closely placed microphone. Eg. phone,
- * dictaphone, or handheld microphone. Generally if the speaker is within
- * 1 meter of the microphone.
- */
- NEARFIELD: 1,
-
- /**
- * The speaker is within 3 meters of the microphone.
- */
- MIDFIELD: 2,
-
- /**
- * The speaker is more than 3 meters away from the microphone.
- */
- FARFIELD: 3
- },
-
- /**
- * The original media the speech was recorded on.
- *
- * @enum {number}
- * @memberof google.cloud.speech.v1
- */
- OriginalMediaType: {
-
- /**
- * Unknown original media type.
- */
- ORIGINAL_MEDIA_TYPE_UNSPECIFIED: 0,
-
- /**
- * The speech data is an audio recording.
- */
- AUDIO: 1,
-
- /**
- * The speech data originally recorded on a video.
- */
- VIDEO: 2
- },
-
- /**
- * The type of device the speech was recorded with.
- *
- * @enum {number}
- * @memberof google.cloud.speech.v1
- */
- RecordingDeviceType: {
-
- /**
- * The recording device is unknown.
- */
- RECORDING_DEVICE_TYPE_UNSPECIFIED: 0,
-
- /**
- * Speech was recorded on a smartphone.
- */
- SMARTPHONE: 1,
-
- /**
- * Speech was recorded using a personal computer or tablet.
- */
- PC: 2,
-
- /**
- * Speech was recorded over a phone line.
- */
- PHONE_LINE: 3,
-
- /**
- * Speech was recorded in a vehicle.
- */
- VEHICLE: 4,
-
- /**
- * Speech was recorded outdoors.
- */
- OTHER_OUTDOOR_DEVICE: 5,
-
- /**
- * Speech was recorded indoors.
- */
- OTHER_INDOOR_DEVICE: 6
- }
-};
-
-/**
- * Provides "hints" to the speech recognizer to favor specific words and phrases
- * in the results.
- *
- * @property {string[]} phrases
- * A list of strings containing words and phrases "hints" so that
- * the speech recognition is more likely to recognize them. This can be used
- * to improve the accuracy for specific words and phrases, for example, if
- * specific commands are typically spoken by the user. This can also be used
- * to add additional words to the vocabulary of the recognizer. See
- * [usage limits](https://cloud.google.com/speech-to-text/quotas#content).
- *
- * List items can also be set to classes for groups of words that represent
- * common concepts that occur in natural language. For example, rather than
- * providing phrase hints for every month of the year, using the $MONTH class
- * improves the likelihood of correctly transcribing audio that includes
- * months.
- *
- * @typedef SpeechContext
- * @memberof google.cloud.speech.v1
- * @see [google.cloud.speech.v1.SpeechContext definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/speech/v1/cloud_speech.proto}
- */
-const SpeechContext = {
- // This is for documentation. Actual contents will be loaded by gRPC.
-};
-
-/**
- * Contains audio data in the encoding specified in the `RecognitionConfig`.
- * Either `content` or `uri` must be supplied. Supplying both or neither
- * returns google.rpc.Code.INVALID_ARGUMENT. See
- * [content limits](https://cloud.google.com/speech-to-text/quotas#content).
- *
- * @property {Buffer} content
- * The audio data bytes encoded as specified in
- * `RecognitionConfig`. Note: as with all bytes fields, proto buffers use a
- * pure binary representation, whereas JSON representations use base64.
- *
- * @property {string} uri
- * URI that points to a file that contains audio data bytes as specified in
- * `RecognitionConfig`. The file must not be compressed (for example, gzip).
- * Currently, only Google Cloud Storage URIs are
- * supported, which must be specified in the following format:
- * `gs://bucket_name/object_name` (other URI formats return
- * google.rpc.Code.INVALID_ARGUMENT). For more information, see
- * [Request URIs](https://cloud.google.com/storage/docs/reference-uris).
- *
- * @typedef RecognitionAudio
- * @memberof google.cloud.speech.v1
- * @see [google.cloud.speech.v1.RecognitionAudio definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/speech/v1/cloud_speech.proto}
- */
-const RecognitionAudio = {
- // This is for documentation. Actual contents will be loaded by gRPC.
-};
-
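A short sketch of the two mutually exclusive ways to supply audio; file names and bucket paths are placeholders:

```ts
import {readFileSync} from 'fs';

// Exactly one of `content` or `uri` may be set; both or neither yields
// INVALID_ARGUMENT, per the doc above.
const inlineAudio = {content: readFileSync('audio.raw')}; // raw bytes over gRPC
const storedAudio = {uri: 'gs://bucket_name/object_name'}; // Cloud Storage only
// Over JSON/REST the `content` bytes must be base64-encoded instead:
const jsonAudio = {content: readFileSync('audio.raw').toString('base64')};
```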
-/**
- * The only message returned to the client by the `Recognize` method. It
- * contains the result as zero or more sequential `SpeechRecognitionResult`
- * messages.
- *
- * @property {Object[]} results
- * Sequential list of transcription results corresponding to
- * sequential portions of audio.
- *
- * This object should have the same structure as [SpeechRecognitionResult]{@link google.cloud.speech.v1.SpeechRecognitionResult}
- *
- * @typedef RecognizeResponse
- * @memberof google.cloud.speech.v1
- * @see [google.cloud.speech.v1.RecognizeResponse definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/speech/v1/cloud_speech.proto}
- */
-const RecognizeResponse = {
- // This is for documentation. Actual contents will be loaded by gRPC.
-};
-
-/**
- * The only message returned to the client by the `LongRunningRecognize` method.
- * It contains the result as zero or more sequential `SpeechRecognitionResult`
- * messages. It is included in the `result.response` field of the `Operation`
- * returned by the `GetOperation` call of the `google::longrunning::Operations`
- * service.
- *
- * @property {Object[]} results
- * Sequential list of transcription results corresponding to
- * sequential portions of audio.
- *
- * This object should have the same structure as [SpeechRecognitionResult]{@link google.cloud.speech.v1.SpeechRecognitionResult}
- *
- * @typedef LongRunningRecognizeResponse
- * @memberof google.cloud.speech.v1
- * @see [google.cloud.speech.v1.LongRunningRecognizeResponse definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/speech/v1/cloud_speech.proto}
- */
-const LongRunningRecognizeResponse = {
- // This is for documentation. Actual contents will be loaded by gRPC.
-};
-
-/**
- * Describes the progress of a long-running `LongRunningRecognize` call. It is
- * included in the `metadata` field of the `Operation` returned by the
- * `GetOperation` call of the `google::longrunning::Operations` service.
- *
- * @property {number} progressPercent
- * Approximate percentage of audio processed thus far. Guaranteed to be 100
- * when the audio is fully processed and the results are available.
- *
- * @property {Object} startTime
- * Time when the request was received.
- *
- * This object should have the same structure as [Timestamp]{@link google.protobuf.Timestamp}
- *
- * @property {Object} lastUpdateTime
- * Time of the most recent processing update.
- *
- * This object should have the same structure as [Timestamp]{@link google.protobuf.Timestamp}
- *
- * @typedef LongRunningRecognizeMetadata
- * @memberof google.cloud.speech.v1
- * @see [google.cloud.speech.v1.LongRunningRecognizeMetadata definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/speech/v1/cloud_speech.proto}
- */
-const LongRunningRecognizeMetadata = {
- // This is for documentation. Actual contents will be loaded by gRPC.
-};
-
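A sketch of watching this metadata while an operation polls, using the same `operation.on('progress', ...)` event-emitter pattern that appears in the JSDoc examples removed later in this diff:

```ts
import {SpeechClient} from '@google-cloud/speech';

async function transcribeWithProgress(client: SpeechClient, request: {}) {
  const [operation] = await client.longRunningRecognize(request);
  // 'progress' fires on each polled metadata change.
  operation.on('progress', (metadata: any) => {
    console.log(`processed: ${metadata.progressPercent}%`); // approximate until 100
  });
  const [response] = await operation.promise();
  return response;
}
```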
-/**
- * `StreamingRecognizeResponse` is the only message returned to the client by
- * `StreamingRecognize`. A series of zero or more `StreamingRecognizeResponse`
- * messages are streamed back to the client. If there is no recognizable
- * audio, and `single_utterance` is set to false, then no messages are streamed
- * back to the client.
- *
- * Here's an example of a series of seven `StreamingRecognizeResponse`s that might
- * be returned while processing audio:
- *
- * 1. results { alternatives { transcript: "tube" } stability: 0.01 }
- *
- * 2. results { alternatives { transcript: "to be a" } stability: 0.01 }
- *
- * 3. results { alternatives { transcript: "to be" } stability: 0.9 }
- * results { alternatives { transcript: " or not to be" } stability: 0.01 }
- *
- * 4. results { alternatives { transcript: "to be or not to be"
- * confidence: 0.92 }
- * alternatives { transcript: "to bee or not to bee" }
- * is_final: true }
- *
- * 5. results { alternatives { transcript: " that's" } stability: 0.01 }
- *
- * 6. results { alternatives { transcript: " that is" } stability: 0.9 }
- * results { alternatives { transcript: " the question" } stability: 0.01 }
- *
- * 7. results { alternatives { transcript: " that is the question"
- * confidence: 0.98 }
- * alternatives { transcript: " that was the question" }
- * is_final: true }
- *
- * Notes:
- *
- * - Only two of the above responses, #4 and #7, contain final results; they are
- *   indicated by `is_final: true`. Concatenating these generates the
- * full transcript: "to be or not to be that is the question".
- *
- * - The others contain interim `results`. #3 and #6 contain two interim
- * `results`: the first portion has a high stability and is less likely to
- * change; the second portion has a low stability and is very likely to
- * change. A UI designer might choose to show only high stability `results`.
- *
- * - The specific `stability` and `confidence` values shown above are only for
- * illustrative purposes. Actual values may vary.
- *
- * - In each response, only one of these fields will be set:
- * `error`,
- * `speech_event_type`, or
- * one or more (repeated) `results`.
- *
- * @property {Object} error
- * If set, returns a google.rpc.Status message that
- * specifies the error for the operation.
- *
- * This object should have the same structure as [Status]{@link google.rpc.Status}
- *
- * @property {Object[]} results
- * This repeated list contains zero or more results that
- * correspond to consecutive portions of the audio currently being processed.
- * It contains zero or one `is_final=true` result (the newly settled portion),
- * followed by zero or more `is_final=false` results (the interim results).
- *
- * This object should have the same structure as [StreamingRecognitionResult]{@link google.cloud.speech.v1.StreamingRecognitionResult}
- *
- * @property {number} speechEventType
- * Indicates the type of speech event.
- *
- * The number should be among the values of [SpeechEventType]{@link google.cloud.speech.v1.SpeechEventType}
- *
- * @typedef StreamingRecognizeResponse
- * @memberof google.cloud.speech.v1
- * @see [google.cloud.speech.v1.StreamingRecognizeResponse definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/speech/v1/cloud_speech.proto}
- */
-const StreamingRecognizeResponse = {
- // This is for documentation. Actual contents will be loaded by gRPC.
-
- /**
- * Indicates the type of speech event.
- *
- * @enum {number}
- * @memberof google.cloud.speech.v1
- */
- SpeechEventType: {
-
- /**
- * No speech event specified.
- */
- SPEECH_EVENT_UNSPECIFIED: 0,
-
- /**
- * This event indicates that the server has detected the end of the user's
- * speech utterance and expects no additional speech. Therefore, the server
- * will not process additional audio (although it may subsequently return
- * additional results). The client should stop sending additional audio
- * data, half-close the gRPC connection, and wait for any additional results
- * until the server closes the gRPC connection. This event is only sent if
- * `single_utterance` was set to `true`, and is not used otherwise.
- */
- END_OF_SINGLE_UTTERANCE: 1
- }
-};
-
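A sketch of the concatenation rule from the notes above: keep only `is_final` results from each response and join their top transcripts. The stream is assumed to come from the client's streaming helper:

```ts
import {Duplex} from 'stream';

function collectTranscript(stream: Duplex): Promise<string> {
  let transcript = '';
  return new Promise((resolve, reject) => {
    stream
      .on('data', (response: any) => {
        for (const result of response.results || []) {
          // Per the notes above, only is_final results are settled text.
          if (result.isFinal && result.alternatives && result.alternatives.length) {
            transcript += result.alternatives[0].transcript;
          }
        }
      })
      .on('error', reject)
      .on('end', () => resolve(transcript.trim()));
  });
}
```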
-/**
- * A streaming speech recognition result corresponding to a portion of the audio
- * that is currently being processed.
- *
- * @property {Object[]} alternatives
- * May contain one or more recognition hypotheses (up to the
- * maximum specified in `max_alternatives`).
- * These alternatives are ordered in terms of accuracy, with the top (first)
- * alternative being the most probable, as ranked by the recognizer.
- *
- * This object should have the same structure as [SpeechRecognitionAlternative]{@link google.cloud.speech.v1.SpeechRecognitionAlternative}
- *
- * @property {boolean} isFinal
- * If `false`, this `StreamingRecognitionResult` represents an
- * interim result that may change. If `true`, this is the final time the
- * speech service will return this particular `StreamingRecognitionResult`;
- * the recognizer will not return any further hypotheses for this portion of
- * the transcript and corresponding audio.
- *
- * @property {number} stability
- * An estimate of the likelihood that the recognizer will not
- * change its guess about this interim result. Values range from 0.0
- * (completely unstable) to 1.0 (completely stable).
- * This field is only provided for interim results (`is_final=false`).
- * The default of 0.0 is a sentinel value indicating `stability` was not set.
- *
- * @property {Object} resultEndTime
- * Time offset of the end of this result relative to the
- * beginning of the audio.
- *
- * This object should have the same structure as [Duration]{@link google.protobuf.Duration}
- *
- * @property {number} channelTag
- * For multi-channel audio, this is the channel number corresponding to the
- * recognized result for the audio from that channel.
- * For audio_channel_count = N, its output values can range from '1' to 'N'.
- *
- * @property {string} languageCode
- * The [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag of
- * the language in this result. This language code was detected to have the
- * most likelihood of being spoken in the audio.
- *
- * @typedef StreamingRecognitionResult
- * @memberof google.cloud.speech.v1
- * @see [google.cloud.speech.v1.StreamingRecognitionResult definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/speech/v1/cloud_speech.proto}
- */
-const StreamingRecognitionResult = {
- // This is for documentation. Actual contents will be loaded by gRPC.
-};
-
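A sketch of the UI suggestion above, filtering interim results by stability; the threshold is an assumption, not an API constant:

```ts
// What counts as "high stability" is up to the UI; 0.8 is illustrative.
const STABILITY_THRESHOLD = 0.8;

function stableInterimText(response: any): string {
  return (response.results || [])
    .filter(
      (r: any) =>
        !r.isFinal &&
        (r.stability || 0) >= STABILITY_THRESHOLD &&
        r.alternatives &&
        r.alternatives.length
    )
    .map((r: any) => r.alternatives[0].transcript)
    .join('');
}
```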
-/**
- * A speech recognition result corresponding to a portion of the audio.
- *
- * @property {Object[]} alternatives
- * May contain one or more recognition hypotheses (up to the
- * maximum specified in `max_alternatives`).
- * These alternatives are ordered in terms of accuracy, with the top (first)
- * alternative being the most probable, as ranked by the recognizer.
- *
- * This object should have the same structure as [SpeechRecognitionAlternative]{@link google.cloud.speech.v1.SpeechRecognitionAlternative}
- *
- * @property {number} channelTag
- * For multi-channel audio, this is the channel number corresponding to the
- * recognized result for the audio from that channel.
- * For audio_channel_count = N, its output values can range from '1' to 'N'.
- *
- * @typedef SpeechRecognitionResult
- * @memberof google.cloud.speech.v1
- * @see [google.cloud.speech.v1.SpeechRecognitionResult definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/speech/v1/cloud_speech.proto}
- */
-const SpeechRecognitionResult = {
- // This is for documentation. Actual contents will be loaded by gRPC.
-};
-
-/**
- * Alternative hypotheses (a.k.a. n-best list).
- *
- * @property {string} transcript
- * Transcript text representing the words that the user spoke.
- *
- * @property {number} confidence
- * The confidence estimate between 0.0 and 1.0. A higher number
- * indicates an estimated greater likelihood that the recognized words are
- * correct. This field is set only for the top alternative of a non-streaming
- * result or of a streaming result where `is_final=true`.
- * This field is not guaranteed to be accurate, and users should not rely on
- * it always being provided.
- * The default of 0.0 is a sentinel value indicating `confidence` was not set.
- *
- * @property {Object[]} words
- * A list of word-specific information for each recognized word.
- * Note: When `enable_speaker_diarization` is true, you will see all the words
- * from the beginning of the audio.
- *
- * This object should have the same structure as [WordInfo]{@link google.cloud.speech.v1.WordInfo}
- *
- * @typedef SpeechRecognitionAlternative
- * @memberof google.cloud.speech.v1
- * @see [google.cloud.speech.v1.SpeechRecognitionAlternative definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/speech/v1/cloud_speech.proto}
- */
-const SpeechRecognitionAlternative = {
- // This is for documentation. Actual contents will be loaded by gRPC.
-};
-
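A sketch of reading the top alternative while respecting the 0.0 sentinel semantics described above:

```ts
function topTranscript(result: any): {transcript: string; confidence?: number} {
  const top = (result.alternatives || [])[0];
  if (!top) return {transcript: ''};
  // 0.0 is the "not set" sentinel, so report it as undefined rather than
  // treating it as low confidence.
  return {transcript: top.transcript, confidence: top.confidence || undefined};
}
```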
-/**
- * Word-specific information for recognized words.
- *
- * @property {Object} startTime
- * Time offset relative to the beginning of the audio,
- * and corresponding to the start of the spoken word.
- * This field is only set if `enable_word_time_offsets=true` and only
- * in the top hypothesis.
- * This is an experimental feature and the accuracy of the time offset can
- * vary.
- *
- * This object should have the same structure as [Duration]{@link google.protobuf.Duration}
- *
- * @property {Object} endTime
- * Time offset relative to the beginning of the audio,
- * and corresponding to the end of the spoken word.
- * This field is only set if `enable_word_time_offsets=true` and only
- * in the top hypothesis.
- * This is an experimental feature and the accuracy of the time offset can
- * vary.
- *
- * This object should have the same structure as [Duration]{@link google.protobuf.Duration}
- *
- * @property {string} word
- * The word corresponding to this set of information.
- *
- * @property {number} speakerTag
- * A distinct integer value is assigned for every speaker within
- * the audio. This field specifies which one of those speakers was detected to
- * have spoken this word. Value ranges from '1' to diarization_speaker_count.
- * `speaker_tag` is set only if `enable_speaker_diarization` is `true`, and
- * only in the top alternative.
- *
- * @typedef WordInfo
- * @memberof google.cloud.speech.v1
- * @see [google.cloud.speech.v1.WordInfo definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/speech/v1/cloud_speech.proto}
- */
-const WordInfo = {
- // This is for documentation. Actual contents will be loaded by gRPC.
-};
\ No newline at end of file
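A sketch that prints word time offsets, assuming `enable_word_time_offsets=true` was set in the request config; the conversion follows the `{seconds, nanos}` Duration shape noted above:

```ts
function toSeconds(d: {seconds?: number | string; nanos?: number} = {}): number {
  // protobuf Duration: seconds may arrive as a string/Long; nanos is a number.
  return Number(d.seconds || 0) + (d.nanos || 0) / 1e9;
}

function printWordTimings(alternative: {words?: any[]}): void {
  for (const w of alternative.words || []) {
    console.log(`${w.word}: ${toSeconds(w.startTime)}s to ${toSeconds(w.endTime)}s`);
  }
}
```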
diff --git a/src/v1/doc/google/longrunning/doc_operations.js b/src/v1/doc/google/longrunning/doc_operations.js
deleted file mode 100644
index 4719aebd..00000000
--- a/src/v1/doc/google/longrunning/doc_operations.js
+++ /dev/null
@@ -1,63 +0,0 @@
-// Copyright 2019 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// https://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Note: this file is purely for documentation. Any contents are not expected
-// to be loaded as the JS file.
-
-/**
- * This resource represents a long-running operation that is the result of a
- * network API call.
- *
- * @property {string} name
- * The server-assigned name, which is only unique within the same service that
- * originally returns it. If you use the default HTTP mapping, the
- * `name` should have the format of `operations/some/unique/name`.
- *
- * @property {Object} metadata
- * Service-specific metadata associated with the operation. It typically
- * contains progress information and common metadata such as create time.
- * Some services might not provide such metadata. Any method that returns a
- * long-running operation should document the metadata type, if any.
- *
- * This object should have the same structure as [Any]{@link google.protobuf.Any}
- *
- * @property {boolean} done
- * If the value is `false`, it means the operation is still in progress.
- * If `true`, the operation is completed, and either `error` or `response` is
- * available.
- *
- * @property {Object} error
- * The error result of the operation in case of failure or cancellation.
- *
- * This object should have the same structure as [Status]{@link google.rpc.Status}
- *
- * @property {Object} response
- * The normal response of the operation in case of success. If the original
- * method returns no data on success, such as `Delete`, the response is
- * `google.protobuf.Empty`. If the original method is standard
- * `Get`/`Create`/`Update`, the response should be the resource. For other
- * methods, the response should have the type `XxxResponse`, where `Xxx`
- * is the original method name. For example, if the original method name
- * is `TakeSnapshot()`, the inferred response type is
- * `TakeSnapshotResponse`.
- *
- * This object should have the same structure as [Any]{@link google.protobuf.Any}
- *
- * @typedef Operation
- * @memberof google.longrunning
- * @see [google.longrunning.Operation definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/longrunning/operations.proto}
- */
-const Operation = {
- // This is for documentation. Actual contents will be loaded by gRPC.
-};
\ No newline at end of file
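A sketch of consuming this raw `Operation` shape directly; in practice the gax wrapper used elsewhere in this diff does this bookkeeping:

```ts
interface RawOperation {
  name: string;
  done?: boolean;
  error?: {code: number; message: string};
  response?: {}; // a packed Any holding, e.g., LongRunningRecognizeResponse
}

function describeOperation(op: RawOperation): string {
  if (!op.done) return `${op.name}: still running`;
  if (op.error) return `${op.name}: failed (${op.error.code}: ${op.error.message})`;
  return `${op.name}: succeeded; unpack the response field for the result`;
}
```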
diff --git a/src/v1/doc/google/protobuf/doc_any.js b/src/v1/doc/google/protobuf/doc_any.js
deleted file mode 100644
index cdd2fc80..00000000
--- a/src/v1/doc/google/protobuf/doc_any.js
+++ /dev/null
@@ -1,137 +0,0 @@
-// Copyright 2019 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// https://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Note: this file is purely for documentation. Any contents are not expected
-// to be loaded as the JS file.
-
-/**
- * `Any` contains an arbitrary serialized protocol buffer message along with a
- * URL that describes the type of the serialized message.
- *
- * Protobuf library provides support to pack/unpack Any values in the form
- * of utility functions or additional generated methods of the Any type.
- *
- * Example 1: Pack and unpack a message in C++.
- *
- * Foo foo = ...;
- * Any any;
- * any.PackFrom(foo);
- * ...
- * if (any.UnpackTo(&foo)) {
- * ...
- * }
- *
- * Example 2: Pack and unpack a message in Java.
- *
- * Foo foo = ...;
- * Any any = Any.pack(foo);
- * ...
- * if (any.is(Foo.class)) {
- * foo = any.unpack(Foo.class);
- * }
- *
- * Example 3: Pack and unpack a message in Python.
- *
- * foo = Foo(...)
- * any = Any()
- * any.Pack(foo)
- * ...
- * if any.Is(Foo.DESCRIPTOR):
- * any.Unpack(foo)
- * ...
- *
- * Example 4: Pack and unpack a message in Go
- *
- * foo := &pb.Foo{...}
- * any, err := ptypes.MarshalAny(foo)
- * ...
- * foo := &pb.Foo{}
- * if err := ptypes.UnmarshalAny(any, foo); err != nil {
- * ...
- * }
- *
- * The pack methods provided by protobuf library will by default use
- * 'type.googleapis.com/full.type.name' as the type URL and the unpack
- * methods only use the fully qualified type name after the last '/'
- * in the type URL, for example "foo.bar.com/x/y.z" will yield type
- * name "y.z".
- *
- *
- * # JSON
- *
- * The JSON representation of an `Any` value uses the regular
- * representation of the deserialized, embedded message, with an
- * additional field `@type` which contains the type URL. Example:
- *
- * package google.profile;
- * message Person {
- * string first_name = 1;
- * string last_name = 2;
- * }
- *
- * {
- * "@type": "type.googleapis.com/google.profile.Person",
- *       "firstName": <string>,
- *       "lastName": <string>
- * }
- *
- * If the embedded message type is well-known and has a custom JSON
- * representation, that representation will be embedded adding a field
- * `value` which holds the custom JSON in addition to the `@type`
- * field. Example (for message google.protobuf.Duration):
- *
- * {
- * "@type": "type.googleapis.com/google.protobuf.Duration",
- * "value": "1.212s"
- * }
- *
- * @property {string} typeUrl
- * A URL/resource name that uniquely identifies the type of the serialized
- * protocol buffer message. This string must contain at least
- * one "/" character. The last segment of the URL's path must represent
- * the fully qualified name of the type (as in
- * `path/google.protobuf.Duration`). The name should be in a canonical form
- * (e.g., leading "." is not accepted).
- *
- * In practice, teams usually precompile into the binary all types that they
- * expect it to use in the context of Any. However, for URLs which use the
- * scheme `http`, `https`, or no scheme, one can optionally set up a type
- * server that maps type URLs to message definitions as follows:
- *
- * * If no scheme is provided, `https` is assumed.
- * * An HTTP GET on the URL must yield a google.protobuf.Type
- * value in binary format, or produce an error.
- * * Applications are allowed to cache lookup results based on the
- * URL, or have them precompiled into a binary to avoid any
- * lookup. Therefore, binary compatibility needs to be preserved
- * on changes to types. (Use versioned type names to manage
- * breaking changes.)
- *
- * Note: this functionality is not currently available in the official
- * protobuf release, and it is not used for type URLs beginning with
- * type.googleapis.com.
- *
- * Schemes other than `http`, `https` (or the empty scheme) might be
- * used with implementation specific semantics.
- *
- * @property {Buffer} value
- * Must be a valid serialized protocol buffer of the above specified type.
- *
- * @typedef Any
- * @memberof google.protobuf
- * @see [google.protobuf.Any definition in proto format]{@link https://github.com/google/protobuf/blob/master/src/google/protobuf/any.proto}
- */
-const Any = {
- // This is for documentation. Actual contents will be loaded by gRPC.
-};
\ No newline at end of file
diff --git a/src/v1/doc/google/protobuf/doc_duration.js b/src/v1/doc/google/protobuf/doc_duration.js
deleted file mode 100644
index 1275f8f4..00000000
--- a/src/v1/doc/google/protobuf/doc_duration.js
+++ /dev/null
@@ -1,97 +0,0 @@
-// Copyright 2019 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// https://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Note: this file is purely for documentation. Any contents are not expected
-// to be loaded as the JS file.
-
-/**
- * A Duration represents a signed, fixed-length span of time represented
- * as a count of seconds and fractions of seconds at nanosecond
- * resolution. It is independent of any calendar and concepts like "day"
- * or "month". It is related to Timestamp in that the difference between
- * two Timestamp values is a Duration and it can be added or subtracted
- * from a Timestamp. Range is approximately +-10,000 years.
- *
- * # Examples
- *
- * Example 1: Compute Duration from two Timestamps in pseudo code.
- *
- * Timestamp start = ...;
- * Timestamp end = ...;
- * Duration duration = ...;
- *
- * duration.seconds = end.seconds - start.seconds;
- * duration.nanos = end.nanos - start.nanos;
- *
- * if (duration.seconds < 0 && duration.nanos > 0) {
- * duration.seconds += 1;
- * duration.nanos -= 1000000000;
- *     } else if (duration.seconds > 0 && duration.nanos < 0) {
- * duration.seconds -= 1;
- * duration.nanos += 1000000000;
- * }
- *
- * Example 2: Compute Timestamp from Timestamp + Duration in pseudo code.
- *
- * Timestamp start = ...;
- * Duration duration = ...;
- * Timestamp end = ...;
- *
- * end.seconds = start.seconds + duration.seconds;
- * end.nanos = start.nanos + duration.nanos;
- *
- * if (end.nanos < 0) {
- * end.seconds -= 1;
- * end.nanos += 1000000000;
- * } else if (end.nanos >= 1000000000) {
- * end.seconds += 1;
- * end.nanos -= 1000000000;
- * }
- *
- * Example 3: Compute Duration from datetime.timedelta in Python.
- *
- * td = datetime.timedelta(days=3, minutes=10)
- * duration = Duration()
- * duration.FromTimedelta(td)
- *
- * # JSON Mapping
- *
- * In JSON format, the Duration type is encoded as a string rather than an
- * object, where the string ends in the suffix "s" (indicating seconds) and
- * is preceded by the number of seconds, with nanoseconds expressed as
- * fractional seconds. For example, 3 seconds with 0 nanoseconds should be
- * encoded in JSON format as "3s", while 3 seconds and 1 nanosecond should
- * be expressed in JSON format as "3.000000001s", and 3 seconds and 1
- * microsecond should be expressed in JSON format as "3.000001s".
- *
- * @property {number} seconds
- * Signed seconds of the span of time. Must be from -315,576,000,000
- * to +315,576,000,000 inclusive. Note: these bounds are computed from:
- * 60 sec/min * 60 min/hr * 24 hr/day * 365.25 days/year * 10000 years
- *
- * @property {number} nanos
- * Signed fractions of a second at nanosecond resolution of the span
- * of time. Durations less than one second are represented with a 0
- * `seconds` field and a positive or negative `nanos` field. For durations
- * of one second or more, a non-zero value for the `nanos` field must be
- * of the same sign as the `seconds` field. Must be from -999,999,999
- * to +999,999,999 inclusive.
- *
- * @typedef Duration
- * @memberof google.protobuf
- * @see [google.protobuf.Duration definition in proto format]{@link https://github.com/google/protobuf/blob/master/src/google/protobuf/duration.proto}
- */
-const Duration = {
- // This is for documentation. Actual contents will be loaded by gRPC.
-};
\ No newline at end of file
diff --git a/src/v1/doc/google/rpc/doc_status.js b/src/v1/doc/google/rpc/doc_status.js
deleted file mode 100644
index 432ab6bb..00000000
--- a/src/v1/doc/google/rpc/doc_status.js
+++ /dev/null
@@ -1,95 +0,0 @@
-// Copyright 2019 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// https://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Note: this file is purely for documentation. Any contents are not expected
-// to be loaded as the JS file.
-
-/**
- * The `Status` type defines a logical error model that is suitable for
- * different programming environments, including REST APIs and RPC APIs. It is
- * used by [gRPC](https://github.com/grpc). The error model is designed to be:
- *
- * - Simple to use and understand for most users
- * - Flexible enough to meet unexpected needs
- *
- * # Overview
- *
- * The `Status` message contains three pieces of data: error code, error
- * message, and error details. The error code should be an enum value of
- * google.rpc.Code, but it may accept additional error codes
- * if needed. The error message should be a developer-facing English message
- * that helps developers *understand* and *resolve* the error. If a localized
- * user-facing error message is needed, put the localized message in the error
- * details or localize it in the client. The optional error details may contain
- * arbitrary information about the error. There is a predefined set of error
- * detail types in the package `google.rpc` that can be used for common error
- * conditions.
- *
- * # Language mapping
- *
- * The `Status` message is the logical representation of the error model, but it
- * is not necessarily the actual wire format. When the `Status` message is
- * exposed in different client libraries and different wire protocols, it can be
- * mapped differently. For example, it will likely be mapped to exceptions
- * in Java, but to error codes in C.
- *
- * # Other uses
- *
- * The error model and the `Status` message can be used in a variety of
- * environments, either with or without APIs, to provide a
- * consistent developer experience across different environments.
- *
- * Example uses of this error model include:
- *
- * - Partial errors. If a service needs to return partial errors to the client,
- * it may embed the `Status` in the normal response to indicate the partial
- * errors.
- *
- * - Workflow errors. A typical workflow has multiple steps. Each step may
- * have a `Status` message for error reporting.
- *
- * - Batch operations. If a client uses batch request and batch response, the
- * `Status` message should be used directly inside batch response, one for
- * each error sub-response.
- *
- * - Asynchronous operations. If an API call embeds asynchronous operation
- * results in its response, the status of those operations should be
- * represented directly using the `Status` message.
- *
- * - Logging. If some API errors are stored in logs, the message `Status` could
- * be used directly after any stripping needed for security/privacy reasons.
- *
- * @property {number} code
- * The status code, which should be an enum value of
- * google.rpc.Code.
- *
- * @property {string} message
- * A developer-facing error message, which should be in English. Any
- * user-facing error message should be localized and sent in the
- * google.rpc.Status.details field, or localized
- * by the client.
- *
- * @property {Object[]} details
- * A list of messages that carry the error details. There is a common set of
- * message types for APIs to use.
- *
- * This object should have the same structure as [Any]{@link google.protobuf.Any}
- *
- * @typedef Status
- * @memberof google.rpc
- * @see [google.rpc.Status definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/rpc/status.proto}
- */
-const Status = {
- // This is for documentation. Actual contents will be loaded by gRPC.
-};
\ No newline at end of file
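A sketch of surfacing a `Status` as a JavaScript `Error`, keeping the numeric code; the field types mirror the properties above:

```ts
interface RpcStatus {
  code: number; // a google.rpc.Code value
  message: string; // developer-facing, in English
  details?: Array<{}>; // packed Any messages
}

function statusToError(status: RpcStatus): Error {
  const err = new Error(status.message);
  // Carrying the numeric code mirrors how gRPC clients usually surface Status.
  (err as {code?: number}).code = status.code;
  return err;
}
```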
diff --git a/src/v1/index.js b/src/v1/index.ts
similarity index 70%
rename from src/v1/index.js
rename to src/v1/index.ts
index 93d34738..28f7d784 100644
--- a/src/v1/index.js
+++ b/src/v1/index.ts
@@ -11,9 +11,9 @@
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
+//
+// ** This file is automatically generated by gapic-generator-typescript. **
+// ** https://github.com/googleapis/gapic-generator-typescript **
+// ** All changes to this file may be overwritten. **
-'use strict';
-
-const SpeechClient = require('./speech_client');
-
-module.exports.SpeechClient = SpeechClient;
+export {SpeechClient} from './speech_client';
diff --git a/src/v1/speech_client.js b/src/v1/speech_client.ts
similarity index 51%
rename from src/v1/speech_client.js
rename to src/v1/speech_client.ts
index 19b45508..016c4b32 100644
--- a/src/v1/speech_client.js
+++ b/src/v1/speech_client.ts
@@ -11,22 +11,39 @@
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
-
-'use strict';
-
-const gapicConfig = require('./speech_client_config.json');
-const gax = require('google-gax');
-const path = require('path');
-
-const VERSION = require('../../package.json').version;
+//
+// ** This file is automatically generated by gapic-generator-typescript. **
+// ** https://github.com/googleapis/gapic-generator-typescript **
+// ** All changes to this file may be overwritten. **
+
+import * as gax from 'google-gax';
+import {
+ APICallback,
+ Callback,
+ CallOptions,
+ Descriptors,
+ ClientOptions,
+ LROperation,
+} from 'google-gax';
+import * as path from 'path';
+
+import * as protosTypes from '../../protos/protos';
+import * as gapicConfig from './speech_client_config.json';
+
+const version = require('../../../package.json').version;
/**
- * Service that implements Google Cloud Speech API.
- *
+ * Service that implements Google Cloud Speech API.
* @class
* @memberof v1
*/
-class SpeechClient {
+export class SpeechClient {
+ private _descriptors: Descriptors = {page: {}, stream: {}, longrunning: {}};
+ private _speechStub: Promise<{[name: string]: Function}>;
+ private _innerApiCalls: {[name: string]: Function};
+ private _terminated = false;
+ auth: gax.GoogleAuth;
+
/**
* Construct an instance of SpeechClient.
*
@@ -54,58 +71,55 @@ class SpeechClient {
* @param {string} [options.apiEndpoint] - The domain name of the
* API remote host.
*/
- constructor(opts) {
- opts = opts || {};
- this._descriptors = {};
- if (global.isBrowser) {
- // If we're in browser, we use gRPC fallback.
- opts.fallback = true;
+ constructor(opts?: ClientOptions) {
+ // Ensure that options include the service address and port.
+ const staticMembers = this.constructor as typeof SpeechClient;
+ const servicePath =
+ opts && opts.servicePath
+ ? opts.servicePath
+ : opts && opts.apiEndpoint
+ ? opts.apiEndpoint
+ : staticMembers.servicePath;
+ const port = opts && opts.port ? opts.port : staticMembers.port;
+
+ if (!opts) {
+ opts = {servicePath, port};
}
+ opts.servicePath = opts.servicePath || servicePath;
+ opts.port = opts.port || port;
+ opts.clientConfig = opts.clientConfig || {};
+ const isBrowser = typeof window !== 'undefined';
+ if (isBrowser) {
+ opts.fallback = true;
+ }
// If we are in browser, we are already using fallback because of the
// "browser" field in package.json.
// But if we were explicitly requested to use fallback, let's do it now.
- const gaxModule = !global.isBrowser && opts.fallback ? gax.fallback : gax;
-
- const servicePath =
- opts.servicePath || opts.apiEndpoint || this.constructor.servicePath;
-
- // Ensure that options include the service address and port.
- opts = Object.assign(
- {
- clientConfig: {},
- port: this.constructor.port,
- servicePath,
- },
- opts
- );
+ const gaxModule = !isBrowser && opts.fallback ? gax.fallback : gax;
// Create a `gaxGrpc` object, with any grpc-specific options
// sent to the client.
- opts.scopes = this.constructor.scopes;
+ opts.scopes = (this.constructor as typeof SpeechClient).scopes;
const gaxGrpc = new gaxModule.GrpcClient(opts);
// Save the auth object to the client, for use by other methods.
- this.auth = gaxGrpc.auth;
+ this.auth = gaxGrpc.auth as gax.GoogleAuth;
// Determine the client header string.
- const clientHeader = [];
-
+ const clientHeader = [`gax/${gaxModule.version}`, `gapic/${version}`];
if (typeof process !== 'undefined' && 'versions' in process) {
clientHeader.push(`gl-node/${process.versions.node}`);
- }
- clientHeader.push(`gax/${gaxModule.version}`);
- if (opts.fallback) {
- clientHeader.push(`gl-web/${gaxModule.version}`);
} else {
+ clientHeader.push(`gl-web/${gaxModule.version}`);
+ }
+ if (!opts.fallback) {
clientHeader.push(`grpc/${gaxGrpc.grpcVersion}`);
}
- clientHeader.push(`gapic/${VERSION}`);
if (opts.libName && opts.libVersion) {
clientHeader.push(`${opts.libName}/${opts.libVersion}`);
}
-
// Load the applicable protos.
// For Node.js, pass the path to JSON proto file.
// For browsers, pass the JSON content.
@@ -129,28 +143,29 @@ class SpeechClient {
),
};
- const protoFilesRoot = opts.fallback
- ? gaxModule.protobuf.Root.fromJSON(require('../../protos/protos.json'))
- : gaxModule.protobuf.loadSync(nodejsProtoPath);
-
// This API contains "long-running operations", which return
// an Operation object that allows for tracking of the operation,
// rather than holding a request open.
- this.operationsClient = new gaxModule.lro({
- auth: gaxGrpc.auth,
- grpc: gaxGrpc.grpc,
- }).operationsClient(opts);
+ const protoFilesRoot = opts.fallback
+ ? gaxModule.protobuf.Root.fromJSON(require('../../protos/protos.json'))
+ : gaxModule.protobuf.loadSync(nodejsProtoPath);
+ const operationsClient = gaxModule
+ .lro({
+ auth: this.auth,
+ grpc: 'grpc' in gaxGrpc ? gaxGrpc.grpc : undefined,
+ })
+ .operationsClient(opts);
const longRunningRecognizeResponse = protoFilesRoot.lookup(
- 'google.cloud.speech.v1.LongRunningRecognizeResponse'
- );
+ '.google.cloud.speech.v1.LongRunningRecognizeResponse'
+ ) as gax.protobuf.Type;
const longRunningRecognizeMetadata = protoFilesRoot.lookup(
- 'google.cloud.speech.v1.LongRunningRecognizeMetadata'
- );
+ '.google.cloud.speech.v1.LongRunningRecognizeMetadata'
+ ) as gax.protobuf.Type;
this._descriptors.longrunning = {
longRunningRecognize: new gaxModule.LongrunningDescriptor(
- this.operationsClient,
+ operationsClient,
longRunningRecognizeResponse.decode.bind(longRunningRecognizeResponse),
longRunningRecognizeMetadata.decode.bind(longRunningRecognizeMetadata)
),
@@ -159,8 +174,8 @@ class SpeechClient {
// Put together the default options sent with requests.
const defaults = gaxGrpc.constructSettings(
'google.cloud.speech.v1.Speech',
- gapicConfig,
- opts.clientConfig,
+ gapicConfig as gax.ClientConfig,
+ opts.clientConfig || {},
{'x-goog-api-client': clientHeader.join(' ')}
);
@@ -171,12 +186,15 @@ class SpeechClient {
// Put together the "service stub" for
// google.cloud.speech.v1.Speech.
- const speechStub = gaxGrpc.createStub(
+ this._speechStub = gaxGrpc.createStub(
opts.fallback
- ? protos.lookupService('google.cloud.speech.v1.Speech')
- : protos.google.cloud.speech.v1.Speech,
+ ? (protos as protobuf.Root).lookupService(
+ 'google.cloud.speech.v1.Speech'
+ )
+ : // tslint:disable-next-line no-any
+ (protos as any).google.cloud.speech.v1.Speech,
opts
- );
+ ) as Promise<{[method: string]: Function}>;
// Iterate over each of the methods that the service provides
// and create an API call method for each.
@@ -185,21 +203,35 @@ class SpeechClient {
'longRunningRecognize',
'streamingRecognize',
];
+
for (const methodName of speechStubMethods) {
- const innerCallPromise = speechStub.then(
- stub => (...args) => {
+ const innerCallPromise = this._speechStub.then(
+ stub => (...args: Array<{}>) => {
return stub[methodName].apply(stub, args);
},
- err => () => {
+ (err: Error | null | undefined) => () => {
throw err;
}
);
- this._innerApiCalls[methodName] = gaxModule.createApiCall(
+
+ const apiCall = gaxModule.createApiCall(
innerCallPromise,
defaults[methodName],
- this._descriptors.stream[methodName] ||
+ this._descriptors.page[methodName] ||
+ this._descriptors.stream[methodName] ||
this._descriptors.longrunning[methodName]
);
+
+ this._innerApiCalls[methodName] = (
+ argument: {},
+ callOptions?: CallOptions,
+ callback?: APICallback
+ ) => {
+ if (this._terminated) {
+ return Promise.reject('The client has already been closed.');
+ }
+ return apiCall(argument, callOptions, callback);
+ };
}
}
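A sketch of how the option resolution above behaves for callers; the regional endpoint name is illustrative:

```ts
import {SpeechClient} from '@google-cloud/speech';

// Defaults come from the static servicePath/port members.
const client = new SpeechClient();

// `apiEndpoint` is honored when `servicePath` is absent.
const regional = new SpeechClient({apiEndpoint: 'eu-speech.googleapis.com'});

// Force the browser/HTTP fallback transport instead of gRPC.
const fallbackClient = new SpeechClient({fallback: true});
```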
@@ -233,89 +265,136 @@ class SpeechClient {
return ['https://www.googleapis.com/auth/cloud-platform'];
}
+ getProjectId(): Promise<string>;
+ getProjectId(callback: Callback<string, undefined, undefined>): void;
/**
* Return the project ID used by this class.
* @param {function(Error, string)} callback - the callback to
* be called with the current project Id.
*/
- getProjectId(callback) {
- return this.auth.getProjectId(callback);
+ getProjectId(
+ callback?: Callback<string, undefined, undefined>
+ ): Promise<string> | void {
+ if (callback) {
+ this.auth.getProjectId(callback);
+ return;
+ }
+ return this.auth.getProjectId();
}
// -------------------
// -- Service calls --
// -------------------
-
+ recognize(
+ request: protosTypes.google.cloud.speech.v1.IRecognizeRequest,
+ options?: gax.CallOptions
+ ): Promise<
+ [
+ protosTypes.google.cloud.speech.v1.IRecognizeResponse,
+ protosTypes.google.cloud.speech.v1.IRecognizeRequest | undefined,
+ {} | undefined
+ ]
+ >;
+ recognize(
+ request: protosTypes.google.cloud.speech.v1.IRecognizeRequest,
+ options: gax.CallOptions,
+ callback: Callback<
+ protosTypes.google.cloud.speech.v1.IRecognizeResponse,
+ protosTypes.google.cloud.speech.v1.IRecognizeRequest | undefined,
+ {} | undefined
+ >
+ ): void;
/**
* Performs synchronous speech recognition: receive results after all audio
* has been sent and processed.
*
* @param {Object} request
* The request object that will be sent.
- * @param {Object} request.config
+ * @param {google.cloud.speech.v1.RecognitionConfig} request.config
* Required. Provides information to the recognizer that specifies how to
* process the request.
- *
- * This object should have the same structure as [RecognitionConfig]{@link google.cloud.speech.v1.RecognitionConfig}
- * @param {Object} request.audio
+ * @param {google.cloud.speech.v1.RecognitionAudio} request.audio
* Required. The audio data to be recognized.
- *
- * This object should have the same structure as [RecognitionAudio]{@link google.cloud.speech.v1.RecognitionAudio}
- * @param {Object} [options]
- * Optional parameters. You can override the default settings for this call, e.g, timeout,
- * retries, paginations, etc. See [gax.CallOptions]{@link https://googleapis.github.io/gax-nodejs/interfaces/CallOptions.html} for the details.
- * @param {function(?Error, ?Object)} [callback]
- * The function which will be called with the result of the API call.
- *
- * The second parameter to the callback is an object representing [RecognizeResponse]{@link google.cloud.speech.v1.RecognizeResponse}.
+ * @param {object} [options]
+ * Call options. See {@link https://googleapis.dev/nodejs/google-gax/latest/interfaces/CallOptions.html|CallOptions} for more details.
* @returns {Promise} - The promise which resolves to an array.
* The first element of the array is an object representing [RecognizeResponse]{@link google.cloud.speech.v1.RecognizeResponse}.
* The promise has a method named "cancel" which cancels the ongoing API call.
- *
- * @example
- *
- * const speech = require('@google-cloud/speech');
- *
- * const client = new speech.v1.SpeechClient({
- * // optional auth parameters.
- * });
- *
- * const encoding = 'FLAC';
- * const sampleRateHertz = 44100;
- * const languageCode = 'en-US';
- * const config = {
- * encoding: encoding,
- * sampleRateHertz: sampleRateHertz,
- * languageCode: languageCode,
- * };
- * const uri = 'gs://bucket_name/file_name.flac';
- * const audio = {
- * uri: uri,
- * };
- * const request = {
- * config: config,
- * audio: audio,
- * };
- * client.recognize(request)
- * .then(responses => {
- * const response = responses[0];
- * // doThingsWith(response)
- * })
- * .catch(err => {
- * console.error(err);
- * });
*/
- recognize(request, options, callback) {
- if (options instanceof Function && callback === undefined) {
- callback = options;
+ recognize(
+ request: protosTypes.google.cloud.speech.v1.IRecognizeRequest,
+ optionsOrCallback?:
+ | gax.CallOptions
+ | Callback<
+ protosTypes.google.cloud.speech.v1.IRecognizeResponse,
+ protosTypes.google.cloud.speech.v1.IRecognizeRequest | undefined,
+ {} | undefined
+ >,
+ callback?: Callback<
+ protosTypes.google.cloud.speech.v1.IRecognizeResponse,
+ protosTypes.google.cloud.speech.v1.IRecognizeRequest | undefined,
+ {} | undefined
+ >
+ ): Promise<
+ [
+ protosTypes.google.cloud.speech.v1.IRecognizeResponse,
+ protosTypes.google.cloud.speech.v1.IRecognizeRequest | undefined,
+ {} | undefined
+ ]
+ > | void {
+ request = request || {};
+ let options: gax.CallOptions;
+ if (typeof optionsOrCallback === 'function' && callback === undefined) {
+ callback = optionsOrCallback;
options = {};
+ } else {
+ options = optionsOrCallback as gax.CallOptions;
}
- request = request || {};
options = options || {};
-
return this._innerApiCalls.recognize(request, options, callback);
}
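A usage sketch standing in for the removed JSDoc example, using the same FLAC/Cloud Storage request shape; bucket and file names are placeholders:

```ts
import {SpeechClient} from '@google-cloud/speech';

async function transcribe(): Promise<void> {
  const client = new SpeechClient();
  const [response] = await client.recognize({
    config: {encoding: 'FLAC', sampleRateHertz: 44100, languageCode: 'en-US'},
    audio: {uri: 'gs://bucket_name/file_name.flac'},
  });
  for (const result of response.results || []) {
    console.log(result.alternatives && result.alternatives[0].transcript);
  }
}
transcribe().catch(console.error);
```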
+ /**
+ * Performs bidirectional streaming speech recognition: receive results while
+ * sending audio. This method is only available via the gRPC API (not REST).
+ *
+ * @param {object} [options]
+ * Call options. See {@link https://googleapis.dev/nodejs/google-gax/latest/interfaces/CallOptions.html|CallOptions} for more details.
+ * @returns {Stream}
+ * An object stream which is both readable and writable. It accepts objects
+ * representing [StreamingRecognizeRequest]{@link google.cloud.speech.v1.StreamingRecognizeRequest} for write() method, and
+ * will emit objects representing [StreamingRecognizeResponse]{@link google.cloud.speech.v1.StreamingRecognizeResponse} on 'data' event asynchronously.
+ */
+ _streamingRecognize(options?: gax.CallOptions): gax.CancellableStream {
+ options = options || {};
+ return this._innerApiCalls.streamingRecognize(options);
+ }
+
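A sketch of the public streaming surface that `src/helpers` layers over the private method above (see the `ImprovedStreamingClient` interface merge at the end of this file); config values are illustrative:

```ts
import * as fs from 'fs';
import {SpeechClient} from '@google-cloud/speech';

function streamFile(client: SpeechClient, path: string): void {
  const recognizeStream = client
    .streamingRecognize({
      config: {encoding: 'LINEAR16', sampleRateHertz: 16000, languageCode: 'en-US'},
      interimResults: true,
    })
    .on('error', console.error)
    .on('data', (data: any) => {
      const result = data.results && data.results[0];
      if (result && result.alternatives.length) {
        console.log(result.alternatives[0].transcript);
      }
    });
  // The helper writes the config message first, then streams the audio bytes.
  fs.createReadStream(path).pipe(recognizeStream);
}
```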
+ longRunningRecognize(
+ request: protosTypes.google.cloud.speech.v1.ILongRunningRecognizeRequest,
+ options?: gax.CallOptions
+ ): Promise<
+ [
+ LROperation<
+ protosTypes.google.cloud.speech.v1.ILongRunningRecognizeResponse,
+ protosTypes.google.cloud.speech.v1.ILongRunningRecognizeMetadata
+ >,
+ protosTypes.google.longrunning.IOperation | undefined,
+ {} | undefined
+ ]
+ >;
+ longRunningRecognize(
+ request: protosTypes.google.cloud.speech.v1.ILongRunningRecognizeRequest,
+ options: gax.CallOptions,
+ callback: Callback<
+ LROperation<
+ protosTypes.google.cloud.speech.v1.ILongRunningRecognizeResponse,
+ protosTypes.google.cloud.speech.v1.ILongRunningRecognizeMetadata
+ >,
+ protosTypes.google.longrunning.IOperation | undefined,
+ {} | undefined
+ >
+ ): void;
/**
* Performs asynchronous speech recognition: receive results via the
* google.longrunning.Operations interface. Returns either an
@@ -326,176 +405,74 @@ class SpeechClient {
*
* @param {Object} request
* The request object that will be sent.
- * @param {Object} request.config
+ * @param {google.cloud.speech.v1.RecognitionConfig} request.config
* Required. Provides information to the recognizer that specifies how to
* process the request.
- *
- * This object should have the same structure as [RecognitionConfig]{@link google.cloud.speech.v1.RecognitionConfig}
- * @param {Object} request.audio
+ * @param {google.cloud.speech.v1.RecognitionAudio} request.audio
* Required. The audio data to be recognized.
- *
- * This object should have the same structure as [RecognitionAudio]{@link google.cloud.speech.v1.RecognitionAudio}
- * @param {Object} [options]
- * Optional parameters. You can override the default settings for this call, e.g, timeout,
- * retries, paginations, etc. See [gax.CallOptions]{@link https://googleapis.github.io/gax-nodejs/interfaces/CallOptions.html} for the details.
- * @param {function(?Error, ?Object)} [callback]
- * The function which will be called with the result of the API call.
- *
- * The second parameter to the callback is a [gax.Operation]{@link https://googleapis.github.io/gax-nodejs/classes/Operation.html} object.
+ * @param {object} [options]
+ * Call options. See {@link https://googleapis.dev/nodejs/google-gax/latest/interfaces/CallOptions.html|CallOptions} for more details.
* @returns {Promise} - The promise which resolves to an array.
- * The first element of the array is a [gax.Operation]{@link https://googleapis.github.io/gax-nodejs/classes/Operation.html} object.
+ * The first element of the array is an object representing [Operation]{@link google.longrunning.Operation}.
* The promise has a method named "cancel" which cancels the ongoing API call.
- *
- * @example
- *
- * const speech = require('@google-cloud/speech');
- *
- * const client = new speech.v1.SpeechClient({
- * // optional auth parameters.
- * });
- *
- * const encoding = 'FLAC';
- * const sampleRateHertz = 44100;
- * const languageCode = 'en-US';
- * const config = {
- * encoding: encoding,
- * sampleRateHertz: sampleRateHertz,
- * languageCode: languageCode,
- * };
- * const uri = 'gs://bucket_name/file_name.flac';
- * const audio = {
- * uri: uri,
- * };
- * const request = {
- * config: config,
- * audio: audio,
- * };
- *
- * // Handle the operation using the promise pattern.
- * client.longRunningRecognize(request)
- * .then(responses => {
- * const [operation, initialApiResponse] = responses;
- *
- * // Operation#promise starts polling for the completion of the LRO.
- * return operation.promise();
- * })
- * .then(responses => {
- * const result = responses[0];
- * const metadata = responses[1];
- * const finalApiResponse = responses[2];
- * })
- * .catch(err => {
- * console.error(err);
- * });
- *
- * const encoding = 'FLAC';
- * const sampleRateHertz = 44100;
- * const languageCode = 'en-US';
- * const config = {
- * encoding: encoding,
- * sampleRateHertz: sampleRateHertz,
- * languageCode: languageCode,
- * };
- * const uri = 'gs://bucket_name/file_name.flac';
- * const audio = {
- * uri: uri,
- * };
- * const request = {
- * config: config,
- * audio: audio,
- * };
- *
- * // Handle the operation using the event emitter pattern.
- * client.longRunningRecognize(request)
- * .then(responses => {
- * const [operation, initialApiResponse] = responses;
- *
- * // Adding a listener for the "complete" event starts polling for the
- * // completion of the operation.
- * operation.on('complete', (result, metadata, finalApiResponse) => {
- * // doSomethingWith(result);
- * });
- *
- * // Adding a listener for the "progress" event causes the callback to be
- * // called on any change in metadata when the operation is polled.
- * operation.on('progress', (metadata, apiResponse) => {
- * // doSomethingWith(metadata)
- * });
- *
- * // Adding a listener for the "error" event handles any errors found during polling.
- * operation.on('error', err => {
- * // throw(err);
- * });
- * })
- * .catch(err => {
- * console.error(err);
- * });
- *
- * const encoding = 'FLAC';
- * const sampleRateHertz = 44100;
- * const languageCode = 'en-US';
- * const config = {
- * encoding: encoding,
- * sampleRateHertz: sampleRateHertz,
- * languageCode: languageCode,
- * };
- * const uri = 'gs://bucket_name/file_name.flac';
- * const audio = {
- * uri: uri,
- * };
- * const request = {
- * config: config,
- * audio: audio,
- * };
- *
- * // Handle the operation using the await pattern.
- * const [operation] = await client.longRunningRecognize(request);
- *
- * const [response] = await operation.promise();
*/
- longRunningRecognize(request, options, callback) {
- if (options instanceof Function && callback === undefined) {
- callback = options;
+ longRunningRecognize(
+ request: protosTypes.google.cloud.speech.v1.ILongRunningRecognizeRequest,
+ optionsOrCallback?:
+ | gax.CallOptions
+ | Callback<
+ LROperation<
+ protosTypes.google.cloud.speech.v1.ILongRunningRecognizeResponse,
+ protosTypes.google.cloud.speech.v1.ILongRunningRecognizeMetadata
+ >,
+ protosTypes.google.longrunning.IOperation | undefined,
+ {} | undefined
+ >,
+ callback?: Callback<
+ LROperation<
+ protosTypes.google.cloud.speech.v1.ILongRunningRecognizeResponse,
+ protosTypes.google.cloud.speech.v1.ILongRunningRecognizeMetadata
+ >,
+ protosTypes.google.longrunning.IOperation | undefined,
+ {} | undefined
+ >
+ ): Promise<
+ [
+ LROperation<
+ protosTypes.google.cloud.speech.v1.ILongRunningRecognizeResponse,
+ protosTypes.google.cloud.speech.v1.ILongRunningRecognizeMetadata
+ >,
+ protosTypes.google.longrunning.IOperation | undefined,
+ {} | undefined
+ ]
+ > | void {
+ request = request || {};
+ let options: gax.CallOptions;
+ if (typeof optionsOrCallback === 'function' && callback === undefined) {
+ callback = optionsOrCallback;
options = {};
+ } else {
+ options = optionsOrCallback as gax.CallOptions;
}
- request = request || {};
options = options || {};
-
return this._innerApiCalls.longRunningRecognize(request, options, callback);
}
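A usage sketch of the promise pattern the removed JSDoc examples demonstrated; request values are placeholders:

```ts
import {SpeechClient} from '@google-cloud/speech';

async function transcribeLongAudio(client: SpeechClient) {
  const [operation] = await client.longRunningRecognize({
    config: {encoding: 'FLAC', sampleRateHertz: 44100, languageCode: 'en-US'},
    audio: {uri: 'gs://bucket_name/file_name.flac'},
  });
  // promise() polls until done and resolves to [response, metadata, rawOperation].
  const [response] = await operation.promise();
  return response;
}
```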
/**
- * Performs bidirectional streaming speech recognition: receive results while
- * sending audio. This method is only available via the gRPC API (not REST).
+ * Terminate the GRPC channel and close the client.
*
- * @param {Object} [options]
- * Optional parameters. You can override the default settings for this call, e.g, timeout,
- * retries, paginations, etc. See [gax.CallOptions]{@link https://googleapis.github.io/gax-nodejs/interfaces/CallOptions.html} for the details.
- * @returns {Stream}
- * An object stream which is both readable and writable. It accepts objects
- * representing [StreamingRecognizeRequest]{@link google.cloud.speech.v1.StreamingRecognizeRequest} for write() method, and
- * will emit objects representing [StreamingRecognizeResponse]{@link google.cloud.speech.v1.StreamingRecognizeResponse} on 'data' event asynchronously.
- *
- * @example
- *
- * const speech = require('@google-cloud/speech');
- *
- * const client = new speech.v1.SpeechClient({
- * // optional auth parameters.
- * });
- *
- * const stream = client.streamingRecognize().on('data', response => {
- * // doThingsWith(response)
- * });
- * const request = {};
- * // Write request objects.
- * stream.write(request);
+ * The client will no longer be usable and all future behavior is undefined.
*/
- streamingRecognize(options) {
- options = options || {};
-
- return this._innerApiCalls.streamingRecognize(options);
+ close(): Promise<void> {
+ if (!this._terminated) {
+ return this._speechStub.then(stub => {
+ this._terminated = true;
+ stub.close();
+ });
+ }
+ return Promise.resolve();
}
}
-module.exports = SpeechClient;
+import {ImprovedStreamingClient} from '../helpers';
+export interface SpeechClient extends ImprovedStreamingClient {}
diff --git a/src/v1/speech_client_config.json b/src/v1/speech_client_config.json
index 87cee147..d38ce260 100644
--- a/src/v1/speech_client_config.json
+++ b/src/v1/speech_client_config.json
@@ -2,11 +2,11 @@
"interfaces": {
"google.cloud.speech.v1.Speech": {
"retry_codes": {
+ "non_idempotent": [],
"idempotent": [
"DEADLINE_EXCEEDED",
"UNAVAILABLE"
- ],
- "non_idempotent": []
+ ]
},
"retry_params": {
"default": {
@@ -14,25 +14,25 @@
"retry_delay_multiplier": 1.3,
"max_retry_delay_millis": 60000,
"initial_rpc_timeout_millis": 20000,
- "rpc_timeout_multiplier": 1.0,
+ "rpc_timeout_multiplier": 1,
"max_rpc_timeout_millis": 20000,
"total_timeout_millis": 600000
}
},
"methods": {
"Recognize": {
- "timeout_millis": 60000,
- "retry_codes_name": "non_idempotent",
+ "timeout_millis": 5000000,
+ "retry_codes_name": "idempotent",
"retry_params_name": "default"
},
"LongRunningRecognize": {
- "timeout_millis": 60000,
+ "timeout_millis": 5000000,
"retry_codes_name": "non_idempotent",
"retry_params_name": "default"
},
"StreamingRecognize": {
- "timeout_millis": 60000,
- "retry_codes_name": "non_idempotent",
+ "timeout_millis": 5000000,
+ "retry_codes_name": "idempotent",
"retry_params_name": "default"
}
}
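These JSON values are service-wide defaults; a sketch of overriding them per call through gax `CallOptions` (the `retry: null` opt-out is an assumption about the gax surface):

```ts
import {SpeechClient} from '@google-cloud/speech';

async function recognizeWithOverrides(client: SpeechClient, request: {}) {
  return client.recognize(request, {
    timeout: 60000, // ms; replaces the timeout_millis default for this call
    retry: null, // opt this call out of the configured retry policy
  });
}
```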
diff --git a/src/v1p1beta1/doc/google/cloud/speech/v1p1beta1/doc_cloud_speech.js b/src/v1p1beta1/doc/google/cloud/speech/v1p1beta1/doc_cloud_speech.js
deleted file mode 100644
index c8bd5f04..00000000
--- a/src/v1p1beta1/doc/google/cloud/speech/v1p1beta1/doc_cloud_speech.js
+++ /dev/null
@@ -1,1046 +0,0 @@
-// Copyright 2019 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// https://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Note: this file is purely for documentation. Any contents are not expected
-// to be loaded as the JS file.
-
-/**
- * The top-level message sent by the client for the `Recognize` method.
- *
- * @property {Object} config
- * Required. Provides information to the recognizer that specifies how to
- * process the request.
- *
- * This object should have the same structure as [RecognitionConfig]{@link google.cloud.speech.v1p1beta1.RecognitionConfig}
- *
- * @property {Object} audio
- * Required. The audio data to be recognized.
- *
- * This object should have the same structure as [RecognitionAudio]{@link google.cloud.speech.v1p1beta1.RecognitionAudio}
- *
- * @typedef RecognizeRequest
- * @memberof google.cloud.speech.v1p1beta1
- * @see [google.cloud.speech.v1p1beta1.RecognizeRequest definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/speech/v1p1beta1/cloud_speech.proto}
- */
-const RecognizeRequest = {
- // This is for documentation. Actual contents will be loaded by gRPC.
-};
-
-/**
- * The top-level message sent by the client for the `LongRunningRecognize`
- * method.
- *
- * @property {Object} config
- * Required. Provides information to the recognizer that specifies how to
- * process the request.
- *
- * This object should have the same structure as [RecognitionConfig]{@link google.cloud.speech.v1p1beta1.RecognitionConfig}
- *
- * @property {Object} audio
- * Required. The audio data to be recognized.
- *
- * This object should have the same structure as [RecognitionAudio]{@link google.cloud.speech.v1p1beta1.RecognitionAudio}
- *
- * @typedef LongRunningRecognizeRequest
- * @memberof google.cloud.speech.v1p1beta1
- * @see [google.cloud.speech.v1p1beta1.LongRunningRecognizeRequest definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/speech/v1p1beta1/cloud_speech.proto}
- */
-const LongRunningRecognizeRequest = {
- // This is for documentation. Actual contents will be loaded by gRPC.
-};
-
-/**
- * The top-level message sent by the client for the `StreamingRecognize` method.
- * Multiple `StreamingRecognizeRequest` messages are sent. The first message
- * must contain a `streaming_config` message and must not contain
- * `audio_content`. All subsequent messages must contain `audio_content` and
- * must not contain a `streaming_config` message.
- *
- * @property {Object} streamingConfig
- * Provides information to the recognizer that specifies how to process the
- * request. The first `StreamingRecognizeRequest` message must contain a
- * `streaming_config` message.
- *
- * This object should have the same structure as [StreamingRecognitionConfig]{@link google.cloud.speech.v1p1beta1.StreamingRecognitionConfig}
- *
- * @property {Buffer} audioContent
- * The audio data to be recognized. Sequential chunks of audio data are sent
- * in sequential `StreamingRecognizeRequest` messages. The first
- * `StreamingRecognizeRequest` message must not contain `audio_content` data
- * and all subsequent `StreamingRecognizeRequest` messages must contain
- * `audio_content` data. The audio bytes must be encoded as specified in
- * `RecognitionConfig`. Note: as with all bytes fields, proto buffers use a
- * pure binary representation (not base64). See
- * [content limits](https://cloud.google.com/speech-to-text/quotas#content).
- *
- * @typedef StreamingRecognizeRequest
- * @memberof google.cloud.speech.v1p1beta1
- * @see [google.cloud.speech.v1p1beta1.StreamingRecognizeRequest definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/speech/v1p1beta1/cloud_speech.proto}
- */
-const StreamingRecognizeRequest = {
- // This is for documentation. Actual contents will be loaded by gRPC.
-};
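The typedef being deleted here describes the wire-level ordering (one `streaming_config` message first, then `audio_content`-only messages). The library's helper enforces that ordering itself; a sketch of the documented usage, with the audio file name hypothetical:

```ts
import * as fs from 'fs';
import {SpeechClient} from '@google-cloud/speech';

const client = new SpeechClient();
const recognizeStream = client
  .streamingRecognize({
    config: {
      encoding: 'LINEAR16',
      sampleRateHertz: 16000,
      languageCode: 'en-US',
    },
    interimResults: false,
  })
  .on('error', console.error)
  .on('data', (data: any) =>
    console.log(data.results[0]?.alternatives?.[0]?.transcript)
  );

// The helper sends the streaming_config message first; every chunk written
// afterwards becomes an audio_content-only StreamingRecognizeRequest.
fs.createReadStream('audio.raw').pipe(recognizeStream); // hypothetical file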
-
-/**
- * Provides information to the recognizer that specifies how to process the
- * request.
- *
- * @property {Object} config
- * Required. Provides information to the recognizer that specifies how to
- * process the request.
- *
- * This object should have the same structure as [RecognitionConfig]{@link google.cloud.speech.v1p1beta1.RecognitionConfig}
- *
- * @property {boolean} singleUtterance
- * If `false` or omitted, the recognizer will perform continuous
- * recognition (continuing to wait for and process audio even if the user
- * pauses speaking) until the client closes the input stream (gRPC API) or
- * until the maximum time limit has been reached. May return multiple
- * `StreamingRecognitionResult`s with the `is_final` flag set to `true`.
- *
- * If `true`, the recognizer will detect a single spoken utterance. When it
- * detects that the user has paused or stopped speaking, it will return an
- * `END_OF_SINGLE_UTTERANCE` event and cease recognition. It will return no
- * more than one `StreamingRecognitionResult` with the `is_final` flag set to
- * `true`.
- *
- * @property {boolean} interimResults
- * If `true`, interim results (tentative hypotheses) may be
- * returned as they become available (these interim results are indicated with
- * the `is_final=false` flag).
- * If `false` or omitted, only `is_final=true` result(s) are returned.
- *
- * @typedef StreamingRecognitionConfig
- * @memberof google.cloud.speech.v1p1beta1
- * @see [google.cloud.speech.v1p1beta1.StreamingRecognitionConfig definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/speech/v1p1beta1/cloud_speech.proto}
- */
-const StreamingRecognitionConfig = {
- // This is for documentation. Actual contents will be loaded by gRPC.
-};
-
-/**
- * Provides information to the recognizer that specifies how to process the
- * request.
- *
- * @property {number} encoding
- * Encoding of audio data sent in all `RecognitionAudio` messages.
- * This field is optional for `FLAC` and `WAV` audio files and required
- * for all other audio formats. For details, see AudioEncoding.
- *
- * The number should be among the values of [AudioEncoding]{@link google.cloud.speech.v1p1beta1.AudioEncoding}
- *
- * @property {number} sampleRateHertz
- * Sample rate in Hertz of the audio data sent in all
- * `RecognitionAudio` messages. Valid values are: 8000-48000.
- * 16000 is optimal. For best results, set the sampling rate of the audio
- * source to 16000 Hz. If that's not possible, use the native sample rate of
- * the audio source (instead of re-sampling).
- * This field is optional for FLAC and WAV audio files, but is
- * required for all other audio formats. For details, see AudioEncoding.
- *
- * @property {number} audioChannelCount
- * The number of channels in the input audio data.
- * ONLY set this for MULTI-CHANNEL recognition.
- * Valid values for LINEAR16 and FLAC are `1`-`8`.
- * Valid values for OGG_OPUS are '1'-'254'.
- * Valid value for MULAW, AMR, AMR_WB and SPEEX_WITH_HEADER_BYTE is only `1`.
- * If `0` or omitted, defaults to one channel (mono).
- * Note: We only recognize the first channel by default.
- * To perform independent recognition on each channel set
- * `enable_separate_recognition_per_channel` to 'true'.
- *
- * @property {boolean} enableSeparateRecognitionPerChannel
- * This needs to be set to `true` explicitly and `audio_channel_count` > 1
- * to get each channel recognized separately. The recognition result will
- * contain a `channel_tag` field to state which channel that result belongs
- * to. If this is not true, we will only recognize the first channel. The
- * request is billed cumulatively for all channels recognized:
- * `audio_channel_count` multiplied by the length of the audio.
- *
- * @property {string} languageCode
- * Required. The language of the supplied audio as a
- * [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag.
- * Example: "en-US".
- * See [Language
- * Support](https://cloud.google.com/speech-to-text/docs/languages) for a list
- * of the currently supported language codes.
- *
- * @property {string[]} alternativeLanguageCodes
- * A list of up to 3 additional
- * [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tags,
- * listing possible alternative languages of the supplied audio.
- * See [Language
- * Support](https://cloud.google.com/speech-to-text/docs/languages) for a list
- * of the currently supported language codes. If alternative languages are
- * listed, recognition result will contain recognition in the most likely
- * language detected including the main language_code. The recognition result
- * will include the language tag of the language detected in the audio. Note:
- * This feature is only supported for Voice Command and Voice Search use cases
- * and performance may vary for other use cases (e.g., phone call
- * transcription).
- *
- * @property {number} maxAlternatives
- * Maximum number of recognition hypotheses to be returned.
- * Specifically, the maximum number of `SpeechRecognitionAlternative` messages
- * within each `SpeechRecognitionResult`.
- * The server may return fewer than `max_alternatives`.
- * Valid values are `0`-`30`. A value of `0` or `1` will return a maximum of
- * one. If omitted, will return a maximum of one.
- *
- * @property {boolean} profanityFilter
- * If set to `true`, the server will attempt to filter out
- * profanities, replacing all but the initial character in each filtered word
- * with asterisks, e.g. "f***". If set to `false` or omitted, profanities
- * won't be filtered out.
- *
- * @property {Object[]} speechContexts
- * Array of SpeechContext.
- * A means to provide context to assist the speech recognition. For more
- * information, see
- * [speech
- * adaptation](https://cloud.google.com/speech-to-text/docs/context-strength).
- *
- * This object should have the same structure as [SpeechContext]{@link google.cloud.speech.v1p1beta1.SpeechContext}
- *
- * @property {boolean} enableWordTimeOffsets
- * If `true`, the top result includes a list of words and
- * the start and end time offsets (timestamps) for those words. If
- * `false`, no word-level time offset information is returned. The default is
- * `false`.
- *
- * @property {boolean} enableWordConfidence
- * If `true`, the top result includes a list of words and the
- * confidence for those words. If `false`, no word-level confidence
- * information is returned. The default is `false`.
- *
- * @property {boolean} enableAutomaticPunctuation
- * If 'true', adds punctuation to recognition result hypotheses.
- * This feature is only available in select languages. Setting this for
- * requests in other languages has no effect at all.
- * The default 'false' value does not add punctuation to result hypotheses.
- * Note: This is currently offered as an experimental service, complimentary
- * to all users. In the future this may be exclusively available as a
- * premium feature.
- *
- * @property {boolean} enableSpeakerDiarization
- * If 'true', enables speaker detection for each recognized word in
- * the top alternative of the recognition result using a speaker_tag provided
- * in the WordInfo.
- * Note: Use diarization_config instead.
- *
- * @property {number} diarizationSpeakerCount
- * If set, specifies the estimated number of speakers in the conversation.
- * Defaults to '2'. Ignored unless enable_speaker_diarization is set to true.
- * Note: Use diarization_config instead.
- *
- * @property {Object} diarizationConfig
- * Config to enable speaker diarization and set additional
- * parameters to make diarization better suited for your application.
- * Note: When this is enabled, we send all the words from the beginning of the
- * audio for the top alternative in every consecutive STREAMING response.
- * This is done in order to improve our speaker tags as our models learn to
- * identify the speakers in the conversation over time.
- * For non-streaming requests, the diarization results will be provided only
- * in the top alternative of the FINAL SpeechRecognitionResult.
- *
- * This object should have the same structure as [SpeakerDiarizationConfig]{@link google.cloud.speech.v1p1beta1.SpeakerDiarizationConfig}
- *
- * @property {Object} metadata
- * Metadata regarding this request.
- *
- * This object should have the same structure as [RecognitionMetadata]{@link google.cloud.speech.v1p1beta1.RecognitionMetadata}
- *
- * @property {string} model
- * Which model to select for the given request. Select the model
- * best suited to your domain to get best results. If a model is not
- * explicitly specified, then we auto-select a model based on the parameters
- * in the RecognitionConfig.
- *
- *
- *     Model                | Description
- *     ---------------------|------------------------------------------------
- *     command_and_search   | Best for short queries such as voice commands
- *                          | or voice search.
- *     phone_call           | Best for audio that originated from a phone
- *                          | call (typically recorded at an 8khz sampling
- *                          | rate).
- *     video                | Best for audio that originated from video or
- *                          | includes multiple speakers. Ideally the audio
- *                          | is recorded at a 16khz or greater sampling
- *                          | rate. This is a premium model that costs more
- *                          | than the standard rate.
- *     default              | Best for audio that is not one of the specific
- *                          | audio models. For example, long-form audio.
- *                          | Ideally the audio is high-fidelity, recorded
- *                          | at a 16khz or greater sampling rate.
- *
- * @property {boolean} useEnhanced
- * Set to true to use an enhanced model for speech recognition.
- * If `use_enhanced` is set to true and the `model` field is not set, then
- * an appropriate enhanced model is chosen if an enhanced model exists for
- * the audio.
- *
- * If `use_enhanced` is true and an enhanced version of the specified model
- * does not exist, then the speech is recognized using the standard version
- * of the specified model.
- *
- * @typedef RecognitionConfig
- * @memberof google.cloud.speech.v1p1beta1
- * @see [google.cloud.speech.v1p1beta1.RecognitionConfig definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/speech/v1p1beta1/cloud_speech.proto}
- */
-const RecognitionConfig = {
- // This is for documentation. Actual contents will be loaded by gRPC.
-
- /**
- * The encoding of the audio data sent in the request.
- *
- * All encodings support only 1 channel (mono) audio, unless the
- * `audio_channel_count` and `enable_separate_recognition_per_channel` fields
- * are set.
- *
- * For best results, the audio source should be captured and transmitted using
- * a lossless encoding (`FLAC` or `LINEAR16`). The accuracy of the speech
- * recognition can be reduced if lossy codecs are used to capture or transmit
- * audio, particularly if background noise is present. Lossy codecs include
- * `MULAW`, `AMR`, `AMR_WB`, `OGG_OPUS`, `SPEEX_WITH_HEADER_BYTE`, and `MP3`.
- *
- * The `FLAC` and `WAV` audio file formats include a header that describes the
- * included audio content. You can request recognition for `WAV` files that
- * contain either `LINEAR16` or `MULAW` encoded audio.
- * If you send `FLAC` or `WAV` audio file format in
- * your request, you do not need to specify an `AudioEncoding`; the audio
- * encoding format is determined from the file header. If you specify
- * an `AudioEncoding` when you send `FLAC` or `WAV` audio, the
- * encoding configuration must match the encoding described in the audio
- * header; otherwise the request returns an
- * google.rpc.Code.INVALID_ARGUMENT error code.
- *
- * @enum {number}
- * @memberof google.cloud.speech.v1p1beta1
- */
- AudioEncoding: {
-
- /**
- * Not specified.
- */
- ENCODING_UNSPECIFIED: 0,
-
- /**
- * Uncompressed 16-bit signed little-endian samples (Linear PCM).
- */
- LINEAR16: 1,
-
- /**
- * `FLAC` (Free Lossless Audio
- * Codec) is the recommended encoding because it is
- * lossless--therefore recognition is not compromised--and
- * requires only about half the bandwidth of `LINEAR16`. `FLAC` stream
- * encoding supports 16-bit and 24-bit samples, however, not all fields in
- * `STREAMINFO` are supported.
- */
- FLAC: 2,
-
- /**
- * 8-bit samples that compand 14-bit audio samples using G.711 PCMU/mu-law.
- */
- MULAW: 3,
-
- /**
- * Adaptive Multi-Rate Narrowband codec. `sample_rate_hertz` must be 8000.
- */
- AMR: 4,
-
- /**
- * Adaptive Multi-Rate Wideband codec. `sample_rate_hertz` must be 16000.
- */
- AMR_WB: 5,
-
- /**
- * Opus encoded audio frames in Ogg container
- * ([OggOpus](https://wiki.xiph.org/OggOpus)).
- * `sample_rate_hertz` must be one of 8000, 12000, 16000, 24000, or 48000.
- */
- OGG_OPUS: 6,
-
- /**
- * Although the use of lossy encodings is not recommended, if a very low
- * bitrate encoding is required, `OGG_OPUS` is highly preferred over
- * Speex encoding. The [Speex](https://speex.org/) encoding supported by
- * Cloud Speech API has a header byte in each block, as in MIME type
- * `audio/x-speex-with-header-byte`.
- * It is a variant of the RTP Speex encoding defined in
- * [RFC 5574](https://tools.ietf.org/html/rfc5574).
- * The stream is a sequence of blocks, one block per RTP packet. Each block
- * starts with a byte containing the length of the block, in bytes, followed
- * by one or more frames of Speex data, padded to an integral number of
- * bytes (octets) as specified in RFC 5574. In other words, each RTP header
- * is replaced with a single byte containing the block length. Only Speex
- * wideband is supported. `sample_rate_hertz` must be 16000.
- */
- SPEEX_WITH_HEADER_BYTE: 7,
-
- /**
- * MP3 audio. Supports all standard MP3 bitrates (which range from 32-320
- * kbps). When using this encoding, `sample_rate_hertz` can be optionally
- * unset if not known.
- */
- MP3: 8
- }
-};
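Pulling the deleted `RecognitionConfig` and `AudioEncoding` docs into one request shape — a sketch; the bucket object is hypothetical, and for `FLAC`/`WAV` the encoding and sample rate could be omitted since they are read from the file header:

```ts
import {SpeechClient} from '@google-cloud/speech';

async function recognizeLinear16(): Promise<void> {
  const client = new SpeechClient();
  const [response] = await client.recognize({
    config: {
      encoding: 'LINEAR16',   // AudioEncoding value 1
      sampleRateHertz: 16000, // the documented sweet spot
      languageCode: 'en-US',  // required BCP-47 tag
      maxAlternatives: 3,     // 0-30; 0 and 1 both mean "top one only"
    },
    audio: {uri: 'gs://my-bucket/utterance.raw'}, // hypothetical object
  });
  console.log(response.results);
}
```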
-
-/**
- * Config to enable speaker diarization.
- *
- * @property {boolean} enableSpeakerDiarization
- * If 'true', enables speaker detection for each recognized word in
- * the top alternative of the recognition result using a speaker_tag provided
- * in the WordInfo.
- *
- * @property {number} minSpeakerCount
- * Minimum number of speakers in the conversation. This range gives you more
- * flexibility by allowing the system to automatically determine the correct
- * number of speakers. If not set, the default value is 2.
- *
- * @property {number} maxSpeakerCount
- * Maximum number of speakers in the conversation. This range gives you more
- * flexibility by allowing the system to automatically determine the correct
- * number of speakers. If not set, the default value is 6.
- *
- * @typedef SpeakerDiarizationConfig
- * @memberof google.cloud.speech.v1p1beta1
- * @see [google.cloud.speech.v1p1beta1.SpeakerDiarizationConfig definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/speech/v1p1beta1/cloud_speech.proto}
- */
-const SpeakerDiarizationConfig = {
- // This is for documentation. Actual contents will be loaded by gRPC.
-};
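A sketch of the preferred diarization shape described above, leaving out the deprecated top-level fields:

```ts
// Speaker counts are hints that bound the search, not hard limits; each
// recognized word then carries a speakerTag in its WordInfo.
const config = {
  languageCode: 'en-US',
  diarizationConfig: {
    enableSpeakerDiarization: true,
    minSpeakerCount: 2, // the default when unset
    maxSpeakerCount: 6, // the default when unset
  },
};
```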
-
-/**
- * Description of audio data to be recognized.
- *
- * @property {number} interactionType
- * The use case most closely describing the audio content to be recognized.
- *
- * The number should be among the values of [InteractionType]{@link google.cloud.speech.v1p1beta1.InteractionType}
- *
- * @property {number} industryNaicsCodeOfAudio
- * The industry vertical to which this speech recognition request most
- * closely applies. This is most indicative of the topics contained
- * in the audio. Use the 6-digit NAICS code to identify the industry
- * vertical - see https://www.naics.com/search/.
- *
- * @property {number} microphoneDistance
- * The audio type that most closely describes the audio being recognized.
- *
- * The number should be among the values of [MicrophoneDistance]{@link google.cloud.speech.v1p1beta1.MicrophoneDistance}
- *
- * @property {number} originalMediaType
- * The original media the speech was recorded on.
- *
- * The number should be among the values of [OriginalMediaType]{@link google.cloud.speech.v1p1beta1.OriginalMediaType}
- *
- * @property {number} recordingDeviceType
- * The type of device the speech was recorded with.
- *
- * The number should be among the values of [RecordingDeviceType]{@link google.cloud.speech.v1p1beta1.RecordingDeviceType}
- *
- * @property {string} recordingDeviceName
- * The device used to make the recording. Examples 'Nexus 5X' or
- * 'Polycom SoundStation IP 6000' or 'POTS' or 'VoIP' or
- * 'Cardioid Microphone'.
- *
- * @property {string} originalMimeType
- * Mime type of the original audio file. For example `audio/m4a`,
- * `audio/x-alaw-basic`, `audio/mp3`, `audio/3gpp`.
- * A list of possible audio mime types is maintained at
- * http://www.iana.org/assignments/media-types/media-types.xhtml#audio
- *
- * @property {number} obfuscatedId
- * Obfuscated (privacy-protected) ID of the user, to identify number of
- * unique users using the service.
- *
- * @property {string} audioTopic
- * Description of the content. Eg. "Recordings of federal supreme court
- * hearings from 2012".
- *
- * @typedef RecognitionMetadata
- * @memberof google.cloud.speech.v1p1beta1
- * @see [google.cloud.speech.v1p1beta1.RecognitionMetadata definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/speech/v1p1beta1/cloud_speech.proto}
- */
-const RecognitionMetadata = {
- // This is for documentation. Actual contents will be loaded by gRPC.
-
- /**
- * Use case categories that the audio recognition request can be described
- * by.
- *
- * @enum {number}
- * @memberof google.cloud.speech.v1p1beta1
- */
- InteractionType: {
-
- /**
- * Use case is either unknown or is something other than one of the other
- * values below.
- */
- INTERACTION_TYPE_UNSPECIFIED: 0,
-
- /**
- * Multiple people in a conversation or discussion. For example in a
- * meeting with two or more people actively participating. Typically
- * all the primary people speaking would be in the same room (if not,
- * see PHONE_CALL)
- */
- DISCUSSION: 1,
-
- /**
- * One or more persons lecturing or presenting to others, mostly
- * uninterrupted.
- */
- PRESENTATION: 2,
-
- /**
- * A phone-call or video-conference in which two or more people, who are
- * not in the same room, are actively participating.
- */
- PHONE_CALL: 3,
-
- /**
- * A recorded message intended for another person to listen to.
- */
- VOICEMAIL: 4,
-
- /**
- * Professionally produced audio (eg. TV Show, Podcast).
- */
- PROFESSIONALLY_PRODUCED: 5,
-
- /**
- * Transcribe spoken questions and queries into text.
- */
- VOICE_SEARCH: 6,
-
- /**
- * Transcribe voice commands, such as for controlling a device.
- */
- VOICE_COMMAND: 7,
-
- /**
- * Transcribe speech to text to create a written document, such as a
- * text-message, email or report.
- */
- DICTATION: 8
- },
-
- /**
- * Enumerates the types of capture settings describing an audio file.
- *
- * @enum {number}
- * @memberof google.cloud.speech.v1p1beta1
- */
- MicrophoneDistance: {
-
- /**
- * Audio type is not known.
- */
- MICROPHONE_DISTANCE_UNSPECIFIED: 0,
-
- /**
- * The audio was captured from a closely placed microphone. Eg. phone,
- * dictaphone, or handheld microphone. Generally, the speaker is within
- * 1 meter of the microphone.
- */
- NEARFIELD: 1,
-
- /**
- * The speaker is within 3 meters of the microphone.
- */
- MIDFIELD: 2,
-
- /**
- * The speaker is more than 3 meters away from the microphone.
- */
- FARFIELD: 3
- },
-
- /**
- * The original media the speech was recorded on.
- *
- * @enum {number}
- * @memberof google.cloud.speech.v1p1beta1
- */
- OriginalMediaType: {
-
- /**
- * Unknown original media type.
- */
- ORIGINAL_MEDIA_TYPE_UNSPECIFIED: 0,
-
- /**
- * The speech data is an audio recording.
- */
- AUDIO: 1,
-
- /**
- * The speech data originally recorded on a video.
- */
- VIDEO: 2
- },
-
- /**
- * The type of device the speech was recorded with.
- *
- * @enum {number}
- * @memberof google.cloud.speech.v1p1beta1
- */
- RecordingDeviceType: {
-
- /**
- * The recording device is unknown.
- */
- RECORDING_DEVICE_TYPE_UNSPECIFIED: 0,
-
- /**
- * Speech was recorded on a smartphone.
- */
- SMARTPHONE: 1,
-
- /**
- * Speech was recorded using a personal computer or tablet.
- */
- PC: 2,
-
- /**
- * Speech was recorded over a phone line.
- */
- PHONE_LINE: 3,
-
- /**
- * Speech was recorded in a vehicle.
- */
- VEHICLE: 4,
-
- /**
- * Speech was recorded outdoors.
- */
- OTHER_OUTDOOR_DEVICE: 5,
-
- /**
- * Speech was recorded indoors.
- */
- OTHER_INDOOR_DEVICE: 6
- }
-};
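A sketch of attaching the `RecognitionMetadata` described above to a config; the enum-typed fields accept the string names from these docs, and the NAICS code is purely illustrative:

```ts
const config = {
  languageCode: 'en-US',
  metadata: {
    interactionType: 'PHONE_CALL',
    industryNaicsCodeOfAudio: 518210, // hypothetical 6-digit NAICS code
    microphoneDistance: 'NEARFIELD',
    originalMediaType: 'AUDIO',
    recordingDeviceType: 'PHONE_LINE',
    originalMimeType: 'audio/mp3',
  },
};
```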
-
-/**
- * Provides "hints" to the speech recognizer to favor specific words and phrases
- * in the results.
- *
- * @property {string[]} phrases
- * A list of strings containing words and phrases "hints" so that
- * the speech recognition is more likely to recognize them. This can be used
- * to improve the accuracy for specific words and phrases, for example, if
- * specific commands are typically spoken by the user. This can also be used
- * to add additional words to the vocabulary of the recognizer. See
- * [usage limits](https://cloud.google.com/speech-to-text/quotas#content).
- *
- * List items can also be set to classes for groups of words that represent
- * common concepts that occur in natural language. For example, rather than
- * providing phrase hints for every month of the year, using the $MONTH class
- * improves the likelihood of correctly transcribing audio that includes
- * months.
- *
- * @property {number} boost
- * Hint Boost. Positive value will increase the probability that a specific
- * phrase will be recognized over other similar sounding phrases. The higher
- * the boost, the higher the chance of false positive recognition as well.
- * Negative boost values would correspond to anti-biasing. Anti-biasing is not
- * enabled, so negative boost will simply be ignored. Though `boost` can
- * accept a wide range of positive values, most use cases are best served with
- * values between 0 and 20. We recommend using a binary search approach to
- * finding the optimal value for your use case.
- *
- * @typedef SpeechContext
- * @memberof google.cloud.speech.v1p1beta1
- * @see [google.cloud.speech.v1p1beta1.SpeechContext definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/speech/v1p1beta1/cloud_speech.proto}
- */
-const SpeechContext = {
- // This is for documentation. Actual contents will be loaded by gRPC.
-};
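A sketch of the phrase-hint mechanism documented above; the boost of 15 is only a starting point for the binary search these docs recommend:

```ts
// Negative boost values are ignored; class tokens like $MONTH stand for
// groups of related words.
const config = {
  languageCode: 'en-US',
  speechContexts: [
    {phrases: ['weather', '$MONTH'], boost: 15},
  ],
};
```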
-
-/**
- * Contains audio data in the encoding specified in the `RecognitionConfig`.
- * Either `content` or `uri` must be supplied. Supplying both or neither
- * returns google.rpc.Code.INVALID_ARGUMENT. See
- * [content limits](https://cloud.google.com/speech-to-text/quotas#content).
- *
- * @property {Buffer} content
- * The audio data bytes encoded as specified in
- * `RecognitionConfig`. Note: as with all bytes fields, proto buffers use a
- * pure binary representation, whereas JSON representations use base64.
- *
- * @property {string} uri
- * URI that points to a file that contains audio data bytes as specified in
- * `RecognitionConfig`. The file must not be compressed (for example, gzip).
- * Currently, only Google Cloud Storage URIs are
- * supported, which must be specified in the following format:
- * `gs://bucket_name/object_name` (other URI formats return
- * google.rpc.Code.INVALID_ARGUMENT). For more information, see
- * [Request URIs](https://cloud.google.com/storage/docs/reference-uris).
- *
- * @typedef RecognitionAudio
- * @memberof google.cloud.speech.v1p1beta1
- * @see [google.cloud.speech.v1p1beta1.RecognitionAudio definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/speech/v1p1beta1/cloud_speech.proto}
- */
-const RecognitionAudio = {
- // This is for documentation. Actual contents will be loaded by gRPC.
-};
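A sketch of the two mutually exclusive `RecognitionAudio` shapes; file and bucket names are hypothetical:

```ts
import * as fs from 'fs';

// Exactly one of `content` or `uri` may be set; both (or neither) yields
// INVALID_ARGUMENT.
const inlineAudio = {
  content: fs.readFileSync('utterance.raw').toString('base64'),
};
const remoteAudio = {
  uri: 'gs://my-bucket/utterance.raw', // Cloud Storage only, uncompressed
};
```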
-
-/**
- * The only message returned to the client by the `Recognize` method. It
- * contains the result as zero or more sequential `SpeechRecognitionResult`
- * messages.
- *
- * @property {Object[]} results
- * Sequential list of transcription results corresponding to
- * sequential portions of audio.
- *
- * This object should have the same structure as [SpeechRecognitionResult]{@link google.cloud.speech.v1p1beta1.SpeechRecognitionResult}
- *
- * @typedef RecognizeResponse
- * @memberof google.cloud.speech.v1p1beta1
- * @see [google.cloud.speech.v1p1beta1.RecognizeResponse definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/speech/v1p1beta1/cloud_speech.proto}
- */
-const RecognizeResponse = {
- // This is for documentation. Actual contents will be loaded by gRPC.
-};
-
-/**
- * The only message returned to the client by the `LongRunningRecognize` method.
- * It contains the result as zero or more sequential `SpeechRecognitionResult`
- * messages. It is included in the `result.response` field of the `Operation`
- * returned by the `GetOperation` call of the `google::longrunning::Operations`
- * service.
- *
- * @property {Object[]} results
- * Sequential list of transcription results corresponding to
- * sequential portions of audio.
- *
- * This object should have the same structure as [SpeechRecognitionResult]{@link google.cloud.speech.v1p1beta1.SpeechRecognitionResult}
- *
- * @typedef LongRunningRecognizeResponse
- * @memberof google.cloud.speech.v1p1beta1
- * @see [google.cloud.speech.v1p1beta1.LongRunningRecognizeResponse definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/speech/v1p1beta1/cloud_speech.proto}
- */
-const LongRunningRecognizeResponse = {
- // This is for documentation. Actual contents will be loaded by gRPC.
-};
-
-/**
- * Describes the progress of a long-running `LongRunningRecognize` call. It is
- * included in the `metadata` field of the `Operation` returned by the
- * `GetOperation` call of the `google::longrunning::Operations` service.
- *
- * @property {number} progressPercent
- * Approximate percentage of audio processed thus far. Guaranteed to be 100
- * when the audio is fully processed and the results are available.
- *
- * @property {Object} startTime
- * Time when the request was received.
- *
- * This object should have the same structure as [Timestamp]{@link google.protobuf.Timestamp}
- *
- * @property {Object} lastUpdateTime
- * Time of the most recent processing update.
- *
- * This object should have the same structure as [Timestamp]{@link google.protobuf.Timestamp}
- *
- * @typedef LongRunningRecognizeMetadata
- * @memberof google.cloud.speech.v1p1beta1
- * @see [google.cloud.speech.v1p1beta1.LongRunningRecognizeMetadata definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/speech/v1p1beta1/cloud_speech.proto}
- */
-const LongRunningRecognizeMetadata = {
- // This is for documentation. Actual contents will be loaded by gRPC.
-};
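A sketch tying `LongRunningRecognizeResponse` and its metadata together via the client's documented long-running flow (bucket path hypothetical):

```ts
import {SpeechClient} from '@google-cloud/speech';

async function transcribeLongAudio(): Promise<void> {
  const client = new SpeechClient();
  const [operation] = await client.longRunningRecognize({
    config: {languageCode: 'en-US'},
    audio: {uri: 'gs://my-bucket/long-recording.flac'}, // hypothetical
  });
  // promise() polls the operation until done; the second element is the
  // LongRunningRecognizeMetadata documented above.
  const [response, metadata] = await operation.promise();
  console.log(metadata.progressPercent); // 100 once results are available
  console.log(response.results);
}
```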
-
-/**
- * `StreamingRecognizeResponse` is the only message returned to the client by
- * `StreamingRecognize`. A series of zero or more `StreamingRecognizeResponse`
- * messages are streamed back to the client. If there is no recognizable
- * audio, and `single_utterance` is set to false, then no messages are streamed
- * back to the client.
- *
- * Here's an example of a series of seven `StreamingRecognizeResponse`s that might
- * be returned while processing audio:
- *
- * 1. results { alternatives { transcript: "tube" } stability: 0.01 }
- *
- * 2. results { alternatives { transcript: "to be a" } stability: 0.01 }
- *
- * 3. results { alternatives { transcript: "to be" } stability: 0.9 }
- * results { alternatives { transcript: " or not to be" } stability: 0.01 }
- *
- * 4. results { alternatives { transcript: "to be or not to be"
- * confidence: 0.92 }
- * alternatives { transcript: "to bee or not to bee" }
- * is_final: true }
- *
- * 5. results { alternatives { transcript: " that's" } stability: 0.01 }
- *
- * 6. results { alternatives { transcript: " that is" } stability: 0.9 }
- * results { alternatives { transcript: " the question" } stability: 0.01 }
- *
- * 7. results { alternatives { transcript: " that is the question"
- * confidence: 0.98 }
- * alternatives { transcript: " that was the question" }
- * is_final: true }
- *
- * Notes:
- *
- * - Only two of the above responses, #4 and #7, contain final results; they are
- * indicated by `is_final: true`. Concatenating these together generates the
- * full transcript: "to be or not to be that is the question".
- *
- * - The others contain interim `results`. #3 and #6 contain two interim
- * `results`: the first portion has a high stability and is less likely to
- * change; the second portion has a low stability and is very likely to
- * change. A UI designer might choose to show only high stability `results`.
- *
- * - The specific `stability` and `confidence` values shown above are only for
- * illustrative purposes. Actual values may vary.
- *
- * - In each response, only one of these fields will be set:
- * `error`,
- * `speech_event_type`, or
- * one or more (repeated) `results`.
- *
- * @property {Object} error
- * If set, returns a google.rpc.Status message that
- * specifies the error for the operation.
- *
- * This object should have the same structure as [Status]{@link google.rpc.Status}
- *
- * @property {Object[]} results
- * This repeated list contains zero or more results that
- * correspond to consecutive portions of the audio currently being processed.
- * It contains zero or one `is_final=true` result (the newly settled portion),
- * followed by zero or more `is_final=false` results (the interim results).
- *
- * This object should have the same structure as [StreamingRecognitionResult]{@link google.cloud.speech.v1p1beta1.StreamingRecognitionResult}
- *
- * @property {number} speechEventType
- * Indicates the type of speech event.
- *
- * The number should be among the values of [SpeechEventType]{@link google.cloud.speech.v1p1beta1.SpeechEventType}
- *
- * @typedef StreamingRecognizeResponse
- * @memberof google.cloud.speech.v1p1beta1
- * @see [google.cloud.speech.v1p1beta1.StreamingRecognizeResponse definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/speech/v1p1beta1/cloud_speech.proto}
- */
-const StreamingRecognizeResponse = {
- // This is for documentation. Actual contents will be loaded by gRPC.
-
- /**
- * Indicates the type of speech event.
- *
- * @enum {number}
- * @memberof google.cloud.speech.v1p1beta1
- */
- SpeechEventType: {
-
- /**
- * No speech event specified.
- */
- SPEECH_EVENT_UNSPECIFIED: 0,
-
- /**
- * This event indicates that the server has detected the end of the user's
- * speech utterance and expects no additional speech. Therefore, the server
- * will not process additional audio (although it may subsequently return
- * additional results). The client should stop sending additional audio
- * data, half-close the gRPC connection, and wait for any additional results
- * until the server closes the gRPC connection. This event is only sent if
- * `single_utterance` was set to `true`, and is not used otherwise.
- */
- END_OF_SINGLE_UTTERANCE: 1
- }
-};
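A sketch of the consumption pattern these notes imply — keep only `is_final` results and concatenate them:

```ts
interface StreamingResult {
  isFinal?: boolean;
  alternatives?: Array<{transcript?: string}>;
}

const finalPieces: string[] = [];

function onResponse(resp: {results?: StreamingResult[]}): void {
  for (const result of resp.results ?? []) {
    const transcript = result.alternatives?.[0]?.transcript;
    if (result.isFinal && transcript) {
      finalPieces.push(transcript);
    }
  }
}

// After the stream ends:
// finalPieces.join('') === 'to be or not to be that is the question'
```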
-
-/**
- * A streaming speech recognition result corresponding to a portion of the audio
- * that is currently being processed.
- *
- * @property {Object[]} alternatives
- * May contain one or more recognition hypotheses (up to the
- * maximum specified in `max_alternatives`).
- * These alternatives are ordered in terms of accuracy, with the top (first)
- * alternative being the most probable, as ranked by the recognizer.
- *
- * This object should have the same structure as [SpeechRecognitionAlternative]{@link google.cloud.speech.v1p1beta1.SpeechRecognitionAlternative}
- *
- * @property {boolean} isFinal
- * If `false`, this `StreamingRecognitionResult` represents an
- * interim result that may change. If `true`, this is the final time the
- * speech service will return this particular `StreamingRecognitionResult`,
- * the recognizer will not return any further hypotheses for this portion of
- * the transcript and corresponding audio.
- *
- * @property {number} stability
- * An estimate of the likelihood that the recognizer will not
- * change its guess about this interim result. Values range from 0.0
- * (completely unstable) to 1.0 (completely stable).
- * This field is only provided for interim results (`is_final=false`).
- * The default of 0.0 is a sentinel value indicating `stability` was not set.
- *
- * @property {Object} resultEndTime
- * Time offset of the end of this result relative to the
- * beginning of the audio.
- *
- * This object should have the same structure as [Duration]{@link google.protobuf.Duration}
- *
- * @property {number} channelTag
- * For multi-channel audio, this is the channel number corresponding to the
- * recognized result for the audio from that channel.
- * For audio_channel_count = N, its output values can range from '1' to 'N'.
- *
- * @property {string} languageCode
- * The [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag
- * of the language in this result. This language code was detected to have
- * the most likelihood of being spoken in the audio.
- *
- * @typedef StreamingRecognitionResult
- * @memberof google.cloud.speech.v1p1beta1
- * @see [google.cloud.speech.v1p1beta1.StreamingRecognitionResult definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/speech/v1p1beta1/cloud_speech.proto}
- */
-const StreamingRecognitionResult = {
- // This is for documentation. Actual contents will be loaded by gRPC.
-};
-
-/**
- * A speech recognition result corresponding to a portion of the audio.
- *
- * @property {Object[]} alternatives
- * May contain one or more recognition hypotheses (up to the
- * maximum specified in `max_alternatives`).
- * These alternatives are ordered in terms of accuracy, with the top (first)
- * alternative being the most probable, as ranked by the recognizer.
- *
- * This object should have the same structure as [SpeechRecognitionAlternative]{@link google.cloud.speech.v1p1beta1.SpeechRecognitionAlternative}
- *
- * @property {number} channelTag
- * For multi-channel audio, this is the channel number corresponding to the
- * recognized result for the audio from that channel.
- * For audio_channel_count = N, its output values can range from '1' to 'N'.
- *
- * @property {string} languageCode
- * The [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag
- * of the language in this result. This language code was detected to have
- * the most likelihood of being spoken in the audio.
- *
- * @typedef SpeechRecognitionResult
- * @memberof google.cloud.speech.v1p1beta1
- * @see [google.cloud.speech.v1p1beta1.SpeechRecognitionResult definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/speech/v1p1beta1/cloud_speech.proto}
- */
-const SpeechRecognitionResult = {
- // This is for documentation. Actual contents will be loaded by gRPC.
-};
-
-/**
- * Alternative hypotheses (a.k.a. n-best list).
- *
- * @property {string} transcript
- * Transcript text representing the words that the user spoke.
- *
- * @property {number} confidence
- * The confidence estimate between 0.0 and 1.0. A higher number
- * indicates an estimated greater likelihood that the recognized words are
- * correct. This field is set only for the top alternative of a non-streaming
- * result or, of a streaming result where `is_final=true`.
- * This field is not guaranteed to be accurate and users should not rely on it
- * to be always provided.
- * The default of 0.0 is a sentinel value indicating `confidence` was not set.
- *
- * @property {Object[]} words
- * A list of word-specific information for each recognized word.
- * Note: When `enable_speaker_diarization` is true, you will see all the words
- * from the beginning of the audio.
- *
- * This object should have the same structure as [WordInfo]{@link google.cloud.speech.v1p1beta1.WordInfo}
- *
- * @typedef SpeechRecognitionAlternative
- * @memberof google.cloud.speech.v1p1beta1
- * @see [google.cloud.speech.v1p1beta1.SpeechRecognitionAlternative definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/speech/v1p1beta1/cloud_speech.proto}
- */
-const SpeechRecognitionAlternative = {
- // This is for documentation. Actual contents will be loaded by gRPC.
-};
-
-/**
- * Word-specific information for recognized words.
- *
- * @property {Object} startTime
- * Time offset relative to the beginning of the audio,
- * and corresponding to the start of the spoken word.
- * This field is only set if `enable_word_time_offsets=true` and only
- * in the top hypothesis.
- * This is an experimental feature and the accuracy of the time offset can
- * vary.
- *
- * This object should have the same structure as [Duration]{@link google.protobuf.Duration}
- *
- * @property {Object} endTime
- * Time offset relative to the beginning of the audio,
- * and corresponding to the end of the spoken word.
- * This field is only set if `enable_word_time_offsets=true` and only
- * in the top hypothesis.
- * This is an experimental feature and the accuracy of the time offset can
- * vary.
- *
- * This object should have the same structure as [Duration]{@link google.protobuf.Duration}
- *
- * @property {string} word
- * The word corresponding to this set of information.
- *
- * @property {number} confidence
- * The confidence estimate between 0.0 and 1.0. A higher number
- * indicates an estimated greater likelihood that the recognized words are
- * correct. This field is set only for the top alternative of a non-streaming
- * result or, of a streaming result where `is_final=true`.
- * This field is not guaranteed to be accurate and users should not rely on it
- * to be always provided.
- * The default of 0.0 is a sentinel value indicating `confidence` was not set.
- *
- * @property {number} speakerTag
- * A distinct integer value is assigned for every speaker within
- * the audio. This field specifies which one of those speakers was detected to
- * have spoken this word. Value ranges from '1' to diarization_speaker_count.
- * speaker_tag is set if enable_speaker_diarization = 'true' and only in the
- * top alternative.
- *
- * @typedef WordInfo
- * @memberof google.cloud.speech.v1p1beta1
- * @see [google.cloud.speech.v1p1beta1.WordInfo definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/speech/v1p1beta1/cloud_speech.proto}
- */
-const WordInfo = {
- // This is for documentation. Actual contents will be loaded by gRPC.
-};
\ No newline at end of file
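A sketch of reading the word-level offsets documented above; `topAlternative` is a stand-in for a result from a request made with `enableWordTimeOffsets: true`:

```ts
interface ProtoDuration {
  seconds?: number | string; // int64 values may surface as strings
  nanos?: number;
}
interface Word {
  word?: string;
  startTime?: ProtoDuration;
  endTime?: ProtoDuration;
  speakerTag?: number;
}

declare const topAlternative: {words?: Word[]};

function toSeconds(d?: ProtoDuration): number {
  return Number(d?.seconds ?? 0) + (d?.nanos ?? 0) / 1e9;
}

for (const w of topAlternative.words ?? []) {
  console.log(w.word, toSeconds(w.startTime), toSeconds(w.endTime));
}
```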
diff --git a/src/v1p1beta1/doc/google/longrunning/doc_operations.js b/src/v1p1beta1/doc/google/longrunning/doc_operations.js
deleted file mode 100644
index 4719aebd..00000000
--- a/src/v1p1beta1/doc/google/longrunning/doc_operations.js
+++ /dev/null
@@ -1,63 +0,0 @@
-// Copyright 2019 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// https://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Note: this file is purely for documentation. Any contents are not expected
-// to be loaded as the JS file.
-
-/**
- * This resource represents a long-running operation that is the result of a
- * network API call.
- *
- * @property {string} name
- * The server-assigned name, which is only unique within the same service that
- * originally returns it. If you use the default HTTP mapping, the
- * `name` should have the format of `operations/some/unique/name`.
- *
- * @property {Object} metadata
- * Service-specific metadata associated with the operation. It typically
- * contains progress information and common metadata such as create time.
- * Some services might not provide such metadata. Any method that returns a
- * long-running operation should document the metadata type, if any.
- *
- * This object should have the same structure as [Any]{@link google.protobuf.Any}
- *
- * @property {boolean} done
- * If the value is `false`, it means the operation is still in progress.
- * If `true`, the operation is completed, and either `error` or `response` is
- * available.
- *
- * @property {Object} error
- * The error result of the operation in case of failure or cancellation.
- *
- * This object should have the same structure as [Status]{@link google.rpc.Status}
- *
- * @property {Object} response
- * The normal response of the operation in case of success. If the original
- * method returns no data on success, such as `Delete`, the response is
- * `google.protobuf.Empty`. If the original method is standard
- * `Get`/`Create`/`Update`, the response should be the resource. For other
- * methods, the response should have the type `XxxResponse`, where `Xxx`
- * is the original method name. For example, if the original method name
- * is `TakeSnapshot()`, the inferred response type is
- * `TakeSnapshotResponse`.
- *
- * This object should have the same structure as [Any]{@link google.protobuf.Any}
- *
- * @typedef Operation
- * @memberof google.longrunning
- * @see [google.longrunning.Operation definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/longrunning/operations.proto}
- */
-const Operation = {
- // This is for documentation. Actual contents will be loaded by gRPC.
-};
\ No newline at end of file
diff --git a/src/v1p1beta1/doc/google/protobuf/doc_any.js b/src/v1p1beta1/doc/google/protobuf/doc_any.js
deleted file mode 100644
index cdd2fc80..00000000
--- a/src/v1p1beta1/doc/google/protobuf/doc_any.js
+++ /dev/null
@@ -1,137 +0,0 @@
-// Copyright 2019 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// https://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Note: this file is purely for documentation. Any contents are not expected
-// to be loaded as the JS file.
-
-/**
- * `Any` contains an arbitrary serialized protocol buffer message along with a
- * URL that describes the type of the serialized message.
- *
- * Protobuf library provides support to pack/unpack Any values in the form
- * of utility functions or additional generated methods of the Any type.
- *
- * Example 1: Pack and unpack a message in C++.
- *
- * Foo foo = ...;
- * Any any;
- * any.PackFrom(foo);
- * ...
- * if (any.UnpackTo(&foo)) {
- * ...
- * }
- *
- * Example 2: Pack and unpack a message in Java.
- *
- * Foo foo = ...;
- * Any any = Any.pack(foo);
- * ...
- * if (any.is(Foo.class)) {
- * foo = any.unpack(Foo.class);
- * }
- *
- * Example 3: Pack and unpack a message in Python.
- *
- * foo = Foo(...)
- * any = Any()
- * any.Pack(foo)
- * ...
- * if any.Is(Foo.DESCRIPTOR):
- * any.Unpack(foo)
- * ...
- *
- * Example 4: Pack and unpack a message in Go
- *
- * foo := &pb.Foo{...}
- * any, err := ptypes.MarshalAny(foo)
- * ...
- * foo := &pb.Foo{}
- * if err := ptypes.UnmarshalAny(any, foo); err != nil {
- * ...
- * }
- *
- * The pack methods provided by protobuf library will by default use
- * 'type.googleapis.com/full.type.name' as the type URL and the unpack
- * methods only use the fully qualified type name after the last '/'
- * in the type URL, for example "foo.bar.com/x/y.z" will yield type
- * name "y.z".
- *
- *
- * # JSON
- *
- * The JSON representation of an `Any` value uses the regular
- * representation of the deserialized, embedded message, with an
- * additional field `@type` which contains the type URL. Example:
- *
- * package google.profile;
- * message Person {
- * string first_name = 1;
- * string last_name = 2;
- * }
- *
- * {
- * "@type": "type.googleapis.com/google.profile.Person",
- *       "firstName": <string>,
- *       "lastName": <string>
- * }
- *
- * If the embedded message type is well-known and has a custom JSON
- * representation, that representation will be embedded adding a field
- * `value` which holds the custom JSON in addition to the `@type`
- * field. Example (for message google.protobuf.Duration):
- *
- * {
- * "@type": "type.googleapis.com/google.protobuf.Duration",
- * "value": "1.212s"
- * }
- *
- * @property {string} typeUrl
- * A URL/resource name that uniquely identifies the type of the serialized
- * protocol buffer message. This string must contain at least
- * one "/" character. The last segment of the URL's path must represent
- * the fully qualified name of the type (as in
- * `path/google.protobuf.Duration`). The name should be in a canonical form
- * (e.g., leading "." is not accepted).
- *
- * In practice, teams usually precompile into the binary all types that they
- * expect it to use in the context of Any. However, for URLs which use the
- * scheme `http`, `https`, or no scheme, one can optionally set up a type
- * server that maps type URLs to message definitions as follows:
- *
- * * If no scheme is provided, `https` is assumed.
- * * An HTTP GET on the URL must yield a google.protobuf.Type
- * value in binary format, or produce an error.
- * * Applications are allowed to cache lookup results based on the
- * URL, or have them precompiled into a binary to avoid any
- * lookup. Therefore, binary compatibility needs to be preserved
- * on changes to types. (Use versioned type names to manage
- * breaking changes.)
- *
- * Note: this functionality is not currently available in the official
- * protobuf release, and it is not used for type URLs beginning with
- * type.googleapis.com.
- *
- * Schemes other than `http`, `https` (or the empty scheme) might be
- * used with implementation specific semantics.
- *
- * @property {Buffer} value
- * Must be a valid serialized protocol buffer of the above specified type.
- *
- * @typedef Any
- * @memberof google.protobuf
- * @see [google.protobuf.Any definition in proto format]{@link https://github.com/google/protobuf/blob/master/src/google/protobuf/any.proto}
- */
-const Any = {
- // This is for documentation. Actual contents will be loaded by gRPC.
-};
\ No newline at end of file
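A TypeScript counterpart to the pack/unpack examples above, sketched with protobufjs (already a dependency of this package); it assumes `root` was loaded from .proto files that define the packed type:

```ts
import * as protobuf from 'protobufjs';

function unpackAny(
  root: protobuf.Root,
  any: {typeUrl: string; value: Uint8Array}
): protobuf.Message {
  // 'type.googleapis.com/google.profile.Person' -> 'google.profile.Person'
  const fqn = any.typeUrl.substring(any.typeUrl.lastIndexOf('/') + 1);
  return root.lookupType(fqn).decode(any.value);
}
```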
diff --git a/src/v1p1beta1/doc/google/protobuf/doc_duration.js b/src/v1p1beta1/doc/google/protobuf/doc_duration.js
deleted file mode 100644
index 1275f8f4..00000000
--- a/src/v1p1beta1/doc/google/protobuf/doc_duration.js
+++ /dev/null
@@ -1,97 +0,0 @@
-// Copyright 2019 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// https://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Note: this file is purely for documentation. Any contents are not expected
-// to be loaded as the JS file.
-
-/**
- * A Duration represents a signed, fixed-length span of time represented
- * as a count of seconds and fractions of seconds at nanosecond
- * resolution. It is independent of any calendar and concepts like "day"
- * or "month". It is related to Timestamp in that the difference between
- * two Timestamp values is a Duration and it can be added or subtracted
- * from a Timestamp. Range is approximately +-10,000 years.
- *
- * # Examples
- *
- * Example 1: Compute Duration from two Timestamps in pseudo code.
- *
- * Timestamp start = ...;
- * Timestamp end = ...;
- * Duration duration = ...;
- *
- * duration.seconds = end.seconds - start.seconds;
- * duration.nanos = end.nanos - start.nanos;
- *
- * if (duration.seconds < 0 && duration.nanos > 0) {
- * duration.seconds += 1;
- * duration.nanos -= 1000000000;
- *     } else if (duration.seconds > 0 && duration.nanos < 0) {
- * duration.seconds -= 1;
- * duration.nanos += 1000000000;
- * }
- *
- * Example 2: Compute Timestamp from Timestamp + Duration in pseudo code.
- *
- * Timestamp start = ...;
- * Duration duration = ...;
- * Timestamp end = ...;
- *
- * end.seconds = start.seconds + duration.seconds;
- * end.nanos = start.nanos + duration.nanos;
- *
- * if (end.nanos < 0) {
- * end.seconds -= 1;
- * end.nanos += 1000000000;
- * } else if (end.nanos >= 1000000000) {
- * end.seconds += 1;
- * end.nanos -= 1000000000;
- * }
- *
- * Example 3: Compute Duration from datetime.timedelta in Python.
- *
- * td = datetime.timedelta(days=3, minutes=10)
- * duration = Duration()
- * duration.FromTimedelta(td)
- *
- * # JSON Mapping
- *
- * In JSON format, the Duration type is encoded as a string rather than an
- * object, where the string ends in the suffix "s" (indicating seconds) and
- * is preceded by the number of seconds, with nanoseconds expressed as
- * fractional seconds. For example, 3 seconds with 0 nanoseconds should be
- * encoded in JSON format as "3s", while 3 seconds and 1 nanosecond should
- * be expressed in JSON format as "3.000000001s", and 3 seconds and 1
- * microsecond should be expressed in JSON format as "3.000001s".
- *
- * @property {number} seconds
- * Signed seconds of the span of time. Must be from -315,576,000,000
- * to +315,576,000,000 inclusive. Note: these bounds are computed from:
- * 60 sec/min * 60 min/hr * 24 hr/day * 365.25 days/year * 10000 years
- *
- * @property {number} nanos
- * Signed fractions of a second at nanosecond resolution of the span
- * of time. Durations less than one second are represented with a 0
- * `seconds` field and a positive or negative `nanos` field. For durations
- * of one second or more, a non-zero value for the `nanos` field must be
- * of the same sign as the `seconds` field. Must be from -999,999,999
- * to +999,999,999 inclusive.
- *
- * @typedef Duration
- * @memberof google.protobuf
- * @see [google.protobuf.Duration definition in proto format]{@link https://github.com/google/protobuf/blob/master/src/google/protobuf/duration.proto}
- */
-const Duration = {
- // This is for documentation. Actual contents will be loaded by gRPC.
-};
\ No newline at end of file
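Example 2 above, rendered as runnable TypeScript under the same seconds/nanos shapes:

```ts
interface TimeLike {
  seconds: number;
  nanos: number;
}

// Add a Duration to a Timestamp, normalizing nanos into [0, 1e9).
function addDuration(start: TimeLike, d: TimeLike): TimeLike {
  let seconds = start.seconds + d.seconds;
  let nanos = start.nanos + d.nanos;
  if (nanos < 0) {
    seconds -= 1;
    nanos += 1e9;
  } else if (nanos >= 1e9) {
    seconds += 1;
    nanos -= 1e9;
  }
  return {seconds, nanos};
}

// addDuration({seconds: 3, nanos: 900000000}, {seconds: 0, nanos: 200000000})
// => {seconds: 4, nanos: 100000000}
```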
diff --git a/src/v1p1beta1/doc/google/rpc/doc_status.js b/src/v1p1beta1/doc/google/rpc/doc_status.js
deleted file mode 100644
index 432ab6bb..00000000
--- a/src/v1p1beta1/doc/google/rpc/doc_status.js
+++ /dev/null
@@ -1,95 +0,0 @@
-// Copyright 2019 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// https://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Note: this file is purely for documentation. Any contents are not expected
-// to be loaded as the JS file.
-
-/**
- * The `Status` type defines a logical error model that is suitable for
- * different programming environments, including REST APIs and RPC APIs. It is
- * used by [gRPC](https://github.com/grpc). The error model is designed to be:
- *
- * - Simple to use and understand for most users
- * - Flexible enough to meet unexpected needs
- *
- * # Overview
- *
- * The `Status` message contains three pieces of data: error code, error
- * message, and error details. The error code should be an enum value of
- * google.rpc.Code, but it may accept additional error codes
- * if needed. The error message should be a developer-facing English message
- * that helps developers *understand* and *resolve* the error. If a localized
- * user-facing error message is needed, put the localized message in the error
- * details or localize it in the client. The optional error details may contain
- * arbitrary information about the error. There is a predefined set of error
- * detail types in the package `google.rpc` that can be used for common error
- * conditions.
- *
- * # Language mapping
- *
- * The `Status` message is the logical representation of the error model, but it
- * is not necessarily the actual wire format. When the `Status` message is
- * exposed in different client libraries and different wire protocols, it can be
- * mapped differently. For example, it will likely be mapped to some exceptions
- * in Java, but more likely mapped to some error codes in C.
- *
- * # Other uses
- *
- * The error model and the `Status` message can be used in a variety of
- * environments, either with or without APIs, to provide a
- * consistent developer experience across different environments.
- *
- * Example uses of this error model include:
- *
- * - Partial errors. If a service needs to return partial errors to the client,
- * it may embed the `Status` in the normal response to indicate the partial
- * errors.
- *
- * - Workflow errors. A typical workflow has multiple steps. Each step may
- * have a `Status` message for error reporting.
- *
- * - Batch operations. If a client uses batch request and batch response, the
- * `Status` message should be used directly inside batch response, one for
- * each error sub-response.
- *
- * - Asynchronous operations. If an API call embeds asynchronous operation
- * results in its response, the status of those operations should be
- * represented directly using the `Status` message.
- *
- * - Logging. If some API errors are stored in logs, the message `Status` could
- * be used directly after any stripping needed for security/privacy reasons.
- *
- * @property {number} code
- * The status code, which should be an enum value of
- * google.rpc.Code.
- *
- * @property {string} message
- * A developer-facing error message, which should be in English. Any
- * user-facing error message should be localized and sent in the
- * google.rpc.Status.details field, or localized
- * by the client.
- *
- * @property {Object[]} details
- * A list of messages that carry the error details. There is a common set of
- * message types for APIs to use.
- *
- * This object should have the same structure as [Any]{@link google.protobuf.Any}
- *
- * @typedef Status
- * @memberof google.rpc
- * @see [google.rpc.Status definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/rpc/status.proto}
- */
-const Status = {
- // This is for documentation. Actual contents will be loaded by gRPC.
-};
\ No newline at end of file
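Although this documentation stub is removed, failed calls still surface errors shaped like `google.rpc.Status` (code, message, details). A minimal sketch of consuming one, assuming the thrown error exposes those fields:

  try {
    await client.recognize(request); // client and request as elsewhere in this diff
  } catch (err) {
    // Numeric google.rpc.Code value, developer-facing English message,
    // plus any detail payloads attached by the service.
    console.error(err.code, err.message, err.details);
  }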
diff --git a/src/browser.js b/src/v1p1beta1/index.ts
similarity index 70%
rename from src/browser.js
rename to src/v1p1beta1/index.ts
index ddbcd7ec..28f7d784 100644
--- a/src/browser.js
+++ b/src/v1p1beta1/index.ts
@@ -11,11 +11,9 @@
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
+//
+// ** This file is automatically generated by gapic-generator-typescript. **
+// ** https://github.com/googleapis/gapic-generator-typescript **
+// ** All changes to this file may be overwritten. **
-'use strict';
-
-// Set a flag that we are running in a browser bundle.
-global.isBrowser = true;
-
-// Re-export all exports from ./index.js.
-module.exports = require('./index');
+export {SpeechClient} from './speech_client';
diff --git a/src/v1p1beta1/speech_client.js b/src/v1p1beta1/speech_client.ts
similarity index 50%
rename from src/v1p1beta1/speech_client.js
rename to src/v1p1beta1/speech_client.ts
index 61e2a797..55a1f468 100644
--- a/src/v1p1beta1/speech_client.js
+++ b/src/v1p1beta1/speech_client.ts
@@ -11,22 +11,39 @@
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
-
-'use strict';
-
-const gapicConfig = require('./speech_client_config.json');
-const gax = require('google-gax');
-const path = require('path');
-
-const VERSION = require('../../package.json').version;
+//
+// ** This file is automatically generated by gapic-generator-typescript. **
+// ** https://github.com/googleapis/gapic-generator-typescript **
+// ** All changes to this file may be overwritten. **
+
+import * as gax from 'google-gax';
+import {
+ APICallback,
+ Callback,
+ CallOptions,
+ Descriptors,
+ ClientOptions,
+ LROperation,
+} from 'google-gax';
+import * as path from 'path';
+
+import * as protosTypes from '../../protos/protos';
+import * as gapicConfig from './speech_client_config.json';
+
+const version = require('../../../package.json').version;
/**
- * Service that implements Google Cloud Speech API.
- *
+ * Service that implements Google Cloud Speech API.
* @class
* @memberof v1p1beta1
*/
-class SpeechClient {
+export class SpeechClient {
+ private _descriptors: Descriptors = {page: {}, stream: {}, longrunning: {}};
+ private _speechStub: Promise<{[name: string]: Function}>;
+ private _innerApiCalls: {[name: string]: Function};
+ private _terminated = false;
+ auth: gax.GoogleAuth;
+
/**
* Construct an instance of SpeechClient.
*
@@ -54,58 +71,55 @@ class SpeechClient {
* @param {string} [options.apiEndpoint] - The domain name of the
* API remote host.
*/
- constructor(opts) {
- opts = opts || {};
- this._descriptors = {};
- if (global.isBrowser) {
- // If we're in browser, we use gRPC fallback.
- opts.fallback = true;
+ constructor(opts?: ClientOptions) {
+ // Ensure that options include the service address and port.
+ const staticMembers = this.constructor as typeof SpeechClient;
+ const servicePath =
+ opts && opts.servicePath
+ ? opts.servicePath
+ : opts && opts.apiEndpoint
+ ? opts.apiEndpoint
+ : staticMembers.servicePath;
+ const port = opts && opts.port ? opts.port : staticMembers.port;
+
+ if (!opts) {
+ opts = {servicePath, port};
}
+ opts.servicePath = opts.servicePath || servicePath;
+ opts.port = opts.port || port;
+ opts.clientConfig = opts.clientConfig || {};
+ const isBrowser = typeof window !== 'undefined';
+ if (isBrowser) {
+ opts.fallback = true;
+ }
// If we are in browser, we are already using fallback because of the
// "browser" field in package.json.
// But if we were explicitly requested to use fallback, let's do it now.
- const gaxModule = !global.isBrowser && opts.fallback ? gax.fallback : gax;
-
- const servicePath =
- opts.servicePath || opts.apiEndpoint || this.constructor.servicePath;
-
- // Ensure that options include the service address and port.
- opts = Object.assign(
- {
- clientConfig: {},
- port: this.constructor.port,
- servicePath,
- },
- opts
- );
+ const gaxModule = !isBrowser && opts.fallback ? gax.fallback : gax;
// Create a `gaxGrpc` object, with any grpc-specific options
// sent to the client.
- opts.scopes = this.constructor.scopes;
+ opts.scopes = (this.constructor as typeof SpeechClient).scopes;
const gaxGrpc = new gaxModule.GrpcClient(opts);
// Save the auth object to the client, for use by other methods.
- this.auth = gaxGrpc.auth;
+ this.auth = gaxGrpc.auth as gax.GoogleAuth;
// Determine the client header string.
- const clientHeader = [];
-
+ const clientHeader = [`gax/${gaxModule.version}`, `gapic/${version}`];
if (typeof process !== 'undefined' && 'versions' in process) {
clientHeader.push(`gl-node/${process.versions.node}`);
- }
- clientHeader.push(`gax/${gaxModule.version}`);
- if (opts.fallback) {
- clientHeader.push(`gl-web/${gaxModule.version}`);
} else {
+ clientHeader.push(`gl-web/${gaxModule.version}`);
+ }
+ if (!opts.fallback) {
clientHeader.push(`grpc/${gaxGrpc.grpcVersion}`);
}
- clientHeader.push(`gapic/${VERSION}`);
if (opts.libName && opts.libVersion) {
clientHeader.push(`${opts.libName}/${opts.libVersion}`);
}
-
// Load the applicable protos.
// For Node.js, pass the path to JSON proto file.
// For browsers, pass the JSON content.
@@ -129,28 +143,29 @@ class SpeechClient {
),
};
- const protoFilesRoot = opts.fallback
- ? gaxModule.protobuf.Root.fromJSON(require('../../protos/protos.json'))
- : gaxModule.protobuf.loadSync(nodejsProtoPath);
-
// This API contains "long-running operations", which return an
// Operation object that allows for tracking of the operation,
// rather than holding a request open.
- this.operationsClient = new gaxModule.lro({
- auth: gaxGrpc.auth,
- grpc: gaxGrpc.grpc,
- }).operationsClient(opts);
+ const protoFilesRoot = opts.fallback
+ ? gaxModule.protobuf.Root.fromJSON(require('../../protos/protos.json'))
+ : gaxModule.protobuf.loadSync(nodejsProtoPath);
+ const operationsClient = gaxModule
+ .lro({
+ auth: this.auth,
+ grpc: 'grpc' in gaxGrpc ? gaxGrpc.grpc : undefined,
+ })
+ .operationsClient(opts);
const longRunningRecognizeResponse = protoFilesRoot.lookup(
- 'google.cloud.speech.v1p1beta1.LongRunningRecognizeResponse'
- );
+ '.google.cloud.speech.v1p1beta1.LongRunningRecognizeResponse'
+ ) as gax.protobuf.Type;
const longRunningRecognizeMetadata = protoFilesRoot.lookup(
- 'google.cloud.speech.v1p1beta1.LongRunningRecognizeMetadata'
- );
+ '.google.cloud.speech.v1p1beta1.LongRunningRecognizeMetadata'
+ ) as gax.protobuf.Type;
this._descriptors.longrunning = {
longRunningRecognize: new gaxModule.LongrunningDescriptor(
- this.operationsClient,
+ operationsClient,
longRunningRecognizeResponse.decode.bind(longRunningRecognizeResponse),
longRunningRecognizeMetadata.decode.bind(longRunningRecognizeMetadata)
),
@@ -159,8 +174,8 @@ class SpeechClient {
// Put together the default options sent with requests.
const defaults = gaxGrpc.constructSettings(
'google.cloud.speech.v1p1beta1.Speech',
- gapicConfig,
- opts.clientConfig,
+ gapicConfig as gax.ClientConfig,
+ opts.clientConfig || {},
{'x-goog-api-client': clientHeader.join(' ')}
);
@@ -171,12 +186,15 @@ class SpeechClient {
// Put together the "service stub" for
// google.cloud.speech.v1p1beta1.Speech.
- const speechStub = gaxGrpc.createStub(
+ this._speechStub = gaxGrpc.createStub(
opts.fallback
- ? protos.lookupService('google.cloud.speech.v1p1beta1.Speech')
- : protos.google.cloud.speech.v1p1beta1.Speech,
+ ? (protos as protobuf.Root).lookupService(
+ 'google.cloud.speech.v1p1beta1.Speech'
+ )
+ : // tslint:disable-next-line no-any
+ (protos as any).google.cloud.speech.v1p1beta1.Speech,
opts
- );
+ ) as Promise<{[method: string]: Function}>;
// Iterate over each of the methods that the service provides
// and create an API call method for each.
@@ -185,21 +203,35 @@ class SpeechClient {
'longRunningRecognize',
'streamingRecognize',
];
+
for (const methodName of speechStubMethods) {
- const innerCallPromise = speechStub.then(
- stub => (...args) => {
+ const innerCallPromise = this._speechStub.then(
+ stub => (...args: Array<{}>) => {
return stub[methodName].apply(stub, args);
},
- err => () => {
+ (err: Error | null | undefined) => () => {
throw err;
}
);
- this._innerApiCalls[methodName] = gaxModule.createApiCall(
+
+ const apiCall = gaxModule.createApiCall(
innerCallPromise,
defaults[methodName],
- this._descriptors.stream[methodName] ||
+ this._descriptors.page[methodName] ||
+ this._descriptors.stream[methodName] ||
this._descriptors.longrunning[methodName]
);
+
+ this._innerApiCalls[methodName] = (
+ argument: {},
+ callOptions?: CallOptions,
+ callback?: APICallback
+ ) => {
+ if (this._terminated) {
+ return Promise.reject('The client has already been closed.');
+ }
+ return apiCall(argument, callOptions, callback);
+ };
}
}
@@ -233,89 +265,137 @@ class SpeechClient {
return ['https://www.googleapis.com/auth/cloud-platform'];
}
+  getProjectId(): Promise<string>;
+  getProjectId(callback: Callback<string, undefined, undefined>): void;
/**
* Return the project ID used by this class.
* @param {function(Error, string)} callback - the callback to
* be called with the current project Id.
*/
- getProjectId(callback) {
- return this.auth.getProjectId(callback);
+  getProjectId(
+    callback?: Callback<string, undefined, undefined>
+  ): Promise<string> | void {
+ if (callback) {
+ this.auth.getProjectId(callback);
+ return;
+ }
+ return this.auth.getProjectId();
}
// -------------------
// -- Service calls --
// -------------------
-
+ recognize(
+ request: protosTypes.google.cloud.speech.v1p1beta1.IRecognizeRequest,
+ options?: gax.CallOptions
+ ): Promise<
+ [
+ protosTypes.google.cloud.speech.v1p1beta1.IRecognizeResponse,
+ protosTypes.google.cloud.speech.v1p1beta1.IRecognizeRequest | undefined,
+ {} | undefined
+ ]
+ >;
+ recognize(
+ request: protosTypes.google.cloud.speech.v1p1beta1.IRecognizeRequest,
+ options: gax.CallOptions,
+ callback: Callback<
+ protosTypes.google.cloud.speech.v1p1beta1.IRecognizeResponse,
+ protosTypes.google.cloud.speech.v1p1beta1.IRecognizeRequest | undefined,
+ {} | undefined
+ >
+ ): void;
/**
* Performs synchronous speech recognition: receive results after all audio
* has been sent and processed.
*
* @param {Object} request
* The request object that will be sent.
- * @param {Object} request.config
+ * @param {google.cloud.speech.v1p1beta1.RecognitionConfig} request.config
* Required. Provides information to the recognizer that specifies how to
* process the request.
- *
- * This object should have the same structure as [RecognitionConfig]{@link google.cloud.speech.v1p1beta1.RecognitionConfig}
- * @param {Object} request.audio
+ * @param {google.cloud.speech.v1p1beta1.RecognitionAudio} request.audio
* Required. The audio data to be recognized.
- *
- * This object should have the same structure as [RecognitionAudio]{@link google.cloud.speech.v1p1beta1.RecognitionAudio}
- * @param {Object} [options]
- * Optional parameters. You can override the default settings for this call, e.g, timeout,
- * retries, paginations, etc. See [gax.CallOptions]{@link https://googleapis.github.io/gax-nodejs/interfaces/CallOptions.html} for the details.
- * @param {function(?Error, ?Object)} [callback]
- * The function which will be called with the result of the API call.
- *
- * The second parameter to the callback is an object representing [RecognizeResponse]{@link google.cloud.speech.v1p1beta1.RecognizeResponse}.
+ * @param {object} [options]
+ * Call options. See {@link https://googleapis.dev/nodejs/google-gax/latest/interfaces/CallOptions.html|CallOptions} for more details.
* @returns {Promise} - The promise which resolves to an array.
* The first element of the array is an object representing [RecognizeResponse]{@link google.cloud.speech.v1p1beta1.RecognizeResponse}.
* The promise has a method named "cancel" which cancels the ongoing API call.
- *
- * @example
- *
- * const speech = require('@google-cloud/speech');
- *
- * const client = new speech.v1p1beta1.SpeechClient({
- * // optional auth parameters.
- * });
- *
- * const encoding = 'FLAC';
- * const sampleRateHertz = 44100;
- * const languageCode = 'en-US';
- * const config = {
- * encoding: encoding,
- * sampleRateHertz: sampleRateHertz,
- * languageCode: languageCode,
- * };
- * const uri = 'gs://bucket_name/file_name.flac';
- * const audio = {
- * uri: uri,
- * };
- * const request = {
- * config: config,
- * audio: audio,
- * };
- * client.recognize(request)
- * .then(responses => {
- * const response = responses[0];
- * // doThingsWith(response)
- * })
- * .catch(err => {
- * console.error(err);
- * });
*/
- recognize(request, options, callback) {
- if (options instanceof Function && callback === undefined) {
- callback = options;
+ recognize(
+ request: protosTypes.google.cloud.speech.v1p1beta1.IRecognizeRequest,
+ optionsOrCallback?:
+ | gax.CallOptions
+ | Callback<
+ protosTypes.google.cloud.speech.v1p1beta1.IRecognizeResponse,
+ | protosTypes.google.cloud.speech.v1p1beta1.IRecognizeRequest
+ | undefined,
+ {} | undefined
+ >,
+ callback?: Callback<
+ protosTypes.google.cloud.speech.v1p1beta1.IRecognizeResponse,
+ protosTypes.google.cloud.speech.v1p1beta1.IRecognizeRequest | undefined,
+ {} | undefined
+ >
+ ): Promise<
+ [
+ protosTypes.google.cloud.speech.v1p1beta1.IRecognizeResponse,
+ protosTypes.google.cloud.speech.v1p1beta1.IRecognizeRequest | undefined,
+ {} | undefined
+ ]
+ > | void {
+ request = request || {};
+ let options: gax.CallOptions;
+ if (typeof optionsOrCallback === 'function' && callback === undefined) {
+ callback = optionsOrCallback;
options = {};
+ } else {
+ options = optionsOrCallback as gax.CallOptions;
}
- request = request || {};
options = options || {};
-
return this._innerApiCalls.recognize(request, options, callback);
}
+ /**
+ * Performs bidirectional streaming speech recognition: receive results while
+ * sending audio. This method is only available via the gRPC API (not REST).
+ *
+ * @param {object} [options]
+ * Call options. See {@link https://googleapis.dev/nodejs/google-gax/latest/interfaces/CallOptions.html|CallOptions} for more details.
+ * @returns {Stream}
+ * An object stream which is both readable and writable. It accepts objects
+ * representing [StreamingRecognizeRequest]{@link google.cloud.speech.v1p1beta1.StreamingRecognizeRequest} for write() method, and
+ * will emit objects representing [StreamingRecognizeResponse]{@link google.cloud.speech.v1p1beta1.StreamingRecognizeResponse} on 'data' event asynchronously.
+ */
+ _streamingRecognize(options?: gax.CallOptions): gax.CancellableStream {
+ options = options || {};
+ return this._innerApiCalls.streamingRecognize(options);
+ }
+
+ longRunningRecognize(
+ request: protosTypes.google.cloud.speech.v1p1beta1.ILongRunningRecognizeRequest,
+ options?: gax.CallOptions
+ ): Promise<
+ [
+ LROperation<
+ protosTypes.google.cloud.speech.v1p1beta1.ILongRunningRecognizeResponse,
+ protosTypes.google.cloud.speech.v1p1beta1.ILongRunningRecognizeMetadata
+ >,
+ protosTypes.google.longrunning.IOperation | undefined,
+ {} | undefined
+ ]
+ >;
+ longRunningRecognize(
+ request: protosTypes.google.cloud.speech.v1p1beta1.ILongRunningRecognizeRequest,
+ options: gax.CallOptions,
+ callback: Callback<
+ LROperation<
+ protosTypes.google.cloud.speech.v1p1beta1.ILongRunningRecognizeResponse,
+ protosTypes.google.cloud.speech.v1p1beta1.ILongRunningRecognizeMetadata
+ >,
+ protosTypes.google.longrunning.IOperation | undefined,
+ {} | undefined
+ >
+ ): void;
/**
* Performs asynchronous speech recognition: receive results via the
* google.longrunning.Operations interface. Returns either an
@@ -326,176 +406,74 @@ class SpeechClient {
*
* @param {Object} request
* The request object that will be sent.
- * @param {Object} request.config
+ * @param {google.cloud.speech.v1p1beta1.RecognitionConfig} request.config
* Required. Provides information to the recognizer that specifies how to
* process the request.
- *
- * This object should have the same structure as [RecognitionConfig]{@link google.cloud.speech.v1p1beta1.RecognitionConfig}
- * @param {Object} request.audio
+ * @param {google.cloud.speech.v1p1beta1.RecognitionAudio} request.audio
* Required. The audio data to be recognized.
- *
- * This object should have the same structure as [RecognitionAudio]{@link google.cloud.speech.v1p1beta1.RecognitionAudio}
- * @param {Object} [options]
- * Optional parameters. You can override the default settings for this call, e.g, timeout,
- * retries, paginations, etc. See [gax.CallOptions]{@link https://googleapis.github.io/gax-nodejs/interfaces/CallOptions.html} for the details.
- * @param {function(?Error, ?Object)} [callback]
- * The function which will be called with the result of the API call.
- *
- * The second parameter to the callback is a [gax.Operation]{@link https://googleapis.github.io/gax-nodejs/classes/Operation.html} object.
+ * @param {object} [options]
+ * Call options. See {@link https://googleapis.dev/nodejs/google-gax/latest/interfaces/CallOptions.html|CallOptions} for more details.
* @returns {Promise} - The promise which resolves to an array.
- * The first element of the array is a [gax.Operation]{@link https://googleapis.github.io/gax-nodejs/classes/Operation.html} object.
+ * The first element of the array is an object representing [Operation]{@link google.longrunning.Operation}.
* The promise has a method named "cancel" which cancels the ongoing API call.
- *
- * @example
- *
- * const speech = require('@google-cloud/speech');
- *
- * const client = new speech.v1p1beta1.SpeechClient({
- * // optional auth parameters.
- * });
- *
- * const encoding = 'FLAC';
- * const sampleRateHertz = 44100;
- * const languageCode = 'en-US';
- * const config = {
- * encoding: encoding,
- * sampleRateHertz: sampleRateHertz,
- * languageCode: languageCode,
- * };
- * const uri = 'gs://bucket_name/file_name.flac';
- * const audio = {
- * uri: uri,
- * };
- * const request = {
- * config: config,
- * audio: audio,
- * };
- *
- * // Handle the operation using the promise pattern.
- * client.longRunningRecognize(request)
- * .then(responses => {
- * const [operation, initialApiResponse] = responses;
- *
- * // Operation#promise starts polling for the completion of the LRO.
- * return operation.promise();
- * })
- * .then(responses => {
- * const result = responses[0];
- * const metadata = responses[1];
- * const finalApiResponse = responses[2];
- * })
- * .catch(err => {
- * console.error(err);
- * });
- *
- * const encoding = 'FLAC';
- * const sampleRateHertz = 44100;
- * const languageCode = 'en-US';
- * const config = {
- * encoding: encoding,
- * sampleRateHertz: sampleRateHertz,
- * languageCode: languageCode,
- * };
- * const uri = 'gs://bucket_name/file_name.flac';
- * const audio = {
- * uri: uri,
- * };
- * const request = {
- * config: config,
- * audio: audio,
- * };
- *
- * // Handle the operation using the event emitter pattern.
- * client.longRunningRecognize(request)
- * .then(responses => {
- * const [operation, initialApiResponse] = responses;
- *
- * // Adding a listener for the "complete" event starts polling for the
- * // completion of the operation.
- * operation.on('complete', (result, metadata, finalApiResponse) => {
- * // doSomethingWith(result);
- * });
- *
- * // Adding a listener for the "progress" event causes the callback to be
- * // called on any change in metadata when the operation is polled.
- * operation.on('progress', (metadata, apiResponse) => {
- * // doSomethingWith(metadata)
- * });
- *
- * // Adding a listener for the "error" event handles any errors found during polling.
- * operation.on('error', err => {
- * // throw(err);
- * });
- * })
- * .catch(err => {
- * console.error(err);
- * });
- *
- * const encoding = 'FLAC';
- * const sampleRateHertz = 44100;
- * const languageCode = 'en-US';
- * const config = {
- * encoding: encoding,
- * sampleRateHertz: sampleRateHertz,
- * languageCode: languageCode,
- * };
- * const uri = 'gs://bucket_name/file_name.flac';
- * const audio = {
- * uri: uri,
- * };
- * const request = {
- * config: config,
- * audio: audio,
- * };
- *
- * // Handle the operation using the await pattern.
- * const [operation] = await client.longRunningRecognize(request);
- *
- * const [response] = await operation.promise();
*/
- longRunningRecognize(request, options, callback) {
- if (options instanceof Function && callback === undefined) {
- callback = options;
+ longRunningRecognize(
+ request: protosTypes.google.cloud.speech.v1p1beta1.ILongRunningRecognizeRequest,
+ optionsOrCallback?:
+ | gax.CallOptions
+ | Callback<
+ LROperation<
+ protosTypes.google.cloud.speech.v1p1beta1.ILongRunningRecognizeResponse,
+ protosTypes.google.cloud.speech.v1p1beta1.ILongRunningRecognizeMetadata
+ >,
+ protosTypes.google.longrunning.IOperation | undefined,
+ {} | undefined
+ >,
+ callback?: Callback<
+ LROperation<
+ protosTypes.google.cloud.speech.v1p1beta1.ILongRunningRecognizeResponse,
+ protosTypes.google.cloud.speech.v1p1beta1.ILongRunningRecognizeMetadata
+ >,
+ protosTypes.google.longrunning.IOperation | undefined,
+ {} | undefined
+ >
+ ): Promise<
+ [
+ LROperation<
+ protosTypes.google.cloud.speech.v1p1beta1.ILongRunningRecognizeResponse,
+ protosTypes.google.cloud.speech.v1p1beta1.ILongRunningRecognizeMetadata
+ >,
+ protosTypes.google.longrunning.IOperation | undefined,
+ {} | undefined
+ ]
+ > | void {
+ request = request || {};
+ let options: gax.CallOptions;
+ if (typeof optionsOrCallback === 'function' && callback === undefined) {
+ callback = optionsOrCallback;
options = {};
+ } else {
+ options = optionsOrCallback as gax.CallOptions;
}
- request = request || {};
options = options || {};
-
return this._innerApiCalls.longRunningRecognize(request, options, callback);
}
/**
- * Performs bidirectional streaming speech recognition: receive results while
- * sending audio. This method is only available via the gRPC API (not REST).
+ * Terminate the GRPC channel and close the client.
*
- * @param {Object} [options]
- * Optional parameters. You can override the default settings for this call, e.g, timeout,
- * retries, paginations, etc. See [gax.CallOptions]{@link https://googleapis.github.io/gax-nodejs/interfaces/CallOptions.html} for the details.
- * @returns {Stream}
- * An object stream which is both readable and writable. It accepts objects
- * representing [StreamingRecognizeRequest]{@link google.cloud.speech.v1p1beta1.StreamingRecognizeRequest} for write() method, and
- * will emit objects representing [StreamingRecognizeResponse]{@link google.cloud.speech.v1p1beta1.StreamingRecognizeResponse} on 'data' event asynchronously.
- *
- * @example
- *
- * const speech = require('@google-cloud/speech');
- *
- * const client = new speech.v1p1beta1.SpeechClient({
- * // optional auth parameters.
- * });
- *
- * const stream = client.streamingRecognize().on('data', response => {
- * // doThingsWith(response)
- * });
- * const request = {};
- * // Write request objects.
- * stream.write(request);
+ * The client will no longer be usable and all future behavior is undefined.
*/
- streamingRecognize(options) {
- options = options || {};
-
- return this._innerApiCalls.streamingRecognize(options);
+  close(): Promise<void> {
+ if (!this._terminated) {
+ return this._speechStub.then(stub => {
+ this._terminated = true;
+ stub.close();
+ });
+ }
+ return Promise.resolve();
}
}
-module.exports = SpeechClient;
+import {ImprovedStreamingClient} from '../helpers';
+export interface SpeechClient extends ImprovedStreamingClient {}
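With the JSDoc `@example` blocks deleted above, here is a minimal TypeScript sketch of calling the migrated client. The request shape mirrors the system tests later in this diff; the bucket URI is the same placeholder the old examples used.

  import {SpeechClient} from '@google-cloud/speech';

  async function main() {
    const client = new SpeechClient();
    const request = {
      config: {
        encoding: 'FLAC' as const,
        sampleRateHertz: 44100,
        languageCode: 'en-US',
      },
      audio: {uri: 'gs://bucket_name/file_name.flac'}, // placeholder URI
    };
    // Promise overload: resolves to [response, request, rawResponse].
    const [response] = await client.recognize(request);
    console.log(response.results);
    // LRO overload: resolves to [operation, ...]; await the operation for the result.
    const [operation] = await client.longRunningRecognize(request);
    const [lroResponse] = await operation.promise();
    console.log(lroResponse.results);
  }

  main().catch(console.error);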
diff --git a/src/v1p1beta1/speech_client_config.json b/src/v1p1beta1/speech_client_config.json
index 08b672bd..11345790 100644
--- a/src/v1p1beta1/speech_client_config.json
+++ b/src/v1p1beta1/speech_client_config.json
@@ -2,11 +2,11 @@
"interfaces": {
"google.cloud.speech.v1p1beta1.Speech": {
"retry_codes": {
+ "non_idempotent": [],
"idempotent": [
"DEADLINE_EXCEEDED",
"UNAVAILABLE"
- ],
- "non_idempotent": []
+ ]
},
"retry_params": {
"default": {
@@ -14,25 +14,25 @@
"retry_delay_multiplier": 1.3,
"max_retry_delay_millis": 60000,
"initial_rpc_timeout_millis": 20000,
- "rpc_timeout_multiplier": 1.0,
+ "rpc_timeout_multiplier": 1,
"max_rpc_timeout_millis": 20000,
"total_timeout_millis": 600000
}
},
"methods": {
"Recognize": {
- "timeout_millis": 60000,
- "retry_codes_name": "non_idempotent",
+ "timeout_millis": 5000000,
+ "retry_codes_name": "idempotent",
"retry_params_name": "default"
},
"LongRunningRecognize": {
- "timeout_millis": 60000,
+ "timeout_millis": 5000000,
"retry_codes_name": "non_idempotent",
"retry_params_name": "default"
},
"StreamingRecognize": {
- "timeout_millis": 60000,
- "retry_codes_name": "non_idempotent",
+ "timeout_millis": 5000000,
+ "retry_codes_name": "idempotent",
"retry_params_name": "default"
}
}
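The retuned defaults above (idempotent retries for Recognize/StreamingRecognize, 5,000,000 ms timeouts) remain per-call overridable through gax CallOptions. A one-line sketch restoring the old 60 s timeout for a single request:

  // The second argument is a gax CallOptions object; timeout is in milliseconds.
  const [response] = await client.recognize(request, {timeout: 60000});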
diff --git a/synth.metadata b/synth.metadata
index 50fe1d4c..c79576e7 100644
--- a/synth.metadata
+++ b/synth.metadata
@@ -1,19 +1,12 @@
{
- "updateTime": "2019-11-14T12:25:35.290683Z",
+ "updateTime": "2019-11-22T19:40:37.666421Z",
"sources": [
- {
- "generator": {
- "name": "artman",
- "version": "0.41.1",
- "dockerImage": "googleapis/artman@sha256:545c758c76c3f779037aa259023ec3d1ef2d57d2c8cd00a222cb187d63ceac5e"
- }
- },
{
"git": {
"name": "googleapis",
"remote": "https://github.com/googleapis/googleapis.git",
- "sha": "4f747bda9b099b4426f495985680d16d0227fa5f",
- "internalRef": "280394936"
+ "sha": "3ba7ddc4b2acf532bdfb0004ca26311053c11c30",
+ "internalRef": "281852671"
}
},
{
@@ -30,9 +23,8 @@
"source": "googleapis",
"apiName": "speech",
"apiVersion": "v1",
- "language": "nodejs",
- "generator": "gapic",
- "config": "google/cloud/speech/artman_speech_v1.yaml"
+ "language": "typescript",
+ "generator": "gapic-generator-typescript"
}
},
{
@@ -40,9 +32,8 @@
"source": "googleapis",
"apiName": "speech",
"apiVersion": "v1p1beta1",
- "language": "nodejs",
- "generator": "gapic",
- "config": "google/cloud/speech/artman_speech_v1p1beta1.yaml"
+ "language": "typescript",
+ "generator": "gapic-generator-typescript"
}
}
]
diff --git a/synth.py b/synth.py
index d4d17901..3758b333 100644
--- a/synth.py
+++ b/synth.py
@@ -21,38 +21,40 @@
logging.basicConfig(level=logging.DEBUG)
-gapic = gcp.GAPICGenerator()
+gapic = gcp.GAPICMicrogenerator()
common_templates = gcp.CommonTemplates()
versions = ['v1', 'v1p1beta1']
+name = 'speech'
for version in versions:
- library = gapic.node_library('speech', version)
+ library = gapic.typescript_library(
+ name,
+ proto_path=f'google/cloud/{name}/{version}',
+ generator_args={
+ 'grpc-service-config': f'google/cloud/{name}/{version}/{name}_grpc_service_config.json',
+ 'package-name': f'@google-cloud/{name}'
+ },
+ version=version)
# skip index, protos, package.json, and README.md
s.copy(
library,
- excludes=['package.json', 'src/index.js',]
+ excludes=['package.json', 'src/index.ts',]
)
- # Manual helper methods overrides the streaming API so that it
- # accepts streamingConfig when calling streamingRecognize. Fix
- # the gapic tests to use the overridden method signature.
- s.replace( f"test/gapic-{version}.js",
- "(mockBidiStreamingGrpcMethod\()request",
- r"\1{ streamingConfig: {} }")
-
- s.replace(
- f"test/gapic-{version}.js",
- "stream\.write\(request\)",
- "stream.write()")
-
- s.replace(
- f"test/gapic-{version}.js",
- "// Mock request\n\s*const request = {};",
- "")
+ # Manual helper methods override the streaming API so that it
+ # accepts streamingConfig when calling streamingRecognize.
+ # Rename the generated methods to avoid confusion.
+ s.replace(f'src/{version}/{name}_client.ts', r'( +)streamingRecognize\(', '\\1_streamingRecognize(')
+ s.replace(f'test/gapic-{name}-{version}.ts', r'client\.streamingRecognize\(', 'client._streamingRecognize(')
+ s.replace(f'src/{version}/{name}_client.ts', r'\Z',
+ '\n' +
+ "import {ImprovedStreamingClient} from '../helpers';\n" +
+ 'export interface SpeechClient extends ImprovedStreamingClient {}\n'
+ )
-templates = common_templates.node_library()
+templates = common_templates.node_library(source_location='build/src')
s.copy(templates)
#
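These replacements keep the generated method private as `_streamingRecognize`, while the manual `helpers.ts` continues to expose the user-facing `streamingRecognize(config)`, which accepts the streaming config up front and returns a duplex stream; the system tests below exercise exactly this shape. A minimal sketch:

  // Helper-provided streaming API: configure once, then pipe raw audio in.
  const stream = client.streamingRecognize({
    config: {encoding: 'LINEAR16', sampleRateHertz: 24000, languageCode: 'en-US'},
    interimResults: false,
  });
  stream.on('data', response => console.log(response.results));
  fs.createReadStream('input.wav').pipe(stream); // file path is illustrative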
diff --git a/system-test/.eslintrc.yml b/system-test/.eslintrc.yml
index 2e6882e4..dc5d9b01 100644
--- a/system-test/.eslintrc.yml
+++ b/system-test/.eslintrc.yml
@@ -1,6 +1,4 @@
---
env:
mocha: true
-rules:
- node/no-unpublished-require: off
- no-console: off
+
diff --git a/system-test/fixtures/sample/src/index.js b/system-test/fixtures/sample/src/index.js
new file mode 100644
index 00000000..a5b47cc7
--- /dev/null
+++ b/system-test/fixtures/sample/src/index.js
@@ -0,0 +1,27 @@
+// Copyright 2019 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// ** This file is automatically generated by gapic-generator-typescript. **
+// ** https://github.com/googleapis/gapic-generator-typescript **
+// ** All changes to this file may be overwritten. **
+
+
+/* eslint-disable node/no-missing-require, no-unused-vars */
+const speech = require('@google-cloud/speech');
+
+function main() {
+ const speechClient = new speech.SpeechClient();
+}
+
+main();
diff --git a/src/v1p1beta1/index.js b/system-test/fixtures/sample/src/index.ts
similarity index 64%
rename from src/v1p1beta1/index.js
rename to system-test/fixtures/sample/src/index.ts
index 93d34738..6003cad1 100644
--- a/src/v1p1beta1/index.js
+++ b/system-test/fixtures/sample/src/index.ts
@@ -11,9 +11,15 @@
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
+//
+// ** This file is automatically generated by gapic-generator-typescript. **
+// ** https://github.com/googleapis/gapic-generator-typescript **
+// ** All changes to this file may be overwritten. **
-'use strict';
+import {SpeechClient} from '@google-cloud/speech';
-const SpeechClient = require('./speech_client');
+function main() {
+ const speechClient = new SpeechClient();
+}
-module.exports.SpeechClient = SpeechClient;
+main();
diff --git a/system-test/install.ts b/system-test/install.ts
new file mode 100644
index 00000000..2736aee8
--- /dev/null
+++ b/system-test/install.ts
@@ -0,0 +1,50 @@
+// Copyright 2019 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// ** This file is automatically generated by gapic-generator-typescript. **
+// ** https://github.com/googleapis/gapic-generator-typescript **
+// ** All changes to this file may be overwritten. **
+
+import {packNTest} from 'pack-n-play';
+import {readFileSync} from 'fs';
+
+describe('typescript consumer tests', () => {
+ it('should have correct type signature for typescript users', async function() {
+ this.timeout(300000);
+ const options = {
+ packageDir: process.cwd(), // path to your module.
+ sample: {
+ description: 'typescript based user can use the type definitions',
+ ts: readFileSync(
+ './system-test/fixtures/sample/src/index.ts'
+ ).toString(),
+ },
+ };
+ await packNTest(options); // will throw upon error.
+ });
+
+ it('should have correct type signature for javascript users', async function() {
+ this.timeout(300000);
+ const options = {
+ packageDir: process.cwd(), // path to your module.
+ sample: {
+      description: 'javascript based user can use the type definitions',
+ ts: readFileSync(
+ './system-test/fixtures/sample/src/index.js'
+ ).toString(),
+ },
+ };
+ await packNTest(options); // will throw upon error.
+ });
+});
diff --git a/system-test/speech_system_test.js b/system-test/speech_system_test.js
index d49beed2..4b97f955 100644
--- a/system-test/speech_system_test.js
+++ b/system-test/speech_system_test.js
@@ -15,9 +15,9 @@
'use strict';
const assert = require('assert');
-const speech = require('../src');
-const fs = require('fs');
const path = require('path');
+const speech = require(path.join(process.cwd(), 'build', 'src'));
+const fs = require('fs');
describe('SpeechClient system test default', () => {
it('calls recognize', async () => {
diff --git a/system-test/speech_system_test_v1.js b/system-test/speech_system_test_v1.js
index df21c3fb..43cd2526 100644
--- a/system-test/speech_system_test_v1.js
+++ b/system-test/speech_system_test_v1.js
@@ -15,9 +15,9 @@
'use strict';
const assert = require('assert');
-const speech = require('../src');
-const fs = require('fs');
const path = require('path');
+const speech = require(path.join(process.cwd(), 'build', 'src'));
+const fs = require('fs');
describe('SpeechClient system test v1', () => {
it('calls recognize', async () => {
diff --git a/system-test/speech_system_test_v1p1beta1.js b/system-test/speech_system_test_v1p1beta1.js
index bf32071c..3526df33 100644
--- a/system-test/speech_system_test_v1p1beta1.js
+++ b/system-test/speech_system_test_v1p1beta1.js
@@ -15,9 +15,9 @@
'use strict';
const assert = require('assert');
-const speech = require('../src');
-const fs = require('fs');
const path = require('path');
+const speech = require(path.join(process.cwd(), 'build', 'src'));
+const fs = require('fs');
describe('SpeechClient system test v1p1beta1', () => {
it('calls recognize', async () => {
diff --git a/system-test/speech_typescript_system_test.ts b/system-test/speech_typescript_system_test.ts
new file mode 100644
index 00000000..21e8c920
--- /dev/null
+++ b/system-test/speech_typescript_system_test.ts
@@ -0,0 +1,119 @@
+// Copyright 2019 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+import * as assert from 'assert';
+import * as speech from '../src';
+import * as fs from 'fs';
+import * as path from 'path';
+import {google} from '../protos/protos';
+
+describe('SpeechClient TypeScript system test default', () => {
+ it('calls recognize', async () => {
+ const client = new speech.SpeechClient();
+
+ const languageCode = 'en-US';
+ const sampleRateHertz = 44100;
+ const encoding =
+ google.cloud.speech.v1.RecognitionConfig.AudioEncoding.FLAC;
+ const config = {
+ languageCode,
+ sampleRateHertz,
+ encoding,
+ };
+ const uri = 'gs://gapic-toolkit/hello.flac';
+ const audio = {
+ uri,
+ };
+ const request = {
+ config,
+ audio,
+ };
+ const [response] = await client.recognize(request);
+ assert.strictEqual(
+ response?.results?.[0]?.alternatives?.[0]?.transcript,
+ 'hello'
+ );
+ });
+
+ it('calls longRunningRecognize', async () => {
+ const client = new speech.SpeechClient();
+
+ const languageCode = 'en-US';
+ const sampleRateHertz = 44100;
+ const encoding =
+ google.cloud.speech.v1.RecognitionConfig.AudioEncoding.FLAC;
+ const config = {
+ languageCode,
+ sampleRateHertz,
+ encoding,
+ };
+ const uri = 'gs://gapic-toolkit/hello.flac';
+ const audio = {
+ uri,
+ };
+ const request = {
+ config,
+ audio,
+ };
+ const [operation] = await client.longRunningRecognize(request);
+ const [response] = await operation.promise();
+ assert.strictEqual(
+ response?.results?.[0]?.alternatives?.[0]?.transcript,
+ 'hello'
+ );
+ });
+
+ it('calls streamingRecognize', done => {
+ const filename = path.join(
+ 'system-test',
+ 'fixtures',
+ 'streamingRecognize',
+ 'input.wav'
+ );
+
+ const languageCode = 'en-US';
+ const sampleRateHertz = 24000;
+ const encoding =
+ google.cloud.speech.v1.RecognitionConfig.AudioEncoding.LINEAR16;
+ const config = {
+ languageCode,
+ sampleRateHertz,
+ encoding,
+ };
+ const request = {
+ config,
+ interimResults: false,
+ };
+
+ const client = new speech.SpeechClient();
+ const stream = client.streamingRecognize(request);
+ let gotResponse = false;
+ stream.on(
+ 'data',
+ (response: google.cloud.speech.v1.IStreamingRecognizeResponse) => {
+ assert.strictEqual(
+ response?.results?.[0]?.alternatives?.[0]?.transcript,
+ 'test of streaming recognize call'
+ );
+ gotResponse = true;
+ }
+ );
+ stream.on('end', () => {
+ assert(gotResponse);
+ done();
+ });
+ stream.on('error', done);
+ fs.createReadStream(filename).pipe(stream);
+ });
+});
diff --git a/system-test/speech_typescript_system_test_v1.ts b/system-test/speech_typescript_system_test_v1.ts
new file mode 100644
index 00000000..dedc762d
--- /dev/null
+++ b/system-test/speech_typescript_system_test_v1.ts
@@ -0,0 +1,119 @@
+// Copyright 2019 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+import * as assert from 'assert';
+import * as speech from '../src';
+import * as fs from 'fs';
+import * as path from 'path';
+import {google} from '../protos/protos';
+
+describe('SpeechClient TypeScript system test v1', () => {
+ it('calls recognize', async () => {
+ const client = new speech.v1.SpeechClient();
+
+ const languageCode = 'en-US';
+ const sampleRateHertz = 44100;
+ const encoding =
+ google.cloud.speech.v1.RecognitionConfig.AudioEncoding.FLAC;
+ const config = {
+ languageCode,
+ sampleRateHertz,
+ encoding,
+ };
+ const uri = 'gs://gapic-toolkit/hello.flac';
+ const audio = {
+ uri,
+ };
+ const request = {
+ config,
+ audio,
+ };
+ const [response] = await client.recognize(request);
+ assert.strictEqual(
+ response?.results?.[0]?.alternatives?.[0]?.transcript,
+ 'hello'
+ );
+ });
+
+ it('calls longRunningRecognize', async () => {
+ const client = new speech.v1.SpeechClient();
+
+ const languageCode = 'en-US';
+ const sampleRateHertz = 44100;
+ const encoding =
+ google.cloud.speech.v1.RecognitionConfig.AudioEncoding.FLAC;
+ const config = {
+ languageCode,
+ sampleRateHertz,
+ encoding,
+ };
+ const uri = 'gs://gapic-toolkit/hello.flac';
+ const audio = {
+ uri,
+ };
+ const request = {
+ config,
+ audio,
+ };
+ const [operation] = await client.longRunningRecognize(request);
+ const [response] = await operation.promise();
+ assert.strictEqual(
+ response?.results?.[0]?.alternatives?.[0]?.transcript,
+ 'hello'
+ );
+ });
+
+ it('calls streamingRecognize', done => {
+ const filename = path.join(
+ 'system-test',
+ 'fixtures',
+ 'streamingRecognize',
+ 'input.wav'
+ );
+
+ const languageCode = 'en-US';
+ const sampleRateHertz = 24000;
+ const encoding =
+ google.cloud.speech.v1.RecognitionConfig.AudioEncoding.LINEAR16;
+ const config = {
+ languageCode,
+ sampleRateHertz,
+ encoding,
+ };
+ const request = {
+ config,
+ interimResults: false,
+ };
+
+ const client = new speech.v1.SpeechClient();
+ const stream = client.streamingRecognize(request);
+ let gotResponse = false;
+ stream.on(
+ 'data',
+ (response: google.cloud.speech.v1.IStreamingRecognizeResponse) => {
+ assert.strictEqual(
+ response?.results?.[0]?.alternatives?.[0]?.transcript,
+ 'test of streaming recognize call'
+ );
+ gotResponse = true;
+ }
+ );
+ stream.on('end', () => {
+ assert(gotResponse);
+ done();
+ });
+ stream.on('error', done);
+ fs.createReadStream(filename).pipe(stream);
+ });
+});
diff --git a/system-test/speech_typescript_system_test_v1p1beta1.ts b/system-test/speech_typescript_system_test_v1p1beta1.ts
new file mode 100644
index 00000000..9bf94da7
--- /dev/null
+++ b/system-test/speech_typescript_system_test_v1p1beta1.ts
@@ -0,0 +1,119 @@
+// Copyright 2019 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+import * as assert from 'assert';
+import * as speech from '../src';
+import * as fs from 'fs';
+import * as path from 'path';
+import {google} from '../protos/protos';
+
+describe('SpeechClient TypeScript system test v1p1beta1', () => {
+ it('calls recognize', async () => {
+ const client = new speech.v1p1beta1.SpeechClient();
+
+ const languageCode = 'en-US';
+ const sampleRateHertz = 44100;
+ const encoding =
+ google.cloud.speech.v1p1beta1.RecognitionConfig.AudioEncoding.FLAC;
+ const config = {
+ languageCode,
+ sampleRateHertz,
+ encoding,
+ };
+ const uri = 'gs://gapic-toolkit/hello.flac';
+ const audio = {
+ uri,
+ };
+ const request = {
+ config,
+ audio,
+ };
+ const [response] = await client.recognize(request);
+ assert.strictEqual(
+ response?.results?.[0]?.alternatives?.[0]?.transcript,
+ 'hello'
+ );
+ });
+
+ it('calls longRunningRecognize', async () => {
+ const client = new speech.v1p1beta1.SpeechClient();
+
+ const languageCode = 'en-US';
+ const sampleRateHertz = 44100;
+ const encoding =
+ google.cloud.speech.v1p1beta1.RecognitionConfig.AudioEncoding.FLAC;
+ const config = {
+ languageCode,
+ sampleRateHertz,
+ encoding,
+ };
+ const uri = 'gs://gapic-toolkit/hello.flac';
+ const audio = {
+ uri,
+ };
+ const request = {
+ config,
+ audio,
+ };
+ const [operation] = await client.longRunningRecognize(request);
+ const [response] = await operation.promise();
+ assert.strictEqual(
+ response?.results?.[0]?.alternatives?.[0]?.transcript,
+ 'hello'
+ );
+ });
+
+ it('calls streamingRecognize', done => {
+ const filename = path.join(
+ 'system-test',
+ 'fixtures',
+ 'streamingRecognize',
+ 'input.wav'
+ );
+
+ const languageCode = 'en-US';
+ const sampleRateHertz = 24000;
+ const encoding =
+ google.cloud.speech.v1.RecognitionConfig.AudioEncoding.LINEAR16;
+ const config = {
+ languageCode,
+ sampleRateHertz,
+ encoding,
+ };
+ const request = {
+ config,
+ interimResults: false,
+ };
+
+ const client = new speech.v1p1beta1.SpeechClient();
+ const stream = client.streamingRecognize(request);
+ let gotResponse = false;
+ stream.on(
+ 'data',
+ (response: google.cloud.speech.v1p1beta1.IStreamingRecognizeResponse) => {
+ assert.strictEqual(
+ response?.results?.[0]?.alternatives?.[0]?.transcript,
+ 'test of streaming recognize call'
+ );
+ gotResponse = true;
+ }
+ );
+ stream.on('end', () => {
+ assert(gotResponse);
+ done();
+ });
+ stream.on('error', done);
+ fs.createReadStream(filename).pipe(stream);
+ });
+});
diff --git a/test/gapic-v1.js b/test/gapic-speech-v1.ts
similarity index 59%
rename from test/gapic-v1.js
rename to test/gapic-speech-v1.ts
index 6c2facbd..c4dc6150 100644
--- a/test/gapic-v1.js
+++ b/test/gapic-speech-v1.ts
@@ -11,80 +11,135 @@
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
+//
+// ** This file is automatically generated by gapic-generator-typescript. **
+// ** https://github.com/googleapis/gapic-generator-typescript **
+// ** All changes to this file may be overwritten. **
-'use strict';
-
-const assert = require('assert');
-const {PassThrough} = require('stream');
-
+import * as protosTypes from '../protos/protos';
+import * as assert from 'assert';
const speechModule = require('../src');
+import {PassThrough} from 'stream';
+
const FAKE_STATUS_CODE = 1;
-const error = new Error();
-error.code = FAKE_STATUS_CODE;
+class FakeError {
+ name: string;
+ message: string;
+ code: number;
+ constructor(n: number) {
+ this.name = 'fakeName';
+ this.message = 'fake message';
+ this.code = n;
+ }
+}
+const error = new FakeError(FAKE_STATUS_CODE);
+export interface Callback {
+ (err: FakeError | null, response?: {} | null): void;
+}
-describe('SpeechClient', () => {
+export class Operation {
+ constructor() {}
+ promise() {}
+}
+function mockSimpleGrpcMethod(
+ expectedRequest: {},
+ response: {} | null,
+ error: FakeError | null
+) {
+ return (actualRequest: {}, options: {}, callback: Callback) => {
+ assert.deepStrictEqual(actualRequest, expectedRequest);
+ if (error) {
+ callback(error);
+ } else if (response) {
+ callback(null, response);
+ } else {
+ callback(null);
+ }
+ };
+}
+function mockBidiStreamingGrpcMethod(
+ expectedRequest: {},
+ response: {} | null,
+ error: FakeError | null
+) {
+ return () => {
+ const mockStream = new PassThrough({
+ objectMode: true,
+ transform: (chunk: {}, enc: {}, callback: Callback) => {
+ assert.deepStrictEqual(chunk, expectedRequest);
+ if (error) {
+ callback(error);
+ } else {
+ callback(null, response);
+ }
+ },
+ });
+ return mockStream;
+ };
+}
+function mockLongRunningGrpcMethod(
+ expectedRequest: {},
+ response: {} | null,
+ error?: {} | null
+) {
+ return (request: {}) => {
+ assert.deepStrictEqual(request, expectedRequest);
+ const mockOperation = {
+ promise() {
+ return new Promise((resolve, reject) => {
+ if (error) {
+ reject(error);
+ } else {
+ resolve([response]);
+ }
+ });
+ },
+ };
+ return Promise.resolve([mockOperation]);
+ };
+}
+describe('v1.SpeechClient', () => {
it('has servicePath', () => {
const servicePath = speechModule.v1.SpeechClient.servicePath;
assert(servicePath);
});
-
it('has apiEndpoint', () => {
const apiEndpoint = speechModule.v1.SpeechClient.apiEndpoint;
assert(apiEndpoint);
});
-
it('has port', () => {
const port = speechModule.v1.SpeechClient.port;
assert(port);
assert(typeof port === 'number');
});
-
- it('should create a client with no options', () => {
+ it('should create a client with no option', () => {
const client = new speechModule.v1.SpeechClient();
assert(client);
});
-
it('should create a client with gRPC fallback', () => {
- const client = new speechModule.v1.SpeechClient({fallback: true});
+ const client = new speechModule.v1.SpeechClient({
+ fallback: true,
+ });
assert(client);
});
-
describe('recognize', () => {
it('invokes recognize without error', done => {
const client = new speechModule.v1.SpeechClient({
credentials: {client_email: 'bogus', private_key: 'bogus'},
projectId: 'bogus',
});
-
// Mock request
- const encoding = 'FLAC';
- const sampleRateHertz = 44100;
- const languageCode = 'en-US';
- const config = {
- encoding: encoding,
- sampleRateHertz: sampleRateHertz,
- languageCode: languageCode,
- };
- const uri = 'gs://bucket_name/file_name.flac';
- const audio = {
- uri: uri,
- };
- const request = {
- config: config,
- audio: audio,
- };
-
+ const request: protosTypes.google.cloud.speech.v1.IRecognizeRequest = {};
// Mock response
const expectedResponse = {};
-
- // Mock Grpc layer
+ // Mock gRPC layer
client._innerApiCalls.recognize = mockSimpleGrpcMethod(
request,
- expectedResponse
+ expectedResponse,
+ null
);
-
- client.recognize(request, (err, response) => {
+ client.recognize(request, (err: {}, response: {}) => {
assert.ifError(err);
assert.deepStrictEqual(response, expectedResponse);
done();
@@ -96,86 +151,50 @@ describe('SpeechClient', () => {
credentials: {client_email: 'bogus', private_key: 'bogus'},
projectId: 'bogus',
});
-
// Mock request
- const encoding = 'FLAC';
- const sampleRateHertz = 44100;
- const languageCode = 'en-US';
- const config = {
- encoding: encoding,
- sampleRateHertz: sampleRateHertz,
- languageCode: languageCode,
- };
- const uri = 'gs://bucket_name/file_name.flac';
- const audio = {
- uri: uri,
- };
- const request = {
- config: config,
- audio: audio,
- };
-
- // Mock Grpc layer
+ const request: protosTypes.google.cloud.speech.v1.IRecognizeRequest = {};
+ // Mock response
+ const expectedResponse = {};
+ // Mock gRPC layer
client._innerApiCalls.recognize = mockSimpleGrpcMethod(
request,
null,
error
);
-
- client.recognize(request, (err, response) => {
- assert(err instanceof Error);
+ client.recognize(request, (err: FakeError, response: {}) => {
+ assert(err instanceof FakeError);
assert.strictEqual(err.code, FAKE_STATUS_CODE);
assert(typeof response === 'undefined');
done();
});
});
});
-
- describe('longRunningRecognize', function() {
+ describe('longRunningRecognize', () => {
it('invokes longRunningRecognize without error', done => {
const client = new speechModule.v1.SpeechClient({
credentials: {client_email: 'bogus', private_key: 'bogus'},
projectId: 'bogus',
});
-
// Mock request
- const encoding = 'FLAC';
- const sampleRateHertz = 44100;
- const languageCode = 'en-US';
- const config = {
- encoding: encoding,
- sampleRateHertz: sampleRateHertz,
- languageCode: languageCode,
- };
- const uri = 'gs://bucket_name/file_name.flac';
- const audio = {
- uri: uri,
- };
- const request = {
- config: config,
- audio: audio,
- };
-
+ const request: protosTypes.google.cloud.speech.v1.ILongRunningRecognizeRequest = {};
// Mock response
const expectedResponse = {};
-
- // Mock Grpc layer
+ // Mock gRPC layer
client._innerApiCalls.longRunningRecognize = mockLongRunningGrpcMethod(
request,
expectedResponse
);
-
client
.longRunningRecognize(request)
- .then(responses => {
+ .then((responses: [Operation]) => {
const operation = responses[0];
- return operation.promise();
+ return operation ? operation.promise() : {};
})
- .then(responses => {
+ .then((responses: [Operation]) => {
assert.deepStrictEqual(responses[0], expectedResponse);
done();
})
- .catch(err => {
+ .catch((err: {}) => {
done(err);
});
});
@@ -185,166 +204,85 @@ describe('SpeechClient', () => {
credentials: {client_email: 'bogus', private_key: 'bogus'},
projectId: 'bogus',
});
-
// Mock request
- const encoding = 'FLAC';
- const sampleRateHertz = 44100;
- const languageCode = 'en-US';
- const config = {
- encoding: encoding,
- sampleRateHertz: sampleRateHertz,
- languageCode: languageCode,
- };
- const uri = 'gs://bucket_name/file_name.flac';
- const audio = {
- uri: uri,
- };
- const request = {
- config: config,
- audio: audio,
- };
-
- // Mock Grpc layer
+ const request: protosTypes.google.cloud.speech.v1.ILongRunningRecognizeRequest = {};
+ // Mock response
+ const expectedResponse = {};
+ // Mock gRPC layer
client._innerApiCalls.longRunningRecognize = mockLongRunningGrpcMethod(
request,
null,
error
);
-
client
.longRunningRecognize(request)
- .then(responses => {
+ .then((responses: [Operation]) => {
const operation = responses[0];
- return operation.promise();
+ return operation ? operation.promise() : {};
})
.then(() => {
assert.fail();
})
- .catch(err => {
- assert(err instanceof Error);
+ .catch((err: FakeError) => {
+ assert(err instanceof FakeError);
assert.strictEqual(err.code, FAKE_STATUS_CODE);
done();
});
});
-
- it('has longrunning decoder functions', () => {
- const client = new speechModule.v1.SpeechClient({
- credentials: {client_email: 'bogus', private_key: 'bogus'},
- projectId: 'bogus',
- });
- assert(
- client._descriptors.longrunning.longRunningRecognize
- .responseDecoder instanceof Function
- );
- assert(
- client._descriptors.longrunning.longRunningRecognize
- .metadataDecoder instanceof Function
- );
- });
});
-
describe('streamingRecognize', () => {
it('invokes streamingRecognize without error', done => {
const client = new speechModule.v1.SpeechClient({
credentials: {client_email: 'bogus', private_key: 'bogus'},
projectId: 'bogus',
});
-
+ // Mock request
+ const request = {};
// Mock response
const expectedResponse = {};
-
- // Mock Grpc layer
+ // Mock gRPC layer
client._innerApiCalls.streamingRecognize = mockBidiStreamingGrpcMethod(
- {streamingConfig: {}},
- expectedResponse
+ request,
+ expectedResponse,
+ null
);
-
const stream = client
- .streamingRecognize()
- .on('data', response => {
+ ._streamingRecognize()
+ .on('data', (response: {}) => {
assert.deepStrictEqual(response, expectedResponse);
done();
})
- .on('error', err => {
+ .on('error', (err: FakeError) => {
done(err);
});
-
- stream.write();
+ stream.write(request);
});
-
it('invokes streamingRecognize with error', done => {
const client = new speechModule.v1.SpeechClient({
credentials: {client_email: 'bogus', private_key: 'bogus'},
projectId: 'bogus',
});
-
- // Mock Grpc layer
+ // Mock request
+ const request = {};
+ // Mock response
+ const expectedResponse = {};
+ // Mock gRPC layer
client._innerApiCalls.streamingRecognize = mockBidiStreamingGrpcMethod(
- {streamingConfig: {}},
+ request,
null,
error
);
-
const stream = client
- .streamingRecognize()
+ ._streamingRecognize()
.on('data', () => {
assert.fail();
})
- .on('error', err => {
- assert(err instanceof Error);
+ .on('error', (err: FakeError) => {
+ assert(err instanceof FakeError);
assert.strictEqual(err.code, FAKE_STATUS_CODE);
done();
});
-
- stream.write();
+ stream.write(request);
});
});
});
-
-function mockSimpleGrpcMethod(expectedRequest, response, error) {
- return function(actualRequest, options, callback) {
- assert.deepStrictEqual(actualRequest, expectedRequest);
- if (error) {
- callback(error);
- } else if (response) {
- callback(null, response);
- } else {
- callback(null);
- }
- };
-}
-
-function mockBidiStreamingGrpcMethod(expectedRequest, response, error) {
- return () => {
- const mockStream = new PassThrough({
- objectMode: true,
- transform: (chunk, enc, callback) => {
- assert.deepStrictEqual(chunk, expectedRequest);
- if (error) {
- callback(error);
- } else {
- callback(null, response);
- }
- },
- });
- return mockStream;
- };
-}
-
-function mockLongRunningGrpcMethod(expectedRequest, response, error) {
- return request => {
- assert.deepStrictEqual(request, expectedRequest);
- const mockOperation = {
- promise: function() {
- return new Promise((resolve, reject) => {
- if (error) {
- reject(error);
- } else {
- resolve([response]);
- }
- });
- },
- };
- return Promise.resolve([mockOperation]);
- };
-}
diff --git a/test/gapic-v1p1beta1.js b/test/gapic-speech-v1p1beta1.ts
similarity index 59%
rename from test/gapic-v1p1beta1.js
rename to test/gapic-speech-v1p1beta1.ts
index 08dafb51..5314de71 100644
--- a/test/gapic-v1p1beta1.js
+++ b/test/gapic-speech-v1p1beta1.ts
@@ -11,80 +11,135 @@
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
+//
+// ** This file is automatically generated by gapic-generator-typescript. **
+// ** https://github.com/googleapis/gapic-generator-typescript **
+// ** All changes to this file may be overwritten. **
-'use strict';
-
-const assert = require('assert');
-const {PassThrough} = require('stream');
-
+import * as protosTypes from '../protos/protos';
+import * as assert from 'assert';
const speechModule = require('../src');
+import {PassThrough} from 'stream';
+
const FAKE_STATUS_CODE = 1;
-const error = new Error();
-error.code = FAKE_STATUS_CODE;
+class FakeError {
+ name: string;
+ message: string;
+ code: number;
+ constructor(n: number) {
+ this.name = 'fakeName';
+ this.message = 'fake message';
+ this.code = n;
+ }
+}
+const error = new FakeError(FAKE_STATUS_CODE);
+export interface Callback {
+ (err: FakeError | null, response?: {} | null): void;
+}
-describe('SpeechClient', () => {
+export class Operation {
+ constructor() {}
+ promise() {}
+}
+function mockSimpleGrpcMethod(
+ expectedRequest: {},
+ response: {} | null,
+ error: FakeError | null
+) {
+ return (actualRequest: {}, options: {}, callback: Callback) => {
+ assert.deepStrictEqual(actualRequest, expectedRequest);
+ if (error) {
+ callback(error);
+ } else if (response) {
+ callback(null, response);
+ } else {
+ callback(null);
+ }
+ };
+}
+function mockBidiStreamingGrpcMethod(
+ expectedRequest: {},
+ response: {} | null,
+ error: FakeError | null
+) {
+ return () => {
+ const mockStream = new PassThrough({
+ objectMode: true,
+ transform: (chunk: {}, enc: {}, callback: Callback) => {
+ assert.deepStrictEqual(chunk, expectedRequest);
+ if (error) {
+ callback(error);
+ } else {
+ callback(null, response);
+ }
+ },
+ });
+ return mockStream;
+ };
+}
+function mockLongRunningGrpcMethod(
+ expectedRequest: {},
+ response: {} | null,
+ error?: {} | null
+) {
+ return (request: {}) => {
+ assert.deepStrictEqual(request, expectedRequest);
+ const mockOperation = {
+ promise() {
+ return new Promise((resolve, reject) => {
+ if (error) {
+ reject(error);
+ } else {
+ resolve([response]);
+ }
+ });
+ },
+ };
+ return Promise.resolve([mockOperation]);
+ };
+}
+describe('v1p1beta1.SpeechClient', () => {
it('has servicePath', () => {
const servicePath = speechModule.v1p1beta1.SpeechClient.servicePath;
assert(servicePath);
});
-
it('has apiEndpoint', () => {
const apiEndpoint = speechModule.v1p1beta1.SpeechClient.apiEndpoint;
assert(apiEndpoint);
});
-
it('has port', () => {
const port = speechModule.v1p1beta1.SpeechClient.port;
assert(port);
assert(typeof port === 'number');
});
-
- it('should create a client with no options', () => {
+ it('should create a client with no options', () => {
const client = new speechModule.v1p1beta1.SpeechClient();
assert(client);
});
-
it('should create a client with gRPC fallback', () => {
- const client = new speechModule.v1p1beta1.SpeechClient({fallback: true});
+ const client = new speechModule.v1p1beta1.SpeechClient({
+ fallback: true,
+ });
assert(client);
});
-
describe('recognize', () => {
it('invokes recognize without error', done => {
const client = new speechModule.v1p1beta1.SpeechClient({
credentials: {client_email: 'bogus', private_key: 'bogus'},
projectId: 'bogus',
});
-
// Mock request
- const encoding = 'FLAC';
- const sampleRateHertz = 44100;
- const languageCode = 'en-US';
- const config = {
- encoding: encoding,
- sampleRateHertz: sampleRateHertz,
- languageCode: languageCode,
- };
- const uri = 'gs://bucket_name/file_name.flac';
- const audio = {
- uri: uri,
- };
- const request = {
- config: config,
- audio: audio,
- };
-
+ const request: protosTypes.google.cloud.speech.v1p1beta1.IRecognizeRequest = {};
// Mock response
const expectedResponse = {};
-
- // Mock Grpc layer
+ // Mock gRPC layer
client._innerApiCalls.recognize = mockSimpleGrpcMethod(
request,
- expectedResponse
+ expectedResponse,
+ null
);
-
- client.recognize(request, (err, response) => {
+ client.recognize(request, (err: {}, response: {}) => {
assert.ifError(err);
assert.deepStrictEqual(response, expectedResponse);
done();
@@ -96,86 +151,50 @@ describe('SpeechClient', () => {
credentials: {client_email: 'bogus', private_key: 'bogus'},
projectId: 'bogus',
});
-
// Mock request
- const encoding = 'FLAC';
- const sampleRateHertz = 44100;
- const languageCode = 'en-US';
- const config = {
- encoding: encoding,
- sampleRateHertz: sampleRateHertz,
- languageCode: languageCode,
- };
- const uri = 'gs://bucket_name/file_name.flac';
- const audio = {
- uri: uri,
- };
- const request = {
- config: config,
- audio: audio,
- };
-
- // Mock Grpc layer
+ const request: protosTypes.google.cloud.speech.v1p1beta1.IRecognizeRequest = {};
+ // Mock response
+ const expectedResponse = {};
+ // Mock gRPC layer
client._innerApiCalls.recognize = mockSimpleGrpcMethod(
request,
null,
error
);
-
- client.recognize(request, (err, response) => {
- assert(err instanceof Error);
+ client.recognize(request, (err: FakeError, response: {}) => {
+ assert(err instanceof FakeError);
assert.strictEqual(err.code, FAKE_STATUS_CODE);
assert(typeof response === 'undefined');
done();
});
});
});
-
- describe('longRunningRecognize', function() {
+ describe('longRunningRecognize', () => {
it('invokes longRunningRecognize without error', done => {
const client = new speechModule.v1p1beta1.SpeechClient({
credentials: {client_email: 'bogus', private_key: 'bogus'},
projectId: 'bogus',
});
-
// Mock request
- const encoding = 'FLAC';
- const sampleRateHertz = 44100;
- const languageCode = 'en-US';
- const config = {
- encoding: encoding,
- sampleRateHertz: sampleRateHertz,
- languageCode: languageCode,
- };
- const uri = 'gs://bucket_name/file_name.flac';
- const audio = {
- uri: uri,
- };
- const request = {
- config: config,
- audio: audio,
- };
-
+ const request: protosTypes.google.cloud.speech.v1p1beta1.ILongRunningRecognizeRequest = {};
// Mock response
const expectedResponse = {};
-
- // Mock Grpc layer
+ // Mock gRPC layer
client._innerApiCalls.longRunningRecognize = mockLongRunningGrpcMethod(
request,
expectedResponse
);
-
client
.longRunningRecognize(request)
- .then(responses => {
+ .then((responses: [Operation]) => {
const operation = responses[0];
- return operation.promise();
+ return operation ? operation.promise() : {};
})
- .then(responses => {
+ .then((responses: [Operation]) => {
assert.deepStrictEqual(responses[0], expectedResponse);
done();
})
- .catch(err => {
+ .catch((err: {}) => {
done(err);
});
});
@@ -185,166 +204,85 @@ describe('SpeechClient', () => {
credentials: {client_email: 'bogus', private_key: 'bogus'},
projectId: 'bogus',
});
-
// Mock request
- const encoding = 'FLAC';
- const sampleRateHertz = 44100;
- const languageCode = 'en-US';
- const config = {
- encoding: encoding,
- sampleRateHertz: sampleRateHertz,
- languageCode: languageCode,
- };
- const uri = 'gs://bucket_name/file_name.flac';
- const audio = {
- uri: uri,
- };
- const request = {
- config: config,
- audio: audio,
- };
-
- // Mock Grpc layer
+ const request: protosTypes.google.cloud.speech.v1p1beta1.ILongRunningRecognizeRequest = {};
+ // Mock response
+ const expectedResponse = {};
+ // Mock gRPC layer
client._innerApiCalls.longRunningRecognize = mockLongRunningGrpcMethod(
request,
null,
error
);
-
client
.longRunningRecognize(request)
- .then(responses => {
+ .then((responses: [Operation]) => {
const operation = responses[0];
- return operation.promise();
+ return operation ? operation.promise() : {};
})
.then(() => {
assert.fail();
})
- .catch(err => {
- assert(err instanceof Error);
+ .catch((err: FakeError) => {
+ assert(err instanceof FakeError);
assert.strictEqual(err.code, FAKE_STATUS_CODE);
done();
});
});
-
- it('has longrunning decoder functions', () => {
- const client = new speechModule.v1p1beta1.SpeechClient({
- credentials: {client_email: 'bogus', private_key: 'bogus'},
- projectId: 'bogus',
- });
- assert(
- client._descriptors.longrunning.longRunningRecognize
- .responseDecoder instanceof Function
- );
- assert(
- client._descriptors.longrunning.longRunningRecognize
- .metadataDecoder instanceof Function
- );
- });
});
-
describe('streamingRecognize', () => {
it('invokes streamingRecognize without error', done => {
const client = new speechModule.v1p1beta1.SpeechClient({
credentials: {client_email: 'bogus', private_key: 'bogus'},
projectId: 'bogus',
});
-
+ // Mock request
+ const request = {};
// Mock response
const expectedResponse = {};
-
- // Mock Grpc layer
+ // Mock gRPC layer
client._innerApiCalls.streamingRecognize = mockBidiStreamingGrpcMethod(
- {streamingConfig: {}},
- expectedResponse
+ request,
+ expectedResponse,
+ null
);
-
const stream = client
- .streamingRecognize()
- .on('data', response => {
+ ._streamingRecognize()
+ .on('data', (response: {}) => {
assert.deepStrictEqual(response, expectedResponse);
done();
})
- .on('error', err => {
+ .on('error', (err: FakeError) => {
done(err);
});
-
- stream.write();
+ stream.write(request);
});
-
it('invokes streamingRecognize with error', done => {
const client = new speechModule.v1p1beta1.SpeechClient({
credentials: {client_email: 'bogus', private_key: 'bogus'},
projectId: 'bogus',
});
-
- // Mock Grpc layer
+ // Mock request
+ const request = {};
+ // Mock response
+ const expectedResponse = {};
+ // Mock gRPC layer
client._innerApiCalls.streamingRecognize = mockBidiStreamingGrpcMethod(
- {streamingConfig: {}},
+ request,
null,
error
);
-
const stream = client
- .streamingRecognize()
+ ._streamingRecognize()
.on('data', () => {
assert.fail();
})
- .on('error', err => {
- assert(err instanceof Error);
+ .on('error', (err: FakeError) => {
+ assert(err instanceof FakeError);
assert.strictEqual(err.code, FAKE_STATUS_CODE);
done();
});
-
- stream.write();
+ stream.write(request);
});
});
});
-
-function mockSimpleGrpcMethod(expectedRequest, response, error) {
- return function(actualRequest, options, callback) {
- assert.deepStrictEqual(actualRequest, expectedRequest);
- if (error) {
- callback(error);
- } else if (response) {
- callback(null, response);
- } else {
- callback(null);
- }
- };
-}
-
-function mockBidiStreamingGrpcMethod(expectedRequest, response, error) {
- return () => {
- const mockStream = new PassThrough({
- objectMode: true,
- transform: (chunk, enc, callback) => {
- assert.deepStrictEqual(chunk, expectedRequest);
- if (error) {
- callback(error);
- } else {
- callback(null, response);
- }
- },
- });
- return mockStream;
- };
-}
-
-function mockLongRunningGrpcMethod(expectedRequest, response, error) {
- return request => {
- assert.deepStrictEqual(request, expectedRequest);
- const mockOperation = {
- promise: function() {
- return new Promise((resolve, reject) => {
- if (error) {
- reject(error);
- } else {
- resolve([response]);
- }
- });
- },
- };
- return Promise.resolve([mockOperation]);
- };
-}
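Both regenerated suites swap the hand-built FLAC fixtures for empty objects typed against the proto interfaces. For reference, a populated request equivalent to the deleted fixture (a sketch; values are copied from the removed lines, and the regenerated tests intentionally send `{}` instead):

```ts
// A populated v1p1beta1 RecognizeRequest matching the fixture the old JS
// test assembled by hand before it was replaced with a typed empty object.
const request = {
  config: {
    encoding: 'FLAC',
    sampleRateHertz: 44100,
    languageCode: 'en-US',
  },
  audio: {
    uri: 'gs://bucket_name/file_name.flac',
  },
};
```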
diff --git a/test/gapic-v1.test.js b/test/gapic-v1.test.js
deleted file mode 100644
index dce086a3..00000000
--- a/test/gapic-v1.test.js
+++ /dev/null
@@ -1,249 +0,0 @@
-// Copyright 2017, Google LLC All rights reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-'use strict';
-
-const assert = require('assert');
-
-const speechModule = require('../src');
-
-const FAKE_STATUS_CODE = 1;
-const error = new Error();
-error.code = FAKE_STATUS_CODE;
-
-describe('SpeechClient', () => {
- describe('recognize', () => {
- it('invokes recognize without error', done => {
- const client = new speechModule.v1.SpeechClient({
- credentials: {client_email: 'bogus', private_key: 'bogus'},
- projectId: 'bogus',
- });
-
- // Mock request
- const encoding = 'FLAC';
- const sampleRateHertz = 44100;
- const languageCode = 'en-US';
- const config = {
- encoding: encoding,
- sampleRateHertz: sampleRateHertz,
- languageCode: languageCode,
- };
- const uri = 'gs://bucket_name/file_name.flac';
- const audio = {
- uri: uri,
- };
- const request = {
- config: config,
- audio: audio,
- };
-
- // Mock response
- const expectedResponse = {};
-
- // Mock Grpc layer
- client._innerApiCalls.recognize = mockSimpleGrpcMethod(
- request,
- expectedResponse
- );
-
- client.recognize(request, (err, response) => {
- assert.ifError(err);
- assert.deepStrictEqual(response, expectedResponse);
- done();
- });
- });
-
- it('invokes recognize with error', done => {
- const client = new speechModule.v1.SpeechClient({
- credentials: {client_email: 'bogus', private_key: 'bogus'},
- projectId: 'bogus',
- });
-
- // Mock request
- const encoding = 'FLAC';
- const sampleRateHertz = 44100;
- const languageCode = 'en-US';
- const config = {
- encoding: encoding,
- sampleRateHertz: sampleRateHertz,
- languageCode: languageCode,
- };
- const uri = 'gs://bucket_name/file_name.flac';
- const audio = {
- uri: uri,
- };
- const request = {
- config: config,
- audio: audio,
- };
-
- // Mock Grpc layer
- client._innerApiCalls.recognize = mockSimpleGrpcMethod(
- request,
- null,
- error
- );
-
- client.recognize(request, (err, response) => {
- assert(err instanceof Error);
- assert.strictEqual(err.code, FAKE_STATUS_CODE);
- assert(typeof response === 'undefined');
- done();
- });
- });
- });
-
- describe('longRunningRecognize', function() {
- it('invokes longRunningRecognize without error', done => {
- const client = new speechModule.v1.SpeechClient({
- credentials: {client_email: 'bogus', private_key: 'bogus'},
- projectId: 'bogus',
- });
-
- // Mock request
- const encoding = 'FLAC';
- const sampleRateHertz = 44100;
- const languageCode = 'en-US';
- const config = {
- encoding: encoding,
- sampleRateHertz: sampleRateHertz,
- languageCode: languageCode,
- };
- const uri = 'gs://bucket_name/file_name.flac';
- const audio = {
- uri: uri,
- };
- const request = {
- config: config,
- audio: audio,
- };
-
- // Mock response
- const expectedResponse = {};
-
- // Mock Grpc layer
- client._innerApiCalls.longRunningRecognize = mockLongRunningGrpcMethod(
- request,
- expectedResponse
- );
-
- client
- .longRunningRecognize(request)
- .then(responses => {
- const operation = responses[0];
- return operation.promise();
- })
- .then(responses => {
- assert.deepStrictEqual(responses[0], expectedResponse);
- done();
- })
- .catch(err => {
- done(err);
- });
- });
-
- it('invokes longRunningRecognize with error', done => {
- const client = new speechModule.v1.SpeechClient({
- credentials: {client_email: 'bogus', private_key: 'bogus'},
- projectId: 'bogus',
- });
-
- // Mock request
- const encoding = 'FLAC';
- const sampleRateHertz = 44100;
- const languageCode = 'en-US';
- const config = {
- encoding: encoding,
- sampleRateHertz: sampleRateHertz,
- languageCode: languageCode,
- };
- const uri = 'gs://bucket_name/file_name.flac';
- const audio = {
- uri: uri,
- };
- const request = {
- config: config,
- audio: audio,
- };
-
- // Mock Grpc layer
- client._innerApiCalls.longRunningRecognize = mockLongRunningGrpcMethod(
- request,
- null,
- error
- );
-
- client
- .longRunningRecognize(request)
- .then(responses => {
- const operation = responses[0];
- return operation.promise();
- })
- .then(() => {
- assert.fail();
- })
- .catch(err => {
- assert(err instanceof Error);
- assert.strictEqual(err.code, FAKE_STATUS_CODE);
- done();
- });
- });
-
- it('has longrunning decoder functions', () => {
- const client = new speechModule.v1.SpeechClient({
- credentials: {client_email: 'bogus', private_key: 'bogus'},
- projectId: 'bogus',
- });
- assert(
- client._descriptors.longrunning.longRunningRecognize
- .responseDecoder instanceof Function
- );
- assert(
- client._descriptors.longrunning.longRunningRecognize
- .metadataDecoder instanceof Function
- );
- });
- });
-});
-
-function mockSimpleGrpcMethod(expectedRequest, response, error) {
- return function(actualRequest, options, callback) {
- assert.deepStrictEqual(actualRequest, expectedRequest);
- if (error) {
- callback(error);
- } else if (response) {
- callback(null, response);
- } else {
- callback(null);
- }
- };
-}
-
-function mockLongRunningGrpcMethod(expectedRequest, response, error) {
- return request => {
- assert.deepStrictEqual(request, expectedRequest);
- const mockOperation = {
- promise: function() {
- return new Promise((resolve, reject) => {
- if (error) {
- reject(error);
- } else {
- resolve([response]);
- }
- });
- },
- };
- return Promise.resolve([mockOperation]);
- };
-}
diff --git a/test/helpers.test.js b/test/helpers.test.ts
similarity index 70%
rename from test/helpers.test.js
rename to test/helpers.test.ts
index de2c3d2c..e975ffcf 100644
--- a/test/helpers.test.js
+++ b/test/helpers.test.ts
@@ -16,46 +16,14 @@
'use strict';
-const assert = require('assert');
-const common = require('@google-cloud/common');
-const proxyquire = require('proxyquire');
-const sinon = require('sinon');
-const stream = require('stream');
+import * as assert from 'assert';
+import * as sinon from 'sinon';
+import * as stream from 'stream';
+
+const speech = require('../src');
describe('Speech helper methods', () => {
- let client;
- let FakeApiErrorOverride;
const sandbox = sinon.createSandbox();
- let speech;
-
- class FakeApiError extends common.util.ApiError {
- constructor(error) {
- super();
-
- if (FakeApiErrorOverride) {
- return FakeApiErrorOverride(error);
- }
- }
- }
-
- before(() => {
- speech = proxyquire('../', {
- './helpers.js': proxyquire('../src/helpers.js', {
- '@google-cloud/common': {
- util: {
- ApiError: FakeApiError,
- },
- },
- }),
- });
- });
-
- beforeEach(() => {
- client = new speech.v1.SpeechClient({
- credentials: {client_email: 'bogus', private_key: 'bogus'},
- projectId: 'bogus',
- });
- });
afterEach(() => {
sandbox.restore();
@@ -68,6 +36,11 @@ describe('Speech helper methods', () => {
const OPTIONS = {timeout: Infinity};
it('writes the config to the resulting stream', done => {
+ const client = new speech.v1.SpeechClient({
+ credentials: {client_email: 'bogus', private_key: 'bogus'},
+ projectId: 'bogus',
+ });
+
// Stub the underlying _streamingRecognize method to just return
// a bogus stream.
const requestStream = new stream.PassThrough({objectMode: true});
@@ -89,13 +62,18 @@ describe('Speech helper methods', () => {
streamingConfig: CONFIG,
});
setImmediate(done);
- next(null, data);
+ next(null);
};
userStream.write(undefined);
});
it('does not require options', () => {
+ const client = new speech.v1.SpeechClient({
+ credentials: {client_email: 'bogus', private_key: 'bogus'},
+ projectId: 'bogus',
+ });
+
// Stub the underlying _streamingRecognize method to just return
// a bogus stream.
const requestStream = new stream.PassThrough({objectMode: true});
@@ -112,6 +90,11 @@ describe('Speech helper methods', () => {
});
it('destroys the user stream when the request stream errors', done => {
+ const client = new speech.v1.SpeechClient({
+ credentials: {client_email: 'bogus', private_key: 'bogus'},
+ projectId: 'bogus',
+ });
+
// Stub the underlying _streamingRecognize method to just return
// a bogus stream.
const requestStream = new stream.PassThrough({objectMode: true});
@@ -123,7 +106,7 @@ describe('Speech helper methods', () => {
const error = new Error('Request stream error');
- userStream.once('error', err => {
+ userStream.once('error', (err: Error) => {
assert.strictEqual(err, error);
done();
});
@@ -131,34 +114,12 @@ describe('Speech helper methods', () => {
requestStream.emit('error', error);
});
- it('destroys the user stream when the response contains an error', done => {
- // Stub the underlying _streamingRecognize method to just return
- // a bogus stream.
- const requestStream = new stream.PassThrough({objectMode: true});
- sandbox
- .stub(client._innerApiCalls, 'streamingRecognize')
- .returns(requestStream);
-
- const userStream = client.streamingRecognize(CONFIG, OPTIONS);
-
- const error = {};
- const fakeApiError = {};
-
- FakeApiErrorOverride = err => {
- assert.strictEqual(err, error);
- return fakeApiError;
- };
-
- userStream.once('error', err => {
- assert.strictEqual(err, fakeApiError);
- done();
+ it('re-emits response from the request stream', done => {
+ const client = new speech.v1.SpeechClient({
+ credentials: {client_email: 'bogus', private_key: 'bogus'},
+ projectId: 'bogus',
});
- userStream.emit('writing');
- requestStream.end({error});
- });
-
- it('re-emits response from the request stream', done => {
// Stub the underlying _streamingRecognize method to just return
// a bogus stream.
const requestStream = new stream.PassThrough({objectMode: true});
@@ -170,7 +131,7 @@ describe('Speech helper methods', () => {
const response = {};
- userStream.on('response', _response => {
+ userStream.on('response', (_response: {}) => {
assert.strictEqual(_response, response);
done();
});
@@ -180,6 +141,11 @@ describe('Speech helper methods', () => {
});
it('wraps incoming audio data', done => {
+ const client = new speech.v1.SpeechClient({
+ credentials: {client_email: 'bogus', private_key: 'bogus'},
+ projectId: 'bogus',
+ });
+
// Stub the underlying _streamingRecognize method to just return
// a bogus stream.
const requestStream = new stream.PassThrough({objectMode: true});
@@ -193,18 +159,18 @@ describe('Speech helper methods', () => {
let count = 0;
requestStream._write = (data, enc, next) => {
- if (count === 0)
+ if (count === 0) {
assert.deepStrictEqual(data, {
streamingConfig: CONFIG,
});
- else if (count === 1) {
+ } else if (count === 1) {
assert.deepStrictEqual(data, {
- audioContent: audioContent,
+ audioContent,
});
setImmediate(done);
}
count++;
- next(null, data);
+ next(null);
};
userStream.end(audioContent);
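The surviving helper tests pin down the streaming wire format: the first chunk written to the request stream must be `{streamingConfig}`, and each later user chunk is wrapped as `{audioContent}`. A minimal sketch of that wrapping behavior, assuming a plain object-mode Transform (illustrative, not the library's actual implementation):

```ts
import {Transform} from 'stream';

// Sketch of the behavior the assertions above encode: push the streaming
// config once, then wrap every incoming chunk as {audioContent}.
function makeWrappingStream(streamingConfig: {}): Transform {
  let configSent = false;
  return new Transform({
    objectMode: true,
    transform(chunk, _enc, next) {
      if (!configSent) {
        this.push({streamingConfig});
        configSent = true;
      }
      this.push({audioContent: chunk});
      next();
    },
  });
}

// Usage: makeWrappingStream({config: {languageCode: 'en-US'}}).end('audio bytes');
```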
diff --git a/tsconfig.json b/tsconfig.json
new file mode 100644
index 00000000..613d3559
--- /dev/null
+++ b/tsconfig.json
@@ -0,0 +1,19 @@
+{
+ "extends": "./node_modules/gts/tsconfig-google.json",
+ "compilerOptions": {
+ "rootDir": ".",
+ "outDir": "build",
+ "resolveJsonModule": true,
+ "lib": [
+ "es2016",
+ "dom"
+ ]
+ },
+ "include": [
+ "src/*.ts",
+ "src/**/*.ts",
+ "test/*.ts",
+ "test/**/*.ts",
+ "system-test/*.ts"
+ ]
+}
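One non-obvious flag above is `resolveJsonModule`. A plausible reason (an assumption; the webpack alias further below pins the same `../../../package.json` path) is that compiled sources read version metadata from package.json, which requires JSON imports to type-check:

```ts
// Hypothetical file under src/: with resolveJsonModule on, the JSON import
// is resolved and typed by the compiler instead of failing at build time.
import * as pkg from '../package.json';

console.log(`package version: ${pkg.version}`);
```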
diff --git a/tslint.json b/tslint.json
new file mode 100644
index 00000000..617dc975
--- /dev/null
+++ b/tslint.json
@@ -0,0 +1,3 @@
+{
+ "extends": "gts/tslint.json"
+}
diff --git a/webpack.config.js b/webpack.config.js
index fcdf8f27..b9c46a41 100644
--- a/webpack.config.js
+++ b/webpack.config.js
@@ -12,10 +12,12 @@
// See the License for the specific language governing permissions and
// limitations under the License.
+const path = require('path');
+
module.exports = {
- entry: './src/browser.js',
+ entry: './src/index.ts',
output: {
- library: 'speech',
+ library: 'Speech',
filename: './speech.js',
},
node: {
@@ -24,21 +26,37 @@ module.exports = {
crypto: 'empty',
},
resolve: {
- extensions: ['.js', '.json'],
+ alias: {
+ '../../../package.json': path.resolve(__dirname, 'package.json'),
+ },
+ extensions: ['.js', '.json', '.ts'],
},
module: {
rules: [
{
- test: /node_modules[\\/]retry-request[\\/]/,
- use: 'null-loader',
+ test: /\.tsx?$/,
+ use: 'ts-loader',
+ exclude: /node_modules/
+ },
+ {
+ test: /node_modules[\\/]@grpc[\\/]grpc-js/,
+ use: 'null-loader'
+ },
+ {
+ test: /node_modules[\\/]grpc/,
+ use: 'null-loader'
+ },
+ {
+ test: /node_modules[\\/]retry-request/,
+ use: 'null-loader'
},
{
- test: /node_modules[\\/]https-proxy-agent[\\/]/,
- use: 'null-loader',
+ test: /node_modules[\\/]https?-proxy-agent/,
+ use: 'null-loader'
},
{
- test: /node_modules[\\/]gtoken[\\/]/,
- use: 'null-loader',
+ test: /node_modules[\\/]gtoken/,
+ use: 'null-loader'
},
],
},
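The expanded null-loader rules stub out every Node-only transport in the bundle (grpc, @grpc/grpc-js, gtoken, the proxy agents, retry-request), so a browser build can only talk to the API through the gRPC fallback transport, the same `fallback: true` option the regenerated tests exercise. A sketch of browser usage under stated assumptions:

```ts
// Assumptions: the emitted ./speech.js is loaded via a <script> tag, webpack
// exposes it under the `Speech` library name configured above, and the
// src/index.ts entry re-exports the versioned clients as the package does.
declare const Speech: {
  v1: {SpeechClient: new (options: {}) => unknown};
};

// The Node-only gRPC modules resolve to empty stubs in this bundle, so the
// client must be created in fallback mode.
const client = new Speech.v1.SpeechClient({fallback: true});
```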