fix: manual generate library for ongoing stream on closed client #1011

Merged 2 commits on Feb 3, 2022
327 changes: 327 additions & 0 deletions samples/generated/v2/snippet_metadata.google.bigtable.v2.json
@@ -0,0 +1,327 @@
{
"clientLibrary": {
"name": "nodejs-bigtable",
"version": "0.1.0",
Contributor Author:
@sofisl could you take a look? Does the version look right to you?

Contributor:
This should be the current version of the library, but there's no way to put that information into the generator. I think, for now, we should just change it manually, but then have release-please update this value when a new release is made. I'll open a bug on release-please.

"language": "TYPESCRIPT",
"apis": [
{
"id": "google.bigtable.v2",
"version": "v2"
}
]
},
"snippets": [
{
"regionTag": "bigtable_v2_generated_Bigtable_ReadRows_async",
"title": "bigtable readRows Sample",
"origin": "API_DEFINITION",
"description": " Streams back the contents of all requested rows in key order, optionally applying the same Reader filter to each. Depending on their size, rows and cells may be broken up across multiple responses, but atomicity of each row will still be preserved. See the ReadRowsResponse documentation for details.",
"canonical": true,
"file": "bigtable.read_rows.js",
"language": "JAVASCRIPT",
"segments": [
{
"start": 20,
"end": 69,
"type": "FULL"
}
],
"clientMethod": {
"shortName": "ReadRows",
"fullName": "google.bigtable.v2.Bigtable.ReadRows",
"async": true,
"parameters": [
{
"name": "table_name",
"type": "TYPE_STRING"
},
{
"name": "app_profile_id",
"type": "TYPE_STRING"
},
{
"name": "rows",
"type": ".google.bigtable.v2.RowSet"
},
{
"name": "filter",
"type": ".google.bigtable.v2.RowFilter"
},
{
"name": "rows_limit",
"type": "TYPE_INT64"
}
],
"resultType": ".google.bigtable.v2.ReadRowsResponse",
"client": {
"shortName": "BigtableClient",
"fullName": "google.bigtable.v2.BigtableClient"
},
"method": {
"shortName": "ReadRows",
"fullName": "google.bigtable.v2.Bigtable.ReadRows",
"service": {
"shortName": "Bigtable",
"fullName": "google.bigtable.v2.Bigtable"
}
}
}
},
{
"regionTag": "bigtable_v2_generated_Bigtable_SampleRowKeys_async",
"title": "bigtable sampleRowKeys Sample",
"origin": "API_DEFINITION",
"description": " Returns a sample of row keys in the table. The returned row keys will delimit contiguous sections of the table of approximately equal size, which can be used to break up the data for distributed tasks like mapreduces.",
"canonical": true,
"file": "bigtable.sample_row_keys.js",
"language": "JAVASCRIPT",
"segments": [
{
"start": 20,
"end": 54,
"type": "FULL"
}
],
"clientMethod": {
"shortName": "SampleRowKeys",
"fullName": "google.bigtable.v2.Bigtable.SampleRowKeys",
"async": true,
"parameters": [
{
"name": "table_name",
"type": "TYPE_STRING"
},
{
"name": "app_profile_id",
"type": "TYPE_STRING"
}
],
"resultType": ".google.bigtable.v2.SampleRowKeysResponse",
"client": {
"shortName": "BigtableClient",
"fullName": "google.bigtable.v2.BigtableClient"
},
"method": {
"shortName": "SampleRowKeys",
"fullName": "google.bigtable.v2.Bigtable.SampleRowKeys",
"service": {
"shortName": "Bigtable",
"fullName": "google.bigtable.v2.Bigtable"
}
}
}
},
{
"regionTag": "bigtable_v2_generated_Bigtable_MutateRow_async",
"title": "bigtable mutateRow Sample",
"origin": "API_DEFINITION",
"description": " Mutates a row atomically. Cells already present in the row are left unchanged unless explicitly changed by `mutation`.",
"canonical": true,
"file": "bigtable.mutate_row.js",
"language": "JAVASCRIPT",
"segments": [
{
"start": 20,
"end": 64,
"type": "FULL"
}
],
"clientMethod": {
"shortName": "MutateRow",
"fullName": "google.bigtable.v2.Bigtable.MutateRow",
"async": true,
"parameters": [
{
"name": "table_name",
"type": "TYPE_STRING"
},
{
"name": "app_profile_id",
"type": "TYPE_STRING"
},
{
"name": "row_key",
"type": "TYPE_BYTES"
},
{
"name": "mutations",
"type": "TYPE_MESSAGE[]"
}
],
"resultType": ".google.bigtable.v2.MutateRowResponse",
"client": {
"shortName": "BigtableClient",
"fullName": "google.bigtable.v2.BigtableClient"
},
"method": {
"shortName": "MutateRow",
"fullName": "google.bigtable.v2.Bigtable.MutateRow",
"service": {
"shortName": "Bigtable",
"fullName": "google.bigtable.v2.Bigtable"
}
}
}
},
{
"regionTag": "bigtable_v2_generated_Bigtable_MutateRows_async",
"title": "bigtable mutateRows Sample",
"origin": "API_DEFINITION",
"description": " Mutates multiple rows in a batch. Each individual row is mutated atomically as in MutateRow, but the entire batch is not executed atomically.",
"canonical": true,
"file": "bigtable.mutate_rows.js",
"language": "JAVASCRIPT",
"segments": [
{
"start": 20,
"end": 61,
"type": "FULL"
}
],
"clientMethod": {
"shortName": "MutateRows",
"fullName": "google.bigtable.v2.Bigtable.MutateRows",
"async": true,
"parameters": [
{
"name": "table_name",
"type": "TYPE_STRING"
},
{
"name": "app_profile_id",
"type": "TYPE_STRING"
},
{
"name": "entries",
"type": "TYPE_MESSAGE[]"
}
],
"resultType": ".google.bigtable.v2.MutateRowsResponse",
"client": {
"shortName": "BigtableClient",
"fullName": "google.bigtable.v2.BigtableClient"
},
"method": {
"shortName": "MutateRows",
"fullName": "google.bigtable.v2.Bigtable.MutateRows",
"service": {
"shortName": "Bigtable",
"fullName": "google.bigtable.v2.Bigtable"
}
}
}
},
{
"regionTag": "bigtable_v2_generated_Bigtable_CheckAndMutateRow_async",
"title": "bigtable checkAndMutateRow Sample",
"origin": "API_DEFINITION",
"description": " Mutates a row atomically based on the output of a predicate Reader filter.",
"canonical": true,
"file": "bigtable.check_and_mutate_row.js",
"language": "JAVASCRIPT",
"segments": [
{
"start": 20,
"end": 81,
"type": "FULL"
}
],
"clientMethod": {
"shortName": "CheckAndMutateRow",
"fullName": "google.bigtable.v2.Bigtable.CheckAndMutateRow",
"async": true,
"parameters": [
{
"name": "table_name",
"type": "TYPE_STRING"
},
{
"name": "app_profile_id",
"type": "TYPE_STRING"
},
{
"name": "row_key",
"type": "TYPE_BYTES"
},
{
"name": "predicate_filter",
"type": ".google.bigtable.v2.RowFilter"
},
{
"name": "true_mutations",
"type": "TYPE_MESSAGE[]"
},
{
"name": "false_mutations",
"type": "TYPE_MESSAGE[]"
}
],
"resultType": ".google.bigtable.v2.CheckAndMutateRowResponse",
"client": {
"shortName": "BigtableClient",
"fullName": "google.bigtable.v2.BigtableClient"
},
"method": {
"shortName": "CheckAndMutateRow",
"fullName": "google.bigtable.v2.Bigtable.CheckAndMutateRow",
"service": {
"shortName": "Bigtable",
"fullName": "google.bigtable.v2.Bigtable"
}
}
}
},
{
"regionTag": "bigtable_v2_generated_Bigtable_ReadModifyWriteRow_async",
"title": "bigtable readModifyWriteRow Sample",
"origin": "API_DEFINITION",
"description": " Modifies a row atomically on the server. The method reads the latest existing timestamp and value from the specified columns and writes a new entry based on pre-defined read/modify/write rules. The new value for the timestamp is the greater of the existing timestamp or the current server time. The method returns the new contents of all modified cells.",
"canonical": true,
"file": "bigtable.read_modify_write_row.js",
"language": "JAVASCRIPT",
"segments": [
{
"start": 20,
"end": 65,
"type": "FULL"
}
],
"clientMethod": {
"shortName": "ReadModifyWriteRow",
"fullName": "google.bigtable.v2.Bigtable.ReadModifyWriteRow",
"async": true,
"parameters": [
{
"name": "table_name",
"type": "TYPE_STRING"
},
{
"name": "app_profile_id",
"type": "TYPE_STRING"
},
{
"name": "row_key",
"type": "TYPE_BYTES"
},
{
"name": "rules",
"type": "TYPE_MESSAGE[]"
}
],
"resultType": ".google.bigtable.v2.ReadModifyWriteRowResponse",
"client": {
"shortName": "BigtableClient",
"fullName": "google.bigtable.v2.BigtableClient"
},
"method": {
"shortName": "ReadModifyWriteRow",
"fullName": "google.bigtable.v2.Bigtable.ReadModifyWriteRow",
"service": {
"shortName": "Bigtable",
"fullName": "google.bigtable.v2.Bigtable"
}
}
}
}
]
}
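
For orientation, each snippet entry above points at a generated sample file: the "file" and "segments" fields identify the region-tagged code under samples/generated/v2, and "parameters" mirrors the request fields of the proto method. The ReadRows entry, for example, describes a server-streaming call roughly like the sketch below. This is written in TypeScript for consistency with the client code in this PR; the actual generated file is plain JavaScript, and the assumption that the gapic surface is reachable as the package's exported v2 namespace follows the convention of the generated samples rather than anything stated in this diff.

// Sketch of the pattern covered by bigtable_v2_generated_Bigtable_ReadRows_async.
// Assumes the gapic client is exported as `v2` from the package root.
import {v2} from '@google-cloud/bigtable';

async function sampleReadRows(tableName: string): Promise<void> {
  const client = new v2.BigtableClient();
  // readRows is server-streaming: responses arrive as ReadRowsResponse chunks.
  const stream = client.readRows({tableName});
  stream.on('data', response => {
    console.log(response);
  });
  stream.on('error', err => {
    throw err;
  });
  stream.on('end', () => {
    console.log('readRows stream complete');
  });
}
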
21 changes: 19 additions & 2 deletions src/v2/bigtable_client.ts
@@ -18,8 +18,15 @@

/* global window */
import * as gax from 'google-gax';
import {Callback, CallOptions, Descriptors, ClientOptions} from 'google-gax';

import {
Callback,
CallOptions,
Descriptors,
ClientOptions,
GoogleError,
} from 'google-gax';

import {PassThrough} from 'stream';
import * as protos from '../../protos/protos';
import jsonProtos = require('../../protos/protos.json');
/**
@@ -236,6 +243,16 @@ export class BigtableClient {
stub =>
(...args: Array<{}>) => {
if (this._terminated) {
if (methodName in this.descriptors.stream) {
const stream = new PassThrough();
setImmediate(() => {
stream.emit(
'error',
new GoogleError('The client has already been closed.')
);
});
return stream;
}
return Promise.reject('The client has already been closed.');
}
const func = stub[methodName];
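
The behavioral effect of the second hunk: after close(), calling a server-streaming method such as readRows now returns a PassThrough stream that emits a GoogleError on the next tick, instead of the wrapper handing back a rejected promise that a stream-oriented caller would never await. A minimal sketch of how a caller observes this, under the same assumption as above that the gapic client is exported as the v2 namespace:

// Sketch only: demonstrates the closed-client behavior added in this PR.
import {v2} from '@google-cloud/bigtable';

async function readAfterClose(tableName: string): Promise<void> {
  const client = new v2.BigtableClient();
  await client.close();

  // With this fix the call still returns a stream; the failure is delivered
  // through its 'error' event rather than as an unhandled promise rejection.
  const stream = client.readRows({tableName});
  stream.on('error', err => {
    // Expected message: 'The client has already been closed.'
    console.error('readRows after close failed:', err.message);
  });
  stream.on('data', () => {
    // Never reached on a closed client.
  });
}
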