-
Notifications
You must be signed in to change notification settings - Fork 655
fix: Unblock the releases on Node Bigquery #7946
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
base: main
Are you sure you want to change the base?
Changes from all commits
fd3975c
f31ccc7
5544e36
1bbd192
c588794
73c4ed5
ab16690
2410d66
bdba4dd
ca0a51c
a1eb4d2
a959b3a
f5b7e31
61db403
01ab527
67a4822
757dfdc
cbeeacc
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
| Original file line number | Diff line number | Diff line change |
|---|---|---|
|
|
@@ -1101,10 +1101,12 @@ export class BigQuery extends Service { | |
| }), | ||
| }; | ||
| } else if ((providedType as string).toUpperCase() === 'TIMESTAMP(12)') { | ||
| return { | ||
| type: 'TIMESTAMP', | ||
| timestampPrecision: '12', | ||
| }; | ||
| if (process.env.BIGQUERY_PICOSECOND_SUPPORT === 'true') { | ||
| return { | ||
| type: 'TIMESTAMP', | ||
| timestampPrecision: '12', | ||
| }; | ||
| } | ||
| } | ||
|
|
||
| providedType = (providedType as string).toUpperCase(); | ||
|
|
@@ -2364,18 +2366,31 @@ export class BigQuery extends Service { | |
| if (options.job) { | ||
| return undefined; | ||
| } | ||
| const hasAnyFormatOpts = | ||
| options['formatOptions.timestampOutputFormat'] !== undefined || | ||
| options['formatOptions.useInt64Timestamp'] !== undefined; | ||
| const defaultOpts = hasAnyFormatOpts | ||
| ? {} | ||
| : { | ||
| timestampOutputFormat: 'ISO8601_STRING', | ||
| }; | ||
| const formatOptions = extend(defaultOpts, { | ||
| timestampOutputFormat: options['formatOptions.timestampOutputFormat'], | ||
| useInt64Timestamp: options['formatOptions.useInt64Timestamp'], | ||
| }); | ||
| let formatOptions; | ||
| if (process.env.BIGQUERY_PICOSECOND_SUPPORT === 'true') { | ||
| const hasAnyFormatOpts = | ||
| options['formatOptions.timestampOutputFormat'] !== undefined || | ||
| options['formatOptions.useInt64Timestamp'] !== undefined; | ||
| const defaultOpts = hasAnyFormatOpts | ||
| ? {} | ||
| : { | ||
| timestampOutputFormat: 'ISO8601_STRING', | ||
| }; | ||
|
Contributor
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. I think this can be simplified. Additionally, if we do this, I don't think we need the |
||
| formatOptions = extend(defaultOpts, { | ||
| timestampOutputFormat: options['formatOptions.timestampOutputFormat'], | ||
| useInt64Timestamp: options['formatOptions.useInt64Timestamp'], | ||
| }); | ||
| } else { | ||
| formatOptions = extend( | ||
| { | ||
| useInt64Timestamp: true, | ||
| }, | ||
| { | ||
| timestampOutputFormat: options['formatOptions.timestampOutputFormat'], | ||
| useInt64Timestamp: options['formatOptions.useInt64Timestamp'], | ||
| }, | ||
| ); | ||
| } | ||
| const req: bigquery.IQueryRequest = { | ||
| useQueryCache: queryObj.useQueryCache, | ||
| labels: queryObj.labels, | ||
|
|
@@ -2576,39 +2591,48 @@ function convertSchemaFieldValue( | |
| break; | ||
| } | ||
| case 'TIMESTAMP': { | ||
| /* | ||
| At this point, 'value' will equal the timestamp value returned from the | ||
| server. We need to parse this value differently depending on its format. | ||
| For example, value could be any of the following: | ||
| 1672574400123456 | ||
| 1672574400.123456 | ||
| 2023-01-01T12:00:00.123456789123Z | ||
| */ | ||
| const listParams = options.listParams; | ||
| const timestampOutputFormat = listParams | ||
| ? listParams['formatOptions.timestampOutputFormat'] | ||
| : undefined; | ||
| const useInt64Timestamp = listParams | ||
| ? listParams['formatOptions.useInt64Timestamp'] | ||
| : undefined; | ||
| if (timestampOutputFormat === 'ISO8601_STRING') { | ||
| // value is ISO string, create BigQueryTimestamp wrapping the string | ||
| value = BigQuery.timestamp(value); | ||
| } else if ( | ||
| useInt64Timestamp !== true && | ||
| timestampOutputFormat !== 'INT64' && | ||
| (useInt64Timestamp !== undefined || timestampOutputFormat !== undefined) | ||
| ) { | ||
| // NOTE: The additional | ||
| // (useInt64Timestamp !== undefined || timestampOutputFormat !== und...) | ||
| // check is to ensure that calls to the /query endpoint remain | ||
| // unaffected as they will not be providing any listParams. | ||
| // | ||
| // If the program reaches this point in time then | ||
| // value is float seconds so convert to BigQueryTimestamp | ||
| value = BigQuery.timestamp(Number(value)); | ||
| // High precision timestamp behaviour | ||
| if (process.env.BIGQUERY_PICOSECOND_SUPPORT === 'true') { | ||
|
Contributor
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. I don't think the feature flag check is needed here; we can keep the improved code that passes the listParams across boundaries. |
||
| /* | ||
| At this point, 'value' will equal the timestamp value returned from the | ||
| server. We need to parse this value differently depending on its format. | ||
| For example, value could be any of the following: | ||
| 1672574400123456 | ||
| 1672574400.123456 | ||
| 2023-01-01T12:00:00.123456789123Z | ||
| */ | ||
| const listParams = options.listParams; | ||
| const timestampOutputFormat = listParams | ||
| ? listParams['formatOptions.timestampOutputFormat'] | ||
| : undefined; | ||
| const useInt64Timestamp = listParams | ||
| ? listParams['formatOptions.useInt64Timestamp'] | ||
| : undefined; | ||
|
Contributor
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. We can use the |
||
| if (timestampOutputFormat === 'ISO8601_STRING') { | ||
| // value is ISO string, create BigQueryTimestamp wrapping the string | ||
| value = BigQuery.timestamp(value); | ||
| } else if ( | ||
| useInt64Timestamp !== true && | ||
| timestampOutputFormat !== 'INT64' && | ||
| (useInt64Timestamp !== undefined || | ||
| timestampOutputFormat !== undefined) | ||
| ) { | ||
| // NOTE: The additional | ||
| // (useInt64Timestamp !== undefined || timestampOutputFormat !== und...) | ||
| // check is to ensure that calls to the /query endpoint remain | ||
| // unaffected as they will not be providing any listParams. | ||
| // | ||
| // If the program reaches this point in time then | ||
| // value is float seconds so convert to BigQueryTimestamp | ||
| value = BigQuery.timestamp(Number(value)); | ||
| } else { | ||
|
Contributor
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. What if in the future an output option is added other than 'ISO8601_STRING', 'INT64', and "float seconds"? It seems like this logic could break by applying an incompatible default. Would it be more robust in the |
||
| // Expect int64 micros (default or explicit INT64) | ||
| const pd = new PreciseDate(); | ||
| pd.setFullTime(PreciseDate.parseFull(BigInt(value) * BigInt(1000))); | ||
| value = BigQuery.timestamp(pd); | ||
| } | ||
| } else { | ||
| // Expect int64 micros (default or explicit INT64) | ||
| // Old behaviour | ||
| const pd = new PreciseDate(); | ||
| pd.setFullTime(PreciseDate.parseFull(BigInt(value) * BigInt(1000))); | ||
| value = BigQuery.timestamp(pd); | ||
|
|
@@ -2834,16 +2858,6 @@ export class BigQueryTimestamp { | |
| } else if (typeof value === 'string') { | ||
| if (/^\d{4}-\d{1,2}-\d{1,2}/.test(value)) { | ||
| pd = new PreciseDate(value); | ||
| if (value.match(/\.\d{10,}/) && !Number.isNaN(pd.getTime())) { | ||
| /* | ||
| TODO: | ||
| When https://github.com/googleapis/nodejs-precise-date/pull/302 | ||
| is released and we have full support for picoseconds in PreciseData | ||
| then we can remove this if block. | ||
| */ | ||
| this.value = value; | ||
| return; | ||
| } | ||
| } else { | ||
| const floatValue = Number.parseFloat(value); | ||
| if (!Number.isNaN(floatValue)) { | ||
|
|
||
| Original file line number | Diff line number | Diff line change |
|---|---|---|
|
|
@@ -55,7 +55,6 @@ import {JobMetadata, JobOptions} from './job'; | |
| import bigquery from './types'; | ||
| import {IntegerTypeCastOptions} from './bigquery'; | ||
| import {RowQueue} from './rowQueue'; | ||
| import IDataFormatOptions = bigquery.IDataFormatOptions; | ||
|
|
||
| // This is supposed to be a @google-cloud/storage `File` type. The storage npm | ||
| // module includes these types, but is current installed as a devDependency. | ||
|
|
@@ -1867,13 +1866,35 @@ class Table extends ServiceObject { | |
| callback!(err, null, null, resp); | ||
| return; | ||
| } | ||
| try { | ||
| /* | ||
| Without this try/catch block, calls to getRows will hang indefinitely if | ||
| a call to mergeSchemaWithRows_ fails because the error never makes it to | ||
| the callback. Instead, pass the error to the callback the user provides | ||
| so that the user can see the error. | ||
| */ | ||
| if (process.env.BIGQUERY_PICOSECOND_SUPPORT === 'true') { | ||
|
Contributor
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. Same as others. I think checking the feature flag here is making the code much more complicated to understand. |
||
| // High precision timestamp behaviour | ||
| try { | ||
| /* | ||
| Without this try/catch block, calls to getRows will hang indefinitely if | ||
| a call to mergeSchemaWithRows_ fails because the error never makes it to | ||
| the callback. Instead, pass the error to the callback the user provides | ||
| so that the user can see the error. | ||
| */ | ||
| if (options.skipParsing) { | ||
| rows = rows || []; | ||
| } else { | ||
|
Contributor
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. Same in this file. I think the options.skipParsing logic could live outside of this flag check (since it happens either way). |
||
| rows = BigQuery.mergeSchemaWithRows_( | ||
| this.metadata.schema, | ||
| rows || [], | ||
| { | ||
| wrapIntegers, | ||
| selectedFields, | ||
| parseJSON, | ||
| listParams: qs, | ||
| }, | ||
| ); | ||
| } | ||
| } catch (err) { | ||
| callback!(err as Error | null, null, null, resp); | ||
| return; | ||
| } | ||
| } else { | ||
| // Old behaviour | ||
| if (options.skipParsing) { | ||
| rows = rows || []; | ||
| } else { | ||
|
|
@@ -1884,25 +1905,33 @@ class Table extends ServiceObject { | |
| wrapIntegers, | ||
| selectedFields, | ||
| parseJSON, | ||
| listParams: qs, | ||
| }, | ||
| ); | ||
| } | ||
| } catch (err) { | ||
| callback!(err as Error | null, null, null, resp); | ||
| return; | ||
| } | ||
| callback!(null, rows, nextQuery, resp); | ||
| }; | ||
|
Contributor
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. Same as https://github.com/googleapis/google-cloud-node/pull/7946/changes#r3029295761 — this is the core part that needs to handle the feature flag. |
||
| const hasAnyFormatOpts = | ||
| options['formatOptions.timestampOutputFormat'] !== undefined || | ||
| options['formatOptions.useInt64Timestamp'] !== undefined; | ||
| const defaultOpts = hasAnyFormatOpts | ||
| ? {} | ||
| : { | ||
| 'formatOptions.timestampOutputFormat': 'ISO8601_STRING', | ||
| }; | ||
| const qs = extend(defaultOpts, options); | ||
|
|
||
| let qs: any; | ||
|
Contributor
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. Can we use a more robust type here than any? Do we have an options interface we can reference instead? |
||
| if (process.env.BIGQUERY_PICOSECOND_SUPPORT === 'true') { | ||
| const hasAnyFormatOpts = | ||
| options['formatOptions.timestampOutputFormat'] !== undefined || | ||
| options['formatOptions.useInt64Timestamp'] !== undefined; | ||
| const defaultOpts = hasAnyFormatOpts | ||
| ? {} | ||
| : { | ||
| 'formatOptions.timestampOutputFormat': 'ISO8601_STRING', | ||
| }; | ||
| qs = extend(defaultOpts, options); | ||
| } else { | ||
| qs = extend( | ||
| { | ||
| 'formatOptions.useInt64Timestamp': true, | ||
| }, | ||
| options, | ||
| ); | ||
| } | ||
|
|
||
| this.request( | ||
| { | ||
| uri: '/data', | ||
|
|
||
| Original file line number | Diff line number | Diff line change |
|---|---|---|
|
|
@@ -1042,25 +1042,27 @@ describe('BigQuery', () => { | |
| }); | ||
|
|
||
| it('should create a table with timestampPrecision', async () => { | ||
| const table = dataset.table(generateName('timestamp-precision-table')); | ||
| const schema = { | ||
| fields: [ | ||
| { | ||
| name: 'ts_field', | ||
| type: 'TIMESTAMP', | ||
| timestampPrecision: 12, | ||
| }, | ||
| ], | ||
| }; | ||
| try { | ||
| await table.create({schema}); | ||
| const [metadata] = await table.getMetadata(); | ||
| assert.deepStrictEqual( | ||
| metadata.schema.fields[0].timestampPrecision, | ||
| '12', | ||
| ); | ||
| } catch (e) { | ||
| assert.ifError(e); | ||
| if (process.env.BIGQUERY_PICOSECOND_SUPPORT === 'true') { | ||
| const table = dataset.table(generateName('timestamp-precision-table')); | ||
| const schema = { | ||
| fields: [ | ||
| { | ||
| name: 'ts_field', | ||
| type: 'TIMESTAMP', | ||
| timestampPrecision: 12, | ||
| }, | ||
| ], | ||
| }; | ||
| try { | ||
| await table.create({schema}); | ||
| const [metadata] = await table.getMetadata(); | ||
| assert.deepStrictEqual( | ||
| metadata.schema.fields[0].timestampPrecision, | ||
| '12', | ||
| ); | ||
| } catch (e) { | ||
| assert.ifError(e); | ||
| } | ||
| } | ||
| }); | ||
|
|
||
|
|
@@ -1562,6 +1564,11 @@ describe('BigQuery', () => { | |
|
|
||
| testCases.forEach(testCase => { | ||
| it(`should handle ${testCase.name}`, async () => { | ||
| if (process.env.BIGQUERY_PICOSECOND_SUPPORT !== 'true') { | ||
| // These tests are only important when the high precision | ||
| // timestamp support is turned on. | ||
| return; | ||
| } | ||
|
Contributor
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. Rather than disabling the tests, we should instead test the feature flag behavior. We should be able to stub the environment variable behavior in tests like this: |
||
| /* | ||
| The users use the new TIMESTAMP(12) type to indicate they want to | ||
| opt in to using timestampPrecision=12. The reason is that some queries | ||
|
|
@@ -1614,6 +1621,11 @@ describe('BigQuery', () => { | |
| } | ||
| }); | ||
| it(`should handle nested ${testCase.name}`, async () => { | ||
| if (process.env.BIGQUERY_PICOSECOND_SUPPORT !== 'true') { | ||
| // These tests are only important when the high precision | ||
| // timestamp support is turned on. | ||
| return; | ||
| } | ||
| /* | ||
| The users use the new TIMESTAMP(12) type to indicate they want to | ||
| opt in to using timestampPrecision=12. The reason is that some queries | ||
|
|
@@ -2009,6 +2021,11 @@ describe('BigQuery', () => { | |
|
|
||
| testCases.forEach(testCase => { | ||
| it(`should handle ${testCase.name}`, async () => { | ||
| if (process.env.BIGQUERY_PICOSECOND_SUPPORT !== 'true') { | ||
| // These tests are only important when the high precision | ||
| // timestamp support is turned on. | ||
| return; | ||
| } | ||
| /* | ||
| The users use the new TIMESTAMP(12) type to indicate they want to | ||
| opt in to using timestampPrecision=12. The reason is that some queries | ||
|
|
@@ -2063,6 +2080,11 @@ describe('BigQuery', () => { | |
| } | ||
| }); | ||
| it(`should handle nested ${testCase.name}`, async () => { | ||
| if (process.env.BIGQUERY_PICOSECOND_SUPPORT !== 'true') { | ||
| // These tests are only important when the high precision | ||
| // timestamp support is turned on. | ||
| return; | ||
| } | ||
| /* | ||
| The users use the new TIMESTAMP(12) type to indicate they want to | ||
| opt in to using timestampPrecision=12. The reason is that some queries | ||
|
|
||
There was a problem hiding this comment.
Choose a reason for hiding this comment.
The reason will be displayed to describe this comment to others. Learn more.
I think this is the most important part that should handle the feature flag. If BIGQUERY_PICOSECOND_SUPPORT is enabled, we should use
timestampOutputFormat: 'ISO8601_STRING'; if not, use useInt64Timestamp: true. We can still allow customers to pass the timestampOutputFormat, even if it is allowlisted now. But in the future they can use it just fine without a new release. Later we can remove the feature flag and just default to using it too.