// SPDX-FileCopyrightText: 2024 The Crossplane Authors <https://crossplane.io>
//
// SPDX-License-Identifier: Apache-2.0

// Code generated by upjet. DO NOT EDIT.

package v1beta2

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime/schema"
	v1 "github.com/crossplane/crossplane-runtime/apis/common/v1"
)
type CopyInitParameters struct {
// Specifies whether the job is allowed to create new tables. The following values are supported:
// CREATE_IF_NEEDED: If the table does not exist, BigQuery creates the table.
// CREATE_NEVER: The table must already exist. If it does not, a 'notFound' error is returned in the job result.
// Creation, truncation and append actions occur as one atomic update upon job completion
// Default value is CREATE_IF_NEEDED.
// Possible values are: CREATE_IF_NEEDED, CREATE_NEVER.
CreateDisposition *string `json:"createDisposition,omitempty" tf:"create_disposition,omitempty"`
// Custom encryption configuration (e.g., Cloud KMS keys)
// Structure is documented below.
DestinationEncryptionConfiguration *DestinationEncryptionConfigurationInitParameters `json:"destinationEncryptionConfiguration,omitempty" tf:"destination_encryption_configuration,omitempty"`
// The destination table.
// Structure is documented below.
DestinationTable *DestinationTableInitParameters `json:"destinationTable,omitempty" tf:"destination_table,omitempty"`
// Source tables to copy.
// Structure is documented below.
SourceTables []SourceTablesInitParameters `json:"sourceTables,omitempty" tf:"source_tables,omitempty"`
// Specifies the action that occurs if the destination table already exists. The following values are supported:
// WRITE_TRUNCATE: If the table already exists, BigQuery overwrites the table data and uses the schema from the query result.
// WRITE_APPEND: If the table already exists, BigQuery appends the data to the table.
// WRITE_EMPTY: If the table already exists and contains data, a 'duplicate' error is returned in the job result.
// Each action is atomic and only occurs if BigQuery is able to complete the job successfully.
// Creation, truncation and append actions occur as one atomic update upon job completion.
// Default value is WRITE_EMPTY.
// Possible values are: WRITE_TRUNCATE, WRITE_APPEND, WRITE_EMPTY.
WriteDisposition *string `json:"writeDisposition,omitempty" tf:"write_disposition,omitempty"`
}
type CopyObservation struct {
// Specifies whether the job is allowed to create new tables. The following values are supported:
// CREATE_IF_NEEDED: If the table does not exist, BigQuery creates the table.
// CREATE_NEVER: The table must already exist. If it does not, a 'notFound' error is returned in the job result.
// Creation, truncation and append actions occur as one atomic update upon job completion
// Default value is CREATE_IF_NEEDED.
// Possible values are: CREATE_IF_NEEDED, CREATE_NEVER.
CreateDisposition *string `json:"createDisposition,omitempty" tf:"create_disposition,omitempty"`
// Custom encryption configuration (e.g., Cloud KMS keys)
// Structure is documented below.
DestinationEncryptionConfiguration *DestinationEncryptionConfigurationObservation `json:"destinationEncryptionConfiguration,omitempty" tf:"destination_encryption_configuration,omitempty"`
// The destination table.
// Structure is documented below.
DestinationTable *DestinationTableObservation `json:"destinationTable,omitempty" tf:"destination_table,omitempty"`
// Source tables to copy.
// Structure is documented below.
SourceTables []SourceTablesObservation `json:"sourceTables,omitempty" tf:"source_tables,omitempty"`
// Specifies the action that occurs if the destination table already exists. The following values are supported:
// WRITE_TRUNCATE: If the table already exists, BigQuery overwrites the table data and uses the schema from the query result.
// WRITE_APPEND: If the table already exists, BigQuery appends the data to the table.
// WRITE_EMPTY: If the table already exists and contains data, a 'duplicate' error is returned in the job result.
// Each action is atomic and only occurs if BigQuery is able to complete the job successfully.
// Creation, truncation and append actions occur as one atomic update upon job completion.
// Default value is WRITE_EMPTY.
// Possible values are: WRITE_TRUNCATE, WRITE_APPEND, WRITE_EMPTY.
WriteDisposition *string `json:"writeDisposition,omitempty" tf:"write_disposition,omitempty"`
}
type CopyParameters struct {
// Specifies whether the job is allowed to create new tables. The following values are supported:
// CREATE_IF_NEEDED: If the table does not exist, BigQuery creates the table.
// CREATE_NEVER: The table must already exist. If it does not, a 'notFound' error is returned in the job result.
// Creation, truncation and append actions occur as one atomic update upon job completion
// Default value is CREATE_IF_NEEDED.
// Possible values are: CREATE_IF_NEEDED, CREATE_NEVER.
// +kubebuilder:validation:Optional
CreateDisposition *string `json:"createDisposition,omitempty" tf:"create_disposition,omitempty"`
// Custom encryption configuration (e.g., Cloud KMS keys)
// Structure is documented below.
// +kubebuilder:validation:Optional
DestinationEncryptionConfiguration *DestinationEncryptionConfigurationParameters `json:"destinationEncryptionConfiguration,omitempty" tf:"destination_encryption_configuration,omitempty"`
// The destination table.
// Structure is documented below.
// +kubebuilder:validation:Optional
DestinationTable *DestinationTableParameters `json:"destinationTable,omitempty" tf:"destination_table,omitempty"`
// Source tables to copy.
// Structure is documented below.
// +kubebuilder:validation:Optional
SourceTables []SourceTablesParameters `json:"sourceTables" tf:"source_tables,omitempty"`
// Specifies the action that occurs if the destination table already exists. The following values are supported:
// WRITE_TRUNCATE: If the table already exists, BigQuery overwrites the table data and uses the schema from the query result.
// WRITE_APPEND: If the table already exists, BigQuery appends the data to the table.
// WRITE_EMPTY: If the table already exists and contains data, a 'duplicate' error is returned in the job result.
// Each action is atomic and only occurs if BigQuery is able to complete the job successfully.
// Creation, truncation and append actions occur as one atomic update upon job completion.
// Default value is WRITE_EMPTY.
// Possible values are: WRITE_TRUNCATE, WRITE_APPEND, WRITE_EMPTY.
// +kubebuilder:validation:Optional
WriteDisposition *string `json:"writeDisposition,omitempty" tf:"write_disposition,omitempty"`
}
type DefaultDatasetInitParameters struct {
// The ID of the dataset containing this table.
// +crossplane:generate:reference:type=github.com/upbound/provider-gcp/apis/bigquery/v1beta2.Dataset
// +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID()
DatasetID *string `json:"datasetId,omitempty" tf:"dataset_id,omitempty"`
// Reference to a Dataset in bigquery to populate datasetId.
// +kubebuilder:validation:Optional
DatasetIDRef *v1.Reference `json:"datasetIdRef,omitempty" tf:"-"`
// Selector for a Dataset in bigquery to populate datasetId.
// +kubebuilder:validation:Optional
DatasetIDSelector *v1.Selector `json:"datasetIdSelector,omitempty" tf:"-"`
// The ID of the project containing this table.
ProjectID *string `json:"projectId,omitempty" tf:"project_id,omitempty"`
}
type DefaultDatasetObservation struct {
// The ID of the dataset containing this table.
DatasetID *string `json:"datasetId,omitempty" tf:"dataset_id,omitempty"`
// The ID of the project containing this table.
ProjectID *string `json:"projectId,omitempty" tf:"project_id,omitempty"`
}
type DefaultDatasetParameters struct {
// The ID of the dataset containing this table.
// +crossplane:generate:reference:type=github.com/upbound/provider-gcp/apis/bigquery/v1beta2.Dataset
// +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID()
// +kubebuilder:validation:Optional
DatasetID *string `json:"datasetId,omitempty" tf:"dataset_id,omitempty"`
// Reference to a Dataset in bigquery to populate datasetId.
// +kubebuilder:validation:Optional
DatasetIDRef *v1.Reference `json:"datasetIdRef,omitempty" tf:"-"`
// Selector for a Dataset in bigquery to populate datasetId.
// +kubebuilder:validation:Optional
DatasetIDSelector *v1.Selector `json:"datasetIdSelector,omitempty" tf:"-"`
// The ID of the project containing this table.
// +kubebuilder:validation:Optional
ProjectID *string `json:"projectId,omitempty" tf:"project_id,omitempty"`
}
type DestinationEncryptionConfigurationInitParameters struct {
// Describes the Cloud KMS encryption key that will be used to protect destination BigQuery table.
// The BigQuery Service Account associated with your project requires access to this encryption key.
// +crossplane:generate:reference:type=github.com/upbound/provider-gcp/apis/kms/v1beta2.CryptoKey
// +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID()
KMSKeyName *string `json:"kmsKeyName,omitempty" tf:"kms_key_name,omitempty"`
// Reference to a CryptoKey in kms to populate kmsKeyName.
// +kubebuilder:validation:Optional
KMSKeyNameRef *v1.Reference `json:"kmsKeyNameRef,omitempty" tf:"-"`
// Selector for a CryptoKey in kms to populate kmsKeyName.
// +kubebuilder:validation:Optional
KMSKeyNameSelector *v1.Selector `json:"kmsKeyNameSelector,omitempty" tf:"-"`
}
type DestinationEncryptionConfigurationObservation struct {
// Describes the Cloud KMS encryption key that will be used to protect destination BigQuery table.
// The BigQuery Service Account associated with your project requires access to this encryption key.
KMSKeyName *string `json:"kmsKeyName,omitempty" tf:"kms_key_name,omitempty"`
// (Output)
// Describes the Cloud KMS encryption key version used to protect destination BigQuery table.
KMSKeyVersion *string `json:"kmsKeyVersion,omitempty" tf:"kms_key_version,omitempty"`
}
type DestinationEncryptionConfigurationParameters struct {
// Describes the Cloud KMS encryption key that will be used to protect destination BigQuery table.
// The BigQuery Service Account associated with your project requires access to this encryption key.
// +crossplane:generate:reference:type=github.com/upbound/provider-gcp/apis/kms/v1beta2.CryptoKey
// +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID()
// +kubebuilder:validation:Optional
KMSKeyName *string `json:"kmsKeyName,omitempty" tf:"kms_key_name,omitempty"`
// Reference to a CryptoKey in kms to populate kmsKeyName.
// +kubebuilder:validation:Optional
KMSKeyNameRef *v1.Reference `json:"kmsKeyNameRef,omitempty" tf:"-"`
// Selector for a CryptoKey in kms to populate kmsKeyName.
// +kubebuilder:validation:Optional
KMSKeyNameSelector *v1.Selector `json:"kmsKeyNameSelector,omitempty" tf:"-"`
}
type DestinationTableInitParameters struct {
// The ID of the dataset containing this table.
// +crossplane:generate:reference:type=github.com/upbound/provider-gcp/apis/bigquery/v1beta2.Dataset
DatasetID *string `json:"datasetId,omitempty" tf:"dataset_id,omitempty"`
// Reference to a Dataset in bigquery to populate datasetId.
// +kubebuilder:validation:Optional
DatasetIDRef *v1.Reference `json:"datasetIdRef,omitempty" tf:"-"`
// Selector for a Dataset in bigquery to populate datasetId.
// +kubebuilder:validation:Optional
DatasetIDSelector *v1.Selector `json:"datasetIdSelector,omitempty" tf:"-"`
// The ID of the project containing this table.
ProjectID *string `json:"projectId,omitempty" tf:"project_id,omitempty"`
// The table. Can be specified as {{table_id}} if project_id and dataset_id are also set,
// or of the form projects/{{project}}/datasets/{{dataset_id}}/tables/{{table_id}} if not.
// +crossplane:generate:reference:type=github.com/upbound/provider-gcp/apis/bigquery/v1beta2.Table
// +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID()
TableID *string `json:"tableId,omitempty" tf:"table_id,omitempty"`
// Reference to a Table in bigquery to populate tableId.
// +kubebuilder:validation:Optional
TableIDRef *v1.Reference `json:"tableIdRef,omitempty" tf:"-"`
// Selector for a Table in bigquery to populate tableId.
// +kubebuilder:validation:Optional
TableIDSelector *v1.Selector `json:"tableIdSelector,omitempty" tf:"-"`
}
type DestinationTableObservation struct {
// The ID of the dataset containing this table.
DatasetID *string `json:"datasetId,omitempty" tf:"dataset_id,omitempty"`
// The ID of the project containing this table.
ProjectID *string `json:"projectId,omitempty" tf:"project_id,omitempty"`
// The table. Can be specified as {{table_id}} if project_id and dataset_id are also set,
// or of the form projects/{{project}}/datasets/{{dataset_id}}/tables/{{table_id}} if not.
TableID *string `json:"tableId,omitempty" tf:"table_id,omitempty"`
}
type DestinationTableParameters struct {
// The ID of the dataset containing this table.
// +crossplane:generate:reference:type=github.com/upbound/provider-gcp/apis/bigquery/v1beta2.Dataset
// +kubebuilder:validation:Optional
DatasetID *string `json:"datasetId,omitempty" tf:"dataset_id,omitempty"`
// Reference to a Dataset in bigquery to populate datasetId.
// +kubebuilder:validation:Optional
DatasetIDRef *v1.Reference `json:"datasetIdRef,omitempty" tf:"-"`
// Selector for a Dataset in bigquery to populate datasetId.
// +kubebuilder:validation:Optional
DatasetIDSelector *v1.Selector `json:"datasetIdSelector,omitempty" tf:"-"`
// The ID of the project containing this table.
// +kubebuilder:validation:Optional
ProjectID *string `json:"projectId,omitempty" tf:"project_id,omitempty"`
// The table. Can be specified as {{table_id}} if project_id and dataset_id are also set,
// or of the form projects/{{project}}/datasets/{{dataset_id}}/tables/{{table_id}} if not.
// +crossplane:generate:reference:type=github.com/upbound/provider-gcp/apis/bigquery/v1beta2.Table
// +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID()
// +kubebuilder:validation:Optional
TableID *string `json:"tableId,omitempty" tf:"table_id,omitempty"`
// Reference to a Table in bigquery to populate tableId.
// +kubebuilder:validation:Optional
TableIDRef *v1.Reference `json:"tableIdRef,omitempty" tf:"-"`
// Selector for a Table in bigquery to populate tableId.
// +kubebuilder:validation:Optional
TableIDSelector *v1.Selector `json:"tableIdSelector,omitempty" tf:"-"`
}
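
// exampleDestinationTableReferences is an illustrative, hand-written sketch and is
// not part of the upjet-generated API: it shows three ways a destination table can
// be identified with DestinationTableParameters. The project, dataset, table and
// label names are placeholders, not values from this provider.
func exampleDestinationTableReferences() []DestinationTableParameters {
	s := func(v string) *string { return &v }
	return []DestinationTableParameters{
		// 1. Direct IDs supplied by the caller.
		{ProjectID: s("my-project"), DatasetID: s("my_dataset"), TableID: s("my_table")},
		// 2. References by name to managed Dataset/Table resources; the IDs are
		// resolved from those resources during reconciliation.
		{
			DatasetIDRef: &v1.Reference{Name: "my-dataset"},
			TableIDRef:   &v1.Reference{Name: "my-table"},
		},
		// 3. Label selectors; a matching resource populates the corresponding ID.
		{
			DatasetIDSelector: &v1.Selector{MatchLabels: map[string]string{"team": "analytics"}},
			TableIDSelector:   &v1.Selector{MatchLabels: map[string]string{"team": "analytics"}},
		},
	}
}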
type ErrorResultInitParameters struct {
}
type ErrorResultObservation struct {
// The geographic location of the job. The default value is US.
Location *string `json:"location,omitempty" tf:"location,omitempty"`
// A human-readable description of the error.
Message *string `json:"message,omitempty" tf:"message,omitempty"`
// A short error code that summarizes the error.
Reason *string `json:"reason,omitempty" tf:"reason,omitempty"`
}
type ErrorResultParameters struct {
}
type ErrorsInitParameters struct {
}
type ErrorsObservation struct {
// The geographic location of the job. The default value is US.
Location *string `json:"location,omitempty" tf:"location,omitempty"`
// A human-readable description of the error.
Message *string `json:"message,omitempty" tf:"message,omitempty"`
// A short error code that summarizes the error.
Reason *string `json:"reason,omitempty" tf:"reason,omitempty"`
}
type ErrorsParameters struct {
}
type ExtractInitParameters struct {
// The compression type to use for exported files. Possible values include GZIP, DEFLATE, SNAPPY, and NONE.
// The default value is NONE. DEFLATE and SNAPPY are only supported for Avro.
Compression *string `json:"compression,omitempty" tf:"compression,omitempty"`
// The exported file format. Possible values include CSV, NEWLINE_DELIMITED_JSON and AVRO for tables and SAVED_MODEL for models.
// The default value for tables is CSV. Tables with nested or repeated fields cannot be exported as CSV.
// The default value for models is SAVED_MODEL.
DestinationFormat *string `json:"destinationFormat,omitempty" tf:"destination_format,omitempty"`
// A list of fully-qualified Google Cloud Storage URIs where the extracted table should be written.
DestinationUris []*string `json:"destinationUris,omitempty" tf:"destination_uris,omitempty"`
// When extracting data in CSV format, this defines the delimiter to use between fields in the exported data.
// Default is ','
FieldDelimiter *string `json:"fieldDelimiter,omitempty" tf:"field_delimiter,omitempty"`
// Whether to print out a header row in the results. Default is true.
PrintHeader *bool `json:"printHeader,omitempty" tf:"print_header,omitempty"`
// A reference to the model being exported.
// Structure is documented below.
SourceModel *SourceModelInitParameters `json:"sourceModel,omitempty" tf:"source_model,omitempty"`
// A reference to the table being exported.
// Structure is documented below.
SourceTable *SourceTableInitParameters `json:"sourceTable,omitempty" tf:"source_table,omitempty"`
// Whether to use logical types when extracting to AVRO format.
UseAvroLogicalTypes *bool `json:"useAvroLogicalTypes,omitempty" tf:"use_avro_logical_types,omitempty"`
}
type ExtractObservation struct {
// The compression type to use for exported files. Possible values include GZIP, DEFLATE, SNAPPY, and NONE.
// The default value is NONE. DEFLATE and SNAPPY are only supported for Avro.
Compression *string `json:"compression,omitempty" tf:"compression,omitempty"`
// The exported file format. Possible values include CSV, NEWLINE_DELIMITED_JSON and AVRO for tables and SAVED_MODEL for models.
// The default value for tables is CSV. Tables with nested or repeated fields cannot be exported as CSV.
// The default value for models is SAVED_MODEL.
DestinationFormat *string `json:"destinationFormat,omitempty" tf:"destination_format,omitempty"`
// A list of fully-qualified Google Cloud Storage URIs where the extracted table should be written.
DestinationUris []*string `json:"destinationUris,omitempty" tf:"destination_uris,omitempty"`
// When extracting data in CSV format, this defines the delimiter to use between fields in the exported data.
// Default is ','
FieldDelimiter *string `json:"fieldDelimiter,omitempty" tf:"field_delimiter,omitempty"`
// Whether to print out a header row in the results. Default is true.
PrintHeader *bool `json:"printHeader,omitempty" tf:"print_header,omitempty"`
// A reference to the model being exported.
// Structure is documented below.
SourceModel *SourceModelObservation `json:"sourceModel,omitempty" tf:"source_model,omitempty"`
// A reference to the table being exported.
// Structure is documented below.
SourceTable *SourceTableObservation `json:"sourceTable,omitempty" tf:"source_table,omitempty"`
// Whether to use logical types when extracting to AVRO format.
UseAvroLogicalTypes *bool `json:"useAvroLogicalTypes,omitempty" tf:"use_avro_logical_types,omitempty"`
}
type ExtractParameters struct {
// The compression type to use for exported files. Possible values include GZIP, DEFLATE, SNAPPY, and NONE.
// The default value is NONE. DEFLATE and SNAPPY are only supported for Avro.
// +kubebuilder:validation:Optional
Compression *string `json:"compression,omitempty" tf:"compression,omitempty"`
// The exported file format. Possible values include CSV, NEWLINE_DELIMITED_JSON and AVRO for tables and SAVED_MODEL for models.
// The default value for tables is CSV. Tables with nested or repeated fields cannot be exported as CSV.
// The default value for models is SAVED_MODEL.
// +kubebuilder:validation:Optional
DestinationFormat *string `json:"destinationFormat,omitempty" tf:"destination_format,omitempty"`
// A list of fully-qualified Google Cloud Storage URIs where the extracted table should be written.
// +kubebuilder:validation:Optional
DestinationUris []*string `json:"destinationUris" tf:"destination_uris,omitempty"`
// When extracting data in CSV format, this defines the delimiter to use between fields in the exported data.
// Default is ','
// +kubebuilder:validation:Optional
FieldDelimiter *string `json:"fieldDelimiter,omitempty" tf:"field_delimiter,omitempty"`
// Whether to print out a header row in the results. Default is true.
// +kubebuilder:validation:Optional
PrintHeader *bool `json:"printHeader,omitempty" tf:"print_header,omitempty"`
// A reference to the model being exported.
// Structure is documented below.
// +kubebuilder:validation:Optional
SourceModel *SourceModelParameters `json:"sourceModel,omitempty" tf:"source_model,omitempty"`
// A reference to the table being exported.
// Structure is documented below.
// +kubebuilder:validation:Optional
SourceTable *SourceTableParameters `json:"sourceTable,omitempty" tf:"source_table,omitempty"`
// Whether to use logical types when extracting to AVRO format.
// +kubebuilder:validation:Optional
UseAvroLogicalTypes *bool `json:"useAvroLogicalTypes,omitempty" tf:"use_avro_logical_types,omitempty"`
}
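
// exampleExtractParameters is an illustrative, hand-written sketch and is not part
// of the upjet-generated API: it configures an extract job that writes gzipped CSV
// with a header row to a placeholder Cloud Storage URI. The bucket name is an
// assumption for the example; SourceTable (a type defined elsewhere in this file)
// would normally be set to the table being exported.
func exampleExtractParameters() ExtractParameters {
	s := func(v string) *string { return &v }
	b := func(v bool) *bool { return &v }
	return ExtractParameters{
		Compression:       s("GZIP"),
		DestinationFormat: s("CSV"),
		DestinationUris:   []*string{s("gs://my-export-bucket/exports/*.csv.gz")},
		FieldDelimiter:    s(","),
		PrintHeader:       b(true),
	}
}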
type JobInitParameters struct {
// Copies a table.
// Structure is documented below.
Copy *CopyInitParameters `json:"copy,omitempty" tf:"copy,omitempty"`
// Configures an extract job.
// Structure is documented below.
Extract *ExtractInitParameters `json:"extract,omitempty" tf:"extract,omitempty"`
// The ID of the job. The ID must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), or dashes (-). The maximum length is 1,024 characters.
JobID *string `json:"jobId,omitempty" tf:"job_id,omitempty"`
// Job timeout in milliseconds. If this time limit is exceeded, BigQuery may attempt to terminate the job.
JobTimeoutMs *string `json:"jobTimeoutMs,omitempty" tf:"job_timeout_ms,omitempty"`
// The labels associated with this job. You can use these to organize and group your jobs.
// +mapType=granular
Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"`
// Configures a load job.
// Structure is documented below.
Load *LoadInitParameters `json:"load,omitempty" tf:"load,omitempty"`
// The geographic location of the job. The default value is US.
Location *string `json:"location,omitempty" tf:"location,omitempty"`
// The ID of the project in which the resource belongs.
// If it is not provided, the provider project is used.
Project *string `json:"project,omitempty" tf:"project,omitempty"`
// Configures a query job.
// Structure is documented below.
Query *QueryInitParameters `json:"query,omitempty" tf:"query,omitempty"`
}
type JobObservation struct {
// Copies a table.
// Structure is documented below.
Copy *CopyObservation `json:"copy,omitempty" tf:"copy,omitempty"`
// +mapType=granular
EffectiveLabels map[string]*string `json:"effectiveLabels,omitempty" tf:"effective_labels,omitempty"`
// Configures an extract job.
// Structure is documented below.
Extract *ExtractObservation `json:"extract,omitempty" tf:"extract,omitempty"`
// an identifier for the resource with format projects/{{project}}/jobs/{{job_id}}
ID *string `json:"id,omitempty" tf:"id,omitempty"`
// The ID of the job. The ID must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), or dashes (-). The maximum length is 1,024 characters.
JobID *string `json:"jobId,omitempty" tf:"job_id,omitempty"`
// Job timeout in milliseconds. If this time limit is exceeded, BigQuery may attempt to terminate the job.
JobTimeoutMs *string `json:"jobTimeoutMs,omitempty" tf:"job_timeout_ms,omitempty"`
// (Output)
// The type of the job.
JobType *string `json:"jobType,omitempty" tf:"job_type,omitempty"`
// The labels associated with this job. You can use these to organize and group your jobs.
// +mapType=granular
Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"`
// Configures a load job.
// Structure is documented below.
Load *LoadObservation `json:"load,omitempty" tf:"load,omitempty"`
// The geographic location of the job. The default value is US.
Location *string `json:"location,omitempty" tf:"location,omitempty"`
// The ID of the project in which the resource belongs.
// If it is not provided, the provider project is used.
Project *string `json:"project,omitempty" tf:"project,omitempty"`
// Configures a query job.
// Structure is documented below.
Query *QueryObservation `json:"query,omitempty" tf:"query,omitempty"`
// The status of this job. Examine this value when polling an asynchronous job to see if the job is complete.
// Structure is documented below.
Status []StatusObservation `json:"status,omitempty" tf:"status,omitempty"`
// (Output)
// The combination of labels configured directly on the resource
// and default labels configured on the provider.
// +mapType=granular
TerraformLabels map[string]*string `json:"terraformLabels,omitempty" tf:"terraform_labels,omitempty"`
// Email address of the user who ran the job.
UserEmail *string `json:"userEmail,omitempty" tf:"user_email,omitempty"`
}
type JobParameters struct {
// Copies a table.
// Structure is documented below.
// +kubebuilder:validation:Optional
Copy *CopyParameters `json:"copy,omitempty" tf:"copy,omitempty"`
// Configures an extract job.
// Structure is documented below.
// +kubebuilder:validation:Optional
Extract *ExtractParameters `json:"extract,omitempty" tf:"extract,omitempty"`
// The ID of the job. The ID must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), or dashes (-). The maximum length is 1,024 characters.
// +kubebuilder:validation:Optional
JobID *string `json:"jobId,omitempty" tf:"job_id,omitempty"`
// Job timeout in milliseconds. If this time limit is exceeded, BigQuery may attempt to terminate the job.
// +kubebuilder:validation:Optional
JobTimeoutMs *string `json:"jobTimeoutMs,omitempty" tf:"job_timeout_ms,omitempty"`
// The labels associated with this job. You can use these to organize and group your jobs.
// +kubebuilder:validation:Optional
// +mapType=granular
Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"`
// Configures a load job.
// Structure is documented below.
// +kubebuilder:validation:Optional
Load *LoadParameters `json:"load,omitempty" tf:"load,omitempty"`
// The geographic location of the job. The default value is US.
// +kubebuilder:validation:Optional
Location *string `json:"location,omitempty" tf:"location,omitempty"`
// The ID of the project in which the resource belongs.
// If it is not provided, the provider project is used.
// +kubebuilder:validation:Optional
Project *string `json:"project,omitempty" tf:"project,omitempty"`
// Configures a query job.
// Structure is documented below.
// +kubebuilder:validation:Optional
Query *QueryParameters `json:"query,omitempty" tf:"query,omitempty"`
}
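
// exampleCopyJobParameters is an illustrative, hand-written sketch and is not part
// of the upjet-generated API: it wires a minimal table-copy configuration through
// JobParameters. The job, project, dataset and table identifiers are placeholders,
// and the SourceTablesParameters entry (a type defined elsewhere in this file) is
// left at its zero value here.
func exampleCopyJobParameters() JobParameters {
	s := func(v string) *string { return &v }
	return JobParameters{
		JobID:    s("job_copy_example"),
		Location: s("US"),
		Copy: &CopyParameters{
			CreateDisposition: s("CREATE_IF_NEEDED"),
			WriteDisposition:  s("WRITE_TRUNCATE"),
			SourceTables:      []SourceTablesParameters{{}},
			DestinationTable: &DestinationTableParameters{
				DatasetID: s("my_dataset"),
				TableID:   s("my_table_copy"),
			},
		},
	}
}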
type LoadDestinationEncryptionConfigurationInitParameters struct {
// Describes the Cloud KMS encryption key that will be used to protect destination BigQuery table.
// The BigQuery Service Account associated with your project requires access to this encryption key.
KMSKeyName *string `json:"kmsKeyName,omitempty" tf:"kms_key_name,omitempty"`
}
type LoadDestinationEncryptionConfigurationObservation struct {
// Describes the Cloud KMS encryption key that will be used to protect destination BigQuery table.
// The BigQuery Service Account associated with your project requires access to this encryption key.
KMSKeyName *string `json:"kmsKeyName,omitempty" tf:"kms_key_name,omitempty"`
// (Output)
// Describes the Cloud KMS encryption key version used to protect destination BigQuery table.
KMSKeyVersion *string `json:"kmsKeyVersion,omitempty" tf:"kms_key_version,omitempty"`
}
type LoadDestinationEncryptionConfigurationParameters struct {
// Describes the Cloud KMS encryption key that will be used to protect destination BigQuery table.
// The BigQuery Service Account associated with your project requires access to this encryption key.
// +kubebuilder:validation:Optional
KMSKeyName *string `json:"kmsKeyName" tf:"kms_key_name,omitempty"`
}
type LoadDestinationTableInitParameters struct {
// The ID of the dataset containing this table.
// +crossplane:generate:reference:type=github.com/upbound/provider-gcp/apis/bigquery/v1beta2.Dataset
DatasetID *string `json:"datasetId,omitempty" tf:"dataset_id,omitempty"`
// Reference to a Dataset in bigquery to populate datasetId.
// +kubebuilder:validation:Optional
DatasetIDRef *v1.Reference `json:"datasetIdRef,omitempty" tf:"-"`
// Selector for a Dataset in bigquery to populate datasetId.
// +kubebuilder:validation:Optional
DatasetIDSelector *v1.Selector `json:"datasetIdSelector,omitempty" tf:"-"`
// The ID of the project containing this table.
ProjectID *string `json:"projectId,omitempty" tf:"project_id,omitempty"`
// The table. Can be specified as {{table_id}} if project_id and dataset_id are also set,
// or of the form projects/{{project}}/datasets/{{dataset_id}}/tables/{{table_id}} if not.
// +crossplane:generate:reference:type=github.com/upbound/provider-gcp/apis/bigquery/v1beta2.Table
// +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID()
TableID *string `json:"tableId,omitempty" tf:"table_id,omitempty"`
// Reference to a Table in bigquery to populate tableId.
// +kubebuilder:validation:Optional
TableIDRef *v1.Reference `json:"tableIdRef,omitempty" tf:"-"`
// Selector for a Table in bigquery to populate tableId.
// +kubebuilder:validation:Optional
TableIDSelector *v1.Selector `json:"tableIdSelector,omitempty" tf:"-"`
}
type LoadDestinationTableObservation struct {
// The ID of the dataset containing this table.
DatasetID *string `json:"datasetId,omitempty" tf:"dataset_id,omitempty"`
// The ID of the project containing this table.
ProjectID *string `json:"projectId,omitempty" tf:"project_id,omitempty"`
// The table. Can be specified as {{table_id}} if project_id and dataset_id are also set,
// or of the form projects/{{project}}/datasets/{{dataset_id}}/tables/{{table_id}} if not.
TableID *string `json:"tableId,omitempty" tf:"table_id,omitempty"`
}
type LoadDestinationTableParameters struct {
// The ID of the dataset containing this table.
// +crossplane:generate:reference:type=github.com/upbound/provider-gcp/apis/bigquery/v1beta2.Dataset
// +kubebuilder:validation:Optional
DatasetID *string `json:"datasetId,omitempty" tf:"dataset_id,omitempty"`
// Reference to a Dataset in bigquery to populate datasetId.
// +kubebuilder:validation:Optional
DatasetIDRef *v1.Reference `json:"datasetIdRef,omitempty" tf:"-"`
// Selector for a Dataset in bigquery to populate datasetId.
// +kubebuilder:validation:Optional
DatasetIDSelector *v1.Selector `json:"datasetIdSelector,omitempty" tf:"-"`
// The ID of the project containing this table.
// +kubebuilder:validation:Optional
ProjectID *string `json:"projectId,omitempty" tf:"project_id,omitempty"`
// The table. Can be specified as {{table_id}} if project_id and dataset_id are also set,
// or of the form projects/{{project}}/datasets/{{dataset_id}}/tables/{{table_id}} if not.
// +crossplane:generate:reference:type=github.com/upbound/provider-gcp/apis/bigquery/v1beta2.Table
// +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID()
// +kubebuilder:validation:Optional
TableID *string `json:"tableId,omitempty" tf:"table_id,omitempty"`
// Reference to a Table in bigquery to populate tableId.
// +kubebuilder:validation:Optional
TableIDRef *v1.Reference `json:"tableIdRef,omitempty" tf:"-"`
// Selector for a Table in bigquery to populate tableId.
// +kubebuilder:validation:Optional
TableIDSelector *v1.Selector `json:"tableIdSelector,omitempty" tf:"-"`
}
type LoadInitParameters struct {
// Accept rows that are missing trailing optional columns. The missing values are treated as nulls.
// If false, records with missing trailing columns are treated as bad records, and if there are too many bad records,
// an invalid error is returned in the job result. The default value is false. Only applicable to CSV, ignored for other formats.
AllowJaggedRows *bool `json:"allowJaggedRows,omitempty" tf:"allow_jagged_rows,omitempty"`
// Indicates if BigQuery should allow quoted data sections that contain newline characters in a CSV file.
// The default value is false.
AllowQuotedNewlines *bool `json:"allowQuotedNewlines,omitempty" tf:"allow_quoted_newlines,omitempty"`
// Indicates if we should automatically infer the options and schema for CSV and JSON sources.
Autodetect *bool `json:"autodetect,omitempty" tf:"autodetect,omitempty"`
// Specifies whether the job is allowed to create new tables. The following values are supported:
// CREATE_IF_NEEDED: If the table does not exist, BigQuery creates the table.
// CREATE_NEVER: The table must already exist. If it does not, a 'notFound' error is returned in the job result.
// Creation, truncation and append actions occur as one atomic update upon job completion
// Default value is CREATE_IF_NEEDED.
// Possible values are: CREATE_IF_NEEDED, CREATE_NEVER.
CreateDisposition *string `json:"createDisposition,omitempty" tf:"create_disposition,omitempty"`
// Custom encryption configuration (e.g., Cloud KMS keys)
// Structure is documented below.
DestinationEncryptionConfiguration *LoadDestinationEncryptionConfigurationInitParameters `json:"destinationEncryptionConfiguration,omitempty" tf:"destination_encryption_configuration,omitempty"`
// The destination table to load the data into.
// Structure is documented below.
DestinationTable *LoadDestinationTableInitParameters `json:"destinationTable,omitempty" tf:"destination_table,omitempty"`
// The character encoding of the data. The supported values are UTF-8 or ISO-8859-1.
// The default value is UTF-8. BigQuery decodes the data after the raw, binary data
// has been split using the values of the quote and fieldDelimiter properties.
Encoding *string `json:"encoding,omitempty" tf:"encoding,omitempty"`
// The separator for fields in a CSV file. The separator can be any ISO-8859-1 single-byte character.
// To use a character in the range 128-255, you must encode the character as UTF-8. BigQuery converts
// the string to ISO-8859-1 encoding, and then uses the first byte of the encoded string to split the
// data in its raw, binary state. BigQuery also supports the escape sequence "\t" to specify a tab separator.
// The default value is a comma (',').
FieldDelimiter *string `json:"fieldDelimiter,omitempty" tf:"field_delimiter,omitempty"`
// Indicates if BigQuery should allow extra values that are not represented in the table schema.
// If true, the extra values are ignored. If false, records with extra columns are treated as bad records,
// and if there are too many bad records, an invalid error is returned in the job result.
// The default value is false. The sourceFormat property determines what BigQuery treats as an extra value:
// CSV: Trailing columns
// JSON: Named values that don't match any column names
IgnoreUnknownValues *bool `json:"ignoreUnknownValues,omitempty" tf:"ignore_unknown_values,omitempty"`
// If sourceFormat is set to newline-delimited JSON, indicates whether it should be processed as a JSON variant such as GeoJSON.
// For a sourceFormat other than JSON, omit this field. If the sourceFormat is newline-delimited JSON: - for newline-delimited
// GeoJSON: set to GEOJSON.
JSONExtension *string `json:"jsonExtension,omitempty" tf:"json_extension,omitempty"`
// The maximum number of bad records that BigQuery can ignore when running the job. If the number of bad records exceeds this value,
// an invalid error is returned in the job result. The default value is 0, which requires that all records are valid.
MaxBadRecords *float64 `json:"maxBadRecords,omitempty" tf:"max_bad_records,omitempty"`
// Specifies a string that represents a null value in a CSV file. For example, if you specify "\N", BigQuery interprets "\N" as a null value
// when loading a CSV file. The default value is the empty string. If you set this property to a custom value, BigQuery throws an error if an
// empty string is present for all data types except for STRING and BYTE. For STRING and BYTE columns, BigQuery interprets the empty string as
// an empty value.
NullMarker *string `json:"nullMarker,omitempty" tf:"null_marker,omitempty"`
// Parquet Options for load and make external tables.
// Structure is documented below.
ParquetOptions *ParquetOptionsInitParameters `json:"parquetOptions,omitempty" tf:"parquet_options,omitempty"`
// If sourceFormat is set to "DATASTORE_BACKUP", indicates which entity properties to load into BigQuery from a Cloud Datastore backup.
// Property names are case sensitive and must be top-level properties. If no properties are specified, BigQuery loads all properties.
// If any named property isn't found in the Cloud Datastore backup, an invalid error is returned in the job result.
ProjectionFields []*string `json:"projectionFields,omitempty" tf:"projection_fields,omitempty"`
// The value that is used to quote data sections in a CSV file. BigQuery converts the string to ISO-8859-1 encoding,
// and then uses the first byte of the encoded string to split the data in its raw, binary state.
// The default value is a double-quote ('"'). If your data does not contain quoted sections, set the property value to an empty string.
// If your data contains quoted newline characters, you must also set the allowQuotedNewlines property to true.
Quote *string `json:"quote,omitempty" tf:"quote,omitempty"`
// Allows the schema of the destination table to be updated as a side effect of the load job if a schema is autodetected or
// supplied in the job configuration. Schema update options are supported in two cases: when writeDisposition is WRITE_APPEND;
// when writeDisposition is WRITE_TRUNCATE and the destination table is a partition of a table, specified by partition decorators.
// For normal tables, WRITE_TRUNCATE will always overwrite the schema. One or more of the following values are specified:
// ALLOW_FIELD_ADDITION: allow adding a nullable field to the schema.
// ALLOW_FIELD_RELAXATION: allow relaxing a required field in the original schema to nullable.
SchemaUpdateOptions []*string `json:"schemaUpdateOptions,omitempty" tf:"schema_update_options,omitempty"`
// The number of rows at the top of a CSV file that BigQuery will skip when loading the data.
// The default value is 0. This property is useful if you have header rows in the file that should be skipped.
// When autodetect is on, the behavior is the following:
// skipLeadingRows unspecified - Autodetect tries to detect headers in the first row. If they are not detected,
// the row is read as data. Otherwise data is read starting from the second row.
// skipLeadingRows is 0 - Instructs autodetect that there are no headers and data should be read starting from the first row.
// skipLeadingRows = N > 0 - Autodetect skips N-1 rows and tries to detect headers in row N. If headers are not detected,
// row N is just skipped. Otherwise row N is used to extract column names for the detected schema.
SkipLeadingRows *float64 `json:"skipLeadingRows,omitempty" tf:"skip_leading_rows,omitempty"`
// The format of the data files. For CSV files, specify "CSV". For datastore backups, specify "DATASTORE_BACKUP".
// For newline-delimited JSON, specify "NEWLINE_DELIMITED_JSON". For Avro, specify "AVRO". For parquet, specify "PARQUET".
// For orc, specify "ORC". [Beta] For Bigtable, specify "BIGTABLE".
// The default value is CSV.
SourceFormat *string `json:"sourceFormat,omitempty" tf:"source_format,omitempty"`
// The fully-qualified URIs that point to your data in Google Cloud.
// For Google Cloud Storage URIs: Each URI can contain one '*' wildcard character
// and it must come after the 'bucket' name. Size limits related to load jobs apply
// to external data sources. For Google Cloud Bigtable URIs: Exactly one URI can be
// specified and it has to be a fully specified and valid HTTPS URL for a Google Cloud Bigtable table.
// For Google Cloud Datastore backups: Exactly one URI can be specified. Also, the '*' wildcard character is not allowed.
SourceUris []*string `json:"sourceUris,omitempty" tf:"source_uris,omitempty"`
// Time-based partitioning specification for the destination table.
// Structure is documented below.
TimePartitioning *TimePartitioningInitParameters `json:"timePartitioning,omitempty" tf:"time_partitioning,omitempty"`
// Specifies the action that occurs if the destination table already exists. The following values are supported:
// WRITE_TRUNCATE: If the table already exists, BigQuery overwrites the table data and uses the schema from the query result.
// WRITE_APPEND: If the table already exists, BigQuery appends the data to the table.
// WRITE_EMPTY: If the table already exists and contains data, a 'duplicate' error is returned in the job result.
// Each action is atomic and only occurs if BigQuery is able to complete the job successfully.
// Creation, truncation and append actions occur as one atomic update upon job completion.
// Default value is WRITE_EMPTY.
// Possible values are: WRITE_TRUNCATE, WRITE_APPEND, WRITE_EMPTY.
WriteDisposition *string `json:"writeDisposition,omitempty" tf:"write_disposition,omitempty"`
}
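
// exampleLoadInitParameters is an illustrative, hand-written sketch and is not part
// of the upjet-generated API: it describes a CSV load job that reads from a
// placeholder Cloud Storage URI, skips one header row, autodetects the schema and
// appends to the destination table. Bucket, dataset and table names are assumptions
// for the example only.
func exampleLoadInitParameters() LoadInitParameters {
	s := func(v string) *string { return &v }
	b := func(v bool) *bool { return &v }
	f := func(v float64) *float64 { return &v }
	return LoadInitParameters{
		SourceUris:       []*string{s("gs://my-import-bucket/data/*.csv")},
		SourceFormat:     s("CSV"),
		SkipLeadingRows:  f(1),
		Autodetect:       b(true),
		WriteDisposition: s("WRITE_APPEND"),
		DestinationTable: &LoadDestinationTableInitParameters{
			DatasetID: s("my_dataset"),
			TableID:   s("my_table"),
		},
	}
}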
type LoadObservation struct {
// Accept rows that are missing trailing optional columns. The missing values are treated as nulls.
// If false, records with missing trailing columns are treated as bad records, and if there are too many bad records,
// an invalid error is returned in the job result. The default value is false. Only applicable to CSV, ignored for other formats.
AllowJaggedRows *bool `json:"allowJaggedRows,omitempty" tf:"allow_jagged_rows,omitempty"`
// Indicates if BigQuery should allow quoted data sections that contain newline characters in a CSV file.
// The default value is false.
AllowQuotedNewlines *bool `json:"allowQuotedNewlines,omitempty" tf:"allow_quoted_newlines,omitempty"`
// Indicates if we should automatically infer the options and schema for CSV and JSON sources.
Autodetect *bool `json:"autodetect,omitempty" tf:"autodetect,omitempty"`
// Specifies whether the job is allowed to create new tables. The following values are supported:
// CREATE_IF_NEEDED: If the table does not exist, BigQuery creates the table.
// CREATE_NEVER: The table must already exist. If it does not, a 'notFound' error is returned in the job result.
// Creation, truncation and append actions occur as one atomic update upon job completion
// Default value is CREATE_IF_NEEDED.
// Possible values are: CREATE_IF_NEEDED, CREATE_NEVER.
CreateDisposition *string `json:"createDisposition,omitempty" tf:"create_disposition,omitempty"`
// Custom encryption configuration (e.g., Cloud KMS keys)
// Structure is documented below.
DestinationEncryptionConfiguration *LoadDestinationEncryptionConfigurationObservation `json:"destinationEncryptionConfiguration,omitempty" tf:"destination_encryption_configuration,omitempty"`
// The destination table to load the data into.
// Structure is documented below.
DestinationTable *LoadDestinationTableObservation `json:"destinationTable,omitempty" tf:"destination_table,omitempty"`
// The character encoding of the data. The supported values are UTF-8 or ISO-8859-1.
// The default value is UTF-8. BigQuery decodes the data after the raw, binary data
// has been split using the values of the quote and fieldDelimiter properties.
Encoding *string `json:"encoding,omitempty" tf:"encoding,omitempty"`
// The separator for fields in a CSV file. The separator can be any ISO-8859-1 single-byte character.
// To use a character in the range 128-255, you must encode the character as UTF-8. BigQuery converts
// the string to ISO-8859-1 encoding, and then uses the first byte of the encoded string to split the
// data in its raw, binary state. BigQuery also supports the escape sequence "\t" to specify a tab separator.
// The default value is a comma (',').
FieldDelimiter *string `json:"fieldDelimiter,omitempty" tf:"field_delimiter,omitempty"`
// Indicates if BigQuery should allow extra values that are not represented in the table schema.
// If true, the extra values are ignored. If false, records with extra columns are treated as bad records,
// and if there are too many bad records, an invalid error is returned in the job result.
// The default value is false. The sourceFormat property determines what BigQuery treats as an extra value:
// CSV: Trailing columns
// JSON: Named values that don't match any column names
IgnoreUnknownValues *bool `json:"ignoreUnknownValues,omitempty" tf:"ignore_unknown_values,omitempty"`
// If sourceFormat is set to newline-delimited JSON, indicates whether it should be processed as a JSON variant such as GeoJSON.
// For a sourceFormat other than JSON, omit this field. If the sourceFormat is newline-delimited JSON: - for newline-delimited
// GeoJSON: set to GEOJSON.
JSONExtension *string `json:"jsonExtension,omitempty" tf:"json_extension,omitempty"`
// The maximum number of bad records that BigQuery can ignore when running the job. If the number of bad records exceeds this value,
// an invalid error is returned in the job result. The default value is 0, which requires that all records are valid.
MaxBadRecords *float64 `json:"maxBadRecords,omitempty" tf:"max_bad_records,omitempty"`
// Specifies a string that represents a null value in a CSV file. For example, if you specify "\N", BigQuery interprets "\N" as a null value
// when loading a CSV file. The default value is the empty string. If you set this property to a custom value, BigQuery throws an error if an
// empty string is present for all data types except for STRING and BYTE. For STRING and BYTE columns, BigQuery interprets the empty string as
// an empty value.
NullMarker *string `json:"nullMarker,omitempty" tf:"null_marker,omitempty"`
// Parquet Options for load and make external tables.
// Structure is documented below.
ParquetOptions *ParquetOptionsObservation `json:"parquetOptions,omitempty" tf:"parquet_options,omitempty"`
// If sourceFormat is set to "DATASTORE_BACKUP", indicates which entity properties to load into BigQuery from a Cloud Datastore backup.
// Property names are case sensitive and must be top-level properties. If no properties are specified, BigQuery loads all properties.
// If any named property isn't found in the Cloud Datastore backup, an invalid error is returned in the job result.
ProjectionFields []*string `json:"projectionFields,omitempty" tf:"projection_fields,omitempty"`
// The value that is used to quote data sections in a CSV file. BigQuery converts the string to ISO-8859-1 encoding,
// and then uses the first byte of the encoded string to split the data in its raw, binary state.
// The default value is a double-quote ('"'). If your data does not contain quoted sections, set the property value to an empty string.
// If your data contains quoted newline characters, you must also set the allowQuotedNewlines property to true.
Quote *string `json:"quote,omitempty" tf:"quote,omitempty"`
// Allows the schema of the destination table to be updated as a side effect of the load job if a schema is autodetected or
// supplied in the job configuration. Schema update options are supported in two cases: when writeDisposition is WRITE_APPEND;
// when writeDisposition is WRITE_TRUNCATE and the destination table is a partition of a table, specified by partition decorators.
// For normal tables, WRITE_TRUNCATE will always overwrite the schema. One or more of the following values are specified:
// ALLOW_FIELD_ADDITION: allow adding a nullable field to the schema.
// ALLOW_FIELD_RELAXATION: allow relaxing a required field in the original schema to nullable.
SchemaUpdateOptions []*string `json:"schemaUpdateOptions,omitempty" tf:"schema_update_options,omitempty"`
// The number of rows at the top of a CSV file that BigQuery will skip when loading the data.
// The default value is 0. This property is useful if you have header rows in the file that should be skipped.
// When autodetect is on, the behavior is the following:
// skipLeadingRows unspecified - Autodetect tries to detect headers in the first row. If they are not detected,
// the row is read as data. Otherwise data is read starting from the second row.
// skipLeadingRows is 0 - Instructs autodetect that there are no headers and data should be read starting from the first row.
// skipLeadingRows = N > 0 - Autodetect skips N-1 rows and tries to detect headers in row N. If headers are not detected,
// row N is just skipped. Otherwise row N is used to extract column names for the detected schema.
SkipLeadingRows *float64 `json:"skipLeadingRows,omitempty" tf:"skip_leading_rows,omitempty"`
// The format of the data files. For CSV files, specify "CSV". For datastore backups, specify "DATASTORE_BACKUP".
// For newline-delimited JSON, specify "NEWLINE_DELIMITED_JSON". For Avro, specify "AVRO". For parquet, specify "PARQUET".
// For orc, specify "ORC". [Beta] For Bigtable, specify "BIGTABLE".
// The default value is CSV.
SourceFormat *string `json:"sourceFormat,omitempty" tf:"source_format,omitempty"`
// The fully-qualified URIs that point to your data in Google Cloud.
// For Google Cloud Storage URIs: Each URI can contain one '*' wildcard character
// and it must come after the 'bucket' name. Size limits related to load jobs apply
// to external data sources. For Google Cloud Bigtable URIs: Exactly one URI can be
// specified and it has to be a fully specified and valid HTTPS URL for a Google Cloud Bigtable table.
// For Google Cloud Datastore backups: Exactly one URI can be specified. Also, the '*' wildcard character is not allowed.
SourceUris []*string `json:"sourceUris,omitempty" tf:"source_uris,omitempty"`
// Time-based partitioning specification for the destination table.
// Structure is documented below.
TimePartitioning *TimePartitioningObservation `json:"timePartitioning,omitempty" tf:"time_partitioning,omitempty"`
// Specifies the action that occurs if the destination table already exists. The following values are supported:
// WRITE_TRUNCATE: If the table already exists, BigQuery overwrites the table data and uses the schema from the query result.
// WRITE_APPEND: If the table already exists, BigQuery appends the data to the table.
// WRITE_EMPTY: If the table already exists and contains data, a 'duplicate' error is returned in the job result.
// Each action is atomic and only occurs if BigQuery is able to complete the job successfully.
// Creation, truncation and append actions occur as one atomic update upon job completion.
// Default value is WRITE_EMPTY.
// Possible values are: WRITE_TRUNCATE, WRITE_APPEND, WRITE_EMPTY.
WriteDisposition *string `json:"writeDisposition,omitempty" tf:"write_disposition,omitempty"`
}
type LoadParameters struct {
// Accept rows that are missing trailing optional columns. The missing values are treated as nulls.
// If false, records with missing trailing columns are treated as bad records, and if there are too many bad records,
// an invalid error is returned in the job result. The default value is false. Only applicable to CSV, ignored for other formats.
// +kubebuilder:validation:Optional
AllowJaggedRows *bool `json:"allowJaggedRows,omitempty" tf:"allow_jagged_rows,omitempty"`
// Indicates if BigQuery should allow quoted data sections that contain newline characters in a CSV file.
// The default value is false.
// +kubebuilder:validation:Optional
AllowQuotedNewlines *bool `json:"allowQuotedNewlines,omitempty" tf:"allow_quoted_newlines,omitempty"`
// Indicates if we should automatically infer the options and schema for CSV and JSON sources.
// +kubebuilder:validation:Optional
Autodetect *bool `json:"autodetect,omitempty" tf:"autodetect,omitempty"`
// Specifies whether the job is allowed to create new tables. The following values are supported:
// CREATE_IF_NEEDED: If the table does not exist, BigQuery creates the table.
// CREATE_NEVER: The table must already exist. If it does not, a 'notFound' error is returned in the job result.
// Creation, truncation and append actions occur as one atomic update upon job completion
// Default value is CREATE_IF_NEEDED.
// Possible values are: CREATE_IF_NEEDED, CREATE_NEVER.
// +kubebuilder:validation:Optional
CreateDisposition *string `json:"createDisposition,omitempty" tf:"create_disposition,omitempty"`
// Custom encryption configuration (e.g., Cloud KMS keys)
// Structure is documented below.
// +kubebuilder:validation:Optional
DestinationEncryptionConfiguration *LoadDestinationEncryptionConfigurationParameters `json:"destinationEncryptionConfiguration,omitempty" tf:"destination_encryption_configuration,omitempty"`
// The destination table to load the data into.
// Structure is documented below.
// +kubebuilder:validation:Optional
DestinationTable *LoadDestinationTableParameters `json:"destinationTable" tf:"destination_table,omitempty"`
// The character encoding of the data. The supported values are UTF-8 or ISO-8859-1.
// The default value is UTF-8. BigQuery decodes the data after the raw, binary data
// has been split using the values of the quote and fieldDelimiter properties.
// +kubebuilder:validation:Optional
Encoding *string `json:"encoding,omitempty" tf:"encoding,omitempty"`
// The separator for fields in a CSV file. The separator can be any ISO-8859-1 single-byte character.
// To use a character in the range 128-255, you must encode the character as UTF-8. BigQuery converts
// the string to ISO-8859-1 encoding, and then uses the first byte of the encoded string to split the
// data in its raw, binary state. BigQuery also supports the escape sequence "\t" to specify a tab separator.
// The default value is a comma (',').
// +kubebuilder:validation:Optional
FieldDelimiter *string `json:"fieldDelimiter,omitempty" tf:"field_delimiter,omitempty"`
// Indicates if BigQuery should allow extra values that are not represented in the table schema.
// If true, the extra values are ignored. If false, records with extra columns are treated as bad records,
// and if there are too many bad records, an invalid error is returned in the job result.
// The default value is false. The sourceFormat property determines what BigQuery treats as an extra value:
// CSV: Trailing columns
// JSON: Named values that don't match any column names
// +kubebuilder:validation:Optional
IgnoreUnknownValues *bool `json:"ignoreUnknownValues,omitempty" tf:"ignore_unknown_values,omitempty"`
// If sourceFormat is set to newline-delimited JSON, indicates whether it should be processed as a JSON variant such as GeoJSON.
// For a sourceFormat other than JSON, omit this field. If the sourceFormat is newline-delimited JSON: - for newline-delimited
// GeoJSON: set to GEOJSON.
// +kubebuilder:validation:Optional