// Copyright 2021 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.26.0
// protoc v3.12.2
// source: google/cloud/aiplatform/v1beta1/model.proto
package aiplatform
import (
reflect "reflect"
sync "sync"
_ "google.golang.org/genproto/googleapis/api/annotations"
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
structpb "google.golang.org/protobuf/types/known/structpb"
timestamppb "google.golang.org/protobuf/types/known/timestamppb"
)
const (
// Verify that this generated code is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
// Verify that runtime/protoimpl is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
)
// Identifies a type of Model's prediction resources.
type Model_DeploymentResourcesType int32
const (
// Should not be used.
Model_DEPLOYMENT_RESOURCES_TYPE_UNSPECIFIED Model_DeploymentResourcesType = 0
// Resources that are dedicated to the [DeployedModel][google.cloud.aiplatform.v1beta1.DeployedModel], and that need a
// higher degree of manual configuration.
Model_DEDICATED_RESOURCES Model_DeploymentResourcesType = 1
// Resources that are largely decided by Vertex AI and require
// only a modest amount of additional configuration.
Model_AUTOMATIC_RESOURCES Model_DeploymentResourcesType = 2
)
// Enum value maps for Model_DeploymentResourcesType.
var (
Model_DeploymentResourcesType_name = map[int32]string{
0: "DEPLOYMENT_RESOURCES_TYPE_UNSPECIFIED",
1: "DEDICATED_RESOURCES",
2: "AUTOMATIC_RESOURCES",
}
Model_DeploymentResourcesType_value = map[string]int32{
"DEPLOYMENT_RESOURCES_TYPE_UNSPECIFIED": 0,
"DEDICATED_RESOURCES": 1,
"AUTOMATIC_RESOURCES": 2,
}
)
func (x Model_DeploymentResourcesType) Enum() *Model_DeploymentResourcesType {
p := new(Model_DeploymentResourcesType)
*p = x
return p
}
func (x Model_DeploymentResourcesType) String() string {
return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
}
func (Model_DeploymentResourcesType) Descriptor() protoreflect.EnumDescriptor {
return file_google_cloud_aiplatform_v1beta1_model_proto_enumTypes[0].Descriptor()
}
func (Model_DeploymentResourcesType) Type() protoreflect.EnumType {
return &file_google_cloud_aiplatform_v1beta1_model_proto_enumTypes[0]
}
func (x Model_DeploymentResourcesType) Number() protoreflect.EnumNumber {
return protoreflect.EnumNumber(x)
}
// Deprecated: Use Model_DeploymentResourcesType.Descriptor instead.
func (Model_DeploymentResourcesType) EnumDescriptor() ([]byte, []int) {
return file_google_cloud_aiplatform_v1beta1_model_proto_rawDescGZIP(), []int{0, 0}
}
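// Illustrative sketch, not emitted by protoc-gen-go: round-tripping a
// Model_DeploymentResourcesType between its numeric and string forms with the
// generated name/value maps and the Enum() pointer helper. The function name
// is hypothetical.
func exampleDeploymentResourcesType() {
	rt := Model_DEDICATED_RESOURCES
	name := Model_DeploymentResourcesType_name[int32(rt)] // "DEDICATED_RESOURCES"
	back := Model_DeploymentResourcesType(Model_DeploymentResourcesType_value[name])
	p := rt.Enum()             // pointer copy, useful for optional fields
	_ = back == rt && *p == rt // true
}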
// The Model content that can be exported.
type Model_ExportFormat_ExportableContent int32
const (
// Should not be used.
Model_ExportFormat_EXPORTABLE_CONTENT_UNSPECIFIED Model_ExportFormat_ExportableContent = 0
// Model artifact and any of its supported files. Will be exported to the
// location specified by the `artifactDestination` field of the
// [ExportModelRequest.output_config][google.cloud.aiplatform.v1beta1.ExportModelRequest.output_config] object.
Model_ExportFormat_ARTIFACT Model_ExportFormat_ExportableContent = 1
// The container image that is to be used when deploying this Model. Will
// be exported to the location specified by the `imageDestination` field
// of the [ExportModelRequest.output_config][google.cloud.aiplatform.v1beta1.ExportModelRequest.output_config] object.
Model_ExportFormat_IMAGE Model_ExportFormat_ExportableContent = 2
)
// Enum value maps for Model_ExportFormat_ExportableContent.
var (
Model_ExportFormat_ExportableContent_name = map[int32]string{
0: "EXPORTABLE_CONTENT_UNSPECIFIED",
1: "ARTIFACT",
2: "IMAGE",
}
Model_ExportFormat_ExportableContent_value = map[string]int32{
"EXPORTABLE_CONTENT_UNSPECIFIED": 0,
"ARTIFACT": 1,
"IMAGE": 2,
}
)
func (x Model_ExportFormat_ExportableContent) Enum() *Model_ExportFormat_ExportableContent {
p := new(Model_ExportFormat_ExportableContent)
*p = x
return p
}
func (x Model_ExportFormat_ExportableContent) String() string {
return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
}
func (Model_ExportFormat_ExportableContent) Descriptor() protoreflect.EnumDescriptor {
return file_google_cloud_aiplatform_v1beta1_model_proto_enumTypes[1].Descriptor()
}
func (Model_ExportFormat_ExportableContent) Type() protoreflect.EnumType {
return &file_google_cloud_aiplatform_v1beta1_model_proto_enumTypes[1]
}
func (x Model_ExportFormat_ExportableContent) Number() protoreflect.EnumNumber {
return protoreflect.EnumNumber(x)
}
// Deprecated: Use Model_ExportFormat_ExportableContent.Descriptor instead.
func (Model_ExportFormat_ExportableContent) EnumDescriptor() ([]byte, []int) {
return file_google_cloud_aiplatform_v1beta1_model_proto_rawDescGZIP(), []int{0, 0, 0}
}
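// Illustrative sketch, not emitted by protoc-gen-go: String() and Number()
// resolve through the protoreflect enum descriptor, so they report the proto
// enum value's name and number. The function name is hypothetical.
func exampleExportableContent() {
	c := Model_ExportFormat_ARTIFACT
	_ = c.String() // "ARTIFACT"
	_ = c.Number() // protoreflect.EnumNumber(1)
}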
// A trained machine learning Model.
type Model struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
// The resource name of the Model.
Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
// Required. The display name of the Model.
// The name can be up to 128 characters long and can consist of any UTF-8
// characters.
DisplayName string `protobuf:"bytes,2,opt,name=display_name,json=displayName,proto3" json:"display_name,omitempty"`
// The description of the Model.
Description string `protobuf:"bytes,3,opt,name=description,proto3" json:"description,omitempty"`
// The schemata that describe formats of the Model's predictions and
// explanations as given and returned via
// [PredictionService.Predict][google.cloud.aiplatform.v1beta1.PredictionService.Predict] and [PredictionService.Explain][google.cloud.aiplatform.v1beta1.PredictionService.Explain].
PredictSchemata *PredictSchemata `protobuf:"bytes,4,opt,name=predict_schemata,json=predictSchemata,proto3" json:"predict_schemata,omitempty"`
// Immutable. Points to a YAML file stored on Google Cloud Storage describing additional
// information about the Model that is specific to it. Unset if the Model
// does not have any additional information.
// The schema is defined as an OpenAPI 3.0.2 [Schema
// Object](https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.0.2.md#schemaObject).
// AutoML Models always have this field populated by Vertex AI; if no
// additional metadata is needed, this field is set to an empty string.
// Note: The URI given on output will be immutable and probably different,
// including the URI scheme, from the one given on input. The output URI will
// point to a location where the user only has read access.
MetadataSchemaUri string `protobuf:"bytes,5,opt,name=metadata_schema_uri,json=metadataSchemaUri,proto3" json:"metadata_schema_uri,omitempty"`
// Immutable. Additional information about the Model; the schema of the metadata can
// be found in [metadata_schema][google.cloud.aiplatform.v1beta1.Model.metadata_schema_uri].
// Unset if the Model does not have any additional information.
Metadata *structpb.Value `protobuf:"bytes,6,opt,name=metadata,proto3" json:"metadata,omitempty"`
// Output only. The formats in which this Model may be exported. If empty, this Model is
// not available for export.
SupportedExportFormats []*Model_ExportFormat `protobuf:"bytes,20,rep,name=supported_export_formats,json=supportedExportFormats,proto3" json:"supported_export_formats,omitempty"`
// Output only. The resource name of the TrainingPipeline that uploaded this Model, if any.
TrainingPipeline string `protobuf:"bytes,7,opt,name=training_pipeline,json=trainingPipeline,proto3" json:"training_pipeline,omitempty"`
// Input only. The specification of the container that is to be used when deploying
// this Model. The specification is ingested upon
// [ModelService.UploadModel][google.cloud.aiplatform.v1beta1.ModelService.UploadModel], and all binaries it contains are copied
// and stored internally by Vertex AI.
// Not present for AutoML Models.
ContainerSpec *ModelContainerSpec `protobuf:"bytes,9,opt,name=container_spec,json=containerSpec,proto3" json:"container_spec,omitempty"`
// Immutable. The path to the directory containing the Model artifact and any of its
// supporting files.
// Not present for AutoML Models.
ArtifactUri string `protobuf:"bytes,26,opt,name=artifact_uri,json=artifactUri,proto3" json:"artifact_uri,omitempty"`
// Output only. When this Model is deployed, its prediction resources are described by the
// `prediction_resources` field of the [Endpoint.deployed_models][google.cloud.aiplatform.v1beta1.Endpoint.deployed_models] object.
// Because not all Models support all resource configuration types, the
// configuration types this Model supports are listed here. If no
// configuration types are listed, the Model cannot be deployed to an
// [Endpoint][google.cloud.aiplatform.v1beta1.Endpoint] and does not support
// online predictions ([PredictionService.Predict][google.cloud.aiplatform.v1beta1.PredictionService.Predict] or
// [PredictionService.Explain][google.cloud.aiplatform.v1beta1.PredictionService.Explain]). Such a Model can serve predictions by
// using a [BatchPredictionJob][google.cloud.aiplatform.v1beta1.BatchPredictionJob], if it has at least one entry each in
// [supported_input_storage_formats][google.cloud.aiplatform.v1beta1.Model.supported_input_storage_formats] and
// [supported_output_storage_formats][google.cloud.aiplatform.v1beta1.Model.supported_output_storage_formats].
SupportedDeploymentResourcesTypes []Model_DeploymentResourcesType `protobuf:"varint,10,rep,packed,name=supported_deployment_resources_types,json=supportedDeploymentResourcesTypes,proto3,enum=google.cloud.aiplatform.v1beta1.Model_DeploymentResourcesType" json:"supported_deployment_resources_types,omitempty"`
// Output only. The formats this Model supports in
// [BatchPredictionJob.input_config][google.cloud.aiplatform.v1beta1.BatchPredictionJob.input_config]. If
// [PredictSchemata.instance_schema_uri][google.cloud.aiplatform.v1beta1.PredictSchemata.instance_schema_uri] exists, the instances
// should be given as per that schema.
//
// The possible formats are:
//
// * `jsonl`
// The JSON Lines format, where each instance is a single line. Uses
// [GcsSource][google.cloud.aiplatform.v1beta1.BatchPredictionJob.InputConfig.gcs_source].
//
// * `csv`
// The CSV format, where each instance is a single comma-separated line.
// The first line in the file is the header, containing comma-separated field
// names. Uses [GcsSource][google.cloud.aiplatform.v1beta1.BatchPredictionJob.InputConfig.gcs_source].
//
// * `tf-record`
// The TFRecord format, where each instance is a single record in tfrecord
// syntax. Uses [GcsSource][google.cloud.aiplatform.v1beta1.BatchPredictionJob.InputConfig.gcs_source].
//
// * `tf-record-gzip`
// Similar to `tf-record`, but the file is gzipped. Uses
// [GcsSource][google.cloud.aiplatform.v1beta1.BatchPredictionJob.InputConfig.gcs_source].
//
// * `bigquery`
// Each instance is a single row in BigQuery. Uses
// [BigQuerySource][google.cloud.aiplatform.v1beta1.BatchPredictionJob.InputConfig.bigquery_source].
//
// * `file-list`
// Each line of the file is the location of an instance to process, uses
// `gcs_source` field of the
// [InputConfig][google.cloud.aiplatform.v1beta1.BatchPredictionJob.InputConfig] object.
//
//
// If this Model doesn't support any of these formats, it cannot be
// used with a [BatchPredictionJob][google.cloud.aiplatform.v1beta1.BatchPredictionJob]. However, if it has
// [supported_deployment_resources_types][google.cloud.aiplatform.v1beta1.Model.supported_deployment_resources_types], it could serve online
// predictions by using [PredictionService.Predict][google.cloud.aiplatform.v1beta1.PredictionService.Predict] or
// [PredictionService.Explain][google.cloud.aiplatform.v1beta1.PredictionService.Explain].
SupportedInputStorageFormats []string `protobuf:"bytes,11,rep,name=supported_input_storage_formats,json=supportedInputStorageFormats,proto3" json:"supported_input_storage_formats,omitempty"`
// Output only. The formats this Model supports in
// [BatchPredictionJob.output_config][google.cloud.aiplatform.v1beta1.BatchPredictionJob.output_config]. If both
// [PredictSchemata.instance_schema_uri][google.cloud.aiplatform.v1beta1.PredictSchemata.instance_schema_uri] and
// [PredictSchemata.prediction_schema_uri][google.cloud.aiplatform.v1beta1.PredictSchemata.prediction_schema_uri] exist, the predictions
// are returned together with their instances. In other words, the
// prediction has the original instance data first, followed
// by the actual prediction content (as per the schema).
//
// The possible formats are:
//
// * `jsonl`
// The JSON Lines format, where each prediction is a single line. Uses
// [GcsDestination][google.cloud.aiplatform.v1beta1.BatchPredictionJob.OutputConfig.gcs_destination].
//
// * `csv`
// The CSV format, where each prediction is a single comma-separated line.
// The first line in the file is the header, containing comma-separated field
// names. Uses
// [GcsDestination][google.cloud.aiplatform.v1beta1.BatchPredictionJob.OutputConfig.gcs_destination].
//
// * `bigquery`
// Each prediction is a single row in a BigQuery table. Uses
// [BigQueryDestination][google.cloud.aiplatform.v1beta1.BatchPredictionJob.OutputConfig.bigquery_destination].
//
//
// If this Model doesn't support any of these formats, it cannot be
// used with a [BatchPredictionJob][google.cloud.aiplatform.v1beta1.BatchPredictionJob]. However, if it has
// [supported_deployment_resources_types][google.cloud.aiplatform.v1beta1.Model.supported_deployment_resources_types], it could serve online
// predictions by using [PredictionService.Predict][google.cloud.aiplatform.v1beta1.PredictionService.Predict] or
// [PredictionService.Explain][google.cloud.aiplatform.v1beta1.PredictionService.Explain].
SupportedOutputStorageFormats []string `protobuf:"bytes,12,rep,name=supported_output_storage_formats,json=supportedOutputStorageFormats,proto3" json:"supported_output_storage_formats,omitempty"`
// Output only. Timestamp when this Model was uploaded into Vertex AI.
CreateTime *timestamppb.Timestamp `protobuf:"bytes,13,opt,name=create_time,json=createTime,proto3" json:"create_time,omitempty"`
// Output only. Timestamp when this Model was most recently updated.
UpdateTime *timestamppb.Timestamp `protobuf:"bytes,14,opt,name=update_time,json=updateTime,proto3" json:"update_time,omitempty"`
// Output only. The pointers to DeployedModels created from this Model. Note that
// the Model could have been deployed to Endpoints in different Locations.
DeployedModels []*DeployedModelRef `protobuf:"bytes,15,rep,name=deployed_models,json=deployedModels,proto3" json:"deployed_models,omitempty"`
// The default explanation specification for this Model.
//
// The Model can be used for [requesting
// explanation][PredictionService.Explain] after being
// [deployed][google.cloud.aiplatform.v1beta1.EndpointService.DeployModel] if it is populated.
// The Model can be used for [batch
// explanation][BatchPredictionJob.generate_explanation] if it is populated.
//
// All fields of the explanation_spec can be overridden by
// [explanation_spec][google.cloud.aiplatform.v1beta1.DeployedModel.explanation_spec] of
// [DeployModelRequest.deployed_model][google.cloud.aiplatform.v1beta1.DeployModelRequest.deployed_model], or
// [explanation_spec][google.cloud.aiplatform.v1beta1.BatchPredictionJob.explanation_spec] of
// [BatchPredictionJob][google.cloud.aiplatform.v1beta1.BatchPredictionJob].
//
// If the default explanation specification is not set for this Model, this
// Model can still be used for [requesting
// explanation][PredictionService.Explain] by setting
// [explanation_spec][google.cloud.aiplatform.v1beta1.DeployedModel.explanation_spec] of
// [DeployModelRequest.deployed_model][google.cloud.aiplatform.v1beta1.DeployModelRequest.deployed_model] and for [batch
// explanation][BatchPredictionJob.generate_explanation] by setting
// [explanation_spec][google.cloud.aiplatform.v1beta1.BatchPredictionJob.explanation_spec] of
// [BatchPredictionJob][google.cloud.aiplatform.v1beta1.BatchPredictionJob].
ExplanationSpec *ExplanationSpec `protobuf:"bytes,23,opt,name=explanation_spec,json=explanationSpec,proto3" json:"explanation_spec,omitempty"`
// Used to perform consistent read-modify-write updates. If not set, a blind
// "overwrite" update happens.
Etag string `protobuf:"bytes,16,opt,name=etag,proto3" json:"etag,omitempty"`
// The labels with user-defined metadata to organize your Models.
//
// Label keys and values can be no longer than 64 characters
// (Unicode codepoints) and can only contain lowercase letters, numeric
// characters, underscores and dashes. International characters are allowed.
//
// See https://goo.gl/xmQnxf for more information and examples of labels.
Labels map[string]string `protobuf:"bytes,17,rep,name=labels,proto3" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
// Customer-managed encryption key spec for a Model. If set, this
// Model and all sub-resources of this Model will be secured by this key.
EncryptionSpec *EncryptionSpec `protobuf:"bytes,24,opt,name=encryption_spec,json=encryptionSpec,proto3" json:"encryption_spec,omitempty"`
}
func (x *Model) Reset() {
*x = Model{}
if protoimpl.UnsafeEnabled {
mi := &file_google_cloud_aiplatform_v1beta1_model_proto_msgTypes[0]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *Model) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*Model) ProtoMessage() {}
func (x *Model) ProtoReflect() protoreflect.Message {
mi := &file_google_cloud_aiplatform_v1beta1_model_proto_msgTypes[0]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use Model.ProtoReflect.Descriptor instead.
func (*Model) Descriptor() ([]byte, []int) {
return file_google_cloud_aiplatform_v1beta1_model_proto_rawDescGZIP(), []int{0}
}
func (x *Model) GetName() string {
if x != nil {
return x.Name
}
return ""
}
func (x *Model) GetDisplayName() string {
if x != nil {
return x.DisplayName
}
return ""
}
func (x *Model) GetDescription() string {
if x != nil {
return x.Description
}
return ""
}
func (x *Model) GetPredictSchemata() *PredictSchemata {
if x != nil {
return x.PredictSchemata
}
return nil
}
func (x *Model) GetMetadataSchemaUri() string {
if x != nil {
return x.MetadataSchemaUri
}
return ""
}
func (x *Model) GetMetadata() *structpb.Value {
if x != nil {
return x.Metadata
}
return nil
}
func (x *Model) GetSupportedExportFormats() []*Model_ExportFormat {
if x != nil {
return x.SupportedExportFormats
}
return nil
}
func (x *Model) GetTrainingPipeline() string {
if x != nil {
return x.TrainingPipeline
}
return ""
}
func (x *Model) GetContainerSpec() *ModelContainerSpec {
if x != nil {
return x.ContainerSpec
}
return nil
}
func (x *Model) GetArtifactUri() string {
if x != nil {
return x.ArtifactUri
}
return ""
}
func (x *Model) GetSupportedDeploymentResourcesTypes() []Model_DeploymentResourcesType {
if x != nil {
return x.SupportedDeploymentResourcesTypes
}
return nil
}
func (x *Model) GetSupportedInputStorageFormats() []string {
if x != nil {
return x.SupportedInputStorageFormats
}
return nil
}
func (x *Model) GetSupportedOutputStorageFormats() []string {
if x != nil {
return x.SupportedOutputStorageFormats
}
return nil
}
func (x *Model) GetCreateTime() *timestamppb.Timestamp {
if x != nil {
return x.CreateTime
}
return nil
}
func (x *Model) GetUpdateTime() *timestamppb.Timestamp {
if x != nil {
return x.UpdateTime
}
return nil
}
func (x *Model) GetDeployedModels() []*DeployedModelRef {
if x != nil {
return x.DeployedModels
}
return nil
}
func (x *Model) GetExplanationSpec() *ExplanationSpec {
if x != nil {
return x.ExplanationSpec
}
return nil
}
func (x *Model) GetEtag() string {
if x != nil {
return x.Etag
}
return ""
}
func (x *Model) GetLabels() map[string]string {
if x != nil {
return x.Labels
}
return nil
}
func (x *Model) GetEncryptionSpec() *EncryptionSpec {
if x != nil {
return x.EncryptionSpec
}
return nil
}
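// Illustrative sketch, not emitted by protoc-gen-go: constructing a Model and
// reading it back through the generated getters, which are nil-safe and
// return zero values on a nil receiver. Field values are placeholders and the
// function name is hypothetical.
func exampleModel() {
	m := &Model{
		DisplayName: "my-model", // at most 128 UTF-8 characters
		Description: "demo model",
		Labels:      map[string]string{"team": "ml"}, // lowercase, <= 64 chars each
	}
	_ = m.GetDisplayName() // "my-model"
	var unset *Model
	_ = unset.GetLabels() // nil, no panic on the nil receiver
}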
// Contains the schemata used in Model's predictions and explanations via
// [PredictionService.Predict][google.cloud.aiplatform.v1beta1.PredictionService.Predict], [PredictionService.Explain][google.cloud.aiplatform.v1beta1.PredictionService.Explain] and
// [BatchPredictionJob][google.cloud.aiplatform.v1beta1.BatchPredictionJob].
type PredictSchemata struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
// Immutable. Points to a YAML file stored on Google Cloud Storage describing the format
// of a single instance, which is used in [PredictRequest.instances][google.cloud.aiplatform.v1beta1.PredictRequest.instances],
// [ExplainRequest.instances][google.cloud.aiplatform.v1beta1.ExplainRequest.instances] and
// [BatchPredictionJob.input_config][google.cloud.aiplatform.v1beta1.BatchPredictionJob.input_config].
// The schema is defined as an OpenAPI 3.0.2 [Schema
// Object](https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.0.2.md#schemaObject).
// AutoML Models always have this field populated by Vertex AI.
// Note: The URI given on output will be immutable and probably different,
// including the URI scheme, from the one given on input. The output URI will
// point to a location where the user only has read access.
InstanceSchemaUri string `protobuf:"bytes,1,opt,name=instance_schema_uri,json=instanceSchemaUri,proto3" json:"instance_schema_uri,omitempty"`
// Immutable. Points to a YAML file stored on Google Cloud Storage describing the
// parameters of prediction and explanation via
// [PredictRequest.parameters][google.cloud.aiplatform.v1beta1.PredictRequest.parameters], [ExplainRequest.parameters][google.cloud.aiplatform.v1beta1.ExplainRequest.parameters] and
// [BatchPredictionJob.model_parameters][google.cloud.aiplatform.v1beta1.BatchPredictionJob.model_parameters].
// The schema is defined as an OpenAPI 3.0.2 [Schema
// Object](https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.0.2.md#schemaObject).
// AutoML Models always have this field populated by Vertex AI; if no
// parameters are supported, it is set to an empty string.
// Note: The URI given on output will be immutable and probably different,
// including the URI scheme, from the one given on input. The output URI will
// point to a location where the user only has read access.
ParametersSchemaUri string `protobuf:"bytes,2,opt,name=parameters_schema_uri,json=parametersSchemaUri,proto3" json:"parameters_schema_uri,omitempty"`
// Immutable. Points to a YAML file stored on Google Cloud Storage describing the format
// of a single prediction produced by this Model, which is returned via
// [PredictResponse.predictions][google.cloud.aiplatform.v1beta1.PredictResponse.predictions], [ExplainResponse.explanations][google.cloud.aiplatform.v1beta1.ExplainResponse.explanations], and
// [BatchPredictionJob.output_config][google.cloud.aiplatform.v1beta1.BatchPredictionJob.output_config].
// The schema is defined as an OpenAPI 3.0.2 [Schema
// Object](https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.0.2.md#schemaObject).
// AutoML Models always have this field populated by Vertex AI.
// Note: The URI given on output will be immutable and probably different,
// including the URI scheme, from the one given on input. The output URI will
// point to a location where the user only has read access.
PredictionSchemaUri string `protobuf:"bytes,3,opt,name=prediction_schema_uri,json=predictionSchemaUri,proto3" json:"prediction_schema_uri,omitempty"`
}
func (x *PredictSchemata) Reset() {
*x = PredictSchemata{}
if protoimpl.UnsafeEnabled {
mi := &file_google_cloud_aiplatform_v1beta1_model_proto_msgTypes[1]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *PredictSchemata) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*PredictSchemata) ProtoMessage() {}
func (x *PredictSchemata) ProtoReflect() protoreflect.Message {
mi := &file_google_cloud_aiplatform_v1beta1_model_proto_msgTypes[1]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use PredictSchemata.ProtoReflect.Descriptor instead.
func (*PredictSchemata) Descriptor() ([]byte, []int) {
return file_google_cloud_aiplatform_v1beta1_model_proto_rawDescGZIP(), []int{1}
}
func (x *PredictSchemata) GetInstanceSchemaUri() string {
if x != nil {
return x.InstanceSchemaUri
}
return ""
}
func (x *PredictSchemata) GetParametersSchemaUri() string {
if x != nil {
return x.ParametersSchemaUri
}
return ""
}
func (x *PredictSchemata) GetPredictionSchemaUri() string {
if x != nil {
return x.PredictionSchemaUri
}
return ""
}
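// Illustrative sketch, not emitted by protoc-gen-go: a PredictSchemata whose
// three schema files live in Cloud Storage, as described above. The gs:// URIs
// are placeholders and the function name is hypothetical.
func examplePredictSchemata() {
	s := &PredictSchemata{
		InstanceSchemaUri:   "gs://my-bucket/schemata/instance.yaml",
		ParametersSchemaUri: "gs://my-bucket/schemata/parameters.yaml",
		PredictionSchemaUri: "gs://my-bucket/schemata/prediction.yaml",
	}
	_ = s.GetPredictionSchemaUri()
}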
// Specification of a container for serving predictions. Some fields in this
// message correspond to fields in the [Kubernetes Container v1 core
// specification](https://v1-18.docs.kubernetes.io/docs/reference/generated/kubernetes-api/v1.18/#container-v1-core).
type ModelContainerSpec struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
// Required. Immutable. URI of the Docker image to be used as the custom container for serving
// predictions. This URI must identify an image in Artifact Registry or
// Container Registry. Learn more about the [container publishing
// requirements](https://cloud.google.com/vertex-ai/docs/predictions/custom-container-requirements#publishing),
// including permissions requirements for the AI Platform Service Agent.
//
// The container image is ingested upon [ModelService.UploadModel][google.cloud.aiplatform.v1beta1.ModelService.UploadModel], stored
// internally, and this original path is afterwards not used.
//
// To learn about the requirements for the Docker image itself, see
// [Custom container
// requirements](https://cloud.google.com/vertex-ai/docs/predictions/custom-container-requirements#).
//
// You can use the URI of one of Vertex AI's [pre-built container images for
// prediction](https://cloud.google.com/vertex-ai/docs/predictions/pre-built-containers)
// in this field.
ImageUri string `protobuf:"bytes,1,opt,name=image_uri,json=imageUri,proto3" json:"image_uri,omitempty"`
// Immutable. Specifies the command that runs when the container starts. This overrides
// the container's
// [ENTRYPOINT](https://docs.docker.com/engine/reference/builder/#entrypoint).
// Specify this field as an array of executable and arguments, similar to a
// Docker `ENTRYPOINT`'s "exec" form, not its "shell" form.
//
// If you do not specify this field, then the container's `ENTRYPOINT` runs,
// in conjunction with the [args][google.cloud.aiplatform.v1beta1.ModelContainerSpec.args] field or the
// container's [`CMD`](https://docs.docker.com/engine/reference/builder/#cmd),
// if either exists. If this field is not specified and the container does not
// have an `ENTRYPOINT`, then refer to the Docker documentation about [how
// `CMD` and `ENTRYPOINT`
// interact](https://docs.docker.com/engine/reference/builder/#understand-how-cmd-and-entrypoint-interact).
//
// If you specify this field, then you can also specify the `args` field to
// provide additional arguments for this command. However, if you specify this
// field, then the container's `CMD` is ignored. See the
// [Kubernetes documentation about how the
// `command` and `args` fields interact with a container's `ENTRYPOINT` and
// `CMD`](https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#notes).
//
// In this field, you can reference [environment variables set by Vertex
// AI](https://cloud.google.com/vertex-ai/docs/predictions/custom-container-requirements#aip-variables)
// and environment variables set in the [env][google.cloud.aiplatform.v1beta1.ModelContainerSpec.env] field.
// You cannot reference environment variables set in the Docker image. In
// order for environment variables to be expanded, reference them by using the
// following syntax:
// <code>$(<var>VARIABLE_NAME</var>)</code>
// Note that this differs from Bash variable expansion, which does not use
// parentheses. If a variable cannot be resolved, the reference in the input
// string is used unchanged. To avoid variable expansion, you can escape this
// syntax with `$$`; for example:
// <code>$$(<var>VARIABLE_NAME</var>)</code>
// This field corresponds to the `command` field of the Kubernetes Containers
// [v1 core
// API](https://v1-18.docs.kubernetes.io/docs/reference/generated/kubernetes-api/v1.18/#container-v1-core).
Command []string `protobuf:"bytes,2,rep,name=command,proto3" json:"command,omitempty"`
// Immutable. Specifies arguments for the command that runs when the container starts.
// This overrides the container's
// [`CMD`](https://docs.docker.com/engine/reference/builder/#cmd). Specify
// this field as an array of executable and arguments, similar to a Docker
// `CMD`'s "default parameters" form.
//
// If you don't specify this field but do specify the
// [command][google.cloud.aiplatform.v1beta1.ModelContainerSpec.command] field, then the command from the
// `command` field runs without any additional arguments. See the
// [Kubernetes documentation about how the
// `command` and `args` fields interact with a container's `ENTRYPOINT` and
// `CMD`](https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#notes).
//
// If you don't specify this field and don't specify the `command` field,
// then the container's
// [`ENTRYPOINT`](https://docs.docker.com/engine/reference/builder/#cmd) and
// `CMD` determine what runs based on their default behavior. See the Docker
// documentation about [how `CMD` and `ENTRYPOINT`
// interact](https://docs.docker.com/engine/reference/builder/#understand-how-cmd-and-entrypoint-interact).
//
// In this field, you can reference [environment variables
// set by Vertex
// AI](https://cloud.google.com/vertex-ai/docs/predictions/custom-container-requirements#aip-variables)
// and environment variables set in the [env][google.cloud.aiplatform.v1beta1.ModelContainerSpec.env] field.
// You cannot reference environment variables set in the Docker image. In
// order for environment variables to be expanded, reference them by using the
// following syntax:
// <code>$(<var>VARIABLE_NAME</var>)</code>
// Note that this differs from Bash variable expansion, which does not use
// parentheses. If a variable cannot be resolved, the reference in the input
// string is used unchanged. To avoid variable expansion, you can escape this
// syntax with `$$`; for example:
// <code>$$(<var>VARIABLE_NAME</var>)</code>
// This field corresponds to the `args` field of the Kubernetes Containers
// [v1 core
// API](https://v1-18.docs.kubernetes.io/docs/reference/generated/kubernetes-api/v1.18/#container-v1-core).
Args []string `protobuf:"bytes,3,rep,name=args,proto3" json:"args,omitempty"`
// Immutable. List of environment variables to set in the container. After the container
// starts running, code running in the container can read these environment
// variables.
//
// Additionally, the [command][google.cloud.aiplatform.v1beta1.ModelContainerSpec.command] and
// [args][google.cloud.aiplatform.v1beta1.ModelContainerSpec.args] fields can reference these variables. Later
// entries in this list can also reference earlier entries. For example, the
// following sets the variable `VAR_2` to the value `foo bar`:
//
// ```json
// [
// {
// "name": "VAR_1",
// "value": "foo"
// },
// {
// "name": "VAR_2",
// "value": "$(VAR_1) bar"
// }
// ]
// ```
//
// If you switch the order of the variables in the example, then the expansion
// does not occur.
//
// This field corresponds to the `env` field of the Kubernetes Containers
// [v1 core
// API](https://v1-18.docs.kubernetes.io/docs/reference/generated/kubernetes-api/v1.18/#container-v1-core).
Env []*EnvVar `protobuf:"bytes,4,rep,name=env,proto3" json:"env,omitempty"`
// Immutable. List of ports to expose from the container. Vertex AI sends any
// prediction requests that it receives to the first port on this list. Vertex
// AI also sends
// [liveness and health
// checks](https://cloud.google.com/vertex-ai/docs/predictions/custom-container-requirements#liveness)
// to this port.
//
// If you do not specify this field, it defaults to the following value:
//
// ```json
// [
// {
// "containerPort": 8080
// }
// ]
// ```
//
// Vertex AI does not use ports other than the first one listed. This field
// corresponds to the `ports` field of the Kubernetes Containers
// [v1 core
// API](https://v1-18.docs.kubernetes.io/docs/reference/generated/kubernetes-api/v1.18/#container-v1-core).
Ports []*Port `protobuf:"bytes,5,rep,name=ports,proto3" json:"ports,omitempty"`
// Immutable. HTTP path on the container to send prediction requests to. Vertex AI
// forwards requests sent using
// [projects.locations.endpoints.predict][google.cloud.aiplatform.v1beta1.PredictionService.Predict] to this
// path on the container's IP address and port. Vertex AI then returns the
// container's response in the API response.
//
// For example, if you set this field to `/foo`, then when Vertex AI
// receives a prediction request, it forwards the request body in a POST
// request to the `/foo` path on the port of your container specified by the
// first value of this `ModelContainerSpec`'s
// [ports][google.cloud.aiplatform.v1beta1.ModelContainerSpec.ports] field.
//
// If you don't specify this field, it defaults to the following value when
// you [deploy this Model to an Endpoint][google.cloud.aiplatform.v1beta1.EndpointService.DeployModel]:
// <code>/v1/endpoints/<var>ENDPOINT</var>/deployedModels/<var>DEPLOYED_MODEL</var>:predict</code>
// The placeholders in this value are replaced as follows:
//
// * <var>ENDPOINT</var>: The last segment (following `endpoints/`) of the
// [Endpoint.name][google.cloud.aiplatform.v1beta1.Endpoint.name] field of the Endpoint where this Model has been
// deployed. (Vertex AI makes this value available to your container code
// as the [`AIP_ENDPOINT_ID` environment
// variable](https://cloud.google.com/vertex-ai/docs/predictions/custom-container-requirements#aip-variables).)
//
// * <var>DEPLOYED_MODEL</var>: [DeployedModel.id][google.cloud.aiplatform.v1beta1.DeployedModel.id] of the `DeployedModel`.
// (Vertex AI makes this value available to your container code
// as the [`AIP_DEPLOYED_MODEL_ID` environment
// variable](https://cloud.google.com/vertex-ai/docs/predictions/custom-container-requirements#aip-variables).)
PredictRoute string `protobuf:"bytes,6,opt,name=predict_route,json=predictRoute,proto3" json:"predict_route,omitempty"`
// Immutable. HTTP path on the container to send health checks to. Vertex AI
// intermittently sends GET requests to this path on the container's IP
// address and port to check that the container is healthy. Read more about
// [health
// checks](https://cloud.google.com/vertex-ai/docs/predictions/custom-container-requirements#health).
//
// For example, if you set this field to `/bar`, then Vertex AI
// intermittently sends a GET request to the `/bar` path on the port of your
// container specified by the first value of this `ModelContainerSpec`'s
// [ports][google.cloud.aiplatform.v1beta1.ModelContainerSpec.ports] field.
//
// If you don't specify this field, it defaults to the following value when
// you [deploy this Model to an Endpoint][google.cloud.aiplatform.v1beta1.EndpointService.DeployModel]:
// <code>/v1/endpoints/<var>ENDPOINT</var>/deployedModels/<var>DEPLOYED_MODEL</var>:predict</code>
// The placeholders in this value are replaced as follows:
//
// * <var>ENDPOINT</var>: The last segment (following `endpoints/`) of the
// [Endpoint.name][google.cloud.aiplatform.v1beta1.Endpoint.name] field of the Endpoint where this Model has been
// deployed. (Vertex AI makes this value available to your container code
// as the [`AIP_ENDPOINT_ID` environment
// variable](https://cloud.google.com/vertex-ai/docs/predictions/custom-container-requirements#aip-variables).)
//
// * <var>DEPLOYED_MODEL</var>: [DeployedModel.id][google.cloud.aiplatform.v1beta1.DeployedModel.id] of the `DeployedModel`.
// (Vertex AI makes this value available to your container code as the
// [`AIP_DEPLOYED_MODEL_ID` environment
// variable](https://cloud.google.com/vertex-ai/docs/predictions/custom-container-requirements#aip-variables).)
HealthRoute string `protobuf:"bytes,7,opt,name=health_route,json=healthRoute,proto3" json:"health_route,omitempty"`
}
func (x *ModelContainerSpec) Reset() {
*x = ModelContainerSpec{}
if protoimpl.UnsafeEnabled {
mi := &file_google_cloud_aiplatform_v1beta1_model_proto_msgTypes[2]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *ModelContainerSpec) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*ModelContainerSpec) ProtoMessage() {}
func (x *ModelContainerSpec) ProtoReflect() protoreflect.Message {
mi := &file_google_cloud_aiplatform_v1beta1_model_proto_msgTypes[2]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use ModelContainerSpec.ProtoReflect.Descriptor instead.
func (*ModelContainerSpec) Descriptor() ([]byte, []int) {
return file_google_cloud_aiplatform_v1beta1_model_proto_rawDescGZIP(), []int{2}
}
func (x *ModelContainerSpec) GetImageUri() string {
if x != nil {
return x.ImageUri
}
return ""
}
func (x *ModelContainerSpec) GetCommand() []string {
if x != nil {
return x.Command
}
return nil
}
func (x *ModelContainerSpec) GetArgs() []string {
if x != nil {
return x.Args
}
return nil
}
func (x *ModelContainerSpec) GetEnv() []*EnvVar {
if x != nil {
return x.Env
}
return nil
}
func (x *ModelContainerSpec) GetPorts() []*Port {
if x != nil {
return x.Ports
}
return nil
}
func (x *ModelContainerSpec) GetPredictRoute() string {
if x != nil {
return x.PredictRoute
}
return ""
}
func (x *ModelContainerSpec) GetHealthRoute() string {
if x != nil {
return x.HealthRoute
}
return ""
}
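// Illustrative sketch, not emitted by protoc-gen-go: a ModelContainerSpec that
// exercises the fields documented above. VAR_2 references VAR_1 via the $(VAR)
// expansion syntax, and the single Port mirrors the 8080 default. The image
// URI, routes, and function name are placeholders; the Name and Value fields
// are assumed from the package's generated EnvVar message.
func exampleModelContainerSpec() {
	spec := &ModelContainerSpec{
		ImageUri: "us-docker.pkg.dev/my-project/my-repo/my-image:latest",
		Command:  []string{"/usr/bin/server"},
		Args:     []string{"--port=8080"},
		Env: []*EnvVar{
			{Name: "VAR_1", Value: "foo"},
			{Name: "VAR_2", Value: "$(VAR_1) bar"}, // expands to "foo bar"
		},
		Ports:        []*Port{{ContainerPort: 8080}},
		PredictRoute: "/foo",
		HealthRoute:  "/bar",
	}
	_ = spec.GetPorts()
}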
// Represents a network port in a container.
type Port struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
// The number of the port to expose on the pod's IP address.
// Must be a valid port number, between 1 and 65535 inclusive.
ContainerPort int32 `protobuf:"varint,3,opt,name=container_port,json=containerPort,proto3" json:"container_port,omitempty"`
}
func (x *Port) Reset() {
*x = Port{}
if protoimpl.UnsafeEnabled {
mi := &file_google_cloud_aiplatform_v1beta1_model_proto_msgTypes[3]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *Port) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*Port) ProtoMessage() {}
func (x *Port) ProtoReflect() protoreflect.Message {
mi := &file_google_cloud_aiplatform_v1beta1_model_proto_msgTypes[3]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use Port.ProtoReflect.Descriptor instead.
func (*Port) Descriptor() ([]byte, []int) {
return file_google_cloud_aiplatform_v1beta1_model_proto_rawDescGZIP(), []int{3}
}
func (x *Port) GetContainerPort() int32 {
if x != nil {
return x.ContainerPort
}
return 0
}
// Represents an export format supported by the Model.
// All formats export to Google Cloud Storage.
type Model_ExportFormat struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
// Output only. The ID of the export format.
// The possible format IDs are:
//
// * `tflite`
// Used for Android mobile devices.
//
// * `edgetpu-tflite`
// Used for [Edge TPU](https://cloud.google.com/edge-tpu/) devices.
//
// * `tf-saved-model`
// A TensorFlow model in SavedModel format.
//
// * `tf-js`
// A [TensorFlow.js](https://www.tensorflow.org/js) model that can be used
// in the browser and in Node.js using JavaScript.
//
// * `core-ml`
// Used for iOS mobile devices.
//
// * `custom-trained`
// A Model that was uploaded or trained by custom code.
Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"`
// Output only. The content of this Model that may be exported.
ExportableContents []Model_ExportFormat_ExportableContent `protobuf:"varint,2,rep,packed,name=exportable_contents,json=exportableContents,proto3,enum=google.cloud.aiplatform.v1beta1.Model_ExportFormat_ExportableContent" json:"exportable_contents,omitempty"`
}
func (x *Model_ExportFormat) Reset() {
*x = Model_ExportFormat{}
if protoimpl.UnsafeEnabled {
mi := &file_google_cloud_aiplatform_v1beta1_model_proto_msgTypes[4]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *Model_ExportFormat) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*Model_ExportFormat) ProtoMessage() {}
func (x *Model_ExportFormat) ProtoReflect() protoreflect.Message {
mi := &file_google_cloud_aiplatform_v1beta1_model_proto_msgTypes[4]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use Model_ExportFormat.ProtoReflect.Descriptor instead.
func (*Model_ExportFormat) Descriptor() ([]byte, []int) {
return file_google_cloud_aiplatform_v1beta1_model_proto_rawDescGZIP(), []int{0, 0}