forked from asonawalla/gazette
/
protocol.pb.go
8029 lines (7764 loc) · 206 KB
/
protocol.pb.go
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932
933
934
935
936
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
955
956
957
958
959
960
961
962
963
964
965
966
967
968
969
970
971
972
973
974
975
976
977
978
979
980
981
982
983
984
985
986
987
988
989
990
991
992
993
994
995
996
997
998
999
1000
// Code generated by protoc-gen-gogo. DO NOT EDIT.
// source: protocol.proto
package protocol
import proto "github.com/gogo/protobuf/proto"
import fmt "fmt"
import math "math"
import _ "github.com/gogo/protobuf/gogoproto"
import _ "github.com/golang/protobuf/ptypes/duration"
import time "time"
import (
context "golang.org/x/net/context"
grpc "google.golang.org/grpc"
)
import encoding_binary "encoding/binary"
import github_com_gogo_protobuf_types "github.com/gogo/protobuf/types"
import io "io"
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
var _ = time.Kitchen

// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package
// Status is a response status code, used universally across Gazette RPC APIs.
type Status int32

const (
	Status_OK Status = 0
	// The named journal does not exist.
	Status_JOURNAL_NOT_FOUND Status = 1
	// There is no current primary broker for the journal. This is a temporary
	// condition which should quickly resolve, assuming sufficient broker capacity.
	Status_NO_JOURNAL_PRIMARY_BROKER Status = 2
	// The present broker is not the assigned primary broker for the journal.
	Status_NOT_JOURNAL_PRIMARY_BROKER Status = 3
	// The present broker is not an assigned broker for the journal.
	// NOTE(review): numeric values 5 and 4 are intentionally out of declaration
	// order here; they mirror the tag values assigned in protocol.proto.
	Status_NOT_JOURNAL_BROKER Status = 5
	// There are an insufficient number of assigned brokers for the journal
	// to meet its required replication.
	Status_INSUFFICIENT_JOURNAL_BROKERS Status = 4
	// The requested offset is not yet available. This indicates either that the
	// offset has not yet been written, or that the broker is not yet aware of a
	// written fragment covering the offset. Returned only by non-blocking reads.
	Status_OFFSET_NOT_YET_AVAILABLE Status = 6
	// The peer disagrees with the Route accompanying a ReplicateRequest.
	Status_WRONG_ROUTE Status = 7
	// The peer disagrees with the Fragment proposal accompanying a ReplicateRequest.
	Status_FRAGMENT_MISMATCH Status = 8
	// The Etcd transaction failed. Returned by Update RPC when an
	// expect_mod_revision of the UpdateRequest differs from the current
	// ModRevision of the JournalSpec within the store.
	Status_ETCD_TRANSACTION_FAILED Status = 9
	// A disallowed journal access was attempted (eg, a write where the
	// journal disables writes, or read where journals disable reads).
	Status_NOT_ALLOWED Status = 10
	// The Append is refused because its requested offset is not equal
	// to the furthest written offset of the journal.
	Status_WRONG_APPEND_OFFSET Status = 11
	// The Append is refused because the replication pipeline tracks a smaller
	// journal offset than that of the remote fragment index. This indicates
	// that journal replication consistency has been lost in the past, due to
	// too many broker or Etcd failures.
	Status_INDEX_HAS_GREATER_OFFSET Status = 12
)

// Status_name maps each Status value to its proto enum name.
var Status_name = map[int32]string{
	0:  "OK",
	1:  "JOURNAL_NOT_FOUND",
	2:  "NO_JOURNAL_PRIMARY_BROKER",
	3:  "NOT_JOURNAL_PRIMARY_BROKER",
	5:  "NOT_JOURNAL_BROKER",
	4:  "INSUFFICIENT_JOURNAL_BROKERS",
	6:  "OFFSET_NOT_YET_AVAILABLE",
	7:  "WRONG_ROUTE",
	8:  "FRAGMENT_MISMATCH",
	9:  "ETCD_TRANSACTION_FAILED",
	10: "NOT_ALLOWED",
	11: "WRONG_APPEND_OFFSET",
	12: "INDEX_HAS_GREATER_OFFSET",
}

// Status_value is the inverse of Status_name: proto enum name to value.
var Status_value = map[string]int32{
	"OK":                           0,
	"JOURNAL_NOT_FOUND":            1,
	"NO_JOURNAL_PRIMARY_BROKER":    2,
	"NOT_JOURNAL_PRIMARY_BROKER":   3,
	"NOT_JOURNAL_BROKER":           5,
	"INSUFFICIENT_JOURNAL_BROKERS": 4,
	"OFFSET_NOT_YET_AVAILABLE":     6,
	"WRONG_ROUTE":                  7,
	"FRAGMENT_MISMATCH":            8,
	"ETCD_TRANSACTION_FAILED":      9,
	"NOT_ALLOWED":                  10,
	"WRONG_APPEND_OFFSET":          11,
	"INDEX_HAS_GREATER_OFFSET":     12,
}

// String returns the proto enum name of x.
func (x Status) String() string {
	return proto.EnumName(Status_name, int32(x))
}

// EnumDescriptor returns the compressed file descriptor and the enum's path within it.
func (Status) EnumDescriptor() ([]byte, []int) {
	return fileDescriptor_protocol_ffc263d8ecf7e451, []int{0}
}
// CompressionCodec defines codecs known to Gazette.
type CompressionCodec int32

const (
	// INVALID is the zero-valued CompressionCodec, and is not a valid codec.
	CompressionCodec_INVALID CompressionCodec = 0
	// NONE encodes Fragments without any applied compression, with default suffix ".raw".
	CompressionCodec_NONE CompressionCodec = 1
	// GZIP encodes Fragments using the Gzip library, with default suffix ".gz".
	CompressionCodec_GZIP CompressionCodec = 2
	// ZSTANDARD encodes Fragments using the ZStandard library, with default suffix ".zst".
	CompressionCodec_ZSTANDARD CompressionCodec = 3
	// SNAPPY encodes Fragments using the Snappy library, with default suffix ".sz".
	CompressionCodec_SNAPPY CompressionCodec = 4
	// GZIP_OFFLOAD_DECOMPRESSION is the GZIP codec with additional behavior
	// around reads and writes to remote Fragment stores, designed to offload
	// the work of decompression onto compatible stores. Specifically:
	//  * Fragments are written with a "Content-Encoding: gzip" header.
	//  * Client read requests are made with "Accept-Encoding: identity".
	// This can be helpful in contexts where reader IO bandwidth to the storage
	// API is unconstrained, as the cost of decompression is offloaded to the
	// store and CPU-intensive batch readers may receive a parallelism benefit.
	// While this codec may provide substantial read-time performance improvements,
	// it is an advanced configuration and the "Content-Encoding" header handling
	// can be subtle and sometimes confusing. It uses the default suffix ".gzod".
	CompressionCodec_GZIP_OFFLOAD_DECOMPRESSION CompressionCodec = 5
)

// CompressionCodec_name maps each codec value to its proto enum name.
var CompressionCodec_name = map[int32]string{
	0: "INVALID",
	1: "NONE",
	2: "GZIP",
	3: "ZSTANDARD",
	4: "SNAPPY",
	5: "GZIP_OFFLOAD_DECOMPRESSION",
}

// CompressionCodec_value is the inverse of CompressionCodec_name.
var CompressionCodec_value = map[string]int32{
	"INVALID":                    0,
	"NONE":                       1,
	"GZIP":                       2,
	"ZSTANDARD":                  3,
	"SNAPPY":                     4,
	"GZIP_OFFLOAD_DECOMPRESSION": 5,
}

// String returns the proto enum name of x.
func (x CompressionCodec) String() string {
	return proto.EnumName(CompressionCodec_name, int32(x))
}

// EnumDescriptor returns the compressed file descriptor and the enum's path within it.
func (CompressionCodec) EnumDescriptor() ([]byte, []int) {
	return fileDescriptor_protocol_ffc263d8ecf7e451, []int{1}
}
// Flags define Journal IO control behaviors. Where possible, flags are named
// after an equivalent POSIX flag.
type JournalSpec_Flag int32

const (
	// NOT_SPECIFIED is considered as equivalent to O_RDWR by the broker. When
	// JournalSpecs are union'ed (eg, by the `journalspace` pkg), NOT_SPECIFIED
	// is considered as unset relative to any other non-zero Flag value.
	JournalSpec_NOT_SPECIFIED JournalSpec_Flag = 0
	// The Journal is available for reads (only).
	JournalSpec_O_RDONLY JournalSpec_Flag = 1
	// The Journal is available for writes (only).
	JournalSpec_O_WRONLY JournalSpec_Flag = 2
	// The Journal may be used for reads or writes.
	// NOTE(review): value 4 (not 3) so flags combine as or'ed bitfields;
	// O_RDWR is a distinct bit rather than O_RDONLY|O_WRONLY.
	JournalSpec_O_RDWR JournalSpec_Flag = 4
)

// JournalSpec_Flag_name maps each flag value to its proto enum name.
var JournalSpec_Flag_name = map[int32]string{
	0: "NOT_SPECIFIED",
	1: "O_RDONLY",
	2: "O_WRONLY",
	4: "O_RDWR",
}

// JournalSpec_Flag_value is the inverse of JournalSpec_Flag_name.
var JournalSpec_Flag_value = map[string]int32{
	"NOT_SPECIFIED": 0,
	"O_RDONLY":      1,
	"O_WRONLY":      2,
	"O_RDWR":        4,
}

// String returns the proto enum name of x.
func (x JournalSpec_Flag) String() string {
	return proto.EnumName(JournalSpec_Flag_name, int32(x))
}

// EnumDescriptor returns the compressed file descriptor and the nested enum's path within it.
func (JournalSpec_Flag) EnumDescriptor() ([]byte, []int) {
	return fileDescriptor_protocol_ffc263d8ecf7e451, []int{3, 0}
}
// Label defines a key & value pair which can be attached to entities like
// JournalSpecs and BrokerSpecs. Labels may be used to provide identifying
// attributes which do not directly imply semantics to the core system, but
// are meaningful to users or for higher-level Gazette tools.
type Label struct {
	Name  string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
	Value string `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"`
}

// Reset restores m to its zero value.
func (m *Label) Reset() { *m = Label{} }

// String renders m in the proto compact-text format.
func (m *Label) String() string { return proto.CompactTextString(m) }

// ProtoMessage marks Label as a protobuf message.
func (*Label) ProtoMessage() {}

// Descriptor returns the compressed file descriptor and the message's path within it.
func (*Label) Descriptor() ([]byte, []int) {
	return fileDescriptor_protocol_ffc263d8ecf7e451, []int{0}
}

// XXX_Unmarshal delegates to the generated Unmarshal (defined elsewhere in this file).
func (m *Label) XXX_Unmarshal(b []byte) error {
	return m.Unmarshal(b)
}

// XXX_Marshal uses the reflection-based marshaller when deterministic output is
// required, and otherwise the faster generated MarshalTo into b's spare capacity.
func (m *Label) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	if deterministic {
		return xxx_messageInfo_Label.Marshal(b, m, deterministic)
	} else {
		b = b[:cap(b)]
		n, err := m.MarshalTo(b)
		if err != nil {
			return nil, err
		}
		return b[:n], nil
	}
}

// XXX_Merge merges src into dst.
func (dst *Label) XXX_Merge(src proto.Message) {
	xxx_messageInfo_Label.Merge(dst, src)
}

// XXX_Size reports the encoded size of m.
func (m *Label) XXX_Size() int {
	return m.ProtoSize()
}

// XXX_DiscardUnknown drops any unknown fields retained by m.
func (m *Label) XXX_DiscardUnknown() {
	xxx_messageInfo_Label.DiscardUnknown(m)
}

// xxx_messageInfo_Label caches reflection metadata for Label.
var xxx_messageInfo_Label proto.InternalMessageInfo
// LabelSet is a collection of labels and their values.
type LabelSet struct {
	// Labels of the set. Instances must be unique and sorted over (Name, Value).
	Labels []Label `protobuf:"bytes,1,rep,name=labels" json:"labels" yaml:",omitempty"`
}

// Reset restores m to its zero value.
func (m *LabelSet) Reset() { *m = LabelSet{} }

// String renders m in the proto compact-text format.
func (m *LabelSet) String() string { return proto.CompactTextString(m) }

// ProtoMessage marks LabelSet as a protobuf message.
func (*LabelSet) ProtoMessage() {}

// Descriptor returns the compressed file descriptor and the message's path within it.
func (*LabelSet) Descriptor() ([]byte, []int) {
	return fileDescriptor_protocol_ffc263d8ecf7e451, []int{1}
}

// XXX_Unmarshal delegates to the generated Unmarshal.
func (m *LabelSet) XXX_Unmarshal(b []byte) error {
	return m.Unmarshal(b)
}

// XXX_Marshal uses the reflection-based marshaller when deterministic output is
// required, and otherwise the faster generated MarshalTo into b's spare capacity.
func (m *LabelSet) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	if deterministic {
		return xxx_messageInfo_LabelSet.Marshal(b, m, deterministic)
	} else {
		b = b[:cap(b)]
		n, err := m.MarshalTo(b)
		if err != nil {
			return nil, err
		}
		return b[:n], nil
	}
}

// XXX_Merge merges src into dst.
func (dst *LabelSet) XXX_Merge(src proto.Message) {
	xxx_messageInfo_LabelSet.Merge(dst, src)
}

// XXX_Size reports the encoded size of m.
func (m *LabelSet) XXX_Size() int {
	return m.ProtoSize()
}

// XXX_DiscardUnknown drops any unknown fields retained by m.
func (m *LabelSet) XXX_DiscardUnknown() {
	xxx_messageInfo_LabelSet.DiscardUnknown(m)
}

// xxx_messageInfo_LabelSet caches reflection metadata for LabelSet.
var xxx_messageInfo_LabelSet proto.InternalMessageInfo
// LabelSelector defines a filter over LabelSets.
type LabelSelector struct {
	// Include is Labels which must be matched for a LabelSet to be selected. If
	// empty, all Labels are included. An include Label with empty ("") value is
	// matched by a Label of the same name having any value.
	Include LabelSet `protobuf:"bytes,1,opt,name=include" json:"include"`
	// Exclude is Labels which cannot be matched for a LabelSet to be selected. If
	// empty, no Labels are excluded. An exclude Label with empty ("") value
	// excludes a Label of the same name having any value.
	Exclude LabelSet `protobuf:"bytes,2,opt,name=exclude" json:"exclude"`
}

// Reset restores m to its zero value.
func (m *LabelSelector) Reset() { *m = LabelSelector{} }

// ProtoMessage marks LabelSelector as a protobuf message.
// NOTE(review): unlike sibling messages, no generated String method appears
// here — presumably a custom String is defined elsewhere in the package.
func (*LabelSelector) ProtoMessage() {}

// Descriptor returns the compressed file descriptor and the message's path within it.
func (*LabelSelector) Descriptor() ([]byte, []int) {
	return fileDescriptor_protocol_ffc263d8ecf7e451, []int{2}
}

// XXX_Unmarshal delegates to the generated Unmarshal.
func (m *LabelSelector) XXX_Unmarshal(b []byte) error {
	return m.Unmarshal(b)
}

// XXX_Marshal uses the reflection-based marshaller when deterministic output is
// required, and otherwise the faster generated MarshalTo into b's spare capacity.
func (m *LabelSelector) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	if deterministic {
		return xxx_messageInfo_LabelSelector.Marshal(b, m, deterministic)
	} else {
		b = b[:cap(b)]
		n, err := m.MarshalTo(b)
		if err != nil {
			return nil, err
		}
		return b[:n], nil
	}
}

// XXX_Merge merges src into dst.
func (dst *LabelSelector) XXX_Merge(src proto.Message) {
	xxx_messageInfo_LabelSelector.Merge(dst, src)
}

// XXX_Size reports the encoded size of m.
func (m *LabelSelector) XXX_Size() int {
	return m.ProtoSize()
}

// XXX_DiscardUnknown drops any unknown fields retained by m.
func (m *LabelSelector) XXX_DiscardUnknown() {
	xxx_messageInfo_LabelSelector.DiscardUnknown(m)
}

// xxx_messageInfo_LabelSelector caches reflection metadata for LabelSelector.
var xxx_messageInfo_LabelSelector proto.InternalMessageInfo
// JournalSpec describes a Journal and its configuration.
type JournalSpec struct {
	// Name of the Journal.
	Name Journal `protobuf:"bytes,1,opt,name=name,proto3,casttype=Journal" json:"name,omitempty" yaml:",omitempty"`
	// Desired replication of this Journal. This defines the Journal's tolerance
	// to broker failures before data loss can occur (eg, a replication factor
	// of three means two failures are tolerated).
	Replication int32 `protobuf:"varint,2,opt,name=replication,proto3" json:"replication,omitempty" yaml:",omitempty"`
	// User-defined Labels of this JournalSpec. Two label names are reserved
	// and may not be used within a JournalSpec's Labels: "name" and "prefix".
	// Embedded (anonymous) so LabelSet's fields inline into JournalSpec's YAML/JSON form.
	LabelSet `protobuf:"bytes,3,opt,name=labels,embedded=labels" json:"labels" yaml:",omitempty,inline"`
	// Fragment is the Journal's fragment-related configuration.
	Fragment JournalSpec_Fragment `protobuf:"bytes,4,opt,name=fragment" json:"fragment" yaml:",omitempty"`
	// Flags of the Journal, as a combination of Flag enum values. The Flag enum
	// not used directly, as protobuf enums do not allow for or'ed bitfields.
	Flags JournalSpec_Flag `protobuf:"varint,6,opt,name=flags,proto3,casttype=JournalSpec_Flag" json:"flags,omitempty" yaml:",omitempty"`
}

// Reset restores m to its zero value.
func (m *JournalSpec) Reset() { *m = JournalSpec{} }

// String renders m in the proto compact-text format.
func (m *JournalSpec) String() string { return proto.CompactTextString(m) }

// ProtoMessage marks JournalSpec as a protobuf message.
func (*JournalSpec) ProtoMessage() {}

// Descriptor returns the compressed file descriptor and the message's path within it.
func (*JournalSpec) Descriptor() ([]byte, []int) {
	return fileDescriptor_protocol_ffc263d8ecf7e451, []int{3}
}

// XXX_Unmarshal delegates to the generated Unmarshal.
func (m *JournalSpec) XXX_Unmarshal(b []byte) error {
	return m.Unmarshal(b)
}

// XXX_Marshal uses the reflection-based marshaller when deterministic output is
// required, and otherwise the faster generated MarshalTo into b's spare capacity.
func (m *JournalSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	if deterministic {
		return xxx_messageInfo_JournalSpec.Marshal(b, m, deterministic)
	} else {
		b = b[:cap(b)]
		n, err := m.MarshalTo(b)
		if err != nil {
			return nil, err
		}
		return b[:n], nil
	}
}

// XXX_Merge merges src into dst.
func (dst *JournalSpec) XXX_Merge(src proto.Message) {
	xxx_messageInfo_JournalSpec.Merge(dst, src)
}

// XXX_Size reports the encoded size of m.
func (m *JournalSpec) XXX_Size() int {
	return m.ProtoSize()
}

// XXX_DiscardUnknown drops any unknown fields retained by m.
func (m *JournalSpec) XXX_DiscardUnknown() {
	xxx_messageInfo_JournalSpec.DiscardUnknown(m)
}

// xxx_messageInfo_JournalSpec caches reflection metadata for JournalSpec.
var xxx_messageInfo_JournalSpec proto.InternalMessageInfo
// Fragment is JournalSpec configuration which pertains to the creation,
// persistence, and indexing of the Journal's Fragments.
type JournalSpec_Fragment struct {
	// Target content length of each Fragment. In normal operation after Fragments
	// reach at least this length, they will be closed and new ones begun. Note
	// lengths may be smaller at times (eg, due to changes in Journal routing
	// topology). Content length differs from Fragment file size, in that the
	// former reflects uncompressed bytes.
	Length int64 `protobuf:"varint,1,opt,name=length,proto3" json:"length,omitempty" yaml:",omitempty"`
	// Codec used to compress Journal Fragments.
	CompressionCodec CompressionCodec `protobuf:"varint,2,opt,name=compression_codec,json=compressionCodec,proto3,enum=protocol.CompressionCodec" json:"compression_codec,omitempty" yaml:"compression_codec,omitempty"`
	// Storage backend base path for this Journal's Fragments. Must be in URL
	// form, with the choice of backend defined by the scheme. The full path of
	// a Journal's Fragment is derived by joining the fragment_store path with
	// the Fragment's ContentPath. Eg, given a fragment_store of
	// "s3://My-AWS-bucket/a/prefix" and a JournalSpec of name "my/journal",
	// a complete Fragment path might be:
	// "s3://My-AWS-bucket/a/prefix/my/journal/000123-000456-789abcdef.gzip
	//
	// Multiple fragment_stores may be specified, in which case the Journal's
	// Fragments are the union of all Fragments present across all stores, and
	// new Fragments always persist to the first specified store. This can be
	// helpful in performing incremental migrations, where new Journal content is
	// written to the new store, while content in the old store remains available
	// (and, depending on fragment_retention or recovery log pruning, may
	// eventually be removed).
	//
	// If no fragment_stores are specified, the Journal is still useable but will
	// not persist Fragments to any a backing fragment store. This allows for
	// real-time streaming use cases where reads of historical data are not needed.
	Stores []FragmentStore `protobuf:"bytes,3,rep,name=stores,casttype=FragmentStore" json:"stores,omitempty" yaml:",omitempty"`
	// Interval of time between refreshes of remote Fragment listings from
	// configured fragment_stores.
	RefreshInterval time.Duration `protobuf:"bytes,4,opt,name=refresh_interval,json=refreshInterval,stdduration" json:"refresh_interval" yaml:"refresh_interval,omitempty"`
	// Retention duration for historical Fragments of this Journal within the
	// Fragment stores. If less than or equal to zero, Fragments are retained
	// indefinetely.
	Retention time.Duration `protobuf:"bytes,5,opt,name=retention,stdduration" json:"retention" yaml:",omitempty"`
	// Flush interval defines a UTC time segment, since epoch time,
	// after which a spool must be flushed to the FragmentStore.
	FlushInterval time.Duration `protobuf:"bytes,6,opt,name=flush_interval,json=flushInterval,stdduration" json:"flush_interval" yaml:"flush_interval,omitempty"`
}

// Reset restores m to its zero value.
func (m *JournalSpec_Fragment) Reset() { *m = JournalSpec_Fragment{} }

// String renders m in the proto compact-text format.
func (m *JournalSpec_Fragment) String() string { return proto.CompactTextString(m) }

// ProtoMessage marks JournalSpec_Fragment as a protobuf message.
func (*JournalSpec_Fragment) ProtoMessage() {}

// Descriptor returns the compressed file descriptor and the nested message's path within it.
func (*JournalSpec_Fragment) Descriptor() ([]byte, []int) {
	return fileDescriptor_protocol_ffc263d8ecf7e451, []int{3, 0}
}

// XXX_Unmarshal delegates to the generated Unmarshal.
func (m *JournalSpec_Fragment) XXX_Unmarshal(b []byte) error {
	return m.Unmarshal(b)
}

// XXX_Marshal uses the reflection-based marshaller when deterministic output is
// required, and otherwise the faster generated MarshalTo into b's spare capacity.
func (m *JournalSpec_Fragment) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	if deterministic {
		return xxx_messageInfo_JournalSpec_Fragment.Marshal(b, m, deterministic)
	} else {
		b = b[:cap(b)]
		n, err := m.MarshalTo(b)
		if err != nil {
			return nil, err
		}
		return b[:n], nil
	}
}

// XXX_Merge merges src into dst.
func (dst *JournalSpec_Fragment) XXX_Merge(src proto.Message) {
	xxx_messageInfo_JournalSpec_Fragment.Merge(dst, src)
}

// XXX_Size reports the encoded size of m.
func (m *JournalSpec_Fragment) XXX_Size() int {
	return m.ProtoSize()
}

// XXX_DiscardUnknown drops any unknown fields retained by m.
func (m *JournalSpec_Fragment) XXX_DiscardUnknown() {
	xxx_messageInfo_JournalSpec_Fragment.DiscardUnknown(m)
}

// xxx_messageInfo_JournalSpec_Fragment caches reflection metadata for JournalSpec_Fragment.
var xxx_messageInfo_JournalSpec_Fragment proto.InternalMessageInfo
// ProcessSpec describes a uniquely identified process and its addressable endpoint.
type ProcessSpec struct {
	// Id is the unique (zone, suffix) identity of the process.
	Id ProcessSpec_ID `protobuf:"bytes,1,opt,name=id" json:"id"`
	// Advertised URL of the process.
	Endpoint Endpoint `protobuf:"bytes,2,opt,name=endpoint,proto3,casttype=Endpoint" json:"endpoint,omitempty"`
}

// Reset restores m to its zero value.
func (m *ProcessSpec) Reset() { *m = ProcessSpec{} }

// String renders m in the proto compact-text format.
func (m *ProcessSpec) String() string { return proto.CompactTextString(m) }

// ProtoMessage marks ProcessSpec as a protobuf message.
func (*ProcessSpec) ProtoMessage() {}

// Descriptor returns the compressed file descriptor and the message's path within it.
func (*ProcessSpec) Descriptor() ([]byte, []int) {
	return fileDescriptor_protocol_ffc263d8ecf7e451, []int{4}
}

// XXX_Unmarshal delegates to the generated Unmarshal.
func (m *ProcessSpec) XXX_Unmarshal(b []byte) error {
	return m.Unmarshal(b)
}

// XXX_Marshal uses the reflection-based marshaller when deterministic output is
// required, and otherwise the faster generated MarshalTo into b's spare capacity.
func (m *ProcessSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	if deterministic {
		return xxx_messageInfo_ProcessSpec.Marshal(b, m, deterministic)
	} else {
		b = b[:cap(b)]
		n, err := m.MarshalTo(b)
		if err != nil {
			return nil, err
		}
		return b[:n], nil
	}
}

// XXX_Merge merges src into dst.
func (dst *ProcessSpec) XXX_Merge(src proto.Message) {
	xxx_messageInfo_ProcessSpec.Merge(dst, src)
}

// XXX_Size reports the encoded size of m.
func (m *ProcessSpec) XXX_Size() int {
	return m.ProtoSize()
}

// XXX_DiscardUnknown drops any unknown fields retained by m.
func (m *ProcessSpec) XXX_DiscardUnknown() {
	xxx_messageInfo_ProcessSpec.DiscardUnknown(m)
}

// xxx_messageInfo_ProcessSpec caches reflection metadata for ProcessSpec.
var xxx_messageInfo_ProcessSpec proto.InternalMessageInfo

// GetId returns the Id field, or a zero ProcessSpec_ID if m is nil.
// (Generated getters are nil-safe by convention.)
func (m *ProcessSpec) GetId() ProcessSpec_ID {
	if m != nil {
		return m.Id
	}
	return ProcessSpec_ID{}
}

// GetEndpoint returns the Endpoint field, or "" if m is nil.
func (m *ProcessSpec) GetEndpoint() Endpoint {
	if m != nil {
		return m.Endpoint
	}
	return ""
}
// ID composes a zone and a suffix to uniquely identify a ProcessSpec.
type ProcessSpec_ID struct {
	// "Zone" in which the process is running. Zones may be AWS, Azure, or Google
	// Cloud Platform zone identifiers, or rack locations within a colo, or
	// given some other custom meaning. Gazette will replicate across multiple
	// zones, and seeks to minimize traffic which must cross zones (for example,
	// by proxying reads to a broker in the current zone).
	Zone string `protobuf:"bytes,1,opt,name=zone,proto3" json:"zone,omitempty"`
	// Unique suffix of the process within |zone|. It is permissible for a
	// suffix value to repeat across zones, but never within zones. In practice,
	// it's recommended to use a FQDN, Kubernetes Pod name, or comparable unique
	// and self-describing value as the ID suffix.
	Suffix string `protobuf:"bytes,2,opt,name=suffix,proto3" json:"suffix,omitempty"`
}

// Reset restores m to its zero value.
func (m *ProcessSpec_ID) Reset() { *m = ProcessSpec_ID{} }

// String renders m in the proto compact-text format.
func (m *ProcessSpec_ID) String() string { return proto.CompactTextString(m) }

// ProtoMessage marks ProcessSpec_ID as a protobuf message.
func (*ProcessSpec_ID) ProtoMessage() {}

// Descriptor returns the compressed file descriptor and the nested message's path within it.
func (*ProcessSpec_ID) Descriptor() ([]byte, []int) {
	return fileDescriptor_protocol_ffc263d8ecf7e451, []int{4, 0}
}

// XXX_Unmarshal delegates to the generated Unmarshal.
func (m *ProcessSpec_ID) XXX_Unmarshal(b []byte) error {
	return m.Unmarshal(b)
}

// XXX_Marshal uses the reflection-based marshaller when deterministic output is
// required, and otherwise the faster generated MarshalTo into b's spare capacity.
func (m *ProcessSpec_ID) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	if deterministic {
		return xxx_messageInfo_ProcessSpec_ID.Marshal(b, m, deterministic)
	} else {
		b = b[:cap(b)]
		n, err := m.MarshalTo(b)
		if err != nil {
			return nil, err
		}
		return b[:n], nil
	}
}

// XXX_Merge merges src into dst.
func (dst *ProcessSpec_ID) XXX_Merge(src proto.Message) {
	xxx_messageInfo_ProcessSpec_ID.Merge(dst, src)
}

// XXX_Size reports the encoded size of m.
func (m *ProcessSpec_ID) XXX_Size() int {
	return m.ProtoSize()
}

// XXX_DiscardUnknown drops any unknown fields retained by m.
func (m *ProcessSpec_ID) XXX_DiscardUnknown() {
	xxx_messageInfo_ProcessSpec_ID.DiscardUnknown(m)
}

// xxx_messageInfo_ProcessSpec_ID caches reflection metadata for ProcessSpec_ID.
var xxx_messageInfo_ProcessSpec_ID proto.InternalMessageInfo
// BrokerSpec describes a Gazette broker and its configuration.
type BrokerSpec struct {
	// ProcessSpec of the broker. Embedded (anonymous) so its fields inline
	// into BrokerSpec's YAML form.
	ProcessSpec `protobuf:"bytes,1,opt,name=process_spec,json=processSpec,embedded=process_spec" json:"process_spec" yaml:",inline"`
	// Maximum number of assigned Journal replicas.
	JournalLimit uint32 `protobuf:"varint,2,opt,name=journal_limit,json=journalLimit,proto3" json:"journal_limit,omitempty"`
}

// Reset restores m to its zero value.
func (m *BrokerSpec) Reset() { *m = BrokerSpec{} }

// String renders m in the proto compact-text format.
func (m *BrokerSpec) String() string { return proto.CompactTextString(m) }

// ProtoMessage marks BrokerSpec as a protobuf message.
func (*BrokerSpec) ProtoMessage() {}

// Descriptor returns the compressed file descriptor and the message's path within it.
func (*BrokerSpec) Descriptor() ([]byte, []int) {
	return fileDescriptor_protocol_ffc263d8ecf7e451, []int{5}
}

// XXX_Unmarshal delegates to the generated Unmarshal.
func (m *BrokerSpec) XXX_Unmarshal(b []byte) error {
	return m.Unmarshal(b)
}

// XXX_Marshal uses the reflection-based marshaller when deterministic output is
// required, and otherwise the faster generated MarshalTo into b's spare capacity.
func (m *BrokerSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	if deterministic {
		return xxx_messageInfo_BrokerSpec.Marshal(b, m, deterministic)
	} else {
		b = b[:cap(b)]
		n, err := m.MarshalTo(b)
		if err != nil {
			return nil, err
		}
		return b[:n], nil
	}
}

// XXX_Merge merges src into dst.
func (dst *BrokerSpec) XXX_Merge(src proto.Message) {
	xxx_messageInfo_BrokerSpec.Merge(dst, src)
}

// XXX_Size reports the encoded size of m.
func (m *BrokerSpec) XXX_Size() int {
	return m.ProtoSize()
}

// XXX_DiscardUnknown drops any unknown fields retained by m.
func (m *BrokerSpec) XXX_DiscardUnknown() {
	xxx_messageInfo_BrokerSpec.DiscardUnknown(m)
}

// xxx_messageInfo_BrokerSpec caches reflection metadata for BrokerSpec.
var xxx_messageInfo_BrokerSpec proto.InternalMessageInfo
// Fragment is a content-addressed description of a contiguous Journal span,
// defined by the [begin, end) offset range covered by the Fragment and the
// SHA1 sum of the corresponding Journal content.
type Fragment struct {
	// Journal of the Fragment.
	Journal Journal `protobuf:"bytes,1,opt,name=journal,proto3,casttype=Journal" json:"journal,omitempty"`
	// Begin (inclusive) and end (exclusive) offset of the Fragment within the Journal.
	Begin int64 `protobuf:"varint,2,opt,name=begin,proto3" json:"begin,omitempty"`
	End   int64 `protobuf:"varint,3,opt,name=end,proto3" json:"end,omitempty"`
	// SHA1 sum of the Fragment's content.
	Sum SHA1Sum `protobuf:"bytes,4,opt,name=sum" json:"sum"`
	// Codec with which the Fragment's content is compressed.
	CompressionCodec CompressionCodec `protobuf:"varint,5,opt,name=compression_codec,json=compressionCodec,proto3,enum=protocol.CompressionCodec" json:"compression_codec,omitempty"`
	// Fragment store which backs the Fragment. Empty if the Fragment has yet to
	// be persisted and is still local to a Broker.
	BackingStore FragmentStore `protobuf:"bytes,6,opt,name=backing_store,json=backingStore,proto3,casttype=FragmentStore" json:"backing_store,omitempty"`
	// Modification timestamp of the Fragment within the backing store, represented as seconds
	// since the epoch.
	ModTime int64 `protobuf:"varint,7,opt,name=mod_time,json=modTime,proto3" json:"mod_time,omitempty"`
}

// Reset restores m to its zero value.
func (m *Fragment) Reset() { *m = Fragment{} }

// String renders m in the proto compact-text format.
func (m *Fragment) String() string { return proto.CompactTextString(m) }

// ProtoMessage marks Fragment as a protobuf message.
func (*Fragment) ProtoMessage() {}

// Descriptor returns the compressed file descriptor and the message's path within it.
func (*Fragment) Descriptor() ([]byte, []int) {
	return fileDescriptor_protocol_ffc263d8ecf7e451, []int{6}
}

// XXX_Unmarshal delegates to the generated Unmarshal.
func (m *Fragment) XXX_Unmarshal(b []byte) error {
	return m.Unmarshal(b)
}

// XXX_Marshal uses the reflection-based marshaller when deterministic output is
// required, and otherwise the faster generated MarshalTo into b's spare capacity.
func (m *Fragment) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	if deterministic {
		return xxx_messageInfo_Fragment.Marshal(b, m, deterministic)
	} else {
		b = b[:cap(b)]
		n, err := m.MarshalTo(b)
		if err != nil {
			return nil, err
		}
		return b[:n], nil
	}
}

// XXX_Merge merges src into dst.
func (dst *Fragment) XXX_Merge(src proto.Message) {
	xxx_messageInfo_Fragment.Merge(dst, src)
}

// XXX_Size reports the encoded size of m.
func (m *Fragment) XXX_Size() int {
	return m.ProtoSize()
}

// XXX_DiscardUnknown drops any unknown fields retained by m.
func (m *Fragment) XXX_DiscardUnknown() {
	xxx_messageInfo_Fragment.DiscardUnknown(m)
}

// xxx_messageInfo_Fragment caches reflection metadata for Fragment.
var xxx_messageInfo_Fragment proto.InternalMessageInfo
// SHA1Sum is a 160-bit SHA1 digest.
type SHA1Sum struct {
	// The 160-bit digest is packed into two fixed64 parts and one fixed32 part.
	Part1 uint64 `protobuf:"fixed64,1,opt,name=part1,proto3" json:"part1,omitempty"`
	Part2 uint64 `protobuf:"fixed64,2,opt,name=part2,proto3" json:"part2,omitempty"`
	Part3 uint32 `protobuf:"fixed32,3,opt,name=part3,proto3" json:"part3,omitempty"`
}

// Reset restores m to its zero value.
func (m *SHA1Sum) Reset() { *m = SHA1Sum{} }

// String renders m in the proto compact-text format.
func (m *SHA1Sum) String() string { return proto.CompactTextString(m) }

// ProtoMessage marks SHA1Sum as a protobuf message.
func (*SHA1Sum) ProtoMessage() {}

// Descriptor returns the compressed file descriptor and the message's path within it.
func (*SHA1Sum) Descriptor() ([]byte, []int) {
	return fileDescriptor_protocol_ffc263d8ecf7e451, []int{7}
}

// XXX_Unmarshal delegates to the generated Unmarshal.
func (m *SHA1Sum) XXX_Unmarshal(b []byte) error {
	return m.Unmarshal(b)
}

// XXX_Marshal uses the reflection-based marshaller when deterministic output is
// required, and otherwise the faster generated MarshalTo into b's spare capacity.
func (m *SHA1Sum) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	if deterministic {
		return xxx_messageInfo_SHA1Sum.Marshal(b, m, deterministic)
	} else {
		b = b[:cap(b)]
		n, err := m.MarshalTo(b)
		if err != nil {
			return nil, err
		}
		return b[:n], nil
	}
}

// XXX_Merge merges src into dst.
func (dst *SHA1Sum) XXX_Merge(src proto.Message) {
	xxx_messageInfo_SHA1Sum.Merge(dst, src)
}

// XXX_Size reports the encoded size of m.
func (m *SHA1Sum) XXX_Size() int {
	return m.ProtoSize()
}

// XXX_DiscardUnknown drops any unknown fields retained by m.
func (m *SHA1Sum) XXX_DiscardUnknown() {
	xxx_messageInfo_SHA1Sum.DiscardUnknown(m)
}

// xxx_messageInfo_SHA1Sum caches reflection metadata for SHA1Sum.
var xxx_messageInfo_SHA1Sum proto.InternalMessageInfo
// ReadRequest is the request message of the broker Read RPC.
type ReadRequest struct {
	// Header is attached by a proxying broker peer.
	Header *Header `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"`
	// Journal to be read.
	Journal Journal `protobuf:"bytes,2,opt,name=journal,proto3,casttype=Journal" json:"journal,omitempty"`
	// Desired offset to begin reading from. Value -1 has special handling, where
	// the read is performed from the current write head. All other positive
	// values specify a desired exact byte offset to read from. If the offset is
	// not available (eg, because it represents a portion of Journal which has
	// been permanently deleted), the broker will return the next available
	// offset. Callers should therefore always inspect the ReadResponse offset.
	Offset int64 `protobuf:"varint,3,opt,name=offset,proto3" json:"offset,omitempty"`
	// Whether the operation should block until content becomes available.
	// OFFSET_NOT_YET_AVAILABLE is returned if a non-blocking read has no ready content.
	Block bool `protobuf:"varint,4,opt,name=block,proto3" json:"block,omitempty"`
	// If do_not_proxy is true, the broker will not proxy the read to another
	// broker, or open and proxy a remote Fragment on the client's behalf.
	DoNotProxy bool `protobuf:"varint,5,opt,name=do_not_proxy,json=doNotProxy,proto3" json:"do_not_proxy,omitempty"`
	// If metadata_only is true, the broker will respond with Journal and
	// Fragment metadata but not content.
	MetadataOnly bool `protobuf:"varint,6,opt,name=metadata_only,json=metadataOnly,proto3" json:"metadata_only,omitempty"`
}

// Reset restores m to its zero value.
func (m *ReadRequest) Reset() { *m = ReadRequest{} }

// String renders m in the proto compact-text format.
func (m *ReadRequest) String() string { return proto.CompactTextString(m) }

// ProtoMessage marks ReadRequest as a protobuf message.
func (*ReadRequest) ProtoMessage() {}

// Descriptor returns the compressed file descriptor and the message's path within it.
func (*ReadRequest) Descriptor() ([]byte, []int) {
	return fileDescriptor_protocol_ffc263d8ecf7e451, []int{8}
}

// XXX_Unmarshal delegates to the generated Unmarshal.
func (m *ReadRequest) XXX_Unmarshal(b []byte) error {
	return m.Unmarshal(b)
}

// XXX_Marshal uses the reflection-based marshaller when deterministic output is
// required, and otherwise the faster generated MarshalTo into b's spare capacity.
func (m *ReadRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	if deterministic {
		return xxx_messageInfo_ReadRequest.Marshal(b, m, deterministic)
	} else {
		b = b[:cap(b)]
		n, err := m.MarshalTo(b)
		if err != nil {
			return nil, err
		}
		return b[:n], nil
	}
}

// XXX_Merge merges src into dst.
func (dst *ReadRequest) XXX_Merge(src proto.Message) {
	xxx_messageInfo_ReadRequest.Merge(dst, src)
}

// XXX_Size reports the encoded size of m.
func (m *ReadRequest) XXX_Size() int {
	return m.ProtoSize()
}

// XXX_DiscardUnknown drops any unknown fields retained by m.
func (m *ReadRequest) XXX_DiscardUnknown() {
	xxx_messageInfo_ReadRequest.DiscardUnknown(m)
}

// xxx_messageInfo_ReadRequest caches reflection metadata for ReadRequest.
var xxx_messageInfo_ReadRequest proto.InternalMessageInfo
// ReadResponse is the response message of the broker Read RPC, sent as a
// stream. Responses are either metadata (Status, Header, Offset, WriteHead,
// Fragment, FragmentUrl) or carry a chunk of journal Content; per the field
// comments below, metadata fields are not returned with a content response.
type ReadResponse struct {
	// Status of the Read RPC.
	Status Status `protobuf:"varint,1,opt,name=status,proto3,enum=protocol.Status" json:"status,omitempty"`
	// Header of the response. Accompanies the first ReadResponse of the response stream.
	Header *Header `protobuf:"bytes,2,opt,name=header" json:"header,omitempty"`
	// The effective offset of the read. See ReadRequest offset.
	Offset int64 `protobuf:"varint,3,opt,name=offset,proto3" json:"offset,omitempty"`
	// The offset to next be written, by the next append transaction served by
	// broker. In other words, the last offset through which content is
	// available to be read from the Journal. This is a metadata field and will
	// not be returned with a content response.
	WriteHead int64 `protobuf:"varint,4,opt,name=write_head,json=writeHead,proto3" json:"write_head,omitempty"`
	// Fragment to which the offset was mapped. This is a metadata field and will
	// not be returned with a content response.
	Fragment *Fragment `protobuf:"bytes,5,opt,name=fragment" json:"fragment,omitempty"`
	// If Fragment is remote, a URL from which it may be directly read.
	FragmentUrl string `protobuf:"bytes,6,opt,name=fragment_url,json=fragmentUrl,proto3" json:"fragment_url,omitempty"`
	// Content chunks of the read.
	Content []byte `protobuf:"bytes,7,opt,name=content,proto3" json:"content,omitempty"`
}
// Reset restores the message to its zero value.
func (m *ReadResponse) Reset() { *m = ReadResponse{} }

// String renders the message in the compact proto text format.
func (m *ReadResponse) String() string { return proto.CompactTextString(m) }

// ProtoMessage marks ReadResponse as a generated protobuf message.
func (*ReadResponse) ProtoMessage() {}

// Descriptor returns the compressed file descriptor bytes and the index
// path of ReadResponse within that descriptor.
func (*ReadResponse) Descriptor() ([]byte, []int) {
	return fileDescriptor_protocol_ffc263d8ecf7e451, []int{9}
}

// XXX_Unmarshal decodes wire-format bytes into the message.
func (m *ReadResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) }

// XXX_Marshal encodes the message, appending to b. Deterministic
// marshaling is delegated to the generic proto machinery; otherwise the
// generated MarshalTo fast path is used.
func (m *ReadResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	if deterministic {
		return xxx_messageInfo_ReadResponse.Marshal(b, m, deterministic)
	}
	buf := b[:cap(b)]
	size, err := m.MarshalTo(buf)
	if err != nil {
		return nil, err
	}
	return buf[:size], nil
}

// XXX_Merge merges src into the receiver.
func (m *ReadResponse) XXX_Merge(src proto.Message) {
	xxx_messageInfo_ReadResponse.Merge(m, src)
}

// XXX_Size reports the encoded size of the message in bytes.
func (m *ReadResponse) XXX_Size() int { return m.ProtoSize() }

// XXX_DiscardUnknown drops any unrecognized fields retained by the message.
func (m *ReadResponse) XXX_DiscardUnknown() {
	xxx_messageInfo_ReadResponse.DiscardUnknown(m)
}

var xxx_messageInfo_ReadResponse proto.InternalMessageInfo
// AppendRequest is the request message of the broker Append RPC, sent as a
// stream. Content chunks are appended to the named Journal; per the Content
// field comment below, the client commits the append by sending a final
// empty chunk before closing the stream.
type AppendRequest struct {
	// Header is attached by a proxying broker peer to the first AppendRequest message.
	Header *Header `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"`
	// Journal to be appended to.
	Journal Journal `protobuf:"bytes,2,opt,name=journal,proto3,casttype=Journal" json:"journal,omitempty"`
	// If do_not_proxy is true, the broker will not proxy the append if it is
	// not the current primary.
	DoNotProxy bool `protobuf:"varint,3,opt,name=do_not_proxy,json=doNotProxy,proto3" json:"do_not_proxy,omitempty"`
	// Journal offset at which the append should begin. Most clients should leave
	// at zero, which uses the broker's tracked offset. The append offset must be
	// one greater than furthest written offset of the journal, or
	// WRONG_APPEND_OFFSET is returned.
	Offset int64 `protobuf:"varint,5,opt,name=offset,proto3" json:"offset,omitempty"`
	// Content chunks to be appended. Immediately prior to closing the stream,
	// the client must send an empty chunk (eg, zero-valued AppendRequest) to
	// indicate the Append should be committed. Absence of this empty chunk
	// prior to EOF is interpreted by the broker as a rollback of the Append.
	Content []byte `protobuf:"bytes,4,opt,name=content,proto3" json:"content,omitempty"`
}
// Reset restores the message to its zero value.
func (m *AppendRequest) Reset() { *m = AppendRequest{} }

// String renders the message in the compact proto text format.
func (m *AppendRequest) String() string { return proto.CompactTextString(m) }

// ProtoMessage marks AppendRequest as a generated protobuf message.
func (*AppendRequest) ProtoMessage() {}

// Descriptor returns the compressed file descriptor bytes and the index
// path of AppendRequest within that descriptor.
func (*AppendRequest) Descriptor() ([]byte, []int) {
	return fileDescriptor_protocol_ffc263d8ecf7e451, []int{10}
}

// XXX_Unmarshal decodes wire-format bytes into the message.
func (m *AppendRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) }

// XXX_Marshal encodes the message, appending to b. Deterministic
// marshaling is delegated to the generic proto machinery; otherwise the
// generated MarshalTo fast path is used.
func (m *AppendRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	if deterministic {
		return xxx_messageInfo_AppendRequest.Marshal(b, m, deterministic)
	}
	buf := b[:cap(b)]
	size, err := m.MarshalTo(buf)
	if err != nil {
		return nil, err
	}
	return buf[:size], nil
}

// XXX_Merge merges src into the receiver.
func (m *AppendRequest) XXX_Merge(src proto.Message) {
	xxx_messageInfo_AppendRequest.Merge(m, src)
}

// XXX_Size reports the encoded size of the message in bytes.
func (m *AppendRequest) XXX_Size() int { return m.ProtoSize() }

// XXX_DiscardUnknown drops any unrecognized fields retained by the message.
func (m *AppendRequest) XXX_DiscardUnknown() {
	xxx_messageInfo_AppendRequest.DiscardUnknown(m)
}

var xxx_messageInfo_AppendRequest proto.InternalMessageInfo
// AppendResponse is the response message of the broker Append RPC.
type AppendResponse struct {
	// Status of the Append RPC.
	Status Status `protobuf:"varint,1,opt,name=status,proto3,enum=protocol.Status" json:"status,omitempty"`
	// Header of the response.
	Header Header `protobuf:"bytes,2,opt,name=header" json:"header"`
	// If status is OK, then |commit| is the Fragment which places the
	// committed Append content within the Journal.
	Commit *Fragment `protobuf:"bytes,3,opt,name=commit" json:"commit,omitempty"`
}
// Reset restores the message to its zero value.
func (m *AppendResponse) Reset() { *m = AppendResponse{} }

// String renders the message in the compact proto text format.
func (m *AppendResponse) String() string { return proto.CompactTextString(m) }

// ProtoMessage marks AppendResponse as a generated protobuf message.
func (*AppendResponse) ProtoMessage() {}

// Descriptor returns the compressed file descriptor bytes and the index
// path of AppendResponse within that descriptor.
func (*AppendResponse) Descriptor() ([]byte, []int) {
	return fileDescriptor_protocol_ffc263d8ecf7e451, []int{11}
}

// XXX_Unmarshal decodes wire-format bytes into the message.
func (m *AppendResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) }

// XXX_Marshal encodes the message, appending to b. Deterministic
// marshaling is delegated to the generic proto machinery; otherwise the
// generated MarshalTo fast path is used.
func (m *AppendResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	if deterministic {
		return xxx_messageInfo_AppendResponse.Marshal(b, m, deterministic)
	}
	buf := b[:cap(b)]
	size, err := m.MarshalTo(buf)
	if err != nil {
		return nil, err
	}
	return buf[:size], nil
}

// XXX_Merge merges src into the receiver.
func (m *AppendResponse) XXX_Merge(src proto.Message) {
	xxx_messageInfo_AppendResponse.Merge(m, src)
}

// XXX_Size reports the encoded size of the message in bytes.
func (m *AppendResponse) XXX_Size() int { return m.ProtoSize() }

// XXX_DiscardUnknown drops any unrecognized fields retained by the message.
func (m *AppendResponse) XXX_DiscardUnknown() {
	xxx_messageInfo_AppendResponse.DiscardUnknown(m)
}

var xxx_messageInfo_AppendResponse proto.InternalMessageInfo
// ReplicateRequest is the request message of the broker Replicate RPC, sent
// as a stream by the primary broker to replication peers (see the Header
// field comment below).
type ReplicateRequest struct {
	// Header defines the primary broker, Route, and Etcd Revision under which
	// this Replicate stream is being established. Each replication peer
	// independently inspects and verifies the current Journal Route topology.
	Header *Header `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"`
	// Journal to be replicated to.
	Journal Journal `protobuf:"bytes,2,opt,name=journal,proto3,casttype=Journal" json:"journal,omitempty"`
	// Proposed Fragment to commit. Also verified by each replica.
	Proposal *Fragment `protobuf:"bytes,3,opt,name=proposal" json:"proposal,omitempty"`
	// Content to be replicated.
	Content []byte `protobuf:"bytes,4,opt,name=content,proto3" json:"content,omitempty"`
	// Delta offset of |content| relative to current Fragment |end|.
	ContentDelta int64 `protobuf:"varint,5,opt,name=content_delta,json=contentDelta,proto3" json:"content_delta,omitempty"`
	// Acknowledge requests that the peer send an acknowledging ReplicateResponse
	// on successful application of the ReplicateRequest.
	Acknowledge bool `protobuf:"varint,6,opt,name=acknowledge,proto3" json:"acknowledge,omitempty"`
}
// Reset restores the message to its zero value.
func (m *ReplicateRequest) Reset() { *m = ReplicateRequest{} }

// String renders the message in the compact proto text format.
func (m *ReplicateRequest) String() string { return proto.CompactTextString(m) }

// ProtoMessage marks ReplicateRequest as a generated protobuf message.
func (*ReplicateRequest) ProtoMessage() {}

// Descriptor returns the compressed file descriptor bytes and the index
// path of ReplicateRequest within that descriptor.
func (*ReplicateRequest) Descriptor() ([]byte, []int) {
	return fileDescriptor_protocol_ffc263d8ecf7e451, []int{12}
}

// XXX_Unmarshal decodes wire-format bytes into the message.
func (m *ReplicateRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) }

// XXX_Marshal encodes the message, appending to b. Deterministic
// marshaling is delegated to the generic proto machinery; otherwise the
// generated MarshalTo fast path is used.
func (m *ReplicateRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	if deterministic {
		return xxx_messageInfo_ReplicateRequest.Marshal(b, m, deterministic)
	}
	buf := b[:cap(b)]
	size, err := m.MarshalTo(buf)
	if err != nil {
		return nil, err
	}
	return buf[:size], nil
}

// XXX_Merge merges src into the receiver.
func (m *ReplicateRequest) XXX_Merge(src proto.Message) {
	xxx_messageInfo_ReplicateRequest.Merge(m, src)
}

// XXX_Size reports the encoded size of the message in bytes.
func (m *ReplicateRequest) XXX_Size() int { return m.ProtoSize() }

// XXX_DiscardUnknown drops any unrecognized fields retained by the message.
func (m *ReplicateRequest) XXX_DiscardUnknown() {
	xxx_messageInfo_ReplicateRequest.DiscardUnknown(m)
}

var xxx_messageInfo_ReplicateRequest proto.InternalMessageInfo
// ReplicateResponse is the response message of the broker Replicate RPC,
// sent as a stream (see the Header field comment below).
type ReplicateResponse struct {
	// Status of the Replicate RPC.
	Status Status `protobuf:"varint,1,opt,name=status,proto3,enum=protocol.Status" json:"status,omitempty"`
	// Header of the response. Accompanies the first ReplicateResponse of the response stream.
	Header *Header `protobuf:"bytes,2,opt,name=header" json:"header,omitempty"`
	// If status is FRAGMENT_MISMATCH, then |fragment| is the replica's
	// Fragment at the current Journal head, which was found to be inconsistent
	// with the request |proposal| Fragment.
	Fragment *Fragment `protobuf:"bytes,3,opt,name=fragment" json:"fragment,omitempty"`
}
// Reset restores the message to its zero value.
func (m *ReplicateResponse) Reset() { *m = ReplicateResponse{} }

// String renders the message in the compact proto text format.
func (m *ReplicateResponse) String() string { return proto.CompactTextString(m) }

// ProtoMessage marks ReplicateResponse as a generated protobuf message.
func (*ReplicateResponse) ProtoMessage() {}

// Descriptor returns the compressed file descriptor bytes and the index
// path of ReplicateResponse within that descriptor.
func (*ReplicateResponse) Descriptor() ([]byte, []int) {
	return fileDescriptor_protocol_ffc263d8ecf7e451, []int{13}
}

// XXX_Unmarshal decodes wire-format bytes into the message.
func (m *ReplicateResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) }

// XXX_Marshal encodes the message, appending to b. Deterministic
// marshaling is delegated to the generic proto machinery; otherwise the
// generated MarshalTo fast path is used.
func (m *ReplicateResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	if deterministic {
		return xxx_messageInfo_ReplicateResponse.Marshal(b, m, deterministic)
	}
	buf := b[:cap(b)]
	size, err := m.MarshalTo(buf)
	if err != nil {
		return nil, err
	}
	return buf[:size], nil
}

// XXX_Merge merges src into the receiver.
func (m *ReplicateResponse) XXX_Merge(src proto.Message) {
	xxx_messageInfo_ReplicateResponse.Merge(m, src)
}

// XXX_Size reports the encoded size of the message in bytes.
func (m *ReplicateResponse) XXX_Size() int { return m.ProtoSize() }

// XXX_DiscardUnknown drops any unrecognized fields retained by the message.
func (m *ReplicateResponse) XXX_DiscardUnknown() {
	xxx_messageInfo_ReplicateResponse.DiscardUnknown(m)
}

var xxx_messageInfo_ReplicateResponse proto.InternalMessageInfo
type ListRequest struct {
// Selector optionally refines the set of journals which will be enumerated.
// If zero-valued, all journals are returned. Otherwise, only JournalSpecs
// matching the LabelSelector will be returned. Two meta-labels "name" and
// "prefix" are additionally supported by the selector, where:
// * name=examples/a-name will match a JournalSpec with Name "examples/a-name"
// * prefix=examples/ will match any JournalSpec having prefix "examples/".