forked from apache/kafka
-
Notifications
You must be signed in to change notification settings - Fork 0
/
GroupMetadataManager.java
3949 lines (3600 loc) · 171 KB
/
GroupMetadataManager.java
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932
933
934
935
936
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
955
956
957
958
959
960
961
962
963
964
965
966
967
968
969
970
971
972
973
974
975
976
977
978
979
980
981
982
983
984
985
986
987
988
989
990
991
992
993
994
995
996
997
998
999
1000
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.coordinator.group;
import org.apache.kafka.clients.consumer.internals.ConsumerProtocol;
import org.apache.kafka.common.KafkaException;
import org.apache.kafka.common.Uuid;
import org.apache.kafka.common.errors.ApiException;
import org.apache.kafka.common.errors.CoordinatorNotAvailableException;
import org.apache.kafka.common.errors.FencedMemberEpochException;
import org.apache.kafka.common.errors.GroupIdNotFoundException;
import org.apache.kafka.common.errors.GroupMaxSizeReachedException;
import org.apache.kafka.common.errors.IllegalGenerationException;
import org.apache.kafka.common.errors.InvalidRequestException;
import org.apache.kafka.common.errors.UnknownMemberIdException;
import org.apache.kafka.common.errors.UnknownServerException;
import org.apache.kafka.common.errors.UnsupportedAssignorException;
import org.apache.kafka.common.errors.FencedInstanceIdException;
import org.apache.kafka.common.errors.UnreleasedInstanceIdException;
import org.apache.kafka.common.message.ConsumerGroupDescribeResponseData;
import org.apache.kafka.common.message.ConsumerGroupHeartbeatRequestData;
import org.apache.kafka.common.message.ConsumerGroupHeartbeatResponseData;
import org.apache.kafka.common.message.DescribeGroupsResponseData;
import org.apache.kafka.common.message.HeartbeatRequestData;
import org.apache.kafka.common.message.HeartbeatResponseData;
import org.apache.kafka.common.message.JoinGroupRequestData.JoinGroupRequestProtocol;
import org.apache.kafka.common.message.JoinGroupRequestData.JoinGroupRequestProtocolCollection;
import org.apache.kafka.common.message.JoinGroupRequestData;
import org.apache.kafka.common.message.JoinGroupResponseData;
import org.apache.kafka.common.message.LeaveGroupRequestData;
import org.apache.kafka.common.message.LeaveGroupRequestData.MemberIdentity;
import org.apache.kafka.common.message.LeaveGroupResponseData;
import org.apache.kafka.common.message.LeaveGroupResponseData.MemberResponse;
import org.apache.kafka.common.message.ListGroupsResponseData;
import org.apache.kafka.common.message.SyncGroupRequestData;
import org.apache.kafka.common.message.SyncGroupResponseData;
import org.apache.kafka.common.protocol.Errors;
import org.apache.kafka.common.protocol.types.SchemaException;
import org.apache.kafka.common.requests.JoinGroupRequest;
import org.apache.kafka.common.requests.RequestContext;
import org.apache.kafka.common.utils.LogContext;
import org.apache.kafka.common.utils.Time;
import org.apache.kafka.coordinator.group.assignor.PartitionAssignor;
import org.apache.kafka.coordinator.group.assignor.PartitionAssignorException;
import org.apache.kafka.coordinator.group.consumer.Assignment;
import org.apache.kafka.coordinator.group.consumer.ConsumerGroup;
import org.apache.kafka.coordinator.group.consumer.ConsumerGroupMember;
import org.apache.kafka.coordinator.group.consumer.CurrentAssignmentBuilder;
import org.apache.kafka.coordinator.group.consumer.MemberState;
import org.apache.kafka.coordinator.group.consumer.TargetAssignmentBuilder;
import org.apache.kafka.coordinator.group.consumer.TopicMetadata;
import org.apache.kafka.coordinator.group.generated.ConsumerGroupCurrentMemberAssignmentKey;
import org.apache.kafka.coordinator.group.generated.ConsumerGroupCurrentMemberAssignmentValue;
import org.apache.kafka.coordinator.group.generated.ConsumerGroupMemberMetadataKey;
import org.apache.kafka.coordinator.group.generated.ConsumerGroupMemberMetadataValue;
import org.apache.kafka.coordinator.group.generated.ConsumerGroupMetadataKey;
import org.apache.kafka.coordinator.group.generated.ConsumerGroupMetadataValue;
import org.apache.kafka.coordinator.group.generated.ConsumerGroupPartitionMetadataKey;
import org.apache.kafka.coordinator.group.generated.ConsumerGroupPartitionMetadataValue;
import org.apache.kafka.coordinator.group.generated.ConsumerGroupTargetAssignmentMemberKey;
import org.apache.kafka.coordinator.group.generated.ConsumerGroupTargetAssignmentMemberValue;
import org.apache.kafka.coordinator.group.generated.ConsumerGroupTargetAssignmentMetadataKey;
import org.apache.kafka.coordinator.group.generated.ConsumerGroupTargetAssignmentMetadataValue;
import org.apache.kafka.coordinator.group.generated.GroupMetadataKey;
import org.apache.kafka.coordinator.group.generated.GroupMetadataValue;
import org.apache.kafka.coordinator.group.classic.ClassicGroup;
import org.apache.kafka.coordinator.group.classic.ClassicGroupMember;
import org.apache.kafka.coordinator.group.classic.ClassicGroupState;
import org.apache.kafka.coordinator.group.metrics.GroupCoordinatorMetricsShard;
import org.apache.kafka.coordinator.group.runtime.CoordinatorResult;
import org.apache.kafka.coordinator.group.runtime.CoordinatorTimer;
import org.apache.kafka.image.MetadataDelta;
import org.apache.kafka.image.MetadataImage;
import org.apache.kafka.image.TopicImage;
import org.apache.kafka.timeline.SnapshotRegistry;
import org.apache.kafka.timeline.TimelineHashMap;
import org.apache.kafka.timeline.TimelineHashSet;
import org.slf4j.Logger;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.Set;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.TimeUnit;
import java.util.function.BiFunction;
import java.util.function.Function;
import java.util.function.Predicate;
import java.util.stream.Collectors;
import java.util.stream.Stream;
import static org.apache.kafka.common.protocol.Errors.COORDINATOR_NOT_AVAILABLE;
import static org.apache.kafka.common.protocol.Errors.ILLEGAL_GENERATION;
import static org.apache.kafka.common.protocol.Errors.NOT_COORDINATOR;
import static org.apache.kafka.common.protocol.Errors.UNKNOWN_SERVER_ERROR;
import static org.apache.kafka.common.requests.ConsumerGroupHeartbeatRequest.LEAVE_GROUP_MEMBER_EPOCH;
import static org.apache.kafka.common.requests.ConsumerGroupHeartbeatRequest.LEAVE_GROUP_STATIC_MEMBER_EPOCH;
import static org.apache.kafka.common.requests.JoinGroupRequest.UNKNOWN_MEMBER_ID;
import static org.apache.kafka.coordinator.group.Group.GroupType.CONSUMER;
import static org.apache.kafka.coordinator.group.Group.GroupType.CLASSIC;
import static org.apache.kafka.coordinator.group.RecordHelpers.newCurrentAssignmentRecord;
import static org.apache.kafka.coordinator.group.RecordHelpers.newCurrentAssignmentTombstoneRecord;
import static org.apache.kafka.coordinator.group.RecordHelpers.newGroupEpochRecord;
import static org.apache.kafka.coordinator.group.RecordHelpers.newGroupSubscriptionMetadataRecord;
import static org.apache.kafka.coordinator.group.RecordHelpers.newMemberSubscriptionRecord;
import static org.apache.kafka.coordinator.group.RecordHelpers.newMemberSubscriptionTombstoneRecord;
import static org.apache.kafka.coordinator.group.RecordHelpers.newTargetAssignmentTombstoneRecord;
import static org.apache.kafka.coordinator.group.Utils.assignmentToString;
import static org.apache.kafka.coordinator.group.Utils.ofSentinel;
import static org.apache.kafka.coordinator.group.classic.ClassicGroupMember.EMPTY_ASSIGNMENT;
import static org.apache.kafka.coordinator.group.classic.ClassicGroupState.COMPLETING_REBALANCE;
import static org.apache.kafka.coordinator.group.classic.ClassicGroupState.DEAD;
import static org.apache.kafka.coordinator.group.classic.ClassicGroupState.EMPTY;
import static org.apache.kafka.coordinator.group.classic.ClassicGroupState.PREPARING_REBALANCE;
import static org.apache.kafka.coordinator.group.classic.ClassicGroupState.STABLE;
import static org.apache.kafka.coordinator.group.consumer.ConsumerGroupMember.hasAssignedPartitionsChanged;
import static org.apache.kafka.coordinator.group.metrics.GroupCoordinatorMetrics.CLASSIC_GROUP_COMPLETED_REBALANCES_SENSOR_NAME;
import static org.apache.kafka.coordinator.group.metrics.GroupCoordinatorMetrics.CONSUMER_GROUP_REBALANCES_SENSOR_NAME;
/**
* The GroupMetadataManager manages the metadata of all classic and consumer groups. It holds
* the hard and the soft state of the groups. This class has two kinds of methods:
* 1) The request handlers which handle the requests and generate a response and records to
* mutate the hard state. Those records will be written by the runtime and applied to the
* hard state via the replay methods.
* 2) The replay methods which apply records to the hard state. Those are used in the request
* handling as well as during the initial loading of the records from the partitions.
*/
public class GroupMetadataManager {
/**
 * Builder for {@link GroupMetadataManager}. Provides fluent setters for all
 * configuration values; defaults are applied and required collaborators are
 * validated in {@link #build()}.
 */
public static class Builder {
// Collaborators; null values are defaulted or rejected in build().
private LogContext logContext = null;
private SnapshotRegistry snapshotRegistry = null;
private Time time = null;
private CoordinatorTimer<Void, Record> timer = null;
private List<PartitionAssignor> consumerGroupAssignors = null;
// Consumer-group configuration defaults.
private int consumerGroupMaxSize = Integer.MAX_VALUE;
private int consumerGroupHeartbeatIntervalMs = 5000;
private int consumerGroupMetadataRefreshIntervalMs = Integer.MAX_VALUE;
private MetadataImage metadataImage = null;
private int consumerGroupSessionTimeoutMs = 45000;
// Classic-group configuration defaults.
private int classicGroupMaxSize = Integer.MAX_VALUE;
private int classicGroupInitialRebalanceDelayMs = 3000;
private int classicGroupNewMemberJoinTimeoutMs = 5 * 60 * 1000;
// NOTE(review): the following fields have no explicit defaults (0/null if unset)
// and build() does not validate them — confirm callers always set them.
private int classicGroupMinSessionTimeoutMs;
private int classicGroupMaxSessionTimeoutMs;
private ConsumerGroupMigrationPolicy consumerGroupMigrationPolicy;
private GroupCoordinatorMetricsShard metrics;
Builder withLogContext(LogContext logContext) {
this.logContext = logContext;
return this;
}
Builder withSnapshotRegistry(SnapshotRegistry snapshotRegistry) {
this.snapshotRegistry = snapshotRegistry;
return this;
}
Builder withTime(Time time) {
this.time = time;
return this;
}
Builder withTimer(CoordinatorTimer<Void, Record> timer) {
this.timer = timer;
return this;
}
Builder withConsumerGroupAssignors(List<PartitionAssignor> consumerGroupAssignors) {
this.consumerGroupAssignors = consumerGroupAssignors;
return this;
}
Builder withConsumerGroupMaxSize(int consumerGroupMaxSize) {
this.consumerGroupMaxSize = consumerGroupMaxSize;
return this;
}
Builder withConsumerGroupSessionTimeout(int consumerGroupSessionTimeoutMs) {
this.consumerGroupSessionTimeoutMs = consumerGroupSessionTimeoutMs;
return this;
}
Builder withConsumerGroupHeartbeatInterval(int consumerGroupHeartbeatIntervalMs) {
this.consumerGroupHeartbeatIntervalMs = consumerGroupHeartbeatIntervalMs;
return this;
}
Builder withConsumerGroupMetadataRefreshIntervalMs(int consumerGroupMetadataRefreshIntervalMs) {
this.consumerGroupMetadataRefreshIntervalMs = consumerGroupMetadataRefreshIntervalMs;
return this;
}
Builder withMetadataImage(MetadataImage metadataImage) {
this.metadataImage = metadataImage;
return this;
}
Builder withClassicGroupMaxSize(int classicGroupMaxSize) {
this.classicGroupMaxSize = classicGroupMaxSize;
return this;
}
Builder withClassicGroupInitialRebalanceDelayMs(int classicGroupInitialRebalanceDelayMs) {
this.classicGroupInitialRebalanceDelayMs = classicGroupInitialRebalanceDelayMs;
return this;
}
Builder withClassicGroupNewMemberJoinTimeoutMs(int classicGroupNewMemberJoinTimeoutMs) {
this.classicGroupNewMemberJoinTimeoutMs = classicGroupNewMemberJoinTimeoutMs;
return this;
}
Builder withClassicGroupMinSessionTimeoutMs(int classicGroupMinSessionTimeoutMs) {
this.classicGroupMinSessionTimeoutMs = classicGroupMinSessionTimeoutMs;
return this;
}
Builder withClassicGroupMaxSessionTimeoutMs(int classicGroupMaxSessionTimeoutMs) {
this.classicGroupMaxSessionTimeoutMs = classicGroupMaxSessionTimeoutMs;
return this;
}
Builder withConsumerGroupMigrationPolicy(ConsumerGroupMigrationPolicy consumerGroupMigrationPolicy) {
this.consumerGroupMigrationPolicy = consumerGroupMigrationPolicy;
return this;
}
Builder withGroupCoordinatorMetricsShard(GroupCoordinatorMetricsShard metrics) {
this.metrics = metrics;
return this;
}
/**
 * Builds the manager.
 *
 * @return A new {@link GroupMetadataManager}.
 * @throws IllegalArgumentException if the timer, the assignors, or the
 *         metrics shard have not been set.
 */
GroupMetadataManager build() {
// Optional collaborators get sensible defaults.
if (logContext == null) logContext = new LogContext();
if (snapshotRegistry == null) snapshotRegistry = new SnapshotRegistry(logContext);
if (metadataImage == null) metadataImage = MetadataImage.EMPTY;
if (time == null) time = Time.SYSTEM;
// Required collaborators with no sensible default.
if (timer == null)
throw new IllegalArgumentException("Timer must be set.");
if (consumerGroupAssignors == null || consumerGroupAssignors.isEmpty())
throw new IllegalArgumentException("Assignors must be set before building.");
if (metrics == null)
throw new IllegalArgumentException("GroupCoordinatorMetricsShard must be set.");
return new GroupMetadataManager(
snapshotRegistry,
logContext,
time,
timer,
metrics,
consumerGroupAssignors,
metadataImage,
consumerGroupMaxSize,
consumerGroupSessionTimeoutMs,
consumerGroupHeartbeatIntervalMs,
consumerGroupMetadataRefreshIntervalMs,
classicGroupMaxSize,
classicGroupInitialRebalanceDelayMs,
classicGroupNewMemberJoinTimeoutMs,
classicGroupMinSessionTimeoutMs,
classicGroupMaxSessionTimeoutMs,
consumerGroupMigrationPolicy
);
}
}
/**
 * The log context.
 */
private final LogContext logContext;
/**
 * The logger.
 */
private final Logger log;
/**
 * The snapshot registry.
 */
private final SnapshotRegistry snapshotRegistry;
/**
 * The system time.
 */
private final Time time;
/**
 * The system timer.
 */
private final CoordinatorTimer<Void, Record> timer;
/**
 * The coordinator metrics.
 */
private final GroupCoordinatorMetricsShard metrics;
/**
 * The supported partition assignors keyed by their name.
 */
private final Map<String, PartitionAssignor> assignors;
/**
 * The default assignor used. This is the first assignor of the list
 * passed to the constructor.
 */
private final PartitionAssignor defaultAssignor;
/**
 * The classic and consumer groups keyed by their group id.
 */
private final TimelineHashMap<String, Group> groups;
/**
 * The group ids keyed by topic names. Reverse index used to find the
 * groups interested in a given topic.
 */
private final TimelineHashMap<String, TimelineHashSet<String>> groupsByTopics;
/**
 * The maximum number of members allowed in a single consumer group.
 */
private final int consumerGroupMaxSize;
/**
 * The heartbeat interval for consumer groups.
 */
private final int consumerGroupHeartbeatIntervalMs;
/**
 * The session timeout for consumer groups.
 */
private final int consumerGroupSessionTimeoutMs;
/**
 * The metadata refresh interval.
 */
private final int consumerGroupMetadataRefreshIntervalMs;
/**
 * The metadata image. Not final: replaced when a new image is published.
 */
private MetadataImage metadataImage;
/**
 * An empty result returned to the state machine. This means that
 * there are no records to append to the log.
 *
 * Package private for testing.
 */
static final CoordinatorResult<Void, Record> EMPTY_RESULT =
new CoordinatorResult<>(Collections.emptyList(), CompletableFuture.completedFuture(null));
/**
 * The maximum number of members allowed in a single classic group.
 */
private final int classicGroupMaxSize;
/**
 * Initial rebalance delay for members joining a classic group.
 */
private final int classicGroupInitialRebalanceDelayMs;
/**
 * The timeout used to wait for a new member in milliseconds.
 */
private final int classicGroupNewMemberJoinTimeoutMs;
/**
 * The group minimum session timeout.
 */
private final int classicGroupMinSessionTimeoutMs;
/**
 * The group maximum session timeout.
 */
private final int classicGroupMaxSessionTimeoutMs;
/**
 * The config indicating whether group protocol upgrade/downgrade is allowed.
 */
private final ConsumerGroupMigrationPolicy consumerGroupMigrationPolicy;
/**
 * Private constructor; instances are created via {@link Builder}, which
 * applies defaults and validates the required collaborators.
 */
private GroupMetadataManager(
SnapshotRegistry snapshotRegistry,
LogContext logContext,
Time time,
CoordinatorTimer<Void, Record> timer,
GroupCoordinatorMetricsShard metrics,
List<PartitionAssignor> assignors,
MetadataImage metadataImage,
int consumerGroupMaxSize,
int consumerGroupSessionTimeoutMs,
int consumerGroupHeartbeatIntervalMs,
int consumerGroupMetadataRefreshIntervalMs,
int classicGroupMaxSize,
int classicGroupInitialRebalanceDelayMs,
int classicGroupNewMemberJoinTimeoutMs,
int classicGroupMinSessionTimeoutMs,
int classicGroupMaxSessionTimeoutMs,
ConsumerGroupMigrationPolicy consumerGroupMigrationPolicy
) {
this.logContext = logContext;
this.log = logContext.logger(GroupMetadataManager.class);
this.snapshotRegistry = snapshotRegistry;
this.time = time;
this.timer = timer;
this.metrics = metrics;
this.metadataImage = metadataImage;
// Index assignors by name. NOTE(review): Collectors.toMap throws on duplicate
// assignor names — presumably the builder's callers guarantee uniqueness.
this.assignors = assignors.stream().collect(Collectors.toMap(PartitionAssignor::name, Function.identity()));
// The first assignor in the list is used as the default one.
this.defaultAssignor = assignors.get(0);
this.groups = new TimelineHashMap<>(snapshotRegistry, 0);
this.groupsByTopics = new TimelineHashMap<>(snapshotRegistry, 0);
this.consumerGroupMaxSize = consumerGroupMaxSize;
this.consumerGroupSessionTimeoutMs = consumerGroupSessionTimeoutMs;
this.consumerGroupHeartbeatIntervalMs = consumerGroupHeartbeatIntervalMs;
this.consumerGroupMetadataRefreshIntervalMs = consumerGroupMetadataRefreshIntervalMs;
this.classicGroupMaxSize = classicGroupMaxSize;
this.classicGroupInitialRebalanceDelayMs = classicGroupInitialRebalanceDelayMs;
this.classicGroupNewMemberJoinTimeoutMs = classicGroupNewMemberJoinTimeoutMs;
this.classicGroupMinSessionTimeoutMs = classicGroupMinSessionTimeoutMs;
this.classicGroupMaxSessionTimeoutMs = classicGroupMaxSessionTimeoutMs;
this.consumerGroupMigrationPolicy = consumerGroupMigrationPolicy;
}
/**
 * @return The current metadata image used by the group metadata manager.
 * Note that the reference may change when a new image is published.
 */
public MetadataImage image() {
return metadataImage;
}
/**
 * Looks up a group by its id using the latest (uncommitted) state.
 *
 * @param groupId The group id.
 * @return The group corresponding to the group id.
 * @throws GroupIdNotFoundException if no group with the given id exists.
 */
public Group group(String groupId) throws GroupIdNotFoundException {
    // Long.MAX_VALUE reads the most recent timeline snapshot.
    final Group existing = groups.get(groupId, Long.MAX_VALUE);
    if (existing != null) {
        return existing;
    }
    throw new GroupIdNotFoundException(String.format("Group %s not found.", groupId));
}
/**
 * Looks up a group by its id as of the given committed offset.
 *
 * @param groupId         The group id.
 * @param committedOffset A specified committed offset corresponding to this shard.
 * @return The group corresponding to the group id at the given committed offset.
 * @throws GroupIdNotFoundException if no group with the given id exists at that offset.
 */
public Group group(String groupId, long committedOffset) throws GroupIdNotFoundException {
    final Group existing = groups.get(groupId, committedOffset);
    if (existing != null) {
        return existing;
    }
    throw new GroupIdNotFoundException(String.format("Group %s not found.", groupId));
}
/**
 * Get the Group List.
 *
 * @param statesFilter    The states of the groups we want to list.
 *                        If empty, all groups are returned with their state.
 *                        If invalid, no groups are returned.
 * @param typesFilter     The types of the groups we want to list.
 *                        If empty, all groups are returned with their type.
 *                        If invalid, no groups are returned.
 * @param committedOffset A specified committed offset corresponding to this shard.
 *
 * @return A list containing the ListGroupsResponseData.ListedGroup
 */
public List<ListGroupsResponseData.ListedGroup> listGroups(
    Set<String> statesFilter,
    Set<String> typesFilter,
    long committedOffset
) {
    // Normalize state filters (lower-case, then trimmed) for a case-insensitive comparison.
    final Set<String> normalizedStates = statesFilter.stream()
        .map(state -> state.toLowerCase().trim())
        .collect(Collectors.toSet());
    // Parse type filters into GroupType values (parsing is case-insensitive).
    final Set<Group.GroupType> requestedTypes = typesFilter.stream()
        .map(Group.GroupType::parse)
        .collect(Collectors.toSet());
    final List<ListGroupsResponseData.ListedGroup> listedGroups = new ArrayList<>();
    for (Group group : groups.values(committedOffset)) {
        // An empty filter matches everything.
        if (!statesFilter.isEmpty() && !group.isInStates(normalizedStates, committedOffset)) {
            continue;
        }
        if (!requestedTypes.isEmpty() && !requestedTypes.contains(group.type())) {
            continue;
        }
        listedGroups.add(group.asListedGroup(committedOffset));
    }
    return listedGroups;
}
/**
 * Handles a ConsumerGroupDescribe request.
 *
 * @param groupIds        The IDs of the groups to describe.
 * @param committedOffset A specified committed offset corresponding to this shard.
 *
 * @return A list containing the ConsumerGroupDescribeResponseData.DescribedGroup.
 */
public List<ConsumerGroupDescribeResponseData.DescribedGroup> consumerGroupDescribe(
    List<String> groupIds,
    long committedOffset
) {
    final List<ConsumerGroupDescribeResponseData.DescribedGroup> describedGroups =
        new ArrayList<>(groupIds.size());
    for (String groupId : groupIds) {
        try {
            final ConsumerGroup group = consumerGroup(groupId, committedOffset);
            describedGroups.add(group.asDescribedGroup(
                committedOffset,
                defaultAssignor.name(),
                metadataImage.topics()
            ));
        } catch (GroupIdNotFoundException exception) {
            // Unknown (or non-consumer) groups are reported per-entry rather than
            // failing the whole request.
            describedGroups.add(new ConsumerGroupDescribeResponseData.DescribedGroup()
                .setGroupId(groupId)
                .setErrorCode(Errors.GROUP_ID_NOT_FOUND.code())
            );
        }
    }
    return describedGroups;
}
/**
 * Handles a DescribeGroup request.
 *
 * @param groupIds        The IDs of the groups to describe.
 * @param committedOffset A specified committed offset corresponding to this shard.
 *
 * @return A list containing the DescribeGroupsResponseData.DescribedGroup.
 */
public List<DescribeGroupsResponseData.DescribedGroup> describeGroups(
    List<String> groupIds,
    long committedOffset
) {
    final List<DescribeGroupsResponseData.DescribedGroup> describedGroups = new ArrayList<>();
    for (String groupId : groupIds) {
        try {
            final ClassicGroup group = classicGroup(groupId, committedOffset);
            if (group.isInState(STABLE)) {
                // A stable group must have elected a protocol.
                final String protocol = group.protocolName().orElseThrow(
                    () -> new IllegalStateException("Invalid null group protocol for stable group"));
                describedGroups.add(new DescribeGroupsResponseData.DescribedGroup()
                    .setGroupId(groupId)
                    .setGroupState(group.stateAsString())
                    .setProtocolType(group.protocolType().orElse(""))
                    .setProtocolData(protocol)
                    .setMembers(group.allMembers().stream()
                        .map(member -> member.describe(protocol))
                        .collect(Collectors.toList())
                    )
                );
            } else {
                // Member metadata is only meaningful for stable groups.
                describedGroups.add(new DescribeGroupsResponseData.DescribedGroup()
                    .setGroupId(groupId)
                    .setGroupState(group.stateAsString())
                    .setProtocolType(group.protocolType().orElse(""))
                    .setMembers(group.allMembers().stream()
                        .map(ClassicGroupMember::describeNoMetadata)
                        .collect(Collectors.toList())
                    )
                );
            }
        } catch (GroupIdNotFoundException exception) {
            // Unknown groups are reported as DEAD, per the classic protocol.
            describedGroups.add(new DescribeGroupsResponseData.DescribedGroup()
                .setGroupId(groupId)
                .setGroupState(DEAD.toString())
            );
        }
    }
    return describedGroups;
}
/**
 * Gets or maybe creates a consumer group without updating the groups map.
 * The group will be materialized during the replay.
 *
 * @param groupId           The group id.
 * @param createIfNotExists A boolean indicating whether the group should be
 *                          created if it does not exist or is an empty classic group.
 * @param records           The record list to which the group tombstones are written
 *                          if the group is empty and is a classic group.
 *
 * @return A ConsumerGroup.
 * @throws GroupIdNotFoundException if the group does not exist and createIfNotExists is false or
 *                                  if the group is not a consumer group.
 *
 * Package private for testing.
 */
ConsumerGroup getOrMaybeCreateConsumerGroup(
String groupId,
boolean createIfNotExists,
List<Record> records
) throws GroupIdNotFoundException {
Group group = groups.get(groupId);
if (group == null && !createIfNotExists) {
throw new GroupIdNotFoundException(String.format("Consumer group %s not found.", groupId));
}
// Order matters here: maybeDeleteEmptyClassicGroup has the side effect of
// appending tombstone records when the existing group is an empty classic
// group, and is only invoked when createIfNotExists is true (short-circuit).
if (group == null || (createIfNotExists && maybeDeleteEmptyClassicGroup(group, records))) {
// A fresh group is returned but NOT put into the groups map; it is
// materialized later when the records are replayed.
return new ConsumerGroup(snapshotRegistry, groupId, metrics);
} else {
if (group.type() == CONSUMER) {
return (ConsumerGroup) group;
} else if (createIfNotExists && validateOnlineUpgrade((ClassicGroup) group)) {
// Online migration path: upgrade an eligible classic group in place.
return convertToConsumerGroup((ClassicGroup) group, records);
} else {
throw new GroupIdNotFoundException(String.format("Group %s is not a consumer group.",
groupId));
}
}
}
/**
 * Gets a consumer group by committed offset.
 *
 * @param groupId         The group id.
 * @param committedOffset A specified committed offset corresponding to this shard.
 *
 * @return A ConsumerGroup.
 * @throws GroupIdNotFoundException if the group does not exist or is not a consumer group.
 */
public ConsumerGroup consumerGroup(
    String groupId,
    long committedOffset
) throws GroupIdNotFoundException {
    final Group group = group(groupId, committedOffset);
    if (group.type() != CONSUMER) {
        // Upgrading/downgrading between protocols is not supported here, so a
        // group with the wrong type is reported as not found.
        throw new GroupIdNotFoundException(String.format("Group %s is not a consumer group.",
            groupId));
    }
    return (ConsumerGroup) group;
}
/**
 * An overloaded method of {@link GroupMetadataManager#consumerGroup(String, long)}
 * that reads the latest (uncommitted) state.
 */
ConsumerGroup consumerGroup(
String groupId
) throws GroupIdNotFoundException {
return consumerGroup(groupId, Long.MAX_VALUE);
}
/**
 * The method should be called on the replay path.
 * Gets or maybe creates a consumer group and updates the groups map if a new group is created.
 *
 * @param groupId           The group id.
 * @param createIfNotExists A boolean indicating whether the group should be
 *                          created if it does not exist.
 *
 * @return A ConsumerGroup.
 * @throws IllegalStateException if the group does not exist and createIfNotExists is false or
 *                               if the group is not a consumer group.
 * Package private for testing.
 */
ConsumerGroup getOrMaybeCreatePersistedConsumerGroup(
    String groupId,
    boolean createIfNotExists
) throws GroupIdNotFoundException {
    final Group existing = groups.get(groupId);
    if (existing != null) {
        if (existing.type() == CONSUMER) {
            return (ConsumerGroup) existing;
        }
        // We don't support upgrading/downgrading between protocols at the moment so
        // we throw an exception if a group exists with the wrong type.
        throw new IllegalStateException(String.format("Group %s is not a consumer group.", groupId));
    }
    if (!createIfNotExists) {
        throw new IllegalStateException(String.format("Consumer group %s not found.", groupId));
    }
    // Create, register, and record the state transition for the new group.
    final ConsumerGroup created = new ConsumerGroup(snapshotRegistry, groupId, metrics);
    groups.put(groupId, created);
    metrics.onConsumerGroupStateTransition(null, created.state());
    return created;
}
/**
 * Gets or maybe creates a classic group.
 *
 * @param groupId           The group id.
 * @param createIfNotExists A boolean indicating whether the group should be
 *                          created if it does not exist.
 *
 * @return A ClassicGroup.
 * @throws UnknownMemberIdException if the group does not exist and createIfNotExists is false.
 * @throws GroupIdNotFoundException if the group is not a classic group.
 *
 * Package private for testing.
 */
ClassicGroup getOrMaybeCreateClassicGroup(
    String groupId,
    boolean createIfNotExists
) throws UnknownMemberIdException, GroupIdNotFoundException {
    final Group existing = groups.get(groupId);
    if (existing != null) {
        if (existing.type() == CLASSIC) {
            return (ClassicGroup) existing;
        }
        // We don't support upgrading/downgrading between protocols at the moment so
        // we throw an exception if a group exists with the wrong type.
        throw new GroupIdNotFoundException(String.format("Group %s is not a classic group.",
            groupId));
    }
    if (!createIfNotExists) {
        throw new UnknownMemberIdException(String.format("Classic group %s not found.", groupId));
    }
    // Create, register, and record the state transition for the new group.
    final ClassicGroup created = new ClassicGroup(logContext, groupId, ClassicGroupState.EMPTY, time, metrics);
    groups.put(groupId, created);
    metrics.onClassicGroupStateTransition(null, created.currentState());
    return created;
}
/**
 * Gets a classic group by committed offset.
 *
 * @param groupId         The group id.
 * @param committedOffset A specified committed offset corresponding to this shard.
 *
 * @return A ClassicGroup.
 * @throws GroupIdNotFoundException if the group does not exist or is not a classic group.
 */
public ClassicGroup classicGroup(
    String groupId,
    long committedOffset
) throws GroupIdNotFoundException {
    final Group group = group(groupId, committedOffset);
    if (group.type() != CLASSIC) {
        // Upgrading/downgrading between protocols is not supported here, so a
        // group with the wrong type is reported as not found.
        throw new GroupIdNotFoundException(String.format("Group %s is not a classic group.",
            groupId));
    }
    return (ClassicGroup) group;
}
public boolean validateOnlineDowngrade(ConsumerGroup consumerGroup, String memberId) {
if (!consumerGroupMigrationPolicy.isDowngradeEnabled()) {
log.info("Cannot downgrade consumer group {} to classic group because the online downgrade is disabled.",
consumerGroup.groupId());
return false;
} else if (!consumerGroup.allMembersUseClassicProtocolExcept(memberId)) {
log.debug("Cannot downgrade consumer group {} to classic group because not all its members use the classic protocol.",
consumerGroup.groupId());
return false;
} else if (consumerGroup.numMembers() <= 1) {
log.info("Skip downgrading the consumer group {} to classic group because it's empty.",
consumerGroup.groupId());
return false;
} else if (consumerGroup.numMembers() - 1 > classicGroupMaxSize) {
log.info("Cannot downgrade consumer group {} to classic group because its group size is greater than classic group max size.",
consumerGroup.groupId());
}
return true;
}
    /**
     * Converts the given consumer group to a classic group, removing the leaving member.
     * Appends the tombstone records for the consumer group and the records for the new
     * classic group to {@code records}, replaces the group in the in-memory map, and
     * returns a future whose completion (without error) reschedules the member heartbeats
     * and kicks off a rebalance of the new classic group.
     *
     * @param consumerGroup   The ConsumerGroup to convert.
     * @param leavingMemberId The member id of the member that is leaving and is excluded
     *                        from the converted group.
     * @param records         The list of Records to append the conversion records to.
     * @return A future to complete once the conversion records have been written.
     * @throws GroupIdNotFoundException if the consumer protocol data of a member cannot
     *                                  be parsed during the conversion.
     */
    public CompletableFuture<Void> convertToClassicGroup(ConsumerGroup consumerGroup, String leavingMemberId, List<Record> records) {
        // Tombstone the consumer group first; the classic group records produced by
        // toClassicGroup are appended after them.
        consumerGroup.createGroupTombstoneRecords(records);
        ClassicGroup classicGroup;
        try {
            classicGroup = consumerGroup.toClassicGroup(
                leavingMemberId,
                logContext,
                time,
                consumerGroupSessionTimeoutMs,
                metadataImage,
                records
            );
        } catch (SchemaException e) {
            // A member's embedded consumer protocol data could not be parsed; surface
            // the failure to the caller as a GroupIdNotFoundException.
            log.warn("Cannot downgrade the consumer group " + consumerGroup.groupId() + ": fail to parse " +
                "the Consumer Protocol " + ConsumerProtocol.PROTOCOL_TYPE + ".", e);
            throw new GroupIdNotFoundException(String.format("Cannot downgrade the classic group %s: %s.",
                consumerGroup.groupId(), e.getMessage()));
        }
        // Replace the consumer group with the classic group in the in-memory state and
        // record the metrics transition from "no classic group" to its current state.
        groups.put(consumerGroup.groupId(), classicGroup);
        metrics.onClassicGroupStateTransition(null, classicGroup.currentState());
        CompletableFuture<Void> appendFuture = new CompletableFuture<>();
        appendFuture.whenComplete((__, t) -> {
            // Only on a successful write: restart member heartbeats and trigger the
            // first rebalance of the downgraded group.
            if (t == null) {
                classicGroup.allMembers().forEach(member -> rescheduleClassicGroupMemberHeartbeat(classicGroup, member));
                prepareRebalance(classicGroup, String.format("Downgrade group %s.", classicGroup.groupId()));
            }
        });
        return appendFuture;
    }
/**
* Validates the online upgrade if the Classic Group receives a ConsumerGroupHeartbeat request.
*
* @param classicGroup A ClassicGroup.
* @return The boolean indicating whether it's valid to online upgrade the classic group.
*/
private boolean validateOnlineUpgrade(ClassicGroup classicGroup) {
if (!consumerGroupMigrationPolicy.isUpgradeEnabled()) {
log.info("Cannot upgrade classic group {} to consumer group because the online upgrade is disabled.",
classicGroup.groupId());
return false;
} else if (!classicGroup.usesConsumerGroupProtocol()) {
log.info("Cannot upgrade classic group {} to consumer group because the group does not use the consumer embedded protocol.",
classicGroup.groupId());
return false;
} else if (classicGroup.size() > consumerGroupMaxSize) {
log.info("Cannot upgrade classic group {} to consumer group because the group size exceeds the consumer group maximum size.",
classicGroup.groupId());
return false;
}
return true;
}
    /**
     * Creates a ConsumerGroup corresponding to the given classic group.
     * Fails any in-flight join/sync requests with REBALANCE_IN_PROGRESS, appends the
     * classic group tombstones followed by the consumer group records to {@code records},
     * and schedules session timeouts for the converted members.
     *
     * @param classicGroup The ClassicGroup to convert.
     * @param records      The list of Records to append the conversion records to.
     * @return The created ConsumerGroup.
     * @throws GroupIdNotFoundException if the embedded consumer protocol of a member
     *                                  cannot be parsed during the conversion.
     */
    ConsumerGroup convertToConsumerGroup(ClassicGroup classicGroup, List<Record> records) {
        // The upgrade is always triggered by a new member joining the classic group, which always results in
        // updatedMember.subscribedTopicNames changing, the group epoch being bumped, and triggering a new rebalance.
        // If the ClassicGroup is rebalancing, inform the awaiting consumers of another ongoing rebalance
        // so that they will rejoin for the new rebalance.
        classicGroup.completeAllJoinFutures(Errors.REBALANCE_IN_PROGRESS);
        classicGroup.completeAllSyncFutures(Errors.REBALANCE_IN_PROGRESS);
        // Tombstone the classic group first; the consumer group records are appended below.
        classicGroup.createGroupTombstoneRecords(records);
        ConsumerGroup consumerGroup;
        try {
            consumerGroup = ConsumerGroup.fromClassicGroup(
                snapshotRegistry,
                metrics,
                classicGroup,
                metadataImage.topics()
            );
        } catch (SchemaException e) {
            // A member's embedded consumer protocol data could not be parsed; surface
            // the failure to the caller as a GroupIdNotFoundException.
            log.warn("Cannot upgrade the classic group " + classicGroup.groupId() +
                " to consumer group because the embedded consumer protocol is malformed: "
                + e.getMessage() + ".", e);
            throw new GroupIdNotFoundException("Cannot upgrade the classic group " + classicGroup.groupId() +
                " to consumer group because the embedded consumer protocol is malformed.");
        }
        consumerGroup.createConsumerGroupRecords(records);
        // Create the session timeouts for the new members. If the conversion fails, the group will remain a
        // classic group, thus these timers will fail the group type check and do nothing.
        consumerGroup.members().forEach((memberId, __) ->
            scheduleConsumerGroupSessionTimeout(consumerGroup.groupId(), memberId)
        );
        return consumerGroup;
    }
/**
* Removes the group.
*
* @param groupId The group id.
*/
private void removeGroup(
String groupId
) {
Group group = groups.remove(groupId);
if (group != null) {
switch (group.type()) {
case CONSUMER:
ConsumerGroup consumerGroup = (ConsumerGroup) group;
metrics.onConsumerGroupStateTransition(consumerGroup.state(), null);
break;
case CLASSIC:
ClassicGroup classicGroup = (ClassicGroup) group;
metrics.onClassicGroupStateTransition(classicGroup.currentState(), null);
break;
default:
log.warn("Removed group {} with an unknown group type {}.", groupId, group.type());
break;
}
}
}
/**
* Throws an InvalidRequestException if the value is non-null and empty.
* A string containing only whitespaces is also considered empty.
*
* @param value The value.
* @param error The error message.
* @throws InvalidRequestException
*/
private void throwIfEmptyString(
String value,
String error
) throws InvalidRequestException {
if (value != null && value.trim().isEmpty()) {
throw new InvalidRequestException(error);
}
}
/**
* Throws an InvalidRequestException if the value is non-null.
*
* @param value The value.
* @param error The error message.
* @throws InvalidRequestException
*/
private void throwIfNotNull(
Object value,
String error
) throws InvalidRequestException {
if (value != null) {
throw new InvalidRequestException(error);
}
}
/**
* Throws an InvalidRequestException if the value is null.
*
* @param value The value.
* @param error The error message.
* @throws InvalidRequestException
*/
private void throwIfNull(
Object value,
String error
) throws InvalidRequestException {
if (value == null) {
throw new InvalidRequestException(error);
}
}
/**
* Validates the request.
*
* @param request The request to validate.
*
* @throws InvalidRequestException if the request is not valid.
* @throws UnsupportedAssignorException if the assignor is not supported.
*/
private void throwIfConsumerGroupHeartbeatRequestIsInvalid(
ConsumerGroupHeartbeatRequestData request
) throws InvalidRequestException, UnsupportedAssignorException {
throwIfEmptyString(request.groupId(), "GroupId can't be empty.");
throwIfEmptyString(request.instanceId(), "InstanceId can't be empty.");
throwIfEmptyString(request.rackId(), "RackId can't be empty.");
if (request.memberEpoch() > 0 || request.memberEpoch() == LEAVE_GROUP_MEMBER_EPOCH) {
throwIfEmptyString(request.memberId(), "MemberId can't be empty.");
} else if (request.memberEpoch() == 0) {
if (request.rebalanceTimeoutMs() == -1) {
throw new InvalidRequestException("RebalanceTimeoutMs must be provided in first request.");
}
if (request.topicPartitions() == null || !request.topicPartitions().isEmpty()) {
throw new InvalidRequestException("TopicPartitions must be empty when (re-)joining.");
}
if (request.subscribedTopicNames() == null || request.subscribedTopicNames().isEmpty()) {