From 010a1f507dbbd412d51e0009eb303d8eb18b8b53 Mon Sep 17 00:00:00 2001 From: TheR1sing3un Date: Sat, 19 Aug 2023 23:59:25 +0800 Subject: [PATCH 01/10] feat(s3): add basic metadata type in Kafka on S3 architecture 1. add basic metadata type in Kafka on S3 architecture Signed-off-by: TheR1sing3un --- .../apache/kafka/common/protocol/ApiKeys.java | 10 +- .../common/message/CloseStreamRequest.json | 29 +++++ .../common/message/CloseStreamResponse.json | 26 ++++ .../common/message/CreateStreamRequest.json | 27 +++++ .../common/message/CreateStreamResponse.json | 26 ++++ .../common/message/DeleteStreamRequest.json | 27 +++++ .../common/message/DeleteStreamResponse.json | 26 ++++ .../common/message/OpenStreamRequest.json | 29 +++++ .../common/message/OpenStreamResponse.json | 26 ++++ .../org/apache/kafka/message/EntityType.java | 6 +- .../controller/stream/RangeMetadata.java | 52 ++++++++ .../stream/StreamControlManager.java | 62 ++++++++++ .../stream/s3/ObjectStreamIndex.java | 54 +++++++++ .../kafka/controller/stream/s3/S3Object.java | 111 ++++++++++++++++++ .../controller/stream/s3/S3ObjectManager.java | 30 +++++ .../controller/stream/s3/S3ObjectType.java | 45 +++++++ .../controller/stream/s3/StreamObject.java | 63 ++++++++++ .../kafka/controller/stream/s3/WALObject.java | 59 ++++++++++ .../image/BrokerStreamMetadataDelta.java | 44 +++++++ .../image/BrokerStreamMetadataImage.java | 64 ++++++++++ .../org/apache/kafka/image/MetadataDelta.java | 23 +++- .../org/apache/kafka/image/MetadataImage.java | 24 +++- .../kafka/image/StreamMetadataDelta.java | 69 +++++++++++ .../kafka/image/StreamMetadataImage.java | 93 +++++++++++++++ .../kafka/image/StreamsMetadataDelta.java | 88 ++++++++++++++ .../kafka/image/StreamsMetadataImage.java | 70 +++++++++++ .../common/metadata/RangeRecord.json | 60 ++++++++++ .../common/metadata/RemoveRangeRecord.json | 36 ++++++ .../metadata/RemoveStreamObjectRecord.json | 36 ++++++ .../common/metadata/RemoveStreamRecord.json | 30 +++++ 
.../metadata/RemoveWALObjectRecord.json | 36 ++++++ .../common/metadata/StreamObjectRecord.json | 78 ++++++++++++ .../common/metadata/StreamRecord.json | 42 +++++++ .../common/metadata/WALObjectRecord.json | 92 +++++++++++++++ 34 files changed, 1585 insertions(+), 8 deletions(-) create mode 100644 clients/src/main/resources/common/message/CloseStreamRequest.json create mode 100644 clients/src/main/resources/common/message/CloseStreamResponse.json create mode 100644 clients/src/main/resources/common/message/CreateStreamRequest.json create mode 100644 clients/src/main/resources/common/message/CreateStreamResponse.json create mode 100644 clients/src/main/resources/common/message/DeleteStreamRequest.json create mode 100644 clients/src/main/resources/common/message/DeleteStreamResponse.json create mode 100644 clients/src/main/resources/common/message/OpenStreamRequest.json create mode 100644 clients/src/main/resources/common/message/OpenStreamResponse.json create mode 100644 metadata/src/main/java/org/apache/kafka/controller/stream/RangeMetadata.java create mode 100644 metadata/src/main/java/org/apache/kafka/controller/stream/StreamControlManager.java create mode 100644 metadata/src/main/java/org/apache/kafka/controller/stream/s3/ObjectStreamIndex.java create mode 100644 metadata/src/main/java/org/apache/kafka/controller/stream/s3/S3Object.java create mode 100644 metadata/src/main/java/org/apache/kafka/controller/stream/s3/S3ObjectManager.java create mode 100644 metadata/src/main/java/org/apache/kafka/controller/stream/s3/S3ObjectType.java create mode 100644 metadata/src/main/java/org/apache/kafka/controller/stream/s3/StreamObject.java create mode 100644 metadata/src/main/java/org/apache/kafka/controller/stream/s3/WALObject.java create mode 100644 metadata/src/main/java/org/apache/kafka/image/BrokerStreamMetadataDelta.java create mode 100644 metadata/src/main/java/org/apache/kafka/image/BrokerStreamMetadataImage.java create mode 100644 
metadata/src/main/java/org/apache/kafka/image/StreamMetadataDelta.java create mode 100644 metadata/src/main/java/org/apache/kafka/image/StreamMetadataImage.java create mode 100644 metadata/src/main/java/org/apache/kafka/image/StreamsMetadataDelta.java create mode 100644 metadata/src/main/java/org/apache/kafka/image/StreamsMetadataImage.java create mode 100644 metadata/src/main/resources/common/metadata/RangeRecord.json create mode 100644 metadata/src/main/resources/common/metadata/RemoveRangeRecord.json create mode 100644 metadata/src/main/resources/common/metadata/RemoveStreamObjectRecord.json create mode 100644 metadata/src/main/resources/common/metadata/RemoveStreamRecord.json create mode 100644 metadata/src/main/resources/common/metadata/RemoveWALObjectRecord.json create mode 100644 metadata/src/main/resources/common/metadata/StreamObjectRecord.json create mode 100644 metadata/src/main/resources/common/metadata/StreamRecord.json create mode 100644 metadata/src/main/resources/common/metadata/WALObjectRecord.json diff --git a/clients/src/main/java/org/apache/kafka/common/protocol/ApiKeys.java b/clients/src/main/java/org/apache/kafka/common/protocol/ApiKeys.java index f727bd18e5..8672e2e785 100644 --- a/clients/src/main/java/org/apache/kafka/common/protocol/ApiKeys.java +++ b/clients/src/main/java/org/apache/kafka/common/protocol/ApiKeys.java @@ -108,7 +108,15 @@ public enum ApiKeys { UNREGISTER_BROKER(ApiMessageType.UNREGISTER_BROKER, false, RecordBatch.MAGIC_VALUE_V0, true), DESCRIBE_TRANSACTIONS(ApiMessageType.DESCRIBE_TRANSACTIONS), LIST_TRANSACTIONS(ApiMessageType.LIST_TRANSACTIONS), - ALLOCATE_PRODUCER_IDS(ApiMessageType.ALLOCATE_PRODUCER_IDS, true, true); + ALLOCATE_PRODUCER_IDS(ApiMessageType.ALLOCATE_PRODUCER_IDS, true, true), + + // stream start + + CREATE_STREAM(ApiMessageType.CREATE_STREAM, false, true), + DELETE_STREAM(ApiMessageType.DELETE_STREAM, false, true), + OPEN_STREAM(ApiMessageType.OPEN_STREAM, false, true), + 
CLOSE_STREAM(ApiMessageType.CLOSE_STREAM, false, true); + // stream end private static final Map> APIS_BY_LISTENER = new EnumMap<>(ApiMessageType.ListenerType.class); diff --git a/clients/src/main/resources/common/message/CloseStreamRequest.json b/clients/src/main/resources/common/message/CloseStreamRequest.json new file mode 100644 index 0000000000..1cfee577b6 --- /dev/null +++ b/clients/src/main/resources/common/message/CloseStreamRequest.json @@ -0,0 +1,29 @@ +// Licensed to the Apache Software Foundation (ASF) under one or more +// contributor license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright ownership. +// The ASF licenses this file to You under the Apache License, Version 2.0 +// (the "License"); you may not use this file except in compliance with +// the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +{ + "apiKey": 70, + "type": "request", + "listeners": ["controller", "broker"], + "name": "CloseStreamRequest", + "validVersions": "0", + "flexibleVersions": "0+", + "fields": [ + { "name": "BrokerId", "type": "int32", "versions": "0+", "entityType": "brokerId", + "about": "The ID of the requesting broker" }, + { "name": "StreamId", "type": "int64", "versions": "0+", "entityType": "streamId", + "about": "The id of the requesting stream" } + ] +} \ No newline at end of file diff --git a/clients/src/main/resources/common/message/CloseStreamResponse.json b/clients/src/main/resources/common/message/CloseStreamResponse.json new file mode 100644 index 0000000000..6d954b0e9e --- /dev/null +++ b/clients/src/main/resources/common/message/CloseStreamResponse.json @@ -0,0 +1,26 @@ +// Licensed to the Apache Software Foundation (ASF) under one or more +// contributor license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright ownership. +// The ASF licenses this file to You under the Apache License, Version 2.0 +// (the "License"); you may not use this file except in compliance with +// the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +{ + "apiKey": 70, + "type": "response", + "name": "CloseStreamResponse", + "validVersions": "0", + "flexibleVersions": "0+", + "fields": [ + { "name": "ErrorCode", "type": "int16", "versions": "0+", + "about": "The top level response error code" } + ] +} \ No newline at end of file diff --git a/clients/src/main/resources/common/message/CreateStreamRequest.json b/clients/src/main/resources/common/message/CreateStreamRequest.json new file mode 100644 index 0000000000..4a086c26dc --- /dev/null +++ b/clients/src/main/resources/common/message/CreateStreamRequest.json @@ -0,0 +1,27 @@ +// Licensed to the Apache Software Foundation (ASF) under one or more +// contributor license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright ownership. +// The ASF licenses this file to You under the Apache License, Version 2.0 +// (the "License"); you may not use this file except in compliance with +// the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +{ + "apiKey": 68, + "type": "request", + "listeners": ["controller", "broker"], + "name": "CreateStreamRequest", + "validVersions": "0", + "flexibleVersions": "0+", + "fields": [ + { "name": "StreamId", "type": "int64", "versions": "0+", "entityType": "streamId", + "about": "The id of the requesting stream" } + ] +} \ No newline at end of file diff --git a/clients/src/main/resources/common/message/CreateStreamResponse.json b/clients/src/main/resources/common/message/CreateStreamResponse.json new file mode 100644 index 0000000000..65d50ec3b2 --- /dev/null +++ b/clients/src/main/resources/common/message/CreateStreamResponse.json @@ -0,0 +1,26 @@ +// Licensed to the Apache Software Foundation (ASF) under one or more +// contributor license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright ownership. +// The ASF licenses this file to You under the Apache License, Version 2.0 +// (the "License"); you may not use this file except in compliance with +// the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +{ + "apiKey": 68, + "type": "response", + "name": "CreateStreamResponse", + "validVersions": "0", + "flexibleVersions": "0+", + "fields": [ + { "name": "ErrorCode", "type": "int16", "versions": "0+", + "about": "The top level response error code" } + ] +} \ No newline at end of file diff --git a/clients/src/main/resources/common/message/DeleteStreamRequest.json b/clients/src/main/resources/common/message/DeleteStreamRequest.json new file mode 100644 index 0000000000..5d70bc6428 --- /dev/null +++ b/clients/src/main/resources/common/message/DeleteStreamRequest.json @@ -0,0 +1,27 @@ +// Licensed to the Apache Software Foundation (ASF) under one or more +// contributor license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright ownership. +// The ASF licenses this file to You under the Apache License, Version 2.0 +// (the "License"); you may not use this file except in compliance with +// the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +{ + "apiKey": 71, + "type": "request", + "listeners": ["controller", "broker"], + "name": "DeleteStreamRequest", + "validVersions": "0", + "flexibleVersions": "0+", + "fields": [ + { "name": "StreamId", "type": "int64", "versions": "0+", "entityType": "streamId", + "about": "The id of the requesting stream" } + ] +} \ No newline at end of file diff --git a/clients/src/main/resources/common/message/DeleteStreamResponse.json b/clients/src/main/resources/common/message/DeleteStreamResponse.json new file mode 100644 index 0000000000..1cd1e5f570 --- /dev/null +++ b/clients/src/main/resources/common/message/DeleteStreamResponse.json @@ -0,0 +1,26 @@ +// Licensed to the Apache Software Foundation (ASF) under one or more +// contributor license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright ownership. +// The ASF licenses this file to You under the Apache License, Version 2.0 +// (the "License"); you may not use this file except in compliance with +// the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +{ + "apiKey": 71, + "type": "response", + "name": "DeleteStreamResponse", + "validVersions": "0", + "flexibleVersions": "0+", + "fields": [ + { "name": "ErrorCode", "type": "int16", "versions": "0+", + "about": "The top level response error code" } + ] +} \ No newline at end of file diff --git a/clients/src/main/resources/common/message/OpenStreamRequest.json b/clients/src/main/resources/common/message/OpenStreamRequest.json new file mode 100644 index 0000000000..28c8f8e3a3 --- /dev/null +++ b/clients/src/main/resources/common/message/OpenStreamRequest.json @@ -0,0 +1,29 @@ +// Licensed to the Apache Software Foundation (ASF) under one or more +// contributor license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright ownership. +// The ASF licenses this file to You under the Apache License, Version 2.0 +// (the "License"); you may not use this file except in compliance with +// the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +{ + "apiKey": 69, + "type": "request", + "listeners": ["controller", "broker"], + "name": "OpenStreamRequest", + "validVersions": "0", + "flexibleVersions": "0+", + "fields": [ + { "name": "BrokerId", "type": "int32", "versions": "0+", "entityType": "brokerId", + "about": "The ID of the requesting broker" }, + { "name": "StreamId", "type": "int64", "versions": "0+", "entityType": "streamId", + "about": "The id of the requesting stream" } + ] +} \ No newline at end of file diff --git a/clients/src/main/resources/common/message/OpenStreamResponse.json b/clients/src/main/resources/common/message/OpenStreamResponse.json new file mode 100644 index 0000000000..75175ea985 --- /dev/null +++ b/clients/src/main/resources/common/message/OpenStreamResponse.json @@ -0,0 +1,26 @@ +// Licensed to the Apache Software Foundation (ASF) under one or more +// contributor license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright ownership. +// The ASF licenses this file to You under the Apache License, Version 2.0 +// (the "License"); you may not use this file except in compliance with +// the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +{ + "apiKey": 69, + "type": "response", + "name": "OpenStreamResponse", + "validVersions": "0", + "flexibleVersions": "0+", + "fields": [ + { "name": "ErrorCode", "type": "int16", "versions": "0+", + "about": "The top level response error code" } + ] +} \ No newline at end of file diff --git a/generator/src/main/java/org/apache/kafka/message/EntityType.java b/generator/src/main/java/org/apache/kafka/message/EntityType.java index 225c987873..d2123a6874 100644 --- a/generator/src/main/java/org/apache/kafka/message/EntityType.java +++ b/generator/src/main/java/org/apache/kafka/message/EntityType.java @@ -18,6 +18,7 @@ package org.apache.kafka.message; import com.fasterxml.jackson.annotation.JsonProperty; +import org.apache.kafka.message.FieldType.Int64FieldType; public enum EntityType { @JsonProperty("unknown") @@ -36,7 +37,10 @@ public enum EntityType { TOPIC_NAME(FieldType.StringFieldType.INSTANCE), @JsonProperty("brokerId") - BROKER_ID(FieldType.Int32FieldType.INSTANCE); + BROKER_ID(FieldType.Int32FieldType.INSTANCE), + + @JsonProperty("streamId") + STREAM_ID(Int64FieldType.INSTANCE); private final FieldType baseType; diff --git a/metadata/src/main/java/org/apache/kafka/controller/stream/RangeMetadata.java b/metadata/src/main/java/org/apache/kafka/controller/stream/RangeMetadata.java new file mode 100644 index 0000000000..bf3e249085 --- /dev/null +++ b/metadata/src/main/java/org/apache/kafka/controller/stream/RangeMetadata.java @@ -0,0 +1,52 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.kafka.controller.stream; + +import java.util.Optional; + +public class RangeMetadata implements Comparable { + private Integer epoch; + private Integer rangeIndex; + private Long startOffset; + private Optional endOffset; + private Integer brokerId; + @Override + public int compareTo(RangeMetadata o) { + return this.rangeIndex.compareTo(o.rangeIndex); + } + + public Integer getEpoch() { + return epoch; + } + + public Integer getRangeIndex() { + return rangeIndex; + } + + public Long getStartOffset() { + return startOffset; + } + + public Optional getEndOffset() { + return endOffset; + } + + public Integer getBrokerId() { + return brokerId; + } +} diff --git a/metadata/src/main/java/org/apache/kafka/controller/stream/StreamControlManager.java b/metadata/src/main/java/org/apache/kafka/controller/stream/StreamControlManager.java new file mode 100644 index 0000000000..ce8fea1abf --- /dev/null +++ b/metadata/src/main/java/org/apache/kafka/controller/stream/StreamControlManager.java @@ -0,0 +1,62 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.kafka.controller.stream; + +import org.apache.kafka.common.utils.LogContext; +import org.apache.kafka.controller.stream.s3.StreamObject; +import org.apache.kafka.controller.stream.s3.WALObject; +import org.apache.kafka.timeline.SnapshotRegistry; +import org.apache.kafka.timeline.TimelineHashMap; +import org.apache.kafka.timeline.TimelineHashSet; +import org.slf4j.Logger; + +public class StreamControlManager { + + class StreamMetadata { + private Long streamId; + private Integer epoch; + private Long startOffset; + private TimelineHashSet ranges; + private TimelineHashSet streamObjects; + } + + class BrokerStreamMetadata { + private Integer brokerId; + private TimelineHashSet walObjects; + } + + private final SnapshotRegistry snapshotRegistry; + + private final Logger log; + + private final TimelineHashMap streamsMetadata; + + private final TimelineHashMap brokersMetadata; + + public StreamControlManager( + SnapshotRegistry snapshotRegistry, + LogContext logContext) { + this.snapshotRegistry = snapshotRegistry; + this.log = logContext.logger(StreamControlManager.class); + this.streamsMetadata = new TimelineHashMap<>(snapshotRegistry, 0); + this.brokersMetadata = new TimelineHashMap<>(snapshotRegistry, 0); + } + + + +} diff --git a/metadata/src/main/java/org/apache/kafka/controller/stream/s3/ObjectStreamIndex.java b/metadata/src/main/java/org/apache/kafka/controller/stream/s3/ObjectStreamIndex.java new file mode 100644 index 0000000000..3ea742e389 --- /dev/null +++ 
b/metadata/src/main/java/org/apache/kafka/controller/stream/s3/ObjectStreamIndex.java @@ -0,0 +1,54 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.kafka.controller.stream.s3; + +/** + * ObjectStreamIndex is the index of a stream range in a WAL object or STREAM object. + */ +public class ObjectStreamIndex implements Comparable { + + private final Long streamId; + + private final Long startOffset; + + private final Long endOffset; + + public ObjectStreamIndex(Long streamId, Long startOffset, Long endOffset) { + this.streamId = streamId; + this.startOffset = startOffset; + this.endOffset = endOffset; + } + + public Long getStreamId() { + return streamId; + } + + public Long getStartOffset() { + return startOffset; + } + + public Long getEndOffset() { + return endOffset; + } + + @Override + public int compareTo(ObjectStreamIndex o) { + int res = this.streamId.compareTo(o.streamId); + return res == 0 ? 
this.startOffset.compareTo(o.startOffset) : res; + } +} diff --git a/metadata/src/main/java/org/apache/kafka/controller/stream/s3/S3Object.java b/metadata/src/main/java/org/apache/kafka/controller/stream/s3/S3Object.java new file mode 100644 index 0000000000..07708d4669 --- /dev/null +++ b/metadata/src/main/java/org/apache/kafka/controller/stream/s3/S3Object.java @@ -0,0 +1,111 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.kafka.controller.stream.s3; + +import java.util.Optional; + +/** + * S3Object is the base class of object in S3. + * Manages the lifecycle of S3Object. 
+ */ +public abstract class S3Object implements Comparable { + + protected final Long objectId; + + protected Optional objectSize = Optional.empty(); + + protected Optional objectAddress = Optional.empty(); + + protected Optional applyTimeInMs = Optional.empty(); + + protected Optional createTimeInMs = Optional.empty(); + + protected Optional destroyTimeInMs = Optional.empty(); + + protected ObjectState objectState = ObjectState.UNINITIALIZED; + + protected S3ObjectType objectType = S3ObjectType.UNKNOWN; + + protected S3Object(final Long objectId) { + this.objectId = objectId; + } + + public void onApply() { + if (this.objectState != ObjectState.UNINITIALIZED) { + throw new IllegalStateException("Object is not in UNINITIALIZED state"); + } + this.objectState = ObjectState.APPLIED; + this.applyTimeInMs = Optional.of(System.currentTimeMillis()); + } + + public void onCreate(S3ObjectCreateContext createContext) { + // TODO: decide fetch object metadata from S3 or let broker send it to controller + if (this.objectState != ObjectState.APPLIED) { + throw new IllegalStateException("Object is not in APPLIED state"); + } + this.objectState = ObjectState.CREATED; + this.createTimeInMs = Optional.of(createContext.createTimeInMs); + this.objectSize = Optional.of(createContext.objectSize); + this.objectAddress = Optional.of(createContext.objectAddress); + this.objectType = createContext.objectType; + } + + public void onDestroy() { + if (this.objectState != ObjectState.CREATED) { + throw new IllegalStateException("Object is not in CREATED state"); + } + S3ObjectManager.destroy(this, () -> { + this.objectState = ObjectState.DESTROYED; + this.destroyTimeInMs = Optional.of(System.currentTimeMillis()); + }); + } + + public S3ObjectType getObjectType() { + return objectType; + } + + enum ObjectState { + UNINITIALIZED, + APPLIED, + CREATED, + MARK_DESTROYED, + DESTROYED; + } + + public class S3ObjectCreateContext { + private final Long createTimeInMs; + private final Long objectSize; 
+ private final String objectAddress; + private final S3ObjectType objectType; + public S3ObjectCreateContext( + final Long createTimeInMs, + final Long objectSize, + final String objectAddress, + final S3ObjectType objectType) { + this.createTimeInMs = createTimeInMs; + this.objectSize = objectSize; + this.objectAddress = objectAddress; + this.objectType = objectType; + } + } + + @Override + public int compareTo(S3Object o) { + return this.objectId.compareTo(o.objectId); + } +} diff --git a/metadata/src/main/java/org/apache/kafka/controller/stream/s3/S3ObjectManager.java b/metadata/src/main/java/org/apache/kafka/controller/stream/s3/S3ObjectManager.java new file mode 100644 index 0000000000..ea060c935e --- /dev/null +++ b/metadata/src/main/java/org/apache/kafka/controller/stream/s3/S3ObjectManager.java @@ -0,0 +1,30 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.kafka.controller.stream.s3; + + +import org.apache.kafka.controller.stream.s3.S3Object; + +public class S3ObjectManager { + + public static boolean destroy(S3Object object, Runnable successCallback) { + // TODO: trigger delete object from S3 + return false; + } + +} diff --git a/metadata/src/main/java/org/apache/kafka/controller/stream/s3/S3ObjectType.java b/metadata/src/main/java/org/apache/kafka/controller/stream/s3/S3ObjectType.java new file mode 100644 index 0000000000..eea9dc4394 --- /dev/null +++ b/metadata/src/main/java/org/apache/kafka/controller/stream/s3/S3ObjectType.java @@ -0,0 +1,45 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.kafka.controller.stream.s3; + +public enum S3ObjectType { + /** + * WAL object with loose records + */ + WAL_LOOSE, + + /** + * WAL object with minor compaction records + */ + WAL_MINOR, + + /** + * WAL object with major compaction records + */ + WAL_MAJOR, + + /** + * STREAM object with stream records of one stream + */ + STREAM, + + /** + * UNKNOWN object type + */ + UNKNOWN; +} diff --git a/metadata/src/main/java/org/apache/kafka/controller/stream/s3/StreamObject.java b/metadata/src/main/java/org/apache/kafka/controller/stream/s3/StreamObject.java new file mode 100644 index 0000000000..e178275290 --- /dev/null +++ b/metadata/src/main/java/org/apache/kafka/controller/stream/s3/StreamObject.java @@ -0,0 +1,63 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.kafka.controller.stream.s3; + +public class StreamObject extends S3Object { + + private ObjectStreamIndex streamIndex; + + public StreamObject(final Long objectId) { + super(objectId); + } + + @Override + public void onCreate(S3ObjectCreateContext createContext) { + super.onCreate(createContext); + this.streamIndex = ((StreamObjectCreateContext) createContext).streamIndex; + } + + @Override + public int compareTo(S3Object o) { + if (!(o instanceof StreamObject)) { + throw new IllegalArgumentException("Cannot compare StreamObject with non-StreamObject"); + } + StreamObject streamObject = (StreamObject) o; + // order by streamId first, then startOffset + int res = this.streamIndex.getStreamId().compareTo(streamObject.streamIndex.getStreamId()); + return res == 0 ? this.streamIndex.getStartOffset().compareTo(streamObject.streamIndex.getStartOffset()) : res; + } + + class StreamObjectCreateContext extends S3ObjectCreateContext { + + private final ObjectStreamIndex streamIndex; + + public StreamObjectCreateContext( + final Long createTimeInMs, + final Long objectSize, + final String objectAddress, + final S3ObjectType objectType, + final ObjectStreamIndex streamIndex) { + super(createTimeInMs, objectSize, objectAddress, objectType); + this.streamIndex = streamIndex; + } + } + + public ObjectStreamIndex getStreamIndex() { + return streamIndex; + } +} diff --git a/metadata/src/main/java/org/apache/kafka/controller/stream/s3/WALObject.java b/metadata/src/main/java/org/apache/kafka/controller/stream/s3/WALObject.java new file mode 100644 index 0000000000..dfd4ec6484 --- /dev/null +++ b/metadata/src/main/java/org/apache/kafka/controller/stream/s3/WALObject.java @@ -0,0 +1,59 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. 
+ * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.kafka.controller.stream.s3; + +import java.util.List; +import java.util.Map; +import java.util.stream.Collectors; + +public class WALObject extends S3Object { + private Integer brokerId; + private Map streamsIndex; + + private S3ObjectType objectType = S3ObjectType.UNKNOWN; + + public WALObject(Long objectId) { + super(objectId); + } + + @Override + public void onCreate(S3ObjectCreateContext createContext) { + super.onCreate(createContext); + WALObjectCreateContext walCreateContext = (WALObjectCreateContext) createContext; + this.streamsIndex = walCreateContext.streamIndexList.stream().collect(Collectors.toMap(ObjectStreamIndex::getStreamId, index -> index)); + this.brokerId = walCreateContext.brokerId; + } + + class WALObjectCreateContext extends S3ObjectCreateContext { + + private final List streamIndexList; + private final Integer brokerId; + + public WALObjectCreateContext( + final Long createTimeInMs, + final Long objectSize, + final String objectAddress, + final S3ObjectType objectType, + final List streamIndexList, + final Integer brokerId) { + super(createTimeInMs, objectSize, objectAddress, objectType); + this.streamIndexList = streamIndexList; + this.brokerId = brokerId; + } + } +} diff --git a/metadata/src/main/java/org/apache/kafka/image/BrokerStreamMetadataDelta.java b/metadata/src/main/java/org/apache/kafka/image/BrokerStreamMetadataDelta.java new 
file mode 100644 index 0000000000..9aadbe46db --- /dev/null +++ b/metadata/src/main/java/org/apache/kafka/image/BrokerStreamMetadataDelta.java @@ -0,0 +1,44 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.kafka.image; + +import java.util.HashSet; +import java.util.Set; +import org.apache.kafka.controller.stream.s3.WALObject; + +public class BrokerStreamMetadataDelta { + + private final BrokerStreamMetadataImage image; + private final Set changedWALObjects = new HashSet<>(); + + private final Set removedWALObjects = new HashSet<>(); + + public BrokerStreamMetadataDelta(BrokerStreamMetadataImage image) { + this.image = image; + } + + public BrokerStreamMetadataImage apply() { + Set newWALObjects = new HashSet<>(image.getWalObjects()); + // remove all removed WAL objects + newWALObjects.removeAll(removedWALObjects); + // add all changed WAL objects + newWALObjects.addAll(changedWALObjects); + return new BrokerStreamMetadataImage(image.getBrokerId(), newWALObjects); + } + +} diff --git a/metadata/src/main/java/org/apache/kafka/image/BrokerStreamMetadataImage.java b/metadata/src/main/java/org/apache/kafka/image/BrokerStreamMetadataImage.java new file mode 100644 index 0000000000..179c243760 --- 
/dev/null +++ b/metadata/src/main/java/org/apache/kafka/image/BrokerStreamMetadataImage.java @@ -0,0 +1,64 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + + +package org.apache.kafka.image; + +import java.util.Objects; +import java.util.Set; +import org.apache.kafka.controller.stream.s3.WALObject; +import org.apache.kafka.image.writer.ImageWriter; +import org.apache.kafka.image.writer.ImageWriterOptions; + +public class BrokerStreamMetadataImage { + private final Integer brokerId; + private final Set walObjects; + + public BrokerStreamMetadataImage(Integer brokerId, Set walObjects) { + this.brokerId = brokerId; + this.walObjects = walObjects; + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + BrokerStreamMetadataImage that = (BrokerStreamMetadataImage) o; + return Objects.equals(brokerId, that.brokerId) && Objects.equals(walObjects, that.walObjects); + } + + @Override + public int hashCode() { + return Objects.hash(brokerId, walObjects); + } + + public void write(ImageWriter writer, ImageWriterOptions options) { + + } + + public Set getWalObjects() { + return walObjects; + } + + public Integer 
getBrokerId() { + return brokerId; + } +} diff --git a/metadata/src/main/java/org/apache/kafka/image/MetadataDelta.java b/metadata/src/main/java/org/apache/kafka/image/MetadataDelta.java index ab4fd68f41..00ba907259 100644 --- a/metadata/src/main/java/org/apache/kafka/image/MetadataDelta.java +++ b/metadata/src/main/java/org/apache/kafka/image/MetadataDelta.java @@ -72,6 +72,8 @@ public MetadataDelta build() { private AclsDelta aclsDelta = null; + private StreamsMetadataDelta streamsMetadataDelta = null; + public MetadataDelta(MetadataImage image) { this.image = image; } @@ -145,6 +147,17 @@ public AclsDelta getOrCreateAclsDelta() { return aclsDelta; } + public StreamsMetadataDelta streamMetadataDelta() { + return streamsMetadataDelta; + } + + public StreamsMetadataDelta getOrCreateStreamsMetadataDelta() { + if (streamsMetadataDelta == null) { + streamsMetadataDelta = new StreamsMetadataDelta(image.streamsMetadata()); + } + return streamsMetadataDelta; + } + public Optional metadataVersionChanged() { if (featuresDelta == null) { return Optional.empty(); @@ -341,6 +354,12 @@ public MetadataImage apply(MetadataProvenance provenance) { } else { newAcls = aclsDelta.apply(); } + StreamsMetadataImage newStreamMetadata; + if (streamsMetadataDelta == null) { + newStreamMetadata = image.streamsMetadata(); + } else { + newStreamMetadata = streamsMetadataDelta.apply(); + } return new MetadataImage( provenance, newFeatures, @@ -349,7 +368,8 @@ public MetadataImage apply(MetadataProvenance provenance) { newConfigs, newClientQuotas, newProducerIds, - newAcls + newAcls, + newStreamMetadata ); } @@ -363,6 +383,7 @@ public String toString() { ", clientQuotasDelta=" + clientQuotasDelta + ", producerIdsDelta=" + producerIdsDelta + ", aclsDelta=" + aclsDelta + + ", streamMetadataDelta=" + streamsMetadataDelta + ')'; } } diff --git a/metadata/src/main/java/org/apache/kafka/image/MetadataImage.java b/metadata/src/main/java/org/apache/kafka/image/MetadataImage.java index 
2202b4fe2f..e0795bc51e 100644 --- a/metadata/src/main/java/org/apache/kafka/image/MetadataImage.java +++ b/metadata/src/main/java/org/apache/kafka/image/MetadataImage.java @@ -38,7 +38,8 @@ public final class MetadataImage { ConfigurationsImage.EMPTY, ClientQuotasImage.EMPTY, ProducerIdsImage.EMPTY, - AclsImage.EMPTY); + AclsImage.EMPTY, + StreamsMetadataImage.EMPTY); private final MetadataProvenance provenance; @@ -56,6 +57,8 @@ public final class MetadataImage { private final AclsImage acls; + private final StreamsMetadataImage streamMetadata; + public MetadataImage( MetadataProvenance provenance, FeaturesImage features, @@ -64,7 +67,8 @@ public MetadataImage( ConfigurationsImage configs, ClientQuotasImage clientQuotas, ProducerIdsImage producerIds, - AclsImage acls + AclsImage acls, + StreamsMetadataImage streamMetadata ) { this.provenance = provenance; this.features = features; @@ -74,6 +78,7 @@ public MetadataImage( this.clientQuotas = clientQuotas; this.producerIds = producerIds; this.acls = acls; + this.streamMetadata = streamMetadata; } public boolean isEmpty() { @@ -83,7 +88,8 @@ public boolean isEmpty() { configs.isEmpty() && clientQuotas.isEmpty() && producerIds.isEmpty() && - acls.isEmpty(); + acls.isEmpty() && + streamMetadata.isEmpty(); } public MetadataProvenance provenance() { @@ -126,6 +132,10 @@ public AclsImage acls() { return acls; } + public StreamsMetadataImage streamsMetadata() { + return streamMetadata; + } + public void write(ImageWriter writer, ImageWriterOptions options) { // Features should be written out first so we can include the metadata.version at the beginning of the // snapshot @@ -136,6 +146,7 @@ public void write(ImageWriter writer, ImageWriterOptions options) { clientQuotas.write(writer, options); producerIds.write(writer, options); acls.write(writer, options); + streamMetadata.write(writer, options); writer.close(true); } @@ -150,7 +161,8 @@ public boolean equals(Object o) { configs.equals(other.configs) && 
clientQuotas.equals(other.clientQuotas) && producerIds.equals(other.producerIds) && - acls.equals(other.acls); + acls.equals(other.acls) && + streamMetadata.equals(other.streamMetadata); } @Override @@ -163,7 +175,8 @@ public int hashCode() { configs, clientQuotas, producerIds, - acls); + acls, + streamMetadata); } @Override @@ -177,6 +190,7 @@ public String toString() { ", clientQuotas=" + clientQuotas + ", producerIdsImage=" + producerIds + ", acls=" + acls + + ", streamMetadata=" + streamMetadata + ")"; } } diff --git a/metadata/src/main/java/org/apache/kafka/image/StreamMetadataDelta.java b/metadata/src/main/java/org/apache/kafka/image/StreamMetadataDelta.java new file mode 100644 index 0000000000..316605fc8c --- /dev/null +++ b/metadata/src/main/java/org/apache/kafka/image/StreamMetadataDelta.java @@ -0,0 +1,69 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.kafka.image; + +import java.util.HashMap; +import java.util.HashSet; +import java.util.Map; +import java.util.Set; +import org.apache.kafka.controller.stream.RangeMetadata; +import org.apache.kafka.controller.stream.s3.StreamObject; + +public class StreamMetadataDelta { + private final StreamMetadataImage image; + + private Integer newEpoch; + + private final Map changedRanges = new HashMap<>(); + private final Set removedRanges = new HashSet<>(); + private final Set changedStreamObjects = new HashSet<>(); + private final Set removedStreamObjects = new HashSet<>(); + + public StreamMetadataDelta(StreamMetadataImage image) { + this.image = image; + this.newEpoch = image.getEpoch(); + } + + public StreamMetadataImage apply() { + Map newRanges = new HashMap<>(image.getRanges().size()); + // apply the delta changes of old ranges since the last image + image.getRanges().forEach((rangeIndex, range) -> { + RangeMetadata changedRange = changedRanges.get(rangeIndex); + if (changedRange == null) { + // no change, check if deleted + if (!removedRanges.contains(rangeIndex)) { + newRanges.put(rangeIndex, range); + } + } else { + // changed, apply the delta + newRanges.put(rangeIndex, changedRange); + } + }); + // apply the new created ranges + changedRanges.entrySet().stream().filter(entry -> !newRanges.containsKey(entry.getKey())) + .forEach(entry -> newRanges.put(entry.getKey(), entry.getValue())); + + Set newStreamObjects = new HashSet<>(image.getStreams()); + // remove all removed stream-objects + newStreamObjects.removeAll(removedStreamObjects); + // add all changed stream-objects + newStreamObjects.addAll(changedStreamObjects); + return new StreamMetadataImage(image.getStreamId(), newEpoch, image.getStartOffset(), newRanges, newStreamObjects); + } + +} diff --git a/metadata/src/main/java/org/apache/kafka/image/StreamMetadataImage.java b/metadata/src/main/java/org/apache/kafka/image/StreamMetadataImage.java new file mode 100644 index 
0000000000..9c608117dc --- /dev/null +++ b/metadata/src/main/java/org/apache/kafka/image/StreamMetadataImage.java @@ -0,0 +1,93 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.kafka.image; + +import java.util.Map; +import java.util.Objects; +import java.util.Set; +import org.apache.kafka.controller.stream.RangeMetadata; +import org.apache.kafka.controller.stream.s3.StreamObject; +import org.apache.kafka.image.writer.ImageWriter; +import org.apache.kafka.image.writer.ImageWriterOptions; + +public class StreamMetadataImage { + private final Long streamId; + + private final Integer epoch; + + private final Long startOffset; + + private final Map ranges; + + private final Set streams; + + public StreamMetadataImage( + Long streamId, + Integer epoch, + Long startOffset, + Map ranges, + Set streams) { + this.streamId = streamId; + this.epoch = epoch; + this.startOffset = startOffset; + this.ranges = ranges; + this.streams = streams; + } + + public void write(ImageWriter writer, ImageWriterOptions options) { + + } + + public Map getRanges() { + return ranges; + } + + public Set getStreams() { + return streams; + } + + public Integer getEpoch() { + return epoch; + } + + public Long getStartOffset() 
{ + return startOffset; + } + + public Long getStreamId() { + return streamId; + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + StreamMetadataImage that = (StreamMetadataImage) o; + return Objects.equals(streamId, that.streamId) && Objects.equals(epoch, that.epoch) && Objects.equals(startOffset, + that.startOffset) && Objects.equals(ranges, that.ranges) && Objects.equals(streams, that.streams); + } + + @Override + public int hashCode() { + return Objects.hash(streamId, epoch, startOffset, ranges, streams); + } +} diff --git a/metadata/src/main/java/org/apache/kafka/image/StreamsMetadataDelta.java b/metadata/src/main/java/org/apache/kafka/image/StreamsMetadataDelta.java new file mode 100644 index 0000000000..eb4b86b6d5 --- /dev/null +++ b/metadata/src/main/java/org/apache/kafka/image/StreamsMetadataDelta.java @@ -0,0 +1,88 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.kafka.image; + +import java.util.HashMap; +import java.util.HashSet; +import java.util.Map; +import java.util.Set; + +public final class StreamsMetadataDelta { + + private final StreamsMetadataImage image; + + private final Map changedStreams = new HashMap<>(); + + private final Map changedBrokers = new HashMap<>(); + + private final Set deletedStreams = new HashSet<>(); + private final Set deletedBrokers = new HashSet<>(); + + public StreamsMetadataDelta(StreamsMetadataImage image) { + this.image = image; + } + + StreamsMetadataImage apply() { + Map newStreams = new HashMap<>(image.getStreamsMetadata().size()); + Map newBrokerStreams = new HashMap<>(image.getBrokerStreamsMetadata().size()); + // apply the delta changes of old streams since the last image + image.getStreamsMetadata().forEach((streamId, streamMetadataImage) -> { + StreamMetadataDelta delta = changedStreams.get(streamId); + if (delta == null) { + // no change, check if deleted + if (!deletedStreams.contains(streamId)) { + newStreams.put(streamId, streamMetadataImage); + } + } else { + // changed, apply the delta + StreamMetadataImage newStreamMetadataImage = delta.apply(); + newStreams.put(streamId, newStreamMetadataImage); + } + }); + // apply the new created streams + changedStreams.entrySet().stream().filter(entry -> !newStreams.containsKey(entry.getKey())) + .forEach(entry -> { + StreamMetadataImage newStreamMetadataImage = entry.getValue().apply(); + newStreams.put(entry.getKey(), newStreamMetadataImage); + }); + + // apply the delta changes of old brokers since the last image + image.getBrokerStreamsMetadata().forEach((brokerId, brokerStreamMetadataImage) -> { + BrokerStreamMetadataDelta delta = changedBrokers.get(brokerId); + if (delta == null) { + // no change, check if deleted + if (!deletedBrokers.contains(brokerId)) { + newBrokerStreams.put(brokerId, brokerStreamMetadataImage); + } + } else { + // changed, apply the delta + BrokerStreamMetadataImage 
newBrokerStreamMetadataImage = delta.apply(); + newBrokerStreams.put(brokerId, newBrokerStreamMetadataImage); + } + }); + // apply the new created streams + changedBrokers.entrySet().stream().filter(entry -> !newBrokerStreams.containsKey(entry.getKey())) + .forEach(entry -> { + BrokerStreamMetadataImage newBrokerStreamMetadataImage = entry.getValue().apply(); + newBrokerStreams.put(entry.getKey(), newBrokerStreamMetadataImage); + }); + + return new StreamsMetadataImage(newStreams, newBrokerStreams); + } + +} diff --git a/metadata/src/main/java/org/apache/kafka/image/StreamsMetadataImage.java b/metadata/src/main/java/org/apache/kafka/image/StreamsMetadataImage.java new file mode 100644 index 0000000000..98a0569975 --- /dev/null +++ b/metadata/src/main/java/org/apache/kafka/image/StreamsMetadataImage.java @@ -0,0 +1,70 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.kafka.image; + +import java.util.Collections; +import java.util.Map; +import java.util.Objects; +import org.apache.kafka.image.writer.ImageWriter; +import org.apache.kafka.image.writer.ImageWriterOptions; + +public final class StreamsMetadataImage { + + public static final StreamsMetadataImage EMPTY = + new StreamsMetadataImage(Collections.emptyMap(), Collections.emptyMap()); + + private final Map streamsMetadata; + + private final Map brokerStreamsMetadata; + + public StreamsMetadataImage( + Map streamsMetadata, + Map brokerStreamsMetadata) { + this.streamsMetadata = streamsMetadata; + this.brokerStreamsMetadata = brokerStreamsMetadata; + } + + + boolean isEmpty() { + return this.brokerStreamsMetadata.isEmpty() && this.streamsMetadata.isEmpty(); + } + + public void write(ImageWriter writer, ImageWriterOptions options) { + } + + @Override + public boolean equals(Object obj) { + if (!(obj instanceof StreamsMetadataImage)) return false; + StreamsMetadataImage other = (StreamsMetadataImage) obj; + return this.streamsMetadata.equals(other.streamsMetadata) + && this.brokerStreamsMetadata.equals(other.brokerStreamsMetadata); + } + + @Override + public int hashCode() { + return Objects.hash(streamsMetadata, brokerStreamsMetadata); + } + + public Map getBrokerStreamsMetadata() { + return brokerStreamsMetadata; + } + + public Map getStreamsMetadata() { + return streamsMetadata; + } +} diff --git a/metadata/src/main/resources/common/metadata/RangeRecord.json b/metadata/src/main/resources/common/metadata/RangeRecord.json new file mode 100644 index 0000000000..350d4c9a26 --- /dev/null +++ b/metadata/src/main/resources/common/metadata/RangeRecord.json @@ -0,0 +1,60 @@ +// Licensed to the Apache Software Foundation (ASF) under one or more +// contributor license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright ownership. 
+// The ASF licenses this file to You under the Apache License, Version 2.0 +// (the "License"); you may not use this file except in compliance with +// the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +{ + "apiKey": 25, + "type": "metadata", + "name": "RangeRecord", + "validVersions": "0", + "flexibleVersions": "0+", + "fields": [ + { + "name": "StreamId", + "type": "int64", + "versions": "0+", + "about": "The Stream ID of the range" + }, + { + "name": "Epoch", + "type": "int32", + "versions": "0+", + "about": "The epoch of the range" + }, + { + "name": "RangeIndex", + "type": "int32", + "versions": "0+", + "about": "The index of the range" + }, + { + "name": "StartOffset", + "type": "int64", + "versions": "0+", + "about": "The start offset of the range" + }, + { + "name": "EndOffset", + "type": "int64", + "versions": "0+", + "about": "The end offset of the range" + }, + { + "name": "BrokerId", + "type": "int32", + "versions": "0+", + "about": "The Broker which created this range" + } + ] +} \ No newline at end of file diff --git a/metadata/src/main/resources/common/metadata/RemoveRangeRecord.json b/metadata/src/main/resources/common/metadata/RemoveRangeRecord.json new file mode 100644 index 0000000000..dce6c5243c --- /dev/null +++ b/metadata/src/main/resources/common/metadata/RemoveRangeRecord.json @@ -0,0 +1,36 @@ +// Licensed to the Apache Software Foundation (ASF) under one or more +// contributor license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright ownership. 
+// The ASF licenses this file to You under the Apache License, Version 2.0 +// (the "License"); you may not use this file except in compliance with +// the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +{ + "apiKey": 26, + "type": "metadata", + "name": "RemoveRangeRecord", + "validVersions": "0", + "flexibleVersions": "0+", + "fields": [ + { + "name": "StreamId", + "type": "int64", + "versions": "0+", + "about": "The Stream ID of the range" + }, + { + "name": "RangeIndex", + "type": "int32", + "versions": "0+", + "about": "The index of the range" + } + ] +} \ No newline at end of file diff --git a/metadata/src/main/resources/common/metadata/RemoveStreamObjectRecord.json b/metadata/src/main/resources/common/metadata/RemoveStreamObjectRecord.json new file mode 100644 index 0000000000..746eeb47f7 --- /dev/null +++ b/metadata/src/main/resources/common/metadata/RemoveStreamObjectRecord.json @@ -0,0 +1,36 @@ +// Licensed to the Apache Software Foundation (ASF) under one or more +// contributor license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright ownership. +// The ASF licenses this file to You under the Apache License, Version 2.0 +// (the "License"); you may not use this file except in compliance with +// the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +{ + "apiKey": 28, + "type": "metadata", + "name": "RemoveStreamObjectRecord", + "validVersions": "0", + "flexibleVersions": "0+", + "fields": [ + { + "name": "StreamId", + "type": "int64", + "versions": "0+", + "about": "The Stream ID of the stream in this object" + }, + { + "name": "ObjectId", + "type": "int64", + "versions": "0+", + "about": "The object id of this object" + } + ] +} \ No newline at end of file diff --git a/metadata/src/main/resources/common/metadata/RemoveStreamRecord.json b/metadata/src/main/resources/common/metadata/RemoveStreamRecord.json new file mode 100644 index 0000000000..5befb960de --- /dev/null +++ b/metadata/src/main/resources/common/metadata/RemoveStreamRecord.json @@ -0,0 +1,30 @@ +// Licensed to the Apache Software Foundation (ASF) under one or more +// contributor license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright ownership. +// The ASF licenses this file to You under the Apache License, Version 2.0 +// (the "License"); you may not use this file except in compliance with +// the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +{ + "apiKey": 23, + "type": "metadata", + "name": "RemoveStreamRecord", + "validVersions": "0", + "flexibleVersions": "0+", + "fields": [ + { + "name": "StreamId", + "type": "int64", + "versions": "0+", + "about": "The ID of the stream to be removed" + } + ] +} diff --git a/metadata/src/main/resources/common/metadata/RemoveWALObjectRecord.json b/metadata/src/main/resources/common/metadata/RemoveWALObjectRecord.json new file mode 100644 index 0000000000..ae126bd42e --- /dev/null +++ b/metadata/src/main/resources/common/metadata/RemoveWALObjectRecord.json @@ -0,0 +1,36 @@ +// Licensed to the Apache Software Foundation (ASF) under one or more +// contributor license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright ownership. +// The ASF licenses this file to You under the Apache License, Version 2.0 +// (the "License"); you may not use this file except in compliance with +// the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +{ + "apiKey": 30, + "type": "metadata", + "name": "RemoveWALObjectRecord", + "validVersions": "0", + "flexibleVersions": "0+", + "fields": [ + { + "name": "BrokerId", + "type": "int32", + "versions": "0+", + "about": "The broker which owns the object" + }, + { + "name": "ObjectId", + "type": "int64", + "versions": "0+", + "about": "The object id of this object" + } + ] +} \ No newline at end of file diff --git a/metadata/src/main/resources/common/metadata/StreamObjectRecord.json b/metadata/src/main/resources/common/metadata/StreamObjectRecord.json new file mode 100644 index 0000000000..492a5e1eed --- /dev/null +++ b/metadata/src/main/resources/common/metadata/StreamObjectRecord.json @@ -0,0 +1,78 @@ +// Licensed to the Apache Software Foundation (ASF) under one or more +// contributor license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright ownership. +// The ASF licenses this file to You under the Apache License, Version 2.0 +// (the "License"); you may not use this file except in compliance with +// the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +{ + "apiKey": 27, + "type": "metadata", + "name": "StreamObjectRecord", + "validVersions": "0", + "flexibleVersions": "0+", + "fields": [ + { + "name": "StreamId", + "type": "int64", + "versions": "0+", + "about": "The Stream ID of the stream in this object" + }, + { + "name": "StartOffset", + "type": "int64", + "versions": "0+", + "about": "The start offset of the stream in this object" + }, + { + "name": "EndOffset", + "type": "int64", + "versions": "0+", + "about": "The end offset of the stream in this object" + }, + { + "name": "ObjectId", + "type": "int64", + "versions": "0+", + "about": "The object id of the S3 object" + }, + { + "name": "ApplyTimeInMs", + "type": "int64", + "versions": "0+", + "about": "The object be applied timestamp" + }, + { + "name": "CreateTimeInMs", + "type": "int64", + "versions": "0+", + "about": "The object be created timestamp" + }, + { + "name": "DestroyTimeInMs", + "type": "int64", + "versions": "0+", + "about": "The object be destroyed timestamp" + }, + { + "name": "ObjectState", + "type": "int8", + "versions": "0+", + "about": "The object state" + }, + { + "name": "ObjectType", + "type": "int8", + "versions": "0+", + "about": "The object type" + } + ] +} \ No newline at end of file diff --git a/metadata/src/main/resources/common/metadata/StreamRecord.json b/metadata/src/main/resources/common/metadata/StreamRecord.json new file mode 100644 index 0000000000..68138a1152 --- /dev/null +++ b/metadata/src/main/resources/common/metadata/StreamRecord.json @@ -0,0 +1,42 @@ +// Licensed to the Apache Software Foundation (ASF) under one or more +// contributor license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright ownership. +// The ASF licenses this file to You under the Apache License, Version 2.0 +// (the "License"); you may not use this file except in compliance with +// the License. 
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +{ + "apiKey": 22, + "type": "metadata", + "name": "StreamRecord", + "validVersions": "0", + "flexibleVersions": "0+", + "fields": [ + { + "name": "StreamId", + "type": "int64", + "versions": "0+", + "about": "The Stream ID" + }, + { + "name": "Epoch", + "type": "int32", + "versions": "0+", + "about": "The epoch" + }, + { + "name": "StartOffset", + "type": "int64", + "versions": "0+", + "about": "The start offset of the stream" + } + ] +} \ No newline at end of file diff --git a/metadata/src/main/resources/common/metadata/WALObjectRecord.json b/metadata/src/main/resources/common/metadata/WALObjectRecord.json new file mode 100644 index 0000000000..b422b9b1b5 --- /dev/null +++ b/metadata/src/main/resources/common/metadata/WALObjectRecord.json @@ -0,0 +1,92 @@ +// Licensed to the Apache Software Foundation (ASF) under one or more +// contributor license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright ownership. +// The ASF licenses this file to You under the Apache License, Version 2.0 +// (the "License"); you may not use this file except in compliance with +// the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +{ + "apiKey": 29, + "type": "metadata", + "name": "WALObjectRecord", + "validVersions": "0", + "flexibleVersions": "0+", + "fields": [ + { + "name": "BrokerId", + "type": "int32", + "versions": "0+", + "about": "The broker which owns the object" + }, + { + "name": "ObjectId", + "type": "int64", + "versions": "0+", + "about": "The object id of the S3 object" + }, + { + "name": "ApplyTimeInMs", + "type": "int64", + "versions": "0+", + "about": "The object be applied timestamp" + }, + { + "name": "CreateTimeInMs", + "type": "int64", + "versions": "0+", + "about": "The object be created timestamp" + }, + { + "name": "DestroyTimeInMs", + "type": "int64", + "versions": "0+", + "about": "The object be destroyed timestamp" + }, + { + "name": "ObjectState", + "type": "int8", + "versions": "0+", + "about": "The object state" + }, + { + "name": "ObjectType", + "type": "int8", + "versions": "0+", + "about": "The object type" + }, + { + "name": "StreamsIndex", + "type": "[]StreamIndex", + "versions": "0+", + "about": "The streams index in this object", + "fields": [ + { + "name": "StreamId", + "type": "int64", + "versions": "0+", + "about": "The Stream ID of the stream in this object" + }, + { + "name": "StartOffset", + "type": "int64", + "versions": "0+", + "about": "The start offset of the stream in this object" + }, + { + "name": "EndOffset", + "type": "int64", + "versions": "0+", + "about": "The end offset of the stream in this object" + } + ] + } + ] +} \ No newline at end of file From 22caeef0bb61ee59d5a664279c2d20da02981ca3 Mon Sep 17 00:00:00 2001 From: TheR1sing3un Date: Sun, 20 Aug 2023 00:51:33 +0800 Subject: [PATCH 02/10] feat(s3): support image::write function for Stream related images 1. 
support image::write function for Stream related images Signed-off-by: TheR1sing3un --- .../controller/stream/RangeMetadata.java | 13 ++++++++ .../stream/s3/ObjectStreamIndex.java | 9 ++++++ .../controller/stream/s3/StreamObject.java | 17 ++++++++++ .../kafka/controller/stream/s3/WALObject.java | 18 +++++++++++ .../image/BrokerStreamMetadataImage.java | 3 +- .../kafka/image/StreamMetadataImage.java | 10 +++++- .../kafka/image/StreamsMetadataImage.java | 2 ++ .../common/metadata/StreamObjectRecord.json | 6 ++++ .../common/metadata/WALObjectRecord.json | 6 ++++ .../apache/kafka/image/MetadataImageTest.java | 7 +++-- .../kafka/image/StreamsMetadataImageTest.java | 31 +++++++++++++++++++ 11 files changed, 118 insertions(+), 4 deletions(-) create mode 100644 metadata/src/test/java/org/apache/kafka/image/StreamsMetadataImageTest.java diff --git a/metadata/src/main/java/org/apache/kafka/controller/stream/RangeMetadata.java b/metadata/src/main/java/org/apache/kafka/controller/stream/RangeMetadata.java index bf3e249085..a3c310f6c5 100644 --- a/metadata/src/main/java/org/apache/kafka/controller/stream/RangeMetadata.java +++ b/metadata/src/main/java/org/apache/kafka/controller/stream/RangeMetadata.java @@ -18,8 +18,11 @@ package org.apache.kafka.controller.stream; import java.util.Optional; +import org.apache.kafka.common.metadata.RangeRecord; +import org.apache.kafka.server.common.ApiMessageAndVersion; public class RangeMetadata implements Comparable { + private Long streamId; private Integer epoch; private Integer rangeIndex; private Long startOffset; @@ -49,4 +52,14 @@ public Optional getEndOffset() { public Integer getBrokerId() { return brokerId; } + + public ApiMessageAndVersion toRecord() { + return new ApiMessageAndVersion(new RangeRecord() + .setStreamId(streamId) + .setEpoch(epoch) + .setBrokerId(brokerId) + .setRangeIndex(rangeIndex) + .setStartOffset(startOffset) + .setEndOffset(endOffset.get()), (short) 0); + } } diff --git 
a/metadata/src/main/java/org/apache/kafka/controller/stream/s3/ObjectStreamIndex.java b/metadata/src/main/java/org/apache/kafka/controller/stream/s3/ObjectStreamIndex.java index 3ea742e389..0e66db61a0 100644 --- a/metadata/src/main/java/org/apache/kafka/controller/stream/s3/ObjectStreamIndex.java +++ b/metadata/src/main/java/org/apache/kafka/controller/stream/s3/ObjectStreamIndex.java @@ -17,6 +17,8 @@ package org.apache.kafka.controller.stream.s3; +import org.apache.kafka.common.metadata.WALObjectRecord.StreamIndex; + /** * ObjectStreamIndex is the index of a stream range in a WAL object or STREAM object. */ @@ -51,4 +53,11 @@ public int compareTo(ObjectStreamIndex o) { int res = this.streamId.compareTo(o.streamId); return res == 0 ? this.startOffset.compareTo(o.startOffset) : res; } + + public StreamIndex toRecordStreamIndex() { + return new StreamIndex() + .setStreamId(streamId) + .setStartOffset(startOffset) + .setEndOffset(endOffset); + } } diff --git a/metadata/src/main/java/org/apache/kafka/controller/stream/s3/StreamObject.java b/metadata/src/main/java/org/apache/kafka/controller/stream/s3/StreamObject.java index e178275290..bbe0647321 100644 --- a/metadata/src/main/java/org/apache/kafka/controller/stream/s3/StreamObject.java +++ b/metadata/src/main/java/org/apache/kafka/controller/stream/s3/StreamObject.java @@ -17,6 +17,9 @@ package org.apache.kafka.controller.stream.s3; +import org.apache.kafka.common.metadata.StreamObjectRecord; +import org.apache.kafka.server.common.ApiMessageAndVersion; + public class StreamObject extends S3Object { private ObjectStreamIndex streamIndex; @@ -60,4 +63,18 @@ public StreamObjectCreateContext( public ObjectStreamIndex getStreamIndex() { return streamIndex; } + + public ApiMessageAndVersion toRecord() { + return new ApiMessageAndVersion(new StreamObjectRecord() + .setObjectId(objectId) + .setStreamId(streamIndex.getStreamId()) + .setObjectState((byte)objectState.ordinal()) + .setObjectType((byte)objectType.ordinal()) + 
.setApplyTimeInMs(applyTimeInMs.get()) + .setCreateTimeInMs(createTimeInMs.get()) + .setDestroyTimeInMs(destroyTimeInMs.get()) + .setObjectSize(objectSize.get()) + .setStartOffset(streamIndex.getStartOffset()) + .setEndOffset(streamIndex.getEndOffset()), (short) 0); + } } diff --git a/metadata/src/main/java/org/apache/kafka/controller/stream/s3/WALObject.java b/metadata/src/main/java/org/apache/kafka/controller/stream/s3/WALObject.java index dfd4ec6484..6f1fede45d 100644 --- a/metadata/src/main/java/org/apache/kafka/controller/stream/s3/WALObject.java +++ b/metadata/src/main/java/org/apache/kafka/controller/stream/s3/WALObject.java @@ -20,8 +20,11 @@ import java.util.List; import java.util.Map; import java.util.stream.Collectors; +import org.apache.kafka.common.metadata.WALObjectRecord; +import org.apache.kafka.server.common.ApiMessageAndVersion; public class WALObject extends S3Object { + private Integer brokerId; private Map streamsIndex; @@ -56,4 +59,19 @@ public WALObjectCreateContext( this.brokerId = brokerId; } } + + public ApiMessageAndVersion toRecord() { + return new ApiMessageAndVersion(new WALObjectRecord() + .setObjectId(objectId) + .setObjectState((byte) objectState.ordinal()) + .setObjectType((byte) objectType.ordinal()) + .setApplyTimeInMs(applyTimeInMs.get()) + .setCreateTimeInMs(createTimeInMs.get()) + .setDestroyTimeInMs(destroyTimeInMs.get()) + .setObjectSize(objectSize.get()) + .setStreamsIndex( + streamsIndex.values().stream() + .map(ObjectStreamIndex::toRecordStreamIndex) + .collect(Collectors.toList())), (short) 0); + } } diff --git a/metadata/src/main/java/org/apache/kafka/image/BrokerStreamMetadataImage.java b/metadata/src/main/java/org/apache/kafka/image/BrokerStreamMetadataImage.java index 179c243760..9c18123000 100644 --- a/metadata/src/main/java/org/apache/kafka/image/BrokerStreamMetadataImage.java +++ b/metadata/src/main/java/org/apache/kafka/image/BrokerStreamMetadataImage.java @@ -20,6 +20,7 @@ import java.util.Objects; import 
java.util.Set; +import org.apache.kafka.common.metadata.WALObjectRecord; import org.apache.kafka.controller.stream.s3.WALObject; import org.apache.kafka.image.writer.ImageWriter; import org.apache.kafka.image.writer.ImageWriterOptions; @@ -51,7 +52,7 @@ public int hashCode() { } public void write(ImageWriter writer, ImageWriterOptions options) { - + walObjects.forEach(walObject -> writer.write(walObject.toRecord())); } public Set getWalObjects() { diff --git a/metadata/src/main/java/org/apache/kafka/image/StreamMetadataImage.java b/metadata/src/main/java/org/apache/kafka/image/StreamMetadataImage.java index 9c608117dc..9f992b7da7 100644 --- a/metadata/src/main/java/org/apache/kafka/image/StreamMetadataImage.java +++ b/metadata/src/main/java/org/apache/kafka/image/StreamMetadataImage.java @@ -20,12 +20,15 @@ import java.util.Map; import java.util.Objects; import java.util.Set; +import org.apache.kafka.common.metadata.RangeRecord; +import org.apache.kafka.common.metadata.StreamRecord; import org.apache.kafka.controller.stream.RangeMetadata; import org.apache.kafka.controller.stream.s3.StreamObject; import org.apache.kafka.image.writer.ImageWriter; import org.apache.kafka.image.writer.ImageWriterOptions; public class StreamMetadataImage { + private final Long streamId; private final Integer epoch; @@ -50,7 +53,12 @@ public StreamMetadataImage( } public void write(ImageWriter writer, ImageWriterOptions options) { - + writer.write(0, new StreamRecord() + .setStreamId(streamId) + .setEpoch(epoch) + .setStartOffset(startOffset)); + ranges.values().forEach(rangeMetadata -> writer.write(rangeMetadata.toRecord())); + streams.forEach(streamObject -> writer.write(streamObject.toRecord())); } public Map getRanges() { diff --git a/metadata/src/main/java/org/apache/kafka/image/StreamsMetadataImage.java b/metadata/src/main/java/org/apache/kafka/image/StreamsMetadataImage.java index 98a0569975..f4ce20c7a3 100644 --- 
a/metadata/src/main/java/org/apache/kafka/image/StreamsMetadataImage.java +++ b/metadata/src/main/java/org/apache/kafka/image/StreamsMetadataImage.java @@ -45,6 +45,8 @@ boolean isEmpty() { } public void write(ImageWriter writer, ImageWriterOptions options) { + streamsMetadata.values().forEach(image -> image.write(writer, options)); + brokerStreamsMetadata.values().forEach(image -> image.write(writer, options)); } @Override diff --git a/metadata/src/main/resources/common/metadata/StreamObjectRecord.json b/metadata/src/main/resources/common/metadata/StreamObjectRecord.json index 492a5e1eed..3ea7a59795 100644 --- a/metadata/src/main/resources/common/metadata/StreamObjectRecord.json +++ b/metadata/src/main/resources/common/metadata/StreamObjectRecord.json @@ -44,6 +44,12 @@ "versions": "0+", "about": "The object id of the S3 object" }, + { + "name": "ObjectSize", + "type": "int64", + "versions": "0+", + "about": "The object size of the S3 object" + }, { "name": "ApplyTimeInMs", "type": "int64", diff --git a/metadata/src/main/resources/common/metadata/WALObjectRecord.json b/metadata/src/main/resources/common/metadata/WALObjectRecord.json index b422b9b1b5..51b0a2316a 100644 --- a/metadata/src/main/resources/common/metadata/WALObjectRecord.json +++ b/metadata/src/main/resources/common/metadata/WALObjectRecord.json @@ -32,6 +32,12 @@ "versions": "0+", "about": "The object id of the S3 object" }, + { + "name": "ObjectSize", + "type": "int64", + "versions": "0+", + "about": "The object size of the S3 object" + }, { "name": "ApplyTimeInMs", "type": "int64", diff --git a/metadata/src/test/java/org/apache/kafka/image/MetadataImageTest.java b/metadata/src/test/java/org/apache/kafka/image/MetadataImageTest.java index be21a87bd6..aa8a985b7d 100644 --- a/metadata/src/test/java/org/apache/kafka/image/MetadataImageTest.java +++ b/metadata/src/test/java/org/apache/kafka/image/MetadataImageTest.java @@ -43,7 +43,8 @@ public class MetadataImageTest { ConfigurationsImageTest.IMAGE1, 
ClientQuotasImageTest.IMAGE1, ProducerIdsImageTest.IMAGE1, - AclsImageTest.IMAGE1); + AclsImageTest.IMAGE1, + StreamsMetadataImageTest.IMAGE1); DELTA1 = new MetadataDelta.Builder(). setImage(IMAGE1). @@ -55,6 +56,7 @@ public class MetadataImageTest { RecordTestUtils.replayAll(DELTA1, ClientQuotasImageTest.DELTA1_RECORDS); RecordTestUtils.replayAll(DELTA1, ProducerIdsImageTest.DELTA1_RECORDS); RecordTestUtils.replayAll(DELTA1, AclsImageTest.DELTA1_RECORDS); + RecordTestUtils.replayAll(DELTA1, StreamsMetadataImageTest.DELTA1_RECORDS); IMAGE2 = new MetadataImage( new MetadataProvenance(200, 5, 4000), @@ -64,7 +66,8 @@ public class MetadataImageTest { ConfigurationsImageTest.IMAGE2, ClientQuotasImageTest.IMAGE2, ProducerIdsImageTest.IMAGE2, - AclsImageTest.IMAGE2); + AclsImageTest.IMAGE2, + StreamsMetadataImageTest.IMAGE2); } @Test diff --git a/metadata/src/test/java/org/apache/kafka/image/StreamsMetadataImageTest.java b/metadata/src/test/java/org/apache/kafka/image/StreamsMetadataImageTest.java new file mode 100644 index 0000000000..ef1da82742 --- /dev/null +++ b/metadata/src/test/java/org/apache/kafka/image/StreamsMetadataImageTest.java @@ -0,0 +1,31 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.kafka.image; + +import java.util.List; +import org.apache.kafka.server.common.ApiMessageAndVersion; +import org.junit.jupiter.api.Timeout; + +@Timeout(value = 40) +public class StreamsMetadataImageTest { + static final StreamsMetadataImage IMAGE1; + + static final List DELTA1_RECORDS; + + static final StreamsMetadataImage IMAGE2; +} From bfe76105d75cf7dee3df17965ae85db26cd70672 Mon Sep 17 00:00:00 2001 From: TheR1sing3un Date: Sun, 20 Aug 2023 01:13:50 +0800 Subject: [PATCH 03/10] fix(s3): fix compile errors 1. fix compile errors Signed-off-by: TheR1sing3un --- .../test/scala/unit/kafka/server/MetadataCacheTest.scala | 3 ++- .../test/scala/unit/kafka/server/ReplicaManagerTest.scala | 5 +++-- .../org/apache/kafka/image/BrokerStreamMetadataImage.java | 1 - .../org/apache/kafka/image/StreamsMetadataImageTest.java | 8 ++++++++ 4 files changed, 13 insertions(+), 4 deletions(-) diff --git a/core/src/test/scala/unit/kafka/server/MetadataCacheTest.scala b/core/src/test/scala/unit/kafka/server/MetadataCacheTest.scala index d2df68f6da..8b0cdd640a 100644 --- a/core/src/test/scala/unit/kafka/server/MetadataCacheTest.scala +++ b/core/src/test/scala/unit/kafka/server/MetadataCacheTest.scala @@ -71,7 +71,8 @@ object MetadataCacheTest { image.configs(), image.clientQuotas(), image.producerIds(), - image.acls()) + image.acls(), + image.streamsMetadata()) val delta = new MetadataDelta.Builder().setImage(partialImage).build() def toRecord(broker: UpdateMetadataBroker): RegisterBrokerRecord = { diff --git a/core/src/test/scala/unit/kafka/server/ReplicaManagerTest.scala b/core/src/test/scala/unit/kafka/server/ReplicaManagerTest.scala index e623816c39..594c452b03 100644 --- a/core/src/test/scala/unit/kafka/server/ReplicaManagerTest.scala +++ b/core/src/test/scala/unit/kafka/server/ReplicaManagerTest.scala @@ -54,7 +54,7 @@ import org.apache.kafka.common.requests._ import org.apache.kafka.common.security.auth.KafkaPrincipal import 
org.apache.kafka.common.utils.{LogContext, Time, Utils} import org.apache.kafka.common.{IsolationLevel, Node, TopicIdPartition, TopicPartition, Uuid} -import org.apache.kafka.image.{AclsImage, ClientQuotasImage, ClusterImageTest, ConfigurationsImage, FeaturesImage, MetadataImage, MetadataProvenance, ProducerIdsImage, TopicsDelta, TopicsImage} +import org.apache.kafka.image.{AclsImage, ClientQuotasImage, ClusterImageTest, ConfigurationsImage, FeaturesImage, MetadataImage, MetadataProvenance, ProducerIdsImage, TopicsDelta, TopicsImage, StreamsMetadataImage} import org.apache.kafka.metadata.LeaderConstants.NO_LEADER import org.apache.kafka.metadata.LeaderRecoveryState import org.apache.kafka.server.common.MetadataVersion.IBP_2_6_IV0 @@ -4128,7 +4128,8 @@ class ReplicaManagerTest { ConfigurationsImage.EMPTY, ClientQuotasImage.EMPTY, ProducerIdsImage.EMPTY, - AclsImage.EMPTY + AclsImage.EMPTY, + StreamsMetadataImage.EMPTY ) } diff --git a/metadata/src/main/java/org/apache/kafka/image/BrokerStreamMetadataImage.java b/metadata/src/main/java/org/apache/kafka/image/BrokerStreamMetadataImage.java index 9c18123000..adb61ee958 100644 --- a/metadata/src/main/java/org/apache/kafka/image/BrokerStreamMetadataImage.java +++ b/metadata/src/main/java/org/apache/kafka/image/BrokerStreamMetadataImage.java @@ -20,7 +20,6 @@ import java.util.Objects; import java.util.Set; -import org.apache.kafka.common.metadata.WALObjectRecord; import org.apache.kafka.controller.stream.s3.WALObject; import org.apache.kafka.image.writer.ImageWriter; import org.apache.kafka.image.writer.ImageWriterOptions; diff --git a/metadata/src/test/java/org/apache/kafka/image/StreamsMetadataImageTest.java b/metadata/src/test/java/org/apache/kafka/image/StreamsMetadataImageTest.java index ef1da82742..f0ed0e67a7 100644 --- a/metadata/src/test/java/org/apache/kafka/image/StreamsMetadataImageTest.java +++ b/metadata/src/test/java/org/apache/kafka/image/StreamsMetadataImageTest.java @@ -28,4 +28,12 @@ public class 
StreamsMetadataImageTest { static final List DELTA1_RECORDS; static final StreamsMetadataImage IMAGE2; + + // TODO: complete the test for StreamsMetadataImage + + static { + IMAGE1 = null; + DELTA1_RECORDS = null; + IMAGE2 = null; + } } From 09e0fec55e3b1b0e491b5ff403f4bf223efb70af Mon Sep 17 00:00:00 2001 From: TheR1sing3un Date: Sun, 20 Aug 2023 12:22:02 +0800 Subject: [PATCH 04/10] feat(s3): support Stream related delta function: replay 1. support Stream related delta function: replay Signed-off-by: TheR1sing3un --- .../controller/stream/RangeMetadata.java | 11 +++ .../stream/s3/ObjectStreamIndex.java | 4 + .../kafka/controller/stream/s3/S3Object.java | 66 ++++++++++++--- .../controller/stream/s3/S3ObjectType.java | 8 ++ .../controller/stream/s3/StreamObject.java | 15 +++- .../kafka/controller/stream/s3/WALObject.java | 29 ++++++- .../image/BrokerStreamMetadataDelta.java | 12 ++- .../org/apache/kafka/image/MetadataDelta.java | 64 +++++++++++++++ .../kafka/image/StreamMetadataDelta.java | 20 +++++ .../kafka/image/StreamMetadataImage.java | 6 ++ .../kafka/image/StreamsMetadataDelta.java | 81 +++++++++++++++++++ 11 files changed, 303 insertions(+), 13 deletions(-) diff --git a/metadata/src/main/java/org/apache/kafka/controller/stream/RangeMetadata.java b/metadata/src/main/java/org/apache/kafka/controller/stream/RangeMetadata.java index a3c310f6c5..6dfe9d53f5 100644 --- a/metadata/src/main/java/org/apache/kafka/controller/stream/RangeMetadata.java +++ b/metadata/src/main/java/org/apache/kafka/controller/stream/RangeMetadata.java @@ -62,4 +62,15 @@ public ApiMessageAndVersion toRecord() { .setStartOffset(startOffset) .setEndOffset(endOffset.get()), (short) 0); } + + public static RangeMetadata of(RangeRecord record) { + RangeMetadata rangeMetadata = new RangeMetadata(); + rangeMetadata.streamId = record.streamId(); + rangeMetadata.epoch = record.epoch(); + rangeMetadata.rangeIndex = record.rangeIndex(); + rangeMetadata.startOffset = record.startOffset(); + 
rangeMetadata.endOffset = Optional.ofNullable(record.endOffset()); + rangeMetadata.brokerId = record.brokerId(); + return rangeMetadata; + } } diff --git a/metadata/src/main/java/org/apache/kafka/controller/stream/s3/ObjectStreamIndex.java b/metadata/src/main/java/org/apache/kafka/controller/stream/s3/ObjectStreamIndex.java index 0e66db61a0..356dd8f2d4 100644 --- a/metadata/src/main/java/org/apache/kafka/controller/stream/s3/ObjectStreamIndex.java +++ b/metadata/src/main/java/org/apache/kafka/controller/stream/s3/ObjectStreamIndex.java @@ -60,4 +60,8 @@ public StreamIndex toRecordStreamIndex() { .setStartOffset(startOffset) .setEndOffset(endOffset); } + + public static ObjectStreamIndex of(StreamIndex index) { + return new ObjectStreamIndex(index.streamId(), index.startOffset(), index.endOffset()); + } } diff --git a/metadata/src/main/java/org/apache/kafka/controller/stream/s3/S3Object.java b/metadata/src/main/java/org/apache/kafka/controller/stream/s3/S3Object.java index 07708d4669..09b9a5a0ad 100644 --- a/metadata/src/main/java/org/apache/kafka/controller/stream/s3/S3Object.java +++ b/metadata/src/main/java/org/apache/kafka/controller/stream/s3/S3Object.java @@ -17,11 +17,11 @@ package org.apache.kafka.controller.stream.s3; +import java.util.Objects; import java.util.Optional; /** - * S3Object is the base class of object in S3. - * Manages the lifecycle of S3Object. + * S3Object is the base class of object in S3. Manages the lifecycle of S3Object. 
*/ public abstract class S3Object implements Comparable { @@ -37,7 +37,7 @@ public abstract class S3Object implements Comparable { protected Optional destroyTimeInMs = Optional.empty(); - protected ObjectState objectState = ObjectState.UNINITIALIZED; + protected S3ObjectState s3ObjectState = S3ObjectState.UNINITIALIZED; protected S3ObjectType objectType = S3ObjectType.UNKNOWN; @@ -45,20 +45,39 @@ protected S3Object(final Long objectId) { this.objectId = objectId; } + protected S3Object( + final Long objectId, + final Long objectSize, + final String objectAddress, + final Long applyTimeInMs, + final Long createTimeInMs, + final Long destroyTimeInMs, + final S3ObjectState s3ObjectState, + final S3ObjectType objectType) { + this.objectId = objectId; + this.objectSize = Optional.of(objectSize); + this.objectAddress = Optional.of(objectAddress); + this.applyTimeInMs = Optional.of(applyTimeInMs); + this.createTimeInMs = Optional.of(createTimeInMs); + this.destroyTimeInMs = Optional.of(destroyTimeInMs); + this.objectType = objectType; + this.s3ObjectState = s3ObjectState; + } + public void onApply() { - if (this.objectState != ObjectState.UNINITIALIZED) { + if (this.s3ObjectState != S3ObjectState.UNINITIALIZED) { throw new IllegalStateException("Object is not in UNINITIALIZED state"); } - this.objectState = ObjectState.APPLIED; + this.s3ObjectState = S3ObjectState.APPLIED; this.applyTimeInMs = Optional.of(System.currentTimeMillis()); } public void onCreate(S3ObjectCreateContext createContext) { // TODO: decide fetch object metadata from S3 or let broker send it to controller - if (this.objectState != ObjectState.APPLIED) { + if (this.s3ObjectState != S3ObjectState.APPLIED) { throw new IllegalStateException("Object is not in APPLIED state"); } - this.objectState = ObjectState.CREATED; + this.s3ObjectState = S3ObjectState.CREATED; this.createTimeInMs = Optional.of(createContext.createTimeInMs); this.objectSize = Optional.of(createContext.objectSize); this.objectAddress = 
Optional.of(createContext.objectAddress); @@ -66,11 +85,11 @@ public void onCreate(S3ObjectCreateContext createContext) { } public void onDestroy() { - if (this.objectState != ObjectState.CREATED) { + if (this.s3ObjectState != S3ObjectState.CREATED) { throw new IllegalStateException("Object is not in CREATED state"); } S3ObjectManager.destroy(this, () -> { - this.objectState = ObjectState.DESTROYED; + this.s3ObjectState = S3ObjectState.DESTROYED; this.destroyTimeInMs = Optional.of(System.currentTimeMillis()); }); } @@ -79,19 +98,29 @@ public S3ObjectType getObjectType() { return objectType; } - enum ObjectState { + enum S3ObjectState { UNINITIALIZED, APPLIED, CREATED, MARK_DESTROYED, DESTROYED; + + public static S3ObjectState fromByte(Byte b) { + int ordinal = b.intValue(); + if (ordinal < 0 || ordinal >= values().length) { + throw new IllegalArgumentException("Invalid ObjectState ordinal " + ordinal); + } + return values()[ordinal]; + } } public class S3ObjectCreateContext { + private final Long createTimeInMs; private final Long objectSize; private final String objectAddress; private final S3ObjectType objectType; + public S3ObjectCreateContext( final Long createTimeInMs, final Long objectSize, @@ -108,4 +137,21 @@ public S3ObjectCreateContext( public int compareTo(S3Object o) { return this.objectId.compareTo(o.objectId); } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + S3Object s3Object = (S3Object) o; + return Objects.equals(objectId, s3Object.objectId); + } + + @Override + public int hashCode() { + return Objects.hash(objectId); + } } diff --git a/metadata/src/main/java/org/apache/kafka/controller/stream/s3/S3ObjectType.java b/metadata/src/main/java/org/apache/kafka/controller/stream/s3/S3ObjectType.java index eea9dc4394..1a56adab69 100644 --- a/metadata/src/main/java/org/apache/kafka/controller/stream/s3/S3ObjectType.java +++ 
b/metadata/src/main/java/org/apache/kafka/controller/stream/s3/S3ObjectType.java @@ -42,4 +42,12 @@ public enum S3ObjectType { * UNKNOWN object type */ UNKNOWN; + + public static S3ObjectType fromByte(Byte b) { + int ordinal = b.intValue(); + if (ordinal < 0 || ordinal >= values().length) { + return UNKNOWN; + } + return values()[ordinal]; + } } diff --git a/metadata/src/main/java/org/apache/kafka/controller/stream/s3/StreamObject.java b/metadata/src/main/java/org/apache/kafka/controller/stream/s3/StreamObject.java index bbe0647321..49b1748740 100644 --- a/metadata/src/main/java/org/apache/kafka/controller/stream/s3/StreamObject.java +++ b/metadata/src/main/java/org/apache/kafka/controller/stream/s3/StreamObject.java @@ -17,6 +17,7 @@ package org.apache.kafka.controller.stream.s3; +import java.util.Optional; import org.apache.kafka.common.metadata.StreamObjectRecord; import org.apache.kafka.server.common.ApiMessageAndVersion; @@ -68,7 +69,7 @@ public ApiMessageAndVersion toRecord() { return new ApiMessageAndVersion(new StreamObjectRecord() .setObjectId(objectId) .setStreamId(streamIndex.getStreamId()) - .setObjectState((byte)objectState.ordinal()) + .setObjectState((byte) s3ObjectState.ordinal()) .setObjectType((byte)objectType.ordinal()) .setApplyTimeInMs(applyTimeInMs.get()) .setCreateTimeInMs(createTimeInMs.get()) @@ -77,4 +78,16 @@ public ApiMessageAndVersion toRecord() { .setStartOffset(streamIndex.getStartOffset()) .setEndOffset(streamIndex.getEndOffset()), (short) 0); } + + public static StreamObject of(StreamObjectRecord record) { + StreamObject streamObject = new StreamObject(record.objectId()); + streamObject.objectType = S3ObjectType.fromByte(record.objectType()); + streamObject.s3ObjectState = S3ObjectState.fromByte(record.objectState()); + streamObject.applyTimeInMs = Optional.of(record.applyTimeInMs()); + streamObject.createTimeInMs = Optional.of(record.createTimeInMs()); + streamObject.destroyTimeInMs = Optional.of(record.destroyTimeInMs()); + 
streamObject.objectSize = Optional.of(record.objectSize()); + streamObject.streamIndex = new ObjectStreamIndex(record.streamId(), record.startOffset(), record.endOffset()); + return streamObject; + } } diff --git a/metadata/src/main/java/org/apache/kafka/controller/stream/s3/WALObject.java b/metadata/src/main/java/org/apache/kafka/controller/stream/s3/WALObject.java index 6f1fede45d..bff239bc72 100644 --- a/metadata/src/main/java/org/apache/kafka/controller/stream/s3/WALObject.java +++ b/metadata/src/main/java/org/apache/kafka/controller/stream/s3/WALObject.java @@ -34,6 +34,24 @@ public WALObject(Long objectId) { super(objectId); } + private WALObject( + final Long objectId, + final Long objectSize, + final String objectAddress, + final Long applyTimeInMs, + final Long createTimeInMs, + final Long destroyTimeInMs, + final S3ObjectState s3ObjectState, + final S3ObjectType objectType, + final Integer brokerId, + final List streamsIndex) { + super(objectId, objectSize, objectAddress, applyTimeInMs, createTimeInMs, destroyTimeInMs, s3ObjectState, objectType); + this.objectType = objectType; + this.brokerId = brokerId; + this.streamsIndex = streamsIndex.stream().collect( + Collectors.toMap(ObjectStreamIndex::getStreamId, index -> index)); + } + @Override public void onCreate(S3ObjectCreateContext createContext) { super.onCreate(createContext); @@ -63,7 +81,7 @@ public WALObjectCreateContext( public ApiMessageAndVersion toRecord() { return new ApiMessageAndVersion(new WALObjectRecord() .setObjectId(objectId) - .setObjectState((byte) objectState.ordinal()) + .setObjectState((byte) s3ObjectState.ordinal()) .setObjectType((byte) objectType.ordinal()) .setApplyTimeInMs(applyTimeInMs.get()) .setCreateTimeInMs(createTimeInMs.get()) @@ -74,4 +92,13 @@ public ApiMessageAndVersion toRecord() { .map(ObjectStreamIndex::toRecordStreamIndex) .collect(Collectors.toList())), (short) 0); } + + public static WALObject of(WALObjectRecord record) { + WALObject walObject = new WALObject( + 
record.objectId(), record.objectSize(), null, + record.applyTimeInMs(), record.createTimeInMs(), record.destroyTimeInMs(), + S3ObjectState.fromByte(record.objectState()), S3ObjectType.fromByte(record.objectType()), + record.brokerId(), record.streamsIndex().stream().map(ObjectStreamIndex::of).collect(Collectors.toList())); + return walObject; + } } diff --git a/metadata/src/main/java/org/apache/kafka/image/BrokerStreamMetadataDelta.java b/metadata/src/main/java/org/apache/kafka/image/BrokerStreamMetadataDelta.java index 9aadbe46db..b97c3dd292 100644 --- a/metadata/src/main/java/org/apache/kafka/image/BrokerStreamMetadataDelta.java +++ b/metadata/src/main/java/org/apache/kafka/image/BrokerStreamMetadataDelta.java @@ -19,6 +19,8 @@ import java.util.HashSet; import java.util.Set; +import org.apache.kafka.common.metadata.RemoveWALObjectRecord; +import org.apache.kafka.common.metadata.WALObjectRecord; import org.apache.kafka.controller.stream.s3.WALObject; public class BrokerStreamMetadataDelta { @@ -26,12 +28,20 @@ public class BrokerStreamMetadataDelta { private final BrokerStreamMetadataImage image; private final Set changedWALObjects = new HashSet<>(); - private final Set removedWALObjects = new HashSet<>(); + private final Set removedWALObjects = new HashSet<>(); public BrokerStreamMetadataDelta(BrokerStreamMetadataImage image) { this.image = image; } + public void replay(WALObjectRecord record) { + changedWALObjects.add(WALObject.of(record)); + } + + public void replay(RemoveWALObjectRecord record) { + removedWALObjects.add(new WALObject(record.objectId())); + } + public BrokerStreamMetadataImage apply() { Set newWALObjects = new HashSet<>(image.getWalObjects()); // remove all removed WAL objects diff --git a/metadata/src/main/java/org/apache/kafka/image/MetadataDelta.java b/metadata/src/main/java/org/apache/kafka/image/MetadataDelta.java index 00ba907259..b64f242385 100644 --- a/metadata/src/main/java/org/apache/kafka/image/MetadataDelta.java +++ 
b/metadata/src/main/java/org/apache/kafka/image/MetadataDelta.java @@ -27,12 +27,20 @@ import org.apache.kafka.common.metadata.PartitionChangeRecord; import org.apache.kafka.common.metadata.PartitionRecord; import org.apache.kafka.common.metadata.ProducerIdsRecord; +import org.apache.kafka.common.metadata.RangeRecord; import org.apache.kafka.common.metadata.RegisterBrokerRecord; import org.apache.kafka.common.metadata.RemoveAccessControlEntryRecord; +import org.apache.kafka.common.metadata.RemoveRangeRecord; +import org.apache.kafka.common.metadata.RemoveStreamObjectRecord; +import org.apache.kafka.common.metadata.RemoveStreamRecord; import org.apache.kafka.common.metadata.RemoveTopicRecord; +import org.apache.kafka.common.metadata.RemoveWALObjectRecord; +import org.apache.kafka.common.metadata.StreamObjectRecord; +import org.apache.kafka.common.metadata.StreamRecord; import org.apache.kafka.common.metadata.TopicRecord; import org.apache.kafka.common.metadata.UnfenceBrokerRecord; import org.apache.kafka.common.metadata.UnregisterBrokerRecord; +import org.apache.kafka.common.metadata.WALObjectRecord; import org.apache.kafka.common.protocol.ApiMessage; import org.apache.kafka.server.common.MetadataVersion; @@ -222,6 +230,30 @@ public void replay(ApiMessage record) { case ZK_MIGRATION_STATE_RECORD: // TODO handle this break; + case STREAM_RECORD: + replay((StreamRecord) record); + break; + case REMOVE_STREAM_RECORD: + replay((RemoveStreamRecord) record); + break; + case RANGE_RECORD: + replay((RangeRecord) record); + break; + case REMOVE_RANGE_RECORD: + replay((RemoveRangeRecord) record); + break; + case STREAM_OBJECT_RECORD: + replay((StreamObjectRecord) record); + break; + case REMOVE_STREAM_OBJECT_RECORD: + replay((RemoveStreamObjectRecord) record); + break; + case WALOBJECT_RECORD: + replay((WALObjectRecord) record); + break; + case REMOVE_WALOBJECT_RECORD: + replay((RemoveWALObjectRecord) record); + break; default: throw new RuntimeException("Unknown metadata 
record type " + type); } @@ -297,6 +329,38 @@ public void replay(RemoveAccessControlEntryRecord record) { getOrCreateAclsDelta().replay(record); } + public void replay(StreamRecord record) { + getOrCreateStreamsMetadataDelta().replay(record); + } + + public void replay(RemoveStreamRecord record) { + getOrCreateStreamsMetadataDelta().replay(record); + } + + public void replay(RangeRecord record) { + getOrCreateStreamsMetadataDelta().replay(record); + } + + public void replay(RemoveRangeRecord record) { + getOrCreateStreamsMetadataDelta().replay(record); + } + + public void replay(StreamObjectRecord record) { + getOrCreateStreamsMetadataDelta().replay(record); + } + + public void replay(RemoveStreamObjectRecord record) { + getOrCreateStreamsMetadataDelta().replay(record); + } + + public void replay(WALObjectRecord record) { + getOrCreateStreamsMetadataDelta().replay(record); + } + + public void replay(RemoveWALObjectRecord record) { + getOrCreateStreamsMetadataDelta().replay(record); + } + /** * Create removal deltas for anything which was in the base image, but which was not * referenced in the snapshot records we just applied. 
diff --git a/metadata/src/main/java/org/apache/kafka/image/StreamMetadataDelta.java b/metadata/src/main/java/org/apache/kafka/image/StreamMetadataDelta.java index 316605fc8c..4d0165df3c 100644 --- a/metadata/src/main/java/org/apache/kafka/image/StreamMetadataDelta.java +++ b/metadata/src/main/java/org/apache/kafka/image/StreamMetadataDelta.java @@ -21,6 +21,10 @@ import java.util.HashSet; import java.util.Map; import java.util.Set; +import org.apache.kafka.common.metadata.RangeRecord; +import org.apache.kafka.common.metadata.RemoveRangeRecord; +import org.apache.kafka.common.metadata.RemoveStreamObjectRecord; +import org.apache.kafka.common.metadata.StreamObjectRecord; import org.apache.kafka.controller.stream.RangeMetadata; import org.apache.kafka.controller.stream.s3.StreamObject; @@ -39,6 +43,22 @@ public StreamMetadataDelta(StreamMetadataImage image) { this.newEpoch = image.getEpoch(); } + public void replay(RangeRecord record) { + changedRanges.put(record.rangeIndex(), RangeMetadata.of(record)); + } + + public void replay(RemoveRangeRecord record) { + removedRanges.add(record.rangeIndex()); + } + + public void replay(StreamObjectRecord record) { + changedStreamObjects.add(StreamObject.of(record)); + } + + public void replay(RemoveStreamObjectRecord record) { + removedStreamObjects.add(new StreamObject(record.objectId())); + } + public StreamMetadataImage apply() { Map newRanges = new HashMap<>(image.getRanges().size()); // apply the delta changes of old ranges since the last image diff --git a/metadata/src/main/java/org/apache/kafka/image/StreamMetadataImage.java b/metadata/src/main/java/org/apache/kafka/image/StreamMetadataImage.java index 9f992b7da7..2b9d6f45d5 100644 --- a/metadata/src/main/java/org/apache/kafka/image/StreamMetadataImage.java +++ b/metadata/src/main/java/org/apache/kafka/image/StreamMetadataImage.java @@ -52,6 +52,12 @@ public StreamMetadataImage( this.streams = streams; } + public StreamMetadataImage( + Long streamId, + Integer epoch, + 
Long startOffset, + ) + public void write(ImageWriter writer, ImageWriterOptions options) { writer.write(0, new StreamRecord() .setStreamId(streamId) diff --git a/metadata/src/main/java/org/apache/kafka/image/StreamsMetadataDelta.java b/metadata/src/main/java/org/apache/kafka/image/StreamsMetadataDelta.java index eb4b86b6d5..b7e53f8299 100644 --- a/metadata/src/main/java/org/apache/kafka/image/StreamsMetadataDelta.java +++ b/metadata/src/main/java/org/apache/kafka/image/StreamsMetadataDelta.java @@ -17,10 +17,21 @@ package org.apache.kafka.image; +import java.util.Collections; import java.util.HashMap; import java.util.HashSet; import java.util.Map; import java.util.Set; +import java.util.stream.Stream; +import javax.swing.Spring; +import org.apache.kafka.common.metadata.RangeRecord; +import org.apache.kafka.common.metadata.RemoveRangeRecord; +import org.apache.kafka.common.metadata.RemoveStreamObjectRecord; +import org.apache.kafka.common.metadata.RemoveStreamRecord; +import org.apache.kafka.common.metadata.RemoveWALObjectRecord; +import org.apache.kafka.common.metadata.StreamObjectRecord; +import org.apache.kafka.common.metadata.StreamRecord; +import org.apache.kafka.common.metadata.WALObjectRecord; public final class StreamsMetadataDelta { @@ -31,12 +42,82 @@ public final class StreamsMetadataDelta { private final Map changedBrokers = new HashMap<>(); private final Set deletedStreams = new HashSet<>(); + // TODO: decide when to recycle the broker's in-memory data structure. + // We don't use a dedicated pair of BrokerCreateRecord and BrokerRemoveRecord to create or remove brokers; instead + // we create a BrokerStreamMetadataImage when we apply the first WALObjectRecord for a broker, + // so we still need to decide when to recycle that broker's in-memory data structure. private final Set deletedBrokers = new HashSet<>(); public StreamsMetadataDelta(StreamsMetadataImage image) { this.image = image; } + public void replay(StreamRecord record) { + StreamMetadataDelta delta; + if
(!image.getStreamsMetadata().containsKey(record.streamId())) { + // create a new StreamMetadata with empty ranges and streams if not exist + delta = new StreamMetadataDelta( + new StreamMetadataImage(record.streamId(), record.epoch(), record.startOffset(), Collections.emptyMap(), Collections.emptySet())); + } else { + // update the epoch if exist + StreamMetadataImage streamMetadataImage = image.getStreamsMetadata().get(record.streamId()); + delta = new StreamMetadataDelta( + new StreamMetadataImage(record.streamId(), record.epoch(), record.startOffset(), streamMetadataImage.getRanges(), + streamMetadataImage.getStreams())); + } + // add the delta to the changedStreams + changedStreams.put(record.streamId(), delta); + } + + public void replay(RemoveStreamRecord record) { + // add the streamId to the deletedStreams + deletedStreams.add(record.streamId()); + } + + public void replay(RangeRecord record) { + getOrCreateStreamMetadataDelta(record.streamId()).replay(record); + } + + public void replay(RemoveRangeRecord record) { + getOrCreateStreamMetadataDelta(record.streamId()).replay(record); + } + + public void replay(StreamObjectRecord record) { + getOrCreateStreamMetadataDelta(record.streamId()).replay(record); + } + + public void replay(RemoveStreamObjectRecord record) { + getOrCreateStreamMetadataDelta(record.streamId()).replay(record); + } + + public void replay(WALObjectRecord record) { + getOrCreateBrokerStreamMetadataDelta(record.brokerId()).replay(record); + } + + public void replay(RemoveWALObjectRecord record) { + getOrCreateBrokerStreamMetadataDelta(record.brokerId()).replay(record); + } + + private StreamMetadataDelta getOrCreateStreamMetadataDelta(Long streamId) { + StreamMetadataDelta delta = changedStreams.get(streamId); + if (delta == null) { + delta = new StreamMetadataDelta(image.getStreamsMetadata().get(streamId)); + changedStreams.put(streamId, delta); + } + return delta; + } + + private BrokerStreamMetadataDelta 
getOrCreateBrokerStreamMetadataDelta(Integer brokerId) { + BrokerStreamMetadataDelta delta = changedBrokers.get(brokerId); + if (delta == null) { + delta = new BrokerStreamMetadataDelta( + image.getBrokerStreamsMetadata(). + getOrDefault(brokerId, new BrokerStreamMetadataImage(brokerId, Collections.emptySet()))); + changedBrokers.put(brokerId, delta); + } + return delta; + } + StreamsMetadataImage apply() { Map newStreams = new HashMap<>(image.getStreamsMetadata().size()); Map newBrokerStreams = new HashMap<>(image.getBrokerStreamsMetadata().size()); From ba34afd917697dd1815efde0286fd2410b5493f5 Mon Sep 17 00:00:00 2001 From: TheR1sing3un Date: Sun, 20 Aug 2023 17:21:21 +0800 Subject: [PATCH 05/10] test(s3): add test to verify the Stream related image and delta 1. add test to verify the Stream related image and delta Signed-off-by: TheR1sing3un --- .../stream/StreamControlManager.java | 9 +- .../image/BrokerStreamMetadataDelta.java | 6 +- .../image/BrokerStreamMetadataImage.java | 10 +- .../kafka/image/StreamMetadataDelta.java | 8 +- .../kafka/image/StreamMetadataImage.java | 19 +- .../kafka/image/StreamsMetadataDelta.java | 6 +- .../stream}/ObjectStreamIndex.java | 2 +- .../stream/RangeMetadata.java | 2 +- .../s3 => metadata/stream}/S3Object.java | 59 ++- .../stream/S3ObjectState.java} | 24 +- .../s3 => metadata/stream}/S3ObjectType.java | 2 +- .../s3 => metadata/stream}/StreamObject.java | 7 +- .../s3 => metadata/stream}/WALObject.java | 18 +- .../kafka/image/StreamsMetadataImageTest.java | 395 ++++++++++++++++++ 14 files changed, 499 insertions(+), 68 deletions(-) rename metadata/src/main/java/org/apache/kafka/{controller/stream/s3 => metadata/stream}/ObjectStreamIndex.java (97%) rename metadata/src/main/java/org/apache/kafka/{controller => metadata}/stream/RangeMetadata.java (98%) rename metadata/src/main/java/org/apache/kafka/{controller/stream/s3 => metadata/stream}/S3Object.java (84%) rename 
metadata/src/main/java/org/apache/kafka/{controller/stream/s3/S3ObjectManager.java => metadata/stream/S3ObjectState.java} (65%) rename metadata/src/main/java/org/apache/kafka/{controller/stream/s3 => metadata/stream}/S3ObjectType.java (96%) rename metadata/src/main/java/org/apache/kafka/{controller/stream/s3 => metadata/stream}/StreamObject.java (94%) rename metadata/src/main/java/org/apache/kafka/{controller/stream/s3 => metadata/stream}/WALObject.java (90%) diff --git a/metadata/src/main/java/org/apache/kafka/controller/stream/StreamControlManager.java b/metadata/src/main/java/org/apache/kafka/controller/stream/StreamControlManager.java index ce8fea1abf..69df13b992 100644 --- a/metadata/src/main/java/org/apache/kafka/controller/stream/StreamControlManager.java +++ b/metadata/src/main/java/org/apache/kafka/controller/stream/StreamControlManager.java @@ -18,8 +18,9 @@ package org.apache.kafka.controller.stream; import org.apache.kafka.common.utils.LogContext; -import org.apache.kafka.controller.stream.s3.StreamObject; -import org.apache.kafka.controller.stream.s3.WALObject; +import org.apache.kafka.metadata.stream.RangeMetadata; +import org.apache.kafka.metadata.stream.StreamObject; +import org.apache.kafka.metadata.stream.WALObject; import org.apache.kafka.timeline.SnapshotRegistry; import org.apache.kafka.timeline.TimelineHashMap; import org.apache.kafka.timeline.TimelineHashSet; @@ -27,7 +28,7 @@ public class StreamControlManager { - class StreamMetadata { + static class StreamMetadata { private Long streamId; private Integer epoch; private Long startOffset; @@ -35,7 +36,7 @@ class StreamMetadata { private TimelineHashSet streamObjects; } - class BrokerStreamMetadata { + static class BrokerStreamMetadata { private Integer brokerId; private TimelineHashSet walObjects; } diff --git a/metadata/src/main/java/org/apache/kafka/image/BrokerStreamMetadataDelta.java b/metadata/src/main/java/org/apache/kafka/image/BrokerStreamMetadataDelta.java index 
b97c3dd292..9264bf572d 100644 --- a/metadata/src/main/java/org/apache/kafka/image/BrokerStreamMetadataDelta.java +++ b/metadata/src/main/java/org/apache/kafka/image/BrokerStreamMetadataDelta.java @@ -17,11 +17,13 @@ package org.apache.kafka.image; +import java.util.ArrayList; import java.util.HashSet; +import java.util.List; import java.util.Set; import org.apache.kafka.common.metadata.RemoveWALObjectRecord; import org.apache.kafka.common.metadata.WALObjectRecord; -import org.apache.kafka.controller.stream.s3.WALObject; +import org.apache.kafka.metadata.stream.WALObject; public class BrokerStreamMetadataDelta { @@ -43,7 +45,7 @@ public void replay(RemoveWALObjectRecord record) { } public BrokerStreamMetadataImage apply() { - Set newWALObjects = new HashSet<>(image.getWalObjects()); + List newWALObjects = new ArrayList<>(image.getWalObjects()); // remove all removed WAL objects newWALObjects.removeAll(removedWALObjects); // add all changed WAL objects diff --git a/metadata/src/main/java/org/apache/kafka/image/BrokerStreamMetadataImage.java b/metadata/src/main/java/org/apache/kafka/image/BrokerStreamMetadataImage.java index adb61ee958..2732c2ff37 100644 --- a/metadata/src/main/java/org/apache/kafka/image/BrokerStreamMetadataImage.java +++ b/metadata/src/main/java/org/apache/kafka/image/BrokerStreamMetadataImage.java @@ -18,17 +18,17 @@ package org.apache.kafka.image; +import java.util.List; import java.util.Objects; -import java.util.Set; -import org.apache.kafka.controller.stream.s3.WALObject; +import org.apache.kafka.metadata.stream.WALObject; import org.apache.kafka.image.writer.ImageWriter; import org.apache.kafka.image.writer.ImageWriterOptions; public class BrokerStreamMetadataImage { private final Integer brokerId; - private final Set walObjects; + private final List walObjects; - public BrokerStreamMetadataImage(Integer brokerId, Set walObjects) { + public BrokerStreamMetadataImage(Integer brokerId, List walObjects) { this.brokerId = brokerId; this.walObjects 
= walObjects; } @@ -54,7 +54,7 @@ public void write(ImageWriter writer, ImageWriterOptions options) { walObjects.forEach(walObject -> writer.write(walObject.toRecord())); } - public Set getWalObjects() { + public List getWalObjects() { return walObjects; } diff --git a/metadata/src/main/java/org/apache/kafka/image/StreamMetadataDelta.java b/metadata/src/main/java/org/apache/kafka/image/StreamMetadataDelta.java index 4d0165df3c..2d03c387ea 100644 --- a/metadata/src/main/java/org/apache/kafka/image/StreamMetadataDelta.java +++ b/metadata/src/main/java/org/apache/kafka/image/StreamMetadataDelta.java @@ -17,16 +17,18 @@ package org.apache.kafka.image; +import java.util.ArrayList; import java.util.HashMap; import java.util.HashSet; +import java.util.List; import java.util.Map; import java.util.Set; import org.apache.kafka.common.metadata.RangeRecord; import org.apache.kafka.common.metadata.RemoveRangeRecord; import org.apache.kafka.common.metadata.RemoveStreamObjectRecord; import org.apache.kafka.common.metadata.StreamObjectRecord; -import org.apache.kafka.controller.stream.RangeMetadata; -import org.apache.kafka.controller.stream.s3.StreamObject; +import org.apache.kafka.metadata.stream.RangeMetadata; +import org.apache.kafka.metadata.stream.StreamObject; public class StreamMetadataDelta { private final StreamMetadataImage image; @@ -78,7 +80,7 @@ public StreamMetadataImage apply() { changedRanges.entrySet().stream().filter(entry -> !newRanges.containsKey(entry.getKey())) .forEach(entry -> newRanges.put(entry.getKey(), entry.getValue())); - Set newStreamObjects = new HashSet<>(image.getStreams()); + List newStreamObjects = new ArrayList<>(image.getStreams()); // remove all removed stream-objects newStreamObjects.removeAll(removedStreamObjects); // add all changed stream-objects diff --git a/metadata/src/main/java/org/apache/kafka/image/StreamMetadataImage.java b/metadata/src/main/java/org/apache/kafka/image/StreamMetadataImage.java index 2b9d6f45d5..955bcf1d27 100644 
--- a/metadata/src/main/java/org/apache/kafka/image/StreamMetadataImage.java +++ b/metadata/src/main/java/org/apache/kafka/image/StreamMetadataImage.java @@ -17,13 +17,12 @@ package org.apache.kafka.image; +import java.util.List; import java.util.Map; import java.util.Objects; -import java.util.Set; -import org.apache.kafka.common.metadata.RangeRecord; import org.apache.kafka.common.metadata.StreamRecord; -import org.apache.kafka.controller.stream.RangeMetadata; -import org.apache.kafka.controller.stream.s3.StreamObject; +import org.apache.kafka.metadata.stream.RangeMetadata; +import org.apache.kafka.metadata.stream.StreamObject; import org.apache.kafka.image.writer.ImageWriter; import org.apache.kafka.image.writer.ImageWriterOptions; @@ -37,14 +36,14 @@ public class StreamMetadataImage { private final Map ranges; - private final Set streams; + private final List streams; public StreamMetadataImage( Long streamId, Integer epoch, Long startOffset, Map ranges, - Set streams) { + List streams) { this.streamId = streamId; this.epoch = epoch; this.startOffset = startOffset; @@ -52,12 +51,6 @@ public StreamMetadataImage( this.streams = streams; } - public StreamMetadataImage( - Long streamId, - Integer epoch, - Long startOffset, - ) - public void write(ImageWriter writer, ImageWriterOptions options) { writer.write(0, new StreamRecord() .setStreamId(streamId) @@ -71,7 +64,7 @@ public Map getRanges() { return ranges; } - public Set getStreams() { + public List getStreams() { return streams; } diff --git a/metadata/src/main/java/org/apache/kafka/image/StreamsMetadataDelta.java b/metadata/src/main/java/org/apache/kafka/image/StreamsMetadataDelta.java index b7e53f8299..6014a3bc14 100644 --- a/metadata/src/main/java/org/apache/kafka/image/StreamsMetadataDelta.java +++ b/metadata/src/main/java/org/apache/kafka/image/StreamsMetadataDelta.java @@ -22,8 +22,6 @@ import java.util.HashSet; import java.util.Map; import java.util.Set; -import java.util.stream.Stream; -import 
javax.swing.Spring; import org.apache.kafka.common.metadata.RangeRecord; import org.apache.kafka.common.metadata.RemoveRangeRecord; import org.apache.kafka.common.metadata.RemoveStreamObjectRecord; @@ -57,7 +55,7 @@ public void replay(StreamRecord record) { if (!image.getStreamsMetadata().containsKey(record.streamId())) { // create a new StreamMetadata with empty ranges and streams if not exist delta = new StreamMetadataDelta( - new StreamMetadataImage(record.streamId(), record.epoch(), record.startOffset(), Collections.emptyMap(), Collections.emptySet())); + new StreamMetadataImage(record.streamId(), record.epoch(), record.startOffset(), Collections.emptyMap(), Collections.emptyList())); } else { // update the epoch if exist StreamMetadataImage streamMetadataImage = image.getStreamsMetadata().get(record.streamId()); @@ -112,7 +110,7 @@ private BrokerStreamMetadataDelta getOrCreateBrokerStreamMetadataDelta(Integer b if (delta == null) { delta = new BrokerStreamMetadataDelta( image.getBrokerStreamsMetadata(). - getOrDefault(brokerId, new BrokerStreamMetadataImage(brokerId, Collections.emptySet()))); + getOrDefault(brokerId, new BrokerStreamMetadataImage(brokerId, Collections.emptyList()))); changedBrokers.put(brokerId, delta); } return delta; diff --git a/metadata/src/main/java/org/apache/kafka/controller/stream/s3/ObjectStreamIndex.java b/metadata/src/main/java/org/apache/kafka/metadata/stream/ObjectStreamIndex.java similarity index 97% rename from metadata/src/main/java/org/apache/kafka/controller/stream/s3/ObjectStreamIndex.java rename to metadata/src/main/java/org/apache/kafka/metadata/stream/ObjectStreamIndex.java index 356dd8f2d4..7c2cfa2379 100644 --- a/metadata/src/main/java/org/apache/kafka/controller/stream/s3/ObjectStreamIndex.java +++ b/metadata/src/main/java/org/apache/kafka/metadata/stream/ObjectStreamIndex.java @@ -15,7 +15,7 @@ * limitations under the License. 
*/ -package org.apache.kafka.controller.stream.s3; +package org.apache.kafka.metadata.stream; import org.apache.kafka.common.metadata.WALObjectRecord.StreamIndex; diff --git a/metadata/src/main/java/org/apache/kafka/controller/stream/RangeMetadata.java b/metadata/src/main/java/org/apache/kafka/metadata/stream/RangeMetadata.java similarity index 98% rename from metadata/src/main/java/org/apache/kafka/controller/stream/RangeMetadata.java rename to metadata/src/main/java/org/apache/kafka/metadata/stream/RangeMetadata.java index 6dfe9d53f5..444a90856a 100644 --- a/metadata/src/main/java/org/apache/kafka/controller/stream/RangeMetadata.java +++ b/metadata/src/main/java/org/apache/kafka/metadata/stream/RangeMetadata.java @@ -15,7 +15,7 @@ * limitations under the License. */ -package org.apache.kafka.controller.stream; +package org.apache.kafka.metadata.stream; import java.util.Optional; import org.apache.kafka.common.metadata.RangeRecord; diff --git a/metadata/src/main/java/org/apache/kafka/controller/stream/s3/S3Object.java b/metadata/src/main/java/org/apache/kafka/metadata/stream/S3Object.java similarity index 84% rename from metadata/src/main/java/org/apache/kafka/controller/stream/s3/S3Object.java rename to metadata/src/main/java/org/apache/kafka/metadata/stream/S3Object.java index 09b9a5a0ad..c50d0fa33d 100644 --- a/metadata/src/main/java/org/apache/kafka/controller/stream/s3/S3Object.java +++ b/metadata/src/main/java/org/apache/kafka/metadata/stream/S3Object.java @@ -15,7 +15,7 @@ * limitations under the License. 
*/ -package org.apache.kafka.controller.stream.s3; +package org.apache.kafka.metadata.stream; import java.util.Objects; import java.util.Optional; @@ -84,36 +84,25 @@ public void onCreate(S3ObjectCreateContext createContext) { this.objectType = createContext.objectType; } + public void onMarkDestroy() { + if (this.s3ObjectState != S3ObjectState.CREATED) { + throw new IllegalStateException("Object is not in CREATED state"); + } + this.s3ObjectState = S3ObjectState.MARK_DESTROYED; + } + public void onDestroy() { if (this.s3ObjectState != S3ObjectState.CREATED) { throw new IllegalStateException("Object is not in CREATED state"); } - S3ObjectManager.destroy(this, () -> { - this.s3ObjectState = S3ObjectState.DESTROYED; - this.destroyTimeInMs = Optional.of(System.currentTimeMillis()); - }); + // TODO: trigger destroy + } public S3ObjectType getObjectType() { return objectType; } - enum S3ObjectState { - UNINITIALIZED, - APPLIED, - CREATED, - MARK_DESTROYED, - DESTROYED; - - public static S3ObjectState fromByte(Byte b) { - int ordinal = b.intValue(); - if (ordinal < 0 || ordinal >= values().length) { - throw new IllegalArgumentException("Invalid ObjectState ordinal " + ordinal); - } - return values()[ordinal]; - } - } - public class S3ObjectCreateContext { private final Long createTimeInMs; @@ -154,4 +143,32 @@ public boolean equals(Object o) { public int hashCode() { return Objects.hash(objectId); } + + public Long getObjectId() { + return objectId; + } + + public Optional getObjectSize() { + return objectSize; + } + + public Optional getObjectAddress() { + return objectAddress; + } + + public Optional getApplyTimeInMs() { + return applyTimeInMs; + } + + public Optional getCreateTimeInMs() { + return createTimeInMs; + } + + public Optional getDestroyTimeInMs() { + return destroyTimeInMs; + } + + public S3ObjectState getS3ObjectState() { + return s3ObjectState; + } } diff --git a/metadata/src/main/java/org/apache/kafka/controller/stream/s3/S3ObjectManager.java 
b/metadata/src/main/java/org/apache/kafka/metadata/stream/S3ObjectState.java similarity index 65% rename from metadata/src/main/java/org/apache/kafka/controller/stream/s3/S3ObjectManager.java rename to metadata/src/main/java/org/apache/kafka/metadata/stream/S3ObjectState.java index ea060c935e..228e682aea 100644 --- a/metadata/src/main/java/org/apache/kafka/controller/stream/s3/S3ObjectManager.java +++ b/metadata/src/main/java/org/apache/kafka/metadata/stream/S3ObjectState.java @@ -15,16 +15,20 @@ * limitations under the License. */ -package org.apache.kafka.controller.stream.s3; +package org.apache.kafka.metadata.stream; +public enum S3ObjectState { + UNINITIALIZED, + APPLIED, + CREATED, + MARK_DESTROYED, + DESTROYED; -import org.apache.kafka.controller.stream.s3.S3Object; - -public class S3ObjectManager { - - public static boolean destroy(S3Object object, Runnable successCallback) { - // TODO: trigger delete object from S3 - return false; + public static S3ObjectState fromByte(Byte b) { + int ordinal = b.intValue(); + if (ordinal < 0 || ordinal >= values().length) { + throw new IllegalArgumentException("Invalid ObjectState ordinal " + ordinal); + } + return values()[ordinal]; } - -} +} \ No newline at end of file diff --git a/metadata/src/main/java/org/apache/kafka/controller/stream/s3/S3ObjectType.java b/metadata/src/main/java/org/apache/kafka/metadata/stream/S3ObjectType.java similarity index 96% rename from metadata/src/main/java/org/apache/kafka/controller/stream/s3/S3ObjectType.java rename to metadata/src/main/java/org/apache/kafka/metadata/stream/S3ObjectType.java index 1a56adab69..30cdaf31a5 100644 --- a/metadata/src/main/java/org/apache/kafka/controller/stream/s3/S3ObjectType.java +++ b/metadata/src/main/java/org/apache/kafka/metadata/stream/S3ObjectType.java @@ -15,7 +15,7 @@ * limitations under the License. 
*/ -package org.apache.kafka.controller.stream.s3; +package org.apache.kafka.metadata.stream; public enum S3ObjectType { /** diff --git a/metadata/src/main/java/org/apache/kafka/controller/stream/s3/StreamObject.java b/metadata/src/main/java/org/apache/kafka/metadata/stream/StreamObject.java similarity index 94% rename from metadata/src/main/java/org/apache/kafka/controller/stream/s3/StreamObject.java rename to metadata/src/main/java/org/apache/kafka/metadata/stream/StreamObject.java index 49b1748740..c888c005a8 100644 --- a/metadata/src/main/java/org/apache/kafka/controller/stream/s3/StreamObject.java +++ b/metadata/src/main/java/org/apache/kafka/metadata/stream/StreamObject.java @@ -15,7 +15,7 @@ * limitations under the License. */ -package org.apache.kafka.controller.stream.s3; +package org.apache.kafka.metadata.stream; import java.util.Optional; import org.apache.kafka.common.metadata.StreamObjectRecord; @@ -32,6 +32,9 @@ public StreamObject(final Long objectId) { @Override public void onCreate(S3ObjectCreateContext createContext) { super.onCreate(createContext); + if (!(createContext instanceof StreamObjectCreateContext)) { + throw new IllegalArgumentException(); + } this.streamIndex = ((StreamObjectCreateContext) createContext).streamIndex; } @@ -70,7 +73,7 @@ public ApiMessageAndVersion toRecord() { .setObjectId(objectId) .setStreamId(streamIndex.getStreamId()) .setObjectState((byte) s3ObjectState.ordinal()) - .setObjectType((byte)objectType.ordinal()) + .setObjectType((byte) objectType.ordinal()) .setApplyTimeInMs(applyTimeInMs.get()) .setCreateTimeInMs(createTimeInMs.get()) .setDestroyTimeInMs(destroyTimeInMs.get()) diff --git a/metadata/src/main/java/org/apache/kafka/controller/stream/s3/WALObject.java b/metadata/src/main/java/org/apache/kafka/metadata/stream/WALObject.java similarity index 90% rename from metadata/src/main/java/org/apache/kafka/controller/stream/s3/WALObject.java rename to 
metadata/src/main/java/org/apache/kafka/metadata/stream/WALObject.java index bff239bc72..c14ce05a8f 100644 --- a/metadata/src/main/java/org/apache/kafka/controller/stream/s3/WALObject.java +++ b/metadata/src/main/java/org/apache/kafka/metadata/stream/WALObject.java @@ -15,7 +15,7 @@ * limitations under the License. */ -package org.apache.kafka.controller.stream.s3; +package org.apache.kafka.metadata.stream; import java.util.List; import java.util.Map; @@ -55,6 +55,9 @@ private WALObject( @Override public void onCreate(S3ObjectCreateContext createContext) { super.onCreate(createContext); + if (!(createContext instanceof WALObjectCreateContext)) { + throw new IllegalArgumentException(); + } WALObjectCreateContext walCreateContext = (WALObjectCreateContext) createContext; this.streamsIndex = walCreateContext.streamIndexList.stream().collect(Collectors.toMap(ObjectStreamIndex::getStreamId, index -> index)); this.brokerId = walCreateContext.brokerId; @@ -101,4 +104,17 @@ public static WALObject of(WALObjectRecord record) { record.brokerId(), record.streamsIndex().stream().map(ObjectStreamIndex::of).collect(Collectors.toList())); return walObject; } + + public Integer getBrokerId() { + return brokerId; + } + + public Map getStreamsIndex() { + return streamsIndex; + } + + @Override + public S3ObjectType getObjectType() { + return objectType; + } } diff --git a/metadata/src/test/java/org/apache/kafka/image/StreamsMetadataImageTest.java b/metadata/src/test/java/org/apache/kafka/image/StreamsMetadataImageTest.java index f0ed0e67a7..434b36ea02 100644 --- a/metadata/src/test/java/org/apache/kafka/image/StreamsMetadataImageTest.java +++ b/metadata/src/test/java/org/apache/kafka/image/StreamsMetadataImageTest.java @@ -17,12 +17,50 @@ package org.apache.kafka.image; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNotNull; + +import java.util.ArrayList; +import java.util.Arrays; import java.util.List; +import 
java.util.Map; +import java.util.stream.Collectors; +import org.apache.kafka.common.metadata.RangeRecord; +import org.apache.kafka.common.metadata.RemoveStreamObjectRecord; +import org.apache.kafka.common.metadata.RemoveStreamRecord; +import org.apache.kafka.common.metadata.RemoveWALObjectRecord; +import org.apache.kafka.common.metadata.StreamObjectRecord; +import org.apache.kafka.common.metadata.StreamRecord; +import org.apache.kafka.common.metadata.WALObjectRecord; +import org.apache.kafka.metadata.stream.RangeMetadata; +import org.apache.kafka.metadata.stream.ObjectStreamIndex; +import org.apache.kafka.metadata.stream.S3ObjectState; +import org.apache.kafka.metadata.stream.S3ObjectType; +import org.apache.kafka.metadata.stream.StreamObject; +import org.apache.kafka.metadata.stream.WALObject; +import org.apache.kafka.image.writer.ImageWriterOptions; +import org.apache.kafka.image.writer.RecordListWriter; +import org.apache.kafka.metadata.RecordTestUtils; import org.apache.kafka.server.common.ApiMessageAndVersion; +import org.junit.jupiter.api.Test; import org.junit.jupiter.api.Timeout; @Timeout(value = 40) public class StreamsMetadataImageTest { + + private static final long KB = 1024; + + private static final long MB = 1024 * KB; + + private static final long GB = 1024 * MB; + private static final long WAL_LOOSE_SIZE = 40 * MB; + + private static final long WAL_MINOR_COMPACT_SIZE = 5 * GB; + + private static final long WAL_MAJOR_COMPACT_SIZE = 320 * GB; + + private static final long STREAM_OBJECT_SIZE = 320 * GB; + static final StreamsMetadataImage IMAGE1; static final List DELTA1_RECORDS; @@ -36,4 +74,361 @@ public class StreamsMetadataImageTest { DELTA1_RECORDS = null; IMAGE2 = null; } + + @Test + public void testBasicChange() { + List streamMetadataImages = new ArrayList<>(); + Integer brokerId0 = 0; + Integer brokerId1 = 1; + Integer brokerId2 = 2; + + // 1. empty image + StreamsMetadataImage image0 = StreamsMetadataImage.EMPTY; + + // 2. 
create stream and create range + Long streamId0 = 0L; + Long streamId1 = 1L; + List records = new ArrayList<>(); + StreamRecord streamRecord00 = new StreamRecord() + .setStreamId(streamId0) + .setEpoch(1) + .setStartOffset(0L); + records.add(new ApiMessageAndVersion(streamRecord00, (short) 0)); + RangeRecord rangeRecord00 = new RangeRecord() + .setStreamId(streamId0) + .setRangeIndex(0) + .setStartOffset(0L) + .setBrokerId(brokerId1) + .setEpoch(1); + records.add(new ApiMessageAndVersion(rangeRecord00, (short) 0)); + StreamRecord streamRecord01 = new StreamRecord() + .setStreamId(streamId1) + .setEpoch(1) + .setStartOffset(0L); + records.add(new ApiMessageAndVersion(streamRecord01, (short) 0)); + RangeRecord rangeRecord01 = new RangeRecord() + .setStreamId(streamId1) + .setRangeIndex(0) + .setStartOffset(0L) + .setBrokerId(brokerId1) + .setEpoch(1); + records.add(new ApiMessageAndVersion(rangeRecord01, (short) 0)); + StreamsMetadataDelta delta0 = new StreamsMetadataDelta(image0); + RecordTestUtils.replayAll(delta0, records); + StreamsMetadataImage image1 = delta0.apply(); + + // check the image1 + assertEquals(2, image1.getStreamsMetadata().size()); + StreamMetadataImage streamMetadataImage1 = image1.getStreamsMetadata().get(streamId0); + assertNotNull(streamMetadataImage1); + assertEquals(1, streamMetadataImage1.getRanges().size()); + assertEquals(1, streamMetadataImage1.getEpoch()); + assertEquals(0, streamMetadataImage1.getStartOffset()); + RangeMetadata rangeMetadata1 = streamMetadataImage1.getRanges().get(0); + assertNotNull(rangeMetadata1); + assertEquals(RangeMetadata.of(rangeRecord00), rangeMetadata1); + + StreamMetadataImage streamMetadataImage11 = image1.getStreamsMetadata().get(streamId1); + assertNotNull(streamMetadataImage11); + assertEquals(1, streamMetadataImage11.getRanges().size()); + assertEquals(1, streamMetadataImage11.getEpoch()); + assertEquals(0, streamMetadataImage11.getStartOffset()); + RangeMetadata rangeMetadata11 = 
streamMetadataImage11.getRanges().get(0); + assertNotNull(rangeMetadata11); + assertEquals(RangeMetadata.of(rangeRecord01), rangeMetadata11); + + // 3. apply WALObject0, WALObject1, WALObject2 + WALObjectRecord walObjectRecord0 = new WALObjectRecord() + .setBrokerId(brokerId0) + .setObjectId(0L) + .setApplyTimeInMs(System.currentTimeMillis()) + .setObjectType((byte) S3ObjectType.WAL_LOOSE.ordinal()) + .setObjectState((byte) S3ObjectState.APPLIED.ordinal()); + WALObjectRecord walObjectRecord1 = new WALObjectRecord() + .setBrokerId(brokerId1) + .setObjectId(1L) + .setApplyTimeInMs(System.currentTimeMillis()) + .setObjectType((byte) S3ObjectType.WAL_LOOSE.ordinal()) + .setObjectState((byte) S3ObjectState.APPLIED.ordinal()); + WALObjectRecord walObjectRecord2 = new WALObjectRecord() + .setBrokerId(brokerId1) + .setObjectId(2L) + .setApplyTimeInMs(System.currentTimeMillis()) + .setObjectType((byte) S3ObjectType.WAL_LOOSE.ordinal()) + .setObjectState((byte) S3ObjectState.APPLIED.ordinal()); + records.clear(); + records.add(new ApiMessageAndVersion(walObjectRecord0, (short) 0)); + records.add(new ApiMessageAndVersion(walObjectRecord1, (short) 0)); + records.add(new ApiMessageAndVersion(walObjectRecord2, (short) 0)); + StreamsMetadataDelta delta1 = new StreamsMetadataDelta(image1); + RecordTestUtils.replayAll(delta1, records); + StreamsMetadataImage image2 = delta1.apply(); + + // check the image2 + assertEquals(2, image2.getBrokerStreamsMetadata().size()); + BrokerStreamMetadataImage brokerStreamMetadataImage20 = image2.getBrokerStreamsMetadata().get(brokerId0); + assertNotNull(brokerStreamMetadataImage20); + assertEquals(1, brokerStreamMetadataImage20.getWalObjects().size()); + WALObject walObject0 = brokerStreamMetadataImage20.getWalObjects().get(0); + assertEquals(brokerId0, walObject0.getBrokerId()); + assertEquals(S3ObjectType.WAL_LOOSE, walObject0.getObjectType()); + assertEquals(S3ObjectState.APPLIED, walObject0.getS3ObjectState()); + assertEquals(0L, 
walObject0.getObjectId()); + BrokerStreamMetadataImage brokerStreamMetadataImage21 = image2.getBrokerStreamsMetadata().get(brokerId1); + assertNotNull(brokerStreamMetadataImage21); + assertEquals(2, brokerStreamMetadataImage21.getWalObjects().size()); + WALObject walObject1 = brokerStreamMetadataImage21.getWalObjects().get(0); + assertEquals(brokerId1, walObject1.getBrokerId()); + assertEquals(S3ObjectType.WAL_LOOSE, walObject1.getObjectType()); + assertEquals(S3ObjectState.APPLIED, walObject1.getS3ObjectState()); + assertEquals(1L, walObject1.getObjectId()); + WALObject walObject2 = brokerStreamMetadataImage21.getWalObjects().get(1); + assertEquals(brokerId1, walObject2.getBrokerId()); + assertEquals(S3ObjectType.WAL_LOOSE, walObject2.getObjectType()); + assertEquals(S3ObjectState.APPLIED, walObject2.getS3ObjectState()); + assertEquals(2L, walObject2.getObjectId()); + + // 4. create WALObject1, WALObject2, mark delete WALObject0 + List streamIndicesInWALObject1 = Arrays.asList( + new ObjectStreamIndex(streamId0, 0L, 100L), + new ObjectStreamIndex(streamId1, 0L, 200L) + ); + WALObjectRecord walObjectRecord11 = new WALObjectRecord() + .setBrokerId(brokerId1) + .setObjectId(1L) + .setObjectSize(WAL_LOOSE_SIZE) + .setCreateTimeInMs(System.currentTimeMillis()) + .setObjectType((byte) S3ObjectType.WAL_LOOSE.ordinal()) + .setStreamsIndex(streamIndicesInWALObject1.stream().map(ObjectStreamIndex::toRecordStreamIndex).collect( + Collectors.toList())) + .setObjectState((byte) S3ObjectState.CREATED.ordinal()); + + List streamIndicesInWALObject2 = Arrays.asList( + new ObjectStreamIndex(streamId0, 101L, 200L), + new ObjectStreamIndex(streamId1, 201L, 300L) + ); + WALObjectRecord walObjectRecord21 = new WALObjectRecord() + .setBrokerId(brokerId1) + .setObjectId(2L) + .setObjectSize(WAL_LOOSE_SIZE) + .setCreateTimeInMs(System.currentTimeMillis()) + .setObjectType((byte) S3ObjectType.WAL_LOOSE.ordinal()) + 
.setStreamsIndex(streamIndicesInWALObject2.stream().map(ObjectStreamIndex::toRecordStreamIndex).collect( + Collectors.toList())) + .setObjectState((byte) S3ObjectState.CREATED.ordinal()); + WALObjectRecord walObjectRecord01 = new WALObjectRecord() + .setBrokerId(brokerId0) + .setObjectId(0L) + .setObjectType((byte) S3ObjectType.WAL_LOOSE.ordinal()) + .setObjectState((byte) S3ObjectState.MARK_DESTROYED.ordinal()); + records.clear(); + records.add(new ApiMessageAndVersion(walObjectRecord11, (short) 0)); + records.add(new ApiMessageAndVersion(walObjectRecord21, (short) 0)); + records.add(new ApiMessageAndVersion(walObjectRecord01, (short) 0)); + StreamsMetadataDelta delta2 = new StreamsMetadataDelta(image2); + RecordTestUtils.replayAll(delta2, records); + StreamsMetadataImage image3 = delta2.apply(); + + // check the image3 + assertEquals(2, image3.getBrokerStreamsMetadata().size()); + BrokerStreamMetadataImage brokerStreamMetadataImage30 = image3.getBrokerStreamsMetadata().get(brokerId0); + assertNotNull(brokerStreamMetadataImage30); + assertEquals(1, brokerStreamMetadataImage30.getWalObjects().size()); + WALObject walObject01 = brokerStreamMetadataImage30.getWalObjects().get(0); + assertEquals(brokerId0, walObject01.getBrokerId()); + assertEquals(S3ObjectType.WAL_LOOSE, walObject01.getObjectType()); + assertEquals(S3ObjectState.MARK_DESTROYED, walObject01.getS3ObjectState()); + BrokerStreamMetadataImage brokerStreamMetadataImage31 = image3.getBrokerStreamsMetadata().get(brokerId1); + assertNotNull(brokerStreamMetadataImage31); + assertEquals(2, brokerStreamMetadataImage31.getWalObjects().size()); + WALObject walObject11 = brokerStreamMetadataImage31.getWalObjects().get(0); + assertEquals(brokerId1, walObject11.getBrokerId()); + assertEquals(S3ObjectType.WAL_LOOSE, walObject11.getObjectType()); + assertEquals(S3ObjectState.CREATED, walObject11.getS3ObjectState()); + Map streamIndexVerify1 = walObject11.getStreamsIndex(); + assertEquals(2, streamIndexVerify1.size()); 
+ assertEquals(0L, streamIndexVerify1.get(streamId0).getStartOffset()); + assertEquals(100L, streamIndexVerify1.get(streamId0).getEndOffset()); + assertEquals(0L, streamIndexVerify1.get(streamId1).getStartOffset()); + assertEquals(200L, streamIndexVerify1.get(streamId1).getEndOffset()); + WALObject walObject21 = brokerStreamMetadataImage31.getWalObjects().get(1); + assertEquals(brokerId1, walObject21.getBrokerId()); + assertEquals(S3ObjectType.WAL_LOOSE, walObject21.getObjectType()); + assertEquals(S3ObjectState.CREATED, walObject21.getS3ObjectState()); + Map streamIndexVerify2 = walObject21.getStreamsIndex(); + assertEquals(2, streamIndexVerify2.size()); + assertEquals(101L, streamIndexVerify2.get(streamId0).getStartOffset()); + assertEquals(200L, streamIndexVerify2.get(streamId0).getEndOffset()); + assertEquals(201L, streamIndexVerify2.get(streamId1).getStartOffset()); + assertEquals(300L, streamIndexVerify2.get(streamId1).getEndOffset()); + + // 5. destroy WALObject0, mark delete WALObject1 and WALObject2, compact these to WALObject3 + RemoveWALObjectRecord removeWALObjectRecord0 = new RemoveWALObjectRecord() + .setObjectId(0L) + .setBrokerId(brokerId0); + WALObjectRecord walObjectRecord12 = new WALObjectRecord() + .setObjectId(1L) + .setBrokerId(brokerId1) + .setObjectType((byte) S3ObjectType.WAL_LOOSE.ordinal()) + .setObjectState((byte) S3ObjectState.MARK_DESTROYED.ordinal()); + WALObjectRecord walObjectRecord22 = new WALObjectRecord() + .setObjectId(2L) + .setBrokerId(brokerId1) + .setObjectType((byte) S3ObjectType.WAL_LOOSE.ordinal()) + .setObjectState((byte) S3ObjectState.MARK_DESTROYED.ordinal()); + List streamIndicesInWALObject3 = Arrays.asList( + new ObjectStreamIndex(streamId0, 0L, 200L), + new ObjectStreamIndex(streamId1, 0L, 300L) + ); + WALObjectRecord walObjectRecord3 = new WALObjectRecord() + .setObjectId(3L) + .setBrokerId(brokerId1) + .setObjectType((byte) S3ObjectType.WAL_MINOR.ordinal()) + .setCreateTimeInMs(System.currentTimeMillis()) + 
.setObjectState((byte) S3ObjectState.CREATED.ordinal()) + .setApplyTimeInMs(System.currentTimeMillis()) + .setObjectSize(WAL_MINOR_COMPACT_SIZE) + .setStreamsIndex(streamIndicesInWALObject3.stream().map(ObjectStreamIndex::toRecordStreamIndex).collect( + Collectors.toList())); + records.clear(); + records.add(new ApiMessageAndVersion(removeWALObjectRecord0, (short) 0)); + records.add(new ApiMessageAndVersion(walObjectRecord12, (short) 0)); + records.add(new ApiMessageAndVersion(walObjectRecord22, (short) 0)); + records.add(new ApiMessageAndVersion(walObjectRecord3, (short) 0)); + StreamsMetadataDelta delta3 = new StreamsMetadataDelta(image3); + RecordTestUtils.replayAll(delta3, records); + StreamsMetadataImage image4 = delta3.apply(); + + // check the image4 + assertEquals(2, image4.getBrokerStreamsMetadata().size()); + BrokerStreamMetadataImage brokerStreamMetadataImage40 = image4.getBrokerStreamsMetadata().get(brokerId0); + assertNotNull(brokerStreamMetadataImage40); + assertEquals(0, brokerStreamMetadataImage40.getWalObjects().size()); + BrokerStreamMetadataImage brokerStreamMetadataImage41 = image4.getBrokerStreamsMetadata().get(brokerId1); + assertNotNull(brokerStreamMetadataImage41); + assertEquals(3, brokerStreamMetadataImage41.getWalObjects().size()); + WALObject walObject12 = brokerStreamMetadataImage41.getWalObjects().get(0); + assertEquals(brokerId1, walObject12.getBrokerId()); + assertEquals(S3ObjectType.WAL_LOOSE, walObject12.getObjectType()); + assertEquals(S3ObjectState.MARK_DESTROYED, walObject12.getS3ObjectState()); + WALObject walObject22 = brokerStreamMetadataImage41.getWalObjects().get(1); + assertEquals(brokerId1, walObject22.getBrokerId()); + assertEquals(S3ObjectType.WAL_LOOSE, walObject22.getObjectType()); + assertEquals(S3ObjectState.MARK_DESTROYED, walObject22.getS3ObjectState()); + WALObject walObject3 = brokerStreamMetadataImage41.getWalObjects().get(2); + assertEquals(brokerId1, walObject3.getBrokerId()); + 
assertEquals(S3ObjectType.WAL_MINOR, walObject3.getObjectType()); + assertEquals(S3ObjectState.CREATED, walObject3.getS3ObjectState()); + assertEquals(3L, walObject3.getObjectId()); + Map streamIndexVerify3 = walObject3.getStreamsIndex(); + assertEquals(2, streamIndexVerify3.size()); + assertEquals(0L, streamIndexVerify3.get(streamId0).getStartOffset()); + assertEquals(200L, streamIndexVerify3.get(streamId0).getEndOffset()); + assertEquals(0L, streamIndexVerify3.get(streamId1).getStartOffset()); + assertEquals(300L, streamIndexVerify3.get(streamId1).getEndOffset()); + + // 6. split WALObject3 by streamId to StreamObject4 and StreamObject5 + ObjectStreamIndex objectStreamIndex4 = new ObjectStreamIndex(streamId0, 0L, 200L); + ObjectStreamIndex objectStreamIndex5 = new ObjectStreamIndex(streamId1, 0L, 300L); + StreamObjectRecord streamObjectRecord4 = new StreamObjectRecord() + .setObjectId(4L) + .setStreamId(streamId0) + .setObjectSize(STREAM_OBJECT_SIZE) + .setObjectType((byte) S3ObjectType.STREAM.ordinal()) + .setCreateTimeInMs(System.currentTimeMillis()) + .setStartOffset(objectStreamIndex4.getStartOffset()) + .setEndOffset(objectStreamIndex4.getEndOffset()); + StreamObjectRecord streamObjectRecord5 = new StreamObjectRecord() + .setObjectId(5L) + .setStreamId(streamId1) + .setObjectSize(STREAM_OBJECT_SIZE) + .setObjectType((byte) S3ObjectType.STREAM.ordinal()) + .setCreateTimeInMs(System.currentTimeMillis()) + .setStartOffset(objectStreamIndex5.getStartOffset()) + .setEndOffset(objectStreamIndex5.getEndOffset()); + RemoveWALObjectRecord removeWALObjectRecord3 = new RemoveWALObjectRecord() + .setObjectId(3L) + .setBrokerId(brokerId1); + records.clear(); + records.add(new ApiMessageAndVersion(streamObjectRecord4, (short) 0)); + records.add(new ApiMessageAndVersion(streamObjectRecord5, (short) 0)); + records.add(new ApiMessageAndVersion(removeWALObjectRecord3, (short) 0)); + StreamsMetadataDelta delta4 = new StreamsMetadataDelta(image4); + 
RecordTestUtils.replayAll(delta4, records); + StreamsMetadataImage image5 = delta4.apply(); + + // check the image5 + assertEquals(2, image5.getBrokerStreamsMetadata().size()); + BrokerStreamMetadataImage brokerStreamMetadataImage50 = image5.getBrokerStreamsMetadata().get(brokerId0); + assertNotNull(brokerStreamMetadataImage50); + assertEquals(0, brokerStreamMetadataImage50.getWalObjects().size()); + BrokerStreamMetadataImage brokerStreamMetadataImage51 = image5.getBrokerStreamsMetadata().get(brokerId1); + assertNotNull(brokerStreamMetadataImage51); + assertEquals(0, brokerStreamMetadataImage51.getWalObjects().size()); + assertEquals(2, image5.getStreamsMetadata().size()); + + StreamMetadataImage streamMetadataImage50 = image5.getStreamsMetadata().get(streamId0); + assertNotNull(streamMetadataImage50); + assertEquals(1, streamMetadataImage50.getRanges().size()); + assertEquals(1, streamMetadataImage50.getEpoch()); + assertEquals(0, streamMetadataImage50.getStartOffset()); + assertEquals(1, streamMetadataImage50.getStreams()); + StreamObject streamObject4 = streamMetadataImage50.getStreams().get(0); + assertEquals(4L, streamObject4.getObjectId()); + assertEquals(STREAM_OBJECT_SIZE, streamObject4.getObjectSize()); + assertEquals(S3ObjectType.STREAM, streamObject4.getObjectType()); + assertEquals(S3ObjectState.CREATED, streamObject4.getS3ObjectState()); + assertEquals(objectStreamIndex4, streamObject4.getStreamIndex()); + + StreamMetadataImage streamMetadataImage51 = image5.getStreamsMetadata().get(streamId1); + assertNotNull(streamMetadataImage51); + assertEquals(1, streamMetadataImage51.getRanges().size()); + assertEquals(1, streamMetadataImage51.getEpoch()); + assertEquals(0, streamMetadataImage51.getStartOffset()); + assertEquals(1, streamMetadataImage51.getStreams()); + StreamObject streamObject5 = streamMetadataImage51.getStreams().get(0); + assertEquals(5L, streamObject5.getObjectId()); + assertEquals(STREAM_OBJECT_SIZE, streamObject5.getObjectSize()); + 
assertEquals(S3ObjectType.STREAM, streamObject5.getObjectType()); + assertEquals(S3ObjectState.CREATED, streamObject5.getS3ObjectState()); + assertEquals(objectStreamIndex5, streamObject5.getStreamIndex()); + + // 7. remove streamObject4 and remove stream1 + RemoveStreamObjectRecord removeStreamObjectRecord4 = new RemoveStreamObjectRecord() + .setObjectId(4L) + .setStreamId(streamId0); + RemoveStreamRecord removeStreamRecord = new RemoveStreamRecord() + .setStreamId(streamId1); + records.clear(); + records.add(new ApiMessageAndVersion(removeStreamObjectRecord4, (short) 0)); + records.add(new ApiMessageAndVersion(removeStreamRecord, (short) 0)); + StreamsMetadataDelta delta5 = new StreamsMetadataDelta(image5); + RecordTestUtils.replayAll(delta5, records); + StreamsMetadataImage image6 = delta5.apply(); + + // check the image6 + assertEquals(2, image6.getBrokerStreamsMetadata().size()); + BrokerStreamMetadataImage brokerStreamMetadataImage60 = image6.getBrokerStreamsMetadata().get(brokerId0); + assertNotNull(brokerStreamMetadataImage60); + assertEquals(0, brokerStreamMetadataImage60.getWalObjects().size()); + BrokerStreamMetadataImage brokerStreamMetadataImage61 = image6.getBrokerStreamsMetadata().get(brokerId1); + assertNotNull(brokerStreamMetadataImage61); + assertEquals(0, brokerStreamMetadataImage61.getWalObjects().size()); + + assertEquals(1, image6.getStreamsMetadata().size()); + StreamMetadataImage streamMetadataImage60 = image6.getStreamsMetadata().get(streamId0); + assertNotNull(streamMetadataImage60); + assertEquals(1, streamMetadataImage60.getRanges().size()); + assertEquals(0, streamMetadataImage60.getStreams().size()); + } + + + private void testToImageAndBack(StreamsMetadataImage image) { + RecordListWriter writer = new RecordListWriter(); + image.write(writer, new ImageWriterOptions.Builder().build()); + StreamsMetadataDelta delta = new StreamsMetadataDelta(StreamsMetadataImage.EMPTY); + RecordTestUtils.replayAll(delta, writer.records()); + 
StreamsMetadataImage newImage = delta.apply(); + assertEquals(image, newImage); + } } From 5c15cbd61476d19e5581517d3c768d9157030918 Mon Sep 17 00:00:00 2001 From: TheR1sing3un Date: Mon, 21 Aug 2023 10:27:18 +0800 Subject: [PATCH 06/10] feat(s3): add S3ObjectControlManager to manage all S3Object's lifecycle 1. add S3ObjectControlManager to manage all S3Object's lifecycle Signed-off-by: TheR1sing3un --- .../stream/S3ObjectControlManager.java | 60 +++++++++++++++++++ .../stream/StreamControlManager.java | 9 ++- 2 files changed, 68 insertions(+), 1 deletion(-) create mode 100644 metadata/src/main/java/org/apache/kafka/controller/stream/S3ObjectControlManager.java diff --git a/metadata/src/main/java/org/apache/kafka/controller/stream/S3ObjectControlManager.java b/metadata/src/main/java/org/apache/kafka/controller/stream/S3ObjectControlManager.java new file mode 100644 index 0000000000..044acc7f76 --- /dev/null +++ b/metadata/src/main/java/org/apache/kafka/controller/stream/S3ObjectControlManager.java @@ -0,0 +1,60 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.kafka.controller.stream; + +import java.util.LinkedList; +import java.util.Queue; +import org.apache.kafka.common.utils.LogContext; +import org.apache.kafka.metadata.stream.S3Object; +import org.apache.kafka.timeline.SnapshotRegistry; +import org.apache.kafka.timeline.TimelineHashMap; +import org.slf4j.Logger; + +/** + * The S3ObjectControlManager manages all S3Object's lifecycle, such as apply, create, destroy, etc. + */ +public class S3ObjectControlManager { + private final SnapshotRegistry snapshotRegistry; + private final Logger log; + + private final TimelineHashMap objectsMetadata; + + /** + * The objectId of the next object to be applied. (start from 0) + */ + private Long nextApplyObjectId = 0L; + + // TODO: add timer task to periodically check if there are objects to be destroyed or created + private final Queue appliedObjects; + private final Queue markDestroyedObjects; + + public S3ObjectControlManager( + SnapshotRegistry snapshotRegistry, + LogContext logContext) { + this.snapshotRegistry = snapshotRegistry; + this.log = logContext.logger(S3ObjectControlManager.class); + this.objectsMetadata = new TimelineHashMap<>(snapshotRegistry, 0); + this.appliedObjects = new LinkedList<>(); + this.markDestroyedObjects = new LinkedList<>(); + } + + public Long appliedObjectNum() { + return nextApplyObjectId; + } + +} diff --git a/metadata/src/main/java/org/apache/kafka/controller/stream/StreamControlManager.java b/metadata/src/main/java/org/apache/kafka/controller/stream/StreamControlManager.java index 69df13b992..ed2ae09952 100644 --- a/metadata/src/main/java/org/apache/kafka/controller/stream/StreamControlManager.java +++ b/metadata/src/main/java/org/apache/kafka/controller/stream/StreamControlManager.java @@ -26,6 +26,9 @@ import org.apache.kafka.timeline.TimelineHashSet; import org.slf4j.Logger; +/** + * The StreamControlManager manages all Stream's lifecycle, such as create, open, delete, etc. 
+ */ public class StreamControlManager { static class StreamMetadata { @@ -44,6 +47,8 @@ static class BrokerStreamMetadata { private final SnapshotRegistry snapshotRegistry; private final Logger log; + + private final S3ObjectControlManager s3ObjectControlManager; private final TimelineHashMap streamsMetadata; @@ -51,9 +56,11 @@ static class BrokerStreamMetadata { public StreamControlManager( SnapshotRegistry snapshotRegistry, - LogContext logContext) { + LogContext logContext, + S3ObjectControlManager s3ObjectControlManager) { this.snapshotRegistry = snapshotRegistry; this.log = logContext.logger(StreamControlManager.class); + this.s3ObjectControlManager = s3ObjectControlManager; this.streamsMetadata = new TimelineHashMap<>(snapshotRegistry, 0); this.brokersMetadata = new TimelineHashMap<>(snapshotRegistry, 0); } From b4407b1903a7f04a58c3df9dc5cef36f5f840e54 Mon Sep 17 00:00:00 2001 From: TheR1sing3un Date: Mon, 21 Aug 2023 10:44:58 +0800 Subject: [PATCH 07/10] refactor(s3): rename S3 related class name and change epoch from 32bit to 64bit 1. rename S3 related class name 2. 
change epoch from 32bit to 64bit Signed-off-by: TheR1sing3un --- .../kafka/server/ReplicaManagerTest.scala | 4 +- .../stream/StreamControlManager.java | 10 +- ...lta.java => BrokerS3WALMetadataDelta.java} | 26 +- ...age.java => BrokerS3WALMetadataImage.java} | 22 +- .../org/apache/kafka/image/MetadataDelta.java | 22 +- .../org/apache/kafka/image/MetadataImage.java | 8 +- ...aDelta.java => S3StreamMetadataDelta.java} | 26 +- ...aImage.java => S3StreamMetadataImage.java} | 20 +- ...Delta.java => S3StreamsMetadataDelta.java} | 66 ++-- ...Image.java => S3StreamsMetadataImage.java} | 24 +- .../kafka/metadata/stream/RangeMetadata.java | 4 +- ...eamIndex.java => S3ObjectStreamIndex.java} | 10 +- ...{StreamObject.java => S3StreamObject.java} | 40 +-- .../{WALObject.java => S3WALObject.java} | 30 +- .../common/metadata/RangeRecord.json | 2 +- ...d.json => RemoveS3StreamObjectRecord.json} | 2 +- ...mRecord.json => RemoveS3StreamRecord.json} | 2 +- ...tRecord.json => S3StreamObjectRecord.json} | 2 +- ...{StreamRecord.json => S3StreamRecord.json} | 4 +- .../apache/kafka/image/MetadataImageTest.java | 6 +- ...t.java => S3StreamsMetadataImageTest.java} | 310 +++++++++--------- 21 files changed, 320 insertions(+), 320 deletions(-) rename metadata/src/main/java/org/apache/kafka/image/{BrokerStreamMetadataDelta.java => BrokerS3WALMetadataDelta.java} (61%) rename metadata/src/main/java/org/apache/kafka/image/{BrokerStreamMetadataImage.java => BrokerS3WALMetadataImage.java} (72%) rename metadata/src/main/java/org/apache/kafka/image/{StreamMetadataDelta.java => S3StreamMetadataDelta.java} (76%) rename metadata/src/main/java/org/apache/kafka/image/{StreamMetadataImage.java => S3StreamMetadataImage.java} (87%) rename metadata/src/main/java/org/apache/kafka/image/{StreamsMetadataDelta.java => S3StreamsMetadataDelta.java} (65%) rename metadata/src/main/java/org/apache/kafka/image/{StreamsMetadataImage.java => S3StreamsMetadataImage.java} (70%) rename 
metadata/src/main/java/org/apache/kafka/metadata/stream/{ObjectStreamIndex.java => S3ObjectStreamIndex.java} (82%) rename metadata/src/main/java/org/apache/kafka/metadata/stream/{StreamObject.java => S3StreamObject.java} (67%) rename metadata/src/main/java/org/apache/kafka/metadata/stream/{WALObject.java => S3WALObject.java} (82%) rename metadata/src/main/resources/common/metadata/{RemoveStreamObjectRecord.json => RemoveS3StreamObjectRecord.json} (96%) rename metadata/src/main/resources/common/metadata/{RemoveStreamRecord.json => RemoveS3StreamRecord.json} (96%) rename metadata/src/main/resources/common/metadata/{StreamObjectRecord.json => S3StreamObjectRecord.json} (98%) rename metadata/src/main/resources/common/metadata/{StreamRecord.json => S3StreamRecord.json} (96%) rename metadata/src/test/java/org/apache/kafka/image/{StreamsMetadataImageTest.java => S3StreamsMetadataImageTest.java} (53%) diff --git a/core/src/test/scala/unit/kafka/server/ReplicaManagerTest.scala b/core/src/test/scala/unit/kafka/server/ReplicaManagerTest.scala index 594c452b03..040481546d 100644 --- a/core/src/test/scala/unit/kafka/server/ReplicaManagerTest.scala +++ b/core/src/test/scala/unit/kafka/server/ReplicaManagerTest.scala @@ -54,7 +54,7 @@ import org.apache.kafka.common.requests._ import org.apache.kafka.common.security.auth.KafkaPrincipal import org.apache.kafka.common.utils.{LogContext, Time, Utils} import org.apache.kafka.common.{IsolationLevel, Node, TopicIdPartition, TopicPartition, Uuid} -import org.apache.kafka.image.{AclsImage, ClientQuotasImage, ClusterImageTest, ConfigurationsImage, FeaturesImage, MetadataImage, MetadataProvenance, ProducerIdsImage, TopicsDelta, TopicsImage, StreamsMetadataImage} +import org.apache.kafka.image.{AclsImage, ClientQuotasImage, ClusterImageTest, ConfigurationsImage, FeaturesImage, MetadataImage, MetadataProvenance, ProducerIdsImage, TopicsDelta, TopicsImage, S3StreamsMetadataImage} import org.apache.kafka.metadata.LeaderConstants.NO_LEADER 
import org.apache.kafka.metadata.LeaderRecoveryState import org.apache.kafka.server.common.MetadataVersion.IBP_2_6_IV0 @@ -4129,7 +4129,7 @@ class ReplicaManagerTest { ClientQuotasImage.EMPTY, ProducerIdsImage.EMPTY, AclsImage.EMPTY, - StreamsMetadataImage.EMPTY + S3StreamsMetadataImage.EMPTY ) } diff --git a/metadata/src/main/java/org/apache/kafka/controller/stream/StreamControlManager.java b/metadata/src/main/java/org/apache/kafka/controller/stream/StreamControlManager.java index ed2ae09952..13d19a7b57 100644 --- a/metadata/src/main/java/org/apache/kafka/controller/stream/StreamControlManager.java +++ b/metadata/src/main/java/org/apache/kafka/controller/stream/StreamControlManager.java @@ -19,8 +19,8 @@ import org.apache.kafka.common.utils.LogContext; import org.apache.kafka.metadata.stream.RangeMetadata; -import org.apache.kafka.metadata.stream.StreamObject; -import org.apache.kafka.metadata.stream.WALObject; +import org.apache.kafka.metadata.stream.S3StreamObject; +import org.apache.kafka.metadata.stream.S3WALObject; import org.apache.kafka.timeline.SnapshotRegistry; import org.apache.kafka.timeline.TimelineHashMap; import org.apache.kafka.timeline.TimelineHashSet; @@ -36,18 +36,18 @@ static class StreamMetadata { private Integer epoch; private Long startOffset; private TimelineHashSet ranges; - private TimelineHashSet streamObjects; + private TimelineHashSet s3StreamObjects; } static class BrokerStreamMetadata { private Integer brokerId; - private TimelineHashSet walObjects; + private TimelineHashSet s3WalObjects; } private final SnapshotRegistry snapshotRegistry; private final Logger log; - + private final S3ObjectControlManager s3ObjectControlManager; private final TimelineHashMap streamsMetadata; diff --git a/metadata/src/main/java/org/apache/kafka/image/BrokerStreamMetadataDelta.java b/metadata/src/main/java/org/apache/kafka/image/BrokerS3WALMetadataDelta.java similarity index 61% rename from 
metadata/src/main/java/org/apache/kafka/image/BrokerStreamMetadataDelta.java rename to metadata/src/main/java/org/apache/kafka/image/BrokerS3WALMetadataDelta.java index 9264bf572d..5315a41524 100644 --- a/metadata/src/main/java/org/apache/kafka/image/BrokerStreamMetadataDelta.java +++ b/metadata/src/main/java/org/apache/kafka/image/BrokerS3WALMetadataDelta.java @@ -23,34 +23,34 @@ import java.util.Set; import org.apache.kafka.common.metadata.RemoveWALObjectRecord; import org.apache.kafka.common.metadata.WALObjectRecord; -import org.apache.kafka.metadata.stream.WALObject; +import org.apache.kafka.metadata.stream.S3WALObject; -public class BrokerStreamMetadataDelta { +public class BrokerS3WALMetadataDelta { - private final BrokerStreamMetadataImage image; - private final Set changedWALObjects = new HashSet<>(); + private final BrokerS3WALMetadataImage image; + private final Set changedS3WALObjects = new HashSet<>(); - private final Set removedWALObjects = new HashSet<>(); + private final Set removedS3WALObjects = new HashSet<>(); - public BrokerStreamMetadataDelta(BrokerStreamMetadataImage image) { + public BrokerS3WALMetadataDelta(BrokerS3WALMetadataImage image) { this.image = image; } public void replay(WALObjectRecord record) { - changedWALObjects.add(WALObject.of(record)); + changedS3WALObjects.add(S3WALObject.of(record)); } public void replay(RemoveWALObjectRecord record) { - removedWALObjects.add(new WALObject(record.objectId())); + removedS3WALObjects.add(new S3WALObject(record.objectId())); } - public BrokerStreamMetadataImage apply() { - List newWALObjects = new ArrayList<>(image.getWalObjects()); + public BrokerS3WALMetadataImage apply() { + List newS3WALObjects = new ArrayList<>(image.getWalObjects()); // remove all removed WAL objects - newWALObjects.removeAll(removedWALObjects); + newS3WALObjects.removeAll(removedS3WALObjects); // add all changed WAL objects - newWALObjects.addAll(changedWALObjects); - return new 
BrokerStreamMetadataImage(image.getBrokerId(), newWALObjects); + newS3WALObjects.addAll(changedS3WALObjects); + return new BrokerS3WALMetadataImage(image.getBrokerId(), newS3WALObjects); } } diff --git a/metadata/src/main/java/org/apache/kafka/image/BrokerStreamMetadataImage.java b/metadata/src/main/java/org/apache/kafka/image/BrokerS3WALMetadataImage.java similarity index 72% rename from metadata/src/main/java/org/apache/kafka/image/BrokerStreamMetadataImage.java rename to metadata/src/main/java/org/apache/kafka/image/BrokerS3WALMetadataImage.java index 2732c2ff37..a5a48a40ec 100644 --- a/metadata/src/main/java/org/apache/kafka/image/BrokerStreamMetadataImage.java +++ b/metadata/src/main/java/org/apache/kafka/image/BrokerS3WALMetadataImage.java @@ -20,17 +20,17 @@ import java.util.List; import java.util.Objects; -import org.apache.kafka.metadata.stream.WALObject; +import org.apache.kafka.metadata.stream.S3WALObject; import org.apache.kafka.image.writer.ImageWriter; import org.apache.kafka.image.writer.ImageWriterOptions; -public class BrokerStreamMetadataImage { +public class BrokerS3WALMetadataImage { private final Integer brokerId; - private final List walObjects; + private final List s3WalObjects; - public BrokerStreamMetadataImage(Integer brokerId, List walObjects) { + public BrokerS3WALMetadataImage(Integer brokerId, List s3WalObjects) { this.brokerId = brokerId; - this.walObjects = walObjects; + this.s3WalObjects = s3WalObjects; } @Override @@ -41,21 +41,21 @@ public boolean equals(Object o) { if (o == null || getClass() != o.getClass()) { return false; } - BrokerStreamMetadataImage that = (BrokerStreamMetadataImage) o; - return Objects.equals(brokerId, that.brokerId) && Objects.equals(walObjects, that.walObjects); + BrokerS3WALMetadataImage that = (BrokerS3WALMetadataImage) o; + return Objects.equals(brokerId, that.brokerId) && Objects.equals(s3WalObjects, that.s3WalObjects); } @Override public int hashCode() { - return Objects.hash(brokerId, walObjects); + 
return Objects.hash(brokerId, s3WalObjects); } public void write(ImageWriter writer, ImageWriterOptions options) { - walObjects.forEach(walObject -> writer.write(walObject.toRecord())); + s3WalObjects.forEach(walObject -> writer.write(walObject.toRecord())); } - public List getWalObjects() { - return walObjects; + public List getWalObjects() { + return s3WalObjects; } public Integer getBrokerId() { diff --git a/metadata/src/main/java/org/apache/kafka/image/MetadataDelta.java b/metadata/src/main/java/org/apache/kafka/image/MetadataDelta.java index b64f242385..24b824012d 100644 --- a/metadata/src/main/java/org/apache/kafka/image/MetadataDelta.java +++ b/metadata/src/main/java/org/apache/kafka/image/MetadataDelta.java @@ -80,7 +80,7 @@ public MetadataDelta build() { private AclsDelta aclsDelta = null; - private StreamsMetadataDelta streamsMetadataDelta = null; + private S3StreamsMetadataDelta s3StreamsMetadataDelta = null; public MetadataDelta(MetadataImage image) { this.image = image; @@ -155,15 +155,15 @@ public AclsDelta getOrCreateAclsDelta() { return aclsDelta; } - public StreamsMetadataDelta streamMetadataDelta() { - return streamsMetadataDelta; + public S3StreamsMetadataDelta streamMetadataDelta() { + return s3StreamsMetadataDelta; } - public StreamsMetadataDelta getOrCreateStreamsMetadataDelta() { - if (streamsMetadataDelta == null) { - streamsMetadataDelta = new StreamsMetadataDelta(image.streamsMetadata()); + public S3StreamsMetadataDelta getOrCreateStreamsMetadataDelta() { + if (s3StreamsMetadataDelta == null) { + s3StreamsMetadataDelta = new S3StreamsMetadataDelta(image.streamsMetadata()); } - return streamsMetadataDelta; + return s3StreamsMetadataDelta; } public Optional metadataVersionChanged() { @@ -418,11 +418,11 @@ public MetadataImage apply(MetadataProvenance provenance) { } else { newAcls = aclsDelta.apply(); } - StreamsMetadataImage newStreamMetadata; - if (streamsMetadataDelta == null) { + S3StreamsMetadataImage newStreamMetadata; + if 
(s3StreamsMetadataDelta == null) { newStreamMetadata = image.streamsMetadata(); } else { - newStreamMetadata = streamsMetadataDelta.apply(); + newStreamMetadata = s3StreamsMetadataDelta.apply(); } return new MetadataImage( provenance, @@ -447,7 +447,7 @@ public String toString() { ", clientQuotasDelta=" + clientQuotasDelta + ", producerIdsDelta=" + producerIdsDelta + ", aclsDelta=" + aclsDelta + - ", streamMetadataDelta=" + streamsMetadataDelta + + ", streamMetadataDelta=" + s3StreamsMetadataDelta + ')'; } } diff --git a/metadata/src/main/java/org/apache/kafka/image/MetadataImage.java b/metadata/src/main/java/org/apache/kafka/image/MetadataImage.java index e0795bc51e..c7346a3d38 100644 --- a/metadata/src/main/java/org/apache/kafka/image/MetadataImage.java +++ b/metadata/src/main/java/org/apache/kafka/image/MetadataImage.java @@ -39,7 +39,7 @@ public final class MetadataImage { ClientQuotasImage.EMPTY, ProducerIdsImage.EMPTY, AclsImage.EMPTY, - StreamsMetadataImage.EMPTY); + S3StreamsMetadataImage.EMPTY); private final MetadataProvenance provenance; @@ -57,7 +57,7 @@ public final class MetadataImage { private final AclsImage acls; - private final StreamsMetadataImage streamMetadata; + private final S3StreamsMetadataImage streamMetadata; public MetadataImage( MetadataProvenance provenance, @@ -68,7 +68,7 @@ public MetadataImage( ClientQuotasImage clientQuotas, ProducerIdsImage producerIds, AclsImage acls, - StreamsMetadataImage streamMetadata + S3StreamsMetadataImage streamMetadata ) { this.provenance = provenance; this.features = features; @@ -132,7 +132,7 @@ public AclsImage acls() { return acls; } - public StreamsMetadataImage streamsMetadata() { + public S3StreamsMetadataImage streamsMetadata() { return streamMetadata; } diff --git a/metadata/src/main/java/org/apache/kafka/image/StreamMetadataDelta.java b/metadata/src/main/java/org/apache/kafka/image/S3StreamMetadataDelta.java similarity index 76% rename from 
metadata/src/main/java/org/apache/kafka/image/StreamMetadataDelta.java rename to metadata/src/main/java/org/apache/kafka/image/S3StreamMetadataDelta.java index 2d03c387ea..aa5fde9382 100644 --- a/metadata/src/main/java/org/apache/kafka/image/StreamMetadataDelta.java +++ b/metadata/src/main/java/org/apache/kafka/image/S3StreamMetadataDelta.java @@ -28,19 +28,19 @@ import org.apache.kafka.common.metadata.RemoveStreamObjectRecord; import org.apache.kafka.common.metadata.StreamObjectRecord; import org.apache.kafka.metadata.stream.RangeMetadata; -import org.apache.kafka.metadata.stream.StreamObject; +import org.apache.kafka.metadata.stream.S3StreamObject; -public class StreamMetadataDelta { - private final StreamMetadataImage image; +public class S3StreamMetadataDelta { + private final S3StreamMetadataImage image; private Integer newEpoch; private final Map changedRanges = new HashMap<>(); private final Set removedRanges = new HashSet<>(); - private final Set changedStreamObjects = new HashSet<>(); - private final Set removedStreamObjects = new HashSet<>(); + private final Set changedS3StreamObjects = new HashSet<>(); + private final Set removedS3StreamObjects = new HashSet<>(); - public StreamMetadataDelta(StreamMetadataImage image) { + public S3StreamMetadataDelta(S3StreamMetadataImage image) { this.image = image; this.newEpoch = image.getEpoch(); } @@ -54,14 +54,14 @@ public void replay(RemoveRangeRecord record) { } public void replay(StreamObjectRecord record) { - changedStreamObjects.add(StreamObject.of(record)); + changedS3StreamObjects.add(S3StreamObject.of(record)); } public void replay(RemoveStreamObjectRecord record) { - removedStreamObjects.add(new StreamObject(record.objectId())); + removedS3StreamObjects.add(new S3StreamObject(record.objectId())); } - public StreamMetadataImage apply() { + public S3StreamMetadataImage apply() { Map newRanges = new HashMap<>(image.getRanges().size()); // apply the delta changes of old ranges since the last image 
image.getRanges().forEach((rangeIndex, range) -> { @@ -80,12 +80,12 @@ public StreamMetadataImage apply() { changedRanges.entrySet().stream().filter(entry -> !newRanges.containsKey(entry.getKey())) .forEach(entry -> newRanges.put(entry.getKey(), entry.getValue())); - List newStreamObjects = new ArrayList<>(image.getStreams()); + List newS3StreamObjects = new ArrayList<>(image.getStreams()); // remove all removed stream-objects - newStreamObjects.removeAll(removedStreamObjects); + newS3StreamObjects.removeAll(removedS3StreamObjects); // add all changed stream-objects - newStreamObjects.addAll(changedStreamObjects); - return new StreamMetadataImage(image.getStreamId(), newEpoch, image.getStartOffset(), newRanges, newStreamObjects); + newS3StreamObjects.addAll(changedS3StreamObjects); + return new S3StreamMetadataImage(image.getStreamId(), newEpoch, image.getStartOffset(), newRanges, newS3StreamObjects); } } diff --git a/metadata/src/main/java/org/apache/kafka/image/StreamMetadataImage.java b/metadata/src/main/java/org/apache/kafka/image/S3StreamMetadataImage.java similarity index 87% rename from metadata/src/main/java/org/apache/kafka/image/StreamMetadataImage.java rename to metadata/src/main/java/org/apache/kafka/image/S3StreamMetadataImage.java index 955bcf1d27..b11c8c98a8 100644 --- a/metadata/src/main/java/org/apache/kafka/image/StreamMetadataImage.java +++ b/metadata/src/main/java/org/apache/kafka/image/S3StreamMetadataImage.java @@ -22,28 +22,28 @@ import java.util.Objects; import org.apache.kafka.common.metadata.StreamRecord; import org.apache.kafka.metadata.stream.RangeMetadata; -import org.apache.kafka.metadata.stream.StreamObject; +import org.apache.kafka.metadata.stream.S3StreamObject; import org.apache.kafka.image.writer.ImageWriter; import org.apache.kafka.image.writer.ImageWriterOptions; -public class StreamMetadataImage { +public class S3StreamMetadataImage { private final Long streamId; - private final Integer epoch; + private final Long epoch; 
private final Long startOffset; private final Map ranges; - private final List streams; + private final List streams; - public StreamMetadataImage( + public S3StreamMetadataImage( Long streamId, - Integer epoch, + Long epoch, Long startOffset, Map ranges, - List streams) { + List streams) { this.streamId = streamId; this.epoch = epoch; this.startOffset = startOffset; @@ -64,11 +64,11 @@ public Map getRanges() { return ranges; } - public List getStreams() { + public List getStreams() { return streams; } - public Integer getEpoch() { + public Long getEpoch() { return epoch; } @@ -88,7 +88,7 @@ public boolean equals(Object o) { if (o == null || getClass() != o.getClass()) { return false; } - StreamMetadataImage that = (StreamMetadataImage) o; + S3StreamMetadataImage that = (S3StreamMetadataImage) o; return Objects.equals(streamId, that.streamId) && Objects.equals(epoch, that.epoch) && Objects.equals(startOffset, that.startOffset) && Objects.equals(ranges, that.ranges) && Objects.equals(streams, that.streams); } diff --git a/metadata/src/main/java/org/apache/kafka/image/StreamsMetadataDelta.java b/metadata/src/main/java/org/apache/kafka/image/S3StreamsMetadataDelta.java similarity index 65% rename from metadata/src/main/java/org/apache/kafka/image/StreamsMetadataDelta.java rename to metadata/src/main/java/org/apache/kafka/image/S3StreamsMetadataDelta.java index 6014a3bc14..18d77f090d 100644 --- a/metadata/src/main/java/org/apache/kafka/image/StreamsMetadataDelta.java +++ b/metadata/src/main/java/org/apache/kafka/image/S3StreamsMetadataDelta.java @@ -31,13 +31,13 @@ import org.apache.kafka.common.metadata.StreamRecord; import org.apache.kafka.common.metadata.WALObjectRecord; -public final class StreamsMetadataDelta { +public final class S3StreamsMetadataDelta { - private final StreamsMetadataImage image; + private final S3StreamsMetadataImage image; - private final Map changedStreams = new HashMap<>(); + private final Map changedStreams = new HashMap<>(); - private 
final Map changedBrokers = new HashMap<>(); + private final Map changedBrokers = new HashMap<>(); private final Set deletedStreams = new HashSet<>(); // TODO: when we recycle the broker's memory data structure @@ -46,22 +46,22 @@ public final class StreamsMetadataDelta { // so we should decide when to recycle the broker's memory data structure private final Set deletedBrokers = new HashSet<>(); - public StreamsMetadataDelta(StreamsMetadataImage image) { + public S3StreamsMetadataDelta(S3StreamsMetadataImage image) { this.image = image; } public void replay(StreamRecord record) { - StreamMetadataDelta delta; + S3StreamMetadataDelta delta; if (!image.getStreamsMetadata().containsKey(record.streamId())) { // create a new StreamMetadata with empty ranges and streams if not exist - delta = new StreamMetadataDelta( - new StreamMetadataImage(record.streamId(), record.epoch(), record.startOffset(), Collections.emptyMap(), Collections.emptyList())); + delta = new S3StreamMetadataDelta( + new S3StreamMetadataImage(record.streamId(), record.epoch(), record.startOffset(), Collections.emptyMap(), Collections.emptyList())); } else { // update the epoch if exist - StreamMetadataImage streamMetadataImage = image.getStreamsMetadata().get(record.streamId()); - delta = new StreamMetadataDelta( - new StreamMetadataImage(record.streamId(), record.epoch(), record.startOffset(), streamMetadataImage.getRanges(), - streamMetadataImage.getStreams())); + S3StreamMetadataImage s3StreamMetadataImage = image.getStreamsMetadata().get(record.streamId()); + delta = new S3StreamMetadataDelta( + new S3StreamMetadataImage(record.streamId(), record.epoch(), record.startOffset(), s3StreamMetadataImage.getRanges(), + s3StreamMetadataImage.getStreams())); } // add the delta to the changedStreams changedStreams.put(record.streamId(), delta); @@ -96,32 +96,32 @@ public void replay(RemoveWALObjectRecord record) { getOrCreateBrokerStreamMetadataDelta(record.brokerId()).replay(record); } - private 
StreamMetadataDelta getOrCreateStreamMetadataDelta(Long streamId) { - StreamMetadataDelta delta = changedStreams.get(streamId); + private S3StreamMetadataDelta getOrCreateStreamMetadataDelta(Long streamId) { + S3StreamMetadataDelta delta = changedStreams.get(streamId); if (delta == null) { - delta = new StreamMetadataDelta(image.getStreamsMetadata().get(streamId)); + delta = new S3StreamMetadataDelta(image.getStreamsMetadata().get(streamId)); changedStreams.put(streamId, delta); } return delta; } - private BrokerStreamMetadataDelta getOrCreateBrokerStreamMetadataDelta(Integer brokerId) { - BrokerStreamMetadataDelta delta = changedBrokers.get(brokerId); + private BrokerS3WALMetadataDelta getOrCreateBrokerStreamMetadataDelta(Integer brokerId) { + BrokerS3WALMetadataDelta delta = changedBrokers.get(brokerId); if (delta == null) { - delta = new BrokerStreamMetadataDelta( + delta = new BrokerS3WALMetadataDelta( image.getBrokerStreamsMetadata(). - getOrDefault(brokerId, new BrokerStreamMetadataImage(brokerId, Collections.emptyList()))); + getOrDefault(brokerId, new BrokerS3WALMetadataImage(brokerId, Collections.emptyList()))); changedBrokers.put(brokerId, delta); } return delta; } - StreamsMetadataImage apply() { - Map newStreams = new HashMap<>(image.getStreamsMetadata().size()); - Map newBrokerStreams = new HashMap<>(image.getBrokerStreamsMetadata().size()); + S3StreamsMetadataImage apply() { + Map newStreams = new HashMap<>(image.getStreamsMetadata().size()); + Map newBrokerStreams = new HashMap<>(image.getBrokerStreamsMetadata().size()); // apply the delta changes of old streams since the last image image.getStreamsMetadata().forEach((streamId, streamMetadataImage) -> { - StreamMetadataDelta delta = changedStreams.get(streamId); + S3StreamMetadataDelta delta = changedStreams.get(streamId); if (delta == null) { // no change, check if deleted if (!deletedStreams.contains(streamId)) { @@ -129,20 +129,20 @@ StreamsMetadataImage apply() { } } else { // changed, apply the 
delta - StreamMetadataImage newStreamMetadataImage = delta.apply(); - newStreams.put(streamId, newStreamMetadataImage); + S3StreamMetadataImage newS3StreamMetadataImage = delta.apply(); + newStreams.put(streamId, newS3StreamMetadataImage); } }); // apply the new created streams changedStreams.entrySet().stream().filter(entry -> !newStreams.containsKey(entry.getKey())) .forEach(entry -> { - StreamMetadataImage newStreamMetadataImage = entry.getValue().apply(); - newStreams.put(entry.getKey(), newStreamMetadataImage); + S3StreamMetadataImage newS3StreamMetadataImage = entry.getValue().apply(); + newStreams.put(entry.getKey(), newS3StreamMetadataImage); }); // apply the delta changes of old brokers since the last image image.getBrokerStreamsMetadata().forEach((brokerId, brokerStreamMetadataImage) -> { - BrokerStreamMetadataDelta delta = changedBrokers.get(brokerId); + BrokerS3WALMetadataDelta delta = changedBrokers.get(brokerId); if (delta == null) { // no change, check if deleted if (!deletedBrokers.contains(brokerId)) { @@ -150,18 +150,18 @@ StreamsMetadataImage apply() { } } else { // changed, apply the delta - BrokerStreamMetadataImage newBrokerStreamMetadataImage = delta.apply(); - newBrokerStreams.put(brokerId, newBrokerStreamMetadataImage); + BrokerS3WALMetadataImage newBrokerS3WALMetadataImage = delta.apply(); + newBrokerStreams.put(brokerId, newBrokerS3WALMetadataImage); } }); // apply the new created streams changedBrokers.entrySet().stream().filter(entry -> !newBrokerStreams.containsKey(entry.getKey())) .forEach(entry -> { - BrokerStreamMetadataImage newBrokerStreamMetadataImage = entry.getValue().apply(); - newBrokerStreams.put(entry.getKey(), newBrokerStreamMetadataImage); + BrokerS3WALMetadataImage newBrokerS3WALMetadataImage = entry.getValue().apply(); + newBrokerStreams.put(entry.getKey(), newBrokerS3WALMetadataImage); }); - return new StreamsMetadataImage(newStreams, newBrokerStreams); + return new S3StreamsMetadataImage(newStreams, newBrokerStreams); 
} } diff --git a/metadata/src/main/java/org/apache/kafka/image/StreamsMetadataImage.java b/metadata/src/main/java/org/apache/kafka/image/S3StreamsMetadataImage.java similarity index 70% rename from metadata/src/main/java/org/apache/kafka/image/StreamsMetadataImage.java rename to metadata/src/main/java/org/apache/kafka/image/S3StreamsMetadataImage.java index f4ce20c7a3..7a8291e4e3 100644 --- a/metadata/src/main/java/org/apache/kafka/image/StreamsMetadataImage.java +++ b/metadata/src/main/java/org/apache/kafka/image/S3StreamsMetadataImage.java @@ -23,18 +23,18 @@ import org.apache.kafka.image.writer.ImageWriter; import org.apache.kafka.image.writer.ImageWriterOptions; -public final class StreamsMetadataImage { +public final class S3StreamsMetadataImage { - public static final StreamsMetadataImage EMPTY = - new StreamsMetadataImage(Collections.emptyMap(), Collections.emptyMap()); + public static final S3StreamsMetadataImage EMPTY = + new S3StreamsMetadataImage(Collections.emptyMap(), Collections.emptyMap()); - private final Map streamsMetadata; + private final Map streamsMetadata; - private final Map brokerStreamsMetadata; + private final Map brokerStreamsMetadata; - public StreamsMetadataImage( - Map streamsMetadata, - Map brokerStreamsMetadata) { + public S3StreamsMetadataImage( + Map streamsMetadata, + Map brokerStreamsMetadata) { this.streamsMetadata = streamsMetadata; this.brokerStreamsMetadata = brokerStreamsMetadata; } @@ -51,8 +51,8 @@ public void write(ImageWriter writer, ImageWriterOptions options) { @Override public boolean equals(Object obj) { - if (!(obj instanceof StreamsMetadataImage)) return false; - StreamsMetadataImage other = (StreamsMetadataImage) obj; + if (!(obj instanceof S3StreamsMetadataImage)) return false; + S3StreamsMetadataImage other = (S3StreamsMetadataImage) obj; return this.streamsMetadata.equals(other.streamsMetadata) && this.brokerStreamsMetadata.equals(other.brokerStreamsMetadata); } @@ -62,11 +62,11 @@ public int hashCode() { 
return Objects.hash(streamsMetadata, brokerStreamsMetadata); } - public Map getBrokerStreamsMetadata() { + public Map getBrokerStreamsMetadata() { return brokerStreamsMetadata; } - public Map getStreamsMetadata() { + public Map getStreamsMetadata() { return streamsMetadata; } } diff --git a/metadata/src/main/java/org/apache/kafka/metadata/stream/RangeMetadata.java b/metadata/src/main/java/org/apache/kafka/metadata/stream/RangeMetadata.java index 444a90856a..328dc46e4f 100644 --- a/metadata/src/main/java/org/apache/kafka/metadata/stream/RangeMetadata.java +++ b/metadata/src/main/java/org/apache/kafka/metadata/stream/RangeMetadata.java @@ -23,7 +23,7 @@ public class RangeMetadata implements Comparable { private Long streamId; - private Integer epoch; + private Long epoch; private Integer rangeIndex; private Long startOffset; private Optional endOffset; @@ -33,7 +33,7 @@ public int compareTo(RangeMetadata o) { return this.rangeIndex.compareTo(o.rangeIndex); } - public Integer getEpoch() { + public Long getEpoch() { return epoch; } diff --git a/metadata/src/main/java/org/apache/kafka/metadata/stream/ObjectStreamIndex.java b/metadata/src/main/java/org/apache/kafka/metadata/stream/S3ObjectStreamIndex.java similarity index 82% rename from metadata/src/main/java/org/apache/kafka/metadata/stream/ObjectStreamIndex.java rename to metadata/src/main/java/org/apache/kafka/metadata/stream/S3ObjectStreamIndex.java index 7c2cfa2379..5c8fd88071 100644 --- a/metadata/src/main/java/org/apache/kafka/metadata/stream/ObjectStreamIndex.java +++ b/metadata/src/main/java/org/apache/kafka/metadata/stream/S3ObjectStreamIndex.java @@ -22,7 +22,7 @@ /** * ObjectStreamIndex is the index of a stream range in a WAL object or STREAM object. 
*/ -public class ObjectStreamIndex implements Comparable { +public class S3ObjectStreamIndex implements Comparable { private final Long streamId; @@ -30,7 +30,7 @@ public class ObjectStreamIndex implements Comparable { private final Long endOffset; - public ObjectStreamIndex(Long streamId, Long startOffset, Long endOffset) { + public S3ObjectStreamIndex(Long streamId, Long startOffset, Long endOffset) { this.streamId = streamId; this.startOffset = startOffset; this.endOffset = endOffset; @@ -49,7 +49,7 @@ public Long getEndOffset() { } @Override - public int compareTo(ObjectStreamIndex o) { + public int compareTo(S3ObjectStreamIndex o) { int res = this.streamId.compareTo(o.streamId); return res == 0 ? this.startOffset.compareTo(o.startOffset) : res; } @@ -61,7 +61,7 @@ public StreamIndex toRecordStreamIndex() { .setEndOffset(endOffset); } - public static ObjectStreamIndex of(StreamIndex index) { - return new ObjectStreamIndex(index.streamId(), index.startOffset(), index.endOffset()); + public static S3ObjectStreamIndex of(StreamIndex index) { + return new S3ObjectStreamIndex(index.streamId(), index.startOffset(), index.endOffset()); } } diff --git a/metadata/src/main/java/org/apache/kafka/metadata/stream/StreamObject.java b/metadata/src/main/java/org/apache/kafka/metadata/stream/S3StreamObject.java similarity index 67% rename from metadata/src/main/java/org/apache/kafka/metadata/stream/StreamObject.java rename to metadata/src/main/java/org/apache/kafka/metadata/stream/S3StreamObject.java index c888c005a8..74165874a2 100644 --- a/metadata/src/main/java/org/apache/kafka/metadata/stream/StreamObject.java +++ b/metadata/src/main/java/org/apache/kafka/metadata/stream/S3StreamObject.java @@ -21,11 +21,11 @@ import org.apache.kafka.common.metadata.StreamObjectRecord; import org.apache.kafka.server.common.ApiMessageAndVersion; -public class StreamObject extends S3Object { +public class S3StreamObject extends S3Object { - private ObjectStreamIndex streamIndex; + private 
S3ObjectStreamIndex streamIndex; - public StreamObject(final Long objectId) { + public S3StreamObject(final Long objectId) { super(objectId); } @@ -40,31 +40,31 @@ public void onCreate(S3ObjectCreateContext createContext) { @Override public int compareTo(S3Object o) { - if (!(o instanceof StreamObject)) { + if (!(o instanceof S3StreamObject)) { throw new IllegalArgumentException("Cannot compare StreamObject with non-StreamObject"); } - StreamObject streamObject = (StreamObject) o; + S3StreamObject s3StreamObject = (S3StreamObject) o; // order by streamId first, then startOffset - int res = this.streamIndex.getStreamId().compareTo(streamObject.streamIndex.getStreamId()); - return res == 0 ? this.streamIndex.getStartOffset().compareTo(streamObject.streamIndex.getStartOffset()) : res; + int res = this.streamIndex.getStreamId().compareTo(s3StreamObject.streamIndex.getStreamId()); + return res == 0 ? this.streamIndex.getStartOffset().compareTo(s3StreamObject.streamIndex.getStartOffset()) : res; } class StreamObjectCreateContext extends S3ObjectCreateContext { - private final ObjectStreamIndex streamIndex; + private final S3ObjectStreamIndex streamIndex; public StreamObjectCreateContext( final Long createTimeInMs, final Long objectSize, final String objectAddress, final S3ObjectType objectType, - final ObjectStreamIndex streamIndex) { + final S3ObjectStreamIndex streamIndex) { super(createTimeInMs, objectSize, objectAddress, objectType); this.streamIndex = streamIndex; } } - public ObjectStreamIndex getStreamIndex() { + public S3ObjectStreamIndex getStreamIndex() { return streamIndex; } @@ -82,15 +82,15 @@ public ApiMessageAndVersion toRecord() { .setEndOffset(streamIndex.getEndOffset()), (short) 0); } - public static StreamObject of(StreamObjectRecord record) { - StreamObject streamObject = new StreamObject(record.objectId()); - streamObject.objectType = S3ObjectType.fromByte(record.objectType()); - streamObject.s3ObjectState = 
S3ObjectState.fromByte(record.objectState()); - streamObject.applyTimeInMs = Optional.of(record.applyTimeInMs()); - streamObject.createTimeInMs = Optional.of(record.createTimeInMs()); - streamObject.destroyTimeInMs = Optional.of(record.destroyTimeInMs()); - streamObject.objectSize = Optional.of(record.objectSize()); - streamObject.streamIndex = new ObjectStreamIndex(record.streamId(), record.startOffset(), record.endOffset()); - return streamObject; + public static S3StreamObject of(StreamObjectRecord record) { + S3StreamObject s3StreamObject = new S3StreamObject(record.objectId()); + s3StreamObject.objectType = S3ObjectType.fromByte(record.objectType()); + s3StreamObject.s3ObjectState = S3ObjectState.fromByte(record.objectState()); + s3StreamObject.applyTimeInMs = Optional.of(record.applyTimeInMs()); + s3StreamObject.createTimeInMs = Optional.of(record.createTimeInMs()); + s3StreamObject.destroyTimeInMs = Optional.of(record.destroyTimeInMs()); + s3StreamObject.objectSize = Optional.of(record.objectSize()); + s3StreamObject.streamIndex = new S3ObjectStreamIndex(record.streamId(), record.startOffset(), record.endOffset()); + return s3StreamObject; } } diff --git a/metadata/src/main/java/org/apache/kafka/metadata/stream/WALObject.java b/metadata/src/main/java/org/apache/kafka/metadata/stream/S3WALObject.java similarity index 82% rename from metadata/src/main/java/org/apache/kafka/metadata/stream/WALObject.java rename to metadata/src/main/java/org/apache/kafka/metadata/stream/S3WALObject.java index c14ce05a8f..13e48d2312 100644 --- a/metadata/src/main/java/org/apache/kafka/metadata/stream/WALObject.java +++ b/metadata/src/main/java/org/apache/kafka/metadata/stream/S3WALObject.java @@ -23,18 +23,18 @@ import org.apache.kafka.common.metadata.WALObjectRecord; import org.apache.kafka.server.common.ApiMessageAndVersion; -public class WALObject extends S3Object { +public class S3WALObject extends S3Object { private Integer brokerId; - private Map streamsIndex; + private Map 
streamsIndex; private S3ObjectType objectType = S3ObjectType.UNKNOWN; - public WALObject(Long objectId) { + public S3WALObject(Long objectId) { super(objectId); } - private WALObject( + private S3WALObject( final Long objectId, final Long objectSize, final String objectAddress, @@ -44,12 +44,12 @@ private WALObject( final S3ObjectState s3ObjectState, final S3ObjectType objectType, final Integer brokerId, - final List streamsIndex) { + final List streamsIndex) { super(objectId, objectSize, objectAddress, applyTimeInMs, createTimeInMs, destroyTimeInMs, s3ObjectState, objectType); this.objectType = objectType; this.brokerId = brokerId; this.streamsIndex = streamsIndex.stream().collect( - Collectors.toMap(ObjectStreamIndex::getStreamId, index -> index)); + Collectors.toMap(S3ObjectStreamIndex::getStreamId, index -> index)); } @Override @@ -59,13 +59,13 @@ public void onCreate(S3ObjectCreateContext createContext) { throw new IllegalArgumentException(); } WALObjectCreateContext walCreateContext = (WALObjectCreateContext) createContext; - this.streamsIndex = walCreateContext.streamIndexList.stream().collect(Collectors.toMap(ObjectStreamIndex::getStreamId, index -> index)); + this.streamsIndex = walCreateContext.streamIndexList.stream().collect(Collectors.toMap(S3ObjectStreamIndex::getStreamId, index -> index)); this.brokerId = walCreateContext.brokerId; } class WALObjectCreateContext extends S3ObjectCreateContext { - private final List streamIndexList; + private final List streamIndexList; private final Integer brokerId; public WALObjectCreateContext( @@ -73,7 +73,7 @@ public WALObjectCreateContext( final Long objectSize, final String objectAddress, final S3ObjectType objectType, - final List streamIndexList, + final List streamIndexList, final Integer brokerId) { super(createTimeInMs, objectSize, objectAddress, objectType); this.streamIndexList = streamIndexList; @@ -92,24 +92,24 @@ public ApiMessageAndVersion toRecord() { .setObjectSize(objectSize.get()) 
.setStreamsIndex( streamsIndex.values().stream() - .map(ObjectStreamIndex::toRecordStreamIndex) + .map(S3ObjectStreamIndex::toRecordStreamIndex) .collect(Collectors.toList())), (short) 0); } - public static WALObject of(WALObjectRecord record) { - WALObject walObject = new WALObject( + public static S3WALObject of(WALObjectRecord record) { + S3WALObject s3WalObject = new S3WALObject( record.objectId(), record.objectSize(), null, record.applyTimeInMs(), record.createTimeInMs(), record.destroyTimeInMs(), S3ObjectState.fromByte(record.objectState()), S3ObjectType.fromByte(record.objectType()), - record.brokerId(), record.streamsIndex().stream().map(ObjectStreamIndex::of).collect(Collectors.toList())); - return walObject; + record.brokerId(), record.streamsIndex().stream().map(S3ObjectStreamIndex::of).collect(Collectors.toList())); + return s3WalObject; } public Integer getBrokerId() { return brokerId; } - public Map getStreamsIndex() { + public Map getStreamsIndex() { return streamsIndex; } diff --git a/metadata/src/main/resources/common/metadata/RangeRecord.json b/metadata/src/main/resources/common/metadata/RangeRecord.json index 350d4c9a26..8721c65752 100644 --- a/metadata/src/main/resources/common/metadata/RangeRecord.json +++ b/metadata/src/main/resources/common/metadata/RangeRecord.json @@ -28,7 +28,7 @@ }, { "name": "Epoch", - "type": "int32", + "type": "int64", "versions": "0+", "about": "The epoch of the range" }, diff --git a/metadata/src/main/resources/common/metadata/RemoveStreamObjectRecord.json b/metadata/src/main/resources/common/metadata/RemoveS3StreamObjectRecord.json similarity index 96% rename from metadata/src/main/resources/common/metadata/RemoveStreamObjectRecord.json rename to metadata/src/main/resources/common/metadata/RemoveS3StreamObjectRecord.json index 746eeb47f7..729a0fc472 100644 --- a/metadata/src/main/resources/common/metadata/RemoveStreamObjectRecord.json +++ b/metadata/src/main/resources/common/metadata/RemoveS3StreamObjectRecord.json 
@@ -16,7 +16,7 @@ { "apiKey": 28, "type": "metadata", - "name": "RemoveStreamObjectRecord", + "name": "RemoveS3StreamObjectRecord", "validVersions": "0", "flexibleVersions": "0+", "fields": [ diff --git a/metadata/src/main/resources/common/metadata/RemoveStreamRecord.json b/metadata/src/main/resources/common/metadata/RemoveS3StreamRecord.json similarity index 96% rename from metadata/src/main/resources/common/metadata/RemoveStreamRecord.json rename to metadata/src/main/resources/common/metadata/RemoveS3StreamRecord.json index 5befb960de..30156373a7 100644 --- a/metadata/src/main/resources/common/metadata/RemoveStreamRecord.json +++ b/metadata/src/main/resources/common/metadata/RemoveS3StreamRecord.json @@ -16,7 +16,7 @@ { "apiKey": 23, "type": "metadata", - "name": "RemoveStreamRecord", + "name": "RemoveS3StreamRecord", "validVersions": "0", "flexibleVersions": "0+", "fields": [ diff --git a/metadata/src/main/resources/common/metadata/StreamObjectRecord.json b/metadata/src/main/resources/common/metadata/S3StreamObjectRecord.json similarity index 98% rename from metadata/src/main/resources/common/metadata/StreamObjectRecord.json rename to metadata/src/main/resources/common/metadata/S3StreamObjectRecord.json index 3ea7a59795..4ae99fb78d 100644 --- a/metadata/src/main/resources/common/metadata/StreamObjectRecord.json +++ b/metadata/src/main/resources/common/metadata/S3StreamObjectRecord.json @@ -16,7 +16,7 @@ { "apiKey": 27, "type": "metadata", - "name": "StreamObjectRecord", + "name": "S3StreamObjectRecord", "validVersions": "0", "flexibleVersions": "0+", "fields": [ diff --git a/metadata/src/main/resources/common/metadata/StreamRecord.json b/metadata/src/main/resources/common/metadata/S3StreamRecord.json similarity index 96% rename from metadata/src/main/resources/common/metadata/StreamRecord.json rename to metadata/src/main/resources/common/metadata/S3StreamRecord.json index 68138a1152..b4d4a08f31 100644 --- 
a/metadata/src/main/resources/common/metadata/StreamRecord.json +++ b/metadata/src/main/resources/common/metadata/S3StreamRecord.json @@ -16,7 +16,7 @@ { "apiKey": 22, "type": "metadata", - "name": "StreamRecord", + "name": "S3StreamRecord", "validVersions": "0", "flexibleVersions": "0+", "fields": [ @@ -28,7 +28,7 @@ }, { "name": "Epoch", - "type": "int32", + "type": "int64", "versions": "0+", "about": "The epoch" }, diff --git a/metadata/src/test/java/org/apache/kafka/image/MetadataImageTest.java b/metadata/src/test/java/org/apache/kafka/image/MetadataImageTest.java index aa8a985b7d..0e9afe7240 100644 --- a/metadata/src/test/java/org/apache/kafka/image/MetadataImageTest.java +++ b/metadata/src/test/java/org/apache/kafka/image/MetadataImageTest.java @@ -44,7 +44,7 @@ public class MetadataImageTest { ClientQuotasImageTest.IMAGE1, ProducerIdsImageTest.IMAGE1, AclsImageTest.IMAGE1, - StreamsMetadataImageTest.IMAGE1); + S3StreamsMetadataImageTest.IMAGE1); DELTA1 = new MetadataDelta.Builder(). setImage(IMAGE1). 
@@ -56,7 +56,7 @@ public class MetadataImageTest { RecordTestUtils.replayAll(DELTA1, ClientQuotasImageTest.DELTA1_RECORDS); RecordTestUtils.replayAll(DELTA1, ProducerIdsImageTest.DELTA1_RECORDS); RecordTestUtils.replayAll(DELTA1, AclsImageTest.DELTA1_RECORDS); - RecordTestUtils.replayAll(DELTA1, StreamsMetadataImageTest.DELTA1_RECORDS); + RecordTestUtils.replayAll(DELTA1, S3StreamsMetadataImageTest.DELTA1_RECORDS); IMAGE2 = new MetadataImage( new MetadataProvenance(200, 5, 4000), @@ -67,7 +67,7 @@ public class MetadataImageTest { ClientQuotasImageTest.IMAGE2, ProducerIdsImageTest.IMAGE2, AclsImageTest.IMAGE2, - StreamsMetadataImageTest.IMAGE2); + S3StreamsMetadataImageTest.IMAGE2); } @Test diff --git a/metadata/src/test/java/org/apache/kafka/image/StreamsMetadataImageTest.java b/metadata/src/test/java/org/apache/kafka/image/S3StreamsMetadataImageTest.java similarity index 53% rename from metadata/src/test/java/org/apache/kafka/image/StreamsMetadataImageTest.java rename to metadata/src/test/java/org/apache/kafka/image/S3StreamsMetadataImageTest.java index 434b36ea02..9e3279e8ab 100644 --- a/metadata/src/test/java/org/apache/kafka/image/StreamsMetadataImageTest.java +++ b/metadata/src/test/java/org/apache/kafka/image/S3StreamsMetadataImageTest.java @@ -33,11 +33,11 @@ import org.apache.kafka.common.metadata.StreamRecord; import org.apache.kafka.common.metadata.WALObjectRecord; import org.apache.kafka.metadata.stream.RangeMetadata; -import org.apache.kafka.metadata.stream.ObjectStreamIndex; +import org.apache.kafka.metadata.stream.S3ObjectStreamIndex; import org.apache.kafka.metadata.stream.S3ObjectState; import org.apache.kafka.metadata.stream.S3ObjectType; -import org.apache.kafka.metadata.stream.StreamObject; -import org.apache.kafka.metadata.stream.WALObject; +import org.apache.kafka.metadata.stream.S3StreamObject; +import org.apache.kafka.metadata.stream.S3WALObject; import org.apache.kafka.image.writer.ImageWriterOptions; import 
org.apache.kafka.image.writer.RecordListWriter; import org.apache.kafka.metadata.RecordTestUtils; @@ -46,7 +46,7 @@ import org.junit.jupiter.api.Timeout; @Timeout(value = 40) -public class StreamsMetadataImageTest { +public class S3StreamsMetadataImageTest { private static final long KB = 1024; @@ -61,11 +61,11 @@ public class StreamsMetadataImageTest { private static final long STREAM_OBJECT_SIZE = 320 * GB; - static final StreamsMetadataImage IMAGE1; + static final S3StreamsMetadataImage IMAGE1; static final List DELTA1_RECORDS; - static final StreamsMetadataImage IMAGE2; + static final S3StreamsMetadataImage IMAGE2; // TODO: complete the test for StreamsMetadataImage @@ -77,13 +77,13 @@ public class StreamsMetadataImageTest { @Test public void testBasicChange() { - List streamMetadataImages = new ArrayList<>(); + List s3StreamMetadataImages = new ArrayList<>(); Integer brokerId0 = 0; Integer brokerId1 = 1; Integer brokerId2 = 2; // 1. empty image - StreamsMetadataImage image0 = StreamsMetadataImage.EMPTY; + S3StreamsMetadataImage image0 = S3StreamsMetadataImage.EMPTY; // 2. 
create stream and create range Long streamId0 = 0L; @@ -113,27 +113,27 @@ public void testBasicChange() { .setBrokerId(brokerId1) .setEpoch(1); records.add(new ApiMessageAndVersion(rangeRecord01, (short) 0)); - StreamsMetadataDelta delta0 = new StreamsMetadataDelta(image0); + S3StreamsMetadataDelta delta0 = new S3StreamsMetadataDelta(image0); RecordTestUtils.replayAll(delta0, records); - StreamsMetadataImage image1 = delta0.apply(); + S3StreamsMetadataImage image1 = delta0.apply(); // check the image1 assertEquals(2, image1.getStreamsMetadata().size()); - StreamMetadataImage streamMetadataImage1 = image1.getStreamsMetadata().get(streamId0); - assertNotNull(streamMetadataImage1); - assertEquals(1, streamMetadataImage1.getRanges().size()); - assertEquals(1, streamMetadataImage1.getEpoch()); - assertEquals(0, streamMetadataImage1.getStartOffset()); - RangeMetadata rangeMetadata1 = streamMetadataImage1.getRanges().get(0); + S3StreamMetadataImage s3StreamMetadataImage1 = image1.getStreamsMetadata().get(streamId0); + assertNotNull(s3StreamMetadataImage1); + assertEquals(1, s3StreamMetadataImage1.getRanges().size()); + assertEquals(1, s3StreamMetadataImage1.getEpoch()); + assertEquals(0, s3StreamMetadataImage1.getStartOffset()); + RangeMetadata rangeMetadata1 = s3StreamMetadataImage1.getRanges().get(0); assertNotNull(rangeMetadata1); assertEquals(RangeMetadata.of(rangeRecord00), rangeMetadata1); - StreamMetadataImage streamMetadataImage11 = image1.getStreamsMetadata().get(streamId1); - assertNotNull(streamMetadataImage11); - assertEquals(1, streamMetadataImage11.getRanges().size()); - assertEquals(1, streamMetadataImage11.getEpoch()); - assertEquals(0, streamMetadataImage11.getStartOffset()); - RangeMetadata rangeMetadata11 = streamMetadataImage11.getRanges().get(0); + S3StreamMetadataImage s3StreamMetadataImage11 = image1.getStreamsMetadata().get(streamId1); + assertNotNull(s3StreamMetadataImage11); + assertEquals(1, s3StreamMetadataImage11.getRanges().size()); + 
assertEquals(1, s3StreamMetadataImage11.getEpoch()); + assertEquals(0, s3StreamMetadataImage11.getStartOffset()); + RangeMetadata rangeMetadata11 = s3StreamMetadataImage11.getRanges().get(0); assertNotNull(rangeMetadata11); assertEquals(RangeMetadata.of(rangeRecord01), rangeMetadata11); @@ -160,38 +160,38 @@ public void testBasicChange() { records.add(new ApiMessageAndVersion(walObjectRecord0, (short) 0)); records.add(new ApiMessageAndVersion(walObjectRecord1, (short) 0)); records.add(new ApiMessageAndVersion(walObjectRecord2, (short) 0)); - StreamsMetadataDelta delta1 = new StreamsMetadataDelta(image1); + S3StreamsMetadataDelta delta1 = new S3StreamsMetadataDelta(image1); RecordTestUtils.replayAll(delta1, records); - StreamsMetadataImage image2 = delta1.apply(); + S3StreamsMetadataImage image2 = delta1.apply(); // check the image2 assertEquals(2, image2.getBrokerStreamsMetadata().size()); - BrokerStreamMetadataImage brokerStreamMetadataImage20 = image2.getBrokerStreamsMetadata().get(brokerId0); - assertNotNull(brokerStreamMetadataImage20); - assertEquals(1, brokerStreamMetadataImage20.getWalObjects().size()); - WALObject walObject0 = brokerStreamMetadataImage20.getWalObjects().get(0); - assertEquals(brokerId0, walObject0.getBrokerId()); - assertEquals(S3ObjectType.WAL_LOOSE, walObject0.getObjectType()); - assertEquals(S3ObjectState.APPLIED, walObject0.getS3ObjectState()); - assertEquals(0L, walObject0.getObjectId()); - BrokerStreamMetadataImage brokerStreamMetadataImage21 = image2.getBrokerStreamsMetadata().get(brokerId1); - assertNotNull(brokerStreamMetadataImage21); - assertEquals(2, brokerStreamMetadataImage21.getWalObjects().size()); - WALObject walObject1 = brokerStreamMetadataImage21.getWalObjects().get(0); - assertEquals(brokerId1, walObject1.getBrokerId()); - assertEquals(S3ObjectType.WAL_LOOSE, walObject1.getObjectType()); - assertEquals(S3ObjectState.APPLIED, walObject1.getS3ObjectState()); - assertEquals(1L, walObject1.getObjectId()); - WALObject 
walObject2 = brokerStreamMetadataImage21.getWalObjects().get(1); - assertEquals(brokerId1, walObject2.getBrokerId()); - assertEquals(S3ObjectType.WAL_LOOSE, walObject2.getObjectType()); - assertEquals(S3ObjectState.APPLIED, walObject2.getS3ObjectState()); - assertEquals(2L, walObject2.getObjectId()); + BrokerS3WALMetadataImage brokerS3WALMetadataImage20 = image2.getBrokerStreamsMetadata().get(brokerId0); + assertNotNull(brokerS3WALMetadataImage20); + assertEquals(1, brokerS3WALMetadataImage20.getWalObjects().size()); + S3WALObject s3WalObject0 = brokerS3WALMetadataImage20.getWalObjects().get(0); + assertEquals(brokerId0, s3WalObject0.getBrokerId()); + assertEquals(S3ObjectType.WAL_LOOSE, s3WalObject0.getObjectType()); + assertEquals(S3ObjectState.APPLIED, s3WalObject0.getS3ObjectState()); + assertEquals(0L, s3WalObject0.getObjectId()); + BrokerS3WALMetadataImage brokerS3WALMetadataImage21 = image2.getBrokerStreamsMetadata().get(brokerId1); + assertNotNull(brokerS3WALMetadataImage21); + assertEquals(2, brokerS3WALMetadataImage21.getWalObjects().size()); + S3WALObject s3WalObject1 = brokerS3WALMetadataImage21.getWalObjects().get(0); + assertEquals(brokerId1, s3WalObject1.getBrokerId()); + assertEquals(S3ObjectType.WAL_LOOSE, s3WalObject1.getObjectType()); + assertEquals(S3ObjectState.APPLIED, s3WalObject1.getS3ObjectState()); + assertEquals(1L, s3WalObject1.getObjectId()); + S3WALObject s3WalObject2 = brokerS3WALMetadataImage21.getWalObjects().get(1); + assertEquals(brokerId1, s3WalObject2.getBrokerId()); + assertEquals(S3ObjectType.WAL_LOOSE, s3WalObject2.getObjectType()); + assertEquals(S3ObjectState.APPLIED, s3WalObject2.getS3ObjectState()); + assertEquals(2L, s3WalObject2.getObjectId()); // 4. 
create WALObject1, WALObject2, mark delete WALObject0 - List streamIndicesInWALObject1 = Arrays.asList( - new ObjectStreamIndex(streamId0, 0L, 100L), - new ObjectStreamIndex(streamId1, 0L, 200L) + List streamIndicesInWALObject1 = Arrays.asList( + new S3ObjectStreamIndex(streamId0, 0L, 100L), + new S3ObjectStreamIndex(streamId1, 0L, 200L) ); WALObjectRecord walObjectRecord11 = new WALObjectRecord() .setBrokerId(brokerId1) @@ -199,13 +199,13 @@ public void testBasicChange() { .setObjectSize(WAL_LOOSE_SIZE) .setCreateTimeInMs(System.currentTimeMillis()) .setObjectType((byte) S3ObjectType.WAL_LOOSE.ordinal()) - .setStreamsIndex(streamIndicesInWALObject1.stream().map(ObjectStreamIndex::toRecordStreamIndex).collect( + .setStreamsIndex(streamIndicesInWALObject1.stream().map(S3ObjectStreamIndex::toRecordStreamIndex).collect( Collectors.toList())) .setObjectState((byte) S3ObjectState.CREATED.ordinal()); - List streamIndicesInWALObject2 = Arrays.asList( - new ObjectStreamIndex(streamId0, 101L, 200L), - new ObjectStreamIndex(streamId1, 201L, 300L) + List streamIndicesInWALObject2 = Arrays.asList( + new S3ObjectStreamIndex(streamId0, 101L, 200L), + new S3ObjectStreamIndex(streamId1, 201L, 300L) ); WALObjectRecord walObjectRecord21 = new WALObjectRecord() .setBrokerId(brokerId1) @@ -213,7 +213,7 @@ public void testBasicChange() { .setObjectSize(WAL_LOOSE_SIZE) .setCreateTimeInMs(System.currentTimeMillis()) .setObjectType((byte) S3ObjectType.WAL_LOOSE.ordinal()) - .setStreamsIndex(streamIndicesInWALObject2.stream().map(ObjectStreamIndex::toRecordStreamIndex).collect( + .setStreamsIndex(streamIndicesInWALObject2.stream().map(S3ObjectStreamIndex::toRecordStreamIndex).collect( Collectors.toList())) .setObjectState((byte) S3ObjectState.CREATED.ordinal()); WALObjectRecord walObjectRecord01 = new WALObjectRecord() @@ -225,37 +225,37 @@ public void testBasicChange() { records.add(new ApiMessageAndVersion(walObjectRecord11, (short) 0)); records.add(new 
ApiMessageAndVersion(walObjectRecord21, (short) 0)); records.add(new ApiMessageAndVersion(walObjectRecord01, (short) 0)); - StreamsMetadataDelta delta2 = new StreamsMetadataDelta(image2); + S3StreamsMetadataDelta delta2 = new S3StreamsMetadataDelta(image2); RecordTestUtils.replayAll(delta2, records); - StreamsMetadataImage image3 = delta2.apply(); + S3StreamsMetadataImage image3 = delta2.apply(); // check the image3 assertEquals(2, image3.getBrokerStreamsMetadata().size()); - BrokerStreamMetadataImage brokerStreamMetadataImage30 = image3.getBrokerStreamsMetadata().get(brokerId0); - assertNotNull(brokerStreamMetadataImage30); - assertEquals(1, brokerStreamMetadataImage30.getWalObjects().size()); - WALObject walObject01 = brokerStreamMetadataImage30.getWalObjects().get(0); - assertEquals(brokerId0, walObject01.getBrokerId()); - assertEquals(S3ObjectType.WAL_LOOSE, walObject01.getObjectType()); - assertEquals(S3ObjectState.MARK_DESTROYED, walObject01.getS3ObjectState()); - BrokerStreamMetadataImage brokerStreamMetadataImage31 = image3.getBrokerStreamsMetadata().get(brokerId1); - assertNotNull(brokerStreamMetadataImage31); - assertEquals(2, brokerStreamMetadataImage31.getWalObjects().size()); - WALObject walObject11 = brokerStreamMetadataImage31.getWalObjects().get(0); - assertEquals(brokerId1, walObject11.getBrokerId()); - assertEquals(S3ObjectType.WAL_LOOSE, walObject11.getObjectType()); - assertEquals(S3ObjectState.CREATED, walObject11.getS3ObjectState()); - Map streamIndexVerify1 = walObject11.getStreamsIndex(); + BrokerS3WALMetadataImage brokerS3WALMetadataImage30 = image3.getBrokerStreamsMetadata().get(brokerId0); + assertNotNull(brokerS3WALMetadataImage30); + assertEquals(1, brokerS3WALMetadataImage30.getWalObjects().size()); + S3WALObject s3WalObject01 = brokerS3WALMetadataImage30.getWalObjects().get(0); + assertEquals(brokerId0, s3WalObject01.getBrokerId()); + assertEquals(S3ObjectType.WAL_LOOSE, s3WalObject01.getObjectType()); + 
assertEquals(S3ObjectState.MARK_DESTROYED, s3WalObject01.getS3ObjectState()); + BrokerS3WALMetadataImage brokerS3WALMetadataImage31 = image3.getBrokerStreamsMetadata().get(brokerId1); + assertNotNull(brokerS3WALMetadataImage31); + assertEquals(2, brokerS3WALMetadataImage31.getWalObjects().size()); + S3WALObject s3WalObject11 = brokerS3WALMetadataImage31.getWalObjects().get(0); + assertEquals(brokerId1, s3WalObject11.getBrokerId()); + assertEquals(S3ObjectType.WAL_LOOSE, s3WalObject11.getObjectType()); + assertEquals(S3ObjectState.CREATED, s3WalObject11.getS3ObjectState()); + Map streamIndexVerify1 = s3WalObject11.getStreamsIndex(); assertEquals(2, streamIndexVerify1.size()); assertEquals(0L, streamIndexVerify1.get(streamId0).getStartOffset()); assertEquals(100L, streamIndexVerify1.get(streamId0).getEndOffset()); assertEquals(0L, streamIndexVerify1.get(streamId1).getStartOffset()); assertEquals(200L, streamIndexVerify1.get(streamId1).getEndOffset()); - WALObject walObject21 = brokerStreamMetadataImage31.getWalObjects().get(1); - assertEquals(brokerId1, walObject21.getBrokerId()); - assertEquals(S3ObjectType.WAL_LOOSE, walObject21.getObjectType()); - assertEquals(S3ObjectState.CREATED, walObject21.getS3ObjectState()); - Map streamIndexVerify2 = walObject21.getStreamsIndex(); + S3WALObject s3WalObject21 = brokerS3WALMetadataImage31.getWalObjects().get(1); + assertEquals(brokerId1, s3WalObject21.getBrokerId()); + assertEquals(S3ObjectType.WAL_LOOSE, s3WalObject21.getObjectType()); + assertEquals(S3ObjectState.CREATED, s3WalObject21.getS3ObjectState()); + Map streamIndexVerify2 = s3WalObject21.getStreamsIndex(); assertEquals(2, streamIndexVerify2.size()); assertEquals(101L, streamIndexVerify2.get(streamId0).getStartOffset()); assertEquals(200L, streamIndexVerify2.get(streamId0).getEndOffset()); @@ -276,9 +276,9 @@ public void testBasicChange() { .setBrokerId(brokerId1) .setObjectType((byte) S3ObjectType.WAL_LOOSE.ordinal()) .setObjectState((byte) 
S3ObjectState.MARK_DESTROYED.ordinal()); - List streamIndicesInWALObject3 = Arrays.asList( - new ObjectStreamIndex(streamId0, 0L, 200L), - new ObjectStreamIndex(streamId1, 0L, 300L) + List streamIndicesInWALObject3 = Arrays.asList( + new S3ObjectStreamIndex(streamId0, 0L, 200L), + new S3ObjectStreamIndex(streamId1, 0L, 300L) ); WALObjectRecord walObjectRecord3 = new WALObjectRecord() .setObjectId(3L) @@ -288,39 +288,39 @@ public void testBasicChange() { .setObjectState((byte) S3ObjectState.CREATED.ordinal()) .setApplyTimeInMs(System.currentTimeMillis()) .setObjectSize(WAL_MINOR_COMPACT_SIZE) - .setStreamsIndex(streamIndicesInWALObject3.stream().map(ObjectStreamIndex::toRecordStreamIndex).collect( + .setStreamsIndex(streamIndicesInWALObject3.stream().map(S3ObjectStreamIndex::toRecordStreamIndex).collect( Collectors.toList())); records.clear(); records.add(new ApiMessageAndVersion(removeWALObjectRecord0, (short) 0)); records.add(new ApiMessageAndVersion(walObjectRecord12, (short) 0)); records.add(new ApiMessageAndVersion(walObjectRecord22, (short) 0)); records.add(new ApiMessageAndVersion(walObjectRecord3, (short) 0)); - StreamsMetadataDelta delta3 = new StreamsMetadataDelta(image3); + S3StreamsMetadataDelta delta3 = new S3StreamsMetadataDelta(image3); RecordTestUtils.replayAll(delta3, records); - StreamsMetadataImage image4 = delta3.apply(); + S3StreamsMetadataImage image4 = delta3.apply(); // check the image4 assertEquals(2, image4.getBrokerStreamsMetadata().size()); - BrokerStreamMetadataImage brokerStreamMetadataImage40 = image4.getBrokerStreamsMetadata().get(brokerId0); - assertNotNull(brokerStreamMetadataImage40); - assertEquals(0, brokerStreamMetadataImage40.getWalObjects().size()); - BrokerStreamMetadataImage brokerStreamMetadataImage41 = image4.getBrokerStreamsMetadata().get(brokerId1); - assertNotNull(brokerStreamMetadataImage41); - assertEquals(3, brokerStreamMetadataImage41.getWalObjects().size()); - WALObject walObject12 = 
brokerStreamMetadataImage41.getWalObjects().get(0); - assertEquals(brokerId1, walObject12.getBrokerId()); - assertEquals(S3ObjectType.WAL_LOOSE, walObject12.getObjectType()); - assertEquals(S3ObjectState.MARK_DESTROYED, walObject12.getS3ObjectState()); - WALObject walObject22 = brokerStreamMetadataImage41.getWalObjects().get(1); - assertEquals(brokerId1, walObject22.getBrokerId()); - assertEquals(S3ObjectType.WAL_LOOSE, walObject22.getObjectType()); - assertEquals(S3ObjectState.MARK_DESTROYED, walObject22.getS3ObjectState()); - WALObject walObject3 = brokerStreamMetadataImage41.getWalObjects().get(2); - assertEquals(brokerId1, walObject3.getBrokerId()); - assertEquals(S3ObjectType.WAL_MINOR, walObject3.getObjectType()); - assertEquals(S3ObjectState.CREATED, walObject3.getS3ObjectState()); - assertEquals(3L, walObject3.getObjectId()); - Map streamIndexVerify3 = walObject3.getStreamsIndex(); + BrokerS3WALMetadataImage brokerS3WALMetadataImage40 = image4.getBrokerStreamsMetadata().get(brokerId0); + assertNotNull(brokerS3WALMetadataImage40); + assertEquals(0, brokerS3WALMetadataImage40.getWalObjects().size()); + BrokerS3WALMetadataImage brokerS3WALMetadataImage41 = image4.getBrokerStreamsMetadata().get(brokerId1); + assertNotNull(brokerS3WALMetadataImage41); + assertEquals(3, brokerS3WALMetadataImage41.getWalObjects().size()); + S3WALObject s3WalObject12 = brokerS3WALMetadataImage41.getWalObjects().get(0); + assertEquals(brokerId1, s3WalObject12.getBrokerId()); + assertEquals(S3ObjectType.WAL_LOOSE, s3WalObject12.getObjectType()); + assertEquals(S3ObjectState.MARK_DESTROYED, s3WalObject12.getS3ObjectState()); + S3WALObject s3WalObject22 = brokerS3WALMetadataImage41.getWalObjects().get(1); + assertEquals(brokerId1, s3WalObject22.getBrokerId()); + assertEquals(S3ObjectType.WAL_LOOSE, s3WalObject22.getObjectType()); + assertEquals(S3ObjectState.MARK_DESTROYED, s3WalObject22.getS3ObjectState()); + S3WALObject s3WalObject3 = 
brokerS3WALMetadataImage41.getWalObjects().get(2); + assertEquals(brokerId1, s3WalObject3.getBrokerId()); + assertEquals(S3ObjectType.WAL_MINOR, s3WalObject3.getObjectType()); + assertEquals(S3ObjectState.CREATED, s3WalObject3.getS3ObjectState()); + assertEquals(3L, s3WalObject3.getObjectId()); + Map streamIndexVerify3 = s3WalObject3.getStreamsIndex(); assertEquals(2, streamIndexVerify3.size()); assertEquals(0L, streamIndexVerify3.get(streamId0).getStartOffset()); assertEquals(200L, streamIndexVerify3.get(streamId0).getEndOffset()); @@ -328,24 +328,24 @@ public void testBasicChange() { assertEquals(300L, streamIndexVerify3.get(streamId1).getEndOffset()); // 6. split WALObject3 by streamId to StreamObject4 and StreamObject5 - ObjectStreamIndex objectStreamIndex4 = new ObjectStreamIndex(streamId0, 0L, 200L); - ObjectStreamIndex objectStreamIndex5 = new ObjectStreamIndex(streamId1, 0L, 300L); + S3ObjectStreamIndex s3ObjectStreamIndex4 = new S3ObjectStreamIndex(streamId0, 0L, 200L); + S3ObjectStreamIndex s3ObjectStreamIndex5 = new S3ObjectStreamIndex(streamId1, 0L, 300L); StreamObjectRecord streamObjectRecord4 = new StreamObjectRecord() .setObjectId(4L) .setStreamId(streamId0) .setObjectSize(STREAM_OBJECT_SIZE) .setObjectType((byte) S3ObjectType.STREAM.ordinal()) .setCreateTimeInMs(System.currentTimeMillis()) - .setStartOffset(objectStreamIndex4.getStartOffset()) - .setEndOffset(objectStreamIndex4.getEndOffset()); + .setStartOffset(s3ObjectStreamIndex4.getStartOffset()) + .setEndOffset(s3ObjectStreamIndex4.getEndOffset()); StreamObjectRecord streamObjectRecord5 = new StreamObjectRecord() .setObjectId(5L) .setStreamId(streamId1) .setObjectSize(STREAM_OBJECT_SIZE) .setObjectType((byte) S3ObjectType.STREAM.ordinal()) .setCreateTimeInMs(System.currentTimeMillis()) - .setStartOffset(objectStreamIndex5.getStartOffset()) - .setEndOffset(objectStreamIndex5.getEndOffset()); + .setStartOffset(s3ObjectStreamIndex5.getStartOffset()) + 
.setEndOffset(s3ObjectStreamIndex5.getEndOffset()); RemoveWALObjectRecord removeWALObjectRecord3 = new RemoveWALObjectRecord() .setObjectId(3L) .setBrokerId(brokerId1); @@ -353,45 +353,45 @@ public void testBasicChange() { records.add(new ApiMessageAndVersion(streamObjectRecord4, (short) 0)); records.add(new ApiMessageAndVersion(streamObjectRecord5, (short) 0)); records.add(new ApiMessageAndVersion(removeWALObjectRecord3, (short) 0)); - StreamsMetadataDelta delta4 = new StreamsMetadataDelta(image4); + S3StreamsMetadataDelta delta4 = new S3StreamsMetadataDelta(image4); RecordTestUtils.replayAll(delta4, records); - StreamsMetadataImage image5 = delta4.apply(); + S3StreamsMetadataImage image5 = delta4.apply(); // check the image5 assertEquals(2, image5.getBrokerStreamsMetadata().size()); - BrokerStreamMetadataImage brokerStreamMetadataImage50 = image5.getBrokerStreamsMetadata().get(brokerId0); - assertNotNull(brokerStreamMetadataImage50); - assertEquals(0, brokerStreamMetadataImage50.getWalObjects().size()); - BrokerStreamMetadataImage brokerStreamMetadataImage51 = image5.getBrokerStreamsMetadata().get(brokerId1); - assertNotNull(brokerStreamMetadataImage51); - assertEquals(0, brokerStreamMetadataImage51.getWalObjects().size()); + BrokerS3WALMetadataImage brokerS3WALMetadataImage50 = image5.getBrokerStreamsMetadata().get(brokerId0); + assertNotNull(brokerS3WALMetadataImage50); + assertEquals(0, brokerS3WALMetadataImage50.getWalObjects().size()); + BrokerS3WALMetadataImage brokerS3WALMetadataImage51 = image5.getBrokerStreamsMetadata().get(brokerId1); + assertNotNull(brokerS3WALMetadataImage51); + assertEquals(0, brokerS3WALMetadataImage51.getWalObjects().size()); assertEquals(2, image5.getStreamsMetadata().size()); - StreamMetadataImage streamMetadataImage50 = image5.getStreamsMetadata().get(streamId0); - assertNotNull(streamMetadataImage50); - assertEquals(1, streamMetadataImage50.getRanges().size()); - assertEquals(1, streamMetadataImage50.getEpoch()); - 
assertEquals(0, streamMetadataImage50.getStartOffset()); - assertEquals(1, streamMetadataImage50.getStreams()); - StreamObject streamObject4 = streamMetadataImage50.getStreams().get(0); - assertEquals(4L, streamObject4.getObjectId()); - assertEquals(STREAM_OBJECT_SIZE, streamObject4.getObjectSize()); - assertEquals(S3ObjectType.STREAM, streamObject4.getObjectType()); - assertEquals(S3ObjectState.CREATED, streamObject4.getS3ObjectState()); - assertEquals(objectStreamIndex4, streamObject4.getStreamIndex()); - - StreamMetadataImage streamMetadataImage51 = image5.getStreamsMetadata().get(streamId1); - assertNotNull(streamMetadataImage51); - assertEquals(1, streamMetadataImage51.getRanges().size()); - assertEquals(1, streamMetadataImage51.getEpoch()); - assertEquals(0, streamMetadataImage51.getStartOffset()); - assertEquals(1, streamMetadataImage51.getStreams()); - StreamObject streamObject5 = streamMetadataImage51.getStreams().get(0); - assertEquals(5L, streamObject5.getObjectId()); - assertEquals(STREAM_OBJECT_SIZE, streamObject5.getObjectSize()); - assertEquals(S3ObjectType.STREAM, streamObject5.getObjectType()); - assertEquals(S3ObjectState.CREATED, streamObject5.getS3ObjectState()); - assertEquals(objectStreamIndex5, streamObject5.getStreamIndex()); + S3StreamMetadataImage s3StreamMetadataImage50 = image5.getStreamsMetadata().get(streamId0); + assertNotNull(s3StreamMetadataImage50); + assertEquals(1, s3StreamMetadataImage50.getRanges().size()); + assertEquals(1, s3StreamMetadataImage50.getEpoch()); + assertEquals(0, s3StreamMetadataImage50.getStartOffset()); + assertEquals(1, s3StreamMetadataImage50.getStreams()); + S3StreamObject s3StreamObject4 = s3StreamMetadataImage50.getStreams().get(0); + assertEquals(4L, s3StreamObject4.getObjectId()); + assertEquals(STREAM_OBJECT_SIZE, s3StreamObject4.getObjectSize()); + assertEquals(S3ObjectType.STREAM, s3StreamObject4.getObjectType()); + assertEquals(S3ObjectState.CREATED, s3StreamObject4.getS3ObjectState()); + 
assertEquals(s3ObjectStreamIndex4, s3StreamObject4.getStreamIndex()); + + S3StreamMetadataImage s3StreamMetadataImage51 = image5.getStreamsMetadata().get(streamId1); + assertNotNull(s3StreamMetadataImage51); + assertEquals(1, s3StreamMetadataImage51.getRanges().size()); + assertEquals(1, s3StreamMetadataImage51.getEpoch()); + assertEquals(0, s3StreamMetadataImage51.getStartOffset()); + assertEquals(1, s3StreamMetadataImage51.getStreams()); + S3StreamObject s3StreamObject5 = s3StreamMetadataImage51.getStreams().get(0); + assertEquals(5L, s3StreamObject5.getObjectId()); + assertEquals(STREAM_OBJECT_SIZE, s3StreamObject5.getObjectSize()); + assertEquals(S3ObjectType.STREAM, s3StreamObject5.getObjectType()); + assertEquals(S3ObjectState.CREATED, s3StreamObject5.getS3ObjectState()); + assertEquals(s3ObjectStreamIndex5, s3StreamObject5.getStreamIndex()); // 7. remove streamObject4 and remove stream1 RemoveStreamObjectRecord removeStreamObjectRecord4 = new RemoveStreamObjectRecord() @@ -402,33 +402,33 @@ public void testBasicChange() { records.clear(); records.add(new ApiMessageAndVersion(removeStreamObjectRecord4, (short) 0)); records.add(new ApiMessageAndVersion(removeStreamRecord, (short) 0)); - StreamsMetadataDelta delta5 = new StreamsMetadataDelta(image5); + S3StreamsMetadataDelta delta5 = new S3StreamsMetadataDelta(image5); RecordTestUtils.replayAll(delta5, records); - StreamsMetadataImage image6 = delta5.apply(); + S3StreamsMetadataImage image6 = delta5.apply(); // check the image6 assertEquals(2, image6.getBrokerStreamsMetadata().size()); - BrokerStreamMetadataImage brokerStreamMetadataImage60 = image6.getBrokerStreamsMetadata().get(brokerId0); - assertNotNull(brokerStreamMetadataImage60); - assertEquals(0, brokerStreamMetadataImage60.getWalObjects().size()); - BrokerStreamMetadataImage brokerStreamMetadataImage61 = image6.getBrokerStreamsMetadata().get(brokerId1); - assertNotNull(brokerStreamMetadataImage61); - assertEquals(0, 
brokerStreamMetadataImage61.getWalObjects().size()); + BrokerS3WALMetadataImage brokerS3WALMetadataImage60 = image6.getBrokerStreamsMetadata().get(brokerId0); + assertNotNull(brokerS3WALMetadataImage60); + assertEquals(0, brokerS3WALMetadataImage60.getWalObjects().size()); + BrokerS3WALMetadataImage brokerS3WALMetadataImage61 = image6.getBrokerStreamsMetadata().get(brokerId1); + assertNotNull(brokerS3WALMetadataImage61); + assertEquals(0, brokerS3WALMetadataImage61.getWalObjects().size()); assertEquals(1, image6.getStreamsMetadata().size()); - StreamMetadataImage streamMetadataImage60 = image6.getStreamsMetadata().get(streamId0); - assertNotNull(streamMetadataImage60); - assertEquals(1, streamMetadataImage60.getRanges().size()); - assertEquals(0, streamMetadataImage60.getStreams().size()); + S3StreamMetadataImage s3StreamMetadataImage60 = image6.getStreamsMetadata().get(streamId0); + assertNotNull(s3StreamMetadataImage60); + assertEquals(1, s3StreamMetadataImage60.getRanges().size()); + assertEquals(0, s3StreamMetadataImage60.getStreams().size()); } - private void testToImageAndBack(StreamsMetadataImage image) { + private void testToImageAndBack(S3StreamsMetadataImage image) { RecordListWriter writer = new RecordListWriter(); image.write(writer, new ImageWriterOptions.Builder().build()); - StreamsMetadataDelta delta = new StreamsMetadataDelta(StreamsMetadataImage.EMPTY); + S3StreamsMetadataDelta delta = new S3StreamsMetadataDelta(S3StreamsMetadataImage.EMPTY); RecordTestUtils.replayAll(delta, writer.records()); - StreamsMetadataImage newImage = delta.apply(); + S3StreamsMetadataImage newImage = delta.apply(); assertEquals(image, newImage); } } From 3712dde01bf4e92844027a0c7b67a7c23b060cf3 Mon Sep 17 00:00:00 2001 From: TheR1sing3un Date: Mon, 21 Aug 2023 11:14:56 +0800 Subject: [PATCH 08/10] fix(s3): fix name changing related compile errors 1. 
fix name changing related compile errors Signed-off-by: TheR1sing3un --- .../org/apache/kafka/image/MetadataDelta.java | 32 ++++++------ .../kafka/image/S3StreamMetadataDelta.java | 10 ++-- .../kafka/image/S3StreamMetadataImage.java | 4 +- .../kafka/image/S3StreamsMetadataDelta.java | 23 +++++---- .../kafka/image/S3StreamsMetadataImage.java | 18 +++---- .../kafka/metadata/stream/S3StreamObject.java | 6 +-- .../image/S3StreamsMetadataImageTest.java | 50 +++++++++---------- 7 files changed, 72 insertions(+), 71 deletions(-) diff --git a/metadata/src/main/java/org/apache/kafka/image/MetadataDelta.java b/metadata/src/main/java/org/apache/kafka/image/MetadataDelta.java index 24b824012d..4a41b0bf6a 100644 --- a/metadata/src/main/java/org/apache/kafka/image/MetadataDelta.java +++ b/metadata/src/main/java/org/apache/kafka/image/MetadataDelta.java @@ -31,12 +31,12 @@ import org.apache.kafka.common.metadata.RegisterBrokerRecord; import org.apache.kafka.common.metadata.RemoveAccessControlEntryRecord; import org.apache.kafka.common.metadata.RemoveRangeRecord; -import org.apache.kafka.common.metadata.RemoveStreamObjectRecord; -import org.apache.kafka.common.metadata.RemoveStreamRecord; +import org.apache.kafka.common.metadata.RemoveS3StreamObjectRecord; +import org.apache.kafka.common.metadata.RemoveS3StreamRecord; import org.apache.kafka.common.metadata.RemoveTopicRecord; import org.apache.kafka.common.metadata.RemoveWALObjectRecord; -import org.apache.kafka.common.metadata.StreamObjectRecord; -import org.apache.kafka.common.metadata.StreamRecord; +import org.apache.kafka.common.metadata.S3StreamObjectRecord; +import org.apache.kafka.common.metadata.S3StreamRecord; import org.apache.kafka.common.metadata.TopicRecord; import org.apache.kafka.common.metadata.UnfenceBrokerRecord; import org.apache.kafka.common.metadata.UnregisterBrokerRecord; @@ -230,11 +230,11 @@ public void replay(ApiMessage record) { case ZK_MIGRATION_STATE_RECORD: // TODO handle this break; - case 
STREAM_RECORD: - replay((StreamRecord) record); + case S3_STREAM_RECORD: + replay((S3StreamRecord) record); break; - case REMOVE_STREAM_RECORD: - replay((RemoveStreamRecord) record); + case REMOVE_S3_STREAM_RECORD: + replay((RemoveS3StreamRecord) record); break; case RANGE_RECORD: replay((RangeRecord) record); @@ -242,11 +242,11 @@ public void replay(ApiMessage record) { case REMOVE_RANGE_RECORD: replay((RemoveRangeRecord) record); break; - case STREAM_OBJECT_RECORD: - replay((StreamObjectRecord) record); + case S3_STREAM_OBJECT_RECORD: + replay((S3StreamObjectRecord) record); break; - case REMOVE_STREAM_OBJECT_RECORD: - replay((RemoveStreamObjectRecord) record); + case REMOVE_S3_STREAM_OBJECT_RECORD: + replay((RemoveS3StreamObjectRecord) record); break; case WALOBJECT_RECORD: replay((WALObjectRecord) record); @@ -329,11 +329,11 @@ public void replay(RemoveAccessControlEntryRecord record) { getOrCreateAclsDelta().replay(record); } - public void replay(StreamRecord record) { + public void replay(S3StreamRecord record) { getOrCreateStreamsMetadataDelta().replay(record); } - public void replay(RemoveStreamRecord record) { + public void replay(RemoveS3StreamRecord record) { getOrCreateStreamsMetadataDelta().replay(record); } @@ -345,11 +345,11 @@ public void replay(RemoveRangeRecord record) { getOrCreateStreamsMetadataDelta().replay(record); } - public void replay(StreamObjectRecord record) { + public void replay(S3StreamObjectRecord record) { getOrCreateStreamsMetadataDelta().replay(record); } - public void replay(RemoveStreamObjectRecord record) { + public void replay(RemoveS3StreamObjectRecord record) { getOrCreateStreamsMetadataDelta().replay(record); } diff --git a/metadata/src/main/java/org/apache/kafka/image/S3StreamMetadataDelta.java b/metadata/src/main/java/org/apache/kafka/image/S3StreamMetadataDelta.java index aa5fde9382..9bf59e53f7 100644 --- a/metadata/src/main/java/org/apache/kafka/image/S3StreamMetadataDelta.java +++ 
b/metadata/src/main/java/org/apache/kafka/image/S3StreamMetadataDelta.java @@ -25,15 +25,15 @@ import java.util.Set; import org.apache.kafka.common.metadata.RangeRecord; import org.apache.kafka.common.metadata.RemoveRangeRecord; -import org.apache.kafka.common.metadata.RemoveStreamObjectRecord; -import org.apache.kafka.common.metadata.StreamObjectRecord; +import org.apache.kafka.common.metadata.RemoveS3StreamObjectRecord; +import org.apache.kafka.common.metadata.S3StreamObjectRecord; import org.apache.kafka.metadata.stream.RangeMetadata; import org.apache.kafka.metadata.stream.S3StreamObject; public class S3StreamMetadataDelta { private final S3StreamMetadataImage image; - private Integer newEpoch; + private Long newEpoch; private final Map changedRanges = new HashMap<>(); private final Set removedRanges = new HashSet<>(); @@ -53,11 +53,11 @@ public void replay(RemoveRangeRecord record) { removedRanges.add(record.rangeIndex()); } - public void replay(StreamObjectRecord record) { + public void replay(S3StreamObjectRecord record) { changedS3StreamObjects.add(S3StreamObject.of(record)); } - public void replay(RemoveStreamObjectRecord record) { + public void replay(RemoveS3StreamObjectRecord record) { removedS3StreamObjects.add(new S3StreamObject(record.objectId())); } diff --git a/metadata/src/main/java/org/apache/kafka/image/S3StreamMetadataImage.java b/metadata/src/main/java/org/apache/kafka/image/S3StreamMetadataImage.java index b11c8c98a8..2a95eaa934 100644 --- a/metadata/src/main/java/org/apache/kafka/image/S3StreamMetadataImage.java +++ b/metadata/src/main/java/org/apache/kafka/image/S3StreamMetadataImage.java @@ -20,7 +20,7 @@ import java.util.List; import java.util.Map; import java.util.Objects; -import org.apache.kafka.common.metadata.StreamRecord; +import org.apache.kafka.common.metadata.S3StreamRecord; import org.apache.kafka.metadata.stream.RangeMetadata; import org.apache.kafka.metadata.stream.S3StreamObject; import 
org.apache.kafka.image.writer.ImageWriter; @@ -52,7 +52,7 @@ public S3StreamMetadataImage( } public void write(ImageWriter writer, ImageWriterOptions options) { - writer.write(0, new StreamRecord() + writer.write(0, new S3StreamRecord() .setStreamId(streamId) .setEpoch(epoch) .setStartOffset(startOffset)); diff --git a/metadata/src/main/java/org/apache/kafka/image/S3StreamsMetadataDelta.java b/metadata/src/main/java/org/apache/kafka/image/S3StreamsMetadataDelta.java index 18d77f090d..56ab889daf 100644 --- a/metadata/src/main/java/org/apache/kafka/image/S3StreamsMetadataDelta.java +++ b/metadata/src/main/java/org/apache/kafka/image/S3StreamsMetadataDelta.java @@ -24,11 +24,12 @@ import java.util.Set; import org.apache.kafka.common.metadata.RangeRecord; import org.apache.kafka.common.metadata.RemoveRangeRecord; -import org.apache.kafka.common.metadata.RemoveStreamObjectRecord; -import org.apache.kafka.common.metadata.RemoveStreamRecord; +import org.apache.kafka.common.metadata.RemoveS3StreamObjectRecord; +import org.apache.kafka.common.metadata.RemoveS3StreamRecord; import org.apache.kafka.common.metadata.RemoveWALObjectRecord; -import org.apache.kafka.common.metadata.StreamObjectRecord; -import org.apache.kafka.common.metadata.StreamRecord; +import org.apache.kafka.common.metadata.S3StreamRecord; +import org.apache.kafka.common.metadata.S3StreamObjectRecord; +import org.apache.kafka.common.metadata.S3StreamRecord; import org.apache.kafka.common.metadata.WALObjectRecord; public final class S3StreamsMetadataDelta { @@ -50,7 +51,7 @@ public S3StreamsMetadataDelta(S3StreamsMetadataImage image) { this.image = image; } - public void replay(StreamRecord record) { + public void replay(S3StreamRecord record) { S3StreamMetadataDelta delta; if (!image.getStreamsMetadata().containsKey(record.streamId())) { // create a new StreamMetadata with empty ranges and streams if not exist @@ -67,7 +68,7 @@ public void replay(StreamRecord record) { changedStreams.put(record.streamId(), 
delta); } - public void replay(RemoveStreamRecord record) { + public void replay(RemoveS3StreamRecord record) { // add the streamId to the deletedStreams deletedStreams.add(record.streamId()); } @@ -80,11 +81,11 @@ public void replay(RemoveRangeRecord record) { getOrCreateStreamMetadataDelta(record.streamId()).replay(record); } - public void replay(StreamObjectRecord record) { + public void replay(S3StreamObjectRecord record) { getOrCreateStreamMetadataDelta(record.streamId()).replay(record); } - public void replay(RemoveStreamObjectRecord record) { + public void replay(RemoveS3StreamObjectRecord record) { getOrCreateStreamMetadataDelta(record.streamId()).replay(record); } @@ -109,7 +110,7 @@ private BrokerS3WALMetadataDelta getOrCreateBrokerStreamMetadataDelta(Integer br BrokerS3WALMetadataDelta delta = changedBrokers.get(brokerId); if (delta == null) { delta = new BrokerS3WALMetadataDelta( - image.getBrokerStreamsMetadata(). + image.getBrokerWALMetadata(). getOrDefault(brokerId, new BrokerS3WALMetadataImage(brokerId, Collections.emptyList()))); changedBrokers.put(brokerId, delta); } @@ -118,7 +119,7 @@ private BrokerS3WALMetadataDelta getOrCreateBrokerStreamMetadataDelta(Integer br S3StreamsMetadataImage apply() { Map newStreams = new HashMap<>(image.getStreamsMetadata().size()); - Map newBrokerStreams = new HashMap<>(image.getBrokerStreamsMetadata().size()); + Map newBrokerStreams = new HashMap<>(image.getBrokerWALMetadata().size()); // apply the delta changes of old streams since the last image image.getStreamsMetadata().forEach((streamId, streamMetadataImage) -> { S3StreamMetadataDelta delta = changedStreams.get(streamId); @@ -141,7 +142,7 @@ S3StreamsMetadataImage apply() { }); // apply the delta changes of old brokers since the last image - image.getBrokerStreamsMetadata().forEach((brokerId, brokerStreamMetadataImage) -> { + image.getBrokerWALMetadata().forEach((brokerId, brokerStreamMetadataImage) -> { BrokerS3WALMetadataDelta delta = 
changedBrokers.get(brokerId); if (delta == null) { // no change, check if deleted diff --git a/metadata/src/main/java/org/apache/kafka/image/S3StreamsMetadataImage.java b/metadata/src/main/java/org/apache/kafka/image/S3StreamsMetadataImage.java index 7a8291e4e3..727f4ee3e5 100644 --- a/metadata/src/main/java/org/apache/kafka/image/S3StreamsMetadataImage.java +++ b/metadata/src/main/java/org/apache/kafka/image/S3StreamsMetadataImage.java @@ -30,23 +30,23 @@ public final class S3StreamsMetadataImage { private final Map streamsMetadata; - private final Map brokerStreamsMetadata; + private final Map brokerWALMetadata; public S3StreamsMetadataImage( Map streamsMetadata, - Map brokerStreamsMetadata) { + Map brokerWALMetadata) { this.streamsMetadata = streamsMetadata; - this.brokerStreamsMetadata = brokerStreamsMetadata; + this.brokerWALMetadata = brokerWALMetadata; } boolean isEmpty() { - return this.brokerStreamsMetadata.isEmpty() && this.streamsMetadata.isEmpty(); + return this.brokerWALMetadata.isEmpty() && this.streamsMetadata.isEmpty(); } public void write(ImageWriter writer, ImageWriterOptions options) { streamsMetadata.values().forEach(image -> image.write(writer, options)); - brokerStreamsMetadata.values().forEach(image -> image.write(writer, options)); + brokerWALMetadata.values().forEach(image -> image.write(writer, options)); } @Override @@ -54,16 +54,16 @@ public boolean equals(Object obj) { if (!(obj instanceof S3StreamsMetadataImage)) return false; S3StreamsMetadataImage other = (S3StreamsMetadataImage) obj; return this.streamsMetadata.equals(other.streamsMetadata) - && this.brokerStreamsMetadata.equals(other.brokerStreamsMetadata); + && this.brokerWALMetadata.equals(other.brokerWALMetadata); } @Override public int hashCode() { - return Objects.hash(streamsMetadata, brokerStreamsMetadata); + return Objects.hash(streamsMetadata, brokerWALMetadata); } - public Map getBrokerStreamsMetadata() { - return brokerStreamsMetadata; + public Map getBrokerWALMetadata() 
{ + return brokerWALMetadata; } public Map getStreamsMetadata() { diff --git a/metadata/src/main/java/org/apache/kafka/metadata/stream/S3StreamObject.java b/metadata/src/main/java/org/apache/kafka/metadata/stream/S3StreamObject.java index 74165874a2..6f93c6536b 100644 --- a/metadata/src/main/java/org/apache/kafka/metadata/stream/S3StreamObject.java +++ b/metadata/src/main/java/org/apache/kafka/metadata/stream/S3StreamObject.java @@ -18,7 +18,7 @@ package org.apache.kafka.metadata.stream; import java.util.Optional; -import org.apache.kafka.common.metadata.StreamObjectRecord; +import org.apache.kafka.common.metadata.S3StreamObjectRecord; import org.apache.kafka.server.common.ApiMessageAndVersion; public class S3StreamObject extends S3Object { @@ -69,7 +69,7 @@ public S3ObjectStreamIndex getStreamIndex() { } public ApiMessageAndVersion toRecord() { - return new ApiMessageAndVersion(new StreamObjectRecord() + return new ApiMessageAndVersion(new S3StreamObjectRecord() .setObjectId(objectId) .setStreamId(streamIndex.getStreamId()) .setObjectState((byte) s3ObjectState.ordinal()) @@ -82,7 +82,7 @@ public ApiMessageAndVersion toRecord() { .setEndOffset(streamIndex.getEndOffset()), (short) 0); } - public static S3StreamObject of(StreamObjectRecord record) { + public static S3StreamObject of(S3StreamObjectRecord record) { S3StreamObject s3StreamObject = new S3StreamObject(record.objectId()); s3StreamObject.objectType = S3ObjectType.fromByte(record.objectType()); s3StreamObject.s3ObjectState = S3ObjectState.fromByte(record.objectState()); diff --git a/metadata/src/test/java/org/apache/kafka/image/S3StreamsMetadataImageTest.java b/metadata/src/test/java/org/apache/kafka/image/S3StreamsMetadataImageTest.java index 9e3279e8ab..92c9ff43fd 100644 --- a/metadata/src/test/java/org/apache/kafka/image/S3StreamsMetadataImageTest.java +++ b/metadata/src/test/java/org/apache/kafka/image/S3StreamsMetadataImageTest.java @@ -26,11 +26,11 @@ import java.util.Map; import 
java.util.stream.Collectors; import org.apache.kafka.common.metadata.RangeRecord; -import org.apache.kafka.common.metadata.RemoveStreamObjectRecord; -import org.apache.kafka.common.metadata.RemoveStreamRecord; +import org.apache.kafka.common.metadata.RemoveS3StreamObjectRecord; +import org.apache.kafka.common.metadata.RemoveS3StreamRecord; import org.apache.kafka.common.metadata.RemoveWALObjectRecord; -import org.apache.kafka.common.metadata.StreamObjectRecord; -import org.apache.kafka.common.metadata.StreamRecord; +import org.apache.kafka.common.metadata.S3StreamObjectRecord; +import org.apache.kafka.common.metadata.S3StreamRecord; import org.apache.kafka.common.metadata.WALObjectRecord; import org.apache.kafka.metadata.stream.RangeMetadata; import org.apache.kafka.metadata.stream.S3ObjectStreamIndex; @@ -89,7 +89,7 @@ public void testBasicChange() { Long streamId0 = 0L; Long streamId1 = 1L; List records = new ArrayList<>(); - StreamRecord streamRecord00 = new StreamRecord() + S3StreamRecord streamRecord00 = new S3StreamRecord() .setStreamId(streamId0) .setEpoch(1) .setStartOffset(0L); @@ -101,7 +101,7 @@ public void testBasicChange() { .setBrokerId(brokerId1) .setEpoch(1); records.add(new ApiMessageAndVersion(rangeRecord00, (short) 0)); - StreamRecord streamRecord01 = new StreamRecord() + S3StreamRecord streamRecord01 = new S3StreamRecord() .setStreamId(streamId1) .setEpoch(1) .setStartOffset(0L); @@ -165,8 +165,8 @@ public void testBasicChange() { S3StreamsMetadataImage image2 = delta1.apply(); // check the image2 - assertEquals(2, image2.getBrokerStreamsMetadata().size()); - BrokerS3WALMetadataImage brokerS3WALMetadataImage20 = image2.getBrokerStreamsMetadata().get(brokerId0); + assertEquals(2, image2.getBrokerWALMetadata().size()); + BrokerS3WALMetadataImage brokerS3WALMetadataImage20 = image2.getBrokerWALMetadata().get(brokerId0); assertNotNull(brokerS3WALMetadataImage20); assertEquals(1, brokerS3WALMetadataImage20.getWalObjects().size()); S3WALObject 
s3WalObject0 = brokerS3WALMetadataImage20.getWalObjects().get(0); @@ -174,7 +174,7 @@ public void testBasicChange() { assertEquals(S3ObjectType.WAL_LOOSE, s3WalObject0.getObjectType()); assertEquals(S3ObjectState.APPLIED, s3WalObject0.getS3ObjectState()); assertEquals(0L, s3WalObject0.getObjectId()); - BrokerS3WALMetadataImage brokerS3WALMetadataImage21 = image2.getBrokerStreamsMetadata().get(brokerId1); + BrokerS3WALMetadataImage brokerS3WALMetadataImage21 = image2.getBrokerWALMetadata().get(brokerId1); assertNotNull(brokerS3WALMetadataImage21); assertEquals(2, brokerS3WALMetadataImage21.getWalObjects().size()); S3WALObject s3WalObject1 = brokerS3WALMetadataImage21.getWalObjects().get(0); @@ -230,15 +230,15 @@ public void testBasicChange() { S3StreamsMetadataImage image3 = delta2.apply(); // check the image3 - assertEquals(2, image3.getBrokerStreamsMetadata().size()); - BrokerS3WALMetadataImage brokerS3WALMetadataImage30 = image3.getBrokerStreamsMetadata().get(brokerId0); + assertEquals(2, image3.getBrokerWALMetadata().size()); + BrokerS3WALMetadataImage brokerS3WALMetadataImage30 = image3.getBrokerWALMetadata().get(brokerId0); assertNotNull(brokerS3WALMetadataImage30); assertEquals(1, brokerS3WALMetadataImage30.getWalObjects().size()); S3WALObject s3WalObject01 = brokerS3WALMetadataImage30.getWalObjects().get(0); assertEquals(brokerId0, s3WalObject01.getBrokerId()); assertEquals(S3ObjectType.WAL_LOOSE, s3WalObject01.getObjectType()); assertEquals(S3ObjectState.MARK_DESTROYED, s3WalObject01.getS3ObjectState()); - BrokerS3WALMetadataImage brokerS3WALMetadataImage31 = image3.getBrokerStreamsMetadata().get(brokerId1); + BrokerS3WALMetadataImage brokerS3WALMetadataImage31 = image3.getBrokerWALMetadata().get(brokerId1); assertNotNull(brokerS3WALMetadataImage31); assertEquals(2, brokerS3WALMetadataImage31.getWalObjects().size()); S3WALObject s3WalObject11 = brokerS3WALMetadataImage31.getWalObjects().get(0); @@ -300,11 +300,11 @@ public void testBasicChange() { 
S3StreamsMetadataImage image4 = delta3.apply(); // check the image4 - assertEquals(2, image4.getBrokerStreamsMetadata().size()); - BrokerS3WALMetadataImage brokerS3WALMetadataImage40 = image4.getBrokerStreamsMetadata().get(brokerId0); + assertEquals(2, image4.getBrokerWALMetadata().size()); + BrokerS3WALMetadataImage brokerS3WALMetadataImage40 = image4.getBrokerWALMetadata().get(brokerId0); assertNotNull(brokerS3WALMetadataImage40); assertEquals(0, brokerS3WALMetadataImage40.getWalObjects().size()); - BrokerS3WALMetadataImage brokerS3WALMetadataImage41 = image4.getBrokerStreamsMetadata().get(brokerId1); + BrokerS3WALMetadataImage brokerS3WALMetadataImage41 = image4.getBrokerWALMetadata().get(brokerId1); assertNotNull(brokerS3WALMetadataImage41); assertEquals(3, brokerS3WALMetadataImage41.getWalObjects().size()); S3WALObject s3WalObject12 = brokerS3WALMetadataImage41.getWalObjects().get(0); @@ -330,7 +330,7 @@ public void testBasicChange() { // 6. split WALObject3 by streamId to StreamObject4 and StreamObject5 S3ObjectStreamIndex s3ObjectStreamIndex4 = new S3ObjectStreamIndex(streamId0, 0L, 200L); S3ObjectStreamIndex s3ObjectStreamIndex5 = new S3ObjectStreamIndex(streamId1, 0L, 300L); - StreamObjectRecord streamObjectRecord4 = new StreamObjectRecord() + S3StreamObjectRecord streamObjectRecord4 = new S3StreamObjectRecord() .setObjectId(4L) .setStreamId(streamId0) .setObjectSize(STREAM_OBJECT_SIZE) @@ -338,7 +338,7 @@ public void testBasicChange() { .setCreateTimeInMs(System.currentTimeMillis()) .setStartOffset(s3ObjectStreamIndex4.getStartOffset()) .setEndOffset(s3ObjectStreamIndex4.getEndOffset()); - StreamObjectRecord streamObjectRecord5 = new StreamObjectRecord() + S3StreamObjectRecord streamObjectRecord5 = new S3StreamObjectRecord() .setObjectId(5L) .setStreamId(streamId1) .setObjectSize(STREAM_OBJECT_SIZE) @@ -358,11 +358,11 @@ public void testBasicChange() { S3StreamsMetadataImage image5 = delta4.apply(); // check the image5 - assertEquals(2, 
image5.getBrokerStreamsMetadata().size()); - BrokerS3WALMetadataImage brokerS3WALMetadataImage50 = image5.getBrokerStreamsMetadata().get(brokerId0); + assertEquals(2, image5.getBrokerWALMetadata().size()); + BrokerS3WALMetadataImage brokerS3WALMetadataImage50 = image5.getBrokerWALMetadata().get(brokerId0); assertNotNull(brokerS3WALMetadataImage50); assertEquals(0, brokerS3WALMetadataImage50.getWalObjects().size()); - BrokerS3WALMetadataImage brokerS3WALMetadataImage51 = image5.getBrokerStreamsMetadata().get(brokerId1); + BrokerS3WALMetadataImage brokerS3WALMetadataImage51 = image5.getBrokerWALMetadata().get(brokerId1); assertNotNull(brokerS3WALMetadataImage51); assertEquals(0, brokerS3WALMetadataImage51.getWalObjects().size()); assertEquals(2, image5.getStreamsMetadata().size()); @@ -394,10 +394,10 @@ public void testBasicChange() { assertEquals(s3ObjectStreamIndex5, s3StreamObject5.getStreamIndex()); // 7. remove streamObject4 and remove stream1 - RemoveStreamObjectRecord removeStreamObjectRecord4 = new RemoveStreamObjectRecord() + RemoveS3StreamObjectRecord removeStreamObjectRecord4 = new RemoveS3StreamObjectRecord() .setObjectId(4L) .setStreamId(streamId0); - RemoveStreamRecord removeStreamRecord = new RemoveStreamRecord() + RemoveS3StreamRecord removeStreamRecord = new RemoveS3StreamRecord() .setStreamId(streamId1); records.clear(); records.add(new ApiMessageAndVersion(removeStreamObjectRecord4, (short) 0)); @@ -407,11 +407,11 @@ public void testBasicChange() { S3StreamsMetadataImage image6 = delta5.apply(); // check the image6 - assertEquals(2, image6.getBrokerStreamsMetadata().size()); - BrokerS3WALMetadataImage brokerS3WALMetadataImage60 = image6.getBrokerStreamsMetadata().get(brokerId0); + assertEquals(2, image6.getBrokerWALMetadata().size()); + BrokerS3WALMetadataImage brokerS3WALMetadataImage60 = image6.getBrokerWALMetadata().get(brokerId0); assertNotNull(brokerS3WALMetadataImage60); assertEquals(0, brokerS3WALMetadataImage60.getWalObjects().size()); - 
BrokerS3WALMetadataImage brokerS3WALMetadataImage61 = image6.getBrokerStreamsMetadata().get(brokerId1); + BrokerS3WALMetadataImage brokerS3WALMetadataImage61 = image6.getBrokerWALMetadata().get(brokerId1); assertNotNull(brokerS3WALMetadataImage61); assertEquals(0, brokerS3WALMetadataImage61.getWalObjects().size()); From e952e4b6bb3a6cfad219b68d34c758411ff4bae5 Mon Sep 17 00:00:00 2001 From: TheR1sing3un Date: Mon, 21 Aug 2023 11:33:55 +0800 Subject: [PATCH 09/10] refactor(s3): add comment to identify Kafka on S3 1. add comment to identify Kafka on S3 Signed-off-by: TheR1sing3un --- .../main/java/org/apache/kafka/common/protocol/ApiKeys.java | 4 ++-- .../src/main/java/org/apache/kafka/message/EntityType.java | 3 ++- .../src/main/java/org/apache/kafka/image/MetadataDelta.java | 2 ++ .../src/main/java/org/apache/kafka/image/MetadataImage.java | 4 ++++ 4 files changed, 10 insertions(+), 3 deletions(-) diff --git a/clients/src/main/java/org/apache/kafka/common/protocol/ApiKeys.java b/clients/src/main/java/org/apache/kafka/common/protocol/ApiKeys.java index 8672e2e785..477a15e1bc 100644 --- a/clients/src/main/java/org/apache/kafka/common/protocol/ApiKeys.java +++ b/clients/src/main/java/org/apache/kafka/common/protocol/ApiKeys.java @@ -110,13 +110,13 @@ public enum ApiKeys { LIST_TRANSACTIONS(ApiMessageType.LIST_TRANSACTIONS), ALLOCATE_PRODUCER_IDS(ApiMessageType.ALLOCATE_PRODUCER_IDS, true, true), - // stream start + // Kafka on S3 inject start CREATE_STREAM(ApiMessageType.CREATE_STREAM, false, true), DELETE_STREAM(ApiMessageType.DELETE_STREAM, false, true), OPEN_STREAM(ApiMessageType.OPEN_STREAM, false, true), CLOSE_STREAM(ApiMessageType.CLOSE_STREAM, false, true); - // stream end + // Kafka on S3 inject end private static final Map> APIS_BY_LISTENER = new EnumMap<>(ApiMessageType.ListenerType.class); diff --git a/generator/src/main/java/org/apache/kafka/message/EntityType.java b/generator/src/main/java/org/apache/kafka/message/EntityType.java index 
d2123a6874..be756481f1 100644 --- a/generator/src/main/java/org/apache/kafka/message/EntityType.java +++ b/generator/src/main/java/org/apache/kafka/message/EntityType.java @@ -39,9 +39,10 @@ public enum EntityType { @JsonProperty("brokerId") BROKER_ID(FieldType.Int32FieldType.INSTANCE), + // Kafka on S3 inject start @JsonProperty("streamId") STREAM_ID(Int64FieldType.INSTANCE); - + // Kafka on S3 inject end private final FieldType baseType; EntityType(FieldType baseType) { diff --git a/metadata/src/main/java/org/apache/kafka/image/MetadataDelta.java b/metadata/src/main/java/org/apache/kafka/image/MetadataDelta.java index 4a41b0bf6a..5fc776d9ed 100644 --- a/metadata/src/main/java/org/apache/kafka/image/MetadataDelta.java +++ b/metadata/src/main/java/org/apache/kafka/image/MetadataDelta.java @@ -230,6 +230,7 @@ public void replay(ApiMessage record) { case ZK_MIGRATION_STATE_RECORD: // TODO handle this break; + // Kafka on S3 inject start case S3_STREAM_RECORD: replay((S3StreamRecord) record); break; @@ -254,6 +255,7 @@ public void replay(ApiMessage record) { case REMOVE_WALOBJECT_RECORD: replay((RemoveWALObjectRecord) record); break; + // Kafka on S3 inject end default: throw new RuntimeException("Unknown metadata record type " + type); } diff --git a/metadata/src/main/java/org/apache/kafka/image/MetadataImage.java b/metadata/src/main/java/org/apache/kafka/image/MetadataImage.java index c7346a3d38..0df8b32f5d 100644 --- a/metadata/src/main/java/org/apache/kafka/image/MetadataImage.java +++ b/metadata/src/main/java/org/apache/kafka/image/MetadataImage.java @@ -57,8 +57,12 @@ public final class MetadataImage { private final AclsImage acls; + // Kafka on S3 inject start + private final S3StreamsMetadataImage streamMetadata; + // Kafka on S3 inject end + public MetadataImage( MetadataProvenance provenance, FeaturesImage features, From 52739d3921d17ad1ce5f74cc3fa6cf2263333dc9 Mon Sep 17 00:00:00 2001 From: TheR1sing3un Date: Mon, 21 Aug 2023 11:37:38 +0800 Subject: [PATCH 
10/10] refactor(s3): rename streams with streamObjects 1. rename streams with streamObjects Signed-off-by: TheR1sing3un --- .../controller/stream/StreamControlManager.java | 14 +++++++------- .../kafka/image/S3StreamMetadataDelta.java | 2 +- .../kafka/image/S3StreamMetadataImage.java | 16 ++++++++-------- .../kafka/image/S3StreamsMetadataDelta.java | 3 +-- .../kafka/image/S3StreamsMetadataImageTest.java | 10 +++++----- 5 files changed, 22 insertions(+), 23 deletions(-) diff --git a/metadata/src/main/java/org/apache/kafka/controller/stream/StreamControlManager.java b/metadata/src/main/java/org/apache/kafka/controller/stream/StreamControlManager.java index 13d19a7b57..9d586e8bcb 100644 --- a/metadata/src/main/java/org/apache/kafka/controller/stream/StreamControlManager.java +++ b/metadata/src/main/java/org/apache/kafka/controller/stream/StreamControlManager.java @@ -31,17 +31,17 @@ */ public class StreamControlManager { - static class StreamMetadata { + static class S3StreamMetadata { private Long streamId; - private Integer epoch; + private Long epoch; private Long startOffset; private TimelineHashSet ranges; - private TimelineHashSet s3StreamObjects; + private TimelineHashSet streamObjects; } - static class BrokerStreamMetadata { + static class BrokerS3WALMetadata { private Integer brokerId; - private TimelineHashSet s3WalObjects; + private TimelineHashSet walObjects; } private final SnapshotRegistry snapshotRegistry; @@ -50,9 +50,9 @@ static class BrokerStreamMetadata { private final S3ObjectControlManager s3ObjectControlManager; - private final TimelineHashMap streamsMetadata; + private final TimelineHashMap streamsMetadata; - private final TimelineHashMap brokersMetadata; + private final TimelineHashMap brokersMetadata; public StreamControlManager( SnapshotRegistry snapshotRegistry, diff --git a/metadata/src/main/java/org/apache/kafka/image/S3StreamMetadataDelta.java b/metadata/src/main/java/org/apache/kafka/image/S3StreamMetadataDelta.java index 
9bf59e53f7..e7867e4bb9 100644 --- a/metadata/src/main/java/org/apache/kafka/image/S3StreamMetadataDelta.java +++ b/metadata/src/main/java/org/apache/kafka/image/S3StreamMetadataDelta.java @@ -80,7 +80,7 @@ public S3StreamMetadataImage apply() { changedRanges.entrySet().stream().filter(entry -> !newRanges.containsKey(entry.getKey())) .forEach(entry -> newRanges.put(entry.getKey(), entry.getValue())); - List newS3StreamObjects = new ArrayList<>(image.getStreams()); + List newS3StreamObjects = new ArrayList<>(image.getStreamObjects()); // remove all removed stream-objects newS3StreamObjects.removeAll(removedS3StreamObjects); // add all changed stream-objects diff --git a/metadata/src/main/java/org/apache/kafka/image/S3StreamMetadataImage.java b/metadata/src/main/java/org/apache/kafka/image/S3StreamMetadataImage.java index 2a95eaa934..af30fad0c2 100644 --- a/metadata/src/main/java/org/apache/kafka/image/S3StreamMetadataImage.java +++ b/metadata/src/main/java/org/apache/kafka/image/S3StreamMetadataImage.java @@ -36,19 +36,19 @@ public class S3StreamMetadataImage { private final Map ranges; - private final List streams; + private final List streamObjects; public S3StreamMetadataImage( Long streamId, Long epoch, Long startOffset, Map ranges, - List streams) { + List streamObjects) { this.streamId = streamId; this.epoch = epoch; this.startOffset = startOffset; this.ranges = ranges; - this.streams = streams; + this.streamObjects = streamObjects; } public void write(ImageWriter writer, ImageWriterOptions options) { @@ -57,15 +57,15 @@ public void write(ImageWriter writer, ImageWriterOptions options) { .setEpoch(epoch) .setStartOffset(startOffset)); ranges.values().forEach(rangeMetadata -> writer.write(rangeMetadata.toRecord())); - streams.forEach(streamObject -> writer.write(streamObject.toRecord())); + streamObjects.forEach(streamObject -> writer.write(streamObject.toRecord())); } public Map getRanges() { return ranges; } - public List getStreams() { - return streams; + 
public List getStreamObjects() { + return streamObjects; } public Long getEpoch() { @@ -90,11 +90,11 @@ public boolean equals(Object o) { } S3StreamMetadataImage that = (S3StreamMetadataImage) o; return Objects.equals(streamId, that.streamId) && Objects.equals(epoch, that.epoch) && Objects.equals(startOffset, - that.startOffset) && Objects.equals(ranges, that.ranges) && Objects.equals(streams, that.streams); + that.startOffset) && Objects.equals(ranges, that.ranges) && Objects.equals(streamObjects, that.streamObjects); } @Override public int hashCode() { - return Objects.hash(streamId, epoch, startOffset, ranges, streams); + return Objects.hash(streamId, epoch, startOffset, ranges, streamObjects); } } diff --git a/metadata/src/main/java/org/apache/kafka/image/S3StreamsMetadataDelta.java b/metadata/src/main/java/org/apache/kafka/image/S3StreamsMetadataDelta.java index 56ab889daf..f4ac0f58d6 100644 --- a/metadata/src/main/java/org/apache/kafka/image/S3StreamsMetadataDelta.java +++ b/metadata/src/main/java/org/apache/kafka/image/S3StreamsMetadataDelta.java @@ -29,7 +29,6 @@ import org.apache.kafka.common.metadata.RemoveWALObjectRecord; import org.apache.kafka.common.metadata.S3StreamRecord; import org.apache.kafka.common.metadata.S3StreamObjectRecord; -import org.apache.kafka.common.metadata.S3StreamRecord; import org.apache.kafka.common.metadata.WALObjectRecord; public final class S3StreamsMetadataDelta { @@ -62,7 +61,7 @@ public void replay(S3StreamRecord record) { S3StreamMetadataImage s3StreamMetadataImage = image.getStreamsMetadata().get(record.streamId()); delta = new S3StreamMetadataDelta( new S3StreamMetadataImage(record.streamId(), record.epoch(), record.startOffset(), s3StreamMetadataImage.getRanges(), - s3StreamMetadataImage.getStreams())); + s3StreamMetadataImage.getStreamObjects())); } // add the delta to the changedStreams changedStreams.put(record.streamId(), delta); diff --git 
a/metadata/src/test/java/org/apache/kafka/image/S3StreamsMetadataImageTest.java b/metadata/src/test/java/org/apache/kafka/image/S3StreamsMetadataImageTest.java index 92c9ff43fd..c79b390483 100644 --- a/metadata/src/test/java/org/apache/kafka/image/S3StreamsMetadataImageTest.java +++ b/metadata/src/test/java/org/apache/kafka/image/S3StreamsMetadataImageTest.java @@ -372,8 +372,8 @@ public void testBasicChange() { assertEquals(1, s3StreamMetadataImage50.getRanges().size()); assertEquals(1, s3StreamMetadataImage50.getEpoch()); assertEquals(0, s3StreamMetadataImage50.getStartOffset()); - assertEquals(1, s3StreamMetadataImage50.getStreams()); - S3StreamObject s3StreamObject4 = s3StreamMetadataImage50.getStreams().get(0); + assertEquals(1, s3StreamMetadataImage50.getStreamObjects()); + S3StreamObject s3StreamObject4 = s3StreamMetadataImage50.getStreamObjects().get(0); assertEquals(4L, s3StreamObject4.getObjectId()); assertEquals(STREAM_OBJECT_SIZE, s3StreamObject4.getObjectSize()); assertEquals(S3ObjectType.STREAM, s3StreamObject4.getObjectType()); @@ -385,8 +385,8 @@ public void testBasicChange() { assertEquals(1, s3StreamMetadataImage51.getRanges().size()); assertEquals(1, s3StreamMetadataImage51.getEpoch()); assertEquals(0, s3StreamMetadataImage51.getStartOffset()); - assertEquals(1, s3StreamMetadataImage51.getStreams()); - S3StreamObject s3StreamObject5 = s3StreamMetadataImage51.getStreams().get(0); + assertEquals(1, s3StreamMetadataImage51.getStreamObjects()); + S3StreamObject s3StreamObject5 = s3StreamMetadataImage51.getStreamObjects().get(0); assertEquals(5L, s3StreamObject5.getObjectId()); assertEquals(STREAM_OBJECT_SIZE, s3StreamObject5.getObjectSize()); assertEquals(S3ObjectType.STREAM, s3StreamObject5.getObjectType()); @@ -419,7 +419,7 @@ public void testBasicChange() { S3StreamMetadataImage s3StreamMetadataImage60 = image6.getStreamsMetadata().get(streamId0); assertNotNull(s3StreamMetadataImage60); assertEquals(1, s3StreamMetadataImage60.getRanges().size()); 
- assertEquals(0, s3StreamMetadataImage60.getStreams().size()); + assertEquals(0, s3StreamMetadataImage60.getStreamObjects().size()); }