Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
43 commits
Select commit Hold shift + click to select a range
4ad03c8
HBASE-29082: Support for custom meta table name suffix (#6632)
kabhishek4 Mar 11, 2025
2ed18d7
HBASE-29083: Add global read-only mode to HBase (#6757)
sharmaar12 Apr 22, 2025
a8beb21
HBASE-29236: Add Support for Dynamic Configuration at the Coprocessor…
kgeisz May 29, 2025
0a7b45c
HBASE-29228 Add support to prevent running multiple active clusters (…
kabhishek4 Jun 3, 2025
8e4f705
HBASE-29291: Add a command to refresh/sync hbase:meta table (#7058)
Kota-SH Sep 11, 2025
56691db
HBASE-29328: Implement new HBase command: refresh_hfiles (#7149)
sharmaar12 Sep 15, 2025
8bdbb62
HBASE-29579: AssignmentManager is trying to pick up the other cluster…
Kota-SH Sep 19, 2025
97f337d
HBASE-29597 Supply meta table name for replica to the tests in TestMe…
kabhishek4 Sep 19, 2025
a8c7311
HBASE-29621: Remove the leading whitespace in the active.cluster.suff…
Kota-SH Sep 29, 2025
f4b7804
HBASE-29580: Clean-up hardcoded meta table names from log entries (#7…
kgeisz Sep 30, 2025
c37a90c
HBASE-29594: Add suffix to Master Region data directory (#7330)
Kota-SH Oct 20, 2025
fbc6e61
HBASE-29611: With FILE based SFT, the list of HFiles we maintain in .…
sharmaar12 Oct 27, 2025
446bca9
HBASE-29644: Refresh_meta triggering compaction on user table (#7385)
sharmaar12 Nov 13, 2025
f845a75
HBASE-29642 Active cluster file is not being updated after promoting …
kabhishek4 Nov 21, 2025
d6f454d
HBASE-29693: Implement the missing observer functions in the read-onl…
sharmaar12 Dec 8, 2025
1a4976e
HBASE-29715: AssignmentManager is trying to pick up the active cluste…
kgeisz Dec 15, 2025
4f08baa
HBASE-29778: Abort the retry operation if not allowed in read-only mo…
sharmaar12 Dec 18, 2025
38ef36a
HBASE-29779: Call super coprocessor instead of returning for system t…
sharmaar12 Jan 9, 2026
dbf093c
HBASE-29780: Addendum to HBASE-29715: Add an additional test case tha…
kgeisz Feb 4, 2026
900ce15
HBASE-29841: Split bulky ReadOnlyController into multiple smaller con…
sharmaar12 Feb 6, 2026
06cbbfb
HBASE-29756: Programmatically register related co-processor during in…
sharmaar12 Feb 27, 2026
33ff835
HBASE-29961 Secondary cluster is unable to replayWAL for meta (#7854)
sharmaar12 Mar 9, 2026
365e3f2
HBASE-29959 Cluster started in read-only mode mistakenly deletes suff…
sharmaar12 Mar 11, 2026
6150153
Build fix
anmolnar Mar 13, 2026
aa9dc68
Spotless apply
anmolnar Mar 13, 2026
a4b91da
Spotbugs check
anmolnar Mar 13, 2026
26b68a7
HBASE-29992: Implement regex check for configured replica suffix (#7923)
Kota-SH Mar 23, 2026
2b21a67
HBASE-30014: refresh_meta not working due to regionNode lock (#7974)
Kota-SH Mar 25, 2026
5a72893
HBASE-29960 java.lang.IllegalStateException: Should not call create w…
sharmaar12 Mar 26, 2026
b3675b1
HBASE-29958 Improve log messages (#7922)
sharmaar12 Mar 26, 2026
bcdd021
HBASE-29965: Unable to dynamically change readonly flag (#7964)
kgeisz Mar 30, 2026
b77465b
HBASE-29993. Refactor ClusterId/ActiveClusterSuffix reading and writi…
anmolnar Apr 8, 2026
ab3cf0d
HBASE-29081. Fix build errors
anmolnar Apr 8, 2026
07d825e
HBASE-30069:Instead of generic DoNotRetryIOException send specific Ex…
sharmaar12 Apr 10, 2026
f0ef8a6
HBASE-29081. Junit 5 fix
anmolnar Apr 10, 2026
9f129eb
HBASE-30080: Send exception to user when refreshing store file fails …
sharmaar12 Apr 14, 2026
6293a0e
Run spotless (#8083)
sharmaar12 Apr 14, 2026
4ca19d4
Update hbase-server/src/test/java/org/apache/hadoop/hbase/security/ac…
anmolnar Apr 15, 2026
d0cad05
Update hbase-client/src/main/java/org/apache/hadoop/hbase/ActiveClust…
anmolnar Apr 15, 2026
72dd3b6
Update hbase-server/src/main/java/org/apache/hadoop/hbase/regionserve…
anmolnar Apr 15, 2026
6f5624f
Update hbase-server/src/main/java/org/apache/hadoop/hbase/regionserve…
anmolnar Apr 15, 2026
0d662c6
HBASE-29081. Code review feedback
anmolnar Apr 15, 2026
eacf4d1
HBASE-30085: Migrate all unit tests to JUnit 5 (#8096)
sharmaar12 Apr 17, 2026
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Original file line number Diff line number Diff line change
Expand Up @@ -28,6 +28,7 @@
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.PathFilter;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.backup.util.BackupUtils;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.procedure2.store.wal.WALProcedureStore;
Expand Down Expand Up @@ -156,7 +157,7 @@ private List<String> getLogFilesForNewBackup(Map<String, Long> olderTimestamps,
LOG.debug("currentLogFile: " + log.getPath().toString());
if (AbstractFSWALProvider.isMetaFile(log.getPath())) {
if (LOG.isDebugEnabled()) {
LOG.debug("Skip hbase:meta log file: " + log.getPath().getName());
LOG.debug("Skip {} log file: {}", TableName.META_TABLE_NAME, log.getPath().getName());
}
continue;
}
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -204,6 +204,6 @@ private static boolean isHMasterWAL(Path path) {
String fn = path.getName();
return fn.startsWith(WALProcedureStore.LOG_PREFIX)
|| fn.endsWith(MasterRegionFactory.ARCHIVED_WAL_SUFFIX)
|| path.toString().contains("/%s/".formatted(MasterRegionFactory.MASTER_STORE_DIR));
|| path.toString().contains("/%s/".formatted(MasterRegionFactory.MASTER_REGION_DIR_NAME));
}
}
Original file line number Diff line number Diff line change
Expand Up @@ -170,7 +170,8 @@ private void processMetaRecord(Result result) throws IOException {
* Initialize the region assignment snapshot by scanning the hbase:meta table
*/
public void initialize() throws IOException {
LOG.info("Start to scan the hbase:meta for the current region assignment " + "snappshot");
LOG.info("Start to scan {} for the current region assignment snapshot",
TableName.META_TABLE_NAME);
// Scan hbase:meta to pick up user regions
try (Table metaTable = connection.getTable(TableName.META_TABLE_NAME);
ResultScanner scanner = metaTable.getScanner(HConstants.CATALOG_FAMILY)) {
Expand All @@ -187,7 +188,8 @@ public void initialize() throws IOException {
}
}
}
LOG.info("Finished to scan the hbase:meta for the current region assignment" + "snapshot");
LOG.info("Finished scanning {} for the current region assignment snapshot",
TableName.META_TABLE_NAME);
}

private void addRegion(RegionInfo regionInfo) {
Expand Down
Original file line number Diff line number Diff line change
@@ -0,0 +1,142 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase;

import java.io.IOException;
import java.util.Objects;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.exceptions.DeserializationException;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.yetus.audience.InterfaceAudience;

import org.apache.hbase.thirdparty.com.google.common.base.Strings;

import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ActiveClusterSuffixProtos;

/**
 * Identity of the active cluster in a read-replica deployment: the active cluster's id together
 * with its configured meta table suffix. It is serialized to the filesystem and up into zookeeper.
 * This class is a container for the (cluster id, suffix) pair and knows how to serialize and
 * deserialize itself, both via protobuf (with the pb magic prefix) and via the plain
 * {@code "clusterId:suffix"} string form produced by {@link #toString()}.
 */
@InterfaceAudience.Private
public class ActiveClusterSuffix implements ClusterIdFile {
  /** Id of the active cluster. */
  private final String clusterId;
  /** Meta table suffix of the active cluster; empty string when no suffix is configured. */
  private final String suffix;

  /** Parser for the active cluster suffix file. See {@link ClusterIdFileParser}. */
  public static class Parser implements ClusterIdFileParser<ActiveClusterSuffix> {

    @Override
    public String getFileName() {
      return HConstants.ACTIVE_CLUSTER_SUFFIX_FILE_NAME;
    }

    /**
     * Parse the serialized representation of the {@link ActiveClusterSuffix}
     * @param bytes A pb serialized {@link ActiveClusterSuffix} instance with pb magic prefix
     * @return An instance of {@link ActiveClusterSuffix} made from <code>bytes</code>
     * @see ActiveClusterSuffix#toByteArray()
     */
    @Override
    public ActiveClusterSuffix parseFrom(byte[] bytes) throws DeserializationException {
      if (ProtobufUtil.isPBMagicPrefix(bytes)) {
        int pblen = ProtobufUtil.lengthOfPBMagic();
        ActiveClusterSuffixProtos.ActiveClusterSuffix.Builder builder =
          ActiveClusterSuffixProtos.ActiveClusterSuffix.newBuilder();
        ActiveClusterSuffixProtos.ActiveClusterSuffix cs = null;
        try {
          ProtobufUtil.mergeFrom(builder, bytes, pblen, bytes.length - pblen);
          cs = builder.build();
        } catch (IOException e) {
          throw new DeserializationException(e);
        }
        return convert(cs);
      } else {
        // Presume it was written out this way, the old way.
        return new ActiveClusterSuffix(Bytes.toString(bytes));
      }
    }

    @Override
    public ActiveClusterSuffix readString(String input) {
      return new ActiveClusterSuffix(input);
    }
  }

  public ActiveClusterSuffix(final String clusterId, final String suffix) {
    this.clusterId = clusterId;
    this.suffix = suffix;
  }

  /**
   * Build from the string form {@code "clusterId:suffix"}. Anything after the first ':' is the
   * suffix; when no ':' is present the suffix is the empty string.
   */
  public ActiveClusterSuffix(final String input) {
    String[] parts = input.split(":", 2);
    this.clusterId = parts[0];
    if (parts.length > 1) {
      this.suffix = parts[1];
    } else {
      this.suffix = "";
    }
  }

  public static ActiveClusterSuffix parseFrom(byte[] bytes) throws DeserializationException {
    return new Parser().parseFrom(bytes);
  }

  /**
   * Build an instance from the running configuration: the given cluster id plus the configured
   * meta table suffix (default used when unset).
   */
  public static ActiveClusterSuffix fromConfig(Configuration conf, ClusterId clusterId) {
    return new ActiveClusterSuffix(clusterId.toString(), conf
      .get(HConstants.HBASE_META_TABLE_SUFFIX, HConstants.HBASE_META_TABLE_SUFFIX_DEFAULT_VALUE));
  }

  /** Returns The active cluster suffix serialized using pb w/ pb magic prefix */
  @Override
  public byte[] toByteArray() {
    return ProtobufUtil.prependPBMagic(convert().toByteArray());
  }

  /** Returns A pb instance to represent this instance. */
  public ActiveClusterSuffixProtos.ActiveClusterSuffix convert() {
    return ActiveClusterSuffixProtos.ActiveClusterSuffix.newBuilder().setClusterId(clusterId)
      .setSuffix(suffix).build();
  }

  /** Returns A {@link ActiveClusterSuffix} made from the passed in <code>cs</code> */
  public static ActiveClusterSuffix
    convert(final ActiveClusterSuffixProtos.ActiveClusterSuffix cs) {
    return new ActiveClusterSuffix(cs.getClusterId(), cs.getSuffix());
  }

  /**
   * Renders as {@code "clusterId:suffix"}, with {@code <blank>} standing in for an empty suffix.
   * NOTE(review): this form is NOT parseable back via {@link #ActiveClusterSuffix(String)} when
   * the suffix is empty (it would yield the literal suffix "&lt;blank&gt;") — display use only.
   * @see java.lang.Object#toString()
   */
  @Override
  public String toString() {
    return String.format("%s:%s", this.clusterId,
      Strings.isNullOrEmpty(this.suffix) ? "<blank>" : this.suffix);
  }

  @Override
  public boolean equals(Object o) {
    if (this == o) {
      return true;
    }
    if (o == null || getClass() != o.getClass()) {
      return false;
    }
    ActiveClusterSuffix that = (ActiveClusterSuffix) o;
    return Objects.equals(clusterId, that.clusterId) && Objects.equals(suffix, that.suffix);
  }

  @Override
  public int hashCode() {
    return Objects.hash(clusterId, suffix);
  }
}
64 changes: 39 additions & 25 deletions hbase-client/src/main/java/org/apache/hadoop/hbase/ClusterId.java
Original file line number Diff line number Diff line change
Expand Up @@ -31,9 +31,47 @@
* is a container for the id. Also knows how to serialize and deserialize the cluster id.
*/
@InterfaceAudience.Private
public class ClusterId {
public class ClusterId implements ClusterIdFile {
private final String id;

  /**
   * Parser for the cluster id file ({@code hbase.id}). Reads either the pb-serialized form
   * (with pb magic prefix) or the legacy raw-string form.
   * @see ClusterIdFileParser
   */
  public static class Parser implements ClusterIdFileParser<ClusterId> {

    @Override
    public String getFileName() {
      return HConstants.CLUSTER_ID_FILE_NAME;
    }

    /**
     * Parse the serialized representation of the {@link ClusterId}
     * @param bytes A pb serialized {@link ClusterId} instance with pb magic prefix
     * @return An instance of {@link ClusterId} made from <code>bytes</code>
     * @see #toByteArray()
     */
    @Override
    public ClusterId parseFrom(byte[] bytes) throws DeserializationException {
      if (ProtobufUtil.isPBMagicPrefix(bytes)) {
        int pblen = ProtobufUtil.lengthOfPBMagic();
        ClusterIdProtos.ClusterId.Builder builder = ClusterIdProtos.ClusterId.newBuilder();
        ClusterIdProtos.ClusterId cid = null;
        try {
          // Skip past the magic prefix, then merge the remaining pb bytes.
          ProtobufUtil.mergeFrom(builder, bytes, pblen, bytes.length - pblen);
          cid = builder.build();
        } catch (IOException e) {
          throw new DeserializationException(e);
        }
        return convert(cid);
      } else {
        // Presume it was written out this way, the old way.
        return new ClusterId(Bytes.toString(bytes));
      }
    }

    @Override
    public ClusterId readString(String input) {
      return new ClusterId(input);
    }
  }

/**
* New ClusterID. Generates a uniqueid.
*/
Expand All @@ -50,30 +88,6 @@ public byte[] toByteArray() {
return ProtobufUtil.prependPBMagic(convert().toByteArray());
}

/**
* Parse the serialized representation of the {@link ClusterId}
* @param bytes A pb serialized {@link ClusterId} instance with pb magic prefix
* @return An instance of {@link ClusterId} made from <code>bytes</code>
* @see #toByteArray()
*/
public static ClusterId parseFrom(final byte[] bytes) throws DeserializationException {
if (ProtobufUtil.isPBMagicPrefix(bytes)) {
int pblen = ProtobufUtil.lengthOfPBMagic();
ClusterIdProtos.ClusterId.Builder builder = ClusterIdProtos.ClusterId.newBuilder();
ClusterIdProtos.ClusterId cid = null;
try {
ProtobufUtil.mergeFrom(builder, bytes, pblen, bytes.length - pblen);
cid = builder.build();
} catch (IOException e) {
throw new DeserializationException(e);
}
return convert(cid);
} else {
// Presume it was written out this way, the old way.
return new ClusterId(Bytes.toString(bytes));
}
}

/** Returns A pb instance to represent this instance. */
public ClusterIdProtos.ClusterId convert() {
ClusterIdProtos.ClusterId.Builder builder = ClusterIdProtos.ClusterId.newBuilder();
Expand Down
Original file line number Diff line number Diff line change
@@ -0,0 +1,34 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase;

import org.apache.yetus.audience.InterfaceAudience;

/**
 * Represents the content of a cluster identification file on the master file system, e.g. the
 * cluster ID file ({@code hbase.id}) or the active read-replica cluster ID file
 * ({@code active.cluster.suffix.id}). Implementations know how to serialize themselves; the
 * corresponding {@link ClusterIdFileParser} handles deserialization.
 */
@InterfaceAudience.Private
public interface ClusterIdFile {

  /**
   * Return file contents in a byte array.
   */
  byte[] toByteArray();

}
Original file line number Diff line number Diff line change
@@ -0,0 +1,48 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase;

import org.apache.hadoop.hbase.exceptions.DeserializationException;
import org.apache.yetus.audience.InterfaceAudience;

/**
 * Generic parser interface for Cluster Id files. Each implementation pairs with a
 * {@link ClusterIdFile} type: the file serializes itself, the parser reads it back from either
 * its byte (protobuf) or plain-string representation.
 * @param <T> the concrete cluster id data type this parser produces
 * @see ClusterIdFile
 */
@InterfaceAudience.Private
public interface ClusterIdFileParser<T> {

  /**
   * Get default file name of cluster id file.
   */
  String getFileName();

  /**
   * Parse cluster id data from byte representation.
   * @param bytes the protobuf data
   * @return the cluster id data object
   * @throws DeserializationException if the bytes cannot be parsed
   */
  T parseFrom(final byte[] bytes) throws DeserializationException;

  /**
   * Parse cluster id data from String representation.
   * @param input the input string
   * @return the cluster id data object
   */
  T readString(String input);
}
Loading
Loading