Skip to content

Commit

Permalink
HBASE-19596 RegionMetrics/ServerMetrics/ClusterMetrics should apply t…
Browse files Browse the repository at this point in the history
…o all public classes
  • Loading branch information
chia7712 committed Jan 4, 2018
1 parent 2bd259b commit 8119acf
Show file tree
Hide file tree
Showing 83 changed files with 902 additions and 933 deletions.
Expand Up @@ -29,12 +29,20 @@


import org.apache.hbase.thirdparty.com.google.common.base.Preconditions; import org.apache.hbase.thirdparty.com.google.common.base.Preconditions;
import org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations; import org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations;

import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos; import org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos;
import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos; import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos;


@InterfaceAudience.Private @InterfaceAudience.Private
public final class RegionMetricsBuilder { public final class RegionMetricsBuilder {


public static List<RegionMetrics> toRegionMetrics(
AdminProtos.GetRegionLoadResponse regionLoadResponse) {
return regionLoadResponse.getRegionLoadsList().stream()
.map(RegionMetricsBuilder::toRegionMetrics).collect(Collectors.toList());
}

public static RegionMetrics toRegionMetrics(ClusterStatusProtos.RegionLoad regionLoadPB) { public static RegionMetrics toRegionMetrics(ClusterStatusProtos.RegionLoad regionLoadPB) {
return RegionMetricsBuilder return RegionMetricsBuilder
.newBuilder(regionLoadPB.getRegionSpecifier().getValue().toByteArray()) .newBuilder(regionLoadPB.getRegionSpecifier().getValue().toByteArray())
Expand Down
119 changes: 61 additions & 58 deletions hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java
Expand Up @@ -25,19 +25,19 @@
import java.util.List; import java.util.List;
import java.util.Map; import java.util.Map;
import java.util.Set; import java.util.Set;
import java.util.TreeMap;
import java.util.concurrent.Future; import java.util.concurrent.Future;
import java.util.regex.Pattern; import java.util.regex.Pattern;
import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.Abortable; import org.apache.hadoop.hbase.Abortable;
import org.apache.hadoop.hbase.CacheEvictionStats; import org.apache.hadoop.hbase.CacheEvictionStats;
import org.apache.hadoop.hbase.ClusterMetrics;
import org.apache.hadoop.hbase.ClusterMetrics.Option; import org.apache.hadoop.hbase.ClusterMetrics.Option;
import org.apache.hadoop.hbase.ClusterStatus; import org.apache.hadoop.hbase.ClusterStatus;
import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.NamespaceDescriptor; import org.apache.hadoop.hbase.NamespaceDescriptor;
import org.apache.hadoop.hbase.NamespaceNotFoundException; import org.apache.hadoop.hbase.NamespaceNotFoundException;
import org.apache.hadoop.hbase.RegionLoad; import org.apache.hadoop.hbase.RegionMetrics;
import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableExistsException; import org.apache.hadoop.hbase.TableExistsException;
import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.TableName;
Expand All @@ -56,7 +56,6 @@
import org.apache.hadoop.hbase.snapshot.RestoreSnapshotException; import org.apache.hadoop.hbase.snapshot.RestoreSnapshotException;
import org.apache.hadoop.hbase.snapshot.SnapshotCreationException; import org.apache.hadoop.hbase.snapshot.SnapshotCreationException;
import org.apache.hadoop.hbase.snapshot.UnknownSnapshotException; import org.apache.hadoop.hbase.snapshot.UnknownSnapshotException;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.Pair; import org.apache.hadoop.hbase.util.Pair;
import org.apache.yetus.audience.InterfaceAudience; import org.apache.yetus.audience.InterfaceAudience;


Expand Down Expand Up @@ -1452,95 +1451,84 @@ Future<Void> modifyTableAsync(TableDescriptor td)
* </pre> * </pre>
* @return cluster status * @return cluster status
* @throws IOException if a remote or network exception occurs * @throws IOException if a remote or network exception occurs
* @deprecated since 2.0 version and will be removed in 3.0 version.
* use {@link #getClusterMetrics()}
*/
@Deprecated
default ClusterStatus getClusterStatus() throws IOException {
return new ClusterStatus(getClusterMetrics());
}

/**
* Get whole cluster metrics, containing status about:
* <pre>
* hbase version
* cluster id
* primary/backup master(s)
* master's coprocessors
* live/dead regionservers
* balancer
* regions in transition
* </pre>
* @return cluster metrics
* @throws IOException if a remote or network exception occurs
*/ */
ClusterStatus getClusterStatus() throws IOException; default ClusterMetrics getClusterMetrics() throws IOException {
return getClusterMetrics(EnumSet.allOf(ClusterMetrics.Option.class));
}


/** /**
* Get cluster status with a set of {@link Option} to get desired status. * Get cluster status with a set of {@link Option} to get desired status.
* @return cluster status * @return cluster status
* @throws IOException if a remote or network exception occurs * @throws IOException if a remote or network exception occurs
*/ */
ClusterStatus getClusterStatus(EnumSet<Option> options) throws IOException; ClusterMetrics getClusterMetrics(EnumSet<Option> options) throws IOException;


/** /**
* @return current master server name * @return current master server name
* @throws IOException if a remote or network exception occurs * @throws IOException if a remote or network exception occurs
*/ */
default ServerName getMaster() throws IOException { default ServerName getMaster() throws IOException {
return getClusterStatus(EnumSet.of(Option.MASTER)).getMaster(); return getClusterMetrics(EnumSet.of(Option.MASTER)).getMasterName();
} }


/** /**
* @return current backup master list * @return current backup master list
* @throws IOException if a remote or network exception occurs * @throws IOException if a remote or network exception occurs
*/ */
default Collection<ServerName> getBackupMasters() throws IOException { default Collection<ServerName> getBackupMasters() throws IOException {
return getClusterStatus(EnumSet.of(Option.BACKUP_MASTERS)).getBackupMasters(); return getClusterMetrics(EnumSet.of(Option.BACKUP_MASTERS)).getBackupMasterNames();
} }


/** /**
* @return current live region servers list * @return current live region servers list
* @throws IOException if a remote or network exception occurs * @throws IOException if a remote or network exception occurs
*/ */
default Collection<ServerName> getRegionServers() throws IOException { default Collection<ServerName> getRegionServers() throws IOException {
return getClusterStatus(EnumSet.of(Option.LIVE_SERVERS)).getServers(); return getClusterMetrics(EnumSet.of(Option.LIVE_SERVERS)).getLiveServerMetrics().keySet();
} }


/** /**
* Get {@link RegionLoad} of all regions hosted on a regionserver. * Get {@link RegionMetrics} of all regions hosted on a regionserver.
* *
* @param serverName region server from which regionload is required. * @param serverName region server from which {@link RegionMetrics} is required.
* @return region load map of all regions hosted on a region server * @return a {@link RegionMetrics} list of all regions hosted on a region server
* @throws IOException if a remote or network exception occurs * @throws IOException if a remote or network exception occurs
* @deprecated since 2.0 version and will be removed in 3.0 version.
* use {@link #getRegionLoads(ServerName)}
*/ */
@Deprecated default List<RegionMetrics> getRegionMetrics(ServerName serverName) throws IOException {
default Map<byte[], RegionLoad> getRegionLoad(ServerName serverName) throws IOException { return getRegionMetrics(serverName, null);
return getRegionLoad(serverName, null);
} }


/** /**
* Get {@link RegionLoad} of all regions hosted on a regionserver. * Get {@link RegionMetrics} of all regions hosted on a regionserver for a table.
* *
* @param serverName region server from which regionload is required. * @param serverName region server from which {@link RegionMetrics} is required.
* @return a region load list of all regions hosted on a region server * @param tableName get {@link RegionMetrics} of regions belonging to the table
* @return region metrics map of all regions of a table hosted on a region server
* @throws IOException if a remote or network exception occurs * @throws IOException if a remote or network exception occurs
*/ */
default List<RegionLoad> getRegionLoads(ServerName serverName) throws IOException { List<RegionMetrics> getRegionMetrics(ServerName serverName,
return getRegionLoads(serverName, null); TableName tableName) throws IOException;
}

/**
* Get {@link RegionLoad} of all regions hosted on a regionserver for a table.
*
* @param serverName region server from which regionload is required.
* @param tableName get region load of regions belonging to the table
* @return region load map of all regions of a table hosted on a region server
* @throws IOException if a remote or network exception occurs
* @deprecated since 2.0 version and will be removed in 3.0 version.
* use {@link #getRegionLoads(ServerName, TableName)}
*/
@Deprecated
default Map<byte[], RegionLoad> getRegionLoad(ServerName serverName, TableName tableName)
throws IOException {
List<RegionLoad> regionLoads = getRegionLoads(serverName, tableName);
Map<byte[], RegionLoad> resultMap = new TreeMap<>(Bytes.BYTES_COMPARATOR);
for (RegionLoad regionLoad : regionLoads) {
resultMap.put(regionLoad.getName(), regionLoad);
}
return resultMap;
}

/**
* Get {@link RegionLoad} of all regions hosted on a regionserver for a table.
*
* @param serverName region server from which regionload is required.
* @param tableName get region load of regions belonging to the table
* @return region load map of all regions of a table hosted on a region server
* @throws IOException if a remote or network exception occurs
*/
List<RegionLoad> getRegionLoads(ServerName serverName, TableName tableName) throws IOException;


/** /**
* @return Configuration used by the instance. * @return Configuration used by the instance.
Expand Down Expand Up @@ -1772,11 +1760,26 @@ Future<Boolean> abortProcedureAsync(
void rollWALWriter(ServerName serverName) throws IOException, FailedLogCloseException; void rollWALWriter(ServerName serverName) throws IOException, FailedLogCloseException;


/** /**
* Helper that delegates to getClusterStatus().getMasterCoprocessors(). * Helper that delegates to getClusterMetrics().getMasterCoprocessorNames().
* @return an array of master coprocessors
* @see org.apache.hadoop.hbase.ClusterMetrics#getMasterCoprocessorNames()
* @deprecated since 2.0 version and will be removed in 3.0 version.
* use {@link #getMasterCoprocessorNames()}
*/
@Deprecated
default String[] getMasterCoprocessors() throws IOException {
return getMasterCoprocessorNames().stream().toArray(size -> new String[size]);
}

/**
* Helper that delegates to getClusterMetrics().getMasterCoprocessorNames().
* @return an array of master coprocessors * @return an array of master coprocessors
* @see org.apache.hadoop.hbase.ClusterStatus#getMasterCoprocessors() * @see org.apache.hadoop.hbase.ClusterMetrics#getMasterCoprocessorNames()
*/ */
String[] getMasterCoprocessors() throws IOException; default List<String> getMasterCoprocessorNames() throws IOException {
return getClusterMetrics(EnumSet.of(Option.MASTER_COPROCESSORS))
.getMasterCoprocessorNames();
}


/** /**
* Get the current compaction state of a table. It could be in a major compaction, a minor * Get the current compaction state of a table. It could be in a major compaction, a minor
Expand Down Expand Up @@ -2371,7 +2374,7 @@ void deleteTableSnapshots(Pattern tableNamePattern, Pattern snapshotNamePattern)
* @throws IOException * @throws IOException
*/ */
default int getMasterInfoPort() throws IOException { default int getMasterInfoPort() throws IOException {
return getClusterStatus(EnumSet.of(Option.MASTER_INFO_PORT)).getMasterInfoPort(); return getClusterMetrics(EnumSet.of(Option.MASTER_INFO_PORT)).getMasterInfoPort();
} }


/** /**
Expand Down Expand Up @@ -2617,7 +2620,7 @@ void clearCompactionQueues(ServerName serverName, Set<String> queues)
* @return List of dead region servers. * @return List of dead region servers.
*/ */
default List<ServerName> listDeadServers() throws IOException { default List<ServerName> listDeadServers() throws IOException {
return getClusterStatus(EnumSet.of(Option.DEAD_SERVERS)).getDeadServerNames(); return getClusterMetrics(EnumSet.of(Option.DEAD_SERVERS)).getDeadServerNames();
} }


/** /**
Expand Down
Expand Up @@ -18,7 +18,6 @@
package org.apache.hadoop.hbase.client; package org.apache.hadoop.hbase.client;


import com.google.protobuf.RpcChannel; import com.google.protobuf.RpcChannel;
import java.util.Arrays;
import java.util.Collection; import java.util.Collection;
import java.util.EnumSet; import java.util.EnumSet;
import java.util.List; import java.util.List;
Expand All @@ -28,10 +27,10 @@
import java.util.concurrent.CompletableFuture; import java.util.concurrent.CompletableFuture;
import java.util.function.Function; import java.util.function.Function;
import java.util.regex.Pattern; import java.util.regex.Pattern;
import org.apache.hadoop.hbase.ClusterMetrics;
import org.apache.hadoop.hbase.ClusterMetrics.Option; import org.apache.hadoop.hbase.ClusterMetrics.Option;
import org.apache.hadoop.hbase.ClusterStatus;
import org.apache.hadoop.hbase.NamespaceDescriptor; import org.apache.hadoop.hbase.NamespaceDescriptor;
import org.apache.hadoop.hbase.RegionLoad; import org.apache.hadoop.hbase.RegionMetrics;
import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.replication.TableCFs; import org.apache.hadoop.hbase.client.replication.TableCFs;
Expand Down Expand Up @@ -886,49 +885,51 @@ CompletableFuture<Void> recommissionRegionServer(ServerName server,
/** /**
* @return cluster status wrapped by {@link CompletableFuture} * @return cluster status wrapped by {@link CompletableFuture}
*/ */
CompletableFuture<ClusterStatus> getClusterStatus(); CompletableFuture<ClusterMetrics> getClusterMetrics();


/** /**
* @return cluster status wrapped by {@link CompletableFuture} * @return cluster status wrapped by {@link CompletableFuture}
*/ */
CompletableFuture<ClusterStatus> getClusterStatus(EnumSet<Option> options); CompletableFuture<ClusterMetrics> getClusterMetrics(EnumSet<Option> options);


/** /**
* @return current master server name wrapped by {@link CompletableFuture} * @return current master server name wrapped by {@link CompletableFuture}
*/ */
default CompletableFuture<ServerName> getMaster() { default CompletableFuture<ServerName> getMaster() {
return getClusterStatus(EnumSet.of(Option.MASTER)).thenApply(ClusterStatus::getMaster); return getClusterMetrics(EnumSet.of(Option.MASTER)).thenApply(ClusterMetrics::getMasterName);
} }


/** /**
* @return current backup master list wrapped by {@link CompletableFuture} * @return current backup master list wrapped by {@link CompletableFuture}
*/ */
default CompletableFuture<Collection<ServerName>> getBackupMasters() { default CompletableFuture<Collection<ServerName>> getBackupMasters() {
return getClusterStatus(EnumSet.of(Option.BACKUP_MASTERS)).thenApply(ClusterStatus::getBackupMasters); return getClusterMetrics(EnumSet.of(Option.BACKUP_MASTERS))
.thenApply(ClusterMetrics::getBackupMasterNames);
} }


/** /**
* @return current live region servers list wrapped by {@link CompletableFuture} * @return current live region servers list wrapped by {@link CompletableFuture}
*/ */
default CompletableFuture<Collection<ServerName>> getRegionServers() { default CompletableFuture<Collection<ServerName>> getRegionServers() {
return getClusterStatus(EnumSet.of(Option.LIVE_SERVERS)).thenApply(ClusterStatus::getServers); return getClusterMetrics(EnumSet.of(Option.LIVE_SERVERS))
.thenApply(cm -> cm.getLiveServerMetrics().keySet());
} }


/** /**
* @return a list of master coprocessors wrapped by {@link CompletableFuture} * @return a list of master coprocessors wrapped by {@link CompletableFuture}
*/ */
default CompletableFuture<List<String>> getMasterCoprocessors() { default CompletableFuture<List<String>> getMasterCoprocessorNames() {
return getClusterStatus(EnumSet.of(Option.MASTER_COPROCESSORS)) return getClusterMetrics(EnumSet.of(Option.MASTER_COPROCESSORS))
.thenApply(ClusterStatus::getMasterCoprocessors).thenApply(Arrays::asList); .thenApply(ClusterMetrics::getMasterCoprocessorNames);
} }


/** /**
* Get the info port of the current master if one is available. * Get the info port of the current master if one is available.
* @return master info port * @return master info port
*/ */
default CompletableFuture<Integer> getMasterInfoPort() { default CompletableFuture<Integer> getMasterInfoPort() {
return getClusterStatus(EnumSet.of(Option.MASTER_INFO_PORT)).thenApply( return getClusterMetrics(EnumSet.of(Option.MASTER_INFO_PORT)).thenApply(
ClusterStatus::getMasterInfoPort); ClusterMetrics::getMasterInfoPort);
} }


/** /**
Expand Down Expand Up @@ -978,19 +979,20 @@ default CompletableFuture<Integer> getMasterInfoPort() {
CompletableFuture<Void> clearCompactionQueues(ServerName serverName, Set<String> queues); CompletableFuture<Void> clearCompactionQueues(ServerName serverName, Set<String> queues);


/** /**
 * Get a list of {@link RegionLoad} of all regions hosted on a region server. * Get a list of {@link RegionMetrics} of all regions hosted on a region server.
* @param serverName * @param serverName
* @return a list of {@link RegionLoad} wrapped by {@link CompletableFuture} * @return a list of {@link RegionMetrics} wrapped by {@link CompletableFuture}
*/ */
CompletableFuture<List<RegionLoad>> getRegionLoads(ServerName serverName); CompletableFuture<List<RegionMetrics>> getRegionMetrics(ServerName serverName);


/** /**
 * Get a list of {@link RegionLoad} of all regions hosted on a region seerver for a table. * Get a list of {@link RegionMetrics} of all regions hosted on a region server for a table.
* @param serverName * @param serverName
* @param tableName * @param tableName
* @return a list of {@link RegionLoad} wrapped by {@link CompletableFuture} * @return a list of {@link RegionMetrics} wrapped by {@link CompletableFuture}
*/ */
CompletableFuture<List<RegionLoad>> getRegionLoads(ServerName serverName, TableName tableName); CompletableFuture<List<RegionMetrics>> getRegionMetrics(ServerName serverName,
TableName tableName);


/** /**
* Check whether master is in maintenance mode * Check whether master is in maintenance mode
Expand Down Expand Up @@ -1199,8 +1201,8 @@ <S, R> CompletableFuture<R> coprocessorService(Function<RpcChannel, S> stubMaker
* List all the dead region servers. * List all the dead region servers.
*/ */
default CompletableFuture<List<ServerName>> listDeadServers() { default CompletableFuture<List<ServerName>> listDeadServers() {
return this.getClusterStatus(EnumSet.of(Option.DEAD_SERVERS)) return this.getClusterMetrics(EnumSet.of(Option.DEAD_SERVERS))
.thenApply(ClusterStatus::getDeadServerNames); .thenApply(ClusterMetrics::getDeadServerNames);
} }


/** /**
Expand Down

0 comments on commit 8119acf

Please sign in to comment.