Skip to content
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Original file line number Diff line number Diff line change
Expand Up @@ -62,6 +62,7 @@
import org.apache.hadoop.hbase.regionserver.HStore;
import org.apache.hadoop.hbase.regionserver.HStoreFile;
import org.apache.hadoop.hbase.regionserver.RegionSplitPolicy;
import org.apache.hadoop.hbase.regionserver.RegionSplitRestriction;
import org.apache.hadoop.hbase.regionserver.StoreFileInfo;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.CommonFSUtils;
Expand Down Expand Up @@ -110,6 +111,21 @@ public SplitTableRegionProcedure(final MasterProcedureEnv env,
// we fail-fast on construction. There it skips the split with just a warning.
checkOnline(env, regionToSplit);
this.bestSplitRow = splitRow;
TableDescriptor tableDescriptor = env.getMasterServices().getTableDescriptors()
.get(getTableName());
Configuration conf = env.getMasterConfiguration();
if (hasBestSplitRow()) {
// Apply the split restriction for the table to the user-specified split point
RegionSplitRestriction splitRestriction =
RegionSplitRestriction.create(tableDescriptor, conf);
byte[] restrictedSplitRow = splitRestriction.getRestrictedSplitPoint(bestSplitRow);
if (!Bytes.equals(bestSplitRow, restrictedSplitRow)) {
LOG.warn("The specified split point {} violates the split restriction of the table. "
+ "Using {} as a split point.", Bytes.toStringBinary(bestSplitRow),
Bytes.toStringBinary(restrictedSplitRow));
bestSplitRow = restrictedSplitRow;
}
}
checkSplittable(env, regionToSplit);
final TableName table = regionToSplit.getTable();
final long rid = getDaughterRegionIdTimestamp(regionToSplit);
Expand All @@ -125,15 +141,14 @@ public SplitTableRegionProcedure(final MasterProcedureEnv env,
.setSplit(false)
.setRegionId(rid)
.build();
TableDescriptor htd = env.getMasterServices().getTableDescriptors().get(getTableName());
if(htd.getRegionSplitPolicyClassName() != null) {
if(tableDescriptor.getRegionSplitPolicyClassName() != null) {
// Since we don't have region reference here, creating the split policy instance without it.
// This can be used to invoke methods which don't require Region reference. This instantiation
// of a class on Master-side though it only makes sense on the RegionServer-side is
// for Phoenix Local Indexing. Refer HBASE-12583 for more information.
Class<? extends RegionSplitPolicy> clazz =
RegionSplitPolicy.getSplitPolicyClass(htd, env.getMasterConfiguration());
this.splitPolicy = ReflectionUtils.newInstance(clazz, env.getMasterConfiguration());
RegionSplitPolicy.getSplitPolicyClass(tableDescriptor, conf);
this.splitPolicy = ReflectionUtils.newInstance(clazz, conf);
}
}

Expand Down Expand Up @@ -167,6 +182,10 @@ public RegionInfo getDaughterTwoRI() {
return daughterTwoRI;
}

/** Returns true when a usable (non-null, non-empty) split row has been determined. */
private boolean hasBestSplitRow() {
return bestSplitRow != null && bestSplitRow.length > 0;
}

/**
* Check whether the region is splittable
* @param env MasterProcedureEnv
Expand Down Expand Up @@ -216,7 +235,7 @@ private void checkSplittable(final MasterProcedureEnv env,
throw e;
}

if (bestSplitRow == null || bestSplitRow.length == 0) {
if (!hasBestSplitRow()) {
throw new DoNotRetryIOException("Region not splittable because bestSplitPoint = null, " +
"maybe table is too small for auto split. For force split, try specifying split row");
}
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -37,7 +37,11 @@
* <code>userid_eventtype_eventid</code>, and use prefix delimiter _, this split policy
ensures that all rows starting with the same userid belong to the same region.
* @see KeyPrefixRegionSplitPolicy
*
* @deprecated since 2.4.3 and will be removed in 4.0.0. Use {@link RegionSplitRestriction},
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

You need to update this deprecation string to also include the applicable 2.3.x version number.

* instead.
*/
@Deprecated
@InterfaceAudience.Private
public class DelimitedKeyPrefixRegionSplitPolicy extends IncreasingToUpperBoundRegionSplitPolicy {

Expand Down
Original file line number Diff line number Diff line change
@@ -0,0 +1,84 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.regionserver;

import java.io.IOException;
import java.util.Arrays;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.yetus.audience.InterfaceAudience;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

/**
* A {@link RegionSplitRestriction} implementation that groups rows by a prefix of the row-key with
* a delimiter. Only the first delimiter for the row key will define the prefix of the row key that
* is used for grouping.
* <p>
* This ensures that a region is not split "inside" a prefix of a row key.
* I.e. rows can be co-located in a region by their prefix.
*
* As an example, if you have row keys delimited with <code>_</code>, like
 * <code>userid_eventtype_eventid</code>, and use prefix delimiter _, this split restriction
 * ensures that all rows starting with the same userid belong to the same region.
*/
@InterfaceAudience.Private
public class DelimitedKeyPrefixRegionSplitRestriction extends RegionSplitRestriction {
  private static final Logger LOG =
    LoggerFactory.getLogger(DelimitedKeyPrefixRegionSplitRestriction.class);

  public static final String DELIMITER_KEY =
    "hbase.regionserver.region.split_restriction.delimiter";

  // Byte form of the configured delimiter. Stays null when nothing was configured,
  // in which case this restriction passes split points through unchanged.
  private byte[] delimiter = null;

  @Override
  public void initialize(TableDescriptor tableDescriptor, Configuration conf) throws IOException {
    // The table-level setting takes precedence; fall back to the server configuration.
    String configured = tableDescriptor.getValue(DELIMITER_KEY);
    if (configured == null || configured.isEmpty()) {
      configured = conf.get(DELIMITER_KEY);
    }
    if (configured == null || configured.isEmpty()) {
      LOG.error("{} not specified for table {}. Using the default RegionSplitRestriction",
        DELIMITER_KEY, tableDescriptor.getTableName());
      return;
    }
    delimiter = Bytes.toBytes(configured);
  }

  @Override
  public byte[] getRestrictedSplitPoint(byte[] splitPoint) {
    if (delimiter == null) {
      // No delimiter configured; nothing to restrict.
      return splitPoint;
    }
    // Locate the first occurrence of the delimiter within the proposed split point.
    int delimiterIndex = org.apache.hbase.thirdparty.com.google.common.primitives.Bytes.indexOf(
      splitPoint, delimiter);
    if (delimiterIndex < 0) {
      LOG.warn("Delimiter {} not found for split key {}", Bytes.toString(delimiter),
        Bytes.toStringBinary(splitPoint));
      return splitPoint;
    }
    // Truncate at the delimiter so all rows sharing the prefix stay in one region.
    return Arrays.copyOf(splitPoint, Math.min(delimiterIndex, splitPoint.length));
  }
}
Original file line number Diff line number Diff line change
Expand Up @@ -701,6 +701,7 @@ void sawNoSuchFamily() {

private TableDescriptor htableDescriptor = null;
private RegionSplitPolicy splitPolicy;
private RegionSplitRestriction splitRestriction;
private FlushPolicy flushPolicy;

private final MetricsRegion metricsRegion;
Expand Down Expand Up @@ -1044,6 +1045,9 @@ private long initializeRegionInternals(final CancelableProgressable reporter,
// Initialize split policy
this.splitPolicy = RegionSplitPolicy.create(this, conf);

// Initialize split restriction
splitRestriction = RegionSplitRestriction.create(getTableDescriptor(), conf);

// Initialize flush policy
this.flushPolicy = FlushPolicyFactory.create(this, conf);

Expand Down Expand Up @@ -8542,6 +8546,9 @@ public byte[] checkSplit() {
}

byte[] ret = splitPolicy.getSplitPoint();
if (ret != null && ret.length > 0) {
ret = splitRestriction.getRestrictedSplitPoint(ret);
}

if (ret != null) {
try {
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -29,7 +29,11 @@
*
* This ensures that a region is not split "inside" a prefix of a row key.
* I.e. rows can be co-located in a region by their prefix.
*
* @deprecated since 2.4.3 and will be removed in 4.0.0. Use {@link RegionSplitRestriction},
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

You need to update this deprecation string to also include the applicable 2.3.x version number.

* instead.
*/
@Deprecated
@InterfaceAudience.Private
public class KeyPrefixRegionSplitPolicy extends IncreasingToUpperBoundRegionSplitPolicy {
private static final Logger LOG = LoggerFactory
Expand Down
Original file line number Diff line number Diff line change
@@ -0,0 +1,76 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.regionserver;

import java.io.IOException;
import java.util.Arrays;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.yetus.audience.InterfaceAudience;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

/**
* A {@link RegionSplitRestriction} implementation that groups rows by a prefix of the row-key.
* <p>
* This ensures that a region is not split "inside" a prefix of a row key.
* I.e. rows can be co-located in a region by their prefix.
*/
@InterfaceAudience.Private
public class KeyPrefixRegionSplitRestriction extends RegionSplitRestriction {
  private static final Logger LOG =
    LoggerFactory.getLogger(KeyPrefixRegionSplitRestriction.class);

  public static final String PREFIX_LENGTH_KEY =
    "hbase.regionserver.region.split_restriction.prefix_length";

  // Number of leading bytes that form the grouping prefix; a value <= 0
  // (unset, unparsable, or invalid) disables the restriction.
  private int prefixLength;

  @Override
  public void initialize(TableDescriptor tableDescriptor, Configuration conf) throws IOException {
    // The table-level setting takes precedence; fall back to the server configuration.
    String configured = tableDescriptor.getValue(PREFIX_LENGTH_KEY);
    if (configured == null) {
      configured = conf.get(PREFIX_LENGTH_KEY);
    }
    if (configured == null) {
      LOG.error("{} not specified for table {}. Using the default RegionSplitRestriction",
        PREFIX_LENGTH_KEY, tableDescriptor.getTableName());
      return;
    }
    try {
      prefixLength = Integer.parseInt(configured);
    } catch (NumberFormatException ignored) {
      // Intentionally swallowed: prefixLength stays 0 and the check below reports it.
    }
    if (prefixLength <= 0) {
      LOG.error("Invalid value for {} for table {}:{}. Using the default RegionSplitRestriction",
        PREFIX_LENGTH_KEY, tableDescriptor.getTableName(), configured);
    }
  }

  @Override
  public byte[] getRestrictedSplitPoint(byte[] splitPoint) {
    if (prefixLength <= 0) {
      // Misconfigured or unset; behave like the default (no restriction).
      return splitPoint;
    }
    // Keep only the configured prefix so rows sharing it stay co-located.
    return Arrays.copyOf(splitPoint, Math.min(prefixLength, splitPoint.length));
  }
}
Original file line number Diff line number Diff line change
@@ -0,0 +1,40 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.regionserver;

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.yetus.audience.InterfaceAudience;

/**
* A {@link RegionSplitRestriction} implementation that does nothing.
*/
@InterfaceAudience.Private
public class NoRegionSplitRestriction extends RegionSplitRestriction {

  @Override
  public byte[] getRestrictedSplitPoint(byte[] splitPoint) {
    // Identity behavior: the proposed split point is returned as-is.
    return splitPoint;
  }

  @Override
  public void initialize(TableDescriptor tableDescriptor, Configuration conf) throws IOException {
    // Stateless; nothing to set up.
  }
}
Loading