PHOENIX-4440 Local index split/merge IT tests are failing (Rajeshbabu)
chrajeshbabu committed Mar 22, 2018
1 parent 8ee15d9 commit 31c674a
Showing 12 changed files with 174 additions and 54 deletions.

LocalIndexSplitMergeIT.java
@@ -52,7 +52,6 @@
 import com.google.common.collect.Maps;
 
 @Category(NeedsOwnMiniClusterTest.class)
-@Ignore
 public class LocalIndexSplitMergeIT extends BaseTest {
 
     @BeforeClass
@@ -265,4 +264,63 @@ public void testLocalIndexScanAfterRegionsMerge() throws Exception {
         }
     }
 
+    @Test
+    public void testLocalIndexScanWithMergeSpecialCase() throws Exception {
+        String schemaName = generateUniqueName();
+        String tableName = schemaName + "." + generateUniqueName();
+        String indexName = "IDX_" + generateUniqueName();
+        TableName physicalTableName = SchemaUtil.getPhysicalTableName(tableName.getBytes(), false);
+        createBaseTable(tableName, "('a','aaaab','def')");
+        Connection conn1 = getConnectionForLocalIndexTest();
+        try {
+            String[] strings =
+                    { "aa", "aaa", "aaaa", "bb", "cc", "dd", "dff", "g", "h", "i", "j", "k", "l",
+                            "m", "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z" };
+            for (int i = 0; i < 26; i++) {
+                conn1.createStatement()
+                        .execute("UPSERT INTO " + tableName + " values('" + strings[i] + "'," + i
+                                + "," + (i + 1) + "," + (i + 2) + ",'" + strings[25 - i] + "')");
+            }
+            conn1.commit();
+            conn1.createStatement()
+                    .execute("CREATE LOCAL INDEX " + indexName + " ON " + tableName + "(v1)");
+            conn1.createStatement()
+                    .execute("CREATE LOCAL INDEX " + indexName + "_2 ON " + tableName + "(k3)");
+
+            Admin admin = conn1.unwrap(PhoenixConnection.class).getQueryServices().getAdmin();
+            List<RegionInfo> regionsOfUserTable =
+                    MetaTableAccessor.getTableRegions(admin.getConnection(), physicalTableName,
+                        false);
+            admin.mergeRegionsAsync(regionsOfUserTable.get(0).getEncodedNameAsBytes(),
+                regionsOfUserTable.get(1).getEncodedNameAsBytes(), false);
+            regionsOfUserTable =
+                    MetaTableAccessor.getTableRegions(admin.getConnection(), physicalTableName,
+                        false);
+
+            while (regionsOfUserTable.size() != 3) {
+                Thread.sleep(100);
+                regionsOfUserTable =
+                        MetaTableAccessor.getTableRegions(admin.getConnection(), physicalTableName,
+                            false);
+            }
+            String query = "SELECT t_id,k1,v1 FROM " + tableName;
+            ResultSet rs = conn1.createStatement().executeQuery(query);
+            for (int j = 0; j < 26; j++) {
+                assertTrue(rs.next());
+                assertEquals(strings[25 - j], rs.getString("t_id"));
+                assertEquals(25 - j, rs.getInt("k1"));
+                assertEquals(strings[j], rs.getString("V1"));
+            }
+            query = "SELECT t_id,k1,k3 FROM " + tableName;
+            rs = conn1.createStatement().executeQuery(query);
+            for (int j = 0; j < 26; j++) {
+                assertTrue(rs.next());
+                assertEquals(strings[j], rs.getString("t_id"));
+                assertEquals(j, rs.getInt("k1"));
+                assertEquals(j + 2, rs.getInt("k3"));
+            }
+        } finally {
+            conn1.close();
+        }
+    }
 }
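
Note on the new test: Admin.mergeRegionsAsync returns before the merge completes, which is why the test polls MetaTableAccessor.getTableRegions until the region count reaches 3. As committed, the loop has no upper bound, so a merge that never completes would hang until the build's test timeout. A bounded variant is sketched below; waitForRegionCount is a hypothetical helper for illustration, not part of this commit.

    // Hypothetical helper (not in this commit): poll hbase:meta until the table
    // reaches the expected region count, failing fast after a timeout instead of
    // spinning forever.
    private static void waitForRegionCount(Admin admin, TableName table, int expected,
            long timeoutMs) throws Exception {
        long deadline = System.currentTimeMillis() + timeoutMs;
        List<RegionInfo> regions =
                MetaTableAccessor.getTableRegions(admin.getConnection(), table, false);
        while (regions.size() != expected) {
            if (System.currentTimeMillis() > deadline) {
                throw new AssertionError("Still " + regions.size() + " regions for " + table
                        + " after " + timeoutMs + " ms; expected " + expected);
            }
            Thread.sleep(100);
            regions = MetaTableAccessor.getTableRegions(admin.getConnection(), table, false);
        }
    }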

MutableIndexSplitForwardScanIT.java
@@ -19,8 +19,7 @@
 
 import org.junit.Test;
 
-//TODO: re-enable once PHOENIX-4273 is fixed
-public abstract class MutableIndexSplitForwardScanIT extends MutableIndexSplitIT {
+public class MutableIndexSplitForwardScanIT extends MutableIndexSplitIT {
 
     public MutableIndexSplitForwardScanIT(boolean localIndex, boolean multiTenant) {
         super(localIndex, multiTenant);

MutableIndexSplitReverseScanIT.java
@@ -19,8 +19,7 @@
 
 import org.junit.Test;
 
-//TODO: re-enable once PHOENIX-4273 is fixeds
-public abstract class MutableIndexSplitReverseScanIT extends MutableIndexSplitIT {
+public class MutableIndexSplitReverseScanIT extends MutableIndexSplitIT {
 
     public MutableIndexSplitReverseScanIT(boolean localIndex, boolean multiTenant) {
         super(localIndex, multiTenant);

RegionInfoUtil.java (new file)
@@ -0,0 +1,24 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.client;
+
+public class RegionInfoUtil {
+    public static byte[] toByteArray(RegionInfo regionInfo) {
+        return RegionInfo.toByteArray(regionInfo);
+    }
+}
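
Why this shim exists: HBase 2.0 replaced the concrete HRegionInfo class with the RegionInfo interface, and the serializer RegionInfo.toByteArray(RegionInfo) is not intended as public client API, so Phoenix wraps the one call it needs in a class that lives in the org.apache.hadoop.hbase.client package itself, presumably to confine the visibility-restricted call to a single, clearly marked spot. The call sites in IndexHalfStoreFileReaderGenerator below change accordingly (a sketch of the before/after shape, not additional committed code):

    // Before (HBase 1.x): cast to the concrete HRegionInfo class.
    byte[] regionBytes = ((HRegionInfo) region.getRegionInfo()).toByteArray();

    // After (this commit): delegate through the new shim.
    byte[] regionBytes = RegionInfoUtil.toByteArray(region.getRegionInfo());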

IndexHalfStoreFileReader.java
@@ -17,6 +17,8 @@
 */
 package org.apache.hadoop.hbase.regionserver;
 
+import static org.apache.phoenix.coprocessor.BaseScannerRegionObserver.SCAN_START_ROW_SUFFIX;
+
 import java.io.IOException;
 import java.util.Map;
 import java.util.concurrent.atomic.AtomicInteger;
@@ -25,13 +27,14 @@
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.CellUtil;
-import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.hbase.KeyValueUtil;
+import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.client.RegionInfo;
+import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.io.FSDataInputStreamWrapper;
 import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
 import org.apache.hadoop.hbase.io.Reference;
 import org.apache.hadoop.hbase.io.hfile.CacheConfig;
+import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.phoenix.index.IndexMaintainer;
 
 /**
@@ -58,8 +61,10 @@ public class IndexHalfStoreFileReader extends StoreFileReader {
     private final Map<ImmutableBytesWritable, IndexMaintainer> indexMaintainers;
     private final byte[][] viewConstants;
     private final int offset;
-    private final RegionInfo regionInfo;
+    private final RegionInfo childRegionInfo;
     private final byte[] regionStartKeyInHFile;
+    private final AtomicInteger refCount;
+    private final RegionInfo currentRegion;
 
     /**
      * @param fs
@@ -81,18 +86,21 @@ public IndexHalfStoreFileReader(final FileSystem fs, final Path p, final CacheCo
             final Configuration conf,
             final Map<ImmutableBytesWritable, IndexMaintainer> indexMaintainers,
             final byte[][] viewConstants, final RegionInfo regionInfo,
-            byte[] regionStartKeyInHFile, byte[] splitKey, boolean primaryReplicaStoreFile) throws IOException {
-        super(fs, p, in, size, cacheConf, primaryReplicaStoreFile, new AtomicInteger(0), false,
+            byte[] regionStartKeyInHFile, byte[] splitKey, boolean primaryReplicaStoreFile,
+            AtomicInteger refCount, RegionInfo currentRegion) throws IOException {
+        super(fs, p, in, size, cacheConf, primaryReplicaStoreFile, refCount, false,
                 conf);
         this.splitkey = splitKey == null ? r.getSplitKey() : splitKey;
         // Is it top or bottom half?
         this.top = Reference.isTopFileRegion(r.getFileRegion());
-        this.splitRow = CellUtil.cloneRow(KeyValueUtil.createKeyValueFromKey(splitkey));
+        this.splitRow = CellUtil.cloneRow(new KeyValue.KeyOnlyKeyValue(splitkey));
         this.indexMaintainers = indexMaintainers;
         this.viewConstants = viewConstants;
-        this.regionInfo = regionInfo;
+        this.childRegionInfo = regionInfo;
         this.regionStartKeyInHFile = regionStartKeyInHFile;
         this.offset = regionStartKeyInHFile.length;
+        this.refCount = refCount;
+        this.currentRegion = currentRegion;
     }
 
     public int getOffset() {
@@ -108,7 +116,7 @@ public Map<ImmutableBytesWritable, IndexMaintainer> getIndexMaintainers() {
     }
 
     public RegionInfo getRegionInfo() {
-        return regionInfo;
+        return childRegionInfo;
     }
 
     public byte[] getRegionStartKeyInHFile() {
@@ -131,7 +139,28 @@ public boolean isTop() {
     public StoreFileScanner getStoreFileScanner(boolean cacheBlocks, boolean pread,
             boolean isCompaction, long readPt, long scannerOrder,
             boolean canOptimizeForNonNullColumn) {
+        refCount.incrementAndGet();
         return new LocalIndexStoreFileScanner(this, cacheBlocks, pread, isCompaction, readPt,
                 scannerOrder, canOptimizeForNonNullColumn);
     }
+
+    @Override
+    public boolean passesKeyRangeFilter(Scan scan) {
+        if (scan.getAttribute(SCAN_START_ROW_SUFFIX) == null) {
+            // Scan from compaction.
+            return true;
+        }
+        byte[] startKey = currentRegion.getStartKey();
+        byte[] endKey = currentRegion.getEndKey();
+        // If the region start key is not a prefix of the scan start row, we can return an
+        // empty scanner. This happens during a merge, when a scan built for one of the
+        // pre-merge child regions runs against the merged region and should return no rows.
+        int prefixLength =
+                scan.getStartRow().length - scan.getAttribute(SCAN_START_ROW_SUFFIX).length;
+        if (Bytes.compareTo(scan.getStartRow(), 0, prefixLength,
+                (startKey.length == 0 ? new byte[endKey.length] : startKey), 0,
+                (startKey.length == 0 ? endKey.length : startKey.length)) != 0) {
+            return false;
+        }
+        return true;
+    }
 }
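
Two changes are worth calling out in this reader. First, it now shares a caller-supplied AtomicInteger reference count with its StoreFileReader superclass and increments it on every getStoreFileScanner call, so open scanners on a half store file can be tracked. Second, passesKeyRangeFilter carries the actual merge fix: a Phoenix local-index scan stores its index-key suffix in the SCAN_START_ROW_SUFFIX attribute, so stripping that suffix from the scan start row leaves the start key of the region the scan was planned for; if that prefix does not match the current region's start key, the scan belongs to the other pre-merge region and this file should yield nothing. A minimal standalone sketch of the prefix check, using plain arrays in place of the HBase types and skipping the committed code's special case for a first region with an empty start key:

    import java.util.Arrays;

    public class PrefixCheckSketch {
        // Does the scan's start row begin with this region's start key once the
        // index-scan suffix is stripped? Mirrors passesKeyRangeFilter above.
        static boolean regionOwnsScan(byte[] scanStartRow, byte[] startRowSuffix,
                byte[] regionStartKey) {
            int prefixLength = scanStartRow.length - startRowSuffix.length;
            return prefixLength == regionStartKey.length
                    && Arrays.equals(Arrays.copyOf(scanStartRow, prefixLength), regionStartKey);
        }

        public static void main(String[] args) {
            byte[] suffix = { 0x01, 0x02 };          // index-key suffix from the scan attribute
            byte[] scanStart = { 'm', 0x01, 0x02 };  // region start key + suffix
            System.out.println(regionOwnsScan(scanStart, suffix, new byte[] { 'm' })); // true
            System.out.println(regionOwnsScan(scanStart, suffix, new byte[] { 'a' })); // false: skip this file
        }
    }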

IndexHalfStoreFileReaderGenerator.java
@@ -22,6 +22,8 @@
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
+import java.util.Optional;
+import java.util.concurrent.atomic.AtomicInteger;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -30,18 +32,19 @@
 import org.apache.hadoop.hbase.CellUtil;
 import org.apache.hadoop.hbase.CompareOperator;
 import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.KeyValueUtil;
 import org.apache.hadoop.hbase.MetaTableAccessor;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.client.ConnectionFactory;
 import org.apache.hadoop.hbase.client.RegionInfo;
+import org.apache.hadoop.hbase.client.RegionInfoUtil;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.ResultScanner;
 import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.client.Table;
 import org.apache.hadoop.hbase.coprocessor.ObserverContext;
+import org.apache.hadoop.hbase.coprocessor.RegionCoprocessor;
 import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
 import org.apache.hadoop.hbase.coprocessor.RegionObserver;
 import org.apache.hadoop.hbase.filter.SingleColumnValueFilter;
@@ -68,11 +71,15 @@
 
 import com.google.common.collect.Lists;
 
-public class IndexHalfStoreFileReaderGenerator implements RegionObserver {
+public class IndexHalfStoreFileReaderGenerator implements RegionObserver, RegionCoprocessor {
 
     private static final String LOCAL_INDEX_AUTOMATIC_REPAIR = "local.index.automatic.repair";
     public static final Log LOG = LogFactory.getLog(IndexHalfStoreFileReaderGenerator.class);
 
+    @Override
+    public Optional<RegionObserver> getRegionObserver() {
+        return Optional.of(this);
+    }
+
     @Override
     public StoreFileReader preStoreFileReaderOpen(ObserverContext<RegionCoprocessorEnvironment> ctx,
@@ -82,7 +89,6 @@ public StoreFileReader preStoreFileReaderOpen(ObserverContext<RegionCoprocessorE
         Region region = ctx.getEnvironment().getRegion();
         RegionInfo childRegion = region.getRegionInfo();
         byte[] splitKey = null;
-
         if (reader == null && r != null) {
             if(!p.toString().contains(QueryConstants.LOCAL_INDEX_COLUMN_FAMILY_PREFIX)) {
                 return reader;
@@ -96,11 +102,11 @@ public StoreFileReader preStoreFileReaderOpen(ObserverContext<RegionCoprocessorE
                 SingleColumnValueFilter scvf = null;
                 if (Reference.isTopFileRegion(r.getFileRegion())) {
                     scvf = new SingleColumnValueFilter(HConstants.CATALOG_FAMILY,
-                            HConstants.SPLITB_QUALIFIER, CompareOperator.EQUAL, ((HRegionInfo)region.getRegionInfo()).toByteArray());
+                            HConstants.SPLITB_QUALIFIER, CompareOperator.EQUAL, RegionInfoUtil.toByteArray(region.getRegionInfo()));
                     scvf.setFilterIfMissing(true);
                 } else {
                     scvf = new SingleColumnValueFilter(HConstants.CATALOG_FAMILY,
-                            HConstants.SPLITA_QUALIFIER, CompareOperator.EQUAL, ((HRegionInfo)region.getRegionInfo()).toByteArray());
+                            HConstants.SPLITA_QUALIFIER, CompareOperator.EQUAL, RegionInfoUtil.toByteArray(region.getRegionInfo()));
                     scvf.setFilterIfMissing(true);
                 }
                 if(scvf != null) scan.setFilter(scvf);
@@ -168,7 +174,8 @@ public StoreFileReader preStoreFileReaderOpen(ObserverContext<RegionCoprocessorE
             return new IndexHalfStoreFileReader(fs, p, cacheConf, in, size, r, ctx
                     .getEnvironment().getConfiguration(), indexMaintainers, viewConstants,
                     childRegion, regionStartKeyInHFile, splitKey,
-                    childRegion.getReplicaId() == RegionInfo.DEFAULT_REPLICA_ID);
+                    childRegion.getReplicaId() == RegionInfo.DEFAULT_REPLICA_ID,
+                    new AtomicInteger(0), region.getRegionInfo());
         } catch (ClassNotFoundException e) {
             throw new IOException(e);
         } catch (SQLException e) {
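
The other change in this file tracks the HBase 2.0 coprocessor model: the framework no longer picks up a bare RegionObserver implementation; a coprocessor must implement RegionCoprocessor and hand its observer out through getRegionObserver(), which is exactly what the diff adds. A minimal skeleton of that shape is below; MyObserver is an illustrative name, not code from this commit.

    import java.util.Optional;

    import org.apache.hadoop.hbase.coprocessor.RegionCoprocessor;
    import org.apache.hadoop.hbase.coprocessor.RegionObserver;

    // Minimal HBase 2.x region observer skeleton (illustrative only).
    public class MyObserver implements RegionCoprocessor, RegionObserver {
        @Override
        public Optional<RegionObserver> getRegionObserver() {
            // The framework asks the coprocessor for its observer; returning
            // `this` lets one class play both roles.
            return Optional.of(this);
        }
        // RegionObserver hooks (e.g. preStoreFileReaderOpen) are overridden here.
    }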
