Skip to content

Commit

Permalink
PHOENIX-3253 Make changes in various classes for method level paralle…
Browse files Browse the repository at this point in the history
…lization in BaseHBaseManagedTimeTableReuseIT
  • Loading branch information
JamesRTaylor committed Sep 15, 2016
1 parent 2c2b552 commit 27697b3
Show file tree
Hide file tree
Showing 10 changed files with 91 additions and 120 deletions.
Expand Up @@ -20,6 +20,8 @@
import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertEquals;


import java.sql.SQLException; import java.sql.SQLException;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Properties; import java.util.Properties;
import java.util.Set; import java.util.Set;


Expand Down Expand Up @@ -61,9 +63,10 @@ public synchronized void removeConnection(PhoenixConnection connection) throws S
@Override @Override
public void close() throws SQLException { public void close() throws SQLException {
try { try {
Set<PhoenixConnection> connections; Collection<PhoenixConnection> connections;
synchronized(this) { synchronized(this) {
connections = this.connections; // Make copy to prevent ConcurrentModificationException (TODO: figure out why this is necessary)
connections = new ArrayList<>(this.connections);
this.connections = Sets.newHashSet(); this.connections = Sets.newHashSet();
} }
SQLCloseables.closeAll(connections); SQLCloseables.closeAll(connections);
Expand Down
Expand Up @@ -34,7 +34,6 @@
import java.util.List; import java.util.List;


import org.apache.phoenix.util.QueryUtil; import org.apache.phoenix.util.QueryUtil;
import org.junit.Assert;
import org.junit.Test; import org.junit.Test;




Expand Down Expand Up @@ -506,16 +505,17 @@ private void testDeleteAllFromTable(boolean autoCommit) throws SQLException {


@Test @Test
public void testDeleteForTableWithRowTimestampColServer() throws Exception { public void testDeleteForTableWithRowTimestampColServer() throws Exception {
testDeleteForTableWithRowTimestampCol(true); String tableName = generateRandomString();
testDeleteForTableWithRowTimestampCol(true, tableName);
} }


@Test @Test
public void testDeleteForTableWithRowTimestampColClient() throws Exception { public void testDeleteForTableWithRowTimestampColClient() throws Exception {
testDeleteForTableWithRowTimestampCol(false); String tableName = generateRandomString();
testDeleteForTableWithRowTimestampCol(false, tableName);
} }


private void testDeleteForTableWithRowTimestampCol(boolean autoCommit) throws Exception { private void testDeleteForTableWithRowTimestampCol(boolean autoCommit, String tableName) throws Exception {
String tableName = "testDeleteForTableWithRowTimestampCol".toUpperCase();
try (Connection conn = DriverManager.getConnection(getUrl())) { try (Connection conn = DriverManager.getConnection(getUrl())) {
conn.setAutoCommit(autoCommit); conn.setAutoCommit(autoCommit);
Statement stm = conn.createStatement(); Statement stm = conn.createStatement();
Expand Down
Expand Up @@ -18,18 +18,17 @@
package org.apache.phoenix.end2end; package org.apache.phoenix.end2end;


import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES; import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNull; import static org.junit.Assert.assertNull;
import static org.junit.Assert.assertTrue;


import java.io.IOException; import java.io.IOException;
import java.sql.Connection; import java.sql.Connection;
import java.sql.DriverManager; import java.sql.DriverManager;
import java.util.Arrays; import java.util.Arrays;
import java.util.Collections; import java.util.HashSet;
import java.util.List;
import java.util.Properties; import java.util.Properties;
import java.util.Set;


import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.client.HTableInterface; import org.apache.hadoop.hbase.client.HTableInterface;
import org.apache.hadoop.hbase.client.Result; import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner; import org.apache.hadoop.hbase.client.ResultScanner;
Expand All @@ -53,7 +52,7 @@
import org.apache.phoenix.util.TestUtil; import org.apache.phoenix.util.TestUtil;
import org.junit.Test; import org.junit.Test;


import com.google.common.collect.Lists; import com.google.common.collect.Sets;


public class PhoenixRuntimeIT extends BaseHBaseManagedTimeTableReuseIT { public class PhoenixRuntimeIT extends BaseHBaseManagedTimeTableReuseIT {
private static void assertTenantIds(Expression e, HTableInterface htable, Filter filter, String[] tenantIds) throws IOException { private static void assertTenantIds(Expression e, HTableInterface htable, Filter filter, String[] tenantIds) throws IOException {
Expand All @@ -63,17 +62,15 @@ private static void assertTenantIds(Expression e, HTableInterface htable, Filter
ResultScanner scanner = htable.getScanner(scan); ResultScanner scanner = htable.getScanner(scan);
Result result = null; Result result = null;
ResultTuple tuple = new ResultTuple(); ResultTuple tuple = new ResultTuple();
List<String> actualTenantIds = Lists.newArrayListWithExpectedSize(tenantIds.length); Set<String> actualTenantIds = Sets.newHashSetWithExpectedSize(tenantIds.length);
List<String> expectedTenantIds = Arrays.asList(tenantIds); Set<String> expectedTenantIds = new HashSet<>(Arrays.asList(tenantIds));
while ((result = scanner.next()) != null) { while ((result = scanner.next()) != null) {
tuple.setResult(result); tuple.setResult(result);
e.evaluate(tuple, ptr); e.evaluate(tuple, ptr);
String tenantId = (String)PVarchar.INSTANCE.toObject(ptr); String tenantId = (String)PVarchar.INSTANCE.toObject(ptr);
actualTenantIds.add(tenantId == null ? "" : tenantId); actualTenantIds.add(tenantId == null ? "" : tenantId);
} }
// Need to sort because of salting assertTrue(actualTenantIds.containsAll(expectedTenantIds));
Collections.sort(actualTenantIds);
assertEquals(expectedTenantIds, actualTenantIds);
} }


@Test @Test
Expand All @@ -96,35 +93,34 @@ private static Filter getUserTableAndViewsFilter() {
} }


private void testGetTenantIdExpression(boolean isSalted) throws Exception { private void testGetTenantIdExpression(boolean isSalted) throws Exception {
//Have to delete metaData tables because BaseHBaseManagedTimeTableReuseIT doesn't delete them after each test case, and tenant list will create issues between test cases
deletePriorMetaData(HConstants.LATEST_TIMESTAMP, getUrl());

Connection conn = DriverManager.getConnection(getUrl()); Connection conn = DriverManager.getConnection(getUrl());
conn.setAutoCommit(true); conn.setAutoCommit(true);
String tableName = generateRandomString() ; String tableName = generateRandomString() ;
String sequenceName = generateRandomString() ; String sequenceName = generateRandomString();
String t1 = generateRandomString();
String t2 = t1 + generateRandomString(); // ensure bigger
conn.createStatement().execute("CREATE TABLE " + tableName + " (k1 VARCHAR NOT NULL, k2 VARCHAR, CONSTRAINT PK PRIMARY KEY(K1,K2)) MULTI_TENANT=true" + (isSalted ? ",SALT_BUCKETS=3" : "")); conn.createStatement().execute("CREATE TABLE " + tableName + " (k1 VARCHAR NOT NULL, k2 VARCHAR, CONSTRAINT PK PRIMARY KEY(K1,K2)) MULTI_TENANT=true" + (isSalted ? ",SALT_BUCKETS=3" : ""));
conn.createStatement().execute("CREATE SEQUENCE " + sequenceName); conn.createStatement().execute("CREATE SEQUENCE " + sequenceName);
conn.createStatement().execute("UPSERT INTO " + tableName + " VALUES('t1','x')"); conn.createStatement().execute("UPSERT INTO " + tableName + " VALUES('" + t1 + "','x')");
conn.createStatement().execute("UPSERT INTO " + tableName + " VALUES('t2','y')"); conn.createStatement().execute("UPSERT INTO " + tableName + " VALUES('" + t2 + "','y')");


Properties props = PropertiesUtil.deepCopy(TestUtil.TEST_PROPERTIES); Properties props = PropertiesUtil.deepCopy(TestUtil.TEST_PROPERTIES);
props.setProperty(PhoenixRuntime.TENANT_ID_ATTRIB, "t1"); props.setProperty(PhoenixRuntime.TENANT_ID_ATTRIB, t1);
Connection tsconn = DriverManager.getConnection(getUrl(), props); Connection tsconn = DriverManager.getConnection(getUrl(), props);
tsconn.createStatement().execute("CREATE SEQUENCE " + sequenceName); tsconn.createStatement().execute("CREATE SEQUENCE " + sequenceName);
Expression e1 = PhoenixRuntime.getTenantIdExpression(tsconn, PhoenixDatabaseMetaData.SYSTEM_SEQUENCE_NAME); Expression e1 = PhoenixRuntime.getTenantIdExpression(tsconn, PhoenixDatabaseMetaData.SYSTEM_SEQUENCE_NAME);
HTableInterface htable1 = tsconn.unwrap(PhoenixConnection.class).getQueryServices().getTable(PhoenixDatabaseMetaData.SYSTEM_SEQUENCE_NAME_BYTES); HTableInterface htable1 = tsconn.unwrap(PhoenixConnection.class).getQueryServices().getTable(PhoenixDatabaseMetaData.SYSTEM_SEQUENCE_NAME_BYTES);
assertTenantIds(e1, htable1, new FirstKeyOnlyFilter(), new String[] {"", "t1"} ); assertTenantIds(e1, htable1, new FirstKeyOnlyFilter(), new String[] {"", t1} );


String viewName = generateRandomString(); String viewName = generateRandomString();
tsconn.createStatement().execute("CREATE VIEW " + viewName + "(V1 VARCHAR) AS SELECT * FROM " + tableName); tsconn.createStatement().execute("CREATE VIEW " + viewName + "(V1 VARCHAR) AS SELECT * FROM " + tableName);
Expression e2 = PhoenixRuntime.getTenantIdExpression(tsconn, PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME); Expression e2 = PhoenixRuntime.getTenantIdExpression(tsconn, PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME);
HTableInterface htable2 = conn.unwrap(PhoenixConnection.class).getQueryServices().getTable(PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME_BYTES); HTableInterface htable2 = conn.unwrap(PhoenixConnection.class).getQueryServices().getTable(PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME_BYTES);
assertTenantIds(e2, htable2, getUserTableAndViewsFilter(), new String[] {"", "t1"} ); assertTenantIds(e2, htable2, getUserTableAndViewsFilter(), new String[] {"", t1} );


Expression e3 = PhoenixRuntime.getTenantIdExpression(conn, tableName); Expression e3 = PhoenixRuntime.getTenantIdExpression(conn, tableName);
HTableInterface htable3 = conn.unwrap(PhoenixConnection.class).getQueryServices().getTable(Bytes.toBytes(tableName)); HTableInterface htable3 = conn.unwrap(PhoenixConnection.class).getQueryServices().getTable(Bytes.toBytes(tableName));
assertTenantIds(e3, htable3, new FirstKeyOnlyFilter(), new String[] {"t1", "t2"} ); assertTenantIds(e3, htable3, new FirstKeyOnlyFilter(), new String[] {t1, t2} );


String basTableName = generateRandomString(); String basTableName = generateRandomString();
conn.createStatement().execute("CREATE TABLE " + basTableName + " (k1 VARCHAR PRIMARY KEY)"); conn.createStatement().execute("CREATE TABLE " + basTableName + " (k1 VARCHAR PRIMARY KEY)");
Expand All @@ -135,23 +131,21 @@ private void testGetTenantIdExpression(boolean isSalted) throws Exception {
tsconn.createStatement().execute("CREATE INDEX " + indexName1 + " ON " + viewName + "(V1)"); tsconn.createStatement().execute("CREATE INDEX " + indexName1 + " ON " + viewName + "(V1)");
Expression e5 = PhoenixRuntime.getTenantIdExpression(tsconn, indexName1); Expression e5 = PhoenixRuntime.getTenantIdExpression(tsconn, indexName1);
HTableInterface htable5 = tsconn.unwrap(PhoenixConnection.class).getQueryServices().getTable(Bytes.toBytes(MetaDataUtil.VIEW_INDEX_TABLE_PREFIX + tableName)); HTableInterface htable5 = tsconn.unwrap(PhoenixConnection.class).getQueryServices().getTable(Bytes.toBytes(MetaDataUtil.VIEW_INDEX_TABLE_PREFIX + tableName));
assertTenantIds(e5, htable5, new FirstKeyOnlyFilter(), new String[] {"t1"} ); assertTenantIds(e5, htable5, new FirstKeyOnlyFilter(), new String[] {t1} );


String indexName2 = generateRandomString(); String indexName2 = generateRandomString();
conn.createStatement().execute("CREATE INDEX " + indexName2 + " ON " + tableName + "(k2)"); conn.createStatement().execute("CREATE INDEX " + indexName2 + " ON " + tableName + "(k2)");
Expression e6 = PhoenixRuntime.getTenantIdExpression(conn, indexName2); Expression e6 = PhoenixRuntime.getTenantIdExpression(conn, indexName2);
HTableInterface htable6 = conn.unwrap(PhoenixConnection.class).getQueryServices().getTable(Bytes.toBytes(indexName2)); HTableInterface htable6 = conn.unwrap(PhoenixConnection.class).getQueryServices().getTable(Bytes.toBytes(indexName2));
assertTenantIds(e6, htable6, new FirstKeyOnlyFilter(), new String[] {"t1", "t2"} ); assertTenantIds(e6, htable6, new FirstKeyOnlyFilter(), new String[] {t1, t2} );


tableName = generateRandomString() + "BAR_" + (isSalted ? "SALTED" : "UNSALTED"); tableName = generateRandomString() + "BAR_" + (isSalted ? "SALTED" : "UNSALTED");
conn.createStatement().execute("CREATE TABLE " + tableName + " (k1 VARCHAR NOT NULL, k2 VARCHAR, CONSTRAINT PK PRIMARY KEY(K1,K2)) " + (isSalted ? "SALT_BUCKETS=3" : "")); conn.createStatement().execute("CREATE TABLE " + tableName + " (k1 VARCHAR NOT NULL, k2 VARCHAR, CONSTRAINT PK PRIMARY KEY(K1,K2)) " + (isSalted ? "SALT_BUCKETS=3" : ""));
conn.createStatement().execute("UPSERT INTO " + tableName + " VALUES('t1','x')"); conn.createStatement().execute("UPSERT INTO " + tableName + " VALUES('" + t1 + "','x')");
conn.createStatement().execute("UPSERT INTO " + tableName + " VALUES('t2','y')"); conn.createStatement().execute("UPSERT INTO " + tableName + " VALUES('" + t2 + "','y')");
Expression e7 = PhoenixRuntime.getFirstPKColumnExpression(conn, tableName); Expression e7 = PhoenixRuntime.getFirstPKColumnExpression(conn, tableName);
HTableInterface htable7 = conn.unwrap(PhoenixConnection.class).getQueryServices().getTable(Bytes.toBytes(tableName)); HTableInterface htable7 = conn.unwrap(PhoenixConnection.class).getQueryServices().getTable(Bytes.toBytes(tableName));
assertTenantIds(e7, htable7, new FirstKeyOnlyFilter(), new String[] {"t1", "t2"} ); assertTenantIds(e7, htable7, new FirstKeyOnlyFilter(), new String[] {t1, t2} );


} }


} }
Expand Up @@ -31,53 +31,29 @@
import java.sql.SQLException; import java.sql.SQLException;
import java.util.Arrays; import java.util.Arrays;
import java.util.Collection; import java.util.Collection;
import java.util.Map;
import java.util.Properties; import java.util.Properties;


import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.phoenix.compile.QueryPlan; import org.apache.phoenix.compile.QueryPlan;
import org.apache.phoenix.end2end.BaseHBaseManagedTimeTableReuseIT; import org.apache.phoenix.end2end.BaseHBaseManagedTimeTableReuseIT;
import org.apache.phoenix.end2end.Shadower;
import org.apache.phoenix.jdbc.PhoenixStatement; import org.apache.phoenix.jdbc.PhoenixStatement;
import org.apache.phoenix.query.QueryServices; import org.apache.phoenix.query.QueryServices;
import org.apache.phoenix.schema.PNameFactory; import org.apache.phoenix.schema.PNameFactory;
import org.apache.phoenix.util.QueryUtil; import org.apache.phoenix.util.QueryUtil;
import org.apache.phoenix.util.ReadOnlyProps;
import org.apache.phoenix.util.SchemaUtil;
import org.apache.phoenix.util.TestUtil;
import org.junit.BeforeClass;
import org.junit.Test; import org.junit.Test;
import org.junit.runner.RunWith; import org.junit.runner.RunWith;
import org.junit.runners.Parameterized; import org.junit.runners.Parameterized;
import org.junit.runners.Parameterized.Parameters; import org.junit.runners.Parameterized.Parameters;


import com.google.common.collect.Maps;

@RunWith(Parameterized.class) @RunWith(Parameterized.class)
public class ViewIndexIT extends BaseHBaseManagedTimeTableReuseIT { public class ViewIndexIT extends BaseHBaseManagedTimeTableReuseIT {


private String schemaName="TEST";
private boolean isNamespaceMapped; private boolean isNamespaceMapped;



@BeforeClass
@Shadower(classBeingShadowed = BaseHBaseManagedTimeTableReuseIT.class)
public static void doSetup() throws Exception {
Map<String,String> props = Maps.newHashMapWithExpectedSize(3);
// Drop the HBase table metadata for this test to confirm that view index table dropped
props.put(QueryServices.DROP_METADATA_ATTRIB, Boolean.toString(true));
// Must update config before starting server
setUpTestDriver(new ReadOnlyProps(props.entrySet().iterator()));
}

@Parameters(name = "isNamespaceMapped = {0}") @Parameters(name = "isNamespaceMapped = {0}")
public static Collection<Boolean> data() { public static Collection<Boolean> data() {
return Arrays.asList(true, false); return Arrays.asList(true, false);
} }


private void createBaseTable(String tableName, boolean multiTenant, Integer saltBuckets, String splits) private void createBaseTable(String schemaName, String tableName, boolean multiTenant, Integer saltBuckets, String splits)
throws SQLException { throws SQLException {
Connection conn = getConnection(); Connection conn = getConnection();
if (isNamespaceMapped) { if (isNamespaceMapped) {
Expand All @@ -104,9 +80,8 @@ private void createBaseTable(String tableName, boolean multiTenant, Integer salt
conn.close(); conn.close();
} }


public Connection getConnection() throws SQLException{ private Connection getConnection() throws SQLException{
Properties props = new Properties(); Properties props = new Properties();
props.put(QueryServices.DROP_METADATA_ATTRIB, Boolean.toString(true));
props.put(QueryServices.IS_NAMESPACE_MAPPING_ENABLED, Boolean.toString(isNamespaceMapped)); props.put(QueryServices.IS_NAMESPACE_MAPPING_ENABLED, Boolean.toString(isNamespaceMapped));
return DriverManager.getConnection(getUrl(),props); return DriverManager.getConnection(getUrl(),props);
} }
Expand All @@ -117,14 +92,13 @@ public ViewIndexIT(boolean isNamespaceMapped) {


@Test @Test
public void testDeleteViewIndexSequences() throws Exception { public void testDeleteViewIndexSequences() throws Exception {
String schemaName = generateRandomString();
String tableName = schemaName + "." + generateRandomString(); String tableName = schemaName + "." + generateRandomString();
String indexName = "IND_" + generateRandomString(); String indexName = "IND_" + generateRandomString();
String VIEW_NAME = "VIEW_" + generateRandomString(); String VIEW_NAME = "VIEW_" + generateRandomString();
TableName physicalTableName = SchemaUtil.getPhysicalTableName(tableName.getBytes(), isNamespaceMapped);
String viewIndexPhysicalTableName = physicalTableName.getNameAsString();
String viewName = schemaName + "." + VIEW_NAME; String viewName = schemaName + "." + VIEW_NAME;


createBaseTable(tableName, false, null, null); createBaseTable(schemaName, tableName, false, null, null);
Connection conn1 = getConnection(); Connection conn1 = getConnection();
Connection conn2 = getConnection(); Connection conn2 = getConnection();
conn1.createStatement().execute("CREATE VIEW " + viewName + " AS SELECT * FROM " + tableName); conn1.createStatement().execute("CREATE VIEW " + viewName + " AS SELECT * FROM " + tableName);
Expand All @@ -138,22 +112,19 @@ public void testDeleteViewIndexSequences() throws Exception {
// Check other format of sequence is not there as Sequences format is different for views/indexes created on // Check other format of sequence is not there as Sequences format is different for views/indexes created on
// tables which are namespace mapped and which are not. // tables which are namespace mapped and which are not.
verifySequence(null, seqName, seqSchemaName, false); verifySequence(null, seqName, seqSchemaName, false);
HBaseAdmin admin = driver.getConnectionQueryServices(getUrl(), TestUtil.TEST_PROPERTIES).getAdmin();
conn1.createStatement().execute("DROP VIEW " + viewName); conn1.createStatement().execute("DROP VIEW " + viewName);
conn1.createStatement().execute("DROP TABLE "+ tableName); conn1.createStatement().execute("DROP TABLE "+ tableName);
admin = driver.getConnectionQueryServices(getUrl(), TestUtil.TEST_PROPERTIES).getAdmin();
assertFalse("View index table should be deleted.", admin.tableExists(TableName.valueOf(viewIndexPhysicalTableName)));


verifySequence(null, sequenceName, sequenceSchemaName, false); verifySequence(null, sequenceName, sequenceSchemaName, false);

} }


@Test @Test
public void testMultiTenantViewLocalIndex() throws Exception { public void testMultiTenantViewLocalIndex() throws Exception {
String schemaName = generateRandomString();
String tableName = generateRandomString(); String tableName = generateRandomString();
String indexName = "IND_" + generateRandomString(); String indexName = "IND_" + generateRandomString();
String VIEW_NAME = "VIEW_" + generateRandomString(); String VIEW_NAME = "VIEW_" + generateRandomString();
createBaseTable(tableName, true, null, null); createBaseTable(schemaName, tableName, true, null, null);
Connection conn = DriverManager.getConnection(getUrl()); Connection conn = DriverManager.getConnection(getUrl());
PreparedStatement stmt = conn.prepareStatement( PreparedStatement stmt = conn.prepareStatement(
"UPSERT INTO " + tableName "UPSERT INTO " + tableName
Expand Down
Expand Up @@ -17,18 +17,18 @@
*/ */
package org.apache.phoenix.trace; package org.apache.phoenix.trace;


import static org.junit.Assert.assertEquals;
import static org.junit.Assert.fail;

import java.sql.Connection;
import java.util.Collection;

import org.apache.hadoop.metrics2.MetricsRecord; import org.apache.hadoop.metrics2.MetricsRecord;
import org.apache.phoenix.query.QueryServicesOptions; import org.apache.phoenix.query.QueryServicesOptions;
import org.apache.phoenix.trace.TraceReader.SpanInfo; import org.apache.phoenix.trace.TraceReader.SpanInfo;
import org.apache.phoenix.trace.TraceReader.TraceHolder; import org.apache.phoenix.trace.TraceReader.TraceHolder;
import org.junit.Test; import org.junit.Test;


import java.sql.Connection;
import java.util.Collection;

import static org.junit.Assert.assertEquals;
import static org.junit.Assert.fail;

/** /**
* Test that the logging sink stores the expected metrics/stats * Test that the logging sink stores the expected metrics/stats
*/ */
Expand All @@ -43,7 +43,8 @@ public class PhoenixTableMetricsWriterIT extends BaseTracingTestIT {
public void testCreatesTable() throws Exception { public void testCreatesTable() throws Exception {
PhoenixMetricsSink sink = new PhoenixMetricsSink(); PhoenixMetricsSink sink = new PhoenixMetricsSink();
Connection conn = getConnectionWithoutTracing(); Connection conn = getConnectionWithoutTracing();
sink.initForTesting(conn); String tableName = generateRandomString();
sink.initForTesting(conn, tableName);


// check for existence of the tracing table // check for existence of the tracing table
try { try {
Expand All @@ -57,7 +58,7 @@ public void testCreatesTable() throws Exception {


// initialize sink again, which should attempt to create the table, but not fail // initialize sink again, which should attempt to create the table, but not fail
try { try {
sink.initForTesting(conn); sink.initForTesting(conn, tableName);
} catch (Exception e) { } catch (Exception e) {
fail("Initialization shouldn't fail if table already exists!"); fail("Initialization shouldn't fail if table already exists!");
} }
Expand All @@ -73,7 +74,8 @@ public void writeMetrics() throws Exception {
// hook up a phoenix sink // hook up a phoenix sink
PhoenixMetricsSink sink = new PhoenixMetricsSink(); PhoenixMetricsSink sink = new PhoenixMetricsSink();
Connection conn = getConnectionWithoutTracing(); Connection conn = getConnectionWithoutTracing();
sink.initForTesting(conn); String tableName = generateRandomString();
sink.initForTesting(conn, tableName);


// create a simple metrics record // create a simple metrics record
long traceid = 987654; long traceid = 987654;
Expand All @@ -94,7 +96,7 @@ public void writeMetrics() throws Exception {


// make sure we only get the expected stat entry (matching the trace id), otherwise we could get the // make sure we only get the expected stat entry (matching the trace id), otherwise we could get the
// stats for the update as well // stats for the update as well
TraceReader reader = new TraceReader(conn); TraceReader reader = new TraceReader(conn, tableName);
Collection<TraceHolder> traces = reader.readAll(10); Collection<TraceHolder> traces = reader.readAll(10);
assertEquals("Wrong number of traces in the tracing table", 1, traces.size()); assertEquals("Wrong number of traces in the tracing table", 1, traces.size());


Expand Down

0 comments on commit 27697b3

Please sign in to comment.